From 671661758ca7767b3a312f34642501593103d067 Mon Sep 17 00:00:00 2001 From: Attila Uygun Date: Fri, 25 Dec 2020 00:22:41 +0100 Subject: [PATCH] Implement Vulkan renderer. --- README.md | 15 +- assets/engine/pass_through.glsl_fragment | 20 +- assets/engine/pass_through.glsl_vertex | 33 +- assets/engine/solid.glsl_fragment | 12 +- assets/engine/solid.glsl_vertex | 25 +- assets/sky.glsl_fragment | 42 +- assets/sky.glsl_vertex | 18 +- build/android/app/CMakeLists.txt | 54 +- build/linux/Makefile | 6 +- src/engine/BUILD.gn | 13 + src/engine/platform/platform_base.cc | 7 + src/engine/renderer/vulkan/renderer_vulkan.cc | 1710 +++++++++++++++++ src/engine/renderer/vulkan/renderer_vulkan.h | 257 +++ .../vulkan/renderer_vulkan_android.cc | 27 + .../renderer/vulkan/renderer_vulkan_linux.cc | 39 + src/engine/renderer/vulkan/vulkan_context.cc | 1397 ++++++++++++++ src/engine/renderer/vulkan/vulkan_context.h | 191 ++ .../renderer/vulkan/vulkan_context_android.cc | 39 + .../renderer/vulkan/vulkan_context_linux.cc | 43 + src/engine/shader_source.cc | 118 +- src/engine/shader_source.h | 7 +- src/third_party/android/vulkan_wrapper.cpp | 404 ++++ src/third_party/android/vulkan_wrapper.h | 236 +++ src/third_party/vma/vk_mem_alloc.cpp | 4 + 24 files changed, 4630 insertions(+), 87 deletions(-) create mode 100644 src/engine/renderer/vulkan/renderer_vulkan.cc create mode 100644 src/engine/renderer/vulkan/renderer_vulkan.h create mode 100644 src/engine/renderer/vulkan/renderer_vulkan_android.cc create mode 100644 src/engine/renderer/vulkan/renderer_vulkan_linux.cc create mode 100644 src/engine/renderer/vulkan/vulkan_context.cc create mode 100644 src/engine/renderer/vulkan/vulkan_context.h create mode 100644 src/engine/renderer/vulkan/vulkan_context_android.cc create mode 100644 src/engine/renderer/vulkan/vulkan_context_linux.cc create mode 100644 src/third_party/android/vulkan_wrapper.cpp create mode 100644 src/third_party/android/vulkan_wrapper.h diff --git a/README.md b/README.md index 
d5dfc5a..0b54d93 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,9 @@ -A simple, cross-platform 2D game engine with OpenGL renderer. Supports Linux and -Android (lolipop+) platforms. My personal hobby project. -I've published a game on [Google Play](https://play.google.com/store/apps/details?id=com.woom.game) based on the engine. The demo included in this repository is an early prototype of the game. +A simple, cross-platform 2D game engine with OpenGL and Vulkan renderers. +Supports Linux and Android (lolipop+) platforms. +This is a personal hobby project. I've published a little game on +[Google Play](https://play.google.com/store/apps/details?id=com.woom.game) +based on this engine. The demo included in this repository is an early prototype +of the game. #### Building the demo Linux: ```text @@ -24,4 +27,8 @@ ninja -C out/release [oboe](https://github.com/google/oboe), [stb](https://github.com/nothings/stb), [texture-compressor](https://github.com/auygun/kaliber/tree/master/src/third_party/texture_compressor), -[minizip](https://github.com/madler/zlib/tree/master/contrib/minizip) +[minizip](https://github.com/madler/zlib/tree/master/contrib/minizip), +[glslang](https://github.com/KhronosGroup/glslang), +[spirv-reflect](https://github.com/KhronosGroup/SPIRV-Reflect), +[vma](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator), +[vulkan-sdk](https://vulkan.lunarg.com) diff --git a/assets/engine/pass_through.glsl_fragment b/assets/engine/pass_through.glsl_fragment index 620d3ad..f65edce 100644 --- a/assets/engine/pass_through.glsl_fragment +++ b/assets/engine/pass_through.glsl_fragment @@ -2,11 +2,23 @@ precision mediump float; #endif -uniform vec4 color; -uniform sampler2D texture_0; +IN(0) vec2 tex_coord_0; -varying vec2 tex_coord_0; +UNIFORM_BEGIN + UNIFORM_V(vec2 scale) + UNIFORM_V(vec2 offset) + UNIFORM_V(vec2 rotation) + UNIFORM_V(vec2 tex_offset) + UNIFORM_V(vec2 tex_scale) + UNIFORM_V(mat4 projection) + UNIFORM_F(vec4 color) + 
UNIFORM_S(sampler2D texture_0) +UNIFORM_END + +SAMPLER(sampler2D texture_0) + +FRAG_COLOR_OUT(frag_color) void main() { - gl_FragColor = texture2D(texture_0, tex_coord_0) * color; + FRAG_COLOR(frag_color) = TEXTURE(texture_0, tex_coord_0) * PARAM(color); } diff --git a/assets/engine/pass_through.glsl_vertex b/assets/engine/pass_through.glsl_vertex index 06bca35..9b35eea 100644 --- a/assets/engine/pass_through.glsl_vertex +++ b/assets/engine/pass_through.glsl_vertex @@ -1,24 +1,27 @@ -attribute vec2 in_position; -attribute vec2 in_tex_coord_0; +IN(0) vec2 in_position; +IN(1) vec2 in_tex_coord_0; -uniform vec2 scale; -uniform vec2 offset; -uniform vec2 rotation; -uniform vec2 tex_offset; -uniform vec2 tex_scale; -uniform mat4 projection; +UNIFORM_BEGIN + UNIFORM_V(vec2 scale) + UNIFORM_V(vec2 offset) + UNIFORM_V(vec2 rotation) + UNIFORM_V(vec2 tex_offset) + UNIFORM_V(vec2 tex_scale) + UNIFORM_V(mat4 projection) + UNIFORM_F(vec4 color) +UNIFORM_END -varying vec2 tex_coord_0; +OUT(0) vec2 tex_coord_0; void main() { // Simple 2d transform. 
vec2 position = in_position; - position *= scale; - position = vec2(position.x * rotation.y + position.y * rotation.x, - position.y * rotation.y - position.x * rotation.x); - position += offset; + position *= PARAM(scale); + position = vec2(position.x * PARAM(rotation).y + position.y * PARAM(rotation).x, + position.y * PARAM(rotation).y - position.x * PARAM(rotation).x); + position += PARAM(offset); - tex_coord_0 = (in_tex_coord_0 + tex_offset) * tex_scale; + tex_coord_0 = (in_tex_coord_0 + PARAM(tex_offset)) * PARAM(tex_scale); - gl_Position = projection * vec4(position, 0.0, 1.0); + gl_Position = PARAM(projection) * vec4(position, 0.0, 1.0); } diff --git a/assets/engine/solid.glsl_fragment b/assets/engine/solid.glsl_fragment index e8fb622..9484b2d 100644 --- a/assets/engine/solid.glsl_fragment +++ b/assets/engine/solid.glsl_fragment @@ -2,8 +2,16 @@ precision mediump float; #endif -uniform vec4 color; +UNIFORM_BEGIN + UNIFORM_V(vec2 scale) + UNIFORM_V(vec2 offset) + UNIFORM_V(vec2 rotation) + UNIFORM_V(mat4 projection) + UNIFORM_F(vec4 color) +UNIFORM_END + +FRAG_COLOR_OUT(frag_color) void main() { - gl_FragColor = color; + FRAG_COLOR(frag_color) = PARAM(color); } diff --git a/assets/engine/solid.glsl_vertex b/assets/engine/solid.glsl_vertex index 5c939fd..b767bc3 100644 --- a/assets/engine/solid.glsl_vertex +++ b/assets/engine/solid.glsl_vertex @@ -1,18 +1,21 @@ -attribute vec2 in_position; -attribute vec2 in_tex_coord_0; +IN(0) vec2 in_position; +IN(1) vec2 in_tex_coord_0; -uniform vec2 scale; -uniform vec2 offset; -uniform vec2 rotation; -uniform mat4 projection; +UNIFORM_BEGIN + UNIFORM_V(vec2 scale) + UNIFORM_V(vec2 offset) + UNIFORM_V(vec2 rotation) + UNIFORM_V(mat4 projection) + UNIFORM_F(vec4 color) +UNIFORM_END void main() { // Simple 2d transform. 
vec2 position = in_position; - position *= scale; - position = vec2(position.x * rotation.y + position.y * rotation.x, - position.y * rotation.y - position.x * rotation.x); - position += offset; + position *= PARAM(scale); + position = vec2(position.x * PARAM(rotation).y + position.y * PARAM(rotation).x, + position.y * PARAM(rotation).y - position.x * PARAM(rotation).x); + position += PARAM(offset); - gl_Position = projection * vec4(position, 0.0, 1.0); + gl_Position = PARAM(projection) * vec4(position, 0.0, 1.0); } diff --git a/assets/sky.glsl_fragment b/assets/sky.glsl_fragment index 8a66224..a15a60d 100644 --- a/assets/sky.glsl_fragment +++ b/assets/sky.glsl_fragment @@ -1,24 +1,30 @@ #ifdef GL_ES -precision mediump float; +precision highp float; #else #define lowp #define mediump #define highp #endif -uniform highp vec2 sky_offset; -uniform vec3 nebula_color; +IN(0) vec2 tex_coord_0; -varying highp vec2 tex_coord_0; +UNIFORM_BEGIN + UNIFORM_V(vec2 scale) + UNIFORM_V(mat4 projection) + UNIFORM_F(vec2 sky_offset) + UNIFORM_F(vec3 nebula_color) +UNIFORM_END -float random(highp vec2 p) { - highp float sd = sin(dot(p, vec2(54.90898, 18.233))); +FRAG_COLOR_OUT(frag_color) + +float random(vec2 p) { + float sd = sin(dot(p, vec2(54.90898, 18.233))); return fract(sd * 2671.6182); } -float nebula(in highp vec2 p) { - highp vec2 i = floor(p); - highp vec2 f = fract(p); +float nebula(in vec2 p) { + vec2 i = floor(p); + vec2 f = fract(p); float a = random(i); float b = random(i + vec2(1.0, 0.0)); @@ -32,24 +38,24 @@ float nebula(in highp vec2 p) { (d - b) * u.x * u.y; } -float stars(in highp vec2 p, float num_cells, float size) { - highp vec2 n = p * num_cells; - highp vec2 i = floor(n); +float stars(in vec2 p, float num_cells, float size) { + vec2 n = p * num_cells; + vec2 i = floor(n); vec2 a = n - i - random(i); a /= num_cells * size; float e = dot(a, a); - return smoothstep(0.95, 1.0, (1.0 - e * 35.0)); + return smoothstep(0.94, 1.0, (1.0 - e * 35.0)); } void main() { - 
highp vec2 layer1_coord = tex_coord_0 + sky_offset; - highp vec2 layer2_coord = tex_coord_0 + sky_offset * 0.7; - vec3 result = vec3(0.); + vec2 layer1_coord = tex_coord_0 + PARAM(sky_offset); + vec2 layer2_coord = tex_coord_0 + PARAM(sky_offset) * 0.7; + mediump vec3 result = vec3(0.); float c = nebula(layer2_coord * 3.0) * 0.35 - 0.05; - result += nebula_color * floor(c * 60.0) / 60.0; + result += PARAM(nebula_color) * floor(c * 60.0) / 60.0; c = stars(layer1_coord, 8.0, 0.05); result += vec3(0.97, 0.74, 0.74) * c; @@ -57,5 +63,5 @@ void main() { c = stars(layer2_coord, 16.0, 0.025) * 0.5; result += vec3(0.9, 0.9, 0.95) * c; - gl_FragColor = vec4(result, 1.0); + FRAG_COLOR(frag_color) = vec4(result, 1.0); } diff --git a/assets/sky.glsl_vertex b/assets/sky.glsl_vertex index 143a112..275cde6 100644 --- a/assets/sky.glsl_vertex +++ b/assets/sky.glsl_vertex @@ -1,17 +1,21 @@ -attribute vec2 in_position; -attribute vec2 in_tex_coord_0; +IN(0) vec2 in_position; +IN(1) vec2 in_tex_coord_0; -uniform vec2 scale; -uniform mat4 projection; +UNIFORM_BEGIN + UNIFORM_V(vec2 scale) + UNIFORM_V(mat4 projection) + UNIFORM_F(vec2 sky_offset) + UNIFORM_F(vec3 nebula_color) +UNIFORM_END -varying vec2 tex_coord_0; +OUT(0) vec2 tex_coord_0; void main() { // Simple 2d transform. 
vec2 position = in_position; - position *= scale; + position *= PARAM(scale); tex_coord_0 = in_tex_coord_0; - gl_Position = projection * vec4(position, 0.0, 1.0); + gl_Position = PARAM(projection) * vec4(position, 0.0, 1.0); } diff --git a/build/android/app/CMakeLists.txt b/build/android/app/CMakeLists.txt index 6c58896..3a59605 100644 --- a/build/android/app/CMakeLists.txt +++ b/build/android/app/CMakeLists.txt @@ -36,9 +36,9 @@ add_library(native_app_glue STATIC # now build app's shared lib if (CMAKE_BUILD_TYPE MATCHES Debug) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall -Werror -D_DEBUG") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall -Werror -D_DEBUG -DVK_USE_PLATFORM_ANDROID_KHR") else () - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall -Werror") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall -Werror -DVK_USE_PLATFORM_ANDROID_KHR") endif () @@ -87,19 +87,69 @@ add_library(kaliber SHARED ../../../src/engine/renderer/renderer_types.cc ../../../src/engine/renderer/shader.cc ../../../src/engine/renderer/texture.cc + ../../../src/engine/renderer/vulkan/renderer_vulkan_android.cc + ../../../src/engine/renderer/vulkan/renderer_vulkan.cc + ../../../src/engine/renderer/vulkan/vulkan_context_android.cc + ../../../src/engine/renderer/vulkan/vulkan_context.cc ../../../src/engine/shader_source.cc ../../../src/engine/solid_quad.cc ../../../src/engine/sound_player.cc ../../../src/engine/sound.cc ../../../src/third_party/android/gl3stub.c ../../../src/third_party/android/GLContext.cpp + ../../../src/third_party/android/vulkan_wrapper.cpp + ../../../src/third_party/glslang/glslang/CInterface/glslang_c_interface.cpp + ../../../src/third_party/glslang/glslang/GenericCodeGen/CodeGen.cpp + ../../../src/third_party/glslang/glslang/GenericCodeGen/Link.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/attribute.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/Constant.cpp + 
../../../src/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/InfoSink.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/Initialize.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/Intermediate.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/intermOut.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/IntermTraverse.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/iomapper.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/limits.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/linkValidate.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/parseConst.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/ParseContextBase.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/ParseHelper.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/PoolAlloc.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/reflection.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/RemoveTree.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/Scan.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/ShaderLang.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/SymbolTable.cpp + ../../../src/third_party/glslang/glslang/MachineIndependent/Versions.cpp + 
../../../src/third_party/glslang/glslang/OSDependent/Unix/ossource.cpp + ../../../src/third_party/glslang/OGLCompilersDLL/InitializeDll.cpp + ../../../src/third_party/glslang/SPIRV/CInterface/spirv_c_interface.cpp + ../../../src/third_party/glslang/SPIRV/disassemble.cpp + ../../../src/third_party/glslang/SPIRV/doc.cpp + ../../../src/third_party/glslang/SPIRV/GlslangToSpv.cpp + ../../../src/third_party/glslang/SPIRV/InReadableOrder.cpp + ../../../src/third_party/glslang/SPIRV/Logger.cpp + ../../../src/third_party/glslang/SPIRV/SpvBuilder.cpp + ../../../src/third_party/glslang/SPIRV/SpvPostProcess.cpp + ../../../src/third_party/glslang/SPIRV/SPVRemapper.cpp + ../../../src/third_party/glslang/SPIRV/SpvTools.cpp + ../../../src/third_party/glslang/StandAlone/ResourceLimits.cpp ../../../src/third_party/jsoncpp/jsoncpp.cc ../../../src/third_party/minizip/ioapi.c ../../../src/third_party/minizip/unzip.c + ../../../src/third_party/spirv-reflect/spirv_reflect.c ../../../src/third_party/texture_compressor/dxt_encoder_internals.cc ../../../src/third_party/texture_compressor/dxt_encoder.cc ../../../src/third_party/texture_compressor/texture_compressor_etc1.cc ../../../src/third_party/texture_compressor/texture_compressor.cc + ../../../src/third_party/vma/vk_mem_alloc.cpp ) if (ANDROID_ABI STREQUAL armeabi-v7a) diff --git a/build/linux/Makefile b/build/linux/Makefile index 1e14d30..e180b78 100644 --- a/build/linux/Makefile +++ b/build/linux/Makefile @@ -106,12 +106,16 @@ GLTEST_SRC := \ $(SRC_ROOT)/engine/platform/platform_linux.cc \ $(SRC_ROOT)/engine/renderer/geometry.cc \ $(SRC_ROOT)/engine/renderer/opengl/render_command.cc \ - $(SRC_ROOT)/engine/renderer/render_resource.cc \ $(SRC_ROOT)/engine/renderer/opengl/renderer_opengl_linux.cc \ $(SRC_ROOT)/engine/renderer/opengl/renderer_opengl.cc \ + $(SRC_ROOT)/engine/renderer/render_resource.cc \ $(SRC_ROOT)/engine/renderer/renderer_types.cc \ $(SRC_ROOT)/engine/renderer/shader.cc \ $(SRC_ROOT)/engine/renderer/texture.cc \ + 
$(SRC_ROOT)/engine/renderer/vulkan/renderer_vulkan_linux.cc \ + $(SRC_ROOT)/engine/renderer/vulkan/renderer_vulkan.cc \ + $(SRC_ROOT)/engine/renderer/vulkan/vulkan_context_linux.cc \ + $(SRC_ROOT)/engine/renderer/vulkan/vulkan_context.cc \ $(SRC_ROOT)/engine/shader_source.cc \ $(SRC_ROOT)/engine/solid_quad.cc \ $(SRC_ROOT)/engine/sound_player.cc \ diff --git a/src/engine/BUILD.gn b/src/engine/BUILD.gn index 2f8474e..8f67541 100644 --- a/src/engine/BUILD.gn +++ b/src/engine/BUILD.gn @@ -51,6 +51,10 @@ source_set("engine") { "renderer/shader.h", "renderer/texture.cc", "renderer/texture.h", + "renderer/vulkan/renderer_vulkan_linux.cc", + "renderer/vulkan/renderer_vulkan.cc", + "renderer/vulkan/vulkan_context_linux.cc", + "renderer/vulkan/vulkan_context.cc", "shader_source.cc", "shader_source.h", "solid_quad.cc", @@ -61,6 +65,15 @@ source_set("engine") { "sound.h", ] + defines = [ + "VK_USE_PLATFORM_XLIB_KHR", + "VULKAN_NON_CMAKE_BUILD", + "SYSCONFDIR=\"/etc\"", + "FALLBACK_DATA_DIRS=\"/usr/local/share:/usr/share\"", + "FALLBACK_CONFIG_DIRS=\"/etc/xdg\"", + "HAVE_SECURE_GETENV", + ] + ldflags = [] libs = [] if (target_os == "linux") { diff --git a/src/engine/platform/platform_base.cc b/src/engine/platform/platform_base.cc index e46c9b2..97fd868 100644 --- a/src/engine/platform/platform_base.cc +++ b/src/engine/platform/platform_base.cc @@ -5,6 +5,9 @@ #include "../audio/audio.h" #include "../engine.h" #include "../renderer/opengl/renderer_opengl.h" +#include "../renderer/vulkan/renderer_vulkan.h" + +#define VULKAN_RENDERER using namespace base; @@ -28,7 +31,11 @@ void PlatformBase::Initialize() { throw internal_error; } +#if defined(VULKAN_RENDERER) + renderer_ = std::make_unique(); +#else renderer_ = std::make_unique(); +#endif } void PlatformBase::Shutdown() { diff --git a/src/engine/renderer/vulkan/renderer_vulkan.cc b/src/engine/renderer/vulkan/renderer_vulkan.cc new file mode 100644 index 0000000..7ec09cb --- /dev/null +++ 
b/src/engine/renderer/vulkan/renderer_vulkan.cc @@ -0,0 +1,1710 @@ +#include "renderer_vulkan.h" + +#include +#include +#include +#include +#include + +#include "../../../base/log.h" +#include "../../../base/vecmath.h" +#include "../../../third_party/glslang/SPIRV/GlslangToSpv.h" +#include "../../../third_party/glslang/StandAlone/ResourceLimits.h" +#include "../../../third_party/glslang/glslang/Include/Types.h" +#include "../../../third_party/glslang/glslang/Public/ShaderLang.h" +#include "../../../third_party/spirv-reflect/spirv_reflect.h" +#include "../../image.h" +#include "../../mesh.h" +#include "../../shader_source.h" +#include "../geometry.h" +#include "../shader.h" +#include "../texture.h" + +using namespace base; + +namespace { + +using VertexInputDescription = + std::tuple, + std::vector>; + +constexpr VkPrimitiveTopology kVkPrimitiveType[eng::kPrimitive_Max] = { + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, +}; + +constexpr VkFormat kVkDataType[eng::kDataType_Max][4] = { + { + VK_FORMAT_R8_UINT, + VK_FORMAT_R8G8_UINT, + VK_FORMAT_R8G8B8_UINT, + VK_FORMAT_R8G8B8A8_UINT, + }, + { + VK_FORMAT_R32_SFLOAT, + VK_FORMAT_R32G32_SFLOAT, + VK_FORMAT_R32G32B32_SFLOAT, + VK_FORMAT_R32G32B32A32_SFLOAT, + }, + { + VK_FORMAT_R32_SINT, + VK_FORMAT_R32G32_SINT, + VK_FORMAT_R32G32B32_SINT, + VK_FORMAT_R32G32B32A32_SINT, + }, + { + VK_FORMAT_R16_SINT, + VK_FORMAT_R16G16_SINT, + VK_FORMAT_R16G16B16_SINT, + VK_FORMAT_R16G16B16A16_SINT, + }, + { + VK_FORMAT_R32_UINT, + VK_FORMAT_R32G32_UINT, + VK_FORMAT_R32G32B32_UINT, + VK_FORMAT_R32G32B32A32_UINT, + }, + { + VK_FORMAT_R16_UINT, + VK_FORMAT_R16G16_UINT, + VK_FORMAT_R16G16B16_UINT, + VK_FORMAT_R16G16B16A16_UINT, + }, +}; + +constexpr size_t kMaxDescriptorsPerPool = 64; + +std::vector CompileGlsl(EShLanguage stage, + const char* source_code, + std::string* error) { + const int kClientInputSemanticsVersion = 100; // maps to #define VULKAN 100 + const int kDefaultVersion = 450; + + std::vector 
ret; + + glslang::EShTargetClientVersion vulkan_client_version = + glslang::EShTargetVulkan_1_0; + glslang::EShTargetLanguageVersion target_version = glslang::EShTargetSpv_1_0; + glslang::TShader::ForbidIncluder includer; + + glslang::TShader shader(stage); + const char* cs_strings = source_code; + + shader.setStrings(&cs_strings, 1); + shader.setEnvInput(glslang::EShSourceGlsl, stage, glslang::EShClientVulkan, + kClientInputSemanticsVersion); + shader.setEnvClient(glslang::EShClientVulkan, vulkan_client_version); + shader.setEnvTarget(glslang::EShTargetSpv, target_version); + + EShMessages messages = (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules); + std::string pre_processed_code; + + // Preprocess + if (!shader.preprocess(&glslang::DefaultTBuiltInResource, kDefaultVersion, + ENoProfile, false, false, messages, + &pre_processed_code, includer)) { + if (error) { + (*error) = "Failed pre-process:\n"; + (*error) += shader.getInfoLog(); + (*error) += "\n"; + (*error) += shader.getInfoDebugLog(); + } + + return ret; + } + cs_strings = pre_processed_code.c_str(); + shader.setStrings(&cs_strings, 1); + + // Parse + if (!shader.parse(&glslang::DefaultTBuiltInResource, kDefaultVersion, false, + messages)) { + if (error) { + (*error) = "Failed parse:\n"; + (*error) += shader.getInfoLog(); + (*error) += "\n"; + (*error) += shader.getInfoDebugLog(); + } + return ret; + } + + // link + glslang::TProgram program; + program.addShader(&shader); + + if (!program.link(messages)) { + if (error) { + (*error) = "Failed link:\n"; + (*error) += program.getInfoLog(); + (*error) += "\n"; + (*error) += program.getInfoDebugLog(); + } + + return ret; + } + + std::vector spirv; + spv::SpvBuildLogger logger; + glslang::SpvOptions spv_options; + glslang::GlslangToSpv(*program.getIntermediate(stage), spirv, &logger, + &spv_options); + + ret.resize(spirv.size() * sizeof(uint32_t)); + { + uint8_t* w = ret.data(); + memcpy(w, &spirv[0], spirv.size() * sizeof(uint32_t)); + } + + return ret; +} + 
+VertexInputDescription GetVertexInputDescription( + const eng::VertexDescripton& vd) { + unsigned vertex_offset = 0; + unsigned location = 0; + + std::vector attributes; + + for (auto& attr : vd) { + auto [attrib_type, data_type, num_elements, type_size] = attr; + + VkVertexInputAttributeDescription attribute; + attribute.location = location++; + attribute.binding = 0; + attribute.format = kVkDataType[data_type][num_elements - 1]; + attribute.offset = vertex_offset; + attributes.push_back(attribute); + + vertex_offset += num_elements * type_size; + } + + std::vector bindings(1); + bindings[0].binding = 0; + bindings[0].stride = vertex_offset; + bindings[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; + + return std::make_tuple(std::move(bindings), std::move(attributes)); +} + +} // namespace + +namespace eng { + +RendererVulkan::RendererVulkan() = default; + +RendererVulkan::~RendererVulkan() = default; + +// TODO: Support for index buffer. +void RendererVulkan::CreateGeometry(std::shared_ptr impl_data, + std::unique_ptr mesh) { + auto geometry = reinterpret_cast(impl_data.get()); + geometry->num_vertices = mesh->num_vertices(); + size_t data_size = mesh->GetVertexSize() * geometry->num_vertices; + + AllocateBuffer(geometry->buffer, data_size, + VK_BUFFER_USAGE_TRANSFER_SRC_BIT | + VK_BUFFER_USAGE_TRANSFER_DST_BIT | + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, + VMA_MEMORY_USAGE_GPU_ONLY); + + UpdateBuffer(std::get<0>(geometry->buffer), 0, mesh->GetVertices(), + data_size); + BufferMemoryBarrier( + std::get<0>(geometry->buffer), 0, data_size, + VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, + VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT); +} + +void RendererVulkan::DestroyGeometry(std::shared_ptr impl_data) { + auto geometry = reinterpret_cast(impl_data.get()); + FreeBuffer(std::move(geometry->buffer)); + geometry = {}; +} + +void RendererVulkan::Draw(std::shared_ptr impl_data) { + auto geometry = reinterpret_cast(impl_data.get()); + 
VkDeviceSize offset = 0; + vkCmdBindVertexBuffers(frames_[current_frame_].draw_command_buffer, 0, 1, + &std::get<0>(geometry->buffer), &offset); + vkCmdDraw(frames_[current_frame_].draw_command_buffer, geometry->num_vertices, + 1, 0, 0); +} + +void RendererVulkan::UpdateTexture(std::shared_ptr impl_data, + std::unique_ptr image) { + auto texture = reinterpret_cast(impl_data.get()); + VkImageLayout old_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + + if (texture->view != VK_NULL_HANDLE && + (texture->width != image->GetWidth() || + texture->height != image->GetHeight())) { + // Size mismatch. Recreate the texture. + FreeTexture(std::move(texture->image), texture->view, + std::move(texture->desc_set)); + *texture = {}; + } + + if (texture->view == VK_NULL_HANDLE) { + CreateTexture(texture->image, texture->view, texture->desc_set, + image->GetWidth(), image->GetHeight(), + VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, + VMA_MEMORY_USAGE_GPU_ONLY); + old_layout = VK_IMAGE_LAYOUT_UNDEFINED; + texture->width = image->GetWidth(); + texture->height = image->GetHeight(); + } + + ImageMemoryBarrier( + std::get<0>(texture->image), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, + VK_PIPELINE_STAGE_TRANSFER_BIT, 0, VK_ACCESS_TRANSFER_WRITE_BIT, + old_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); + UpdateImage(std::get<0>(texture->image), image->GetBuffer(), + image->GetWidth(), image->GetHeight()); + ImageMemoryBarrier(std::get<0>(texture->image), VK_ACCESS_TRANSFER_WRITE_BIT, + VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + 0, VK_ACCESS_SHADER_READ_BIT, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); +} + +void RendererVulkan::DestroyTexture(std::shared_ptr impl_data) { + auto texture = reinterpret_cast(impl_data.get()); + FreeTexture(std::move(texture->image), texture->view, + std::move(texture->desc_set)); + *texture = {}; +} + +void 
RendererVulkan::ActivateTexture(std::shared_ptr impl_data) { + auto texture = reinterpret_cast(impl_data.get()); + penging_descriptor_set_ = std::get<0>(texture->desc_set); +} + +void RendererVulkan::CreateShader(std::shared_ptr impl_data, + std::unique_ptr source, + const VertexDescripton& vertex_description, + Primitive primitive) { + auto shader = reinterpret_cast(impl_data.get()); + + VkShaderModule vert_shader_module; + { + // TODO: Reuse compiled spirv on context-lost. + std::string error; + shader->spirv_vertex = + CompileGlsl(EShLangVertex, source->GetVertexSource(), &error); + if (!error.empty()) + DLOG << source->name() << " vertex shader compile error: " << error; + + VkShaderModuleCreateInfo shader_module_info{}; + shader_module_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + shader_module_info.codeSize = shader->spirv_vertex.size(); + shader_module_info.pCode = + reinterpret_cast(shader->spirv_vertex.data()); + + if (vkCreateShaderModule(device_, &shader_module_info, nullptr, + &vert_shader_module) != VK_SUCCESS) { + DLOG << "vkCreateShaderModule failed!"; + return; + } + } + + VkShaderModule frag_shader_module; + { + // TODO: Reuse compiled spirv on context-lost. 
+ std::string error; + shader->spirv_fragment = + CompileGlsl(EShLangFragment, source->GetFragmentSource(), &error); + if (!error.empty()) + DLOG << source->name() << " fragment shader compile error: " << error; + + VkShaderModuleCreateInfo shader_module_info{}; + shader_module_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + shader_module_info.codeSize = shader->spirv_fragment.size(); + shader_module_info.pCode = + reinterpret_cast(shader->spirv_fragment.data()); + + if (vkCreateShaderModule(device_, &shader_module_info, nullptr, + &frag_shader_module) != VK_SUCCESS) { + DLOG << "vkCreateShaderModule failed!"; + return; + } + } + + if (!CreatePipelineLayout(shader)) + return; + + VkPipelineShaderStageCreateInfo vert_shader_stage_info{}; + vert_shader_stage_info.sType = + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + vert_shader_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT; + vert_shader_stage_info.module = vert_shader_module; + vert_shader_stage_info.pName = "main"; + + VkPipelineShaderStageCreateInfo frag_shader_stage_info{}; + frag_shader_stage_info.sType = + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + frag_shader_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT; + frag_shader_stage_info.module = frag_shader_module; + frag_shader_stage_info.pName = "main"; + + VkPipelineShaderStageCreateInfo shaderStages[] = {vert_shader_stage_info, + frag_shader_stage_info}; + + VertexInputDescription vertex_input = + GetVertexInputDescription(vertex_description); + + VkPipelineVertexInputStateCreateInfo vertex_input_info{}; + vertex_input_info.sType = + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + vertex_input_info.vertexBindingDescriptionCount = + std::get<0>(vertex_input).size(); + vertex_input_info.vertexAttributeDescriptionCount = + std::get<1>(vertex_input).size(); + vertex_input_info.pVertexBindingDescriptions = + std::get<0>(vertex_input).data(); + vertex_input_info.pVertexAttributeDescriptions = + 
std::get<1>(vertex_input).data(); + + VkPipelineInputAssemblyStateCreateInfo input_assembly{}; + input_assembly.sType = + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + input_assembly.topology = kVkPrimitiveType[primitive]; + input_assembly.primitiveRestartEnable = VK_FALSE; + + VkPipelineViewportStateCreateInfo viewport_state{}; + viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + viewport_state.viewportCount = 1; + viewport_state.pViewports = nullptr; + viewport_state.scissorCount = 1; + viewport_state.pScissors = nullptr; + + VkPipelineRasterizationStateCreateInfo rasterizer{}; + rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + rasterizer.depthClampEnable = VK_FALSE; + rasterizer.rasterizerDiscardEnable = VK_FALSE; + rasterizer.polygonMode = VK_POLYGON_MODE_FILL; + rasterizer.lineWidth = 1.0f; + rasterizer.cullMode = VK_CULL_MODE_NONE; + rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; + rasterizer.depthBiasEnable = VK_FALSE; + + VkPipelineMultisampleStateCreateInfo multisampling{}; + multisampling.sType = + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; + multisampling.sampleShadingEnable = VK_FALSE; + multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; + + VkPipelineColorBlendAttachmentState color_blend_attachment{}; + color_blend_attachment.colorWriteMask = + VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | + VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + color_blend_attachment.blendEnable = VK_TRUE; + color_blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; + color_blend_attachment.dstColorBlendFactor = + VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + color_blend_attachment.colorBlendOp = VK_BLEND_OP_ADD; + color_blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; + color_blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; + color_blend_attachment.alphaBlendOp = VK_BLEND_OP_ADD; + + VkPipelineColorBlendStateCreateInfo 
color_blending{}; + color_blending.sType = + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + color_blending.logicOpEnable = VK_FALSE; + color_blending.logicOp = VK_LOGIC_OP_COPY; + color_blending.attachmentCount = 1; + color_blending.pAttachments = &color_blend_attachment; + color_blending.blendConstants[0] = 0.0f; + color_blending.blendConstants[1] = 0.0f; + color_blending.blendConstants[2] = 0.0f; + color_blending.blendConstants[3] = 0.0f; + + std::vector dynamic_states; + dynamic_states.push_back(VK_DYNAMIC_STATE_VIEWPORT); + dynamic_states.push_back(VK_DYNAMIC_STATE_SCISSOR); + + VkPipelineDynamicStateCreateInfo dynamic_state_create_info; + dynamic_state_create_info.sType = + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + dynamic_state_create_info.pNext = nullptr; + dynamic_state_create_info.flags = 0; + dynamic_state_create_info.dynamicStateCount = dynamic_states.size(); + dynamic_state_create_info.pDynamicStates = dynamic_states.data(); + + VkGraphicsPipelineCreateInfo pipeline_info{}; + pipeline_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipeline_info.stageCount = 2; + pipeline_info.pStages = shaderStages; + pipeline_info.pVertexInputState = &vertex_input_info; + pipeline_info.pInputAssemblyState = &input_assembly; + pipeline_info.pViewportState = &viewport_state; + pipeline_info.pRasterizationState = &rasterizer; + pipeline_info.pMultisampleState = &multisampling; + pipeline_info.pColorBlendState = &color_blending; + pipeline_info.pDynamicState = &dynamic_state_create_info; + pipeline_info.layout = shader->pipeline_layout; + pipeline_info.renderPass = context_.GetRenderPass(); + pipeline_info.subpass = 0; + pipeline_info.basePipelineHandle = VK_NULL_HANDLE; + + if (vkCreateGraphicsPipelines(device_, VK_NULL_HANDLE, 1, &pipeline_info, + nullptr, &shader->pipeline) != VK_SUCCESS) + DLOG << "failed to create graphics pipeline."; + + vkDestroyShaderModule(device_, frag_shader_module, nullptr); + 
  // CreateShader() tail: the shader modules are owned by the pipeline once
  // vkCreateGraphicsPipelines has run, so the temporaries can be destroyed.
  vkDestroyShaderModule(device_, vert_shader_module, nullptr);
}

// Queues the shader's pipeline objects for deferred destruction; they are
// actually destroyed in FreePendingResources() once this frame is retired.
void RendererVulkan::DestroyShader(std::shared_ptr impl_data) {
  auto shader = reinterpret_cast(impl_data.get());
  frames_[current_frame_].pipelines_to_destroy.push_back(
      std::make_tuple(shader->pipeline, shader->pipeline_layout));
  // Clear the impl data so a second DestroyShader becomes a no-op.
  *shader = {};
}

// Binds the shader's pipeline (and its texture descriptor set, if it uses
// one) on the current frame's draw command buffer. Redundant binds are
// skipped by comparing against the cached active state.
void RendererVulkan::ActivateShader(std::shared_ptr impl_data) {
  auto shader = reinterpret_cast(impl_data.get());
  if (active_pipeline_ != shader->pipeline) {
    active_pipeline_ = shader->pipeline;
    vkCmdBindPipeline(frames_[current_frame_].draw_command_buffer,
                      VK_PIPELINE_BIND_POINT_GRAPHICS, shader->pipeline);
  }
  // NOTE(review): "penging_descriptor_set_" looks like a typo for
  // "pending_descriptor_set_"; the member is declared in the header, so the
  // rename must happen there first.
  if (shader->use_desc_set &&
      active_descriptor_set_ != penging_descriptor_set_) {
    active_descriptor_set_ = penging_descriptor_set_;

    vkCmdBindDescriptorSets(frames_[current_frame_].draw_command_buffer,
                            VK_PIPELINE_BIND_POINT_GRAPHICS,
                            shader->pipeline_layout, 0, 1,
                            &active_descriptor_set_, 0, nullptr);
  }
}

// SetUniform overloads: all uniforms are backed by a push-constant block, so
// each setter just writes the value into the shader's CPU-side buffer via
// SetUniformInternal(); UploadUniforms() pushes the block to the GPU.

void RendererVulkan::SetUniform(std::shared_ptr impl_data,
                                const std::string& name,
                                const base::Vector2& val) {
  auto shader = reinterpret_cast(impl_data.get());
  SetUniformInternal(shader, name, val);
}

void RendererVulkan::SetUniform(std::shared_ptr impl_data,
                                const std::string& name,
                                const base::Vector3& val) {
  auto shader = reinterpret_cast(impl_data.get());
  SetUniformInternal(shader, name, val);
}

void RendererVulkan::SetUniform(std::shared_ptr impl_data,
                                const std::string& name,
                                const base::Vector4& val) {
  auto shader = reinterpret_cast(impl_data.get());
  SetUniformInternal(shader, name, val);
}

void RendererVulkan::SetUniform(std::shared_ptr impl_data,
                                const std::string& name,
                                const base::Matrix4x4& val) {
  auto shader = reinterpret_cast(impl_data.get());
  SetUniformInternal(shader, name, val);
}

void RendererVulkan::SetUniform(std::shared_ptr impl_data,
                                const std::string& name,
                                float val) {
  auto shader = reinterpret_cast(impl_data.get());
  SetUniformInternal(shader, name, val);
}

void RendererVulkan::SetUniform(std::shared_ptr impl_data,
                                const std::string& name,
                                int val) {
  auto shader = reinterpret_cast(impl_data.get());
  // Unlike OpenGL, no need to set a uniform for sampler.
  if (name != shader->sampler_uniform_name)
    SetUniformInternal(shader, name, val);
}

// Pushes the shader's whole push-constant block to both the vertex and the
// fragment stage in one call.
void RendererVulkan::UploadUniforms(std::shared_ptr impl_data) {
  auto shader = reinterpret_cast(impl_data.get());
  vkCmdPushConstants(
      frames_[current_frame_].draw_command_buffer, shader->pipeline_layout,
      VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0,
      shader->push_constants_size, shader->push_constants.get());
}

void RendererVulkan::PrepareForDrawing() {
  context_.PrepareBuffers();
  DrawListBegin();
}

void RendererVulkan::Present() {
  DrawListEnd();
  SwapBuffers();
}

// One-time renderer setup: glslang, VMA allocator, per-frame command
// pools/buffers, staging buffers, the single texture descriptor-set layout
// and the shared sampler. Returns false on any unrecoverable failure.
bool RendererVulkan::InitializeInternal() {
  glslang::InitializeProcess();

  device_ = context_.GetDevice();

  // Allocate one extra frame to ensure it's unused at any time without having
  // to use a fence.
  int frame_count = context_.GetSwapchainImageCount() + 1;
  frames_.resize(frame_count);
  frames_drawn_ = frame_count;

  // Initialize allocator
  VmaAllocatorCreateInfo allocator_info;
  memset(&allocator_info, 0, sizeof(VmaAllocatorCreateInfo));
  allocator_info.physicalDevice = context_.GetPhysicalDevice();
  allocator_info.device = device_;
  allocator_info.instance = context_.GetInstance();
  vmaCreateAllocator(&allocator_info, &allocator_);

  for (int i = 0; i < frames_.size(); i++) {
    // Create command pool, one per frame is recommended.
+ VkCommandPoolCreateInfo cmd_pool_info; + cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; + cmd_pool_info.pNext = nullptr; + cmd_pool_info.queueFamilyIndex = context_.GetGraphicsQueue(); + cmd_pool_info.flags = 0; + + VkResult res = vkCreateCommandPool(device_, &cmd_pool_info, nullptr, + &frames_[i].command_pool); + if (res) { + DLOG << "vkCreateCommandPool failed with error " << std::to_string(res); + return false; + } + + // Create command buffers. + VkCommandBufferAllocateInfo cmdbuf_info; + cmdbuf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; + cmdbuf_info.pNext = nullptr; + cmdbuf_info.commandPool = frames_[i].command_pool; + cmdbuf_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; + cmdbuf_info.commandBufferCount = 1; + + VkResult err = vkAllocateCommandBuffers(device_, &cmdbuf_info, + &frames_[i].setup_command_buffer); + if (err) { + DLOG << "vkAllocateCommandBuffers failed with error " + << std::to_string(err); + continue; + } + + err = vkAllocateCommandBuffers(device_, &cmdbuf_info, + &frames_[i].draw_command_buffer); + if (err) { + DLOG << "vkAllocateCommandBuffers failed with error " + << std::to_string(err); + continue; + } + } + + // Begin the first command buffer for the first frame. + BeginFrame(); + + if (max_staging_buffer_size_ < staging_buffer_size_ * 4) + max_staging_buffer_size_ = staging_buffer_size_ * 4; + + current_staging_buffer_ = 0; + staging_buffer_used_ = false; + + for (int i = 0; i < frame_count; i++) { + bool err = InsertStagingBuffer(); + LOG_IF(!err) << "Failed to create staging buffer."; + } + + // In this simple engine we use only one descriptor set that is for textures. + // We use push contants for everything else. 
+ VkDescriptorSetLayoutBinding ds_layout_binding; + ds_layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + ds_layout_binding.descriptorCount = 1; + ds_layout_binding.binding = 0; + ds_layout_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; + ds_layout_binding.pImmutableSamplers = nullptr; + + VkDescriptorSetLayoutCreateInfo ds_layout_info; + ds_layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + ds_layout_info.pNext = nullptr; + ds_layout_info.flags = 0; + ds_layout_info.bindingCount = 1; + ds_layout_info.pBindings = &ds_layout_binding; + + VkResult res = vkCreateDescriptorSetLayout(device_, &ds_layout_info, nullptr, + &descriptor_set_layout_); + if (res) { + DLOG << "Error (" << std::to_string(res) + << ") creating descriptor set layout for set"; + return false; + } + + // Create sampler. + VkSamplerCreateInfo sampler_info; + sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; + sampler_info.pNext = nullptr; + sampler_info.flags = 0; + sampler_info.magFilter = VK_FILTER_LINEAR; + sampler_info.minFilter = VK_FILTER_LINEAR; + sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; + sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; + sampler_info.mipLodBias = 0; + sampler_info.anisotropyEnable = VK_FALSE; + sampler_info.maxAnisotropy = 0; + sampler_info.compareEnable = VK_FALSE; + sampler_info.compareOp = VK_COMPARE_OP_ALWAYS; + sampler_info.minLod = 0; + sampler_info.maxLod = 0; + sampler_info.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK; + sampler_info.unnormalizedCoordinates = VK_FALSE; + + res = vkCreateSampler(device_, &sampler_info, nullptr, &sampler_); + if (res) { + DLOG << "vkCreateSampler failed with error " << std::to_string(res); + return false; + } + + return true; +} + +void RendererVulkan::Shutdown() { + LOG << "Shutting down renderer."; + 
  // Shutdown() tail: wait for the GPU to go idle before tearing anything
  // down, then release per-frame and global resources in dependency order.
  vkDeviceWaitIdle(device_);

  for (int i = 0; i < frames_.size(); ++i) {
    FreePendingResources(i);
    vkDestroyCommandPool(device_, frames_[i].command_pool, nullptr);
  }

  for (int i = 0; i < staging_buffers_.size(); i++) {
    auto [buffer, allocation] = staging_buffers_[i].buffer;
    vmaDestroyBuffer(allocator_, buffer, allocation);
  }
  vmaDestroyAllocator(allocator_);

  vkDestroyDescriptorSetLayout(device_, descriptor_set_layout_, nullptr);
  vkDestroySampler(device_, sampler_, nullptr);

  device_ = VK_NULL_HANDLE;
  context_.DestroyWindow();
  context_.Shutdown();

  glslang::FinalizeProcess();
}

// Starts recording the current frame: frees resources queued by a previous
// use of this frame slot, resets its command pool and begins both the setup
// and the draw command buffers (registered with the context for submission).
void RendererVulkan::BeginFrame() {
  FreePendingResources(current_frame_);

  vkResetCommandPool(device_, frames_[current_frame_].command_pool, 0);

  VkCommandBufferBeginInfo cmdbuf_begin;
  cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
  cmdbuf_begin.pNext = nullptr;
  cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
  cmdbuf_begin.pInheritanceInfo = nullptr;

  VkResult err = vkBeginCommandBuffer(
      frames_[current_frame_].setup_command_buffer, &cmdbuf_begin);
  if (err) {
    DLOG << "vkBeginCommandBuffer failed with error " << std::to_string(err);
    return;
  }
  context_.AppendCommandBuffer(frames_[current_frame_].setup_command_buffer);

  err = vkBeginCommandBuffer(frames_[current_frame_].draw_command_buffer,
                             &cmdbuf_begin);
  if (err) {
    DLOG << "vkBeginCommandBuffer failed with error " << std::to_string(err);
    return;
  }
  context_.AppendCommandBuffer(frames_[current_frame_].draw_command_buffer);

  // Advance current frame.
  frames_drawn_++;

  // Advance staging buffer if used.
  if (staging_buffer_used_) {
    current_staging_buffer_ =
        (current_staging_buffer_ + 1) % staging_buffers_.size();
    staging_buffer_used_ = false;
  }
}

// Mid-frame flush: submits everything recorded so far via the context, then
// re-begins both command buffers so recording can continue in the same
// frame slot. Used when staging space is exhausted (see
// AllocateStagingBuffer). Note the begin sequence duplicates BeginFrame().
void RendererVulkan::Flush() {
  vkEndCommandBuffer(frames_[current_frame_].setup_command_buffer);
  vkEndCommandBuffer(frames_[current_frame_].draw_command_buffer);

  context_.Flush();

  vkResetCommandPool(device_, frames_[current_frame_].command_pool, 0);

  VkCommandBufferBeginInfo cmdbuf_begin;
  cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
  cmdbuf_begin.pNext = nullptr;
  cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
  cmdbuf_begin.pInheritanceInfo = nullptr;

  VkResult err = vkBeginCommandBuffer(
      frames_[current_frame_].setup_command_buffer, &cmdbuf_begin);
  if (err) {
    DLOG << "vkBeginCommandBuffer failed with error " << std::to_string(err);
    return;
  }
  context_.AppendCommandBuffer(frames_[current_frame_].setup_command_buffer);

  err = vkBeginCommandBuffer(frames_[current_frame_].draw_command_buffer,
                             &cmdbuf_begin);
  if (err) {
    DLOG << "vkBeginCommandBuffer failed with error " << std::to_string(err);
    return;
  }
  context_.AppendCommandBuffer(frames_[current_frame_].draw_command_buffer);
}

// Destroys all resources that were queued for deferred destruction while
// |frame| was being recorded. Safe to call only once the frame is retired.
void RendererVulkan::FreePendingResources(int frame) {
  if (!frames_[frame].pipelines_to_destroy.empty()) {
    for (auto& pipeline : frames_[frame].pipelines_to_destroy) {
      vkDestroyPipeline(device_, std::get<0>(pipeline), nullptr);
      vkDestroyPipelineLayout(device_, std::get<1>(pipeline), nullptr);
    }
    frames_[frame].pipelines_to_destroy.clear();
  }

  if (!frames_[frame].images_to_destroy.empty()) {
    for (auto& image : frames_[frame].images_to_destroy) {
      auto [buffer, view] = image;
      vkDestroyImageView(device_, view, nullptr);
      vmaDestroyImage(allocator_, std::get<0>(buffer), std::get<1>(buffer));
    }
    frames_[frame].images_to_destroy.clear();
  }

  if (!frames_[frame].buffers_to_destroy.empty()) {
    for (auto& buffer :
frames_[frame].buffers_to_destroy) + vmaDestroyBuffer(allocator_, std::get<0>(buffer), std::get<1>(buffer)); + frames_[frame].buffers_to_destroy.clear(); + } + + if (!frames_[frame].desc_sets_to_destroy.empty()) { + for (auto& desc_set : frames_[frame].desc_sets_to_destroy) { + auto [set, pool] = desc_set; + vkFreeDescriptorSets(device_, std::get<0>(*pool), 1, &set); + FreeDescriptorPool(pool); + } + frames_[frame].desc_sets_to_destroy.clear(); + } +} + +void RendererVulkan::MemoryBarrier(VkPipelineStageFlags src_stage_mask, + VkPipelineStageFlags dst_stage_mask, + VkAccessFlags src_access, + VkAccessFlags dst_sccess) { + VkMemoryBarrier mem_barrier; + mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; + mem_barrier.pNext = nullptr; + mem_barrier.srcAccessMask = src_access; + mem_barrier.dstAccessMask = dst_sccess; + + vkCmdPipelineBarrier(frames_[current_frame_].draw_command_buffer, + src_stage_mask, dst_stage_mask, 0, 1, &mem_barrier, 0, + nullptr, 0, nullptr); +} + +void RendererVulkan::FullBarrier() { + // Used for debug. 
  // All-stages, all-access barrier; intentionally heavyweight.
  MemoryBarrier(
      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
      VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT |
          VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT |
          VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT |
          VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
          VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
          VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
          VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT,
      VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT |
          VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT |
          VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT |
          VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
          VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
          VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
          VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT);
}

// Reserves |amount| bytes (or, if |segment| > 0, at least one whole
// |segment|-sized chunk) in the rotating set of persistently-mapped staging
// buffers. On success returns the reservation via |alloc_offset| /
// |alloc_size| (which may be less than |amount| when segmented). Grows the
// pool up to max_staging_buffer_size_, and in the worst case Flush()es or
// waits for the GPU to reclaim space.
bool RendererVulkan::AllocateStagingBuffer(uint32_t amount,
                                           uint32_t segment,
                                           uint32_t& alloc_offset,
                                           uint32_t& alloc_size) {
  alloc_size = amount;

  while (true) {
    alloc_offset = 0;

    // See if we can use the current block.
    if (staging_buffers_[current_staging_buffer_].frame_used == frames_drawn_) {
      // We used this block this frame, let's see if there is still room.
      uint32_t write_from =
          staging_buffers_[current_staging_buffer_].fill_amount;
      int32_t available_bytes =
          int32_t(staging_buffer_size_) - int32_t(write_from);

      if ((int32_t)amount < available_bytes) {
        // All will fit.
        alloc_offset = write_from;
      } else if (segment > 0 && available_bytes >= (int32_t)segment) {
        // All won't fit but at least we can fit a chunk.
        alloc_offset = write_from;
        // Round the grant down to a whole number of segments.
        alloc_size = available_bytes - (available_bytes % segment);
      } else {
        // Try next buffer.
        current_staging_buffer_ =
            (current_staging_buffer_ + 1) % staging_buffers_.size();

        if (staging_buffers_[current_staging_buffer_].frame_used ==
            frames_drawn_) {
          // We manage to fill all blocks possible in a single frame. Check if
          // we can insert a new block.
          if (staging_buffers_.size() * staging_buffer_size_ <
              max_staging_buffer_size_) {
            if (!InsertStagingBuffer())
              return false;

            // Claim for current frame.
            staging_buffers_[current_staging_buffer_].frame_used =
                frames_drawn_;
          } else {
            // Worst case scenario, all the staging buffers belong to this frame
            // and this frame is not even done. Flush everything.
            Flush();

            // Clear the whole staging buffer.
            for (int i = 0; i < staging_buffers_.size(); i++) {
              staging_buffers_[i].frame_used = 0;
              staging_buffers_[i].fill_amount = 0;
            }
            // Claim for current frame.
            staging_buffers_[current_staging_buffer_].frame_used =
                frames_drawn_;
          }
        } else {
          // Block is not from current frame, so continue and try again.
          continue;
        }
      }
    } else if (staging_buffers_[current_staging_buffer_].frame_used <=
               frames_drawn_ - frames_.size()) {
      // This is an old block, which was already processed, let's reuse.
      staging_buffers_[current_staging_buffer_].frame_used = frames_drawn_;
      staging_buffers_[current_staging_buffer_].fill_amount = 0;
    } else if (staging_buffers_[current_staging_buffer_].frame_used >
               frames_drawn_ - frames_.size()) {
      // This block may still be in use, let's not touch it unless we have to.
      // Check if we can insert a new block.
      if (staging_buffers_.size() * staging_buffer_size_ <
          max_staging_buffer_size_) {
        if (!InsertStagingBuffer())
          return false;

        // Claim for current frame.
        staging_buffers_[current_staging_buffer_].frame_used = frames_drawn_;
      } else {
        // We are out of room and we can't create more. Ensure older frames are
        // executed.
        vkDeviceWaitIdle(device_);

        for (int i = 0; i < staging_buffers_.size(); i++) {
          // Clear all blocks but the ones from this frame.
          int block_idx =
              (i + current_staging_buffer_) % staging_buffers_.size();
          if (staging_buffers_[block_idx].frame_used == frames_drawn_) {
            break;  // We reached something from this frame, abort.
          }

          staging_buffers_[block_idx].frame_used = 0;
          staging_buffers_[block_idx].fill_amount = 0;
        }
        // Claim for current frame.
        staging_buffers_[current_staging_buffer_].frame_used = frames_drawn_;
      }
    }

    // Done.
    break;
  }

  staging_buffers_[current_staging_buffer_].fill_amount =
      alloc_offset + alloc_size;
  staging_buffer_used_ = true;
  return true;
}

// Creates one persistently-mapped, CPU-visible staging buffer and inserts it
// at the current rotation position.
bool RendererVulkan::InsertStagingBuffer() {
  VkBufferCreateInfo buffer_info;
  buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
  buffer_info.pNext = nullptr;
  buffer_info.flags = 0;
  buffer_info.size = staging_buffer_size_;
  buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
  buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
  buffer_info.queueFamilyIndexCount = 0;
  buffer_info.pQueueFamilyIndices = nullptr;

  VmaAllocationCreateInfo alloc_info;
  alloc_info.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;  // Stay mapped.
  alloc_info.usage = VMA_MEMORY_USAGE_CPU_ONLY;         // CPU and coherent.
+ alloc_info.requiredFlags = 0; + alloc_info.preferredFlags = 0; + alloc_info.memoryTypeBits = 0; + alloc_info.pool = nullptr; + alloc_info.pUserData = nullptr; + + StagingBuffer block; + + VkResult err = vmaCreateBuffer(allocator_, &buffer_info, &alloc_info, + &std::get<0>(block.buffer), + &std::get<1>(block.buffer), &block.alloc_info); + if (err) { + DLOG << "vmaCreateBuffer failed with error " << std::to_string(err); + return false; + } + + block.frame_used = 0; + block.fill_amount = 0; + + staging_buffers_.insert(staging_buffers_.begin() + current_staging_buffer_, + block); + return true; +} + +RendererVulkan::DescPool* RendererVulkan::AllocateDescriptorPool() { + DescPool* selected_pool = nullptr; + + for (auto& dp : desc_pools_) { + if (std::get<1>(*dp) < kMaxDescriptorsPerPool) { + selected_pool = dp.get(); + break; + } + } + + if (!selected_pool) { + VkDescriptorPoolSize sizes; + sizes.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + sizes.descriptorCount = kMaxDescriptorsPerPool; + + VkDescriptorPoolCreateInfo descriptor_pool_create_info; + descriptor_pool_create_info.sType = + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; + descriptor_pool_create_info.pNext = nullptr; + descriptor_pool_create_info.flags = + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; + descriptor_pool_create_info.maxSets = kMaxDescriptorsPerPool; + descriptor_pool_create_info.poolSizeCount = 1; + descriptor_pool_create_info.pPoolSizes = &sizes; + + VkDescriptorPool desc_pool; + VkResult res = vkCreateDescriptorPool(device_, &descriptor_pool_create_info, + nullptr, &desc_pool); + if (res) { + DLOG << "vkCreateDescriptorPool failed with error " + << std::to_string(res); + return VK_NULL_HANDLE; + } + + auto pool = std::make_unique(std::make_tuple(desc_pool, 0)); + selected_pool = pool.get(); + desc_pools_.push_back(std::move(pool)); + } + + ++std::get<1>(*selected_pool); + return selected_pool; +} + +void RendererVulkan::FreeDescriptorPool(DescPool* desc_pool) { + if 
(--std::get<1>(*desc_pool) == 0) { + for (auto it = desc_pools_.begin(); it != desc_pools_.end(); ++it) { + if (std::get<0>(**it) == std::get<0>(*desc_pool)) { + vkDestroyDescriptorPool(device_, std::get<0>(*desc_pool), nullptr); + desc_pools_.erase(it); + return; + } + } + NOTREACHED; + } +} + +bool RendererVulkan::AllocateBuffer(Buffer& buffer, + uint32_t size, + uint32_t usage, + VmaMemoryUsage mapping) { + VkBufferCreateInfo buffer_info; + buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + buffer_info.pNext = nullptr; + buffer_info.flags = 0; + buffer_info.size = size; + buffer_info.usage = usage; + buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + buffer_info.queueFamilyIndexCount = 0; + buffer_info.pQueueFamilyIndices = nullptr; + + VmaAllocationCreateInfo allocation_info; + allocation_info.flags = 0; + allocation_info.usage = mapping; + allocation_info.requiredFlags = 0; + allocation_info.preferredFlags = 0; + allocation_info.memoryTypeBits = 0; + allocation_info.pool = nullptr; + allocation_info.pUserData = nullptr; + + VkBuffer vk_buffer; + VmaAllocation allocation = nullptr; + + VkResult err = vmaCreateBuffer(allocator_, &buffer_info, &allocation_info, + &vk_buffer, &allocation, nullptr); + if (err) { + DLOG << "Can't create buffer of size: " << std::to_string(size) + << ", error " << std::to_string(err); + return false; + } + + buffer = std::make_tuple(vk_buffer, allocation); + + return true; +} + +void RendererVulkan::FreeBuffer(Buffer buffer) { + frames_[current_frame_].buffers_to_destroy.push_back(std::move(buffer)); +} + +bool RendererVulkan::UpdateBuffer(VkBuffer buffer, + size_t offset, + const void* data, + size_t data_size) { + size_t to_submit = data_size; + size_t submit_from = 0; + + while (to_submit > 0) { + uint32_t block_write_offset; + uint32_t block_write_amount; + + if (!AllocateStagingBuffer( + std::min((uint32_t)to_submit, staging_buffer_size_), 32, + block_write_offset, block_write_amount)) + return false; + Buffer 
staging_buffer = + staging_buffers_[current_staging_buffer_].buffer; + + // Copy to staging buffer. + void* data_ptr = + staging_buffers_[current_staging_buffer_].alloc_info.pMappedData; + memcpy(((uint8_t*)data_ptr) + block_write_offset, (char*)data + submit_from, + block_write_amount); + + // Insert a command to copy to GPU buffer. + VkBufferCopy region; + region.srcOffset = block_write_offset; + region.dstOffset = submit_from + offset; + region.size = block_write_amount; + + vkCmdCopyBuffer(frames_[current_frame_].setup_command_buffer, + std::get<0>(staging_buffer), buffer, 1, ®ion); + + to_submit -= block_write_amount; + submit_from += block_write_amount; + } + return true; +} + +void RendererVulkan::BufferMemoryBarrier(VkBuffer buffer, + uint64_t from, + uint64_t size, + VkPipelineStageFlags src_stage_mask, + VkPipelineStageFlags dst_stage_mask, + VkAccessFlags src_access, + VkAccessFlags dst_sccess) { + VkBufferMemoryBarrier buffer_mem_barrier; + buffer_mem_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; + buffer_mem_barrier.pNext = nullptr; + buffer_mem_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + buffer_mem_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + buffer_mem_barrier.srcAccessMask = src_access; + buffer_mem_barrier.dstAccessMask = dst_sccess; + buffer_mem_barrier.buffer = buffer; + buffer_mem_barrier.offset = from; + buffer_mem_barrier.size = size; + + vkCmdPipelineBarrier(frames_[current_frame_].setup_command_buffer, + src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1, + &buffer_mem_barrier, 0, nullptr); +} + +// TODO: Support for compressed textures. 
+bool RendererVulkan::CreateTexture(Buffer& image, + VkImageView& view, + DescSet& desc_set, + int width, + int height, + VkImageUsageFlags usage, + VmaMemoryUsage mapping) { + VkImageCreateInfo image_create_info; + image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + image_create_info.pNext = nullptr; + image_create_info.flags = 0; + image_create_info.imageType = VK_IMAGE_TYPE_2D; + image_create_info.extent.width = width; + image_create_info.extent.height = height; + image_create_info.extent.depth = 1; + image_create_info.mipLevels = 1; + image_create_info.arrayLayers = 1; + image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; + image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; + image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + image_create_info.usage = usage; + image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; + image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + image_create_info.queueFamilyIndexCount = 0; + image_create_info.pQueueFamilyIndices = nullptr; + + VmaAllocationCreateInfo allocInfo; + allocInfo.flags = 0; + allocInfo.usage = mapping; + allocInfo.requiredFlags = 0; + allocInfo.preferredFlags = 0; + allocInfo.memoryTypeBits = 0; + allocInfo.pool = nullptr; + allocInfo.pUserData = nullptr; + + VkImage vk_image; + VmaAllocation allocation = nullptr; + + VkResult err = vmaCreateImage(allocator_, &image_create_info, &allocInfo, + &vk_image, &allocation, nullptr); + if (err) { + DLOG << "vmaCreateImage failed with error " << std::to_string(err); + return false; + } + + VkImageViewCreateInfo image_view_create_info; + image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + image_view_create_info.pNext = nullptr; + image_view_create_info.flags = 0; + image_view_create_info.image = vk_image; + image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; + image_view_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; + image_view_create_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY; + 
image_view_create_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY; + image_view_create_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY; + image_view_create_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY; + image_view_create_info.subresourceRange.baseMipLevel = 0; + image_view_create_info.subresourceRange.levelCount = 1; + image_view_create_info.subresourceRange.baseArrayLayer = 0; + image_view_create_info.subresourceRange.layerCount = 1; + image_view_create_info.subresourceRange.aspectMask = + VK_IMAGE_ASPECT_COLOR_BIT; + + err = vkCreateImageView(device_, &image_view_create_info, nullptr, &view); + + if (err) { + vmaDestroyImage(allocator_, vk_image, allocation); + DLOG << "vkCreateImageView failed with error " << std::to_string(err); + return false; + } + + image = {vk_image, allocation}; + + DescPool* desc_pool = AllocateDescriptorPool(); + + VkDescriptorSetAllocateInfo descriptor_set_allocate_info; + descriptor_set_allocate_info.sType = + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + descriptor_set_allocate_info.pNext = nullptr; + descriptor_set_allocate_info.descriptorPool = std::get<0>(*desc_pool); + descriptor_set_allocate_info.descriptorSetCount = 1; + descriptor_set_allocate_info.pSetLayouts = &descriptor_set_layout_; + + VkDescriptorSet descriptor_set; + VkResult res = vkAllocateDescriptorSets( + device_, &descriptor_set_allocate_info, &descriptor_set); + if (res) { + --std::get<1>(*desc_pool); + DLOG << "Cannot allocate descriptor sets, error " << std::to_string(res); + return false; + } + + desc_set = {descriptor_set, desc_pool}; + + VkDescriptorImageInfo image_info; + image_info.sampler = sampler_; + image_info.imageView = view; + image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + + VkWriteDescriptorSet write; + write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + write.pNext = nullptr; + write.dstSet = descriptor_set; + write.dstBinding = 0; + write.dstArrayElement = 0; + write.descriptorCount = 1; + 
write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + write.pImageInfo = &image_info; + write.pBufferInfo = nullptr; + write.pTexelBufferView = nullptr; + + vkUpdateDescriptorSets(device_, 1, &write, 0, nullptr); + + return true; +} + +void RendererVulkan::FreeTexture(Buffer image, + VkImageView image_view, + DescSet desc_set) { + frames_[current_frame_].images_to_destroy.push_back( + std::make_tuple(std::move(image), image_view)); + frames_[current_frame_].desc_sets_to_destroy.push_back(std::move(desc_set)); +} + +bool RendererVulkan::UpdateImage(VkImage image, + const uint8_t* data, + int width, + int height) { + constexpr uint32_t pixel_size = 4; + // Ensure a single row is small enough to fit in a staging buffer. + DCHECK(staging_buffer_size_ >= width * pixel_size); + + size_t to_submit = width * height * pixel_size; + size_t submit_from = 0; + uint32_t segment = width * pixel_size; + uint32_t max_size = staging_buffer_size_ - (staging_buffer_size_ % segment); + + while (to_submit > 0) { + uint32_t block_write_offset; + uint32_t block_write_amount; + + if (!AllocateStagingBuffer(std::min((uint32_t)to_submit, max_size), segment, + block_write_offset, block_write_amount)) + return false; + Buffer staging_buffer = + staging_buffers_[current_staging_buffer_].buffer; + + // Copy to staging buffer. + void* data_ptr = + staging_buffers_[current_staging_buffer_].alloc_info.pMappedData; + memcpy(((uint8_t*)data_ptr) + block_write_offset, (char*)data + submit_from, + block_write_amount); + + // Insert a command to copy to GPU buffer. 
+ VkBufferImageCopy buffer_image_copy; + buffer_image_copy.bufferOffset = block_write_offset; + buffer_image_copy.bufferRowLength = 0; + buffer_image_copy.bufferImageHeight = 0; + buffer_image_copy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + buffer_image_copy.imageSubresource.mipLevel = 0; + buffer_image_copy.imageSubresource.baseArrayLayer = 0; + buffer_image_copy.imageSubresource.layerCount = 1; + buffer_image_copy.imageOffset.x = 0; + buffer_image_copy.imageOffset.y = (submit_from / pixel_size) / width; + buffer_image_copy.imageOffset.z = 0; + buffer_image_copy.imageExtent.width = width; + buffer_image_copy.imageExtent.height = + (block_write_amount / pixel_size) / width; + buffer_image_copy.imageExtent.depth = 1; + + vkCmdCopyBufferToImage(frames_[current_frame_].setup_command_buffer, + std::get<0>(staging_buffer), image, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, + &buffer_image_copy); + + to_submit -= block_write_amount; + submit_from += block_write_amount; + } + return true; +} + +void RendererVulkan::ImageMemoryBarrier(VkImage& image, + VkPipelineStageFlags src_stage_mask, + VkPipelineStageFlags dst_stage_mask, + VkAccessFlags src_access, + VkAccessFlags dst_sccess, + VkImageLayout old_layout, + VkImageLayout new_layout) { + VkImageMemoryBarrier image_mem_barrier; + image_mem_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + image_mem_barrier.pNext = nullptr; + image_mem_barrier.srcAccessMask = src_access; + image_mem_barrier.dstAccessMask = dst_sccess; + image_mem_barrier.oldLayout = old_layout; + image_mem_barrier.newLayout = new_layout; + image_mem_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + image_mem_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + image_mem_barrier.image = image; + image_mem_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + image_mem_barrier.subresourceRange.baseMipLevel = 0; + image_mem_barrier.subresourceRange.levelCount = 1; + 
image_mem_barrier.subresourceRange.baseArrayLayer = 0; + image_mem_barrier.subresourceRange.layerCount = 1; + + vkCmdPipelineBarrier(frames_[current_frame_].setup_command_buffer, + src_stage_mask, dst_stage_mask, 0, 0, nullptr, 0, + nullptr, 1, &image_mem_barrier); +} + +bool RendererVulkan::CreatePipelineLayout(ShaderVulkan* shader) { + SpvReflectShaderModule module_vertex; + SpvReflectResult result = spvReflectCreateShaderModule( + shader->spirv_vertex.size(), shader->spirv_vertex.data(), &module_vertex); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + DLOG << "SPIR-V reflection failed to parse vertex shader."; + return false; + } + + SpvReflectShaderModule module_fragment; + result = spvReflectCreateShaderModule(shader->spirv_fragment.size(), + shader->spirv_fragment.data(), + &module_fragment); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + DLOG << "SPIR-V reflection failed to parse fragment shader."; + spvReflectDestroyShaderModule(&module_vertex); + return false; + } + + bool ret = false; + + // Parse descriptor bindings. + do { + uint32_t binding_count = 0; + + // Validate that the vertex shader has no descriptor binding. + result = spvReflectEnumerateDescriptorBindings(&module_vertex, + &binding_count, nullptr); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + DLOG << "SPIR-V reflection failed to enumerate fragment shader " + "descriptor bindings."; + break; + } + if (binding_count > 0) { + DLOG << "SPIR-V reflection found " << binding_count + << " descriptor bindings in vertex shader."; + break; + } + + // Validate that the fragment shader has max 1 desriptor binding. 
+ result = spvReflectEnumerateDescriptorBindings(&module_fragment, + &binding_count, nullptr); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + DLOG << "SPIR-V reflection failed to enumerate fragment shader " + "descriptor bindings."; + break; + } + + DLOG << __func__ << " binding_count: " << binding_count; + + if (binding_count > 0) { + if (binding_count > 1) { + DLOG << "SPIR-V reflection found " << binding_count + << " descriptor bindings in fragment shader. Only one descriptor " + "binding is suported."; + break; + } + + // Validate that the desriptor type is COMBINED_IMAGE_SAMPLER. + std::vector bindings; + bindings.resize(binding_count); + result = spvReflectEnumerateDescriptorBindings( + &module_fragment, &binding_count, bindings.data()); + + if (result != SPV_REFLECT_RESULT_SUCCESS) { + DLOG << "SPIR-V reflection failed to get descriptor bindings for " + "fragment shader."; + break; + } + + const SpvReflectDescriptorBinding& binding = *bindings[0]; + + DLOG << __func__ << " name: " << binding.name + << " descriptor_type: " << binding.descriptor_type + << " set: " << binding.set << " binding: " << binding.binding; + + if (binding.descriptor_type != + SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) { + DLOG << "SPIR-V reflection found descriptor type " + << binding.descriptor_type + << " in fragment shader. Only COMBINED_IMAGE_SAMPLER type is " + "supported."; + break; + } + + shader->sampler_uniform_name = binding.name; + shader->use_desc_set = true; + } + + // Parse push constants. 
+    auto enumerate_pc = [&](SpvReflectShaderModule& module, uint32_t& pc_count,
+                            std::vector<SpvReflectBlockVariable*>& pconstants,
+                            EShLanguage stage) {
+      result =
+          spvReflectEnumeratePushConstantBlocks(&module, &pc_count, nullptr);
+      if (result != SPV_REFLECT_RESULT_SUCCESS) {
+        DLOG << "SPIR-V reflection failed to enumerate push constants in shader "
+                "stage "
+             << stage;
+        return false;
+      }
+
+      if (pc_count) {
+        if (pc_count > 1) {
+          DLOG << "SPIR-V reflection found " << pc_count
+               << " push constant blocks in shader stage " << stage
+               << ". Only one push constant block is supported.";
+          return false;
+        }
+
+        pconstants.resize(pc_count);
+        result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count,
+                                                       pconstants.data());
+        if (result != SPV_REFLECT_RESULT_SUCCESS) {
+          DLOG << "SPIR-V reflection failed to obtain push constants.";
+          return false;
+        }
+      }
+
+      return true;
+    };
+
+    uint32_t pc_count_vertex = 0;
+    std::vector<SpvReflectBlockVariable*> pconstants_vertex;
+    if (!enumerate_pc(module_vertex, pc_count_vertex, pconstants_vertex,
+                      EShLangVertex))
+      break;
+
+    uint32_t pc_count_fragment = 0;
+    std::vector<SpvReflectBlockVariable*> pconstants_fragment;
+    if (!enumerate_pc(module_fragment, pc_count_fragment, pconstants_fragment,
+                      EShLangFragment))
+      break;
+
+    if (pc_count_vertex != pc_count_fragment) {
+      DLOG << "SPIR-V reflection found different push constant blocks across "
+              "shader stages.";
+      break;
+    }
+
+    if (pc_count_vertex) {
+      DLOG << __func__ << " PushConstants size: " << pconstants_vertex[0]->size
+           << " count: " << pconstants_vertex[0]->member_count;
+
+      if (pconstants_vertex[0]->size != pconstants_fragment[0]->size) {
+        DLOG << "SPIR-V reflection found different push constant blocks across "
+                "shader stages.";
+        break;
+      }
+
+      shader->push_constants_size = pconstants_vertex[0]->size;
+      shader->push_constants =
+          std::make_unique<char[]>(shader->push_constants_size);
+      memset(shader->push_constants.get(), 0, shader->push_constants_size);
+
+      size_t offset = 0;
+      for (uint32_t j = 0; j <
pconstants_vertex[0]->member_count; j++) { + DLOG << __func__ << " name: " << pconstants_vertex[0]->members[j].name + << " size: " << pconstants_vertex[0]->members[j].size + << " padded_size: " + << pconstants_vertex[0]->members[j].padded_size; + + shader->variables[pconstants_vertex[0]->members[j].name] = { + pconstants_vertex[0]->members[j].size, offset}; + offset += pconstants_vertex[0]->members[j].padded_size; + } + } + + VkDescriptorSetLayout desc_set_layout = descriptor_set_layout_; + + VkPipelineLayoutCreateInfo pipeline_layout_create_info; + pipeline_layout_create_info.sType = + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + pipeline_layout_create_info.pNext = nullptr; + pipeline_layout_create_info.flags = 0; + if (binding_count > 0) { + pipeline_layout_create_info.setLayoutCount = 1; + pipeline_layout_create_info.pSetLayouts = &desc_set_layout; + } else { + pipeline_layout_create_info.setLayoutCount = 0; + pipeline_layout_create_info.pSetLayouts = nullptr; + } + + VkPushConstantRange push_constant_range; + if (shader->push_constants_size) { + push_constant_range.stageFlags = + VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; + push_constant_range.offset = 0; + push_constant_range.size = shader->push_constants_size; + + pipeline_layout_create_info.pushConstantRangeCount = 1; + pipeline_layout_create_info.pPushConstantRanges = &push_constant_range; + } else { + pipeline_layout_create_info.pushConstantRangeCount = 0; + pipeline_layout_create_info.pPushConstantRanges = nullptr; + } + + if (vkCreatePipelineLayout(device_, &pipeline_layout_create_info, nullptr, + &shader->pipeline_layout) != VK_SUCCESS) { + DLOG << "Failed to create pipeline layout!"; + break; + } + + ret = true; + } while (false); + + spvReflectDestroyShaderModule(&module_vertex); + spvReflectDestroyShaderModule(&module_fragment); + return ret; +} + +void RendererVulkan::DrawListBegin() { + VkCommandBuffer command_buffer = frames_[current_frame_].draw_command_buffer; + + 
VkRenderPassBeginInfo render_pass_begin; + render_pass_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + render_pass_begin.pNext = nullptr; + render_pass_begin.renderPass = context_.GetRenderPass(); + render_pass_begin.framebuffer = context_.GetFramebuffer(); + + render_pass_begin.renderArea.extent.width = screen_width_; + render_pass_begin.renderArea.extent.height = screen_height_; + render_pass_begin.renderArea.offset.x = 0; + render_pass_begin.renderArea.offset.y = 0; + + render_pass_begin.clearValueCount = 1; + + VkClearValue clear_value; + clear_value.color.float32[0] = 0; + clear_value.color.float32[1] = 0; + clear_value.color.float32[2] = 0; + clear_value.color.float32[3] = 1; + + render_pass_begin.pClearValues = &clear_value; + + vkCmdBeginRenderPass(command_buffer, &render_pass_begin, + VK_SUBPASS_CONTENTS_INLINE); + + VkViewport viewport; + viewport.x = 0; + viewport.y = (float)screen_height_; + viewport.width = (float)screen_width_; + viewport.height = -(float)screen_height_; + viewport.minDepth = 0; + viewport.maxDepth = 1.0; + + vkCmdSetViewport(command_buffer, 0, 1, &viewport); + + VkRect2D scissor; + scissor.offset.x = 0; + scissor.offset.y = 0; + scissor.extent.width = screen_width_; + scissor.extent.height = screen_height_; + + vkCmdSetScissor(command_buffer, 0, 1, &scissor); +} + +void RendererVulkan::DrawListEnd() { + vkCmdEndRenderPass(frames_[current_frame_].draw_command_buffer); + + // To ensure proper synchronization, we must make sure rendering is done + // before: + // - Some buffer is copied. + // - Another render pass happens (since we may be done). 
+#ifdef FORCE_FULL_BARRIER + FullBarrier(true); +#else + MemoryBarrier(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | + VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | + VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | + VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | + VK_ACCESS_SHADER_WRITE_BIT); +#endif +} + +void RendererVulkan::SwapBuffers() { + vkEndCommandBuffer(frames_[current_frame_].setup_command_buffer); + vkEndCommandBuffer(frames_[current_frame_].draw_command_buffer); + + context_.SwapBuffers(); + current_frame_ = (current_frame_ + 1) % frames_.size(); + + active_pipeline_ = VK_NULL_HANDLE; + active_descriptor_set_ = VK_NULL_HANDLE; + penging_descriptor_set_ = VK_NULL_HANDLE; + + BeginFrame(); +} + +template +bool RendererVulkan::SetUniformInternal(ShaderVulkan* shader, + const std::string& name, + T val) { + auto it = shader->variables.find(name); + if (it == shader->variables.end()) { + DLOG << "No variable found with name " << name; + return false; + } + if (it->second[0] != sizeof(val)) { + DLOG << "Size mismatch for variable " << name; + return false; + } + + auto* dst = + reinterpret_cast(shader->push_constants.get() + it->second[1]); + *dst = val; + return true; +} + +void RendererVulkan::ContextLost() { + LOG << "Context lost."; + InvalidateAllResources(); + context_lost_cb_(); +} + +std::unique_ptr RendererVulkan::CreateResource( + RenderResourceFactoryBase& factory) { + static unsigned last_id = 0; + + std::shared_ptr impl_data; + if (factory.IsTypeOf()) + impl_data = std::make_shared(); + else if (factory.IsTypeOf()) + impl_data = std::make_shared(); + else if (factory.IsTypeOf()) + impl_data = std::make_shared(); + else + NOTREACHED << "- Unknown resource type."; + + unsigned resource_id = ++last_id; + auto resource = 
factory.Create(resource_id, impl_data, this); + resources_[resource_id] = resource.get(); + return resource; +} + +void RendererVulkan::ReleaseResource(unsigned resource_id) { + auto it = resources_.find(resource_id); + if (it != resources_.end()) + resources_.erase(it); +} + +size_t RendererVulkan::GetAndResetFPS() { + return context_.GetAndResetFPS(); +} + +void RendererVulkan::InvalidateAllResources() { + for (auto& r : resources_) + r.second->Destroy(); +} + +} // namespace eng diff --git a/src/engine/renderer/vulkan/renderer_vulkan.h b/src/engine/renderer/vulkan/renderer_vulkan.h new file mode 100644 index 0000000..5734e56 --- /dev/null +++ b/src/engine/renderer/vulkan/renderer_vulkan.h @@ -0,0 +1,257 @@ +#ifndef RENDERER_VULKAN_H +#define RENDERER_VULKAN_H + +#include +#include +#include +#include + +#include "vulkan_context.h" + +#include "../../../third_party/vma/vk_mem_alloc.h" +#include "../render_resource.h" +#include "../renderer.h" + +namespace eng { + +class Image; + +class RendererVulkan : public Renderer { + public: + RendererVulkan(); + ~RendererVulkan() override; + +#if defined(__ANDROID__) + bool Initialize(ANativeWindow* window) override; +#elif defined(__linux__) + bool Initialize(Display* display, Window window) override; +#endif + + void Shutdown() override; + + void CreateGeometry(std::shared_ptr impl_data, + std::unique_ptr mesh) override; + void DestroyGeometry(std::shared_ptr impl_data) override; + void Draw(std::shared_ptr impl_data) override; + + void UpdateTexture(std::shared_ptr impl_data, + std::unique_ptr image) override; + void DestroyTexture(std::shared_ptr impl_data) override; + void ActivateTexture(std::shared_ptr impl_data) override; + + void CreateShader(std::shared_ptr impl_data, + std::unique_ptr source, + const VertexDescripton& vertex_description, + Primitive primitive) override; + void DestroyShader(std::shared_ptr impl_data) override; + void ActivateShader(std::shared_ptr impl_data) override; + + void 
SetUniform(std::shared_ptr impl_data, + const std::string& name, + const base::Vector2& val) override; + void SetUniform(std::shared_ptr impl_data, + const std::string& name, + const base::Vector3& val) override; + void SetUniform(std::shared_ptr impl_data, + const std::string& name, + const base::Vector4& val) override; + void SetUniform(std::shared_ptr impl_data, + const std::string& name, + const base::Matrix4x4& val) override; + void SetUniform(std::shared_ptr impl_data, + const std::string& name, + float val) override; + void SetUniform(std::shared_ptr impl_data, + const std::string& name, + int val) override; + void UploadUniforms(std::shared_ptr impl_data) override; + + void PrepareForDrawing() override; + void Present() override; + + std::unique_ptr CreateResource( + RenderResourceFactoryBase& factory) override; + void ReleaseResource(unsigned resource_id) override; + + size_t GetAndResetFPS() override; + +#if defined(__linux__) && !defined(__ANDROID__) + XVisualInfo* GetXVisualInfo(Display* display) override; +#endif + + private: + // VkBuffer or VkImage with allocator. + template + using Buffer = std::tuple; + + // VkDescriptorPool with usage count. + using DescPool = std::tuple; + + // VkDescriptorSet with the pool which it was allocated from. + using DescSet = std::tuple; + + // Containers to keep information of resources to be destroyed. 
+ using BufferDeathRow = std::vector>; + using ImageDeathRow = std::vector, VkImageView>>; + using DescSetDeathRow = std::vector; + using PipelineDeathRow = + std::vector>; + + struct GeometryVulkan { + Buffer buffer; + size_t num_vertices = 0; + }; + + struct ShaderVulkan { + std::unordered_map> variables; + std::unique_ptr push_constants; + size_t push_constants_size = 0; + std::string sampler_uniform_name; + bool use_desc_set = false; + std::vector spirv_vertex; + std::vector spirv_fragment; + VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; + VkPipeline pipeline = VK_NULL_HANDLE; + }; + + struct TextureVulkan { + Buffer image; + VkImageView view = VK_NULL_HANDLE; + DescSet desc_set = {}; + int width = 0; + int height = 0; + }; + + // Each frame contains 2 command buffers with separate synchronization scopes. + // One for creating resources (recorded outside a render pass) and another for + // drawing (recorded inside a render pass). Also contains list of resources to + // be destroyed when the frame is cycled. There are 2 or 3 frames (double or + // tripple buffering) that are cycled constantly. 
+ struct Frame { + VkCommandPool command_pool = VK_NULL_HANDLE; + VkCommandBuffer setup_command_buffer = VK_NULL_HANDLE; + VkCommandBuffer draw_command_buffer = VK_NULL_HANDLE; + + BufferDeathRow buffers_to_destroy; + ImageDeathRow images_to_destroy; + DescSetDeathRow desc_sets_to_destroy; + PipelineDeathRow pipelines_to_destroy; + }; + + struct StagingBuffer { + Buffer buffer{VK_NULL_HANDLE, nullptr}; + uint64_t frame_used = 0; + uint32_t fill_amount = 0; + VmaAllocationInfo alloc_info; + }; + + VulkanContext context_; + + VmaAllocator allocator_ = nullptr; + + VkDevice device_ = VK_NULL_HANDLE; + size_t frames_drawn_ = 0; + std::vector frames_; + int current_frame_ = 0; + + std::vector staging_buffers_; + int current_staging_buffer_ = 0; + uint32_t staging_buffer_size_ = 256 * 1024; + uint64_t max_staging_buffer_size_ = 16 * 1024 * 1024; + bool staging_buffer_used_ = false; + + VkPipeline active_pipeline_ = VK_NULL_HANDLE; + + std::vector> desc_pools_; + VkDescriptorSetLayout descriptor_set_layout_ = VK_NULL_HANDLE; + VkDescriptorSet active_descriptor_set_ = VK_NULL_HANDLE; + VkDescriptorSet penging_descriptor_set_ = VK_NULL_HANDLE; + + VkSampler sampler_ = VK_NULL_HANDLE; + + std::unordered_map resources_; + +#if defined(__ANDROID__) + ANativeWindow* window_; +#elif defined(__linux__) + Display* display_ = NULL; + Window window_ = 0; +#endif + + bool InitializeInternal(); + + void BeginFrame(); + + void Flush(); + + void FreePendingResources(int frame); + + void MemoryBarrier(VkPipelineStageFlags src_stage_mask, + VkPipelineStageFlags dst_stage_mask, + VkAccessFlags src_access, + VkAccessFlags dst_sccess); + void FullBarrier(); + + bool AllocateStagingBuffer(uint32_t amount, + uint32_t segment, + uint32_t& alloc_offset, + uint32_t& alloc_size); + bool InsertStagingBuffer(); + + DescPool* AllocateDescriptorPool(); + void FreeDescriptorPool(DescPool* desc_pool); + + bool AllocateBuffer(Buffer& buffer, + uint32_t size, + uint32_t usage, + VmaMemoryUsage mapping); + 
void FreeBuffer(Buffer buffer); + bool UpdateBuffer(VkBuffer buffer, + size_t offset, + const void* data, + size_t data_size); + void BufferMemoryBarrier(VkBuffer buffer, + uint64_t from, + uint64_t size, + VkPipelineStageFlags src_stage_mask, + VkPipelineStageFlags dst_stage_mask, + VkAccessFlags src_access, + VkAccessFlags dst_sccess); + + bool CreateTexture(Buffer& image, + VkImageView& view, + DescSet& desc_set, + int width, + int height, + VkImageUsageFlags usage, + VmaMemoryUsage mapping); + void FreeTexture(Buffer image, + VkImageView image_view, + DescSet desc_set); + bool UpdateImage(VkImage image, const uint8_t* data, int width, int height); + void ImageMemoryBarrier(VkImage& image, + VkPipelineStageFlags src_stage_mask, + VkPipelineStageFlags dst_stage_mask, + VkAccessFlags src_access, + VkAccessFlags dst_sccess, + VkImageLayout old_layout, + VkImageLayout new_layout); + + bool CreatePipelineLayout(ShaderVulkan* shader); + + void DrawListBegin(); + void DrawListEnd(); + + void SwapBuffers(); + + template + bool SetUniformInternal(ShaderVulkan* shader, const std::string& name, T val); + + void ContextLost(); + + void InvalidateAllResources(); +}; + +} // namespace eng + +#endif // RENDERER_VULKAN_H diff --git a/src/engine/renderer/vulkan/renderer_vulkan_android.cc b/src/engine/renderer/vulkan/renderer_vulkan_android.cc new file mode 100644 index 0000000..c728fa8 --- /dev/null +++ b/src/engine/renderer/vulkan/renderer_vulkan_android.cc @@ -0,0 +1,27 @@ +#include "renderer_vulkan.h" + +#include + +#include "../../../base/log.h" + +namespace eng { + +bool RendererVulkan::Initialize(ANativeWindow* window) { + LOG << "Initializing renderer."; + + screen_width_ = ANativeWindow_getWidth(window); + screen_height_ = ANativeWindow_getHeight(window); + + if (!context_.Initialize()) { + LOG << "Failed to initialize Vulkan context."; + return false; + } + if (!context_.CreateWindow(window, screen_width_, screen_height_)) { + LOG << "Vulkan context failed to create 
window."; + return false; + } + + return InitializeInternal(); +} + +} // namespace eng diff --git a/src/engine/renderer/vulkan/renderer_vulkan_linux.cc b/src/engine/renderer/vulkan/renderer_vulkan_linux.cc new file mode 100644 index 0000000..3ed68c4 --- /dev/null +++ b/src/engine/renderer/vulkan/renderer_vulkan_linux.cc @@ -0,0 +1,39 @@ +#include "renderer_vulkan.h" + +#include "../../../base/log.h" + +namespace eng { + +bool RendererVulkan::Initialize(Display* display, Window window) { + LOG << "Initializing renderer."; + + display_ = display; + window_ = window; + + XWindowAttributes xwa; + XGetWindowAttributes(display_, window_, &xwa); + screen_width_ = xwa.width; + screen_height_ = xwa.height; + + if (!context_.Initialize()) { + LOG << "Failed to initialize Vulkan context."; + return false; + } + if (!context_.CreateWindow(display, window, screen_width_, screen_height_)) { + LOG << "Vulkan context failed to create window."; + return false; + } + + return InitializeInternal(); +} + +XVisualInfo* RendererVulkan::GetXVisualInfo(Display* display) { + long visual_mask = VisualScreenMask; + int num_visuals; + XVisualInfo visual_info_template = {}; + visual_info_template.screen = DefaultScreen(display); + return XGetVisualInfo(display, visual_mask, &visual_info_template, + &num_visuals); +} + +} // namespace eng diff --git a/src/engine/renderer/vulkan/vulkan_context.cc b/src/engine/renderer/vulkan/vulkan_context.cc new file mode 100644 index 0000000..44febd2 --- /dev/null +++ b/src/engine/renderer/vulkan/vulkan_context.cc @@ -0,0 +1,1397 @@ +#include "vulkan_context.h" + +#include +#include +#include + +#include "../../../base/log.h" +#include "../../../third_party/vulkan/vk_enum_string_helper.h" + +#define GET_PROC_ADDR(func, obj, entrypoint) \ + { \ + entrypoint = (PFN_vk##entrypoint)func(obj, "vk" #entrypoint); \ + if (entrypoint == nullptr) { \ + DLOG << #func << " failed to find vk"; \ + return false; \ + } \ + } + +namespace eng { + 
+VulkanContext::VulkanContext() {
+#if defined(_DEBUG) && !defined(__ANDROID__)
+  use_validation_layers_ = true;
+#endif
+}
+
+VulkanContext::~VulkanContext() {
+  if (instance_ != VK_NULL_HANDLE) {
+    if (use_validation_layers_)
+      DestroyDebugUtilsMessengerEXT(instance_, dbg_messenger_, nullptr);
+    vkDestroyInstance(instance_, nullptr);
+    instance_ = VK_NULL_HANDLE;
+  }
+}
+
+bool VulkanContext::Initialize() {
+#if defined(__ANDROID__)
+  // Initialize the Vulkan function pointers.
+  if (!InitVulkan()) {
+    LOG << "Vulkan is not available.";
+    return false;
+  }
+#endif
+
+  if (!CreatePhysicalDevice())
+    return false;
+
+  return true;
+}
+
+void VulkanContext::Shutdown() {
+  if (device_ != VK_NULL_HANDLE) {
+    for (int i = 0; i < kFrameLag; i++) {
+      vkDestroyFence(device_, fences_[i], nullptr);
+      vkDestroySemaphore(device_, image_acquired_semaphores_[i], nullptr);
+      vkDestroySemaphore(device_, draw_complete_semaphores_[i], nullptr);
+      if (separate_present_queue_) {
+        vkDestroySemaphore(device_, image_ownership_semaphores_[i], nullptr);
+      }
+    }
+    vkDestroyDevice(device_, nullptr);
+    device_ = VK_NULL_HANDLE;
+  }
+}
+
+VKAPI_ATTR VkBool32 VKAPI_CALL VulkanContext::DebugMessengerCallback(
+    VkDebugUtilsMessageSeverityFlagBitsEXT message_severity,
+    VkDebugUtilsMessageTypeFlagsEXT message_type,
+    const VkDebugUtilsMessengerCallbackDataEXT* callback_data,
+    void* user_data) {
+  // This error needs to be ignored because the AMD allocator will mix up memory
+  // types on IGP processors.
+  if (strstr(callback_data->pMessage, "Mapping an image with layout") !=
+          nullptr &&
+      strstr(callback_data->pMessage,
+             "can result in undefined behavior if this memory is used by the "
+             "device") != nullptr) {
+    return VK_FALSE;
+  }
+  // This needs to be ignored because Validator is wrong here.
+ if (strstr(callback_data->pMessage, + "SPIR-V module not valid: Pointer operand") != nullptr && + strstr(callback_data->pMessage, "must be a memory object") != nullptr) { + return VK_FALSE; + } + // Workaround for Vulkan-Loader usability bug: + // https://github.com/KhronosGroup/Vulkan-Loader/issues/262. + if (strstr(callback_data->pMessage, "wrong ELF class: ELFCLASS32") != + nullptr) { + return VK_FALSE; + } + if (callback_data->pMessageIdName && + strstr(callback_data->pMessageIdName, + "UNASSIGNED-CoreValidation-DrawState-ClearCmdBeforeDraw") != + nullptr) { + return VK_FALSE; + } + + std::string type_string; + switch (message_type) { + case (VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT): + type_string = "GENERAL"; + break; + case (VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT): + type_string = "VALIDATION"; + break; + case (VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT): + type_string = "PERFORMANCE"; + break; + case (VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT & + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT): + type_string = "VALIDATION|PERFORMANCE"; + break; + } + + std::string objects_string; + if (callback_data->objectCount > 0) { + objects_string = + "\n\tObjects - " + std::to_string(callback_data->objectCount); + for (uint32_t object = 0; object < callback_data->objectCount; ++object) { + objects_string += + "\n\t\tObject[" + std::to_string(object) + "]" + " - " + + string_VkObjectType(callback_data->pObjects[object].objectType) + + ", Handle " + + std::to_string(callback_data->pObjects[object].objectHandle); + if (nullptr != callback_data->pObjects[object].pObjectName && + strlen(callback_data->pObjects[object].pObjectName) > 0) { + objects_string += + ", Name \"" + + std::string(callback_data->pObjects[object].pObjectName) + "\""; + } + } + } + + std::string labels_string; + if (callback_data->cmdBufLabelCount > 0) { + labels_string = "\n\tCommand Buffer Labels - " + + std::to_string(callback_data->cmdBufLabelCount); + for (uint32_t 
cmd_buf_label = 0; + cmd_buf_label < callback_data->cmdBufLabelCount; ++cmd_buf_label) { + labels_string += + "\n\t\tLabel[" + std::to_string(cmd_buf_label) + "]" + " - " + + callback_data->pCmdBufLabels[cmd_buf_label].pLabelName + "{ "; + for (int color_idx = 0; color_idx < 4; ++color_idx) { + labels_string += std::to_string( + callback_data->pCmdBufLabels[cmd_buf_label].color[color_idx]); + if (color_idx < 3) { + labels_string += ", "; + } + } + labels_string += " }"; + } + } + + std::string error_message( + type_string + " - Message Id Number: " + + std::to_string(callback_data->messageIdNumber) + + " | Message Id Name: " + callback_data->pMessageIdName + "\n\t" + + callback_data->pMessage + objects_string + labels_string); + + switch (message_severity) { + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT: + LOG << error_message; + break; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT: + LOG << error_message; + break; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT: + LOG << error_message; + break; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT: + LOG << error_message; + break; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT: + break; + } + + return VK_FALSE; +} + +VkBool32 VulkanContext::CheckLayers(uint32_t check_count, + const char** check_names, + uint32_t layer_count, + VkLayerProperties* layers) { + for (uint32_t i = 0; i < check_count; i++) { + VkBool32 found = 0; + for (uint32_t j = 0; j < layer_count; j++) { + if (!strcmp(check_names[i], layers[j].layerName)) { + found = 1; + break; + } + } + if (!found) { + DLOG << "Can't find layer: " << check_names[i]; + return 0; + } + } + return 1; +} + +bool VulkanContext::CreateValidationLayers() { + VkResult err; + const char* instance_validation_layers_alt1[] = { + "VK_LAYER_KHRONOS_validation"}; + const char* instance_validation_layers_alt2[] = { + "VK_LAYER_LUNARG_standard_validation"}; + const char* instance_validation_layers_alt3[] = { + "VK_LAYER_GOOGLE_threading", 
"VK_LAYER_LUNARG_parameter_validation", + "VK_LAYER_LUNARG_object_tracker", "VK_LAYER_LUNARG_core_validation", + "VK_LAYER_GOOGLE_unique_objects"}; + + uint32_t instance_layer_count = 0; + err = vkEnumerateInstanceLayerProperties(&instance_layer_count, nullptr); + if (err) { + DLOG << "vkEnumerateInstanceLayerProperties failed. Error: " << err; + return false; + } + + VkBool32 validation_found = 0; + uint32_t validation_layer_count = 0; + const char** instance_validation_layers = nullptr; + if (instance_layer_count > 0) { + auto instance_layers = + std::make_unique(instance_layer_count); + err = vkEnumerateInstanceLayerProperties(&instance_layer_count, + instance_layers.get()); + if (err) { + DLOG << "vkEnumerateInstanceLayerProperties failed. Error: " << err; + return false; + } + + validation_layer_count = std::size(instance_validation_layers_alt1); + instance_validation_layers = instance_validation_layers_alt1; + validation_found = + CheckLayers(validation_layer_count, instance_validation_layers, + instance_layer_count, instance_layers.get()); + + // use alternative (deprecated, removed in SDK 1.1.126.0) set of validation + // layers. + if (!validation_found) { + validation_layer_count = std::size(instance_validation_layers_alt2); + instance_validation_layers = instance_validation_layers_alt2; + validation_found = + CheckLayers(validation_layer_count, instance_validation_layers, + instance_layer_count, instance_layers.get()); + } + + // use alternative (deprecated, removed in SDK 1.1.121.1) set of validation + // layers. 
+ if (!validation_found) { + validation_layer_count = std::size(instance_validation_layers_alt3); + instance_validation_layers = instance_validation_layers_alt3; + validation_found = + CheckLayers(validation_layer_count, instance_validation_layers, + instance_layer_count, instance_layers.get()); + } + } + + if (validation_found) { + enabled_layer_count_ = validation_layer_count; + for (uint32_t i = 0; i < validation_layer_count; i++) { + enabled_layers_[i] = instance_validation_layers[i]; + } + } else { + return false; + } + + return true; +} + +bool VulkanContext::InitializeExtensions() { + VkResult err; + uint32_t instance_extension_count = 0; + + enabled_extension_count_ = 0; + enabled_layer_count_ = 0; + VkBool32 surfaceExtFound = 0; + VkBool32 platformSurfaceExtFound = 0; + memset(extension_names_, 0, sizeof(extension_names_)); + + err = vkEnumerateInstanceExtensionProperties( + nullptr, &instance_extension_count, nullptr); + if (err) { + DLOG << "vkEnumerateInstanceExtensionProperties failed. Error: " << err; + return false; + } + + if (instance_extension_count > 0) { + auto instance_extensions = + std::make_unique(instance_extension_count); + err = vkEnumerateInstanceExtensionProperties( + nullptr, &instance_extension_count, instance_extensions.get()); + if (err) { + DLOG << "vkEnumerateInstanceExtensionProperties failed. 
Error: " << err; + return false; + } + for (uint32_t i = 0; i < instance_extension_count; i++) { + if (!strcmp(VK_KHR_SURFACE_EXTENSION_NAME, + instance_extensions[i].extensionName)) { + surfaceExtFound = 1; + extension_names_[enabled_extension_count_++] = + VK_KHR_SURFACE_EXTENSION_NAME; + } + + if (!strcmp(GetPlatformSurfaceExtension(), + instance_extensions[i].extensionName)) { + platformSurfaceExtFound = 1; + extension_names_[enabled_extension_count_++] = + GetPlatformSurfaceExtension(); + } + if (!strcmp(VK_EXT_DEBUG_REPORT_EXTENSION_NAME, + instance_extensions[i].extensionName)) { + if (use_validation_layers_) { + extension_names_[enabled_extension_count_++] = + VK_EXT_DEBUG_REPORT_EXTENSION_NAME; + } + } + if (!strcmp(VK_EXT_DEBUG_UTILS_EXTENSION_NAME, + instance_extensions[i].extensionName)) { + if (use_validation_layers_) { + extension_names_[enabled_extension_count_++] = + VK_EXT_DEBUG_UTILS_EXTENSION_NAME; + } + } + if (enabled_extension_count_ >= kMaxExtensions) { + DLOG << "Enabled extension count reaches kMaxExtensions"; + return false; + } + } + } + + if (!surfaceExtFound) { + DLOG << "No surface extension found."; + return false; + } + if (!platformSurfaceExtFound) { + DLOG << "No platform surface extension found."; + return false; + } + + return true; +} + +bool VulkanContext::CreatePhysicalDevice() { + if (use_validation_layers_) { + CreateValidationLayers(); + } + + if (!InitializeExtensions()) + return false; + + const VkApplicationInfo app = { + /*sType*/ VK_STRUCTURE_TYPE_APPLICATION_INFO, + /*pNext*/ nullptr, + /*pApplicationName*/ "kaliber", + /*applicationVersion*/ 0, + /*pEngineName*/ "kaliber", + /*engineVersion*/ 0, + /*apiVersion*/ VK_API_VERSION_1_0, + }; + VkInstanceCreateInfo inst_info = { + /*sType*/ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + /*pNext*/ nullptr, + /*flags*/ 0, + /*pApplicationInfo*/ &app, + /*enabledLayerCount*/ enabled_layer_count_, + /*ppEnabledLayerNames*/ (const char* const*)enabled_layers_, + 
/*enabledExtensionCount*/ enabled_extension_count_, + /*ppEnabledExtensionNames*/ (const char* const*)extension_names_, + }; + + // This is info for a temp callback to use during CreateInstance. After the + // instance is created, we use the instance-based function to register the + // final callback. + VkDebugUtilsMessengerCreateInfoEXT dbg_messenger_info; + if (use_validation_layers_) { + dbg_messenger_info.sType = + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; + dbg_messenger_info.pNext = nullptr; + dbg_messenger_info.flags = 0; + dbg_messenger_info.messageSeverity = + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; + dbg_messenger_info.messageType = + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; + dbg_messenger_info.pfnUserCallback = DebugMessengerCallback; + dbg_messenger_info.pUserData = this; + inst_info.pNext = &dbg_messenger_info; + } + + uint32_t gpu_count; + + VkResult err = vkCreateInstance(&inst_info, nullptr, &instance_); + if (err == VK_ERROR_INCOMPATIBLE_DRIVER) { + DLOG << "Cannot find a compatible Vulkan installable client driver (ICD)."; + return false; + } + if (err == VK_ERROR_EXTENSION_NOT_PRESENT) { + DLOG << "Cannot find a specified extension library. Make sure your layers " + "path is set appropriately. "; + return false; + } + if (err) { + DLOG << "vkCreateInstance failed. Error: " << err; + return false; + } + + // Make initial call to query gpu_count. + err = vkEnumeratePhysicalDevices(instance_, &gpu_count, nullptr); + if (err) { + DLOG << "vkEnumeratePhysicalDevices failed. 
Error: " << err; + return false; + } + + if (gpu_count == 0) { + DLOG << "vkEnumeratePhysicalDevices reported zero accessible devices."; + return false; + } + + auto physical_devices = std::make_unique(gpu_count); + err = + vkEnumeratePhysicalDevices(instance_, &gpu_count, physical_devices.get()); + if (err) { + DLOG << "vkEnumeratePhysicalDevices failed. Error: " << err; + return false; + } + // Grab the first physical device for now. + gpu_ = physical_devices[0]; + + // Look for device extensions. + uint32_t device_extension_count = 0; + VkBool32 swapchain_ext_found = 0; + enabled_extension_count_ = 0; + memset(extension_names_, 0, sizeof(extension_names_)); + + err = vkEnumerateDeviceExtensionProperties(gpu_, nullptr, + &device_extension_count, nullptr); + if (err) { + DLOG << "vkEnumerateDeviceExtensionProperties failed. Error: " << err; + return false; + } + + if (device_extension_count > 0) { + auto device_extensions = + std::make_unique(device_extension_count); + err = vkEnumerateDeviceExtensionProperties( + gpu_, nullptr, &device_extension_count, device_extensions.get()); + if (err) { + DLOG << "vkEnumerateDeviceExtensionProperties failed. Error: " << err; + return false; + } + + for (uint32_t i = 0; i < device_extension_count; i++) { + if (!strcmp(VK_KHR_SWAPCHAIN_EXTENSION_NAME, + device_extensions[i].extensionName)) { + swapchain_ext_found = 1; + extension_names_[enabled_extension_count_++] = + VK_KHR_SWAPCHAIN_EXTENSION_NAME; + } + if (enabled_extension_count_ >= kMaxExtensions) { + DLOG << "Enabled extension count reaches kMaxExtensions"; + return false; + } + } + + // Enable VK_KHR_maintenance1 extension for old vulkan drivers. 
+ for (uint32_t i = 0; i < device_extension_count; i++) { + if (!strcmp(VK_KHR_MAINTENANCE1_EXTENSION_NAME, + device_extensions[i].extensionName)) { + extension_names_[enabled_extension_count_++] = + VK_KHR_MAINTENANCE1_EXTENSION_NAME; + } + if (enabled_extension_count_ >= kMaxExtensions) { + DLOG << "Enabled extension count reaches kMaxExtensions"; + return false; + } + } + } + + if (!swapchain_ext_found) { + DLOG << "vkEnumerateDeviceExtensionProperties failed to find " + "the " VK_KHR_SWAPCHAIN_EXTENSION_NAME " extension."; + return false; + } + + if (use_validation_layers_) { + CreateDebugUtilsMessengerEXT = + (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr( + instance_, "vkCreateDebugUtilsMessengerEXT"); + DestroyDebugUtilsMessengerEXT = + (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr( + instance_, "vkDestroyDebugUtilsMessengerEXT"); + if (nullptr == CreateDebugUtilsMessengerEXT || + nullptr == DestroyDebugUtilsMessengerEXT) { + DLOG << "GetProcAddr: Failed to init VK_EXT_debug_utils"; + return false; + } + + err = CreateDebugUtilsMessengerEXT(instance_, &dbg_messenger_info, nullptr, + &dbg_messenger_); + switch (err) { + case VK_SUCCESS: + break; + case VK_ERROR_OUT_OF_HOST_MEMORY: + DLOG << "CreateDebugUtilsMessengerEXT: out of host memory"; + return false; + default: + DLOG << "CreateDebugUtilsMessengerEXT: unknown failure"; + return false; + break; + } + } + vkGetPhysicalDeviceProperties(gpu_, &gpu_props_); + + LOG << "Vulkan:"; + LOG << " Name: " << gpu_props_.deviceName; + LOG << " Tame: " << string_VkPhysicalDeviceType(gpu_props_.deviceType); + LOG << " Vendor ID: " << gpu_props_.vendorID; + LOG << " API version: " << VK_VERSION_MAJOR(gpu_props_.apiVersion) << "." + << VK_VERSION_MINOR(gpu_props_.apiVersion) << "." + << VK_VERSION_PATCH(gpu_props_.apiVersion); + LOG << " Driver version: " << VK_VERSION_MAJOR(gpu_props_.driverVersion) + << "." << VK_VERSION_MINOR(gpu_props_.driverVersion) << "." 
+ << VK_VERSION_PATCH(gpu_props_.driverVersion); + + // Call with NULL data to get count, + vkGetPhysicalDeviceQueueFamilyProperties(gpu_, &queue_family_count_, nullptr); + if (queue_family_count_ == 0) { + DLOG << "Failed to query queue family count."; + return false; + } + + queue_props_ = + std::make_unique(queue_family_count_); + vkGetPhysicalDeviceQueueFamilyProperties(gpu_, &queue_family_count_, + queue_props_.get()); + + // Query fine-grained feature support for this device. + // If app has specific feature requirements it should check supported features + // based on this query. + vkGetPhysicalDeviceFeatures(gpu_, &physical_device_features_); + + GET_PROC_ADDR(vkGetInstanceProcAddr, instance_, + GetPhysicalDeviceSurfaceSupportKHR); + GET_PROC_ADDR(vkGetInstanceProcAddr, instance_, + GetPhysicalDeviceSurfaceCapabilitiesKHR); + GET_PROC_ADDR(vkGetInstanceProcAddr, instance_, + GetPhysicalDeviceSurfaceFormatsKHR); + GET_PROC_ADDR(vkGetInstanceProcAddr, instance_, + GetPhysicalDeviceSurfacePresentModesKHR); + GET_PROC_ADDR(vkGetInstanceProcAddr, instance_, GetSwapchainImagesKHR); + + return true; +} + +bool VulkanContext::CreateDevice() { + VkResult err; + float queue_priorities[1] = {0.0}; + VkDeviceQueueCreateInfo queues[2]; + queues[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queues[0].pNext = nullptr; + queues[0].queueFamilyIndex = graphics_queue_family_index_; + queues[0].queueCount = 1; + queues[0].pQueuePriorities = queue_priorities; + queues[0].flags = 0; + + VkDeviceCreateInfo sdevice = { + /*sType*/ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, + /*pNext*/ nullptr, + /*flags*/ 0, + /*queueCreateInfoCount*/ 1, + /*pQueueCreateInfos*/ queues, + /*enabledLayerCount*/ 0, + /*ppEnabledLayerNames*/ nullptr, + /*enabledExtensionCount*/ enabled_extension_count_, + /*ppEnabledExtensionNames*/ (const char* const*)extension_names_, + /*pEnabledFeatures*/ &physical_device_features_, // If specific features + // are required, pass + // them in here + + }; + 
if (separate_present_queue_) { + queues[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queues[1].pNext = nullptr; + queues[1].queueFamilyIndex = present_queue_family_index_; + queues[1].queueCount = 1; + queues[1].pQueuePriorities = queue_priorities; + queues[1].flags = 0; + sdevice.queueCreateInfoCount = 2; + } + err = vkCreateDevice(gpu_, &sdevice, nullptr, &device_); + if (err) { + DLOG << "vkCreateDevice failed. Error: " << err; + return false; + } + return true; +} + +bool VulkanContext::InitializeQueues(VkSurfaceKHR surface) { + // Iterate over each queue to learn whether it supports presenting: + auto supports_present = std::make_unique(queue_family_count_); + for (uint32_t i = 0; i < queue_family_count_; i++) { + GetPhysicalDeviceSurfaceSupportKHR(gpu_, i, surface, &supports_present[i]); + } + + // Search for a graphics and a present queue in the array of queue families, + // try to find one that supports both. + uint32_t graphics_queue_family_index = std::numeric_limits::max(); + uint32_t present_queue_family_index = std::numeric_limits::max(); + for (uint32_t i = 0; i < queue_family_count_; i++) { + if ((queue_props_[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) { + if (graphics_queue_family_index == std::numeric_limits::max()) { + graphics_queue_family_index = i; + } + + if (supports_present[i] == VK_TRUE) { + graphics_queue_family_index = i; + present_queue_family_index = i; + break; + } + } + } + + if (present_queue_family_index == std::numeric_limits::max()) { + // If didn't find a queue that supports both graphics and present, then find + // a separate present queue. 
+ for (uint32_t i = 0; i < queue_family_count_; ++i) { + if (supports_present[i] == VK_TRUE) { + present_queue_family_index = i; + break; + } + } + } + + // Generate error if could not find both a graphics and a present queue + if (graphics_queue_family_index == std::numeric_limits::max() || + present_queue_family_index == std::numeric_limits::max()) { + DLOG << "Could not find both graphics and present queues."; + return false; + } + + graphics_queue_family_index_ = graphics_queue_family_index; + present_queue_family_index_ = present_queue_family_index; + separate_present_queue_ = + (graphics_queue_family_index_ != present_queue_family_index_); + + CreateDevice(); + + PFN_vkGetDeviceProcAddr GetDeviceProcAddr = nullptr; + GET_PROC_ADDR(vkGetInstanceProcAddr, instance_, GetDeviceProcAddr); + + GET_PROC_ADDR(GetDeviceProcAddr, device_, CreateSwapchainKHR); + GET_PROC_ADDR(GetDeviceProcAddr, device_, DestroySwapchainKHR); + GET_PROC_ADDR(GetDeviceProcAddr, device_, GetSwapchainImagesKHR); + GET_PROC_ADDR(GetDeviceProcAddr, device_, AcquireNextImageKHR); + GET_PROC_ADDR(GetDeviceProcAddr, device_, QueuePresentKHR); + + vkGetDeviceQueue(device_, graphics_queue_family_index_, 0, &graphics_queue_); + + if (!separate_present_queue_) { + present_queue_ = graphics_queue_; + } else { + vkGetDeviceQueue(device_, present_queue_family_index_, 0, &present_queue_); + } + + // Get the list of VkFormat's that are supported. + uint32_t format_count; + VkResult err = + GetPhysicalDeviceSurfaceFormatsKHR(gpu_, surface, &format_count, nullptr); + if (err) { + DLOG << "GetPhysicalDeviceSurfaceFormatsKHR failed. Error: " << err; + return false; + } + auto surf_formats = std::make_unique(format_count); + err = GetPhysicalDeviceSurfaceFormatsKHR(gpu_, surface, &format_count, + surf_formats.get()); + if (err) { + DLOG << "GetPhysicalDeviceSurfaceFormatsKHR failed. 
Error: " << err; + return false; + } + +#if defined(__ANDROID__) + VkFormat desired_format = VK_FORMAT_R8G8B8A8_UNORM; +#elif defined(__linux__) + VkFormat desired_format = VK_FORMAT_B8G8R8A8_UNORM; +#endif + + // If the format list includes just one entry of VK_FORMAT_UNDEFINED, the + // surface has no preferred format. Otherwise, at least one supported format + // will be returned. + if (true || + (format_count == 1 && surf_formats[0].format == VK_FORMAT_UNDEFINED)) { + format_ = desired_format; + } else { + if (format_count < 1) { + DLOG << "Format count less than 1."; + return false; + } + format_ = surf_formats[0].format; + for (unsigned i = 0; i < format_count; ++i) { + if (surf_formats[i].format == desired_format) { + format_ = desired_format; + break; + } + } + } + color_space_ = surf_formats[0].colorSpace; + + if (!CreateSemaphores()) + return false; + + queues_initialized_ = true; + return true; +} + +bool VulkanContext::CreateSemaphores() { + VkResult err; + + // Create semaphores to synchronize acquiring presentable buffers before + // rendering and waiting for drawing to be complete before presenting. + VkSemaphoreCreateInfo semaphoreCreateInfo = { + /*sType*/ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, + /*pNext*/ nullptr, + /*flags*/ 0, + }; + + // Create fences that we can use to throttle if we get too far ahead of the + // image presents. + VkFenceCreateInfo fence_ci = {/*sType*/ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + /*pNext*/ nullptr, + /*flags*/ VK_FENCE_CREATE_SIGNALED_BIT}; + + for (uint32_t i = 0; i < kFrameLag; i++) { + err = vkCreateFence(device_, &fence_ci, nullptr, &fences_[i]); + if (err) { + DLOG << "vkCreateFence failed. Error: " << err; + return false; + } + err = vkCreateSemaphore(device_, &semaphoreCreateInfo, nullptr, + &image_acquired_semaphores_[i]); + if (err) { + DLOG << "vkCreateSemaphore failed. 
Error: " << err; + return false; + } + err = vkCreateSemaphore(device_, &semaphoreCreateInfo, nullptr, + &draw_complete_semaphores_[i]); + if (err) { + DLOG << "vkCreateSemaphore failed. Error: " << err; + return false; + } + if (separate_present_queue_) { + err = vkCreateSemaphore(device_, &semaphoreCreateInfo, nullptr, + &image_ownership_semaphores_[i]); + if (err) { + DLOG << "vkCreateSemaphore failed. Error: " << err; + return false; + } + } + } + frame_index_ = 0; + + // Get Memory information and properties. + vkGetPhysicalDeviceMemoryProperties(gpu_, &memory_properties_); + + return true; +} + +void VulkanContext::ResizeWindow(int width, int height) { + window_.width = width; + window_.height = height; + UpdateSwapChain(&window_); +} + +void VulkanContext::DestroyWindow() { + CleanUpSwapChain(&window_); + vkDestroySurfaceKHR(instance_, window_.surface, nullptr); +} + +VkFramebuffer VulkanContext::GetFramebuffer() { + return buffers_prepared_ + ? window_.swapchain_image_resources[window_.current_buffer] + .frame_buffer + : VK_NULL_HANDLE; +} + +bool VulkanContext::CleanUpSwapChain(Window* window) { + if (!window->swapchain) + return true; + + vkDeviceWaitIdle(device_); + + DestroySwapchainKHR(device_, window->swapchain, nullptr); + window->swapchain = VK_NULL_HANDLE; + vkDestroyRenderPass(device_, window->render_pass, nullptr); + if (window->swapchain_image_resources) { + for (uint32_t i = 0; i < swapchain_image_count_; i++) { + vkDestroyImageView(device_, window->swapchain_image_resources[i].view, + nullptr); + vkDestroyFramebuffer( + device_, window->swapchain_image_resources[i].frame_buffer, nullptr); + } + + window->swapchain_image_resources.reset(); + } + + if (separate_present_queue_) + vkDestroyCommandPool(device_, window->present_cmd_pool, nullptr); + + return true; +} + +bool VulkanContext::UpdateSwapChain(Window* window) { + VkResult err; + + if (window->swapchain) + CleanUpSwapChain(window); + + // Check the surface capabilities and formats. 
+ VkSurfaceCapabilitiesKHR surf_capabilities; + err = GetPhysicalDeviceSurfaceCapabilitiesKHR(gpu_, window->surface, + &surf_capabilities); + if (err) { + DLOG << "GetPhysicalDeviceSurfaceCapabilitiesKHR failed. Error: " << err; + return false; + } + + uint32_t present_mode_count; + err = GetPhysicalDeviceSurfacePresentModesKHR(gpu_, window->surface, + &present_mode_count, nullptr); + if (err) { + DLOG << "GetPhysicalDeviceSurfacePresentModesKHR failed. Error: " << err; + return false; + } + + auto present_modes = std::make_unique(present_mode_count); + + err = GetPhysicalDeviceSurfacePresentModesKHR( + gpu_, window->surface, &present_mode_count, present_modes.get()); + if (err) { + DLOG << "GetPhysicalDeviceSurfacePresentModesKHR failed. Error: " << err; + return false; + } + + // width and height are either both 0xFFFFFFFF, or both not 0xFFFFFFFF. + if (surf_capabilities.currentExtent.width == 0xFFFFFFFF) { + // If the surface size is undefined, the size is set to the size of the + // images requested, which must fit within the minimum and maximum values. 
+ window->swapchain_extent.width = window->width; + window->swapchain_extent.height = window->height; + + if (window->swapchain_extent.width < + surf_capabilities.minImageExtent.width) { + window->swapchain_extent.width = surf_capabilities.minImageExtent.width; + } else if (window->swapchain_extent.width > + surf_capabilities.maxImageExtent.width) { + window->swapchain_extent.width = surf_capabilities.maxImageExtent.width; + } + + if (window->swapchain_extent.height < + surf_capabilities.minImageExtent.height) { + window->swapchain_extent.height = surf_capabilities.minImageExtent.height; + } else if (window->swapchain_extent.height > + surf_capabilities.maxImageExtent.height) { + window->swapchain_extent.height = surf_capabilities.maxImageExtent.height; + } + } else { + // If the surface size is defined, the swap chain size must match + window->swapchain_extent = surf_capabilities.currentExtent; + window->width = surf_capabilities.currentExtent.width; + window->height = surf_capabilities.currentExtent.height; + } + + if (window->width == 0 || window->height == 0) { + // likely window minimized, no swapchain created + return true; + } + + // The application will render an image, then pass it to the presentation + // engine via vkQueuePresentKHR. The presentation engine will display the + // image for the next VSync cycle, and then it will make it available to the + // application again. The only present modes which support VSync are: + // + // VK_PRESENT_MODE_FIFO_KHR: At each VSync signal, the image in front of the + // queue displays on screen and is then released. The application will acquire + // one of the available ones, draw to it and then hand it over to the + // presentation engine, which will push it to the back of the queue. If + // rendering is fast the queue can become full. The CPU and the GPU will idle + // until an image is available again. This behavior works well on mobile + // because it limits overheating and saves battery life. 
+ // + // VK_PRESENT_MODE_MAILBOX_KHR: The application can acquire a new image + // straight away, render to it, and hand it over to the presentation engine. + // If an image is queued for presentation, it will be discarded. Being able to + // keep submitting new frames lets the application ensure it has the latest + // user input, so input latency can be lower versus FIFO. If the application + // doesn't throttle CPU and GPU, one of them may be fully utilized, resulting + // in higher power consumption. + VkPresentModeKHR swapchain_present_mode = VK_PRESENT_MODE_FIFO_KHR; + VkPresentModeKHR fallback_present_mode = VK_PRESENT_MODE_FIFO_KHR; + if (swapchain_present_mode != fallback_present_mode) { + for (size_t i = 0; i < present_mode_count; ++i) { + if (present_modes[i] == swapchain_present_mode) { + // Supported. + fallback_present_mode = swapchain_present_mode; + break; + } + } + } + + if (swapchain_present_mode != fallback_present_mode) { + LOG << "Present mode " << swapchain_present_mode << " is not supported"; + swapchain_present_mode = fallback_present_mode; + } + + // 2 for double buffering, 3 for triple buffering. + // Double buffering works well if frames can be processed within the interval + // between VSync signals, which is 16.6ms at a rate of 60 fps. The rendered + // image is presented to the swapchain, and the previously presented one is + // made available to the application again. If the GPU cannot process frames + // fast enough, VSync will be missed and the application wait have to wait for + // another whole VSync cycle, which caps framerate at 30 fps. This may be ok, + // but triple buffering can deliver higher framerate. + uint32_t desired_num_of_swapchain_images = 3; + if (desired_num_of_swapchain_images < surf_capabilities.minImageCount) { + desired_num_of_swapchain_images = surf_capabilities.minImageCount; + } + // If maxImageCount is 0, we can ask for as many images as we want; otherwise + // we're limited to maxImageCount. 
+ if ((surf_capabilities.maxImageCount > 0) && + (desired_num_of_swapchain_images > surf_capabilities.maxImageCount)) { + // Application must settle for fewer images than desired. + desired_num_of_swapchain_images = surf_capabilities.maxImageCount; + } + + VkSurfaceTransformFlagsKHR pre_transform; + if (surf_capabilities.supportedTransforms & + VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) { + pre_transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR; + } else { + pre_transform = surf_capabilities.currentTransform; + } + + // Find a supported composite alpha mode. One of these is guaranteed to be + // set. + VkCompositeAlphaFlagBitsKHR composite_alpha = + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; + VkCompositeAlphaFlagBitsKHR composite_alpha_flags[4] = { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR, + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR, + VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR, + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR, + }; + for (uint32_t i = 0; i < std::size(composite_alpha_flags); i++) { + if (surf_capabilities.supportedCompositeAlpha & composite_alpha_flags[i]) { + composite_alpha = composite_alpha_flags[i]; + break; + } + } + + VkSwapchainCreateInfoKHR swapchain_ci = { + /*sType*/ VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + /*pNext*/ nullptr, + /*flags*/ 0, + /*surface*/ window->surface, + /*minImageCount*/ desired_num_of_swapchain_images, + /*imageFormat*/ format_, + /*imageColorSpace*/ color_space_, + /*imageExtent*/ + { + /*width*/ window->swapchain_extent.width, + /*height*/ window->swapchain_extent.height, + }, + /*imageArrayLayers*/ 1, + /*imageUsage*/ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, + /*imageSharingMode*/ VK_SHARING_MODE_EXCLUSIVE, + /*queueFamilyIndexCount*/ 0, + /*pQueueFamilyIndices*/ nullptr, + /*preTransform*/ (VkSurfaceTransformFlagBitsKHR)pre_transform, + /*compositeAlpha*/ composite_alpha, + /*presentMode*/ swapchain_present_mode, + /*clipped*/ true, + /*oldSwapchain*/ VK_NULL_HANDLE, + }; + + err = CreateSwapchainKHR(device_, &swapchain_ci, nullptr, 
&window->swapchain); + if (err) { + DLOG << "CreateSwapchainKHR failed. Error: " << err; + return false; + } + + uint32_t sp_image_count; + err = GetSwapchainImagesKHR(device_, window->swapchain, &sp_image_count, + nullptr); + if (err) { + DLOG << "CreateSwapchainKHR failed. Error: " << err; + return false; + } + + if (swapchain_image_count_ == 0) { + // Assign for the first time. + swapchain_image_count_ = sp_image_count; + } else if (swapchain_image_count_ != sp_image_count) { + DLOG << "Swapchain image count mismatch"; + return false; + } + + auto swapchain_images = std::make_unique(swapchain_image_count_); + + err = GetSwapchainImagesKHR(device_, window->swapchain, + &swapchain_image_count_, swapchain_images.get()); + if (err) { + DLOG << "GetSwapchainImagesKHR failed. Error: " << err; + return false; + } + + window->swapchain_image_resources = + std::make_unique(swapchain_image_count_); + + for (uint32_t i = 0; i < swapchain_image_count_; i++) { + VkImageViewCreateInfo color_image_view = { + /*sType*/ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + /*pNext*/ nullptr, + /*flags*/ 0, + /*image*/ swapchain_images[i], + /*viewType*/ VK_IMAGE_VIEW_TYPE_2D, + /*format*/ format_, + /*components*/ + { + /*r*/ VK_COMPONENT_SWIZZLE_R, + /*g*/ VK_COMPONENT_SWIZZLE_G, + /*b*/ VK_COMPONENT_SWIZZLE_B, + /*a*/ VK_COMPONENT_SWIZZLE_A, + }, + /*subresourceRange*/ + {/*aspectMask*/ VK_IMAGE_ASPECT_COLOR_BIT, + /*baseMipLevel*/ 0, + /*levelCount*/ 1, + /*baseArrayLayer*/ 0, + /*layerCount*/ 1}, + }; + + window->swapchain_image_resources[i].image = swapchain_images[i]; + + color_image_view.image = window->swapchain_image_resources[i].image; + + err = vkCreateImageView(device_, &color_image_view, nullptr, + &window->swapchain_image_resources[i].view); + if (err) { + DLOG << "vkCreateImageView failed. 
Error: " << err; + return false; + } + } + + // Framebuffer + + { + const VkAttachmentDescription attachment = { + /*flags*/ 0, + /*format*/ format_, + /*samples*/ VK_SAMPLE_COUNT_1_BIT, + /*loadOp*/ VK_ATTACHMENT_LOAD_OP_CLEAR, + /*storeOp*/ VK_ATTACHMENT_STORE_OP_STORE, + /*stencilLoadOp*/ VK_ATTACHMENT_LOAD_OP_DONT_CARE, + /*stencilStoreOp*/ VK_ATTACHMENT_STORE_OP_DONT_CARE, + /*initialLayout*/ VK_IMAGE_LAYOUT_UNDEFINED, + /*finalLayout*/ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + + }; + const VkAttachmentReference color_reference = { + /*attachment*/ 0, + /*layout*/ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + }; + + const VkSubpassDescription subpass = { + /*flags*/ 0, + /*pipelineBindPoint*/ VK_PIPELINE_BIND_POINT_GRAPHICS, + /*inputAttachmentCount*/ 0, + /*pInputAttachments*/ nullptr, + /*colorAttachmentCount*/ 1, + /*pColorAttachments*/ &color_reference, + /*pResolveAttachments*/ nullptr, + /*pDepthStencilAttachment*/ nullptr, + /*preserveAttachmentCount*/ 0, + /*pPreserveAttachments*/ nullptr, + }; + const VkRenderPassCreateInfo rp_info = { + /*sTyp*/ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, + /*pNext*/ nullptr, + /*flags*/ 0, + /*attachmentCount*/ 1, + /*pAttachments*/ &attachment, + /*subpassCount*/ 1, + /*pSubpasses*/ &subpass, + /*dependencyCount*/ 0, + /*pDependencies*/ nullptr, + }; + + err = vkCreateRenderPass(device_, &rp_info, nullptr, &window->render_pass); + if (err) { + DLOG << "vkCreateRenderPass failed. 
Error: " << err; + return false; + } + + for (uint32_t i = 0; i < swapchain_image_count_; i++) { + const VkFramebufferCreateInfo fb_info = { + /*sType*/ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, + /*pNext*/ nullptr, + /*flags*/ 0, + /*renderPass*/ window->render_pass, + /*attachmentCount*/ 1, + /*pAttachments*/ &window->swapchain_image_resources[i].view, + /*width*/ (uint32_t)window->width, + /*height*/ (uint32_t)window->height, + /*layers*/ 1, + }; + + err = vkCreateFramebuffer( + device_, &fb_info, nullptr, + &window->swapchain_image_resources[i].frame_buffer); + if (err) { + DLOG << "vkCreateFramebuffer failed. Error: " << err; + return false; + } + } + } + + // Separate present queue + + if (separate_present_queue_) { + const VkCommandPoolCreateInfo present_cmd_pool_info = { + /*sType*/ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + /*pNext*/ nullptr, + /*flags*/ 0, + /*queueFamilyIndex*/ present_queue_family_index_, + }; + err = vkCreateCommandPool(device_, &present_cmd_pool_info, nullptr, + &window->present_cmd_pool); + if (err) { + DLOG << "vkCreateCommandPool failed. Error: " << err; + return false; + } + + const VkCommandBufferAllocateInfo present_cmd_info = { + /*sType*/ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + /*pNext*/ nullptr, + /*commandPool*/ window->present_cmd_pool, + /*level*/ VK_COMMAND_BUFFER_LEVEL_PRIMARY, + /*commandBufferCount*/ 1, + }; + for (uint32_t i = 0; i < swapchain_image_count_; i++) { + err = vkAllocateCommandBuffers( + device_, &present_cmd_info, + &window->swapchain_image_resources[i].graphics_to_present_cmd); + if (err) { + DLOG << "vkAllocateCommandBuffers failed. 
Error: " << err; + return false; + } + + const VkCommandBufferBeginInfo cmd_buf_info = { + /*sType*/ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + /*pNext*/ nullptr, + /*flags*/ VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, + /*pInheritanceInfo*/ nullptr, + }; + err = vkBeginCommandBuffer( + window->swapchain_image_resources[i].graphics_to_present_cmd, + &cmd_buf_info); + if (err) { + DLOG << "vkBeginCommandBuffer failed. Error: " << err; + return false; + } + + VkImageMemoryBarrier image_ownership_barrier = { + /*sType*/ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + /*pNext*/ nullptr, + /*srcAccessMask*/ 0, + /*dstAccessMask*/ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + /*oldLayout*/ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + /*newLayout*/ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + /*srcQueueFamilyIndex*/ graphics_queue_family_index_, + /*dstQueueFamilyIndex*/ present_queue_family_index_, + /*image*/ window->swapchain_image_resources[i].image, + /*subresourceRange*/ {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}}; + + vkCmdPipelineBarrier( + window->swapchain_image_resources[i].graphics_to_present_cmd, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, nullptr, 0, + nullptr, 1, &image_ownership_barrier); + err = vkEndCommandBuffer( + window->swapchain_image_resources[i].graphics_to_present_cmd); + if (err) { + DLOG << "vkEndCommandBuffer failed. Error: " << err; + return false; + } + } + } + + // Reset current buffer. + window->current_buffer = 0; + + return true; +} + +void VulkanContext::AppendCommandBuffer(const VkCommandBuffer& command_buffer) { + command_buffers_.push_back(command_buffer); +} + +void VulkanContext::Flush() { + // Ensure everything else pending is executed. + vkDeviceWaitIdle(device_); + + // Flush the current frame. 
+ VkSubmitInfo submit_info; + submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submit_info.pNext = nullptr; + submit_info.pWaitDstStageMask = nullptr; + submit_info.waitSemaphoreCount = 0; + submit_info.pWaitSemaphores = nullptr; + submit_info.commandBufferCount = command_buffers_.size(); + submit_info.pCommandBuffers = command_buffers_.data(); + submit_info.signalSemaphoreCount = 0; + submit_info.pSignalSemaphores = nullptr; + VkResult err = + vkQueueSubmit(graphics_queue_, 1, &submit_info, VK_NULL_HANDLE); + command_buffers_[0] = nullptr; + if (err) { + DLOG << "vkQueueSubmit failed. Error: " << err; + return; + } + + command_buffers_.clear(); + + vkDeviceWaitIdle(device_); +} + +bool VulkanContext::PrepareBuffers() { + if (!queues_initialized_) + return true; + + VkResult err; + + // Ensure no more than kFrameLag renderings are outstanding. + vkWaitForFences(device_, 1, &fences_[frame_index_], VK_TRUE, + std::numeric_limits::max()); + vkResetFences(device_, 1, &fences_[frame_index_]); + + DCHECK(window_.swapchain != VK_NULL_HANDLE); + + do { + // Get the index of the next available swapchain image: + err = AcquireNextImageKHR(device_, window_.swapchain, + std::numeric_limits::max(), + image_acquired_semaphores_[frame_index_], + VK_NULL_HANDLE, &window_.current_buffer); + + if (err == VK_ERROR_OUT_OF_DATE_KHR) { + // swapchain is out of date (e.g. the window was resized) and must be + // recreated: + DLOG << "Swapchain is out of date."; + UpdateSwapChain(&window_); + } else if (err == VK_SUBOPTIMAL_KHR) { + DLOG << "Swapchain is suboptimal."; + // swapchain is not as optimal as it could be, but the platform's + // presentation engine will still present the image correctly. + break; + } else { + if (err) { + DLOG << "AcquireNextImageKHR failed. 
Error: " << err; + return false; + } + } + } while (err != VK_SUCCESS); + + buffers_prepared_ = true; + + return true; +} + +size_t VulkanContext::GetAndResetFPS() { + int ret = fps_; + fps_ = 0; + return ret; +} + +bool VulkanContext::SwapBuffers() { + if (!queues_initialized_) + return true; + + VkResult err; + + // Wait for the image acquired semaphore to be signaled to ensure that the + // image won't be rendered to until the presentation engine has fully released + // ownership to the application, and it is okay to render to the image. + VkPipelineStageFlags pipe_stage_flags; + VkSubmitInfo submit_info; + submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submit_info.pNext = nullptr; + submit_info.pWaitDstStageMask = &pipe_stage_flags; + pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + submit_info.waitSemaphoreCount = 1; + submit_info.pWaitSemaphores = &image_acquired_semaphores_[frame_index_]; + submit_info.commandBufferCount = command_buffers_.size(); + submit_info.pCommandBuffers = command_buffers_.data(); + submit_info.signalSemaphoreCount = 1; + submit_info.pSignalSemaphores = &draw_complete_semaphores_[frame_index_]; + err = vkQueueSubmit(graphics_queue_, 1, &submit_info, fences_[frame_index_]); + if (err) { + DLOG << "vkQueueSubmit failed. Error: " << err; + return false; + } + + command_buffers_.clear(); + + if (separate_present_queue_) { + // If we are using separate queues, change image ownership to the present + // queue before presenting, waiting for the draw complete semaphore and + // signalling the ownership released semaphore when finished. 
+ VkFence null_fence = VK_NULL_HANDLE; + pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + submit_info.waitSemaphoreCount = 1; + submit_info.pWaitSemaphores = &draw_complete_semaphores_[frame_index_]; + submit_info.commandBufferCount = 0; + + VkCommandBuffer* cmdbufptr = + (VkCommandBuffer*)alloca(sizeof(VkCommandBuffer*)); + submit_info.pCommandBuffers = cmdbufptr; + + DCHECK(window_.swapchain != VK_NULL_HANDLE); + + cmdbufptr[submit_info.commandBufferCount] = + window_.swapchain_image_resources[window_.current_buffer] + .graphics_to_present_cmd; + submit_info.commandBufferCount++; + + submit_info.signalSemaphoreCount = 1; + submit_info.pSignalSemaphores = &image_ownership_semaphores_[frame_index_]; + err = vkQueueSubmit(present_queue_, 1, &submit_info, null_fence); + if (err) { + DLOG << "vkQueueSubmit failed. Error: " << err; + return false; + } + } + + // If we are using separate queues we have to wait for image ownership, + // otherwise wait for draw complete. + VkPresentInfoKHR present = { + /*sType*/ VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + /*pNext*/ nullptr, + /*waitSemaphoreCount*/ 1, + /*pWaitSemaphores*/ + (separate_present_queue_) ? &image_ownership_semaphores_[frame_index_] + : &draw_complete_semaphores_[frame_index_], + /*swapchainCount*/ 0, + /*pSwapchain*/ nullptr, + /*pImageIndices*/ nullptr, + /*pResults*/ nullptr, + }; + + VkSwapchainKHR* swapchains = (VkSwapchainKHR*)alloca(sizeof(VkSwapchainKHR*)); + uint32_t* pImageIndices = (uint32_t*)alloca(sizeof(uint32_t*)); + + present.pSwapchains = swapchains; + present.pImageIndices = pImageIndices; + + DCHECK(window_.swapchain != VK_NULL_HANDLE); + + swapchains[present.swapchainCount] = window_.swapchain; + pImageIndices[present.swapchainCount] = window_.current_buffer; + present.swapchainCount++; + + err = QueuePresentKHR(present_queue_, &present); + + frame_index_ += 1; + frame_index_ %= kFrameLag; + fps_++; + + if (err == VK_ERROR_OUT_OF_DATE_KHR) { + // Swapchain is out of date (e.g. 
    // the window was resized) and must be recreated.
    DLOG << "Swapchain is out of date.";
  } else if (err == VK_SUBOPTIMAL_KHR) {
    // Swapchain is not as optimal as it could be, but the platform's
    // presentation engine will still present the image correctly.
    DLOG << "Swapchain is Suboptimal.";
  } else if (err) {
    DLOG << "QueuePresentKHR failed. Error: " << err;
    return false;
  }

  // Require a fresh PrepareBuffers() before the next frame's framebuffer is
  // handed out.
  buffers_prepared_ = false;
  return true;
}

}  // namespace eng
diff --git a/src/engine/renderer/vulkan/vulkan_context.h b/src/engine/renderer/vulkan/vulkan_context.h
new file mode 100644
index 0000000..853e3e7
--- /dev/null
+++ b/src/engine/renderer/vulkan/vulkan_context.h
@@ -0,0 +1,191 @@
#ifndef VULKAN_CONTEXT_H
#define VULKAN_CONTEXT_H

// NOTE(review): the original #include targets were lost in formatting.
// <memory> and <vector> are required by this header (std::unique_ptr,
// std::vector); confirm the third include against the original patch.
#include <cstdint>
#include <memory>
#include <vector>

#if defined(__ANDROID__)
#include "../../../third_party/android/vulkan_wrapper.h"
#else
#include "../../../third_party/vulkan/vulkan.h"
#endif

#if defined(__ANDROID__)
struct ANativeWindow;
#endif

namespace eng {

// Adapted from the godot engine.
// https://github.com/godotengine/godot
//
// Owns the Vulkan instance, physical/logical device, swapchain and the
// per-frame synchronization objects, and drives the acquire/submit/present
// cycle for a single window.
class VulkanContext {
 public:
  VulkanContext();
  ~VulkanContext();

  bool Initialize();
  void Shutdown();

#if defined(__ANDROID__)
  bool CreateWindow(ANativeWindow* window, int width, int height);
#elif defined(__linux__)
  bool CreateWindow(Display* display, ::Window window, int width, int height);
#endif

  void ResizeWindow(int width, int height);
  void DestroyWindow();

  // Framebuffer of the currently acquired swapchain image, or VK_NULL_HANDLE
  // when no image is acquired.
  VkFramebuffer GetFramebuffer();

  VkRenderPass GetRenderPass() { return window_.render_pass; }

  VkExtent2D GetSwapchainExtent() { return window_.swapchain_extent; }

  // Queues a recorded command buffer for the next submit.
  void AppendCommandBuffer(const VkCommandBuffer& command_buffer);

  // Submits pending command buffers immediately and waits for completion.
  void Flush();

  bool PrepareBuffers();
  bool SwapBuffers();

  VkInstance GetInstance() { return instance_; }
  VkDevice GetDevice() { return device_; }
  VkPhysicalDevice GetPhysicalDevice() { return gpu_; }

  uint32_t GetSwapchainImageCount() const { return swapchain_image_count_; }

  // Returns the graphics queue *family index* (not a VkQueue).
  uint32_t GetGraphicsQueue() const { return graphics_queue_family_index_; }

  VkFormat GetScreenFormat() const { return format_; }

  VkPhysicalDeviceLimits GetDeviceLimits() const { return gpu_props_.limits; }

  int GetWidth() { return window_.width; }
  int GetHeight() { return window_.height; }

  // Frames presented since the last call; resets the counter.
  size_t GetAndResetFPS();

 private:
  // kFrameLag is the number of frames allowed in flight.
  enum { kMaxExtensions = 128, kMaxLayers = 64, kFrameLag = 2 };

  // Per swapchain image resources.
  struct SwapchainImageResources {
    VkImage image;
    VkCommandBuffer graphics_to_present_cmd;
    VkImageView view;
    VkFramebuffer frame_buffer;
  };

  struct Window {
    VkSurfaceKHR surface = VK_NULL_HANDLE;
    VkSwapchainKHR swapchain = VK_NULL_HANDLE;
    std::unique_ptr<SwapchainImageResources[]> swapchain_image_resources;
    uint32_t current_buffer = 0;
    int width = 0;
    int height = 0;
    VkCommandPool present_cmd_pool = VK_NULL_HANDLE;
    VkRenderPass render_pass = VK_NULL_HANDLE;
    VkExtent2D swapchain_extent = {0, 0};
  };

  VkInstance instance_ = VK_NULL_HANDLE;
  VkPhysicalDevice gpu_ = VK_NULL_HANDLE;
  VkDevice device_ = VK_NULL_HANDLE;

  VkPhysicalDeviceProperties gpu_props_;
  uint32_t queue_family_count_ = 0;
  std::unique_ptr<VkQueueFamilyProperties[]> queue_props_ = nullptr;

  bool buffers_prepared_ = false;

  bool queues_initialized_ = false;
  // True when graphics and present live on different queue families.
  bool separate_present_queue_ = false;

  uint32_t graphics_queue_family_index_ = 0;
  uint32_t present_queue_family_index_ = 0;
  VkQueue graphics_queue_ = VK_NULL_HANDLE;
  VkQueue present_queue_ = VK_NULL_HANDLE;

  VkColorSpaceKHR color_space_;
  VkFormat format_;

  // Index into the kFrameLag-sized synchronization arrays below.
  uint32_t frame_index_ = 0;

  VkSemaphore image_acquired_semaphores_[kFrameLag];
  VkSemaphore draw_complete_semaphores_[kFrameLag];
  VkSemaphore image_ownership_semaphores_[kFrameLag];
  VkFence fences_[kFrameLag];

  VkPhysicalDeviceMemoryProperties memory_properties_;
  VkPhysicalDeviceFeatures physical_device_features_;

  uint32_t swapchain_image_count_ = 0;

  // Command buffers queued via AppendCommandBuffer, consumed by
  // SwapBuffers/Flush.
  std::vector<VkCommandBuffer> command_buffers_;

  Window window_;

  size_t fps_ = 0;

  // Extensions.
  uint32_t enabled_extension_count_ = 0;
  const char* extension_names_[kMaxExtensions];

  uint32_t enabled_layer_count_ = 0;
  const char* enabled_layers_[kMaxLayers];

  bool use_validation_layers_ = false;

  PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT;
  PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT;

  PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR;
  PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR
      GetPhysicalDeviceSurfaceCapabilitiesKHR;
  PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR;
  PFN_vkGetPhysicalDeviceSurfacePresentModesKHR
      GetPhysicalDeviceSurfacePresentModesKHR;
  PFN_vkCreateSwapchainKHR CreateSwapchainKHR;
  PFN_vkDestroySwapchainKHR DestroySwapchainKHR;
  PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR;
  PFN_vkAcquireNextImageKHR AcquireNextImageKHR;
  PFN_vkQueuePresentKHR QueuePresentKHR;

  VkDebugUtilsMessengerEXT dbg_messenger_ = VK_NULL_HANDLE;

  bool CreateValidationLayers();
  bool InitializeExtensions();

  VkBool32 CheckLayers(uint32_t check_count,
                       const char** check_names,
                       uint32_t layer_count,
                       VkLayerProperties* layers);

  static VKAPI_ATTR VkBool32 VKAPI_CALL DebugMessengerCallback(
      VkDebugUtilsMessageSeverityFlagBitsEXT message_severity,
      VkDebugUtilsMessageTypeFlagsEXT message_type,
      const VkDebugUtilsMessengerCallbackDataEXT* callback_data,
      void* user_data);

  bool CreatePhysicalDevice();

  bool InitializeQueues(VkSurfaceKHR surface);

  bool CreateDevice();

  bool CleanUpSwapChain(Window* window);

  bool UpdateSwapChain(Window* window);

  bool CreateSwapChain();
  bool CreateSemaphores();

  const char* GetPlatformSurfaceExtension() const;

  VulkanContext(const VulkanContext&) = delete;
  VulkanContext& operator=(const VulkanContext&) = delete;
};

}  // namespace eng

#endif  // VULKAN_CONTEXT_H
diff --git a/src/engine/renderer/vulkan/vulkan_context_android.cc b/src/engine/renderer/vulkan/vulkan_context_android.cc
new file mode 100644
index 0000000..c0f5fb2
--- /dev/null
+++ b/src/engine/renderer/vulkan/vulkan_context_android.cc
@@ -0,0 +1,39 @@
#include "vulkan_context.h"

#include "../../../base/log.h"

namespace eng {

// Instance extension required to create surfaces on this platform.
const char* VulkanContext::GetPlatformSurfaceExtension() const {
  return VK_KHR_ANDROID_SURFACE_EXTENSION_NAME;
}

// Creates a Vulkan surface for the given ANativeWindow and, on first use,
// initializes the device queues against it.
bool VulkanContext::CreateWindow(ANativeWindow* window, int width, int height) {
  VkAndroidSurfaceCreateInfoKHR surface_info;
  surface_info.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
  surface_info.pNext = nullptr;
  surface_info.flags = 0;
  surface_info.window = window;

  VkSurfaceKHR surface;
  VkResult err =
      vkCreateAndroidSurfaceKHR(instance_, &surface_info, nullptr, &surface);
  if (err != VK_SUCCESS) {
    LOG << "vkCreateAndroidSurfaceKHR failed with error "
        << std::to_string(err);
    return false;
  }

  if (!queues_initialized_ && !InitializeQueues(surface))
    return false;

  window_.surface = surface;
  window_.width = width;
+  window_.height = height;
+  if (!UpdateSwapChain(&window_))
+    return false;
+
+  return true;
+}
+
+}  // namespace eng
diff --git a/src/engine/renderer/vulkan/vulkan_context_linux.cc b/src/engine/renderer/vulkan/vulkan_context_linux.cc
new file mode 100644
index 0000000..a390ce2
--- /dev/null
+++ b/src/engine/renderer/vulkan/vulkan_context_linux.cc
@@ -0,0 +1,43 @@
+#include "vulkan_context.h"
+
+#include "../../../base/log.h"
+
+namespace eng {
+
+const char* VulkanContext::GetPlatformSurfaceExtension() const {
+  return VK_KHR_XLIB_SURFACE_EXTENSION_NAME;
+}
+
+bool VulkanContext::CreateWindow(Display* display,
+                                 ::Window window,
+                                 int width,
+                                 int height) {
+  VkXlibSurfaceCreateInfoKHR surface_info;
+  surface_info.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
+  surface_info.pNext = nullptr;
+  surface_info.flags = 0;
+  surface_info.dpy = display;
+  surface_info.window = window;
+
+  VkSurfaceKHR surface;
+  VkResult err =
+      vkCreateXlibSurfaceKHR(instance_, &surface_info, nullptr, &surface);
+  if (err != VK_SUCCESS) {
+    LOG << "vkCreateXlibSurfaceKHR failed with error "
+        << std::to_string(err);
+    return false;
+  }
+
+  if (!queues_initialized_ && !InitializeQueues(surface))
+    return false;
+
+  window_.surface = surface;
+  window_.width = width;
+  window_.height = height;
+  if (!UpdateSwapChain(&window_))
+    return false;
+
+  return true;
+}
+
+}  // namespace eng
diff --git a/src/engine/shader_source.cc b/src/engine/shader_source.cc
index 0e035b3..793b436 100644
--- a/src/engine/shader_source.cc
+++ b/src/engine/shader_source.cc
@@ -6,40 +6,114 @@
 #include "engine.h"
 #include "platform/asset_file.h"
 
+namespace {
+
+// Helper macros for our glsl shaders. Makes it possible to write generic code
+// that compiles both for OpenGL and Vulkan.
+ +const char kVertexShaderMacros[] = R"( + #if defined(VULKAN) + #define UNIFORM_BEGIN layout(push_constant) uniform Params { + #define UNIFORM_V(X) X; + #define UNIFORM_F(X) X; + #define UNIFORM_END } params; + #define IN(X) layout(location = X) in + #define OUT(X) layout(location = X) out + #define PARAM(X) params.X + #else + #define UNIFORM_BEGIN + #define UNIFORM uniform + #define UNIFORM_V(X) uniform X; + #define UNIFORM_F(X) + #define UNIFORM_END + #define IN(X) attribute + #define OUT(X) varying + #define PARAM(X) X + #endif +)"; + +const char kFragmentShaderMacros[] = R"( + #if defined(VULKAN) + #define UNIFORM_BEGIN layout(push_constant) uniform Params { + #define UNIFORM_V(X) X; + #define UNIFORM_F(X) X; + #define UNIFORM_S(X) + #define UNIFORM_END } params; + #define SAMPLER(X) layout(set = 0, binding = 0) uniform X; + #define IN(X) layout(location = X) in + #define OUT(X) layout(location = X) out + #define FRAG_COLOR_OUT(X) layout(location = 0) out vec4 X; + #define FRAG_COLOR(X) X + #define PARAM(X) params.X + #define TEXTURE texture + #else + #define UNIFORM_BEGIN + #define UNIFORM_V(X) + #define UNIFORM_F(X) uniform X; + #define UNIFORM_S(X) uniform X; + #define UNIFORM_END + #define SAMPLER(X) + #define IN(X) varying + #define OUT(X) varying + #define FRAG_COLOR_OUT(X) + #define FRAG_COLOR(X) gl_FragColor + #define PARAM(X) X + #define TEXTURE texture2D + #endif +)"; + +template +constexpr size_t length(char const (&)[N]) { + return N - 1; +} + +constexpr size_t kVertexShaderMacrosLen = length(kVertexShaderMacros); +constexpr size_t kFragmentShaderMacrosLen = length(kFragmentShaderMacros); + +} // namespace + namespace eng { bool ShaderSource::Load(const std::string& name) { - Engine& engine = Engine::Get(); - name_ = name; - std::string vertex_file_name = name; - vertex_file_name += "_vertex"; - auto vertex_source = AssetFile::ReadWholeFile(vertex_file_name.c_str(), - engine.GetRootPath().c_str(), - &vertex_source_size_, true); - if 
(!vertex_source) {
-    LOG << "Failed to read file: " << vertex_file_name;
+  vertex_source_size_ =
+      LoadInternal(name + "_vertex", vertex_source_, kVertexShaderMacros,
+                   kVertexShaderMacrosLen);
+  if (!vertex_source_)
     return false;
-  }
-  vertex_source_ = std::move(vertex_source);
-
-  std::string fragment_file_name = name;
-  fragment_file_name += "_fragment";
-  auto fragment_source = AssetFile::ReadWholeFile(fragment_file_name.c_str(),
-                                                  engine.GetRootPath().c_str(),
-                                                  &fragment_source_size_, true);
-  if (!fragment_source) {
-    LOG << "Failed to read file: " << fragment_file_name;
+  fragment_source_size_ =
+      LoadInternal(name + "_fragment", fragment_source_, kFragmentShaderMacros,
+                   kFragmentShaderMacrosLen);
+  if (!fragment_source_)
     return false;
-  }
 
   LOG << "Loaded " << name;
 
-  fragment_source_ = std::move(fragment_source);
-
   return true;
 }
 
+size_t ShaderSource::LoadInternal(const std::string& name,
+                                  std::unique_ptr<char[]>& dst,
+                                  const char* inject,
+                                  size_t inject_len) {
+  size_t size;
+  auto source = AssetFile::ReadWholeFile(
+      name.c_str(), Engine::Get().GetRootPath().c_str(), &size, true);
+  if (!source) {
+    LOG << "Failed to read file: " << name;
+    return 0;
+  }
+
+  // Inject macros.
+  size++;  // Include the null-terminator.
+  size_t total_size = inject_len + size;
+  dst = std::make_unique<char[]>(total_size);
+  memcpy(dst.get(), inject, inject_len);
+  memcpy(dst.get() + inject_len, source.get(), size);
+
+  return total_size;
+}
+
 }  // namespace eng
diff --git a/src/engine/shader_source.h b/src/engine/shader_source.h
index 8b6c621..2ae83cd 100644
--- a/src/engine/shader_source.h
+++ b/src/engine/shader_source.h
@@ -17,7 +17,7 @@ class ShaderSource {
   const char* GetFragmentSource() const { return fragment_source_.get(); }
 
   size_t vertex_source_size() const { return vertex_source_size_; }
-  size_t fragment_source_size() const { return fragment_source_size_ ; }
+  size_t fragment_source_size() const { return fragment_source_size_; }
 
   const std::string& name() const { return name_; }
 
@@ -29,6 +29,11 @@ class ShaderSource {
 
   size_t vertex_source_size_ = 0;
   size_t fragment_source_size_ = 0;
+
+  size_t LoadInternal(const std::string& name,
+                      std::unique_ptr<char[]>& dst,
+                      const char* inject,
+                      size_t inject_len);
 };
 
 }  // namespace eng
diff --git a/src/third_party/android/vulkan_wrapper.cpp b/src/third_party/android/vulkan_wrapper.cpp
new file mode 100644
index 0000000..f186c85
--- /dev/null
+++ b/src/third_party/android/vulkan_wrapper.cpp
@@ -0,0 +1,404 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// This file is generated.
+#include "vulkan_wrapper.h"
+#include <dlfcn.h>
+
+int InitVulkan(void) {
+  void* libvulkan = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL);
+  if (!libvulkan)
+    return 0;
+
+  // Vulkan supported, set function addresses
+  vkCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(dlsym(libvulkan, "vkCreateInstance"));
+  vkDestroyInstance = reinterpret_cast<PFN_vkDestroyInstance>(dlsym(libvulkan, "vkDestroyInstance"));
+  vkEnumeratePhysicalDevices = reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(dlsym(libvulkan, "vkEnumeratePhysicalDevices"));
+  vkGetPhysicalDeviceFeatures = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures>(dlsym(libvulkan, "vkGetPhysicalDeviceFeatures"));
+  vkGetPhysicalDeviceFormatProperties = reinterpret_cast<PFN_vkGetPhysicalDeviceFormatProperties>(dlsym(libvulkan, "vkGetPhysicalDeviceFormatProperties"));
+  vkGetPhysicalDeviceImageFormatProperties = reinterpret_cast<PFN_vkGetPhysicalDeviceImageFormatProperties>(dlsym(libvulkan, "vkGetPhysicalDeviceImageFormatProperties"));
+  vkGetPhysicalDeviceProperties = reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(dlsym(libvulkan, "vkGetPhysicalDeviceProperties"));
+  vkGetPhysicalDeviceQueueFamilyProperties = reinterpret_cast<PFN_vkGetPhysicalDeviceQueueFamilyProperties>(dlsym(libvulkan, "vkGetPhysicalDeviceQueueFamilyProperties"));
+  vkGetPhysicalDeviceMemoryProperties = reinterpret_cast<PFN_vkGetPhysicalDeviceMemoryProperties>(dlsym(libvulkan, "vkGetPhysicalDeviceMemoryProperties"));
+  vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(dlsym(libvulkan, "vkGetInstanceProcAddr"));
+  vkGetDeviceProcAddr = reinterpret_cast<PFN_vkGetDeviceProcAddr>(dlsym(libvulkan, "vkGetDeviceProcAddr"));
+  vkCreateDevice = reinterpret_cast<PFN_vkCreateDevice>(dlsym(libvulkan, "vkCreateDevice"));
+  vkDestroyDevice = reinterpret_cast<PFN_vkDestroyDevice>(dlsym(libvulkan, "vkDestroyDevice"));
+  vkEnumerateInstanceExtensionProperties = reinterpret_cast<PFN_vkEnumerateInstanceExtensionProperties>(dlsym(libvulkan, "vkEnumerateInstanceExtensionProperties"));
+  vkEnumerateDeviceExtensionProperties = reinterpret_cast<PFN_vkEnumerateDeviceExtensionProperties>(dlsym(libvulkan, "vkEnumerateDeviceExtensionProperties"));
+  vkEnumerateInstanceLayerProperties = reinterpret_cast<PFN_vkEnumerateInstanceLayerProperties>(dlsym(libvulkan, "vkEnumerateInstanceLayerProperties"));
+  vkEnumerateDeviceLayerProperties = reinterpret_cast<PFN_vkEnumerateDeviceLayerProperties>(dlsym(libvulkan, "vkEnumerateDeviceLayerProperties"));
+  vkGetDeviceQueue = reinterpret_cast<PFN_vkGetDeviceQueue>(dlsym(libvulkan, "vkGetDeviceQueue"));
+ vkQueueSubmit = reinterpret_cast(dlsym(libvulkan, "vkQueueSubmit")); + vkQueueWaitIdle = reinterpret_cast(dlsym(libvulkan, "vkQueueWaitIdle")); + vkDeviceWaitIdle = reinterpret_cast(dlsym(libvulkan, "vkDeviceWaitIdle")); + vkAllocateMemory = reinterpret_cast(dlsym(libvulkan, "vkAllocateMemory")); + vkFreeMemory = reinterpret_cast(dlsym(libvulkan, "vkFreeMemory")); + vkMapMemory = reinterpret_cast(dlsym(libvulkan, "vkMapMemory")); + vkUnmapMemory = reinterpret_cast(dlsym(libvulkan, "vkUnmapMemory")); + vkFlushMappedMemoryRanges = reinterpret_cast(dlsym(libvulkan, "vkFlushMappedMemoryRanges")); + vkInvalidateMappedMemoryRanges = reinterpret_cast(dlsym(libvulkan, "vkInvalidateMappedMemoryRanges")); + vkGetDeviceMemoryCommitment = reinterpret_cast(dlsym(libvulkan, "vkGetDeviceMemoryCommitment")); + vkBindBufferMemory = reinterpret_cast(dlsym(libvulkan, "vkBindBufferMemory")); + vkBindImageMemory = reinterpret_cast(dlsym(libvulkan, "vkBindImageMemory")); + vkGetBufferMemoryRequirements = reinterpret_cast(dlsym(libvulkan, "vkGetBufferMemoryRequirements")); + vkGetImageMemoryRequirements = reinterpret_cast(dlsym(libvulkan, "vkGetImageMemoryRequirements")); + vkGetImageSparseMemoryRequirements = reinterpret_cast(dlsym(libvulkan, "vkGetImageSparseMemoryRequirements")); + vkGetPhysicalDeviceSparseImageFormatProperties = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceSparseImageFormatProperties")); + vkQueueBindSparse = reinterpret_cast(dlsym(libvulkan, "vkQueueBindSparse")); + vkCreateFence = reinterpret_cast(dlsym(libvulkan, "vkCreateFence")); + vkDestroyFence = reinterpret_cast(dlsym(libvulkan, "vkDestroyFence")); + vkResetFences = reinterpret_cast(dlsym(libvulkan, "vkResetFences")); + vkGetFenceStatus = reinterpret_cast(dlsym(libvulkan, "vkGetFenceStatus")); + vkWaitForFences = reinterpret_cast(dlsym(libvulkan, "vkWaitForFences")); + vkCreateSemaphore = reinterpret_cast(dlsym(libvulkan, "vkCreateSemaphore")); + vkDestroySemaphore = 
reinterpret_cast(dlsym(libvulkan, "vkDestroySemaphore")); + vkCreateEvent = reinterpret_cast(dlsym(libvulkan, "vkCreateEvent")); + vkDestroyEvent = reinterpret_cast(dlsym(libvulkan, "vkDestroyEvent")); + vkGetEventStatus = reinterpret_cast(dlsym(libvulkan, "vkGetEventStatus")); + vkSetEvent = reinterpret_cast(dlsym(libvulkan, "vkSetEvent")); + vkResetEvent = reinterpret_cast(dlsym(libvulkan, "vkResetEvent")); + vkCreateQueryPool = reinterpret_cast(dlsym(libvulkan, "vkCreateQueryPool")); + vkDestroyQueryPool = reinterpret_cast(dlsym(libvulkan, "vkDestroyQueryPool")); + vkGetQueryPoolResults = reinterpret_cast(dlsym(libvulkan, "vkGetQueryPoolResults")); + vkCreateBuffer = reinterpret_cast(dlsym(libvulkan, "vkCreateBuffer")); + vkDestroyBuffer = reinterpret_cast(dlsym(libvulkan, "vkDestroyBuffer")); + vkCreateBufferView = reinterpret_cast(dlsym(libvulkan, "vkCreateBufferView")); + vkDestroyBufferView = reinterpret_cast(dlsym(libvulkan, "vkDestroyBufferView")); + vkCreateImage = reinterpret_cast(dlsym(libvulkan, "vkCreateImage")); + vkDestroyImage = reinterpret_cast(dlsym(libvulkan, "vkDestroyImage")); + vkGetImageSubresourceLayout = reinterpret_cast(dlsym(libvulkan, "vkGetImageSubresourceLayout")); + vkCreateImageView = reinterpret_cast(dlsym(libvulkan, "vkCreateImageView")); + vkDestroyImageView = reinterpret_cast(dlsym(libvulkan, "vkDestroyImageView")); + vkCreateShaderModule = reinterpret_cast(dlsym(libvulkan, "vkCreateShaderModule")); + vkDestroyShaderModule = reinterpret_cast(dlsym(libvulkan, "vkDestroyShaderModule")); + vkCreatePipelineCache = reinterpret_cast(dlsym(libvulkan, "vkCreatePipelineCache")); + vkDestroyPipelineCache = reinterpret_cast(dlsym(libvulkan, "vkDestroyPipelineCache")); + vkGetPipelineCacheData = reinterpret_cast(dlsym(libvulkan, "vkGetPipelineCacheData")); + vkMergePipelineCaches = reinterpret_cast(dlsym(libvulkan, "vkMergePipelineCaches")); + vkCreateGraphicsPipelines = reinterpret_cast(dlsym(libvulkan, "vkCreateGraphicsPipelines")); + 
vkCreateComputePipelines = reinterpret_cast(dlsym(libvulkan, "vkCreateComputePipelines")); + vkDestroyPipeline = reinterpret_cast(dlsym(libvulkan, "vkDestroyPipeline")); + vkCreatePipelineLayout = reinterpret_cast(dlsym(libvulkan, "vkCreatePipelineLayout")); + vkDestroyPipelineLayout = reinterpret_cast(dlsym(libvulkan, "vkDestroyPipelineLayout")); + vkCreateSampler = reinterpret_cast(dlsym(libvulkan, "vkCreateSampler")); + vkDestroySampler = reinterpret_cast(dlsym(libvulkan, "vkDestroySampler")); + vkCreateDescriptorSetLayout = reinterpret_cast(dlsym(libvulkan, "vkCreateDescriptorSetLayout")); + vkDestroyDescriptorSetLayout = reinterpret_cast(dlsym(libvulkan, "vkDestroyDescriptorSetLayout")); + vkCreateDescriptorPool = reinterpret_cast(dlsym(libvulkan, "vkCreateDescriptorPool")); + vkDestroyDescriptorPool = reinterpret_cast(dlsym(libvulkan, "vkDestroyDescriptorPool")); + vkResetDescriptorPool = reinterpret_cast(dlsym(libvulkan, "vkResetDescriptorPool")); + vkAllocateDescriptorSets = reinterpret_cast(dlsym(libvulkan, "vkAllocateDescriptorSets")); + vkFreeDescriptorSets = reinterpret_cast(dlsym(libvulkan, "vkFreeDescriptorSets")); + vkUpdateDescriptorSets = reinterpret_cast(dlsym(libvulkan, "vkUpdateDescriptorSets")); + vkCreateFramebuffer = reinterpret_cast(dlsym(libvulkan, "vkCreateFramebuffer")); + vkDestroyFramebuffer = reinterpret_cast(dlsym(libvulkan, "vkDestroyFramebuffer")); + vkCreateRenderPass = reinterpret_cast(dlsym(libvulkan, "vkCreateRenderPass")); + vkDestroyRenderPass = reinterpret_cast(dlsym(libvulkan, "vkDestroyRenderPass")); + vkGetRenderAreaGranularity = reinterpret_cast(dlsym(libvulkan, "vkGetRenderAreaGranularity")); + vkCreateCommandPool = reinterpret_cast(dlsym(libvulkan, "vkCreateCommandPool")); + vkDestroyCommandPool = reinterpret_cast(dlsym(libvulkan, "vkDestroyCommandPool")); + vkResetCommandPool = reinterpret_cast(dlsym(libvulkan, "vkResetCommandPool")); + vkAllocateCommandBuffers = reinterpret_cast(dlsym(libvulkan, 
"vkAllocateCommandBuffers")); + vkFreeCommandBuffers = reinterpret_cast(dlsym(libvulkan, "vkFreeCommandBuffers")); + vkBeginCommandBuffer = reinterpret_cast(dlsym(libvulkan, "vkBeginCommandBuffer")); + vkEndCommandBuffer = reinterpret_cast(dlsym(libvulkan, "vkEndCommandBuffer")); + vkResetCommandBuffer = reinterpret_cast(dlsym(libvulkan, "vkResetCommandBuffer")); + vkCmdBindPipeline = reinterpret_cast(dlsym(libvulkan, "vkCmdBindPipeline")); + vkCmdSetViewport = reinterpret_cast(dlsym(libvulkan, "vkCmdSetViewport")); + vkCmdSetScissor = reinterpret_cast(dlsym(libvulkan, "vkCmdSetScissor")); + vkCmdSetLineWidth = reinterpret_cast(dlsym(libvulkan, "vkCmdSetLineWidth")); + vkCmdSetDepthBias = reinterpret_cast(dlsym(libvulkan, "vkCmdSetDepthBias")); + vkCmdSetBlendConstants = reinterpret_cast(dlsym(libvulkan, "vkCmdSetBlendConstants")); + vkCmdSetDepthBounds = reinterpret_cast(dlsym(libvulkan, "vkCmdSetDepthBounds")); + vkCmdSetStencilCompareMask = reinterpret_cast(dlsym(libvulkan, "vkCmdSetStencilCompareMask")); + vkCmdSetStencilWriteMask = reinterpret_cast(dlsym(libvulkan, "vkCmdSetStencilWriteMask")); + vkCmdSetStencilReference = reinterpret_cast(dlsym(libvulkan, "vkCmdSetStencilReference")); + vkCmdBindDescriptorSets = reinterpret_cast(dlsym(libvulkan, "vkCmdBindDescriptorSets")); + vkCmdBindIndexBuffer = reinterpret_cast(dlsym(libvulkan, "vkCmdBindIndexBuffer")); + vkCmdBindVertexBuffers = reinterpret_cast(dlsym(libvulkan, "vkCmdBindVertexBuffers")); + vkCmdDraw = reinterpret_cast(dlsym(libvulkan, "vkCmdDraw")); + vkCmdDrawIndexed = reinterpret_cast(dlsym(libvulkan, "vkCmdDrawIndexed")); + vkCmdDrawIndirect = reinterpret_cast(dlsym(libvulkan, "vkCmdDrawIndirect")); + vkCmdDrawIndexedIndirect = reinterpret_cast(dlsym(libvulkan, "vkCmdDrawIndexedIndirect")); + vkCmdDispatch = reinterpret_cast(dlsym(libvulkan, "vkCmdDispatch")); + vkCmdDispatchIndirect = reinterpret_cast(dlsym(libvulkan, "vkCmdDispatchIndirect")); + vkCmdCopyBuffer = reinterpret_cast(dlsym(libvulkan, 
"vkCmdCopyBuffer")); + vkCmdCopyImage = reinterpret_cast(dlsym(libvulkan, "vkCmdCopyImage")); + vkCmdBlitImage = reinterpret_cast(dlsym(libvulkan, "vkCmdBlitImage")); + vkCmdCopyBufferToImage = reinterpret_cast(dlsym(libvulkan, "vkCmdCopyBufferToImage")); + vkCmdCopyImageToBuffer = reinterpret_cast(dlsym(libvulkan, "vkCmdCopyImageToBuffer")); + vkCmdUpdateBuffer = reinterpret_cast(dlsym(libvulkan, "vkCmdUpdateBuffer")); + vkCmdFillBuffer = reinterpret_cast(dlsym(libvulkan, "vkCmdFillBuffer")); + vkCmdClearColorImage = reinterpret_cast(dlsym(libvulkan, "vkCmdClearColorImage")); + vkCmdClearDepthStencilImage = reinterpret_cast(dlsym(libvulkan, "vkCmdClearDepthStencilImage")); + vkCmdClearAttachments = reinterpret_cast(dlsym(libvulkan, "vkCmdClearAttachments")); + vkCmdResolveImage = reinterpret_cast(dlsym(libvulkan, "vkCmdResolveImage")); + vkCmdSetEvent = reinterpret_cast(dlsym(libvulkan, "vkCmdSetEvent")); + vkCmdResetEvent = reinterpret_cast(dlsym(libvulkan, "vkCmdResetEvent")); + vkCmdWaitEvents = reinterpret_cast(dlsym(libvulkan, "vkCmdWaitEvents")); + vkCmdPipelineBarrier = reinterpret_cast(dlsym(libvulkan, "vkCmdPipelineBarrier")); + vkCmdBeginQuery = reinterpret_cast(dlsym(libvulkan, "vkCmdBeginQuery")); + vkCmdEndQuery = reinterpret_cast(dlsym(libvulkan, "vkCmdEndQuery")); + vkCmdResetQueryPool = reinterpret_cast(dlsym(libvulkan, "vkCmdResetQueryPool")); + vkCmdWriteTimestamp = reinterpret_cast(dlsym(libvulkan, "vkCmdWriteTimestamp")); + vkCmdCopyQueryPoolResults = reinterpret_cast(dlsym(libvulkan, "vkCmdCopyQueryPoolResults")); + vkCmdPushConstants = reinterpret_cast(dlsym(libvulkan, "vkCmdPushConstants")); + vkCmdBeginRenderPass = reinterpret_cast(dlsym(libvulkan, "vkCmdBeginRenderPass")); + vkCmdNextSubpass = reinterpret_cast(dlsym(libvulkan, "vkCmdNextSubpass")); + vkCmdEndRenderPass = reinterpret_cast(dlsym(libvulkan, "vkCmdEndRenderPass")); + vkCmdExecuteCommands = reinterpret_cast(dlsym(libvulkan, "vkCmdExecuteCommands")); + vkDestroySurfaceKHR = 
reinterpret_cast(dlsym(libvulkan, "vkDestroySurfaceKHR")); + vkGetPhysicalDeviceSurfaceSupportKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceSurfaceSupportKHR")); + vkGetPhysicalDeviceSurfaceCapabilitiesKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR")); + vkGetPhysicalDeviceSurfaceFormatsKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceSurfaceFormatsKHR")); + vkGetPhysicalDeviceSurfacePresentModesKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceSurfacePresentModesKHR")); + vkCreateSwapchainKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateSwapchainKHR")); + vkDestroySwapchainKHR = reinterpret_cast(dlsym(libvulkan, "vkDestroySwapchainKHR")); + vkGetSwapchainImagesKHR = reinterpret_cast(dlsym(libvulkan, "vkGetSwapchainImagesKHR")); + vkAcquireNextImageKHR = reinterpret_cast(dlsym(libvulkan, "vkAcquireNextImageKHR")); + vkQueuePresentKHR = reinterpret_cast(dlsym(libvulkan, "vkQueuePresentKHR")); + vkGetPhysicalDeviceDisplayPropertiesKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceDisplayPropertiesKHR")); + vkGetPhysicalDeviceDisplayPlanePropertiesKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR")); + vkGetDisplayPlaneSupportedDisplaysKHR = reinterpret_cast(dlsym(libvulkan, "vkGetDisplayPlaneSupportedDisplaysKHR")); + vkGetDisplayModePropertiesKHR = reinterpret_cast(dlsym(libvulkan, "vkGetDisplayModePropertiesKHR")); + vkCreateDisplayModeKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateDisplayModeKHR")); + vkGetDisplayPlaneCapabilitiesKHR = reinterpret_cast(dlsym(libvulkan, "vkGetDisplayPlaneCapabilitiesKHR")); + vkCreateDisplayPlaneSurfaceKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateDisplayPlaneSurfaceKHR")); + vkCreateSharedSwapchainsKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateSharedSwapchainsKHR")); + +#ifdef VK_USE_PLATFORM_XLIB_KHR + vkCreateXlibSurfaceKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateXlibSurfaceKHR")); + 
vkGetPhysicalDeviceXlibPresentationSupportKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceXlibPresentationSupportKHR")); +#endif + +#ifdef VK_USE_PLATFORM_XCB_KHR + vkCreateXcbSurfaceKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateXcbSurfaceKHR")); + vkGetPhysicalDeviceXcbPresentationSupportKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceXcbPresentationSupportKHR")); +#endif + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + vkCreateWaylandSurfaceKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateWaylandSurfaceKHR")); + vkGetPhysicalDeviceWaylandPresentationSupportKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceWaylandPresentationSupportKHR")); +#endif + +#ifdef VK_USE_PLATFORM_MIR_KHR + vkCreateMirSurfaceKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateMirSurfaceKHR")); + vkGetPhysicalDeviceMirPresentationSupportKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceMirPresentationSupportKHR")); +#endif + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + vkCreateAndroidSurfaceKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateAndroidSurfaceKHR")); +#endif + +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkCreateWin32SurfaceKHR = reinterpret_cast(dlsym(libvulkan, "vkCreateWin32SurfaceKHR")); + vkGetPhysicalDeviceWin32PresentationSupportKHR = reinterpret_cast(dlsym(libvulkan, "vkGetPhysicalDeviceWin32PresentationSupportKHR")); +#endif +#ifdef USE_DEBUG_EXTENTIONS + vkCreateDebugReportCallbackEXT = reinterpret_cast(dlsym(libvulkan, "vkCreateDebugReportCallbackEXT")); + vkDestroyDebugReportCallbackEXT = reinterpret_cast(dlsym(libvulkan, "vkDestroyDebugReportCallbackEXT")); + vkDebugReportMessageEXT = reinterpret_cast(dlsym(libvulkan, "vkDebugReportMessageEXT")); +#endif + return 1; +} + +// No Vulkan support, do not set function addresses +PFN_vkCreateInstance vkCreateInstance; +PFN_vkDestroyInstance vkDestroyInstance; +PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; +PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; 
+PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; +PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; +PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; +PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; +PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; +PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; +PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; +PFN_vkCreateDevice vkCreateDevice; +PFN_vkDestroyDevice vkDestroyDevice; +PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; +PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; +PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; +PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; +PFN_vkGetDeviceQueue vkGetDeviceQueue; +PFN_vkQueueSubmit vkQueueSubmit; +PFN_vkQueueWaitIdle vkQueueWaitIdle; +PFN_vkDeviceWaitIdle vkDeviceWaitIdle; +PFN_vkAllocateMemory vkAllocateMemory; +PFN_vkFreeMemory vkFreeMemory; +PFN_vkMapMemory vkMapMemory; +PFN_vkUnmapMemory vkUnmapMemory; +PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; +PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; +PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; +PFN_vkBindBufferMemory vkBindBufferMemory; +PFN_vkBindImageMemory vkBindImageMemory; +PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; +PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; +PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; +PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties; +PFN_vkQueueBindSparse vkQueueBindSparse; +PFN_vkCreateFence vkCreateFence; +PFN_vkDestroyFence vkDestroyFence; +PFN_vkResetFences vkResetFences; +PFN_vkGetFenceStatus vkGetFenceStatus; +PFN_vkWaitForFences vkWaitForFences; +PFN_vkCreateSemaphore 
vkCreateSemaphore; +PFN_vkDestroySemaphore vkDestroySemaphore; +PFN_vkCreateEvent vkCreateEvent; +PFN_vkDestroyEvent vkDestroyEvent; +PFN_vkGetEventStatus vkGetEventStatus; +PFN_vkSetEvent vkSetEvent; +PFN_vkResetEvent vkResetEvent; +PFN_vkCreateQueryPool vkCreateQueryPool; +PFN_vkDestroyQueryPool vkDestroyQueryPool; +PFN_vkGetQueryPoolResults vkGetQueryPoolResults; +PFN_vkCreateBuffer vkCreateBuffer; +PFN_vkDestroyBuffer vkDestroyBuffer; +PFN_vkCreateBufferView vkCreateBufferView; +PFN_vkDestroyBufferView vkDestroyBufferView; +PFN_vkCreateImage vkCreateImage; +PFN_vkDestroyImage vkDestroyImage; +PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; +PFN_vkCreateImageView vkCreateImageView; +PFN_vkDestroyImageView vkDestroyImageView; +PFN_vkCreateShaderModule vkCreateShaderModule; +PFN_vkDestroyShaderModule vkDestroyShaderModule; +PFN_vkCreatePipelineCache vkCreatePipelineCache; +PFN_vkDestroyPipelineCache vkDestroyPipelineCache; +PFN_vkGetPipelineCacheData vkGetPipelineCacheData; +PFN_vkMergePipelineCaches vkMergePipelineCaches; +PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; +PFN_vkCreateComputePipelines vkCreateComputePipelines; +PFN_vkDestroyPipeline vkDestroyPipeline; +PFN_vkCreatePipelineLayout vkCreatePipelineLayout; +PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; +PFN_vkCreateSampler vkCreateSampler; +PFN_vkDestroySampler vkDestroySampler; +PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; +PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; +PFN_vkCreateDescriptorPool vkCreateDescriptorPool; +PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; +PFN_vkResetDescriptorPool vkResetDescriptorPool; +PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; +PFN_vkFreeDescriptorSets vkFreeDescriptorSets; +PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; +PFN_vkCreateFramebuffer vkCreateFramebuffer; +PFN_vkDestroyFramebuffer vkDestroyFramebuffer; +PFN_vkCreateRenderPass vkCreateRenderPass; +PFN_vkDestroyRenderPass 
vkDestroyRenderPass; +PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; +PFN_vkCreateCommandPool vkCreateCommandPool; +PFN_vkDestroyCommandPool vkDestroyCommandPool; +PFN_vkResetCommandPool vkResetCommandPool; +PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; +PFN_vkFreeCommandBuffers vkFreeCommandBuffers; +PFN_vkBeginCommandBuffer vkBeginCommandBuffer; +PFN_vkEndCommandBuffer vkEndCommandBuffer; +PFN_vkResetCommandBuffer vkResetCommandBuffer; +PFN_vkCmdBindPipeline vkCmdBindPipeline; +PFN_vkCmdSetViewport vkCmdSetViewport; +PFN_vkCmdSetScissor vkCmdSetScissor; +PFN_vkCmdSetLineWidth vkCmdSetLineWidth; +PFN_vkCmdSetDepthBias vkCmdSetDepthBias; +PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; +PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; +PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; +PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; +PFN_vkCmdSetStencilReference vkCmdSetStencilReference; +PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; +PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; +PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; +PFN_vkCmdDraw vkCmdDraw; +PFN_vkCmdDrawIndexed vkCmdDrawIndexed; +PFN_vkCmdDrawIndirect vkCmdDrawIndirect; +PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; +PFN_vkCmdDispatch vkCmdDispatch; +PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; +PFN_vkCmdCopyBuffer vkCmdCopyBuffer; +PFN_vkCmdCopyImage vkCmdCopyImage; +PFN_vkCmdBlitImage vkCmdBlitImage; +PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; +PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; +PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; +PFN_vkCmdFillBuffer vkCmdFillBuffer; +PFN_vkCmdClearColorImage vkCmdClearColorImage; +PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; +PFN_vkCmdClearAttachments vkCmdClearAttachments; +PFN_vkCmdResolveImage vkCmdResolveImage; +PFN_vkCmdSetEvent vkCmdSetEvent; +PFN_vkCmdResetEvent vkCmdResetEvent; +PFN_vkCmdWaitEvents vkCmdWaitEvents; +PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; 
+PFN_vkCmdBeginQuery vkCmdBeginQuery; +PFN_vkCmdEndQuery vkCmdEndQuery; +PFN_vkCmdResetQueryPool vkCmdResetQueryPool; +PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; +PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; +PFN_vkCmdPushConstants vkCmdPushConstants; +PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; +PFN_vkCmdNextSubpass vkCmdNextSubpass; +PFN_vkCmdEndRenderPass vkCmdEndRenderPass; +PFN_vkCmdExecuteCommands vkCmdExecuteCommands; +PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; +PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; +PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; +PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; +PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; +PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; +PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; +PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; +PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; +PFN_vkQueuePresentKHR vkQueuePresentKHR; +PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR; +PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR; +PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR; +PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR; +PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR; +PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR; +PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR; +PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR; + +#ifdef VK_USE_PLATFORM_XLIB_KHR +PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR; +PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR; +#endif + +#ifdef VK_USE_PLATFORM_XCB_KHR +PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR; +PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR 
vkGetPhysicalDeviceXcbPresentationSupportKHR; +#endif + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR; +PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR; +#endif + +#ifdef VK_USE_PLATFORM_MIR_KHR +PFN_vkCreateMirSurfaceKHR vkCreateMirSurfaceKHR; +PFN_vkGetPhysicalDeviceMirPresentationSupportKHR vkGetPhysicalDeviceMirPresentationSupportKHR; +#endif + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR; +#endif + +#ifdef VK_USE_PLATFORM_WIN32_KHR +PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR; +PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR; +#endif +PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT; +PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT; +PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT; + diff --git a/src/third_party/android/vulkan_wrapper.h b/src/third_party/android/vulkan_wrapper.h new file mode 100644 index 0000000..81d7c73 --- /dev/null +++ b/src/third_party/android/vulkan_wrapper.h @@ -0,0 +1,236 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is generated. 
+#ifndef VULKAN_WRAPPER_H +#define VULKAN_WRAPPER_H + +#define VK_NO_PROTOTYPES 1 +#include "../vulkan/vulkan.h" + +/* Initialize the Vulkan function pointer variables declared in this header. + * Returns 0 if vulkan is not available, non-zero if it is available. + */ +int InitVulkan(void); + +// VK_core +extern PFN_vkCreateInstance vkCreateInstance; +extern PFN_vkDestroyInstance vkDestroyInstance; +extern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; +extern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; +extern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; +extern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; +extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; +extern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; +extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; +extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; +extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; +extern PFN_vkCreateDevice vkCreateDevice; +extern PFN_vkDestroyDevice vkDestroyDevice; +extern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; +extern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; +extern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; +extern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; +extern PFN_vkGetDeviceQueue vkGetDeviceQueue; +extern PFN_vkQueueSubmit vkQueueSubmit; +extern PFN_vkQueueWaitIdle vkQueueWaitIdle; +extern PFN_vkDeviceWaitIdle vkDeviceWaitIdle; +extern PFN_vkAllocateMemory vkAllocateMemory; +extern PFN_vkFreeMemory vkFreeMemory; +extern PFN_vkMapMemory vkMapMemory; +extern PFN_vkUnmapMemory vkUnmapMemory; +extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; +extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; 
+extern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; +extern PFN_vkBindBufferMemory vkBindBufferMemory; +extern PFN_vkBindImageMemory vkBindImageMemory; +extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; +extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; +extern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; +extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties; +extern PFN_vkQueueBindSparse vkQueueBindSparse; +extern PFN_vkCreateFence vkCreateFence; +extern PFN_vkDestroyFence vkDestroyFence; +extern PFN_vkResetFences vkResetFences; +extern PFN_vkGetFenceStatus vkGetFenceStatus; +extern PFN_vkWaitForFences vkWaitForFences; +extern PFN_vkCreateSemaphore vkCreateSemaphore; +extern PFN_vkDestroySemaphore vkDestroySemaphore; +extern PFN_vkCreateEvent vkCreateEvent; +extern PFN_vkDestroyEvent vkDestroyEvent; +extern PFN_vkGetEventStatus vkGetEventStatus; +extern PFN_vkSetEvent vkSetEvent; +extern PFN_vkResetEvent vkResetEvent; +extern PFN_vkCreateQueryPool vkCreateQueryPool; +extern PFN_vkDestroyQueryPool vkDestroyQueryPool; +extern PFN_vkGetQueryPoolResults vkGetQueryPoolResults; +extern PFN_vkCreateBuffer vkCreateBuffer; +extern PFN_vkDestroyBuffer vkDestroyBuffer; +extern PFN_vkCreateBufferView vkCreateBufferView; +extern PFN_vkDestroyBufferView vkDestroyBufferView; +extern PFN_vkCreateImage vkCreateImage; +extern PFN_vkDestroyImage vkDestroyImage; +extern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; +extern PFN_vkCreateImageView vkCreateImageView; +extern PFN_vkDestroyImageView vkDestroyImageView; +extern PFN_vkCreateShaderModule vkCreateShaderModule; +extern PFN_vkDestroyShaderModule vkDestroyShaderModule; +extern PFN_vkCreatePipelineCache vkCreatePipelineCache; +extern PFN_vkDestroyPipelineCache vkDestroyPipelineCache; +extern PFN_vkGetPipelineCacheData vkGetPipelineCacheData; +extern PFN_vkMergePipelineCaches 
vkMergePipelineCaches; +extern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; +extern PFN_vkCreateComputePipelines vkCreateComputePipelines; +extern PFN_vkDestroyPipeline vkDestroyPipeline; +extern PFN_vkCreatePipelineLayout vkCreatePipelineLayout; +extern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; +extern PFN_vkCreateSampler vkCreateSampler; +extern PFN_vkDestroySampler vkDestroySampler; +extern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; +extern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; +extern PFN_vkCreateDescriptorPool vkCreateDescriptorPool; +extern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; +extern PFN_vkResetDescriptorPool vkResetDescriptorPool; +extern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; +extern PFN_vkFreeDescriptorSets vkFreeDescriptorSets; +extern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; +extern PFN_vkCreateFramebuffer vkCreateFramebuffer; +extern PFN_vkDestroyFramebuffer vkDestroyFramebuffer; +extern PFN_vkCreateRenderPass vkCreateRenderPass; +extern PFN_vkDestroyRenderPass vkDestroyRenderPass; +extern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; +extern PFN_vkCreateCommandPool vkCreateCommandPool; +extern PFN_vkDestroyCommandPool vkDestroyCommandPool; +extern PFN_vkResetCommandPool vkResetCommandPool; +extern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; +extern PFN_vkFreeCommandBuffers vkFreeCommandBuffers; +extern PFN_vkBeginCommandBuffer vkBeginCommandBuffer; +extern PFN_vkEndCommandBuffer vkEndCommandBuffer; +extern PFN_vkResetCommandBuffer vkResetCommandBuffer; +extern PFN_vkCmdBindPipeline vkCmdBindPipeline; +extern PFN_vkCmdSetViewport vkCmdSetViewport; +extern PFN_vkCmdSetScissor vkCmdSetScissor; +extern PFN_vkCmdSetLineWidth vkCmdSetLineWidth; +extern PFN_vkCmdSetDepthBias vkCmdSetDepthBias; +extern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; +extern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; +extern 
PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; +extern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; +extern PFN_vkCmdSetStencilReference vkCmdSetStencilReference; +extern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; +extern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; +extern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; +extern PFN_vkCmdDraw vkCmdDraw; +extern PFN_vkCmdDrawIndexed vkCmdDrawIndexed; +extern PFN_vkCmdDrawIndirect vkCmdDrawIndirect; +extern PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; +extern PFN_vkCmdDispatch vkCmdDispatch; +extern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; +extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; +extern PFN_vkCmdCopyImage vkCmdCopyImage; +extern PFN_vkCmdBlitImage vkCmdBlitImage; +extern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; +extern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; +extern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; +extern PFN_vkCmdFillBuffer vkCmdFillBuffer; +extern PFN_vkCmdClearColorImage vkCmdClearColorImage; +extern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; +extern PFN_vkCmdClearAttachments vkCmdClearAttachments; +extern PFN_vkCmdResolveImage vkCmdResolveImage; +extern PFN_vkCmdSetEvent vkCmdSetEvent; +extern PFN_vkCmdResetEvent vkCmdResetEvent; +extern PFN_vkCmdWaitEvents vkCmdWaitEvents; +extern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; +extern PFN_vkCmdBeginQuery vkCmdBeginQuery; +extern PFN_vkCmdEndQuery vkCmdEndQuery; +extern PFN_vkCmdResetQueryPool vkCmdResetQueryPool; +extern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; +extern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; +extern PFN_vkCmdPushConstants vkCmdPushConstants; +extern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; +extern PFN_vkCmdNextSubpass vkCmdNextSubpass; +extern PFN_vkCmdEndRenderPass vkCmdEndRenderPass; +extern PFN_vkCmdExecuteCommands vkCmdExecuteCommands; + +// VK_KHR_surface +extern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; 
+extern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; +extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; +extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; +extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; + +// VK_KHR_swapchain +extern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; +extern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; +extern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; +extern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; +extern PFN_vkQueuePresentKHR vkQueuePresentKHR; + +// VK_KHR_display +extern PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR; +extern PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR; +extern PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR; +extern PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR; +extern PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR; +extern PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR; +extern PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR; + +// VK_KHR_display_swapchain +extern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR; + +#ifdef VK_USE_PLATFORM_XLIB_KHR +// VK_KHR_xlib_surface +extern PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR; +extern PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR; +#endif + +#ifdef VK_USE_PLATFORM_XCB_KHR +// VK_KHR_xcb_surface +extern PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR; +extern PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR; +#endif + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +// VK_KHR_wayland_surface +extern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR; +extern 
PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR; +#endif + +#ifdef VK_USE_PLATFORM_MIR_KHR +// VK_KHR_mir_surface +extern PFN_vkCreateMirSurfaceKHR vkCreateMirSurfaceKHR; +extern PFN_vkGetPhysicalDeviceMirPresentationSupportKHR vkGetPhysicalDeviceMirPresentationSupportKHR; +#endif + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +// VK_KHR_android_surface +extern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR; +#endif + +#ifdef VK_USE_PLATFORM_WIN32_KHR +// VK_KHR_win32_surface +extern PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR; +extern PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR; +#endif + +#ifdef USE_DEBUG_EXTENTIONS +#include <vulkan/vulkan_ext.h> +// VK_EXT_debug_report +extern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT; +extern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT; +extern PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT; +#endif + + +#endif // VULKAN_WRAPPER_H diff --git a/src/third_party/vma/vk_mem_alloc.cpp b/src/third_party/vma/vk_mem_alloc.cpp index 9534c5a..f879493 100644 --- a/src/third_party/vma/vk_mem_alloc.cpp +++ b/src/third_party/vma/vk_mem_alloc.cpp @@ -1,3 +1,7 @@ +#if defined(__ANDROID__) +#define VK_NO_PROTOTYPES 1 +#pragma clang diagnostic ignored "-Wunused-variable" +#endif #define VMA_IMPLEMENTATION #pragma clang diagnostic ignored "-Wnullability-completeness" #include "vk_mem_alloc.h"