initial commit - moved from vulkan_guide

2025-10-10 22:53:54 +09:00
commit 8853429937
2484 changed files with 973414 additions and 0 deletions

125
src/core/asset_locator.cpp Normal file

@@ -0,0 +1,125 @@
#include "asset_locator.h"
#include <cstdlib>
using std::filesystem::path;
static path get_env_path(const char *name)
{
const char *v = std::getenv(name);
if (!v || !*v) return {};
path p = v;
if (std::filesystem::exists(p)) return std::filesystem::canonical(p);
return {};
}
static path find_upwards_containing(path start, const std::string &subdir, int maxDepth = 6)
{
path cur = std::filesystem::weakly_canonical(start);
for (int i = 0; i <= maxDepth; i++)
{
path candidate = cur / subdir;
if (std::filesystem::exists(candidate)) return cur;
if (!cur.has_parent_path()) break;
cur = cur.parent_path();
}
return {};
}
AssetPaths AssetPaths::detect(const path &startDir)
{
AssetPaths out{};
if (auto root = get_env_path("VKG_ASSET_ROOT"); !root.empty())
{
out.root = root;
if (std::filesystem::exists(root / "assets")) out.assets = root / "assets";
if (std::filesystem::exists(root / "shaders")) out.shaders = root / "shaders";
return out;
}
if (auto aroot = find_upwards_containing(startDir, "assets"); !aroot.empty())
{
out.assets = aroot / "assets";
out.root = aroot;
}
if (auto sroot = find_upwards_containing(startDir, "shaders"); !sroot.empty())
{
out.shaders = sroot / "shaders";
if (out.root.empty()) out.root = sroot;
}
if (out.assets.empty())
{
path p1 = startDir / "assets";
path p2 = startDir / ".." / "assets";
if (std::filesystem::exists(p1)) out.assets = p1;
else if (std::filesystem::exists(p2)) out.assets = std::filesystem::weakly_canonical(p2);
}
if (out.shaders.empty())
{
path p1 = startDir / "shaders";
path p2 = startDir / ".." / "shaders";
if (std::filesystem::exists(p1)) out.shaders = p1;
else if (std::filesystem::exists(p2)) out.shaders = std::filesystem::weakly_canonical(p2);
}
return out;
}
void AssetLocator::init()
{
_paths = AssetPaths::detect();
}
bool AssetLocator::file_exists(const path &p)
{
std::error_code ec;
return !p.empty() && std::filesystem::exists(p, ec) && std::filesystem::is_regular_file(p, ec);
}
std::string AssetLocator::resolve_in(const path &base, std::string_view name)
{
if (name.empty()) return {};
path in = base / std::string(name);
if (file_exists(in)) return in.string();
return {};
}
std::string AssetLocator::shaderPath(std::string_view name) const
{
if (name.empty()) return {};
path np = std::string(name);
if (np.is_absolute() && file_exists(np)) return np.string();
if (file_exists(np)) return np.string();
if (!_paths.shaders.empty())
{
if (auto r = resolve_in(_paths.shaders, name); !r.empty()) return r;
}
if (auto r = resolve_in(std::filesystem::current_path() / "shaders", name); !r.empty()) return r;
if (auto r = resolve_in(std::filesystem::current_path() / ".." / "shaders", name); !r.empty()) return r;
return np.string();
}
std::string AssetLocator::assetPath(std::string_view name) const
{
if (name.empty()) return {};
path np = std::string(name);
if (np.is_absolute() && file_exists(np)) return np.string();
if (file_exists(np)) return np.string();
if (!_paths.assets.empty())
{
if (auto r = resolve_in(_paths.assets, name); !r.empty()) return r;
}
if (auto r = resolve_in(std::filesystem::current_path() / "assets", name); !r.empty()) return r;
if (auto r = resolve_in(std::filesystem::current_path() / ".." / "assets", name); !r.empty()) return r;
return np.string();
}

44
src/core/asset_locator.h Normal file

@@ -0,0 +1,44 @@
#pragma once
#include <filesystem>
#include <optional>
#include <string>
#include <string_view>
struct AssetPaths
{
std::filesystem::path root;
std::filesystem::path assets;
std::filesystem::path shaders;
bool valid() const
{
return (!assets.empty() && std::filesystem::exists(assets)) ||
(!shaders.empty() && std::filesystem::exists(shaders));
}
static AssetPaths detect(const std::filesystem::path &startDir = std::filesystem::current_path());
};
class AssetLocator
{
public:
void init();
const AssetPaths &paths() const { return _paths; }
void setPaths(const AssetPaths &p) { _paths = p; }
std::string shaderPath(std::string_view name) const;
std::string assetPath(std::string_view name) const;
std::string modelPath(std::string_view name) const { return assetPath(name); }
private:
static bool file_exists(const std::filesystem::path &p);
static std::string resolve_in(const std::filesystem::path &base, std::string_view name);
AssetPaths _paths{};
};
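
A minimal usage sketch (hypothetical call site, not part of this commit): the locator is initialized once and then queried with relative names. Note that shaderPath/assetPath fall back to returning the input name when nothing resolves, so callers should still verify the file exists before opening it.

    // Hypothetical usage of AssetLocator; "mesh.vert.spv" and "textures/grid.png"
    // are placeholder names used only for illustration.
    AssetLocator locator;
    locator.init(); // runs AssetPaths::detect() from the current working directory
    std::string vertSpv = locator.shaderPath("mesh.vert.spv");
    std::string texture = locator.assetPath("textures/grid.png");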

335
src/core/asset_manager.cpp Normal file

@@ -0,0 +1,335 @@
#include "asset_manager.h"
#include <cstdlib>
#include <iostream>
#include <core/vk_engine.h>
#include <core/vk_resource.h>
#include <render/vk_materials.h>
#include <render/primitives.h>
#include <stb_image.h>
#include "asset_locator.h"
using std::filesystem::path;
void AssetManager::init(VulkanEngine *engine)
{
_engine = engine;
_locator.init();
}
void AssetManager::cleanup()
{
if (_engine && _engine->_resourceManager)
{
for (auto &kv: _meshCache)
{
if (kv.second)
{
_engine->_resourceManager->destroy_buffer(kv.second->meshBuffers.indexBuffer);
_engine->_resourceManager->destroy_buffer(kv.second->meshBuffers.vertexBuffer);
}
}
for (auto &kv: _meshMaterialBuffers)
{
_engine->_resourceManager->destroy_buffer(kv.second);
}
for (auto &kv: _meshOwnedImages)
{
for (const auto &img: kv.second)
{
_engine->_resourceManager->destroy_image(img);
}
}
}
_meshCache.clear();
_meshMaterialBuffers.clear();
_meshOwnedImages.clear();
_gltfCacheByPath.clear();
}
std::string AssetManager::shaderPath(std::string_view name) const
{
return _locator.shaderPath(name);
}
std::string AssetManager::assetPath(std::string_view name) const
{
return _locator.assetPath(name);
}
std::string AssetManager::modelPath(std::string_view name) const
{
return _locator.modelPath(name);
}
std::optional<std::shared_ptr<LoadedGLTF> > AssetManager::loadGLTF(std::string_view nameOrPath)
{
if (!_engine) return {};
if (nameOrPath.empty()) return {};
std::string resolved = assetPath(nameOrPath);
path keyPath = resolved;
std::error_code ec;
keyPath = std::filesystem::weakly_canonical(keyPath, ec);
std::string key = (ec ? resolved : keyPath.string());
if (auto it = _gltfCacheByPath.find(key); it != _gltfCacheByPath.end())
{
if (auto sp = it->second.lock()) return sp;
}
auto loaded = loadGltf(_engine, resolved);
if (!loaded.has_value()) return {};
_gltfCacheByPath[key] = loaded.value();
return loaded;
}
std::shared_ptr<MeshAsset> AssetManager::getPrimitive(std::string_view name) const
{
if (name.empty()) return {};
auto findBy = [&](const std::string &key) -> std::shared_ptr<MeshAsset> {
auto it = _meshCache.find(key);
return (it != _meshCache.end()) ? it->second : nullptr;
};
if (name == std::string_view("cube") || name == std::string_view("Cube"))
{
if (auto m = findBy("cube")) return m;
if (auto m = findBy("Cube")) return m;
return {};
}
if (name == std::string_view("sphere") || name == std::string_view("Sphere"))
{
if (auto m = findBy("sphere")) return m;
if (auto m = findBy("Sphere")) return m;
return {};
}
return {};
}
std::shared_ptr<MeshAsset> AssetManager::createMesh(const MeshCreateInfo &info)
{
if (!_engine || !_engine->_resourceManager) return {};
if (info.name.empty()) return {};
if (auto it = _meshCache.find(info.name); it != _meshCache.end())
{
return it->second;
}
std::vector<Vertex> tmpVerts;
std::vector<uint32_t> tmpInds;
std::span<Vertex> vertsSpan{};
std::span<uint32_t> indsSpan{};
switch (info.geometry.type)
{
case MeshGeometryDesc::Type::Provided:
vertsSpan = info.geometry.vertices;
indsSpan = info.geometry.indices;
break;
case MeshGeometryDesc::Type::Cube:
primitives::buildCube(tmpVerts, tmpInds);
vertsSpan = tmpVerts;
indsSpan = tmpInds;
break;
case MeshGeometryDesc::Type::Sphere:
primitives::buildSphere(tmpVerts, tmpInds, info.geometry.sectors, info.geometry.stacks);
vertsSpan = tmpVerts;
indsSpan = tmpInds;
break;
}
if (info.material.kind == MeshMaterialDesc::Kind::Default)
{
return createMesh(info.name, vertsSpan, indsSpan, {});
}
const auto &opt = info.material.options;
auto [albedo, createdAlbedo] = loadImageFromAsset(opt.albedoPath, opt.albedoSRGB);
auto [mr, createdMR] = loadImageFromAsset(opt.metalRoughPath, opt.metalRoughSRGB);
const AllocatedImage &albedoRef = createdAlbedo ? albedo : _engine->_errorCheckerboardImage;
const AllocatedImage &mrRef = createdMR ? mr : _engine->_whiteImage;
AllocatedBuffer matBuffer = createMaterialBufferWithConstants(opt.constants);
GLTFMetallic_Roughness::MaterialResources res{};
res.colorImage = albedoRef;
res.colorSampler = _engine->_samplerManager->defaultLinear();
res.metalRoughImage = mrRef;
res.metalRoughSampler = _engine->_samplerManager->defaultLinear();
res.dataBuffer = matBuffer.buffer;
res.dataBufferOffset = 0;
auto mat = createMaterial(opt.pass, res);
auto mesh = createMesh(info.name, vertsSpan, indsSpan, mat);
_meshMaterialBuffers.emplace(info.name, matBuffer);
if (createdAlbedo) _meshOwnedImages[info.name].push_back(albedo);
if (createdMR) _meshOwnedImages[info.name].push_back(mr);
return mesh;
}
static Bounds compute_bounds(std::span<Vertex> vertices)
{
Bounds b{};
if (vertices.empty())
{
b.origin = glm::vec3(0.0f);
b.extents = glm::vec3(0.5f);
b.sphereRadius = glm::length(b.extents);
return b;
}
glm::vec3 minpos = vertices[0].position;
glm::vec3 maxpos = vertices[0].position;
for (const auto &v: vertices)
{
minpos = glm::min(minpos, v.position);
maxpos = glm::max(maxpos, v.position);
}
b.origin = (maxpos + minpos) / 2.f;
b.extents = (maxpos - minpos) / 2.f;
b.sphereRadius = glm::length(b.extents);
return b;
}
AllocatedBuffer AssetManager::createMaterialBufferWithConstants(
const GLTFMetallic_Roughness::MaterialConstants &constants) const
{
AllocatedBuffer matBuffer = _engine->_resourceManager->create_buffer(
sizeof(GLTFMetallic_Roughness::MaterialConstants),
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VMA_MEMORY_USAGE_CPU_TO_GPU);
VmaAllocationInfo allocInfo{};
vmaGetAllocationInfo(_engine->_deviceManager->allocator(), matBuffer.allocation, &allocInfo);
auto *matConstants = (GLTFMetallic_Roughness::MaterialConstants *) allocInfo.pMappedData;
*matConstants = constants;
if (matConstants->colorFactors == glm::vec4(0))
{
matConstants->colorFactors = glm::vec4(1.0f);
}
// Ensure writes are visible on non-coherent memory
vmaFlushAllocation(_engine->_deviceManager->allocator(), matBuffer.allocation, 0,
sizeof(GLTFMetallic_Roughness::MaterialConstants));
return matBuffer;
}
std::shared_ptr<GLTFMaterial> AssetManager::createMaterial(
MaterialPass pass, const GLTFMetallic_Roughness::MaterialResources &res) const
{
auto mat = std::make_shared<GLTFMaterial>();
mat->data = _engine->metalRoughMaterial.write_material(
_engine->_deviceManager->device(), pass, res, *_engine->_context->descriptors);
return mat;
}
std::pair<AllocatedImage, bool> AssetManager::loadImageFromAsset(std::string_view imgPath, bool srgb) const
{
AllocatedImage out{};
bool created = false;
if (!imgPath.empty())
{
std::string resolved = assetPath(imgPath);
int w = 0, h = 0, comp = 0;
stbi_uc *pixels = stbi_load(resolved.c_str(), &w, &h, &comp, 4);
if (pixels && w > 0 && h > 0)
{
VkFormat fmt = srgb ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
out = _engine->_resourceManager->create_image(pixels,
VkExtent3D{static_cast<uint32_t>(w), static_cast<uint32_t>(h), 1},
fmt,
VK_IMAGE_USAGE_SAMPLED_BIT,
false);
created = true;
}
if (pixels) stbi_image_free(pixels);
}
return {out, created};
}
std::shared_ptr<MeshAsset> AssetManager::createMesh(const std::string &name,
std::span<Vertex> vertices,
std::span<uint32_t> indices,
std::shared_ptr<GLTFMaterial> material)
{
if (!_engine || !_engine->_resourceManager) return {};
if (name.empty()) return {};
auto it = _meshCache.find(name);
if (it != _meshCache.end()) return it->second;
if (!material)
{
GLTFMetallic_Roughness::MaterialResources matResources{};
matResources.colorImage = _engine->_whiteImage;
matResources.colorSampler = _engine->_samplerManager->defaultLinear();
matResources.metalRoughImage = _engine->_whiteImage;
matResources.metalRoughSampler = _engine->_samplerManager->defaultLinear();
AllocatedBuffer matBuffer = createMaterialBufferWithConstants({});
matResources.dataBuffer = matBuffer.buffer;
matResources.dataBufferOffset = 0;
material = createMaterial(MaterialPass::MainColor, matResources);
_meshMaterialBuffers.emplace(name, matBuffer);
}
auto mesh = std::make_shared<MeshAsset>();
mesh->name = name;
mesh->meshBuffers = _engine->_resourceManager->uploadMesh(indices, vertices);
GeoSurface surf{};
surf.startIndex = 0;
surf.count = (uint32_t) indices.size();
surf.material = material;
surf.bounds = compute_bounds(vertices);
mesh->surfaces.push_back(surf);
_meshCache.emplace(name, mesh);
return mesh;
}
std::shared_ptr<MeshAsset> AssetManager::getMesh(const std::string &name) const
{
auto it = _meshCache.find(name);
return (it != _meshCache.end()) ? it->second : nullptr;
}
bool AssetManager::removeMesh(const std::string &name)
{
auto it = _meshCache.find(name);
if (it == _meshCache.end()) return false;
if (_engine && _engine->_resourceManager)
{
_engine->_resourceManager->destroy_buffer(it->second->meshBuffers.indexBuffer);
_engine->_resourceManager->destroy_buffer(it->second->meshBuffers.vertexBuffer);
}
_meshCache.erase(it);
auto itb = _meshMaterialBuffers.find(name);
if (itb != _meshMaterialBuffers.end())
{
if (_engine && _engine->_resourceManager)
{
_engine->_resourceManager->destroy_buffer(itb->second);
}
_meshMaterialBuffers.erase(itb);
}
auto iti = _meshOwnedImages.find(name);
if (iti != _meshOwnedImages.end())
{
if (_engine && _engine->_resourceManager)
{
for (const auto &img: iti->second)
{
_engine->_resourceManager->destroy_image(img);
}
}
_meshOwnedImages.erase(iti);
}
return true;
}

106
src/core/asset_manager.h Normal file

@@ -0,0 +1,106 @@
#pragma once
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <unordered_map>
#include <filesystem>
#include <vector>
#include <utility>
#include <scene/vk_loader.h>
#include <core/vk_types.h>
#include "vk_materials.h"
#include "asset_locator.h"
class VulkanEngine;
class MeshAsset;
class AssetManager
{
public:
struct MaterialOptions
{
std::string albedoPath;
std::string metalRoughPath;
bool albedoSRGB = true;
bool metalRoughSRGB = false;
GLTFMetallic_Roughness::MaterialConstants constants{};
MaterialPass pass = MaterialPass::MainColor;
};
struct MeshGeometryDesc
{
enum class Type { Provided, Cube, Sphere };
Type type = Type::Provided;
std::span<Vertex> vertices{};
std::span<uint32_t> indices{};
int sectors = 16;
int stacks = 16;
};
struct MeshMaterialDesc
{
enum class Kind { Default, Textured };
Kind kind = Kind::Default;
MaterialOptions options{};
};
struct MeshCreateInfo
{
std::string name;
MeshGeometryDesc geometry;
MeshMaterialDesc material;
};
void init(VulkanEngine *engine);
void cleanup();
std::string shaderPath(std::string_view name) const;
std::string modelPath(std::string_view name) const;
std::string assetPath(std::string_view name) const;
std::optional<std::shared_ptr<LoadedGLTF> > loadGLTF(std::string_view nameOrPath);
std::shared_ptr<MeshAsset> createMesh(const MeshCreateInfo &info);
std::shared_ptr<MeshAsset> getPrimitive(std::string_view name) const;
std::shared_ptr<MeshAsset> createMesh(const std::string &name,
std::span<Vertex> vertices,
std::span<uint32_t> indices,
std::shared_ptr<GLTFMaterial> material = {});
std::shared_ptr<MeshAsset> getMesh(const std::string &name) const;
bool removeMesh(const std::string &name);
const AssetPaths &paths() const { return _locator.paths(); }
void setPaths(const AssetPaths &p) { _locator.setPaths(p); }
private:
VulkanEngine *_engine = nullptr;
AssetLocator _locator;
std::unordered_map<std::string, std::weak_ptr<LoadedGLTF> > _gltfCacheByPath;
std::unordered_map<std::string, std::shared_ptr<MeshAsset> > _meshCache;
std::unordered_map<std::string, AllocatedBuffer> _meshMaterialBuffers;
std::unordered_map<std::string, std::vector<AllocatedImage> > _meshOwnedImages;
AllocatedBuffer createMaterialBufferWithConstants(const GLTFMetallic_Roughness::MaterialConstants &constants) const;
std::shared_ptr<GLTFMaterial> createMaterial(MaterialPass pass,
const GLTFMetallic_Roughness::MaterialResources &res) const;
std::pair<AllocatedImage, bool> loadImageFromAsset(std::string_view path, bool srgb) const;
};
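
A hypothetical usage sketch of the MeshCreateInfo path above (texture names are placeholders, and assetManager is assumed to be an instance already init()'d with the engine): a textured cube is described declaratively and created through the same cache that backs the built-in primitives.

    AssetManager::MeshCreateInfo ci{};
    ci.name = "crate";
    ci.geometry.type = AssetManager::MeshGeometryDesc::Type::Cube;
    ci.material.kind = AssetManager::MeshMaterialDesc::Kind::Textured;
    ci.material.options.albedoPath = "textures/crate_albedo.png";  // placeholder path
    ci.material.options.metalRoughPath = "textures/crate_mr.png";  // placeholder path
    std::shared_ptr<MeshAsset> crate = assetManager.createMesh(ci);
    // Textures that fail to load fall back to the engine's checkerboard/white
    // images, so createMesh still returns a drawable mesh.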

8
src/core/config.h Normal file

@@ -0,0 +1,8 @@
#pragma once
// Centralized engine configuration flags
#ifdef NDEBUG
inline constexpr bool kUseValidationLayers = false;
#else
inline constexpr bool kUseValidationLayers = true;
#endif

13
src/core/engine_context.cpp Normal file

@@ -0,0 +1,13 @@
#include "engine_context.h"
#include "scene/vk_scene.h"
const GPUSceneData &EngineContext::getSceneData() const
{
return scene->getSceneData();
}
const DrawContext &EngineContext::getMainDrawContext() const
{
return const_cast<SceneManager *>(scene)->getMainDrawContext();
}

77
src/core/engine_context.h Normal file

@@ -0,0 +1,77 @@
#pragma once
#include <memory>
#include <core/vk_types.h>
#include <core/vk_descriptors.h>
// Avoid including vk_scene.h here to prevent cycles
struct EngineStats
{
float frametime;
int triangle_count;
int drawcall_count;
float scene_update_time;
float mesh_draw_time;
};
class DeviceManager;
class ResourceManager;
class SwapchainManager;
class DescriptorManager;
class SamplerManager;
class SceneManager;
class MeshAsset;
struct DrawContext;
struct GPUSceneData;
class ComputeManager;
class PipelineManager;
struct FrameResources;
struct SDL_Window;
class AssetManager;
class RenderGraph;
class EngineContext
{
public:
// Owned shared resources
std::shared_ptr<DeviceManager> device;
std::shared_ptr<ResourceManager> resources;
std::shared_ptr<DescriptorAllocatorGrowable> descriptors;
// Non-owning pointers to global managers owned by VulkanEngine
SwapchainManager* swapchain = nullptr;
DescriptorManager* descriptorLayouts = nullptr;
SamplerManager* samplers = nullptr;
SceneManager* scene = nullptr;
// Per-frame and subsystem pointers for modules to use without VulkanEngine
FrameResources* currentFrame = nullptr; // set by engine each frame
EngineStats* stats = nullptr; // points to engine stats
ComputeManager* compute = nullptr; // compute subsystem
PipelineManager* pipelines = nullptr; // graphics pipeline manager
RenderGraph* renderGraph = nullptr; // render graph (built per-frame)
SDL_Window* window = nullptr; // SDL window handle
// Frequently used values
VkExtent2D drawExtent{};
// Optional convenience content pointers (moved to AssetManager for meshes)
// Assets
AssetManager* assets = nullptr; // non-owning pointer to central AssetManager
// Accessors
DeviceManager *getDevice() const { return device.get(); }
ResourceManager *getResources() const { return resources.get(); }
DescriptorAllocatorGrowable *getDescriptors() const { return descriptors.get(); }
SwapchainManager* getSwapchain() const { return swapchain; }
DescriptorManager* getDescriptorLayouts() const { return descriptorLayouts; }
SamplerManager* getSamplers() const { return samplers; }
const GPUSceneData& getSceneData() const;
const DrawContext& getMainDrawContext() const;
VkExtent2D getDrawExtent() const { return drawExtent; }
AssetManager* getAssets() const { return assets; }
// Convenience alias (singular) requested
AssetManager* getAsset() const { return assets; }
RenderGraph* getRenderGraph() const { return renderGraph; }
};
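
As a sketch of intent (hypothetical function, not defined in this commit), subsystems are expected to pull what they need from EngineContext instead of reaching back into VulkanEngine:

    // Hypothetical consumer; name and signature are illustrative only.
    static void record_my_pass(EngineContext *ctx, VkCommandBuffer cmd)
    {
        const VkExtent2D extent = ctx->getDrawExtent();       // published by the engine each frame
        const GPUSceneData &sceneData = ctx->getSceneData();  // forwarded to the SceneManager
        // ... record commands into cmd using ctx->pipelines, ctx->samplers, etc. ...
        (void) extent; (void) sceneData; (void) cmd;
    }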

53
src/core/frame_resources.cpp Normal file

@@ -0,0 +1,53 @@
#include "frame_resources.h"
#include <span>
#include "vk_descriptors.h"
#include "vk_device.h"
#include "vk_initializers.h"
#include "vk_types.h"
void FrameResources::init(DeviceManager *deviceManager,
std::span<DescriptorAllocatorGrowable::PoolSizeRatio> framePoolSizes)
{
VkCommandPoolCreateInfo commandPoolInfo = vkinit::command_pool_create_info(
deviceManager->graphicsQueueFamily(), VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VK_CHECK(vkCreateCommandPool(deviceManager->device(), &commandPoolInfo, nullptr, &_commandPool));
VkCommandBufferAllocateInfo cmdAllocInfo = vkinit::command_buffer_allocate_info(_commandPool, 1);
VK_CHECK(vkAllocateCommandBuffers(deviceManager->device(), &cmdAllocInfo, &_mainCommandBuffer));
VkFenceCreateInfo fenceCreateInfo = vkinit::fence_create_info(VK_FENCE_CREATE_SIGNALED_BIT);
VkSemaphoreCreateInfo semaphoreCreateInfo = vkinit::semaphore_create_info();
VK_CHECK(vkCreateFence(deviceManager->device(), &fenceCreateInfo, nullptr, &_renderFence));
VK_CHECK(vkCreateSemaphore(deviceManager->device(), &semaphoreCreateInfo, nullptr, &_swapchainSemaphore));
VK_CHECK(vkCreateSemaphore(deviceManager->device(), &semaphoreCreateInfo, nullptr, &_renderSemaphore));
_frameDescriptors.init(deviceManager->device(), 1000, framePoolSizes);
}
void FrameResources::cleanup(DeviceManager *deviceManager)
{
_frameDescriptors.destroy_pools(deviceManager->device());
if (_commandPool)
{
vkDestroyCommandPool(deviceManager->device(), _commandPool, nullptr);
_commandPool = VK_NULL_HANDLE;
}
if (_renderFence)
{
vkDestroyFence(deviceManager->device(), _renderFence, nullptr);
_renderFence = VK_NULL_HANDLE;
}
if (_renderSemaphore)
{
vkDestroySemaphore(deviceManager->device(), _renderSemaphore, nullptr);
_renderSemaphore = VK_NULL_HANDLE;
}
if (_swapchainSemaphore)
{
vkDestroySemaphore(deviceManager->device(), _swapchainSemaphore, nullptr);
_swapchainSemaphore = VK_NULL_HANDLE;
}
}

24
src/core/frame_resources.h Normal file

@@ -0,0 +1,24 @@
#pragma once
#include <core/vk_types.h>
#include <core/vk_descriptors.h>
class DeviceManager;
struct FrameResources
{
VkSemaphore _swapchainSemaphore = VK_NULL_HANDLE;
VkSemaphore _renderSemaphore = VK_NULL_HANDLE;
VkFence _renderFence = VK_NULL_HANDLE;
VkCommandPool _commandPool = VK_NULL_HANDLE;
VkCommandBuffer _mainCommandBuffer = VK_NULL_HANDLE;
DeletionQueue _deletionQueue;
DescriptorAllocatorGrowable _frameDescriptors;
void init(DeviceManager *deviceManager,
std::span<DescriptorAllocatorGrowable::PoolSizeRatio> framePoolSizes);
void cleanup(DeviceManager *deviceManager);
};

53
src/core/vk_debug.cpp Normal file

@@ -0,0 +1,53 @@
#include <core/vk_debug.h>
#include <cstring>
namespace vkdebug {
static inline PFN_vkCmdBeginDebugUtilsLabelEXT get_begin_fn(VkDevice device)
{
static PFN_vkCmdBeginDebugUtilsLabelEXT fn = nullptr;
static VkDevice cached = VK_NULL_HANDLE;
if (device != cached)
{
cached = device;
fn = reinterpret_cast<PFN_vkCmdBeginDebugUtilsLabelEXT>(
vkGetDeviceProcAddr(device, "vkCmdBeginDebugUtilsLabelEXT"));
}
return fn;
}
static inline PFN_vkCmdEndDebugUtilsLabelEXT get_end_fn(VkDevice device)
{
static PFN_vkCmdEndDebugUtilsLabelEXT fn = nullptr;
static VkDevice cached = VK_NULL_HANDLE;
if (device != cached)
{
cached = device;
fn = reinterpret_cast<PFN_vkCmdEndDebugUtilsLabelEXT>(
vkGetDeviceProcAddr(device, "vkCmdEndDebugUtilsLabelEXT"));
}
return fn;
}
void cmd_begin_label(VkDevice device, VkCommandBuffer cmd, const char* name,
float r, float g, float b, float a)
{
auto fn = get_begin_fn(device);
if (!fn) return;
VkDebugUtilsLabelEXT label{};
label.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
label.pLabelName = name;
label.color[0] = r; label.color[1] = g; label.color[2] = b; label.color[3] = a;
fn(cmd, &label);
}
void cmd_end_label(VkDevice device, VkCommandBuffer cmd)
{
auto fn = get_end_fn(device);
if (!fn) return;
fn(cmd);
}
} // namespace vkdebug

14
src/core/vk_debug.h Normal file

@@ -0,0 +1,14 @@
#pragma once
#include <core/vk_types.h>
namespace vkdebug
{
// Begin a debug label on a command buffer if VK_EXT_debug_utils is available.
void cmd_begin_label(VkDevice device, VkCommandBuffer cmd, const char *name,
float r = 0.2f, float g = 0.6f, float b = 0.9f, float a = 1.0f);
// End a debug label on a command buffer if VK_EXT_debug_utils is available.
void cmd_end_label(VkDevice device, VkCommandBuffer cmd);
}
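
A small RAII wrapper is a natural companion to these free functions; the type below is only a sketch (not part of this commit) that keeps begin/end labels balanced across early returns.

    // Hypothetical helper built on the declarations above.
    struct ScopedDebugLabel
    {
        VkDevice device;
        VkCommandBuffer cmd;
        ScopedDebugLabel(VkDevice d, VkCommandBuffer c, const char *name) : device(d), cmd(c)
        {
            vkdebug::cmd_begin_label(d, c, name);
        }
        ~ScopedDebugLabel() { vkdebug::cmd_end_label(device, cmd); }
    };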

35
src/core/vk_descriptor_manager.cpp Normal file

@@ -0,0 +1,35 @@
#include "vk_descriptor_manager.h"
#include "vk_device.h"
#include "vk_descriptors.h"
void DescriptorManager::init(DeviceManager *deviceManager)
{
_deviceManager = deviceManager;
{
DescriptorLayoutBuilder builder;
builder.add_binding(0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
_singleImageDescriptorLayout = builder.build(_deviceManager->device(), VK_SHADER_STAGE_FRAGMENT_BIT);
}
{
DescriptorLayoutBuilder builder;
builder.add_binding(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
_gpuSceneDataDescriptorLayout = builder.build(
_deviceManager->device(), VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT);
}
}
void DescriptorManager::cleanup()
{
if (!_deviceManager) return;
if (_singleImageDescriptorLayout)
{
vkDestroyDescriptorSetLayout(_deviceManager->device(), _singleImageDescriptorLayout, nullptr);
_singleImageDescriptorLayout = VK_NULL_HANDLE;
}
if (_gpuSceneDataDescriptorLayout)
{
vkDestroyDescriptorSetLayout(_deviceManager->device(), _gpuSceneDataDescriptorLayout, nullptr);
_gpuSceneDataDescriptorLayout = VK_NULL_HANDLE;
}
}

24
src/core/vk_descriptor_manager.h Normal file

@@ -0,0 +1,24 @@
#pragma once
#include <core/vk_types.h>
#include <core/vk_descriptors.h>
#include "vk_device.h"
class DeviceManager;
class DescriptorManager
{
public:
void init(DeviceManager *deviceManager);
void cleanup();
VkDescriptorSetLayout gpuSceneDataLayout() const { return _gpuSceneDataDescriptorLayout; }
VkDescriptorSetLayout singleImageLayout() const { return _singleImageDescriptorLayout; }
private:
DeviceManager *_deviceManager = nullptr;
VkDescriptorSetLayout _singleImageDescriptorLayout = VK_NULL_HANDLE;
VkDescriptorSetLayout _gpuSceneDataDescriptorLayout = VK_NULL_HANDLE;
};

257
src/core/vk_descriptors.cpp Normal file

@@ -0,0 +1,257 @@
#include <core/vk_descriptors.h>
void DescriptorLayoutBuilder::add_binding(uint32_t binding, VkDescriptorType type)
{
VkDescriptorSetLayoutBinding newbind{};
newbind.binding = binding;
newbind.descriptorCount = 1;
newbind.descriptorType = type;
bindings.push_back(newbind);
}
void DescriptorLayoutBuilder::clear()
{
bindings.clear();
}
VkDescriptorSetLayout DescriptorLayoutBuilder::build(VkDevice device, VkShaderStageFlags shaderStages, void *pNext,
VkDescriptorSetLayoutCreateFlags flags)
{
for (auto &b: bindings)
{
b.stageFlags |= shaderStages;
}
VkDescriptorSetLayoutCreateInfo info = {.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO};
info.pNext = pNext;
info.pBindings = bindings.data();
info.bindingCount = (uint32_t) bindings.size();
info.flags = flags;
VkDescriptorSetLayout set;
VK_CHECK(vkCreateDescriptorSetLayout(device, &info, nullptr, &set));
return set;
}
void DescriptorWriter::write_buffer(int binding, VkBuffer buffer, size_t size, size_t offset, VkDescriptorType type)
{
VkDescriptorBufferInfo &info = bufferInfos.emplace_back(VkDescriptorBufferInfo{
.buffer = buffer,
.offset = offset,
.range = size
});
VkWriteDescriptorSet write = {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET};
write.dstBinding = binding;
write.dstSet = VK_NULL_HANDLE; //left empty for now until we need to write it
write.descriptorCount = 1;
write.descriptorType = type;
write.pBufferInfo = &info;
writes.push_back(write);
}
void DescriptorWriter::write_image(int binding, VkImageView image, VkSampler sampler, VkImageLayout layout,
VkDescriptorType type)
{
VkDescriptorImageInfo &info = imageInfos.emplace_back(VkDescriptorImageInfo{
.sampler = sampler,
.imageView = image,
.imageLayout = layout
});
VkWriteDescriptorSet write = {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET};
write.dstBinding = binding;
write.dstSet = VK_NULL_HANDLE; //left empty for now until we need to write it
write.descriptorCount = 1;
write.descriptorType = type;
write.pImageInfo = &info;
writes.push_back(write);
}
void DescriptorWriter::clear()
{
imageInfos.clear();
writes.clear();
bufferInfos.clear();
}
void DescriptorWriter::update_set(VkDevice device, VkDescriptorSet set)
{
for (VkWriteDescriptorSet& write : writes) {
write.dstSet = set;
}
vkUpdateDescriptorSets(device, (uint32_t)writes.size(), writes.data(), 0, nullptr);
}
void DescriptorAllocator::init_pool(VkDevice device, uint32_t maxSets, std::span<PoolSizeRatio> poolRatios)
{
std::vector<VkDescriptorPoolSize> poolSizes;
for (PoolSizeRatio ratio: poolRatios)
{
poolSizes.push_back(VkDescriptorPoolSize{
.type = ratio.type,
.descriptorCount = uint32_t(ratio.ratio * maxSets)
});
}
VkDescriptorPoolCreateInfo pool_info = {.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO};
pool_info.flags = 0;
pool_info.maxSets = maxSets;
pool_info.poolSizeCount = (uint32_t) poolSizes.size();
pool_info.pPoolSizes = poolSizes.data();
vkCreateDescriptorPool(device, &pool_info, nullptr, &pool);
}
void DescriptorAllocator::clear_descriptors(VkDevice device)
{
vkResetDescriptorPool(device, pool, 0);
}
void DescriptorAllocator::destroy_pool(VkDevice device)
{
vkDestroyDescriptorPool(device, pool, nullptr);
}
VkDescriptorSet DescriptorAllocator::allocate(VkDevice device, VkDescriptorSetLayout layout)
{
VkDescriptorSetAllocateInfo allocInfo = {.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO};
allocInfo.pNext = nullptr;
allocInfo.descriptorPool = pool;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = &layout;
VkDescriptorSet ds;
VK_CHECK(vkAllocateDescriptorSets(device, &allocInfo, &ds));
return ds;
}
VkDescriptorPool DescriptorAllocatorGrowable::get_pool(VkDevice device)
{
VkDescriptorPool newPool;
if (readyPools.size() != 0)
{
newPool = readyPools.back();
readyPools.pop_back();
}
else
{
//need to create a new pool
newPool = create_pool(device, setsPerPool, ratios);
setsPerPool = setsPerPool * 1.5;
if (setsPerPool > 4092)
{
setsPerPool = 4092;
}
}
return newPool;
}
VkDescriptorPool DescriptorAllocatorGrowable::create_pool(VkDevice device, uint32_t setCount,
std::span<PoolSizeRatio> poolRatios)
{
std::vector<VkDescriptorPoolSize> poolSizes;
for (PoolSizeRatio ratio: poolRatios)
{
poolSizes.push_back(VkDescriptorPoolSize{
.type = ratio.type,
.descriptorCount = uint32_t(ratio.ratio * setCount)
});
}
VkDescriptorPoolCreateInfo pool_info = {};
pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_info.flags = 0;
pool_info.maxSets = setCount;
pool_info.poolSizeCount = (uint32_t) poolSizes.size();
pool_info.pPoolSizes = poolSizes.data();
VkDescriptorPool newPool;
vkCreateDescriptorPool(device, &pool_info, nullptr, &newPool);
return newPool;
}
void DescriptorAllocatorGrowable::init(VkDevice device, uint32_t maxSets, std::span<PoolSizeRatio> poolRatios)
{
ratios.clear();
for (auto r: poolRatios)
{
ratios.push_back(r);
}
VkDescriptorPool newPool = create_pool(device, maxSets, poolRatios);
setsPerPool = maxSets * 1.5; //grow it next allocation
readyPools.push_back(newPool);
}
void DescriptorAllocatorGrowable::clear_pools(VkDevice device)
{
for (auto p: readyPools)
{
vkResetDescriptorPool(device, p, 0);
}
for (auto p: fullPools)
{
vkResetDescriptorPool(device, p, 0);
readyPools.push_back(p);
}
fullPools.clear();
}
void DescriptorAllocatorGrowable::destroy_pools(VkDevice device)
{
for (auto p: readyPools)
{
vkDestroyDescriptorPool(device, p, nullptr);
}
readyPools.clear();
for (auto p: fullPools)
{
vkDestroyDescriptorPool(device, p, nullptr);
}
fullPools.clear();
}
VkDescriptorSet DescriptorAllocatorGrowable::allocate(VkDevice device, VkDescriptorSetLayout layout, void *pNext)
{
//get or create a pool to allocate from
VkDescriptorPool poolToUse = get_pool(device);
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.pNext = pNext;
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = poolToUse;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = &layout;
VkDescriptorSet ds;
VkResult result = vkAllocateDescriptorSets(device, &allocInfo, &ds);
//allocation failed. Try again
if (result == VK_ERROR_OUT_OF_POOL_MEMORY || result == VK_ERROR_FRAGMENTED_POOL)
{
fullPools.push_back(poolToUse);
poolToUse = get_pool(device);
allocInfo.descriptorPool = poolToUse;
VK_CHECK(vkAllocateDescriptorSets(device, &allocInfo, &ds));
}
readyPools.push_back(poolToUse);
return ds;
}

79
src/core/vk_descriptors.h Normal file

@@ -0,0 +1,79 @@
#pragma once
#include <core/vk_types.h>
struct DescriptorLayoutBuilder
{
std::vector<VkDescriptorSetLayoutBinding> bindings;
void add_binding(uint32_t binding, VkDescriptorType type);
void clear();
VkDescriptorSetLayout build(VkDevice device, VkShaderStageFlags shaderStages, void *pNext = nullptr,
VkDescriptorSetLayoutCreateFlags flags = 0);
};
struct DescriptorWriter
{
std::deque<VkDescriptorImageInfo> imageInfos;
std::deque<VkDescriptorBufferInfo> bufferInfos;
std::vector<VkWriteDescriptorSet> writes;
void write_image(int binding, VkImageView image, VkSampler sampler, VkImageLayout layout, VkDescriptorType type);
void write_buffer(int binding, VkBuffer buffer, size_t size, size_t offset, VkDescriptorType type);
void clear();
void update_set(VkDevice device, VkDescriptorSet set);
};
struct DescriptorAllocator
{
struct PoolSizeRatio
{
VkDescriptorType type;
float ratio;
};
VkDescriptorPool pool;
void init_pool(VkDevice device, uint32_t maxSets, std::span<PoolSizeRatio> poolRatios);
void clear_descriptors(VkDevice device);
void destroy_pool(VkDevice device);
VkDescriptorSet allocate(VkDevice device, VkDescriptorSetLayout layout);
};
struct DescriptorAllocatorGrowable
{
public:
struct PoolSizeRatio
{
VkDescriptorType type;
float ratio;
};
void init(VkDevice device, uint32_t initialSets, std::span<PoolSizeRatio> poolRatios);
void clear_pools(VkDevice device);
void destroy_pools(VkDevice device);
VkDescriptorSet allocate(VkDevice device, VkDescriptorSetLayout layout, void *pNext = nullptr);
private:
VkDescriptorPool get_pool(VkDevice device);
VkDescriptorPool create_pool(VkDevice device, uint32_t setCount, std::span<PoolSizeRatio> poolRatios);
std::vector<PoolSizeRatio> ratios;
std::vector<VkDescriptorPool> fullPools;
std::vector<VkDescriptorPool> readyPools;
uint32_t setsPerPool;
};
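
A per-frame usage sketch tying the growable allocator and the writer together (device, frameDescriptors, sceneDataLayout and sceneDataBuffer are assumed handles owned elsewhere in the engine, and GPUSceneData is the scene UBO struct defined elsewhere):

    // Hypothetical call site: allocate a set and point binding 0 at a uniform buffer.
    VkDescriptorSet sceneSet = frameDescriptors.allocate(device, sceneDataLayout);
    DescriptorWriter writer;
    writer.write_buffer(0, sceneDataBuffer, sizeof(GPUSceneData), 0,
                        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
    writer.update_set(device, sceneSet);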

83
src/core/vk_device.cpp Normal file

@@ -0,0 +1,83 @@
#include "vk_device.h"
#include "config.h"
#include "SDL2/SDL.h"
#include "SDL2/SDL_vulkan.h"
void DeviceManager::init_vulkan(SDL_Window *window)
{
vkb::InstanceBuilder builder;
//make the vulkan instance, with basic debug features
auto inst_ret = builder.set_app_name("Example Vulkan Application")
.request_validation_layers(kUseValidationLayers)
.use_default_debug_messenger()
.require_api_version(1, 3, 0)
.build();
vkb::Instance vkb_inst = inst_ret.value();
//grab the instance
_instance = vkb_inst.instance;
_debug_messenger = vkb_inst.debug_messenger;
SDL_Vulkan_CreateSurface(window, _instance, &_surface);
VkPhysicalDeviceVulkan13Features features{.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES};
features.dynamicRendering = true;
features.synchronization2 = true;
VkPhysicalDeviceVulkan12Features features12{.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES};
features12.bufferDeviceAddress = true;
features12.descriptorIndexing = true;
//use vkbootstrap to select a gpu.
//We want a gpu that can write to the SDL surface and supports Vulkan 1.3
vkb::PhysicalDeviceSelector selector{vkb_inst};
vkb::PhysicalDevice physicalDevice = selector
.set_minimum_version(1, 3)
.set_required_features_13(features)
.set_required_features_12(features12)
.set_surface(_surface)
.select()
.value();
//physicalDevice.features.
//create the final vulkan device
vkb::DeviceBuilder deviceBuilder{physicalDevice};
vkb::Device vkbDevice = deviceBuilder.build().value();
// Get the VkDevice handle used in the rest of a vulkan application
_device = vkbDevice.device;
_chosenGPU = physicalDevice.physical_device;
// use vkbootstrap to get a Graphics queue
_graphicsQueue = vkbDevice.get_queue(vkb::QueueType::graphics).value();
_graphicsQueueFamily = vkbDevice.get_queue_index(vkb::QueueType::graphics).value();
//> vma_init
//initialize the memory allocator
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = _chosenGPU;
allocatorInfo.device = _device;
allocatorInfo.instance = _instance;
allocatorInfo.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;
vmaCreateAllocator(&allocatorInfo, &_allocator);
_deletionQueue.push_function([&]() {
vmaDestroyAllocator(_allocator);
});
//< vma_init
}
void DeviceManager::cleanup()
{
vkDestroySurfaceKHR(_instance, _surface, nullptr);
_deletionQueue.flush();
vkDestroyDevice(_device, nullptr);
vkb::destroy_debug_utils_messenger(_instance, _debug_messenger);
vkDestroyInstance(_instance, nullptr);
fmt::print("DeviceManager::cleanup()\n");
}

33
src/core/vk_device.h Normal file

@@ -0,0 +1,33 @@
#pragma once
#include <core/vk_types.h>
#include "VkBootstrap.h"
class DeviceManager
{
public:
void init_vulkan(struct SDL_Window *window);
void cleanup();
VkDevice device() const { return _device; }
VkInstance instance() const { return _instance; }
VkPhysicalDevice physicalDevice() const { return _chosenGPU; }
VkSurfaceKHR surface() const { return _surface; }
VkQueue graphicsQueue() const { return _graphicsQueue; }
uint32_t graphicsQueueFamily() const { return _graphicsQueueFamily; }
VmaAllocator allocator() const { return _allocator; }
VkDebugUtilsMessengerEXT debugMessenger() { return _debug_messenger; }
private:
VkInstance _instance = nullptr;
VkDebugUtilsMessengerEXT _debug_messenger = nullptr;
VkPhysicalDevice _chosenGPU = nullptr;
VkDevice _device = nullptr;
VkSurfaceKHR _surface = nullptr;
VkQueue _graphicsQueue = nullptr;
uint32_t _graphicsQueueFamily = 0;
VmaAllocator _allocator = nullptr;
DeletionQueue _deletionQueue;
};
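
Lifetime sketch (hypothetical call site; "window" is an SDL_Window* created beforehand): the DeviceManager owns the instance, device, surface and VMA allocator, so it is brought up before and torn down after every other manager, mirroring VulkanEngine::init/cleanup further down.

    DeviceManager deviceManager;
    deviceManager.init_vulkan(window);
    // ... init ResourceManager, SwapchainManager, descriptor/sampler managers on top ...
    // ... run, then clean those up in reverse order ...
    deviceManager.cleanup(); // destroys surface, VMA allocator, device, debug messenger, instance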

741
src/core/vk_engine.cpp Normal file

@@ -0,0 +1,741 @@
//> includes
#include "vk_engine.h"
#include <core/vk_images.h>
#include "SDL2/SDL.h"
#include "SDL2/SDL_vulkan.h"
#include <core/vk_initializers.h>
#include <core/vk_types.h>
#include "VkBootstrap.h"
#include <chrono>
#include <thread>
#include "render/vk_pipelines.h"
#include <iostream>
#include <glm/gtx/transform.hpp>
#include "render/primitives.h"
#include "vk_mem_alloc.h"
#include "imgui.h"
#include "imgui_impl_sdl2.h"
#include "imgui_impl_vulkan.h"
#include "render/vk_renderpass_geometry.h"
#include "render/vk_renderpass_imgui.h"
#include "render/vk_renderpass_lighting.h"
#include "render/vk_renderpass_transparent.h"
#include "render/vk_renderpass_tonemap.h"
#include "render/vk_renderpass_shadow.h"
#include "vk_resource.h"
#include "engine_context.h"
#include "core/vk_pipeline_manager.h"
VulkanEngine *loadedEngine = nullptr;
void VulkanEngine::init()
{
// We initialize SDL and create a window with it.
SDL_Init(SDL_INIT_VIDEO);
constexpr auto window_flags = static_cast<SDL_WindowFlags>(SDL_WINDOW_VULKAN | SDL_WINDOW_RESIZABLE);
_swapchainManager = std::make_unique<SwapchainManager>();
_window = SDL_CreateWindow(
"Vulkan Engine",
SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED,
_swapchainManager->windowExtent().width,
_swapchainManager->windowExtent().height,
window_flags
);
_deviceManager = std::make_shared<DeviceManager>();
_deviceManager->init_vulkan(_window);
_resourceManager = std::make_shared<ResourceManager>();
_resourceManager->init(_deviceManager.get());
_descriptorManager = std::make_unique<DescriptorManager>();
_descriptorManager->init(_deviceManager.get());
_samplerManager = std::make_unique<SamplerManager>();
_samplerManager->init(_deviceManager.get());
// Build dependency-injection context
_context = std::make_shared<EngineContext>();
_context->device = _deviceManager;
_context->resources = _resourceManager;
_context->descriptors = std::make_shared<DescriptorAllocatorGrowable>();
{
std::vector<DescriptorAllocatorGrowable::PoolSizeRatio> sizes = {
{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1},
{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1},
{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 4},
};
_context->descriptors->init(_deviceManager->device(), 10, sizes);
}
_swapchainManager->init(_deviceManager.get(), _resourceManager.get());
_swapchainManager->init_swapchain();
// Fill remaining context pointers now that managers exist
_context->descriptorLayouts = _descriptorManager.get();
_context->samplers = _samplerManager.get();
_context->swapchain = _swapchainManager.get();
// Create graphics pipeline manager (after swapchain is ready)
_pipelineManager = std::make_unique<PipelineManager>();
_pipelineManager->init(_context.get());
_context->pipelines = _pipelineManager.get();
// Create central AssetManager for paths and asset caching
_assetManager = std::make_unique<AssetManager>();
_assetManager->init(this);
_context->assets = _assetManager.get();
_sceneManager = std::make_unique<SceneManager>();
_sceneManager->init(_context.get());
_context->scene = _sceneManager.get();
compute.init(_context.get());
// Publish engine-owned subsystems into context for modules
_context->compute = &compute;
_context->window = _window;
_context->stats = &stats;
// Render graph skeleton
_renderGraph = std::make_unique<RenderGraph>();
_renderGraph->init(_context.get());
_context->renderGraph = _renderGraph.get();
init_frame_resources();
// Build material pipelines early so materials can be created
metalRoughMaterial.build_pipelines(this);
init_default_data();
_renderPassManager = std::make_unique<RenderPassManager>();
_renderPassManager->init(_context.get());
auto imguiPass = std::make_unique<ImGuiPass>();
_renderPassManager->setImGuiPass(std::move(imguiPass));
const std::string structurePath = _assetManager->modelPath("seoul_high.glb");
const auto structureFile = _assetManager->loadGLTF(structurePath);
assert(structureFile.has_value());
_sceneManager->loadScene("structure", *structureFile);
_resourceManager->set_deferred_uploads(true);
//everything went fine
_isInitialized = true;
}
void VulkanEngine::init_default_data()
{
//> default_img
//3 default textures, white, grey, black. 1 pixel each
uint32_t white = glm::packUnorm4x8(glm::vec4(1, 1, 1, 1));
_whiteImage = _resourceManager->create_image((void *) &white, VkExtent3D{1, 1, 1}, VK_FORMAT_R8G8B8A8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT);
uint32_t grey = glm::packUnorm4x8(glm::vec4(0.66f, 0.66f, 0.66f, 1));
_greyImage = _resourceManager->create_image((void *) &grey, VkExtent3D{1, 1, 1}, VK_FORMAT_R8G8B8A8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT);
uint32_t black = glm::packUnorm4x8(glm::vec4(0, 0, 0, 0));
_blackImage = _resourceManager->create_image((void *) &black, VkExtent3D{1, 1, 1}, VK_FORMAT_R8G8B8A8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT);
//checkerboard image
uint32_t magenta = glm::packUnorm4x8(glm::vec4(1, 0, 1, 1));
std::array<uint32_t, 16 * 16> pixels{}; //for 16x16 checkerboard texture
for (int x = 0; x < 16; x++)
{
for (int y = 0; y < 16; y++)
{
pixels[y * 16 + x] = ((x % 2) ^ (y % 2)) ? magenta : black;
}
}
_errorCheckerboardImage = _resourceManager->create_image(pixels.data(), VkExtent3D{16, 16, 1},
VK_FORMAT_R8G8B8A8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT);
// build default primitive meshes via generic AssetManager API
{
AssetManager::MeshCreateInfo ci{};
ci.name = "Cube";
ci.geometry.type = AssetManager::MeshGeometryDesc::Type::Cube;
ci.material.kind = AssetManager::MeshMaterialDesc::Kind::Default;
cubeMesh = _assetManager->createMesh(ci);
}
{
AssetManager::MeshCreateInfo ci{};
ci.name = "Sphere";
ci.geometry.type = AssetManager::MeshGeometryDesc::Type::Sphere;
ci.geometry.sectors = 16;
ci.geometry.stacks = 16;
ci.material.kind = AssetManager::MeshMaterialDesc::Kind::Default;
sphereMesh = _assetManager->createMesh(ci);
}
// Register default primitives as dynamic scene instances
if (_sceneManager)
{
_sceneManager->addMeshInstance("default.cube", cubeMesh,
glm::translate(glm::mat4(1.f), glm::vec3(-2.f, 0.f, -2.f)));
_sceneManager->addMeshInstance("default.sphere", sphereMesh,
glm::translate(glm::mat4(1.f), glm::vec3(2.f, 0.f, -2.f)));
}
_mainDeletionQueue.push_function([&]() {
_resourceManager->destroy_image(_whiteImage);
_resourceManager->destroy_image(_greyImage);
_resourceManager->destroy_image(_blackImage);
_resourceManager->destroy_image(_errorCheckerboardImage);
});
//< default_img
}
void VulkanEngine::cleanup()
{
vkDeviceWaitIdle(_deviceManager->device());
_sceneManager->cleanup();
if (_isInitialized)
{
//make sure the gpu has stopped doing its things
vkDeviceWaitIdle(_deviceManager->device());
// Flush all frame deletion queues first while VMA allocator is still alive
for (int i = 0; i < FRAME_OVERLAP; i++)
{
_frames[i]._deletionQueue.flush();
}
for (int i = 0; i < FRAME_OVERLAP; i++)
{
_frames[i].cleanup(_deviceManager.get());
}
metalRoughMaterial.clear_resources(_deviceManager->device());
_mainDeletionQueue.flush();
_renderPassManager->cleanup();
_pipelineManager->cleanup();
compute.cleanup();
_swapchainManager->cleanup();
if (_assetManager) _assetManager->cleanup();
_resourceManager->cleanup();
_samplerManager->cleanup();
_descriptorManager->cleanup();
_context->descriptors->destroy_pools(_deviceManager->device());
_deviceManager->cleanup();
SDL_DestroyWindow(_window);
}
}
void VulkanEngine::draw()
{
_sceneManager->update_scene();
//> frame_clear
//wait until the gpu has finished rendering the last frame. Timeout of 1 second
VK_CHECK(vkWaitForFences(_deviceManager->device(), 1, &get_current_frame()._renderFence, true, 1000000000));
get_current_frame()._deletionQueue.flush();
get_current_frame()._frameDescriptors.clear_pools(_deviceManager->device());
//< frame_clear
uint32_t swapchainImageIndex;
VkResult e = vkAcquireNextImageKHR(_deviceManager->device(), _swapchainManager->swapchain(), 1000000000,
get_current_frame()._swapchainSemaphore,
nullptr, &swapchainImageIndex);
if (e == VK_ERROR_OUT_OF_DATE_KHR)
{
resize_requested = true;
return;
}
_drawExtent.height = std::min(_swapchainManager->swapchainExtent().height,
_swapchainManager->drawImage().imageExtent.height) * renderScale;
_drawExtent.width = std::min(_swapchainManager->swapchainExtent().width,
_swapchainManager->drawImage().imageExtent.width) * renderScale;
VK_CHECK(vkResetFences(_deviceManager->device(), 1, &get_current_frame()._renderFence));
//now that we are sure that the commands finished executing, we can safely reset the command buffer to begin recording again.
VK_CHECK(vkResetCommandBuffer(get_current_frame()._mainCommandBuffer, 0));
//naming it cmd for shorter writing
VkCommandBuffer cmd = get_current_frame()._mainCommandBuffer;
//begin the command buffer recording. We will use this command buffer exactly once, so we want to let vulkan know that
VkCommandBufferBeginInfo cmdBeginInfo = vkinit::command_buffer_begin_info(
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
//---------------------------
VK_CHECK(vkBeginCommandBuffer(cmd, &cmdBeginInfo));
// publish per-frame pointers and draw extent to context for passes
_context->currentFrame = &get_current_frame();
_context->drawExtent = _drawExtent;
// Optional: check for shader changes and hot-reload pipelines
if (_pipelineManager)
{
_pipelineManager->hotReloadChanged();
}
// --- RenderGraph frame build ---
if (_renderGraph)
{
_renderGraph->clear();
RGImageHandle hDraw = _renderGraph->import_draw_image();
RGImageHandle hDepth = _renderGraph->import_depth_image();
RGImageHandle hGBufferPosition = _renderGraph->import_gbuffer_position();
RGImageHandle hGBufferNormal = _renderGraph->import_gbuffer_normal();
RGImageHandle hGBufferAlbedo = _renderGraph->import_gbuffer_albedo();
RGImageHandle hSwapchain = _renderGraph->import_swapchain_image(swapchainImageIndex);
// Create a transient shadow depth target (fixed resolution for now)
const VkExtent2D shadowExtent{2048, 2048};
RGImageHandle hShadow = _renderGraph->create_depth_image("shadow.depth", shadowExtent, VK_FORMAT_D32_SFLOAT);
_resourceManager->register_upload_pass(*_renderGraph, get_current_frame());
ImGuiPass *imguiPass = nullptr;
RGImageHandle finalColor = hDraw; // by default, present HDR draw directly (copy)
if (_renderPassManager)
{
if (auto *background = _renderPassManager->getPass<BackgroundPass>())
{
background->register_graph(_renderGraph.get(), hDraw, hDepth);
}
if (auto *shadow = _renderPassManager->getPass<ShadowPass>())
{
shadow->register_graph(_renderGraph.get(), hShadow, shadowExtent);
}
if (auto *geometry = _renderPassManager->getPass<GeometryPass>())
{
geometry->register_graph(_renderGraph.get(), hGBufferPosition, hGBufferNormal, hGBufferAlbedo, hDepth);
}
if (auto *lighting = _renderPassManager->getPass<LightingPass>())
{
lighting->register_graph(_renderGraph.get(), hDraw, hGBufferPosition, hGBufferNormal, hGBufferAlbedo, hShadow);
}
if (auto *transparent = _renderPassManager->getPass<TransparentPass>())
{
transparent->register_graph(_renderGraph.get(), hDraw, hDepth);
}
imguiPass = _renderPassManager->getImGuiPass();
// Optional Tonemap pass: sample HDR draw -> LDR intermediate
if (auto *tonemap = _renderPassManager->getPass<TonemapPass>())
{
finalColor = tonemap->register_graph(_renderGraph.get(), hDraw);
}
}
auto appendPresentExtras = [imguiPass, hSwapchain](RenderGraph &graph)
{
if (imguiPass)
{
imguiPass->register_graph(&graph, hSwapchain);
}
};
_renderGraph->add_present_chain(finalColor, hSwapchain, appendPresentExtras);
// Apply persistent pass enable overrides
for (size_t i = 0; i < _renderGraph->pass_count(); ++i)
{
const char* name = _renderGraph->pass_name(i);
auto it = _rgPassToggles.find(name);
if (it != _rgPassToggles.end())
{
_renderGraph->set_pass_enabled(i, it->second);
}
}
if (_renderGraph->compile())
{
_renderGraph->execute(cmd);
}
}
VK_CHECK(vkEndCommandBuffer(cmd));
VkCommandBufferSubmitInfo cmdinfo = vkinit::command_buffer_submit_info(cmd);
VkSemaphoreSubmitInfo waitInfo = vkinit::semaphore_submit_info(VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR,
get_current_frame()._swapchainSemaphore);
VkSemaphoreSubmitInfo signalInfo = vkinit::semaphore_submit_info(VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT,
get_current_frame()._renderSemaphore);
VkSubmitInfo2 submit = vkinit::submit_info(&cmdinfo, &signalInfo, &waitInfo);
VK_CHECK(vkQueueSubmit2(_deviceManager->graphicsQueue(), 1, &submit, get_current_frame()._renderFence));
VkPresentInfoKHR presentInfo = vkinit::present_info();
VkSwapchainKHR swapchain = _swapchainManager->swapchain();
presentInfo.pSwapchains = &swapchain;
presentInfo.swapchainCount = 1;
presentInfo.pWaitSemaphores = &get_current_frame()._renderSemaphore;
presentInfo.waitSemaphoreCount = 1;
presentInfo.pImageIndices = &swapchainImageIndex;
VkResult presentResult = vkQueuePresentKHR(_deviceManager->graphicsQueue(), &presentInfo);
if (presentResult == VK_ERROR_OUT_OF_DATE_KHR)
{
resize_requested = true;
}
_frameNumber++;
}
void VulkanEngine::run()
{
SDL_Event e;
bool bQuit = false;
//main loop
while (!bQuit)
{
auto start = std::chrono::system_clock::now();
//Handle events on queue
while (SDL_PollEvent(&e) != 0)
{
//close the window when user alt-f4s or clicks the X button
if (e.type == SDL_QUIT) bQuit = true;
if (e.type == SDL_WINDOWEVENT)
{
if (e.window.event == SDL_WINDOWEVENT_MINIMIZED)
{
freeze_rendering = true;
}
if (e.window.event == SDL_WINDOWEVENT_RESTORED)
{
freeze_rendering = false;
}
}
_sceneManager->getMainCamera().processSDLEvent(e);
ImGui_ImplSDL2_ProcessEvent(&e);
}
if (freeze_rendering)
{
//throttle the speed to avoid the endless spinning
std::this_thread::sleep_for(std::chrono::milliseconds(100));
continue;
}
if (resize_requested)
{
_swapchainManager->resize_swapchain(_window);
}
// imgui new frame
ImGui_ImplVulkan_NewFrame();
ImGui_ImplSDL2_NewFrame();
ImGui::NewFrame();
if (ImGui::Begin("background"))
{
auto background_pass = _renderPassManager->getPass<BackgroundPass>();
ComputeEffect &selected = background_pass->_backgroundEffects[background_pass->_currentEffect];
ImGui::Text("Selected effect: %s", selected.name);
ImGui::SliderInt("Effect Index", &background_pass->_currentEffect, 0,
background_pass->_backgroundEffects.size() - 1);
ImGui::InputFloat4("data1", reinterpret_cast<float *>(&selected.data.data1));
ImGui::InputFloat4("data2", reinterpret_cast<float *>(&selected.data.data2));
ImGui::InputFloat4("data3", reinterpret_cast<float *>(&selected.data.data3));
ImGui::InputFloat4("data4", reinterpret_cast<float *>(&selected.data.data4));
ImGui::SliderFloat("Render Scale", &renderScale, 0.3f, 1.f);
ImGui::End();
}
if (ImGui::Begin("Stats"))
{
ImGui::Text("frametime %f ms", stats.frametime);
ImGui::Text("draw time %f ms", stats.mesh_draw_time);
ImGui::Text("update time %f ms", _sceneManager->stats.scene_update_time);
ImGui::Text("triangles %i", stats.triangle_count);
ImGui::Text("draws %i", stats.drawcall_count);
ImGui::End();
}
// Render Graph debug window
if (ImGui::Begin("Render Graph"))
{
if (_renderGraph)
{
auto &graph = *_renderGraph;
std::vector<RenderGraph::RGDebugPassInfo> passInfos;
graph.debug_get_passes(passInfos);
if (ImGui::Button("Reload Pipelines")) { _pipelineManager->hotReloadChanged(); }
ImGui::SameLine();
ImGui::Text("%zu passes", passInfos.size());
if (ImGui::BeginTable("passes", 6, ImGuiTableFlags_RowBg | ImGuiTableFlags_SizingStretchProp))
{
ImGui::TableSetupColumn("Enable", ImGuiTableColumnFlags_WidthFixed, 70);
ImGui::TableSetupColumn("Name");
ImGui::TableSetupColumn("Type", ImGuiTableColumnFlags_WidthFixed, 90);
ImGui::TableSetupColumn("Imgs", ImGuiTableColumnFlags_WidthFixed, 60);
ImGui::TableSetupColumn("Bufs", ImGuiTableColumnFlags_WidthFixed, 60);
ImGui::TableSetupColumn("Attachments", ImGuiTableColumnFlags_WidthFixed, 100);
ImGui::TableHeadersRow();
auto typeName = [](RGPassType t){
switch (t) {
case RGPassType::Graphics: return "Graphics";
case RGPassType::Compute: return "Compute";
case RGPassType::Transfer: return "Transfer";
default: return "?";
}
};
for (size_t i = 0; i < passInfos.size(); ++i)
{
auto &pi = passInfos[i];
ImGui::TableNextRow();
ImGui::TableSetColumnIndex(0);
bool enabled = true;
if (auto it = _rgPassToggles.find(pi.name); it != _rgPassToggles.end()) enabled = it->second;
std::string chkId = std::string("##en") + std::to_string(i);
if (ImGui::Checkbox(chkId.c_str(), &enabled))
{
_rgPassToggles[pi.name] = enabled;
}
ImGui::TableSetColumnIndex(1);
ImGui::TextUnformatted(pi.name.c_str());
ImGui::TableSetColumnIndex(2);
ImGui::TextUnformatted(typeName(pi.type));
ImGui::TableSetColumnIndex(3);
ImGui::Text("%u/%u", pi.imageReads, pi.imageWrites);
ImGui::TableSetColumnIndex(4);
ImGui::Text("%u/%u", pi.bufferReads, pi.bufferWrites);
ImGui::TableSetColumnIndex(5);
ImGui::Text("%u%s", pi.colorAttachmentCount, pi.hasDepth ? "+D" : "");
}
ImGui::EndTable();
}
if (ImGui::CollapsingHeader("Images", ImGuiTreeNodeFlags_DefaultOpen))
{
std::vector<RenderGraph::RGDebugImageInfo> imgs;
graph.debug_get_images(imgs);
if (ImGui::BeginTable("images", 7, ImGuiTableFlags_RowBg | ImGuiTableFlags_SizingStretchProp))
{
ImGui::TableSetupColumn("Id", ImGuiTableColumnFlags_WidthFixed, 40);
ImGui::TableSetupColumn("Name");
ImGui::TableSetupColumn("Fmt", ImGuiTableColumnFlags_WidthFixed, 120);
ImGui::TableSetupColumn("Extent", ImGuiTableColumnFlags_WidthFixed, 120);
ImGui::TableSetupColumn("Imported", ImGuiTableColumnFlags_WidthFixed, 70);
ImGui::TableSetupColumn("Usage", ImGuiTableColumnFlags_WidthFixed, 80);
ImGui::TableSetupColumn("Life", ImGuiTableColumnFlags_WidthFixed, 80);
ImGui::TableHeadersRow();
for (const auto &im : imgs)
{
ImGui::TableNextRow();
ImGui::TableSetColumnIndex(0); ImGui::Text("%u", im.id);
ImGui::TableSetColumnIndex(1); ImGui::TextUnformatted(im.name.c_str());
ImGui::TableSetColumnIndex(2); ImGui::TextUnformatted(string_VkFormat(im.format));
ImGui::TableSetColumnIndex(3); ImGui::Text("%ux%u", im.extent.width, im.extent.height);
ImGui::TableSetColumnIndex(4); ImGui::TextUnformatted(im.imported ? "yes" : "no");
ImGui::TableSetColumnIndex(5); ImGui::Text("0x%x", (unsigned)im.creationUsage);
ImGui::TableSetColumnIndex(6); ImGui::Text("%d..%d", im.firstUse, im.lastUse);
}
ImGui::EndTable();
}
}
if (ImGui::CollapsingHeader("Buffers"))
{
std::vector<RenderGraph::RGDebugBufferInfo> bufs;
graph.debug_get_buffers(bufs);
if (ImGui::BeginTable("buffers", 6, ImGuiTableFlags_RowBg | ImGuiTableFlags_SizingStretchProp))
{
ImGui::TableSetupColumn("Id", ImGuiTableColumnFlags_WidthFixed, 40);
ImGui::TableSetupColumn("Name");
ImGui::TableSetupColumn("Size", ImGuiTableColumnFlags_WidthFixed, 100);
ImGui::TableSetupColumn("Imported", ImGuiTableColumnFlags_WidthFixed, 70);
ImGui::TableSetupColumn("Usage", ImGuiTableColumnFlags_WidthFixed, 100);
ImGui::TableSetupColumn("Life", ImGuiTableColumnFlags_WidthFixed, 80);
ImGui::TableHeadersRow();
for (const auto &bf : bufs)
{
ImGui::TableNextRow();
ImGui::TableSetColumnIndex(0); ImGui::Text("%u", bf.id);
ImGui::TableSetColumnIndex(1); ImGui::TextUnformatted(bf.name.c_str());
ImGui::TableSetColumnIndex(2); ImGui::Text("%zu", (size_t)bf.size);
ImGui::TableSetColumnIndex(3); ImGui::TextUnformatted(bf.imported ? "yes" : "no");
ImGui::TableSetColumnIndex(4); ImGui::Text("0x%x", (unsigned)bf.usage);
ImGui::TableSetColumnIndex(5); ImGui::Text("%d..%d", bf.firstUse, bf.lastUse);
}
ImGui::EndTable();
}
}
}
ImGui::End();
}
// Pipelines debug window (graphics)
if (ImGui::Begin("Pipelines"))
{
if (_pipelineManager)
{
std::vector<PipelineManager::GraphicsPipelineDebugInfo> pipes;
_pipelineManager->debug_get_graphics(pipes);
if (ImGui::Button("Reload Changed")) { _pipelineManager->hotReloadChanged(); }
ImGui::SameLine(); ImGui::Text("%zu graphics pipelines", pipes.size());
if (ImGui::BeginTable("gfxpipes", 5, ImGuiTableFlags_RowBg | ImGuiTableFlags_SizingStretchProp))
{
ImGui::TableSetupColumn("Name");
ImGui::TableSetupColumn("VS");
ImGui::TableSetupColumn("FS");
ImGui::TableSetupColumn("Valid", ImGuiTableColumnFlags_WidthFixed, 60);
ImGui::TableHeadersRow();
for (const auto &p : pipes)
{
ImGui::TableNextRow();
ImGui::TableSetColumnIndex(0); ImGui::TextUnformatted(p.name.c_str());
ImGui::TableSetColumnIndex(1); ImGui::TextUnformatted(p.vertexShaderPath.c_str());
ImGui::TableSetColumnIndex(2); ImGui::TextUnformatted(p.fragmentShaderPath.c_str());
ImGui::TableSetColumnIndex(3); ImGui::TextUnformatted(p.valid ? "yes" : "no");
}
ImGui::EndTable();
}
}
}
// End() must be paired with Begin() even when Begin() returns false (window collapsed)
ImGui::End();
// Draw targets window
if (ImGui::Begin("Targets"))
{
ImGui::Text("Draw extent: %ux%u", _drawExtent.width, _drawExtent.height);
auto scExt = _swapchainManager->swapchainExtent();
ImGui::Text("Swapchain: %ux%u", scExt.width, scExt.height);
ImGui::Text("Draw fmt: %s", string_VkFormat(_swapchainManager->drawImage().imageFormat));
ImGui::Text("Swap fmt: %s", string_VkFormat(_swapchainManager->swapchainImageFormat()));
}
ImGui::End();
// PostFX window
if (ImGui::Begin("PostFX"))
{
if (auto *tm = _renderPassManager->getPass<TonemapPass>())
{
float exp = tm->exposure();
int mode = tm->mode();
if (ImGui::SliderFloat("Exposure", &exp, 0.05f, 8.0f)) { tm->setExposure(exp); }
ImGui::TextUnformatted("Operator");
ImGui::SameLine();
if (ImGui::RadioButton("Reinhard", mode == 0)) { mode = 0; tm->setMode(mode); }
ImGui::SameLine();
if (ImGui::RadioButton("ACES", mode == 1)) { mode = 1; tm->setMode(mode); }
}
else
{
ImGui::TextUnformatted("Tonemap pass not available");
}
}
ImGui::End();
// Scene window
if (ImGui::Begin("Scene"))
{
const DrawContext &dc = _context->getMainDrawContext();
ImGui::Text("Opaque draws: %zu", dc.OpaqueSurfaces.size());
ImGui::Text("Transp draws: %zu", dc.TransparentSurfaces.size());
}
ImGui::End();
ImGui::Render();
draw();
auto end = std::chrono::system_clock::now();
//convert to microseconds (integer), then back to milliseconds
auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
stats.frametime = elapsed.count() / 1000.f;
}
}
void VulkanEngine::init_frame_resources()
{
// descriptor pool sizes per-frame
std::vector<DescriptorAllocatorGrowable::PoolSizeRatio> frame_sizes = {
{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 3},
{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 3},
{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 3},
{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 4},
};
for (int i = 0; i < FRAME_OVERLAP; i++)
{
_frames[i].init(_deviceManager.get(), frame_sizes);
}
}
void VulkanEngine::init_pipelines()
{
metalRoughMaterial.build_pipelines(this);
}
void MeshNode::Draw(const glm::mat4 &topMatrix, DrawContext &ctx)
{
glm::mat4 nodeMatrix = topMatrix * worldTransform;
for (auto &s: mesh->surfaces)
{
RenderObject def{};
def.indexCount = s.count;
def.firstIndex = s.startIndex;
def.indexBuffer = mesh->meshBuffers.indexBuffer.buffer;
def.vertexBuffer = mesh->meshBuffers.vertexBuffer.buffer;
def.bounds = s.bounds; // ensure culling uses correct mesh-local AABB
def.material = &s.material->data;
def.transform = nodeMatrix;
def.vertexBufferAddress = mesh->meshBuffers.vertexBufferAddress;
if (s.material->data.passType == MaterialPass::Transparent)
{
ctx.TransparentSurfaces.push_back(def);
}
else
{
ctx.OpaqueSurfaces.push_back(def);
}
}
// recurse down
Node::Draw(topMatrix, ctx);
}

133
src/core/vk_engine.h Normal file

@@ -0,0 +1,133 @@
// vk_engine.h : Include file for standard system include files,
// or project specific include files.
#pragma once
#include <core/vk_types.h>
#include <vector>
#include <string>
#include <unordered_map>
#include "vk_mem_alloc.h"
#include <deque>
#include <functional>
#include "vk_descriptors.h"
#include "scene/vk_loader.h"
#include "compute/vk_compute.h"
#include <scene/camera.h>
#include "vk_device.h"
#include "render/vk_renderpass.h"
#include "render/vk_renderpass_background.h"
#include "vk_resource.h"
#include "vk_swapchain.h"
#include "scene/vk_scene.h"
#include "render/vk_materials.h"
#include "frame_resources.h"
#include "vk_descriptor_manager.h"
#include "vk_sampler_manager.h"
#include "core/engine_context.h"
#include "core/vk_pipeline_manager.h"
#include "core/asset_manager.h"
#include "render/rg_graph.h"
constexpr unsigned int FRAME_OVERLAP = 2;
// Compute push constants and effects are declared in compute/vk_compute.h now.
struct RenderPass
{
std::string name;
std::function<void(VkCommandBuffer)> execute;
};
struct MeshNode : public Node
{
std::shared_ptr<MeshAsset> mesh;
virtual void Draw(const glm::mat4 &topMatrix, DrawContext &ctx) override;
};
class VulkanEngine
{
public:
bool _isInitialized{false};
int _frameNumber{0};
std::shared_ptr<DeviceManager> _deviceManager;
std::unique_ptr<SwapchainManager> _swapchainManager;
std::shared_ptr<ResourceManager> _resourceManager;
std::unique_ptr<RenderPassManager> _renderPassManager;
std::unique_ptr<SceneManager> _sceneManager;
std::unique_ptr<PipelineManager> _pipelineManager;
std::unique_ptr<AssetManager> _assetManager;
std::unique_ptr<RenderGraph> _renderGraph;
struct SDL_Window *_window{nullptr};
FrameResources _frames[FRAME_OVERLAP];
FrameResources &get_current_frame() { return _frames[_frameNumber % FRAME_OVERLAP]; };
VkExtent2D _drawExtent;
float renderScale = 1.f;
std::unique_ptr<DescriptorManager> _descriptorManager;
std::unique_ptr<SamplerManager> _samplerManager;
ComputeManager compute;
std::shared_ptr<EngineContext> _context;
std::vector<VkFramebuffer> _framebuffers;
DeletionQueue _mainDeletionQueue;
VkPipelineLayout _meshPipelineLayout;
VkPipeline _meshPipeline;
GPUMeshBuffers rectangle;
std::shared_ptr<MeshAsset> cubeMesh;
std::shared_ptr<MeshAsset> sphereMesh;
AllocatedImage _whiteImage;
AllocatedImage _blackImage;
AllocatedImage _greyImage;
AllocatedImage _errorCheckerboardImage;
MaterialInstance defaultData;
GLTFMetallic_Roughness metalRoughMaterial;
EngineStats stats;
std::vector<RenderPass> renderPasses;
// Debug: persistent pass enable overrides (by pass name)
std::unordered_map<std::string, bool> _rgPassToggles;
//initializes everything in the engine
void init();
//shuts down the engine
void cleanup();
//draw loop
void draw();
//run main loop
void run();
bool resize_requested{false};
bool freeze_rendering{false};
private:
void init_frame_resources();
void init_pipelines();
void init_mesh_pipeline();
void init_default_data();
};

212
src/core/vk_images.cpp Normal file

@@ -0,0 +1,212 @@
#include <core/vk_images.h>
#include <core/vk_initializers.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
//> transition
#include <core/vk_initializers.h>
void vkutil::transition_image(VkCommandBuffer cmd, VkImage image, VkImageLayout currentLayout, VkImageLayout newLayout)
{
VkImageMemoryBarrier2 imageBarrier{.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2};
imageBarrier.pNext = nullptr;
// Choose aspect from the destination layout (depth vs color)
const VkImageAspectFlags aspectMask =
(newLayout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL) ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
// Choose pipeline stages + accesses per transition instead of over-broad
// ALL_COMMANDS barriers, which are overly conservative and have caused issues on stricter drivers (NVIDIA).
VkPipelineStageFlags2 srcStage = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
VkAccessFlags2 srcAccess = 0;
VkPipelineStageFlags2 dstStage = VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT;
VkAccessFlags2 dstAccess = 0;
switch (currentLayout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
srcStage = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
srcAccess = 0;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
srcStage = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
srcAccess = VK_ACCESS_2_TRANSFER_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
srcStage = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
srcAccess = VK_ACCESS_2_TRANSFER_READ_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
srcStage = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;
srcAccess = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
srcStage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
srcAccess = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
srcStage = VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
srcAccess = VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
break;
default:
// Fallback to a safe superset
srcStage = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
srcAccess = VK_ACCESS_2_MEMORY_WRITE_BIT | VK_ACCESS_2_MEMORY_READ_BIT;
break;
}
switch (newLayout)
{
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
dstStage = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
dstAccess = VK_ACCESS_2_TRANSFER_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
dstStage = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
dstAccess = VK_ACCESS_2_TRANSFER_READ_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// If you sample in other stages, extend this mask accordingly.
dstStage = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;
dstAccess = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
dstStage = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
dstAccess = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
dstStage = VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
dstAccess = VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
break;
default:
dstStage = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
dstAccess = VK_ACCESS_2_MEMORY_WRITE_BIT | VK_ACCESS_2_MEMORY_READ_BIT;
break;
}
imageBarrier.srcStageMask = srcStage;
imageBarrier.srcAccessMask = srcAccess;
imageBarrier.dstStageMask = dstStage;
imageBarrier.dstAccessMask = dstAccess;
imageBarrier.oldLayout = currentLayout;
imageBarrier.newLayout = newLayout;
imageBarrier.subresourceRange = vkinit::image_subresource_range(aspectMask);
imageBarrier.image = image;
VkDependencyInfo depInfo{.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO};
depInfo.pImageMemoryBarriers = &imageBarrier;
depInfo.imageMemoryBarrierCount = 1;
vkCmdPipelineBarrier2(cmd, &depInfo);
}
//< transition
//> copyimg
void vkutil::copy_image_to_image(VkCommandBuffer cmd, VkImage source, VkImage destination, VkExtent2D srcSize, VkExtent2D dstSize)
{
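// Use a blit (not a copy) so source and destination extents may differ; VK_FILTER_LINEAR handles the rescale.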
VkImageBlit2 blitRegion{ .sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2, .pNext = nullptr };
blitRegion.srcOffsets[1].x = srcSize.width;
blitRegion.srcOffsets[1].y = srcSize.height;
blitRegion.srcOffsets[1].z = 1;
blitRegion.dstOffsets[1].x = dstSize.width;
blitRegion.dstOffsets[1].y = dstSize.height;
blitRegion.dstOffsets[1].z = 1;
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.srcSubresource.baseArrayLayer = 0;
blitRegion.srcSubresource.layerCount = 1;
blitRegion.srcSubresource.mipLevel = 0;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.dstSubresource.baseArrayLayer = 0;
blitRegion.dstSubresource.layerCount = 1;
blitRegion.dstSubresource.mipLevel = 0;
VkBlitImageInfo2 blitInfo{ .sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2, .pNext = nullptr };
blitInfo.dstImage = destination;
blitInfo.dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
blitInfo.srcImage = source;
blitInfo.srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
blitInfo.filter = VK_FILTER_LINEAR;
blitInfo.regionCount = 1;
blitInfo.pRegions = &blitRegion;
vkCmdBlitImage2(cmd, &blitInfo);
}
//< copyimg
//> mipgen
void vkutil::generate_mipmaps(VkCommandBuffer cmd, VkImage image, VkExtent2D imageSize)
{
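// Walk the mip chain: each iteration transitions level `mip` from TRANSFER_DST to TRANSFER_SRC,
// then blits it into level `mip + 1`. After the loop every level is in TRANSFER_SRC, so a single
// whole-image barrier moves the chain to SHADER_READ_ONLY.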
int mipLevels = int(std::floor(std::log2(std::max(imageSize.width, imageSize.height)))) + 1;
for (int mip = 0; mip < mipLevels; mip++) {
VkExtent2D halfSize = imageSize;
// next mip extent; clamp to 1 so the blit region never collapses to zero on non-square images
halfSize.width = std::max<uint32_t>(1, halfSize.width / 2);
halfSize.height = std::max<uint32_t>(1, halfSize.height / 2);
VkImageMemoryBarrier2 imageBarrier{ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2, .pNext = nullptr };
// Prepare source level for blit: DST -> SRC
imageBarrier.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
imageBarrier.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT;
imageBarrier.dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
imageBarrier.dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT;
imageBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
imageBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
VkImageAspectFlags aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBarrier.subresourceRange = vkinit::image_subresource_range(aspectMask);
imageBarrier.subresourceRange.levelCount = 1;
imageBarrier.subresourceRange.baseMipLevel = mip;
imageBarrier.image = image;
VkDependencyInfo depInfo{ .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO, .pNext = nullptr };
depInfo.imageMemoryBarrierCount = 1;
depInfo.pImageMemoryBarriers = &imageBarrier;
vkCmdPipelineBarrier2(cmd, &depInfo);
if (mip < mipLevels - 1) {
VkImageBlit2 blitRegion { .sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2, .pNext = nullptr };
blitRegion.srcOffsets[1].x = imageSize.width;
blitRegion.srcOffsets[1].y = imageSize.height;
blitRegion.srcOffsets[1].z = 1;
blitRegion.dstOffsets[1].x = halfSize.width;
blitRegion.dstOffsets[1].y = halfSize.height;
blitRegion.dstOffsets[1].z = 1;
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.srcSubresource.baseArrayLayer = 0;
blitRegion.srcSubresource.layerCount = 1;
blitRegion.srcSubresource.mipLevel = mip;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.dstSubresource.baseArrayLayer = 0;
blitRegion.dstSubresource.layerCount = 1;
blitRegion.dstSubresource.mipLevel = mip + 1;
VkBlitImageInfo2 blitInfo {.sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2, .pNext = nullptr};
blitInfo.dstImage = image;
blitInfo.dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
blitInfo.srcImage = image;
blitInfo.srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
blitInfo.filter = VK_FILTER_LINEAR;
blitInfo.regionCount = 1;
blitInfo.pRegions = &blitRegion;
vkCmdBlitImage2(cmd, &blitInfo);
imageSize = halfSize;
}
}
// transition all mip levels into the final read_only layout
transition_image(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
//< mipgen

10
src/core/vk_images.h Normal file

@@ -0,0 +1,10 @@
#pragma once
#include <core/vk_types.h>
namespace vkutil {
void transition_image(VkCommandBuffer cmd, VkImage image, VkImageLayout currentLayout, VkImageLayout newLayout);
void copy_image_to_image(VkCommandBuffer cmd, VkImage source, VkImage destination, VkExtent2D srcSize, VkExtent2D dstSize);
void generate_mipmaps(VkCommandBuffer cmd, VkImage image, VkExtent2D imageSize);
};

365
src/core/vk_initializers.cpp Normal file

@@ -0,0 +1,365 @@
#include <core/vk_initializers.h>
//> init_cmd
VkCommandPoolCreateInfo vkinit::command_pool_create_info(uint32_t queueFamilyIndex,
VkCommandPoolCreateFlags flags /*= 0*/)
{
VkCommandPoolCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
info.pNext = nullptr;
info.queueFamilyIndex = queueFamilyIndex;
info.flags = flags;
return info;
}
VkCommandBufferAllocateInfo vkinit::command_buffer_allocate_info(
VkCommandPool pool, uint32_t count /*= 1*/)
{
VkCommandBufferAllocateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
info.pNext = nullptr;
info.commandPool = pool;
info.commandBufferCount = count;
info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
return info;
}
//< init_cmd
//
//> init_cmd_draw
VkCommandBufferBeginInfo vkinit::command_buffer_begin_info(VkCommandBufferUsageFlags flags /*= 0*/)
{
VkCommandBufferBeginInfo info = {};
info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
info.pNext = nullptr;
info.pInheritanceInfo = nullptr;
info.flags = flags;
return info;
}
//< init_cmd_draw
//> init_sync
VkFenceCreateInfo vkinit::fence_create_info(VkFenceCreateFlags flags /*= 0*/)
{
VkFenceCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
info.pNext = nullptr;
info.flags = flags;
return info;
}
VkSemaphoreCreateInfo vkinit::semaphore_create_info(VkSemaphoreCreateFlags flags /*= 0*/)
{
VkSemaphoreCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
info.pNext = nullptr;
info.flags = flags;
return info;
}
//< init_sync
//> init_submit
VkSemaphoreSubmitInfo vkinit::semaphore_submit_info(VkPipelineStageFlags2 stageMask, VkSemaphore semaphore)
{
VkSemaphoreSubmitInfo submitInfo{};
submitInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO;
submitInfo.pNext = nullptr;
submitInfo.semaphore = semaphore;
submitInfo.stageMask = stageMask;
submitInfo.deviceIndex = 0;
submitInfo.value = 1; // timeline semaphore value; ignored for binary semaphores
return submitInfo;
}
VkCommandBufferSubmitInfo vkinit::command_buffer_submit_info(VkCommandBuffer cmd)
{
VkCommandBufferSubmitInfo info{};
info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO;
info.pNext = nullptr;
info.commandBuffer = cmd;
info.deviceMask = 0;
return info;
}
VkSubmitInfo2 vkinit::submit_info(VkCommandBufferSubmitInfo *cmd, VkSemaphoreSubmitInfo *signalSemaphoreInfo,
VkSemaphoreSubmitInfo *waitSemaphoreInfo)
{
VkSubmitInfo2 info = {};
info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2;
info.pNext = nullptr;
info.waitSemaphoreInfoCount = waitSemaphoreInfo == nullptr ? 0 : 1;
info.pWaitSemaphoreInfos = waitSemaphoreInfo;
info.signalSemaphoreInfoCount = signalSemaphoreInfo == nullptr ? 0 : 1;
info.pSignalSemaphoreInfos = signalSemaphoreInfo;
info.commandBufferInfoCount = 1;
info.pCommandBufferInfos = cmd;
return info;
}
//< init_submit
VkPresentInfoKHR vkinit::present_info()
{
VkPresentInfoKHR info = {};
info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
info.pNext = nullptr;
info.swapchainCount = 0;
info.pSwapchains = nullptr;
info.pWaitSemaphores = nullptr;
info.waitSemaphoreCount = 0;
info.pImageIndices = nullptr;
return info;
}
//> color_info
VkRenderingAttachmentInfo vkinit::attachment_info(
VkImageView view, VkClearValue *clear, VkImageLayout layout /*= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL*/)
{
VkRenderingAttachmentInfo colorAttachment{};
colorAttachment.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO;
colorAttachment.pNext = nullptr;
colorAttachment.imageView = view;
colorAttachment.imageLayout = layout;
colorAttachment.loadOp = clear ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
if (clear)
{
colorAttachment.clearValue = *clear;
}
return colorAttachment;
}
//< color_info
//> depth_info
VkRenderingAttachmentInfo vkinit::depth_attachment_info(
VkImageView view, VkImageLayout layout /*= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL*/)
{
VkRenderingAttachmentInfo depthAttachment{};
depthAttachment.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO;
depthAttachment.pNext = nullptr;
depthAttachment.imageView = view;
depthAttachment.imageLayout = layout;
depthAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
depthAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
// Reverse-Z path clears to 0.0
depthAttachment.clearValue.depthStencil.depth = 0.f;
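// with reverse-Z, 0.0 is the far plane and depth tests use GREATER / GREATER_OR_EQUAL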
return depthAttachment;
}
//< depth_info
//> render_info
VkRenderingInfo vkinit::rendering_info(VkExtent2D renderExtent, VkRenderingAttachmentInfo *colorAttachment,
VkRenderingAttachmentInfo *depthAttachment)
{
VkRenderingInfo renderInfo{};
renderInfo.sType = VK_STRUCTURE_TYPE_RENDERING_INFO;
renderInfo.pNext = nullptr;
renderInfo.renderArea = VkRect2D{VkOffset2D{0, 0}, renderExtent};
renderInfo.layerCount = 1;
renderInfo.colorAttachmentCount = 1;
renderInfo.pColorAttachments = colorAttachment;
renderInfo.pDepthAttachment = depthAttachment;
renderInfo.pStencilAttachment = nullptr;
return renderInfo;
}
VkRenderingInfo vkinit::rendering_info_multi(VkExtent2D renderExtent, uint32_t colorCount,
VkRenderingAttachmentInfo *colorAttachments,
VkRenderingAttachmentInfo *depthAttachment)
{
VkRenderingInfo renderInfo{};
renderInfo.sType = VK_STRUCTURE_TYPE_RENDERING_INFO;
renderInfo.pNext = nullptr;
renderInfo.renderArea = VkRect2D{VkOffset2D{0, 0}, renderExtent};
renderInfo.layerCount = 1;
renderInfo.colorAttachmentCount = colorCount;
renderInfo.pColorAttachments = colorAttachments;
renderInfo.pDepthAttachment = depthAttachment;
renderInfo.pStencilAttachment = nullptr;
return renderInfo;
}
//< render_info
//> subresource
VkImageSubresourceRange vkinit::image_subresource_range(VkImageAspectFlags aspectMask)
{
VkImageSubresourceRange subImage{};
subImage.aspectMask = aspectMask;
subImage.baseMipLevel = 0;
subImage.levelCount = VK_REMAINING_MIP_LEVELS;
subImage.baseArrayLayer = 0;
subImage.layerCount = VK_REMAINING_ARRAY_LAYERS;
return subImage;
}
//< subresource
VkDescriptorSetLayoutBinding vkinit::descriptorset_layout_binding(VkDescriptorType type, VkShaderStageFlags stageFlags,
uint32_t binding)
{
VkDescriptorSetLayoutBinding setbind = {};
setbind.binding = binding;
setbind.descriptorCount = 1;
setbind.descriptorType = type;
setbind.pImmutableSamplers = nullptr;
setbind.stageFlags = stageFlags;
return setbind;
}
VkDescriptorSetLayoutCreateInfo vkinit::descriptorset_layout_create_info(VkDescriptorSetLayoutBinding *bindings,
uint32_t bindingCount)
{
VkDescriptorSetLayoutCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
info.pNext = nullptr;
info.pBindings = bindings;
info.bindingCount = bindingCount;
info.flags = 0;
return info;
}
VkWriteDescriptorSet vkinit::write_descriptor_image(VkDescriptorType type, VkDescriptorSet dstSet,
VkDescriptorImageInfo *imageInfo, uint32_t binding)
{
VkWriteDescriptorSet write = {};
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write.pNext = nullptr;
write.dstBinding = binding;
write.dstSet = dstSet;
write.descriptorCount = 1;
write.descriptorType = type;
write.pImageInfo = imageInfo;
return write;
}
VkWriteDescriptorSet vkinit::write_descriptor_buffer(VkDescriptorType type, VkDescriptorSet dstSet,
VkDescriptorBufferInfo *bufferInfo, uint32_t binding)
{
VkWriteDescriptorSet write = {};
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write.pNext = nullptr;
write.dstBinding = binding;
write.dstSet = dstSet;
write.descriptorCount = 1;
write.descriptorType = type;
write.pBufferInfo = bufferInfo;
return write;
}
VkDescriptorBufferInfo vkinit::buffer_info(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize range)
{
VkDescriptorBufferInfo binfo{};
binfo.buffer = buffer;
binfo.offset = offset;
binfo.range = range;
return binfo;
}
//> image_set
VkImageCreateInfo vkinit::image_create_info(VkFormat format, VkImageUsageFlags usageFlags, VkExtent3D extent)
{
VkImageCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
info.pNext = nullptr;
info.imageType = VK_IMAGE_TYPE_2D;
info.format = format;
info.extent = extent;
info.mipLevels = 1;
info.arrayLayers = 1;
//for MSAA. We will not be using it by default, so use 1 sample per pixel.
info.samples = VK_SAMPLE_COUNT_1_BIT;
//optimal tiling: let the driver choose the best GPU memory layout for the image
info.tiling = VK_IMAGE_TILING_OPTIMAL;
info.usage = usageFlags;
return info;
}
VkImageViewCreateInfo vkinit::imageview_create_info(VkFormat format, VkImage image, VkImageAspectFlags aspectFlags)
{
// build an image-view for the image to use for rendering
VkImageViewCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
info.pNext = nullptr;
info.viewType = VK_IMAGE_VIEW_TYPE_2D;
info.image = image;
info.format = format;
info.subresourceRange.baseMipLevel = 0;
info.subresourceRange.levelCount = 1;
info.subresourceRange.baseArrayLayer = 0;
info.subresourceRange.layerCount = 1;
info.subresourceRange.aspectMask = aspectFlags;
return info;
}
//< image_set
VkPipelineLayoutCreateInfo vkinit::pipeline_layout_create_info()
{
VkPipelineLayoutCreateInfo info{};
info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
info.pNext = nullptr;
// empty defaults
info.flags = 0;
info.setLayoutCount = 0;
info.pSetLayouts = nullptr;
info.pushConstantRangeCount = 0;
info.pPushConstantRanges = nullptr;
return info;
}
VkPipelineShaderStageCreateInfo vkinit::pipeline_shader_stage_create_info(VkShaderStageFlagBits stage,
VkShaderModule shaderModule,
const char *entry)
{
VkPipelineShaderStageCreateInfo info{};
info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
info.pNext = nullptr;
// shader stage
info.stage = stage;
// module containing the code for this shader stage
info.module = shaderModule;
// the entry point of the shader
info.pName = entry;
return info;
}

71
src/core/vk_initializers.h Normal file

@@ -0,0 +1,71 @@
// vk_initializers.h : Include file for standard system include files,
// or project specific include files.
#pragma once
#include <core/vk_types.h>
namespace vkinit
{
//> init_cmd
VkCommandPoolCreateInfo command_pool_create_info(uint32_t queueFamilyIndex, VkCommandPoolCreateFlags flags = 0);
VkCommandBufferAllocateInfo command_buffer_allocate_info(VkCommandPool pool, uint32_t count = 1);
//< init_cmd
VkCommandBufferBeginInfo command_buffer_begin_info(VkCommandBufferUsageFlags flags = 0);
VkCommandBufferSubmitInfo command_buffer_submit_info(VkCommandBuffer cmd);
VkFenceCreateInfo fence_create_info(VkFenceCreateFlags flags = 0);
VkSemaphoreCreateInfo semaphore_create_info(VkSemaphoreCreateFlags flags = 0);
VkSubmitInfo2 submit_info(VkCommandBufferSubmitInfo *cmd, VkSemaphoreSubmitInfo *signalSemaphoreInfo,
VkSemaphoreSubmitInfo *waitSemaphoreInfo);
VkPresentInfoKHR present_info();
VkRenderingAttachmentInfo attachment_info(VkImageView view, VkClearValue *clear,
VkImageLayout layout /*= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL*/);
VkRenderingAttachmentInfo depth_attachment_info(VkImageView view,
VkImageLayout layout
/*= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL*/);
VkRenderingInfo rendering_info(VkExtent2D renderExtent, VkRenderingAttachmentInfo *colorAttachment,
VkRenderingAttachmentInfo *depthAttachment);
VkRenderingInfo rendering_info_multi(VkExtent2D renderExtent, uint32_t colorCount,
VkRenderingAttachmentInfo *colorAttachments,
VkRenderingAttachmentInfo *depthAttachment);
VkImageSubresourceRange image_subresource_range(VkImageAspectFlags aspectMask);
VkSemaphoreSubmitInfo semaphore_submit_info(VkPipelineStageFlags2 stageMask, VkSemaphore semaphore);
VkDescriptorSetLayoutBinding descriptorset_layout_binding(VkDescriptorType type, VkShaderStageFlags stageFlags,
uint32_t binding);
VkDescriptorSetLayoutCreateInfo descriptorset_layout_create_info(VkDescriptorSetLayoutBinding *bindings,
uint32_t bindingCount);
VkWriteDescriptorSet write_descriptor_image(VkDescriptorType type, VkDescriptorSet dstSet,
VkDescriptorImageInfo *imageInfo, uint32_t binding);
VkWriteDescriptorSet write_descriptor_buffer(VkDescriptorType type, VkDescriptorSet dstSet,
VkDescriptorBufferInfo *bufferInfo, uint32_t binding);
VkDescriptorBufferInfo buffer_info(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize range);
VkImageCreateInfo image_create_info(VkFormat format, VkImageUsageFlags usageFlags, VkExtent3D extent);
VkImageViewCreateInfo imageview_create_info(VkFormat format, VkImage image, VkImageAspectFlags aspectFlags);
VkPipelineLayoutCreateInfo pipeline_layout_create_info();
VkPipelineShaderStageCreateInfo pipeline_shader_stage_create_info(VkShaderStageFlagBits stage,
VkShaderModule shaderModule,
const char *entry = "main");
} // namespace vkinit

308
src/core/vk_pipeline_manager.cpp Normal file

@@ -0,0 +1,308 @@
#include <core/vk_pipeline_manager.h>
#include <core/engine_context.h>
#include <core/vk_initializers.h>
#include <render/vk_pipelines.h>
#include <vk_device.h>
#include <filesystem>
PipelineManager::~PipelineManager()
{
cleanup();
}
void PipelineManager::init(EngineContext *ctx)
{
_context = ctx;
}
void PipelineManager::cleanup()
{
for (auto &kv: _graphicsPipelines)
{
destroyGraphics(kv.second);
}
_graphicsPipelines.clear();
_context = nullptr;
}
bool PipelineManager::registerGraphics(const std::string &name, const GraphicsPipelineCreateInfo &info)
{
if (! _context || !_context->getDevice()) return false;
auto it = _graphicsPipelines.find(name);
if (it != _graphicsPipelines.end())
{
fmt::println("Graphics pipeline '{}' already exists", name);
return false;
}
GraphicsPipelineRecord rec{};
rec.spec = info;
if (!buildGraphics(rec))
{
destroyGraphics(rec);
return false;
}
_graphicsPipelines.emplace(name, std::move(rec));
return true;
}
void PipelineManager::unregisterGraphics(const std::string &name)
{
auto it = _graphicsPipelines.find(name);
if (it == _graphicsPipelines.end()) return;
destroyGraphics(it->second);
_graphicsPipelines.erase(it);
}
bool PipelineManager::getGraphics(const std::string &name, VkPipeline &pipeline, VkPipelineLayout &layout) const
{
auto it = _graphicsPipelines.find(name);
if (it == _graphicsPipelines.end()) return false;
pipeline = it->second.pipeline;
layout = it->second.layout;
return pipeline != VK_NULL_HANDLE && layout != VK_NULL_HANDLE;
}
bool PipelineManager::getMaterialPipeline(const std::string &name, MaterialPipeline &out) const
{
VkPipeline p{}; VkPipelineLayout l{};
if (!getGraphics(name, p, l)) return false;
out.pipeline = p;
out.layout = l;
return true;
}
void PipelineManager::hotReloadChanged()
{
if (!_context || !_context->getDevice()) return;
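// Compare each shader's on-disk timestamp with the one captured at build time; on change,
// rebuild into a temporary record and only swap it in (destroying the old one) if the rebuild succeeds.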
for (auto &kv: _graphicsPipelines)
{
auto &rec = kv.second;
try
{
bool needReload = false;
if (!rec.spec.vertexShaderPath.empty())
{
auto t = std::filesystem::last_write_time(rec.spec.vertexShaderPath);
if (rec.vertTime != std::filesystem::file_time_type{} && t != rec.vertTime) needReload = true;
}
if (!rec.spec.fragmentShaderPath.empty())
{
auto t = std::filesystem::last_write_time(rec.spec.fragmentShaderPath);
if (rec.fragTime != std::filesystem::file_time_type{} && t != rec.fragTime) needReload = true;
}
if (needReload)
{
GraphicsPipelineRecord fresh = rec;
fresh.pipeline = VK_NULL_HANDLE;
fresh.layout = VK_NULL_HANDLE;
if (buildGraphics(fresh))
{
destroyGraphics(rec);
rec = std::move(fresh);
fmt::println("Reloaded graphics pipeline '{}'", kv.first);
}
}
}
catch (const std::exception &)
{
// ignore hot-reload errors to avoid spamming
}
}
}
void PipelineManager::debug_get_graphics(std::vector<GraphicsPipelineDebugInfo> &out) const
{
out.clear();
out.reserve(_graphicsPipelines.size());
for (const auto &kv : _graphicsPipelines)
{
const auto &rec = kv.second;
GraphicsPipelineDebugInfo info{};
info.name = kv.first;
info.vertexShaderPath = rec.spec.vertexShaderPath;
info.fragmentShaderPath = rec.spec.fragmentShaderPath;
info.valid = (rec.pipeline != VK_NULL_HANDLE) && (rec.layout != VK_NULL_HANDLE);
out.push_back(std::move(info));
}
}
bool PipelineManager::buildGraphics(GraphicsPipelineRecord &rec) const
{
VkShaderModule vert = VK_NULL_HANDLE;
VkShaderModule frag = VK_NULL_HANDLE;
if (!rec.spec.vertexShaderPath.empty())
{
if (!vkutil::load_shader_module(rec.spec.vertexShaderPath.c_str(), _context->getDevice()->device(), &vert))
{
fmt::println("Failed to load vertex shader: {}", rec.spec.vertexShaderPath);
return false;
}
}
if (!rec.spec.fragmentShaderPath.empty())
{
if (!vkutil::load_shader_module(rec.spec.fragmentShaderPath.c_str(), _context->getDevice()->device(), &frag))
{
if (vert != VK_NULL_HANDLE) vkDestroyShaderModule(_context->getDevice()->device(), vert, nullptr);
fmt::println("Failed to load fragment shader: {}", rec.spec.fragmentShaderPath);
return false;
}
}
VkPipelineLayoutCreateInfo layoutInfo = vkinit::pipeline_layout_create_info();
layoutInfo.setLayoutCount = static_cast<uint32_t>(rec.spec.setLayouts.size());
layoutInfo.pSetLayouts = rec.spec.setLayouts.empty() ? nullptr : rec.spec.setLayouts.data();
layoutInfo.pushConstantRangeCount = static_cast<uint32_t>(rec.spec.pushConstants.size());
layoutInfo.pPushConstantRanges = rec.spec.pushConstants.empty() ? nullptr : rec.spec.pushConstants.data();
VK_CHECK(vkCreatePipelineLayout(_context->getDevice()->device(), &layoutInfo, nullptr, &rec.layout));
PipelineBuilder builder;
if (vert != VK_NULL_HANDLE || frag != VK_NULL_HANDLE)
{
builder.set_shaders(vert, frag);
}
if (rec.spec.configure) rec.spec.configure(builder);
builder._pipelineLayout = rec.layout;
rec.pipeline = builder.build_pipeline(_context->getDevice()->device());
if (vert != VK_NULL_HANDLE)
vkDestroyShaderModule(_context->getDevice()->device(), vert, nullptr);
if (frag != VK_NULL_HANDLE)
vkDestroyShaderModule(_context->getDevice()->device(), frag, nullptr);
if (rec.pipeline == VK_NULL_HANDLE)
{
vkDestroyPipelineLayout(_context->getDevice()->device(), rec.layout, nullptr);
rec.layout = VK_NULL_HANDLE;
return false;
}
// Record timestamps for hot reload
try
{
if (!rec.spec.vertexShaderPath.empty())
rec.vertTime = std::filesystem::last_write_time(rec.spec.vertexShaderPath);
if (!rec.spec.fragmentShaderPath.empty())
rec.fragTime = std::filesystem::last_write_time(rec.spec.fragmentShaderPath);
}
catch (const std::exception &)
{
// ignore timestamp errors
}
return true;
}
void PipelineManager::destroyGraphics(GraphicsPipelineRecord &rec)
{
if (!_context || !_context->getDevice()) return;
if (rec.pipeline != VK_NULL_HANDLE)
{
vkDestroyPipeline(_context->getDevice()->device(), rec.pipeline, nullptr);
rec.pipeline = VK_NULL_HANDLE;
}
if (rec.layout != VK_NULL_HANDLE)
{
vkDestroyPipelineLayout(_context->getDevice()->device(), rec.layout, nullptr);
rec.layout = VK_NULL_HANDLE;
}
}
// --- Compute forwarding API ---
bool PipelineManager::createComputePipeline(const std::string &name, const ComputePipelineCreateInfo &info)
{
if (!_context || !_context->compute) return false;
return _context->compute->registerPipeline(name, info);
}
void PipelineManager::destroyComputePipeline(const std::string &name)
{
if (!_context || !_context->compute) return;
_context->compute->unregisterPipeline(name);
}
bool PipelineManager::hasComputePipeline(const std::string &name) const
{
if (!_context || !_context->compute) return false;
return _context->compute->hasPipeline(name);
}
void PipelineManager::dispatchCompute(VkCommandBuffer cmd, const std::string &name, const ComputeDispatchInfo &info)
{
if (!_context || !_context->compute) return;
_context->compute->dispatch(cmd, name, info);
}
void PipelineManager::dispatchComputeImmediate(const std::string &name, const ComputeDispatchInfo &info)
{
if (!_context || !_context->compute) return;
_context->compute->dispatchImmediate(name, info);
}
bool PipelineManager::createComputeInstance(const std::string &instanceName, const std::string &pipelineName)
{
if (!_context || !_context->compute) return false;
return _context->compute->createInstance(instanceName, pipelineName);
}
void PipelineManager::destroyComputeInstance(const std::string &instanceName)
{
if (!_context || !_context->compute) return;
_context->compute->destroyInstance(instanceName);
}
bool PipelineManager::setComputeInstanceStorageImage(const std::string &instanceName, uint32_t binding, VkImageView view,
VkImageLayout layout)
{
if (!_context || !_context->compute) return false;
return _context->compute->setInstanceStorageImage(instanceName, binding, view, layout);
}
bool PipelineManager::setComputeInstanceSampledImage(const std::string &instanceName, uint32_t binding, VkImageView view,
VkSampler sampler, VkImageLayout layout)
{
if (!_context || !_context->compute) return false;
return _context->compute->setInstanceSampledImage(instanceName, binding, view, sampler, layout);
}
bool PipelineManager::setComputeInstanceBuffer(const std::string &instanceName, uint32_t binding, VkBuffer buffer,
VkDeviceSize size, VkDescriptorType type, VkDeviceSize offset)
{
if (!_context || !_context->compute) return false;
return _context->compute->setInstanceBuffer(instanceName, binding, buffer, size, type, offset);
}
AllocatedImage PipelineManager::createAndBindComputeStorageImage(const std::string &instanceName, uint32_t binding,
VkExtent3D extent, VkFormat format,
VkImageLayout layout, VkImageUsageFlags usage)
{
if (!_context || !_context->compute) return {};
return _context->compute->createAndBindStorageImage(instanceName, binding, extent, format, layout, usage);
}
AllocatedBuffer PipelineManager::createAndBindComputeStorageBuffer(const std::string &instanceName, uint32_t binding,
VkDeviceSize size, VkBufferUsageFlags usage,
VmaMemoryUsage memUsage)
{
if (!_context || !_context->compute) return {};
return _context->compute->createAndBindStorageBuffer(instanceName, binding, size, usage, memUsage);
}
void PipelineManager::dispatchComputeInstance(VkCommandBuffer cmd, const std::string &instanceName,
const ComputeDispatchInfo &info)
{
if (!_context || !_context->compute) return;
_context->compute->dispatchInstance(cmd, instanceName, info);
}

128
src/core/vk_pipeline_manager.h Normal file

@@ -0,0 +1,128 @@
#pragma once
#include <core/vk_types.h>
#include <render/vk_pipelines.h>
#include <compute/vk_compute.h>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>
#include <filesystem>
class EngineContext;
struct GraphicsPipelineCreateInfo
{
std::string vertexShaderPath;
std::string fragmentShaderPath;
std::vector<VkDescriptorSetLayout> setLayouts;
std::vector<VkPushConstantRange> pushConstants;
// This function MUST set things like topology, rasterization, depth/blend state
// and color/depth attachment formats on the builder.
std::function<void(PipelineBuilder &)> configure;
};
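// Example registration (a sketch, not taken from this repo's call sites; the PipelineBuilder
// setters below follow the vkguide-style builder and are assumptions, as are the shader paths
// and descriptor-set layouts):
//
//   GraphicsPipelineCreateInfo info{};
//   info.vertexShaderPath   = "shaders/mesh.vert.spv";
//   info.fragmentShaderPath = "shaders/mesh.frag.spv";
//   info.setLayouts         = {sceneDataLayout, materialLayout};
//   info.configure = [&](PipelineBuilder &b) {
//       b.set_input_topology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
//       b.set_polygon_mode(VK_POLYGON_MODE_FILL);
//       b.set_cull_mode(VK_CULL_MODE_BACK_BIT, VK_FRONT_FACE_COUNTER_CLOCKWISE);
//       b.set_multisampling_none();
//       b.disable_blending();
//       b.enable_depthtest(true, VK_COMPARE_OP_GREATER_OR_EQUAL); // reverse-Z
//       b.set_color_attachment_format(drawImageFormat);
//       b.set_depth_format(depthImageFormat);
//   };
//   pipelineManager->createGraphicsPipeline("mesh.opaque", info);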
class PipelineManager
{
public:
PipelineManager() = default;
~PipelineManager();
void init(EngineContext *ctx);
void cleanup();
// Register and build a graphics pipeline under a unique name
bool registerGraphics(const std::string &name, const GraphicsPipelineCreateInfo &info);
// Convenience alias for registerGraphics to match desired API
bool createGraphicsPipeline(const std::string &name, const GraphicsPipelineCreateInfo &info)
{
return registerGraphics(name, info);
}
// Compute wrappers (forward to ComputeManager for a unified API)
bool createComputePipeline(const std::string &name, const ComputePipelineCreateInfo &info);
void destroyComputePipeline(const std::string &name);
bool hasComputePipeline(const std::string &name) const;
void dispatchCompute(VkCommandBuffer cmd, const std::string &name, const ComputeDispatchInfo &info);
void dispatchComputeImmediate(const std::string &name, const ComputeDispatchInfo &info);
// Persistent compute instances (forwarded to ComputeManager)
bool createComputeInstance(const std::string &instanceName, const std::string &pipelineName);
void destroyComputeInstance(const std::string &instanceName);
bool setComputeInstanceStorageImage(const std::string &instanceName, uint32_t binding, VkImageView view,
VkImageLayout layout = VK_IMAGE_LAYOUT_GENERAL);
bool setComputeInstanceSampledImage(const std::string &instanceName, uint32_t binding, VkImageView view,
VkSampler sampler,
VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
bool setComputeInstanceBuffer(const std::string &instanceName, uint32_t binding, VkBuffer buffer, VkDeviceSize size,
VkDescriptorType type, VkDeviceSize offset = 0);
AllocatedImage createAndBindComputeStorageImage(const std::string &instanceName, uint32_t binding,
VkExtent3D extent,
VkFormat format,
VkImageLayout layout = VK_IMAGE_LAYOUT_GENERAL,
VkImageUsageFlags usage =
VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
AllocatedBuffer createAndBindComputeStorageBuffer(const std::string &instanceName, uint32_t binding,
VkDeviceSize size,
VkBufferUsageFlags usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
VmaMemoryUsage memUsage = VMA_MEMORY_USAGE_GPU_ONLY);
void dispatchComputeInstance(VkCommandBuffer cmd, const std::string &instanceName, const ComputeDispatchInfo &info);
// Remove and destroy a graphics pipeline
void unregisterGraphics(const std::string &name);
// Get pipeline handles for binding
bool getGraphics(const std::string &name, VkPipeline &pipeline, VkPipelineLayout &layout) const;
// Convenience to interop with MaterialInstance
bool getMaterialPipeline(const std::string &name, MaterialPipeline &out) const;
// Rebuild pipelines whose shaders changed on disk
void hotReloadChanged();
// Debug helpers (graphics only)
struct GraphicsPipelineDebugInfo
{
std::string name;
std::string vertexShaderPath;
std::string fragmentShaderPath;
bool valid = false;
};
void debug_get_graphics(std::vector<GraphicsPipelineDebugInfo>& out) const;
private:
struct GraphicsPipelineRecord
{
VkPipeline pipeline = VK_NULL_HANDLE;
VkPipelineLayout layout = VK_NULL_HANDLE;
GraphicsPipelineCreateInfo spec;
std::filesystem::file_time_type vertTime{};
std::filesystem::file_time_type fragTime{};
};
EngineContext *_context = nullptr;
std::unordered_map<std::string, GraphicsPipelineRecord> _graphicsPipelines;
bool buildGraphics(GraphicsPipelineRecord &rec) const;
void destroyGraphics(GraphicsPipelineRecord &rec);
};

486
src/core/vk_resource.cpp Normal file

@@ -0,0 +1,486 @@
#include "vk_resource.h"
#include "vk_device.h"
#include "vk_images.h"
#include "vk_initializers.h"
#include "vk_mem_alloc.h"
#include <render/rg_graph.h>
#include <render/rg_builder.h>
#include <render/rg_resources.h>
#include "frame_resources.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
void ResourceManager::init(DeviceManager *deviceManager)
{
_deviceManager = deviceManager;
VkCommandPoolCreateInfo commandPoolInfo = vkinit::command_pool_create_info(
_deviceManager->graphicsQueueFamily(),
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT
);
VK_CHECK(vkCreateCommandPool(_deviceManager->device(), &commandPoolInfo, nullptr, &_immCommandPool));
VkCommandBufferAllocateInfo cmdAllocInfo = vkinit::command_buffer_allocate_info(_immCommandPool, 1);
VK_CHECK(vkAllocateCommandBuffers(_deviceManager->device(), &cmdAllocInfo, &_immCommandBuffer));
VkFenceCreateInfo fenceCreateInfo = vkinit::fence_create_info(VK_FENCE_CREATE_SIGNALED_BIT);
VK_CHECK(vkCreateFence(_deviceManager->device(), &fenceCreateInfo, nullptr, &_immFence));
_deletionQueue.push_function([=]() {
vkDestroyCommandPool(_deviceManager->device(), _immCommandPool, nullptr);
vkDestroyFence(_deviceManager->device(), _immFence, nullptr);
});
}
AllocatedBuffer ResourceManager::create_buffer(size_t allocSize, VkBufferUsageFlags usage,
VmaMemoryUsage memoryUsage) const
{
VkBufferCreateInfo bufferInfo = {.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
bufferInfo.pNext = nullptr;
bufferInfo.size = allocSize;
bufferInfo.usage = usage;
VmaAllocationCreateInfo vmaallocInfo = {};
vmaallocInfo.usage = memoryUsage;
vmaallocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
AllocatedBuffer newBuffer{};
VK_CHECK(vmaCreateBuffer(_deviceManager->allocator(), &bufferInfo, &vmaallocInfo,
&newBuffer.buffer, &newBuffer.allocation, &newBuffer.info));
return newBuffer;
}
void ResourceManager::immediate_submit(std::function<void(VkCommandBuffer)> &&function) const
{
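// One-off GPU work: record into the dedicated immediate command buffer, submit on the graphics
// queue and block on the fence until completion.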
VK_CHECK(vkResetFences(_deviceManager->device(), 1, &_immFence));
VK_CHECK(vkResetCommandBuffer(_immCommandBuffer, 0));
VkCommandBuffer cmd = _immCommandBuffer;
VkCommandBufferBeginInfo cmdBeginInfo = vkinit::command_buffer_begin_info(
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
VK_CHECK(vkBeginCommandBuffer(cmd, &cmdBeginInfo));
function(cmd);
VK_CHECK(vkEndCommandBuffer(cmd));
VkCommandBufferSubmitInfo cmdinfo = vkinit::command_buffer_submit_info(cmd);
VkSubmitInfo2 submit = vkinit::submit_info(&cmdinfo, nullptr, nullptr);
VK_CHECK(vkQueueSubmit2(_deviceManager->graphicsQueue(), 1, &submit, _immFence));
VK_CHECK(vkWaitForFences(_deviceManager->device(), 1, &_immFence, true, 9999999999)); // ~10 second timeout, in nanoseconds
}
void ResourceManager::destroy_buffer(const AllocatedBuffer &buffer) const
{
vmaDestroyBuffer(_deviceManager->allocator(), buffer.buffer, buffer.allocation);
}
void ResourceManager::cleanup()
{
fmt::print("ResourceManager::cleanup()\n");
clear_pending_uploads();
_deletionQueue.flush();
}
AllocatedImage ResourceManager::create_image(VkExtent3D size, VkFormat format, VkImageUsageFlags usage,
bool mipmapped) const
{
AllocatedImage newImage{};
newImage.imageFormat = format;
newImage.imageExtent = size;
VkImageCreateInfo img_info = vkinit::image_create_info(format, usage, size);
if (mipmapped)
{
img_info.mipLevels = static_cast<uint32_t>(std::floor(std::log2(std::max(size.width, size.height)))) + 1;
}
// always allocate images in device-local GPU memory
VmaAllocationCreateInfo allocinfo = {};
allocinfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocinfo.requiredFlags = static_cast<VkMemoryPropertyFlags>(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
// allocate and create the image
VK_CHECK(
vmaCreateImage(_deviceManager->allocator(), &img_info, &allocinfo, &newImage.image, &newImage.allocation,
nullptr));
// if the format is a depth format, we will need to have it use the correct
// aspect flag
VkImageAspectFlags aspectFlag = VK_IMAGE_ASPECT_COLOR_BIT;
if (format == VK_FORMAT_D32_SFLOAT)
{
aspectFlag = VK_IMAGE_ASPECT_DEPTH_BIT;
}
// build an image-view for the image
VkImageViewCreateInfo view_info = vkinit::imageview_create_info(format, newImage.image, aspectFlag);
view_info.subresourceRange.levelCount = img_info.mipLevels;
VK_CHECK(vkCreateImageView(_deviceManager->device(), &view_info, nullptr, &newImage.imageView));
return newImage;
}
AllocatedImage ResourceManager::create_image(const void *data, VkExtent3D size, VkFormat format,
VkImageUsageFlags usage,
bool mipmapped)
{
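// staging size assumes tightly packed pixels at 4 bytes each (e.g. RGBA8)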
size_t data_size = size.depth * size.width * size.height * 4;
AllocatedBuffer uploadbuffer = create_buffer(data_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VMA_MEMORY_USAGE_CPU_TO_GPU);
memcpy(uploadbuffer.info.pMappedData, data, data_size);
vmaFlushAllocation(_deviceManager->allocator(), uploadbuffer.allocation, 0, data_size);
AllocatedImage new_image = create_image(size, format,
usage | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
mipmapped);
PendingImageUpload pending{};
pending.staging = uploadbuffer;
pending.image = new_image.image;
pending.extent = size;
pending.format = format;
pending.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
pending.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
pending.generateMips = mipmapped;
_pendingImageUploads.push_back(std::move(pending));
if (!_deferUploads)
{
process_queued_uploads_immediate();
}
return new_image;
}
void ResourceManager::destroy_image(const AllocatedImage &img) const
{
vkDestroyImageView(_deviceManager->device(), img.imageView, nullptr);
vmaDestroyImage(_deviceManager->allocator(), img.image, img.allocation);
}
GPUMeshBuffers ResourceManager::uploadMesh(std::span<uint32_t> indices, std::span<Vertex> vertices)
{
const size_t vertexBufferSize = vertices.size() * sizeof(Vertex);
const size_t indexBufferSize = indices.size() * sizeof(uint32_t);
GPUMeshBuffers newSurface{};
//create vertex buffer
newSurface.vertexBuffer = create_buffer(vertexBufferSize,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
VMA_MEMORY_USAGE_GPU_ONLY);
//find the address of the vertex buffer
VkBufferDeviceAddressInfo deviceAddressInfo{
.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, .buffer = newSurface.vertexBuffer.buffer
};
newSurface.vertexBufferAddress = vkGetBufferDeviceAddress(_deviceManager->device(), &deviceAddressInfo);
//create index buffer
newSurface.indexBuffer = create_buffer(indexBufferSize,
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VMA_MEMORY_USAGE_GPU_ONLY);
AllocatedBuffer staging = create_buffer(vertexBufferSize + indexBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VMA_MEMORY_USAGE_CPU_ONLY);
VmaAllocationInfo allocInfo{};
vmaGetAllocationInfo(_deviceManager->allocator(), staging.allocation, &allocInfo);
void *data = allocInfo.pMappedData;
// copy vertex/index data to staging (host visible)
memcpy(data, vertices.data(), vertexBufferSize);
memcpy((char *) data + vertexBufferSize, indices.data(), indexBufferSize);
// Ensure visibility on non-coherent memory before GPU copies
vmaFlushAllocation(_deviceManager->allocator(), staging.allocation, 0, vertexBufferSize + indexBufferSize);
PendingBufferUpload pending{};
pending.staging = staging;
pending.copies.push_back(BufferCopyRegion{
.destination = newSurface.vertexBuffer.buffer,
.dstOffset = 0,
.size = vertexBufferSize,
.stagingOffset = 0,
});
pending.copies.push_back(BufferCopyRegion{
.destination = newSurface.indexBuffer.buffer,
.dstOffset = 0,
.size = indexBufferSize,
.stagingOffset = vertexBufferSize,
});
_pendingBufferUploads.push_back(std::move(pending));
if (!_deferUploads)
{
process_queued_uploads_immediate();
}
return newSurface;
}
bool ResourceManager::has_pending_uploads() const
{
return !_pendingBufferUploads.empty() || !_pendingImageUploads.empty();
}
void ResourceManager::clear_pending_uploads()
{
for (auto &upload : _pendingBufferUploads)
{
destroy_buffer(upload.staging);
}
for (auto &upload : _pendingImageUploads)
{
destroy_buffer(upload.staging);
}
_pendingBufferUploads.clear();
_pendingImageUploads.clear();
}
void ResourceManager::process_queued_uploads_immediate()
{
if (!has_pending_uploads()) return;
immediate_submit([&](VkCommandBuffer cmd) {
for (auto &bufferUpload : _pendingBufferUploads)
{
for (const auto &copy : bufferUpload.copies)
{
VkBufferCopy region{};
region.srcOffset = copy.stagingOffset;
region.dstOffset = copy.dstOffset;
region.size = copy.size;
vkCmdCopyBuffer(cmd, bufferUpload.staging.buffer, copy.destination, 1, &region);
}
}
for (auto &imageUpload : _pendingImageUploads)
{
vkutil::transition_image(cmd, imageUpload.image, imageUpload.initialLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VkBufferImageCopy copyRegion = {};
copyRegion.bufferOffset = 0;
copyRegion.bufferRowLength = 0;
copyRegion.bufferImageHeight = 0;
copyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.imageSubresource.mipLevel = 0;
copyRegion.imageSubresource.baseArrayLayer = 0;
copyRegion.imageSubresource.layerCount = 1;
copyRegion.imageExtent = imageUpload.extent;
vkCmdCopyBufferToImage(cmd,
imageUpload.staging.buffer,
imageUpload.image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1,
&copyRegion);
if (imageUpload.generateMips)
{
vkutil::generate_mipmaps(cmd, imageUpload.image,
VkExtent2D{imageUpload.extent.width, imageUpload.extent.height});
}
else
{
vkutil::transition_image(cmd, imageUpload.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
imageUpload.finalLayout);
}
}
});
clear_pending_uploads();
}
void ResourceManager::register_upload_pass(RenderGraph &graph, FrameResources &frame)
{
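// Deferred upload path: take ownership of the queued uploads, import their staging and destination
// resources into the render graph, record one Transfer pass that performs all copies, and push
// staging-buffer destruction onto this frame's deletion queue.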
if (_pendingBufferUploads.empty() && _pendingImageUploads.empty()) return;
auto bufferUploads = std::make_shared<std::vector<PendingBufferUpload>>(std::move(_pendingBufferUploads));
auto imageUploads = std::make_shared<std::vector<PendingImageUpload>>(std::move(_pendingImageUploads));
struct BufferBinding
{
size_t uploadIndex{};
RGBufferHandle stagingHandle{};
std::vector<RGBufferHandle> destinationHandles;
};
struct ImageBinding
{
size_t uploadIndex{};
RGBufferHandle stagingHandle{};
RGImageHandle imageHandle{};
};
auto bufferBindings = std::make_shared<std::vector<BufferBinding>>();
auto imageBindings = std::make_shared<std::vector<ImageBinding>>();
bufferBindings->reserve(bufferUploads->size());
imageBindings->reserve(imageUploads->size());
std::unordered_map<VkBuffer, RGBufferHandle> destBufferHandles;
std::unordered_map<VkImage, RGImageHandle> imageHandles;
for (size_t i = 0; i < bufferUploads->size(); ++i)
{
const auto &upload = bufferUploads->at(i);
BufferBinding binding{};
binding.uploadIndex = i;
RGImportedBufferDesc stagingDesc{};
stagingDesc.name = std::string("upload.staging.buffer.") + std::to_string(i);
stagingDesc.buffer = upload.staging.buffer;
stagingDesc.size = upload.staging.info.size;
stagingDesc.currentStage = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
stagingDesc.currentAccess = 0;
binding.stagingHandle = graph.import_buffer(stagingDesc);
binding.destinationHandles.reserve(upload.copies.size());
for (const auto &copy : upload.copies)
{
RGBufferHandle handle{};
auto it = destBufferHandles.find(copy.destination);
if (it == destBufferHandles.end())
{
RGImportedBufferDesc dstDesc{};
dstDesc.name = std::string("upload.dst.buffer.") + std::to_string(destBufferHandles.size());
dstDesc.buffer = copy.destination;
dstDesc.size = copy.dstOffset + copy.size;
dstDesc.currentStage = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
dstDesc.currentAccess = 0;
handle = graph.import_buffer(dstDesc);
destBufferHandles.emplace(copy.destination, handle);
}
else
{
handle = it->second;
}
binding.destinationHandles.push_back(handle);
}
bufferBindings->push_back(std::move(binding));
}
for (size_t i = 0; i < imageUploads->size(); ++i)
{
const auto &upload = imageUploads->at(i);
ImageBinding binding{};
binding.uploadIndex = i;
RGImportedBufferDesc stagingDesc{};
stagingDesc.name = std::string("upload.staging.image.") + std::to_string(i);
stagingDesc.buffer = upload.staging.buffer;
stagingDesc.size = upload.staging.info.size;
stagingDesc.currentStage = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT;
stagingDesc.currentAccess = 0;
binding.stagingHandle = graph.import_buffer(stagingDesc);
auto it = imageHandles.find(upload.image);
if (it == imageHandles.end())
{
RGImportedImageDesc imgDesc{};
imgDesc.name = std::string("upload.image.") + std::to_string(imageHandles.size());
imgDesc.image = upload.image;
imgDesc.imageView = VK_NULL_HANDLE;
imgDesc.format = upload.format;
imgDesc.extent = {upload.extent.width, upload.extent.height};
imgDesc.currentLayout = upload.initialLayout;
binding.imageHandle = graph.import_image(imgDesc);
imageHandles.emplace(upload.image, binding.imageHandle);
}
else
{
binding.imageHandle = it->second;
}
imageBindings->push_back(std::move(binding));
}
graph.add_pass("ResourceUploads", RGPassType::Transfer,
[bufferBindings, imageBindings](RGPassBuilder &builder, EngineContext *)
{
for (const auto &binding : *bufferBindings)
{
builder.read_buffer(binding.stagingHandle, RGBufferUsage::TransferSrc);
for (auto handle : binding.destinationHandles)
{
builder.write_buffer(handle, RGBufferUsage::TransferDst);
}
}
for (const auto &binding : *imageBindings)
{
builder.read_buffer(binding.stagingHandle, RGBufferUsage::TransferSrc);
builder.write(binding.imageHandle, RGImageUsage::TransferDst);
}
},
[bufferUploads, imageUploads, bufferBindings, imageBindings, this](VkCommandBuffer cmd, const RGPassResources &res, EngineContext *)
{
for (const auto &binding : *bufferBindings)
{
const auto &upload = bufferUploads->at(binding.uploadIndex);
VkBuffer staging = res.buffer(binding.stagingHandle);
for (size_t copyIndex = 0; copyIndex < upload.copies.size(); ++copyIndex)
{
const auto &copy = upload.copies[copyIndex];
VkBuffer destination = res.buffer(binding.destinationHandles[copyIndex]);
VkBufferCopy region{};
region.srcOffset = copy.stagingOffset;
region.dstOffset = copy.dstOffset;
region.size = copy.size;
vkCmdCopyBuffer(cmd, staging, destination, 1, &region);
}
}
for (const auto &binding : *imageBindings)
{
const auto &upload = imageUploads->at(binding.uploadIndex);
VkBuffer staging = res.buffer(binding.stagingHandle);
VkImage image = res.image(binding.imageHandle);
VkBufferImageCopy region{};
region.bufferOffset = 0;
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.mipLevel = 0;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageExtent = upload.extent;
vkCmdCopyBufferToImage(cmd, staging, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
if (upload.generateMips)
{
vkutil::generate_mipmaps(cmd, image, VkExtent2D{upload.extent.width, upload.extent.height});
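// generate_mipmaps leaves the image in SHADER_READ_ONLY; move it back to TRANSFER_DST so the actual
// layout matches the TransferDst write this pass declared to the graph.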
vkutil::transition_image(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
}
});
frame._deletionQueue.push_function([buffers = bufferUploads, images = imageUploads, this]()
{
for (const auto &upload : *buffers)
{
destroy_buffer(upload.staging);
}
for (const auto &upload : *images)
{
destroy_buffer(upload.staging);
}
});
}

82
src/core/vk_resource.h Normal file

@@ -0,0 +1,82 @@
#pragma once
#include <core/vk_types.h>
#include <functional>
#include <vector>
class DeviceManager;
class RenderGraph;
struct FrameResources;
class ResourceManager
{
public:
struct BufferCopyRegion
{
VkBuffer destination = VK_NULL_HANDLE;
VkDeviceSize dstOffset = 0;
VkDeviceSize size = 0;
VkDeviceSize stagingOffset = 0;
};
struct PendingBufferUpload
{
AllocatedBuffer staging;
std::vector<BufferCopyRegion> copies;
};
struct PendingImageUpload
{
AllocatedBuffer staging;
VkImage image = VK_NULL_HANDLE;
VkExtent3D extent{0, 0, 0};
VkFormat format = VK_FORMAT_UNDEFINED;
VkImageLayout initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageLayout finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
bool generateMips = false;
};
void init(DeviceManager *deviceManager);
void cleanup();
AllocatedBuffer create_buffer(size_t allocSize, VkBufferUsageFlags usage, VmaMemoryUsage memoryUsage) const;
void destroy_buffer(const AllocatedBuffer &buffer) const;
AllocatedImage create_image(VkExtent3D size, VkFormat format, VkImageUsageFlags usage,
bool mipmapped = false) const;
AllocatedImage create_image(const void *data, VkExtent3D size, VkFormat format, VkImageUsageFlags usage,
bool mipmapped = false);
void destroy_image(const AllocatedImage &img) const;
GPUMeshBuffers uploadMesh(std::span<uint32_t> indices, std::span<Vertex> vertices);
void immediate_submit(std::function<void(VkCommandBuffer)> &&function) const;
bool has_pending_uploads() const;
const std::vector<PendingBufferUpload> &pending_buffer_uploads() const { return _pendingBufferUploads; }
const std::vector<PendingImageUpload> &pending_image_uploads() const { return _pendingImageUploads; }
void clear_pending_uploads();
void process_queued_uploads_immediate();
void register_upload_pass(RenderGraph &graph, FrameResources &frame);
void set_deferred_uploads(bool enabled) { _deferUploads = enabled; }
bool deferred_uploads() const { return _deferUploads; }
private:
DeviceManager *_deviceManager = nullptr;
// immediate submit structures
VkFence _immFence = nullptr;
VkCommandBuffer _immCommandBuffer = nullptr;
VkCommandPool _immCommandPool = nullptr;
std::vector<PendingBufferUpload> _pendingBufferUploads;
std::vector<PendingImageUpload> _pendingImageUploads;
bool _deferUploads = false;
DeletionQueue _deletionQueue;
};
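// Illustrative usage sketch (not part of this header): queueing a small texture
// through ResourceManager. With deferred uploads enabled the copy is recorded by
// register_upload_pass() inside the frame's render graph rather than an immediate
// submit. The pixel contents and the 16x16 size are assumptions for the example.
inline AllocatedImage make_checker_texture(ResourceManager &resources)
{
    std::array<uint32_t, 16 * 16> pixels{}; // fill with a checker pattern elsewhere
    resources.set_deferred_uploads(true);   // stage the copy for the next render graph
    return resources.create_image(pixels.data(), VkExtent3D{16, 16, 1},
                                  VK_FORMAT_R8G8B8A8_UNORM,
                                  VK_IMAGE_USAGE_SAMPLED_BIT,
                                  /*mipmapped=*/true);
}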

47
src/core/vk_sampler_manager.cpp Normal file
View File

@@ -0,0 +1,47 @@
#include "vk_sampler_manager.h"
#include "vk_device.h"
void SamplerManager::init(DeviceManager *deviceManager)
{
_deviceManager = deviceManager;
VkSamplerCreateInfo sampl{.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO};
// Sensible, cross-vendor defaults
sampl.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampl.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampl.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampl.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
sampl.minLod = 0.0f;
sampl.maxLod = VK_LOD_CLAMP_NONE;
sampl.mipLodBias = 0.0f;
sampl.anisotropyEnable = VK_FALSE; // set true + maxAnisotropy if feature enabled
sampl.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
sampl.unnormalizedCoordinates = VK_FALSE;
// Nearest defaults
sampl.magFilter = VK_FILTER_NEAREST;
sampl.minFilter = VK_FILTER_NEAREST;
    VK_CHECK(vkCreateSampler(_deviceManager->device(), &sampl, nullptr, &_defaultSamplerNearest));
// Linear defaults
sampl.magFilter = VK_FILTER_LINEAR;
sampl.minFilter = VK_FILTER_LINEAR;
    VK_CHECK(vkCreateSampler(_deviceManager->device(), &sampl, nullptr, &_defaultSamplerLinear));
}
void SamplerManager::cleanup()
{
if (!_deviceManager) return;
if (_defaultSamplerNearest)
{
vkDestroySampler(_deviceManager->device(), _defaultSamplerNearest, nullptr);
_defaultSamplerNearest = VK_NULL_HANDLE;
}
if (_defaultSamplerLinear)
{
vkDestroySampler(_deviceManager->device(), _defaultSamplerLinear, nullptr);
_defaultSamplerLinear = VK_NULL_HANDLE;
}
}
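// Sketch (not part of this file): an anisotropic variant of the linear sampler,
// following the "set true + maxAnisotropy if feature enabled" note above. It assumes
// samplerAnisotropy was enabled at device creation; the DeviceManager accessor names
// mirror the ones used elsewhere in this codebase.
VkSampler create_anisotropic_sampler(DeviceManager *deviceManager)
{
    VkPhysicalDeviceProperties props{};
    vkGetPhysicalDeviceProperties(deviceManager->physicalDevice(), &props);

    VkSamplerCreateInfo info{.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO};
    info.magFilter = VK_FILTER_LINEAR;
    info.minFilter = VK_FILTER_LINEAR;
    info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
    info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
    info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
    info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
    info.maxLod = VK_LOD_CLAMP_NONE;
    info.anisotropyEnable = VK_TRUE;
    info.maxAnisotropy = props.limits.maxSamplerAnisotropy;

    VkSampler sampler = VK_NULL_HANDLE;
    VK_CHECK(vkCreateSampler(deviceManager->device(), &info, nullptr, &sampler));
    return sampler;
}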

22
src/core/vk_sampler_manager.h Normal file
View File

@@ -0,0 +1,22 @@
#pragma once
#include <core/vk_types.h>
class DeviceManager;
class SamplerManager
{
public:
void init(DeviceManager *deviceManager);
void cleanup();
VkSampler defaultLinear() const { return _defaultSamplerLinear; }
VkSampler defaultNearest() const { return _defaultSamplerNearest; }
private:
DeviceManager *_deviceManager = nullptr;
VkSampler _defaultSamplerLinear = VK_NULL_HANDLE;
VkSampler _defaultSamplerNearest = VK_NULL_HANDLE;
};
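// Illustrative sketch (not part of this header): binding the shared linear sampler
// together with a sampled image as a combined image sampler. The descriptor set,
// image view, and binding index 0 are assumptions; only defaultLinear() comes from
// SamplerManager.
inline void write_texture_binding(VkDevice device, VkDescriptorSet set,
                                  VkImageView view, const SamplerManager &samplers)
{
    VkDescriptorImageInfo imageInfo{};
    imageInfo.sampler = samplers.defaultLinear();
    imageInfo.imageView = view;
    imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

    VkWriteDescriptorSet write{.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET};
    write.dstSet = set;
    write.dstBinding = 0;
    write.descriptorCount = 1;
    write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    write.pImageInfo = &imageInfo;

    vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
}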

197
src/core/vk_swapchain.cpp Normal file
View File

@@ -0,0 +1,197 @@
#include "vk_swapchain.h"
#include <SDL_video.h>
#include "vk_device.h"
#include "vk_initializers.h"
#include "vk_resource.h"
void SwapchainManager::init_swapchain()
{
create_swapchain(_windowExtent.width, _windowExtent.height);
    // Create the images used across the frame (draw target, depth, GBuffer),
    // sized to the current window extent, via a local helper lambda.
    // A cleanup lambda is also pushed to _deletionQueue for final shutdown;
    // resize_swapchain() flushes that queue first to destroy the previous
    // resources before recreating them.
auto create_frame_images = [this]() {
VkExtent3D drawImageExtent = { _windowExtent.width, _windowExtent.height, 1 };
// Draw HDR target
_drawImage.imageFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
_drawImage.imageExtent = drawImageExtent;
VkImageUsageFlags drawImageUsages{};
drawImageUsages |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
drawImageUsages |= VK_IMAGE_USAGE_STORAGE_BIT;
drawImageUsages |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
VkImageCreateInfo rimg_info = vkinit::image_create_info(_drawImage.imageFormat, drawImageUsages, drawImageExtent);
VmaAllocationCreateInfo rimg_allocinfo = {};
rimg_allocinfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
rimg_allocinfo.requiredFlags = static_cast<VkMemoryPropertyFlags>(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
vmaCreateImage(_deviceManager->allocator(), &rimg_info, &rimg_allocinfo,
&_drawImage.image, &_drawImage.allocation, nullptr);
VkImageViewCreateInfo rview_info = vkinit::imageview_create_info(_drawImage.imageFormat, _drawImage.image,
VK_IMAGE_ASPECT_COLOR_BIT);
VK_CHECK(vkCreateImageView(_deviceManager->device(), &rview_info, nullptr, &_drawImage.imageView));
// Depth
_depthImage.imageFormat = VK_FORMAT_D32_SFLOAT;
_depthImage.imageExtent = drawImageExtent;
VkImageUsageFlags depthImageUsages{};
depthImageUsages |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
VkImageCreateInfo dimg_info = vkinit::image_create_info(_depthImage.imageFormat, depthImageUsages, drawImageExtent);
vmaCreateImage(_deviceManager->allocator(), &dimg_info, &rimg_allocinfo, &_depthImage.image,
&_depthImage.allocation, nullptr);
VkImageViewCreateInfo dview_info = vkinit::imageview_create_info(_depthImage.imageFormat, _depthImage.image,
VK_IMAGE_ASPECT_DEPTH_BIT);
VK_CHECK(vkCreateImageView(_deviceManager->device(), &dview_info, nullptr, &_depthImage.imageView));
// GBuffer (SRGB not used to keep linear lighting)
_gBufferPosition = _resourceManager->create_image(drawImageExtent, VK_FORMAT_R16G16B16A16_SFLOAT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
_gBufferNormal = _resourceManager->create_image(drawImageExtent, VK_FORMAT_R16G16B16A16_SFLOAT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
_gBufferAlbedo = _resourceManager->create_image(drawImageExtent, VK_FORMAT_R8G8B8A8_UNORM,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
_deletionQueue.push_function([=]() {
vkDestroyImageView(_deviceManager->device(), _drawImage.imageView, nullptr);
vmaDestroyImage(_deviceManager->allocator(), _drawImage.image, _drawImage.allocation);
vkDestroyImageView(_deviceManager->device(), _depthImage.imageView, nullptr);
vmaDestroyImage(_deviceManager->allocator(), _depthImage.image, _depthImage.allocation);
_resourceManager->destroy_image(_gBufferPosition);
_resourceManager->destroy_image(_gBufferNormal);
_resourceManager->destroy_image(_gBufferAlbedo);
});
};
create_frame_images();
}
void SwapchainManager::cleanup()
{
_deletionQueue.flush();
destroy_swapchain();
fmt::print("SwapchainManager::cleanup()\n");
}
void SwapchainManager::create_swapchain(uint32_t width, uint32_t height)
{
vkb::SwapchainBuilder swapchainBuilder{
_deviceManager->physicalDevice(), _deviceManager->device(), _deviceManager->surface()
};
_swapchainImageFormat = VK_FORMAT_B8G8R8A8_UNORM;
vkb::Swapchain vkbSwapchain = swapchainBuilder
//.use_default_format_selection()
.set_desired_format(VkSurfaceFormatKHR{
.format = _swapchainImageFormat, .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR
})
//use vsync present mode
.set_desired_present_mode(VK_PRESENT_MODE_FIFO_KHR)
.set_desired_extent(width, height)
.add_image_usage_flags(VK_IMAGE_USAGE_TRANSFER_DST_BIT)
.build()
.value();
_swapchainExtent = vkbSwapchain.extent;
//store swapchain and its related images
_swapchain = vkbSwapchain.swapchain;
_swapchainImages = vkbSwapchain.get_images().value();
_swapchainImageViews = vkbSwapchain.get_image_views().value();
}
void SwapchainManager::destroy_swapchain() const
{
vkDestroySwapchainKHR(_deviceManager->device(), _swapchain, nullptr);
    for (auto imageView : _swapchainImageViews)
    {
        vkDestroyImageView(_deviceManager->device(), imageView, nullptr);
    }
}
void SwapchainManager::resize_swapchain(struct SDL_Window *window)
{
vkDeviceWaitIdle(_deviceManager->device());
destroy_swapchain();
// Destroy per-frame images before recreating them
_deletionQueue.flush();
int w, h;
SDL_GetWindowSize(window, &w, &h);
_windowExtent.width = w;
_windowExtent.height = h;
create_swapchain(_windowExtent.width, _windowExtent.height);
// Recreate frame images at the new size
// (duplicate the same logic used at init time)
VkExtent3D drawImageExtent = { _windowExtent.width, _windowExtent.height, 1 };
_drawImage.imageFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
_drawImage.imageExtent = drawImageExtent;
VkImageUsageFlags drawImageUsages{};
drawImageUsages |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
drawImageUsages |= VK_IMAGE_USAGE_STORAGE_BIT;
drawImageUsages |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
VkImageCreateInfo rimg_info = vkinit::image_create_info(_drawImage.imageFormat, drawImageUsages, drawImageExtent);
VmaAllocationCreateInfo rimg_allocinfo = {};
rimg_allocinfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
rimg_allocinfo.requiredFlags = static_cast<VkMemoryPropertyFlags>(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
vmaCreateImage(_deviceManager->allocator(), &rimg_info, &rimg_allocinfo, &_drawImage.image, &_drawImage.allocation,
nullptr);
VkImageViewCreateInfo rview_info = vkinit::imageview_create_info(_drawImage.imageFormat, _drawImage.image,
VK_IMAGE_ASPECT_COLOR_BIT);
VK_CHECK(vkCreateImageView(_deviceManager->device(), &rview_info, nullptr, &_drawImage.imageView));
_depthImage.imageFormat = VK_FORMAT_D32_SFLOAT;
_depthImage.imageExtent = drawImageExtent;
VkImageUsageFlags depthImageUsages{};
depthImageUsages |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
VkImageCreateInfo dimg_info = vkinit::image_create_info(_depthImage.imageFormat, depthImageUsages, drawImageExtent);
vmaCreateImage(_deviceManager->allocator(), &dimg_info, &rimg_allocinfo, &_depthImage.image,
&_depthImage.allocation, nullptr);
VkImageViewCreateInfo dview_info = vkinit::imageview_create_info(_depthImage.imageFormat, _depthImage.image,
VK_IMAGE_ASPECT_DEPTH_BIT);
VK_CHECK(vkCreateImageView(_deviceManager->device(), &dview_info, nullptr, &_depthImage.imageView));
_gBufferPosition = _resourceManager->create_image(drawImageExtent, VK_FORMAT_R16G16B16A16_SFLOAT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
_gBufferNormal = _resourceManager->create_image(drawImageExtent, VK_FORMAT_R16G16B16A16_SFLOAT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
_gBufferAlbedo = _resourceManager->create_image(drawImageExtent, VK_FORMAT_R8G8B8A8_UNORM,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
_deletionQueue.push_function([=]() {
vkDestroyImageView(_deviceManager->device(), _drawImage.imageView, nullptr);
vmaDestroyImage(_deviceManager->allocator(), _drawImage.image, _drawImage.allocation);
vkDestroyImageView(_deviceManager->device(), _depthImage.imageView, nullptr);
vmaDestroyImage(_deviceManager->allocator(), _depthImage.image, _depthImage.allocation);
_resourceManager->destroy_image(_gBufferPosition);
_resourceManager->destroy_image(_gBufferNormal);
_resourceManager->destroy_image(_gBufferAlbedo);
});
resize_requested = false;
}
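// Illustrative sketch (not part of this file): how a frame loop typically drives the
// resize path. The engine members (_swapchainManager, _window) and run_frame/draw
// names are assumptions; resize_requested and resize_swapchain come from
// SwapchainManager. draw() is expected to set resize_requested when swapchain
// acquire/present reports VK_ERROR_OUT_OF_DATE_KHR or VK_SUBOPTIMAL_KHR.
void VulkanEngine::run_frame()
{
    if (_swapchainManager.resize_requested)
    {
        // Waits for the device, then rebuilds the swapchain and the frame-sized images.
        _swapchainManager.resize_swapchain(_window);
    }
    draw();
}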

55
src/core/vk_swapchain.h Normal file
View File

@@ -0,0 +1,55 @@
#pragma once
#include <core/vk_types.h>
class ResourceManager;
class DeviceManager;
class SwapchainManager
{
public:
    void init(DeviceManager *deviceManager, ResourceManager *resourceManager)
    {
        _deviceManager = deviceManager;
        _resourceManager = resourceManager;
    }
void cleanup();
void init_swapchain();
void create_swapchain(uint32_t width, uint32_t height);
void destroy_swapchain() const;
void resize_swapchain(struct SDL_Window *window);
VkSwapchainKHR swapchain() const { return _swapchain; }
VkFormat swapchainImageFormat() const { return _swapchainImageFormat; }
VkExtent2D swapchainExtent() const { return _swapchainExtent; }
const std::vector<VkImage> &swapchainImages() const { return _swapchainImages; }
const std::vector<VkImageView> &swapchainImageViews() const { return _swapchainImageViews; }
AllocatedImage drawImage() const { return _drawImage; }
AllocatedImage depthImage() const { return _depthImage; }
AllocatedImage gBufferPosition() const { return _gBufferPosition; }
AllocatedImage gBufferNormal() const { return _gBufferNormal; }
AllocatedImage gBufferAlbedo() const { return _gBufferAlbedo; }
VkExtent2D windowExtent() const { return _windowExtent; }
bool resize_requested{false};
private:
DeviceManager *_deviceManager = nullptr;
ResourceManager* _resourceManager = nullptr;
VkSwapchainKHR _swapchain = nullptr;
VkFormat _swapchainImageFormat = {};
VkExtent2D _swapchainExtent = {};
VkExtent2D _windowExtent{1920, 1080};
std::vector<VkImage> _swapchainImages;
std::vector<VkImageView> _swapchainImageViews;
AllocatedImage _drawImage = {};
AllocatedImage _depthImage = {};
AllocatedImage _gBufferPosition = {};
AllocatedImage _gBufferNormal = {};
AllocatedImage _gBufferAlbedo = {};
DeletionQueue _deletionQueue;
};
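// Illustrative sketch (not part of this header): the initialization order implied by
// the dependencies above. The VulkanEngine members are assumptions; init() and
// init_swapchain() are the real entry points.
void VulkanEngine::init_managers()
{
    _resourceManager.init(&_deviceManager);                      // needs device + allocator
    _swapchainManager.init(&_deviceManager, &_resourceManager);  // wire dependencies
    _swapchainManager.init_swapchain();                          // swapchain + draw/depth/GBuffer images
}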

155
src/core/vk_types.h Normal file
View File

@@ -0,0 +1,155 @@
// vk_types.h : shared engine-wide Vulkan types, helper macros, and scene graph primitives.
#pragma once
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <span>
#include <array>
#include <functional>
#include <deque>
#include <vulkan/vulkan.h>
#include <vulkan/vk_enum_string_helper.h>
#include <vk_mem_alloc.h>
#include <fmt/core.h>
#include <glm/mat4x4.hpp>
#include <glm/vec4.hpp>
#define VK_CHECK(x) \
do { \
VkResult err = x; \
if (err) { \
fmt::println("Detected Vulkan error: {}", string_VkResult(err)); \
abort(); \
} \
} while (0)
struct DeletionQueue
{
std::deque<std::function<void()> > deletors;
void push_function(std::function<void()> &&function)
{
        deletors.push_back(std::move(function));
}
void flush()
{
// reverse iterate the deletion queue to execute all the functions
for (auto it = deletors.rbegin(); it != deletors.rend(); it++)
{
(*it)(); //call functors
}
deletors.clear();
}
};
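// Illustrative usage sketch (not part of this header): deletors are pushed in creation
// order and flush() runs them in reverse, so objects are destroyed before anything
// they depend on. The device/fence/sampler handles are assumptions for the example.
inline void deletion_queue_example(VkDevice device, VkFence fence, VkSampler sampler)
{
    DeletionQueue queue;
    queue.push_function([=]() { vkDestroyFence(device, fence, nullptr); });
    queue.push_function([=]() { vkDestroySampler(device, sampler, nullptr); });
    queue.flush(); // destroys the sampler first, then the fence
}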
struct AllocatedImage
{
VkImage image;
VkImageView imageView;
VmaAllocation allocation;
VkFormat imageFormat;
VkExtent3D imageExtent;
};
struct AllocatedBuffer {
VkBuffer buffer;
VmaAllocation allocation;
VmaAllocationInfo info;
};
struct GPUSceneData {
glm::mat4 view;
glm::mat4 proj;
glm::mat4 viewproj;
glm::mat4 lightViewProj;
glm::vec4 ambientColor;
glm::vec4 sunlightDirection; // w for sun power
glm::vec4 sunlightColor;
};
enum class MaterialPass :uint8_t {
MainColor,
Transparent,
Other
};
struct MaterialPipeline {
VkPipeline pipeline;
VkPipelineLayout layout;
};
struct MaterialInstance {
MaterialPipeline* pipeline;
VkDescriptorSet materialSet;
MaterialPass passType;
};
struct Vertex {
glm::vec3 position;
float uv_x;
glm::vec3 normal;
float uv_y;
glm::vec4 color;
};
// holds the resources needed for a mesh
struct GPUMeshBuffers {
AllocatedBuffer indexBuffer;
AllocatedBuffer vertexBuffer;
VkDeviceAddress vertexBufferAddress;
};
// push constants for our mesh object draws
struct GPUDrawPushConstants {
glm::mat4 worldMatrix;
VkDeviceAddress vertexBuffer;
};
struct DrawContext;
// base class for a renderable dynamic object
class IRenderable {
public:
    virtual ~IRenderable() = default;
    virtual void Draw(const glm::mat4& topMatrix, DrawContext& ctx) = 0;
};
// implementation of a drawable scene node.
// the scene node can hold children and will also keep a transform to propagate
// to them
struct Node : public IRenderable {
// parent pointer must be a weak pointer to avoid circular dependencies
std::weak_ptr<Node> parent;
std::vector<std::shared_ptr<Node>> children;
glm::mat4 localTransform;
glm::mat4 worldTransform;
void refreshTransform(const glm::mat4& parentMatrix)
{
worldTransform = parentMatrix * localTransform;
        for (const auto& c : children) {
c->refreshTransform(worldTransform);
}
}
    void Draw(const glm::mat4& topMatrix, DrawContext& ctx) override
{
// draw children
for (auto& c : children) {
c->Draw(topMatrix, ctx);
}
}
};
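// Illustrative usage sketch (not part of this header): building a two-level hierarchy
// and propagating transforms. The +1 Y offset is written directly into the matrix's
// translation column to avoid extra GLM includes; everything else comes from Node.
inline std::shared_ptr<Node> make_example_scene()
{
    auto root = std::make_shared<Node>();
    root->localTransform = glm::mat4{1.f};

    auto child = std::make_shared<Node>();
    glm::mat4 offset{1.f};
    offset[3] = glm::vec4{0.f, 1.f, 0.f, 1.f}; // translate +1 on Y
    child->localTransform = offset;
    child->parent = root;
    root->children.push_back(child);

    // child->worldTransform becomes root->worldTransform * child->localTransform.
    root->refreshTransform(glm::mat4{1.f});
    return root;
}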