ADD: IBL engine side

This commit is contained in:
2025-11-11 21:20:34 +09:00
parent a918fc6db8
commit 24e83061b4
14 changed files with 592 additions and 85 deletions

View File

@@ -4,6 +4,7 @@
- Vulkan SDK installed and `VULKAN_SDK` set.
- A C++20 compiler and CMake ≥ 3.8.
- GPU drivers with Vulkan 1.2+.
- KTX-Software installed (provides libktx) and discoverable by CMake.
- Configure
- `cmake -S . -B build -DCMAKE_BUILD_TYPE=Debug`

View File

@@ -21,7 +21,7 @@ const float SHADOW_BORDER_SMOOTH_NDC = 0.08;
// Base PCF radius in texels for cascade 0; higher cascades scale this up slightly.
const float SHADOW_PCF_BASE_RADIUS = 1.35;
// Additional per-cascade radius scale for coarser cascades (0..1 factor added across levels)
const float SHADOW_PCF_CASCADE_GAIN = 2.0; // extra radius at far end
const float SHADOW_PCF_CASCADE_GAIN = 2.0;// extra radius at far end
// Receiver normal-based offset to reduce acne (in world units)
const float SHADOW_NORMAL_OFFSET = 0.0025;
// Scale for receiver-plane depth bias term (tweak if over/under biased)
@@ -29,8 +29,8 @@ const float SHADOW_RPDB_SCALE = 1.0;
// Minimum clamp to keep a tiny bias even on perpendicular receivers
const float SHADOW_MIN_BIAS = 1e-5;
// Ray query safety params
const float SHADOW_RAY_TMIN = 0.02; // start a bit away from the surface
const float SHADOW_RAY_ORIGIN_BIAS = 0.01; // world units
const float SHADOW_RAY_TMIN = 0.02;// start a bit away from the surface
const float SHADOW_RAY_ORIGIN_BIAS = 0.01;// world units
const float PI = 3.14159265359;
@@ -74,7 +74,7 @@ CascadeMix computeCascadeMix(vec3 worldPos)
if (primary < 3u)
{
float edge = max(abs(ndcP.x), abs(ndcP.y)); // 0..1, 1 at border
float edge = max(abs(ndcP.x), abs(ndcP.y));// 0..1, 1 at border
// start blending when we are within S of the border
float t = clamp((edge - (1.0 - SHADOW_BORDER_SMOOTH_NDC)) / max(SHADOW_BORDER_SMOOTH_NDC, 1e-4), 0.0, 1.0);
float w = smoothstep(0.0, 1.0, t);
@@ -121,9 +121,9 @@ vec2 receiverPlaneDepthGradient(vec3 ndc, vec3 dndc_dx, vec3 dndc_dy)
}
// Manual inverse for stability/perf on some drivers
mat2 invJ = (1.0 / det) * mat2( J[1][1], -J[0][1],
mat2 invJ = (1.0 / det) * mat2(J[1][1], -J[0][1],
-J[1][0], J[0][0]);
return invJ * dz_dxdy; // (dz/du, dz/dv)
return invJ * dz_dxdy;// (dz/du, dz/dv)
}
float sampleCascadeShadow(uint ci, vec3 worldPos, vec3 N, vec3 L)
@@ -165,7 +165,7 @@ float sampleCascadeShadow(uint ci, vec3 worldPos, vec3 N, vec3 L)
for (int i = 0; i < TAP_COUNT; ++i)
{
vec2 pu = rot * POISSON_16[i];
vec2 off = pu * radius * texelSize; // uv-space offset of this tap
vec2 off = pu * radius * texelSize;// uv-space offset of this tap
float pr = length(pu);
float w = 1.0 - smoothstep(0.0, 0.65, pr);

View File

@@ -34,11 +34,15 @@ add_executable (vulkan_engine
core/frame_resources.cpp
core/texture_cache.h
core/texture_cache.cpp
core/ktx_loader.h
core/ktx_loader.cpp
core/config.h
core/vk_engine.h
core/vk_engine.cpp
core/vk_raytracing.h
core/vk_raytracing.cpp
core/ibl_manager.h
core/ibl_manager.cpp
# render
render/vk_pipelines.h
render/vk_pipelines.cpp

View File

@@ -31,6 +31,7 @@ class AssetManager;
class RenderGraph;
class RayTracingManager;
class TextureCache;
class IBLManager;
struct ShadowSettings
{
@@ -95,4 +96,5 @@ public:
// Streaming subsystems (engine-owned)
TextureCache* textures = nullptr; // texture streaming + cache
IBLManager* ibl = nullptr; // optional IBL owner (if created by engine)
};

79
src/core/ibl_manager.cpp Normal file
View File

@@ -0,0 +1,79 @@
#include "ibl_manager.h"
#include <core/engine_context.h>
#include <core/vk_resource.h>
#include <core/ktx_loader.h>
#include <core/vk_sampler_manager.h>
bool IBLManager::load(const IBLPaths &paths)
{
if (_ctx == nullptr || _ctx->getResources() == nullptr) return false;
ResourceManager* rm = _ctx->getResources();
// Specular cubemap
if (!paths.specularCube.empty())
{
ktxutil::KtxCubemap kcm{};
if (ktxutil::load_ktx2_cubemap(paths.specularCube.c_str(), kcm))
{
_spec = rm->create_image_compressed_layers(
kcm.bytes.data(), kcm.bytes.size(),
kcm.fmt, kcm.mipLevels, kcm.layers,
kcm.copies,
VK_IMAGE_USAGE_SAMPLED_BIT,
kcm.imgFlags
);
}
}
// Diffuse cubemap
if (!paths.diffuseCube.empty())
{
ktxutil::KtxCubemap kcm{};
if (ktxutil::load_ktx2_cubemap(paths.diffuseCube.c_str(), kcm))
{
_diff = rm->create_image_compressed_layers(
kcm.bytes.data(), kcm.bytes.size(),
kcm.fmt, kcm.mipLevels, kcm.layers,
kcm.copies,
VK_IMAGE_USAGE_SAMPLED_BIT,
kcm.imgFlags
);
}
}
// BRDF LUT (optional)
if (!paths.brdfLut2D.empty())
{
ktxutil::Ktx2D lut{};
if (ktxutil::load_ktx2_2d(paths.brdfLut2D.c_str(), lut))
{
// Build regions into ResourceManager::MipLevelCopy to reuse compressed 2D helper
std::vector<ResourceManager::MipLevelCopy> lv;
lv.reserve(lut.mipLevels);
for (uint32_t mip = 0; mip < lut.mipLevels; ++mip)
{
const auto &r = lut.copies[mip];
lv.push_back(ResourceManager::MipLevelCopy{
.offset = r.bufferOffset,
.length = 0, // not needed for copy scheduling
.width = r.imageExtent.width,
.height = r.imageExtent.height,
});
}
_brdf = rm->create_image_compressed(lut.bytes.data(), lut.bytes.size(), lut.fmt, lv,
VK_IMAGE_USAGE_SAMPLED_BIT);
}
}
return (_spec.image != VK_NULL_HANDLE) && (_diff.image != VK_NULL_HANDLE);
}
void IBLManager::unload()
{
if (_ctx == nullptr || _ctx->getResources() == nullptr) return;
auto* rm = _ctx->getResources();
if (_spec.image) { rm->destroy_image(_spec); _spec = {}; }
if (_diff.image) { rm->destroy_image(_diff); _diff = {}; }
if (_brdf.image) { rm->destroy_image(_brdf); _brdf = {}; }
}

39
src/core/ibl_manager.h Normal file
View File

@@ -0,0 +1,39 @@
#pragma once
#include <core/vk_types.h>
#include <string>
class EngineContext;
// File locations of the IBL assets; an empty string skips loading that asset.
struct IBLPaths
{
    std::string specularCube; // .ktx2 (GPU-ready BC6H or R16G16B16A16)
    std::string diffuseCube;  // .ktx2
    std::string brdfLut2D;    // .ktx2 (BC5 RG UNORM or similar)
};
// Minimal IBL asset owner with optional residency control.
// Owns up to three GPU images (specular cubemap, diffuse cubemap, 2D BRDF LUT)
// created through the engine's ResourceManager.
class IBLManager
{
public:
    // Stores the engine context used to reach the ResourceManager; allocates nothing.
    void init(EngineContext* ctx) { _ctx = ctx; }
    // Load all three textures. Returns true when specular+diffuse (and optional LUT) are resident.
    bool load(const IBLPaths& paths);
    // Release GPU memory; rebinding fallback textures is left to the caller.
    void unload();
    // NOTE(review): reports true when EITHER cubemap is resident, while load()
    // requires BOTH to succeed — confirm whether a partial set should count as resident.
    bool resident() const { return _spec.image != VK_NULL_HANDLE || _diff.image != VK_NULL_HANDLE; }
    AllocatedImage specular() const { return _spec; }
    AllocatedImage diffuse() const { return _diff; }
    AllocatedImage brdf() const { return _brdf; }

private:
    EngineContext* _ctx{nullptr}; // non-owning; set by init()
    AllocatedImage _spec{};       // specular cubemap
    AllocatedImage _diff{};       // diffuse cubemap
    AllocatedImage _brdf{};       // optional 2D BRDF LUT
};

176
src/core/ktx_loader.cpp Normal file
View File

@@ -0,0 +1,176 @@
#include "ktx_loader.h"
#include <ktx.h>
#include <ktxvulkan.h>
#include <filesystem>
namespace ktxutil
{
// True for any of the 16 BCn block-compressed Vulkan formats, false otherwise.
static inline bool is_bc_format(VkFormat f)
{
    static constexpr VkFormat kBcFormats[] = {
        VK_FORMAT_BC1_RGB_UNORM_BLOCK,  VK_FORMAT_BC1_RGB_SRGB_BLOCK,
        VK_FORMAT_BC1_RGBA_UNORM_BLOCK, VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
        VK_FORMAT_BC2_UNORM_BLOCK,      VK_FORMAT_BC2_SRGB_BLOCK,
        VK_FORMAT_BC3_UNORM_BLOCK,      VK_FORMAT_BC3_SRGB_BLOCK,
        VK_FORMAT_BC4_UNORM_BLOCK,      VK_FORMAT_BC4_SNORM_BLOCK,
        VK_FORMAT_BC5_UNORM_BLOCK,      VK_FORMAT_BC5_SNORM_BLOCK,
        VK_FORMAT_BC6H_UFLOAT_BLOCK,    VK_FORMAT_BC6H_SFLOAT_BLOCK,
        VK_FORMAT_BC7_UNORM_BLOCK,      VK_FORMAT_BC7_SRGB_BLOCK,
    };
    for (VkFormat bc : kBcFormats)
    {
        if (f == bc) return true;
    }
    return false;
}
// True when 'path' names an existing filesystem entry. Uses the error_code
// overload so no exception can escape; an error from the query counts as absent.
static inline bool exists_file(const char* path)
{
    std::error_code ec;
    const bool found = std::filesystem::exists(path, ec);
    return found && !ec;
}
// Loads a .ktx2 cubemap (or cubemap array) from 'path' and fills 'out' with the
// raw payload plus one VkBufferImageCopy per (mip, layer, face).
// Returns false for a missing file, a non-cubemap, a file that still needs BasisU
// transcoding (BC6H cannot be a transcode target), or any libktx failure.
bool load_ktx2_cubemap(const char* path, KtxCubemap& out)
{
    out = KtxCubemap{};
    if (path == nullptr || !exists_file(path)) return false;

    ktxTexture2* ktex = nullptr;
    ktxResult kres = ktxTexture2_CreateFromNamedFile(path, KTX_TEXTURE_CREATE_LOAD_IMAGE_DATA_BIT, &ktex);
    if (kres != KTX_SUCCESS || !ktex) return false;

    // Ensure it is a cubemap or cubemap array.
    if (ktex->numFaces != 6)
    {
        ktxTexture_Destroy(ktxTexture(ktex));
        return false;
    }

    // Transcoding path: for IBL HDR cubemaps we expect GPU-ready formats (e.g., BC6H or
    // R16G16B16A16). BasisU does not support BC6H transcoding, so a file that still
    // requires transcoding is rejected; assets must be pre-encoded to a GPU format.
    if (ktxTexture2_NeedsTranscoding(ktex))
    {
        ktxTexture_Destroy(ktxTexture(ktex));
        return false;
    }

    VkFormat vkfmt = static_cast<VkFormat>(ktex->vkFormat);
    // Accept any GPU format (BC6H preferred). Non-BC formats like R16G16B16A16 are valid too.
    const uint32_t mipLevels = ktex->numLevels;
    const uint32_t baseW = ktex->baseWidth;
    const uint32_t baseH = ktex->baseHeight;
    const uint32_t arrayEntries = std::max(1u, ktex->numLayers);
    const uint32_t layers = arrayEntries * 6u; // Vulkan arrayLayers = layers × faces

    ktx_size_t totalSize = ktxTexture_GetDataSize(ktxTexture(ktex));
    const uint8_t* dataPtr = reinterpret_cast<const uint8_t*>(ktxTexture_GetData(ktxTexture(ktex)));

    out.fmt = vkfmt;
    out.baseW = baseW;
    out.baseH = baseH;
    out.mipLevels = mipLevels;
    out.layers = layers;
    out.imgFlags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
    out.bytes.assign(dataPtr, dataPtr + totalSize);

    out.copies.clear();
    out.copies.reserve(static_cast<size_t>(mipLevels) * layers);
    for (uint32_t mip = 0; mip < mipLevels; ++mip)
    {
        const uint32_t w = std::max(1u, baseW >> mip);
        const uint32_t h = std::max(1u, baseH >> mip);
        for (uint32_t layer = 0; layer < arrayEntries; ++layer)
        {
            for (uint32_t face = 0; face < 6; ++face)
            {
                ktx_size_t off = 0;
                // Fix: the offset query can fail; previously its result was ignored and a
                // bogus offset of 0 would have been baked into the copy region.
                if (ktxTexture_GetImageOffset(ktxTexture(ktex), mip, layer, face, &off) != KTX_SUCCESS)
                {
                    ktxTexture_Destroy(ktxTexture(ktex));
                    out = KtxCubemap{};
                    return false;
                }
                VkBufferImageCopy r{};
                r.bufferOffset = static_cast<VkDeviceSize>(off);
                r.bufferRowLength = 0; // tightly packed
                r.bufferImageHeight = 0;
                r.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                r.imageSubresource.mipLevel = mip;
                r.imageSubresource.baseArrayLayer = layer * 6u + face;
                r.imageSubresource.layerCount = 1;
                r.imageExtent = { w, h, 1 };
                out.copies.push_back(r);
            }
        }
    }
    ktxTexture_Destroy(ktxTexture(ktex));
    return true;
}
// Loads a 2D .ktx2 texture (intended for BRDF LUTs), transcoding BasisU data to
// BC5 RG when necessary, and fills 'out' with the payload plus one copy region per mip.
// Returns false for a missing file, a transcode failure, a non-BC result format,
// or any libktx failure.
bool load_ktx2_2d(const char* path, Ktx2D& out)
{
    out = Ktx2D{};
    if (path == nullptr || !exists_file(path)) return false;

    ktxTexture2* ktex = nullptr;
    ktxResult kres = ktxTexture2_CreateFromNamedFile(path, KTX_TEXTURE_CREATE_LOAD_IMAGE_DATA_BIT, &ktex);
    if (kres != KTX_SUCCESS || !ktex) return false;

    if (ktxTexture2_NeedsTranscoding(ktex))
    {
        // Common for BRDF LUTs: BC5 RG UNORM
        kres = ktxTexture2_TranscodeBasis(ktex, KTX_TTF_BC5_RG, 0);
        if (kres != KTX_SUCCESS)
        {
            ktxTexture_Destroy(ktxTexture(ktex));
            return false;
        }
    }

    VkFormat vkfmt = static_cast<VkFormat>(ktex->vkFormat);
    // Unlike the cubemap path, only BC formats are accepted here.
    if (!is_bc_format(vkfmt))
    {
        ktxTexture_Destroy(ktxTexture(ktex));
        return false;
    }

    const uint32_t mipLevels = ktex->numLevels;
    const uint32_t baseW = ktex->baseWidth;
    const uint32_t baseH = ktex->baseHeight;
    ktx_size_t totalSize = ktxTexture_GetDataSize(ktxTexture(ktex));
    const uint8_t* dataPtr = reinterpret_cast<const uint8_t*>(ktxTexture_GetData(ktxTexture(ktex)));

    out.fmt = vkfmt;
    out.baseW = baseW;
    out.baseH = baseH;
    out.mipLevels = mipLevels;
    out.bytes.assign(dataPtr, dataPtr + totalSize);

    out.copies.clear();
    out.copies.reserve(mipLevels);
    for (uint32_t mip = 0; mip < mipLevels; ++mip)
    {
        ktx_size_t off = 0;
        // Fix: the offset query can fail; previously its result was ignored and a
        // bogus offset of 0 would have been baked into the copy region.
        if (ktxTexture_GetImageOffset(ktxTexture(ktex), mip, 0, 0, &off) != KTX_SUCCESS)
        {
            ktxTexture_Destroy(ktxTexture(ktex));
            out = Ktx2D{};
            return false;
        }
        const uint32_t w = std::max(1u, baseW >> mip);
        const uint32_t h = std::max(1u, baseH >> mip);
        VkBufferImageCopy r{};
        r.bufferOffset = static_cast<VkDeviceSize>(off);
        r.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        r.imageSubresource.mipLevel = mip;
        r.imageSubresource.baseArrayLayer = 0;
        r.imageSubresource.layerCount = 1;
        r.imageExtent = { w, h, 1 };
        out.copies.push_back(r);
    }
    ktxTexture_Destroy(ktxTexture(ktex));
    return true;
}
}

39
src/core/ktx_loader.h Normal file
View File

@@ -0,0 +1,39 @@
#pragma once
#include <core/vk_types.h>
#include <vector>
// Simple KTX2 helpers focused on IBL assets.
// Uses libktx to open and (if needed) transcode to GPU-ready BC formats.
namespace ktxutil
{
// CPU-side staging description of a decoded KTX2 cubemap: the raw payload plus
// everything needed to create and fill the matching VkImage.
struct KtxCubemap
{
    VkFormat fmt{VK_FORMAT_UNDEFINED};
    uint32_t baseW{0};
    uint32_t baseH{0};
    uint32_t mipLevels{0};
    uint32_t layers{0}; // total array layers in the Vulkan image (faces × layers)
    std::vector<uint8_t> bytes; // full file data block returned by libktx
    std::vector<VkBufferImageCopy> copies; // one per (mip × layer × face)
    VkImageCreateFlags imgFlags{0}; // e.g., VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
};
// Loads a .ktx2 cubemap (or cubemap array) and prepares copy regions for upload.
// - Expects GPU-ready data (e.g. BC6H UFLOAT or R16G16B16A16 for HDR); files that
//   still require BasisU transcoding are rejected, since BasisU cannot target BC6H.
// - Returns true on success and fills 'out'.
bool load_ktx2_cubemap(const char* path, KtxCubemap& out);
// Optional: minimal 2D loader for BRDF LUTs (RG/BC5 etc.). Returns VkFormat and copies per mip.
// CPU-side staging description of a decoded 2D KTX2 texture (e.g. a BRDF LUT):
// raw payload plus one VkBufferImageCopy per mip level.
struct Ktx2D
{
    VkFormat fmt{VK_FORMAT_UNDEFINED};
    uint32_t baseW{0};
    uint32_t baseH{0};
    uint32_t mipLevels{0};
    std::vector<uint8_t> bytes; // full file data block returned by libktx
    std::vector<VkBufferImageCopy> copies; // one per mip level
};
bool load_ktx2_2d(const char* path, Ktx2D& out);
}

View File

@@ -573,7 +573,7 @@ void VulkanEngine::init()
// Conservative defaults to avoid CPU/RAM/VRAM spikes during heavy glTF loads.
_textureCache->set_max_loads_per_pump(3);
_textureCache->set_keep_source_bytes(false);
_textureCache->set_cpu_source_budget(64ull * 1024ull * 1024ull); // 32 MiB
_textureCache->set_cpu_source_budget(64ull * 1024ull * 1024ull); // 64 MiB
_textureCache->set_max_bytes_per_pump(128ull * 1024ull * 1024ull); // 128 MiB/frame
_textureCache->set_max_upload_dimension(4096);

View File

@@ -312,6 +312,29 @@ VkImageCreateInfo vkinit::image_create_info(VkFormat format, VkImageUsageFlags u
return info;
}
// Overload for layered/mipped 2D images (arrays, cubemaps): caller supplies the
// subresource counts and any create flags (e.g. VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT).
// A zero mip or layer count is clamped to 1 so the struct stays valid.
VkImageCreateInfo vkinit::image_create_info(VkFormat format,
                                            VkImageUsageFlags usageFlags,
                                            VkExtent3D extent,
                                            uint32_t mipLevels,
                                            uint32_t arrayLayers,
                                            VkImageCreateFlags flags)
{
    const uint32_t levels = (mipLevels > 0) ? mipLevels : 1;
    const uint32_t layers = (arrayLayers > 0) ? arrayLayers : 1;

    VkImageCreateInfo info{};
    info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    info.pNext = nullptr;
    info.flags = flags;
    info.imageType = VK_IMAGE_TYPE_2D;
    info.format = format;
    info.extent = extent;
    info.mipLevels = levels;
    info.arrayLayers = layers;
    info.samples = VK_SAMPLE_COUNT_1_BIT;
    info.tiling = VK_IMAGE_TILING_OPTIMAL;
    info.usage = usageFlags;
    return info;
}
VkImageViewCreateInfo vkinit::imageview_create_info(VkFormat format, VkImage image, VkImageAspectFlags aspectFlags)
{
// build a image-view for the depth image to use for rendering
@@ -331,6 +354,29 @@ VkImageViewCreateInfo vkinit::imageview_create_info(VkFormat format, VkImage ima
return info;
}
// Overload for layered/cubemap views: caller chooses the view type and the exact
// subresource window (mip range and array-layer range).
VkImageViewCreateInfo vkinit::imageview_create_info(VkImageViewType viewType,
                                                    VkFormat format,
                                                    VkImage image,
                                                    VkImageAspectFlags aspectFlags,
                                                    uint32_t baseMipLevel,
                                                    uint32_t levelCount,
                                                    uint32_t baseArrayLayer,
                                                    uint32_t layerCount)
{
    VkImageViewCreateInfo info{};
    info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    info.pNext = nullptr;
    info.image = image;
    info.viewType = viewType;
    info.format = format;
    info.subresourceRange = VkImageSubresourceRange{
        aspectFlags, baseMipLevel, levelCount, baseArrayLayer, layerCount
    };
    return info;
}
//< image_set
VkPipelineLayoutCreateInfo vkinit::pipeline_layout_create_info()
{

View File

@@ -63,6 +63,24 @@ namespace vkinit
VkImageViewCreateInfo imageview_create_info(VkFormat format, VkImage image, VkImageAspectFlags aspectFlags);
// Overload: explicit mip/array counts and image flags (e.g., cube compatible)
VkImageCreateInfo image_create_info(VkFormat format,
VkImageUsageFlags usageFlags,
VkExtent3D extent,
uint32_t mipLevels,
uint32_t arrayLayers,
VkImageCreateFlags flags);
// Overload: explicit view type and subresource counts for layered/cubemap views
VkImageViewCreateInfo imageview_create_info(VkImageViewType viewType,
VkFormat format,
VkImage image,
VkImageAspectFlags aspectFlags,
uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
uint32_t layerCount);
VkPipelineLayoutCreateInfo pipeline_layout_create_info();
VkPipelineShaderStageCreateInfo pipeline_shader_stage_create_info(VkShaderStageFlagBits stage,

View File

@@ -689,3 +689,79 @@ AllocatedImage ResourceManager::create_image_compressed(const void* bytes, size_
return new_image;
}
// Creates a layered (2D-array or cubemap) device-local image from one pre-compressed
// payload and queues the buffer-to-image copies for upload.
// - 'bytes'/'size': full payload, staged once into a single CPU-to-GPU buffer
// - 'regions': one VkBufferImageCopy per (mip × layer) into that buffer
// - 'flags': pass VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT (with layerCount == 6) for cubemaps
// Returns a default AllocatedImage on invalid arguments. The staging buffer's
// ownership moves into the pending-upload record and is not freed here.
AllocatedImage ResourceManager::create_image_compressed_layers(const void* bytes, size_t size,
                                                               VkFormat fmt,
                                                               uint32_t mipLevels,
                                                               uint32_t layerCount,
                                                               std::span<const VkBufferImageCopy> regions,
                                                               VkImageUsageFlags usage,
                                                               VkImageCreateFlags flags)
{
    if (bytes == nullptr || size == 0 || regions.empty() || mipLevels == 0 || layerCount == 0)
    {
        return {};
    }
    // Infer base extent from mip 0 entry
    VkExtent3D extent{1, 1, 1};
    // Find first region for mip 0 to get base dimensions
    // (if no region targets mip 0 the 1x1x1 fallback above is used)
    for (const auto &r : regions)
    {
        if (r.imageSubresource.mipLevel == 0)
        {
            extent = { r.imageExtent.width, r.imageExtent.height, r.imageExtent.depth > 0 ? r.imageExtent.depth : 1u };
            break;
        }
    }
    // Create staging buffer with compressed payload
    AllocatedBuffer uploadbuffer = create_buffer(size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                                                 VMA_MEMORY_USAGE_CPU_TO_GPU);
    std::memcpy(uploadbuffer.info.pMappedData, bytes, size);
    // Flush so the copy is visible even on non-coherent mappings
    vmaFlushAllocation(_deviceManager->allocator(), uploadbuffer.allocation, 0, size);
    // Create the destination image with explicit mips/layers and any requested flags
    VkImageUsageFlags imageUsage = usage | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    VkImageCreateInfo img_info = vkinit::image_create_info(fmt, imageUsage, extent, mipLevels, layerCount, flags);
    AllocatedImage newImage{};
    newImage.imageFormat = fmt;
    newImage.imageExtent = extent;
    // GPU-only device local memory
    VmaAllocationCreateInfo allocinfo{};
    allocinfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocinfo.requiredFlags = static_cast<VkMemoryPropertyFlags>(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    VK_CHECK(vmaCreateImage(_deviceManager->allocator(), &img_info, &allocinfo, &newImage.image, &newImage.allocation, nullptr));
    // Build appropriate image view: cube when cube-compatible and 6 layers; array view otherwise.
    // NOTE(review): a cubemap array (cube-compatible, layerCount > 6) currently gets a
    // 2D_ARRAY view, not CUBE_ARRAY — confirm whether that is intended.
    const bool isDepth = (fmt == VK_FORMAT_D32_SFLOAT);
    VkImageAspectFlags aspect = isDepth ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
    const bool isCube = ((flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) != 0) && (layerCount == 6);
    VkImageViewType viewType = isCube ? VK_IMAGE_VIEW_TYPE_CUBE : (layerCount > 1 ? VK_IMAGE_VIEW_TYPE_2D_ARRAY : VK_IMAGE_VIEW_TYPE_2D);
    VkImageViewCreateInfo viewInfo = vkinit::imageview_create_info(viewType, fmt, newImage.image, aspect, 0, mipLevels, 0, layerCount);
    VK_CHECK(vkCreateImageView(_deviceManager->device(), &viewInfo, nullptr, &newImage.imageView));
    // Queue copy regions for the RenderGraph upload or immediate path
    PendingImageUpload pending{};
    pending.staging = uploadbuffer;
    pending.image = newImage.image;
    pending.extent = extent;
    pending.format = fmt;
    pending.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    pending.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    pending.generateMips = false; // compressed, mips provided
    pending.mipLevels = mipLevels;
    pending.copies.assign(regions.begin(), regions.end());
    _pendingImageUploads.push_back(std::move(pending));
    if (!_deferUploads)
    {
        // Eager path: perform the queued copy right away instead of at graph execution
        process_queued_uploads_immediate();
    }
    return newImage;
}

View File

@@ -76,6 +76,19 @@ public:
std::span<const MipLevelCopy> levels,
VkImageUsageFlags usage = VK_IMAGE_USAGE_SAMPLED_BIT);
// Create a layered image (2D array or cubemap) from a compressed payload.
// - 'bytes' is the full KTX2 data payload staged into one buffer
// - 'regions' lists VkBufferImageCopy entries (one per mip × layer)
// - 'mipLevels' and 'layerCount' define the image subresource counts
// - for cubemaps, pass flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT and layerCount=6
AllocatedImage create_image_compressed_layers(const void* bytes, size_t size,
VkFormat fmt,
uint32_t mipLevels,
uint32_t layerCount,
std::span<const VkBufferImageCopy> regions,
VkImageUsageFlags usage = VK_IMAGE_USAGE_SAMPLED_BIT,
VkImageCreateFlags flags = 0);
void destroy_image(const AllocatedImage &img) const;
GPUMeshBuffers uploadMesh(std::span<uint32_t> indices, std::span<Vertex> vertices);

View File

@@ -4,30 +4,39 @@
#include <glm/gtc/constants.hpp>
#include "core/vk_types.h"
namespace primitives {
inline void buildCube(std::vector<Vertex>& vertices, std::vector<uint32_t>& indices) {
namespace primitives
{
inline void buildCube(std::vector<Vertex> &vertices, std::vector<uint32_t> &indices)
{
vertices.clear();
indices.clear();
struct Face {
struct Face
{
glm::vec3 normal;
glm::vec3 v0, v1, v2, v3;
} faces[6] = {
{ {0,0,1}, { -0.5f,-0.5f, 0.5f}, { 0.5f,-0.5f, 0.5f}, { -0.5f, 0.5f, 0.5f}, { 0.5f, 0.5f, 0.5f} },
{ {0,0,-1},{ -0.5f,-0.5f,-0.5f}, { -0.5f, 0.5f,-0.5f}, { 0.5f,-0.5f,-0.5f}, { 0.5f, 0.5f,-0.5f} },
{ {0,1,0}, { -0.5f, 0.5f, 0.5f}, { 0.5f, 0.5f, 0.5f}, { -0.5f, 0.5f,-0.5f}, { 0.5f, 0.5f,-0.5f} },
{ {0,-1,0},{ -0.5f,-0.5f, 0.5f}, { -0.5f,-0.5f,-0.5f}, { 0.5f,-0.5f, 0.5f}, { 0.5f,-0.5f,-0.5f} },
{ {1,0,0}, { 0.5f,-0.5f, 0.5f}, { 0.5f,-0.5f,-0.5f}, { 0.5f, 0.5f, 0.5f}, { 0.5f, 0.5f,-0.5f} },
{ {-1,0,0},{ -0.5f,-0.5f, 0.5f}, { -0.5f, 0.5f, 0.5f}, { -0.5f,-0.5f,-0.5f}, { -0.5f, 0.5f,-0.5f} }
{{0, 0, 1}, {-0.5f, -0.5f, 0.5f}, {0.5f, -0.5f, 0.5f}, {-0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}},
{
{0, 0, -1}, {-0.5f, -0.5f, -0.5f}, {-0.5f, 0.5f, -0.5f}, {0.5f, -0.5f, -0.5f},
{0.5f, 0.5f, -0.5f}
},
{{0, 1, 0}, {-0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}, {-0.5f, 0.5f, -0.5f}, {0.5f, 0.5f, -0.5f}},
{
{0, -1, 0}, {-0.5f, -0.5f, 0.5f}, {-0.5f, -0.5f, -0.5f}, {0.5f, -0.5f, 0.5f},
{0.5f, -0.5f, -0.5f}
},
{{1, 0, 0}, {0.5f, -0.5f, 0.5f}, {0.5f, -0.5f, -0.5f}, {0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, -0.5f}},
{{-1, 0, 0}, {-0.5f, -0.5f, 0.5f}, {-0.5f, 0.5f, 0.5f}, {-0.5f, -0.5f, -0.5f}, {-0.5f, 0.5f, -0.5f}}
};
for (auto& f : faces) {
uint32_t start = (uint32_t)vertices.size();
Vertex v0{f.v0, 0, f.normal, 0, glm::vec4(1.0f), glm::vec4(1,0,0,1)};
Vertex v1{f.v1, 1, f.normal, 0, glm::vec4(1.0f), glm::vec4(1,0,0,1)};
Vertex v2{f.v2, 0, f.normal, 1, glm::vec4(1.0f), glm::vec4(1,0,0,1)};
Vertex v3{f.v3, 1, f.normal, 1, glm::vec4(1.0f), glm::vec4(1,0,0,1)};
for (auto &f: faces)
{
uint32_t start = (uint32_t) vertices.size();
Vertex v0{f.v0, 0, f.normal, 0, glm::vec4(1.0f), glm::vec4(1, 0, 0, 1)};
Vertex v1{f.v1, 1, f.normal, 0, glm::vec4(1.0f), glm::vec4(1, 0, 0, 1)};
Vertex v2{f.v2, 0, f.normal, 1, glm::vec4(1.0f), glm::vec4(1, 0, 0, 1)};
Vertex v3{f.v3, 1, f.normal, 1, glm::vec4(1.0f), glm::vec4(1, 0, 0, 1)};
vertices.push_back(v0);
vertices.push_back(v1);
vertices.push_back(v2);
@@ -39,19 +48,23 @@ inline void buildCube(std::vector<Vertex>& vertices, std::vector<uint32_t>& indi
indices.push_back(start + 1);
indices.push_back(start + 3);
}
}
}
inline void buildSphere(std::vector<Vertex>& vertices, std::vector<uint32_t>& indices, int sectors = 16, int stacks = 16) {
inline void buildSphere(std::vector<Vertex> &vertices, std::vector<uint32_t> &indices, int sectors = 16,
int stacks = 16)
{
vertices.clear();
indices.clear();
float radius = 0.5f;
for (int i = 0; i <= stacks; ++i) {
float v = (float)i / stacks;
for (int i = 0; i <= stacks; ++i)
{
float v = (float) i / stacks;
const float phi = v * glm::pi<float>();
float y = cos(phi);
float r = sin(phi);
for (int j = 0; j <= sectors; ++j) {
float u = (float)j / sectors;
for (int j = 0; j <= sectors; ++j)
{
float u = (float) j / sectors;
float theta = u * glm::two_pi<float>();
float x = r * cos(theta);
float z = r * sin(theta);
@@ -61,12 +74,14 @@ inline void buildSphere(std::vector<Vertex>& vertices, std::vector<uint32_t>& in
vert.uv_x = u;
vert.uv_y = 1.0f - v;
vert.color = glm::vec4(1.0f);
vert.tangent = glm::vec4(1,0,0,1);
vert.tangent = glm::vec4(1, 0, 0, 1);
vertices.push_back(vert);
}
}
for (int i = 0; i < stacks; ++i) {
for (int j = 0; j < sectors; ++j) {
for (int i = 0; i < stacks; ++i)
{
for (int j = 0; j < sectors; ++j)
{
uint32_t first = i * (sectors + 1) + j;
uint32_t second = first + sectors + 1;
indices.push_back(first);
@@ -77,7 +92,6 @@ inline void buildSphere(std::vector<Vertex>& vertices, std::vector<uint32_t>& in
indices.push_back(second + 1);
}
}
}
}
} // namespace primitives