initial commit-moved from vulkan_guide

This commit is contained in:
2025-10-10 22:53:54 +09:00
commit 8853429937
2484 changed files with 973414 additions and 0 deletions

View File

@@ -0,0 +1,91 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string_view>
#include <vector>
#ifdef _MSC_VER
#pragma warning(push) // attribute 'x' is not recognized
#pragma warning(disable : 5030)
#endif
#if defined(__x86_64__) || defined(_M_AMD64) || defined(_M_IX86)
#define FASTGLTF_IS_X86
#elif defined(_M_ARM64) || defined(__aarch64__)
// __ARM_NEON is only for general Neon availability. It does not guarantee the full A64 instruction set.
#define FASTGLTF_IS_A64
#endif
namespace fastgltf::base64 {
/**
 * Calculates the amount of base64 padding chars ('=') at the end of the encoded string.
 * @note There's at most 2 padding chars, and this function expects that the input string
 * points to the original string that has a size that is a multiple of 4 and is at least
 * 4 chars long.
 */
[[gnu::always_inline]] constexpr std::size_t getPadding(std::string_view string) noexcept {
    assert(string.size() >= 4 && string.size() % 4 == 0);
    const auto size = string.size();
    // Walk backwards over the last (at most) 3 chars; the first non-'=' char
    // tells us how many padding chars follow it.
    for (std::size_t i = 1; i < 4; ++i)
        if (string[size - i] != '=')
            return i - 1;
    return 0;
}
/**
 * Computes how many bytes the decoded data will occupy, given the length of the
 * base64 encoded string and the number of trailing padding characters it carries.
 */
[[gnu::always_inline]] constexpr std::size_t getOutputSize(std::size_t encodedSize, std::size_t padding) noexcept {
    assert(encodedSize % 4 == 0);
    // Every group of 4 encoded chars maps to 3 decoded bytes; the padding chars
    // reduce the contribution of the final group.
    const auto decodedBytes = encodedSize / 4 * 3;
    return decodedBytes - padding;
}
// SIMD-accelerated decoder variants. Which set is declared depends on the target
// architecture detected at the top of this header.
#if defined(FASTGLTF_IS_X86)
void sse4_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
void avx2_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
[[nodiscard]] std::vector<std::uint8_t> sse4_decode(std::string_view encoded);
[[nodiscard]] std::vector<std::uint8_t> avx2_decode(std::string_view encoded);
#elif defined(FASTGLTF_IS_A64)
void neon_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
[[nodiscard]] std::vector<std::uint8_t> neon_decode(std::string_view encoded);
#endif
// Architecture-independent entry points. The plain decode/decode_inplace functions
// presumably select the best available implementation — confirm in the source file.
// The *_inplace variants write into caller-provided `output`, which must be at least
// getOutputSize(encoded.size(), padding) bytes.
void fallback_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
void decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
[[nodiscard]] std::vector<std::uint8_t> fallback_decode(std::string_view encoded);
[[nodiscard]] std::vector<std::uint8_t> decode(std::string_view encoded);
} // namespace fastgltf::base64
#ifdef _MSC_VER
#pragma warning(pop)
#endif

View File

@@ -0,0 +1,76 @@
#pragma once
#include <fastgltf/tools.hpp>
// If we find glm in the default include path, we'll also include it ourselves.
// However, it is generally expected that the user will include glm before including this header.
#if __has_include(<glm/glm.hpp>)
#include <glm/glm.hpp>
#endif
// Specializations of fastgltf::ElementTraits that map glTF accessor types
// (Vec2/Vec3/Vec4/Mat2/Mat3/Mat4 with their component types) onto the
// corresponding glm vector and matrix types, so accessor data can be read
// directly into glm values.
namespace fastgltf {

// 32-bit float vectors.
template<>
struct ElementTraits<glm::vec2> : ElementTraitsBase<glm::vec2, AccessorType::Vec2, float> {};
template<>
struct ElementTraits<glm::vec3> : ElementTraitsBase<glm::vec3, AccessorType::Vec3, float> {};
template<>
struct ElementTraits<glm::vec4> : ElementTraitsBase<glm::vec4, AccessorType::Vec4, float> {};

// 8-bit signed integer vectors.
template<>
struct ElementTraits<glm::i8vec2> : ElementTraitsBase<glm::i8vec2, AccessorType::Vec2, std::int8_t> {};
template<>
struct ElementTraits<glm::i8vec3> : ElementTraitsBase<glm::i8vec3, AccessorType::Vec3, std::int8_t> {};
template<>
struct ElementTraits<glm::i8vec4> : ElementTraitsBase<glm::i8vec4, AccessorType::Vec4, std::int8_t> {};

// 8-bit unsigned integer vectors.
template<>
struct ElementTraits<glm::u8vec2> : ElementTraitsBase<glm::u8vec2, AccessorType::Vec2, std::uint8_t> {};
template<>
struct ElementTraits<glm::u8vec3> : ElementTraitsBase<glm::u8vec3, AccessorType::Vec3, std::uint8_t> {};
template<>
struct ElementTraits<glm::u8vec4> : ElementTraitsBase<glm::u8vec4, AccessorType::Vec4, std::uint8_t> {};

// 16-bit signed integer vectors.
template<>
struct ElementTraits<glm::i16vec2> : ElementTraitsBase<glm::i16vec2, AccessorType::Vec2, std::int16_t> {};
template<>
struct ElementTraits<glm::i16vec3> : ElementTraitsBase<glm::i16vec3, AccessorType::Vec3, std::int16_t> {};
template<>
struct ElementTraits<glm::i16vec4> : ElementTraitsBase<glm::i16vec4, AccessorType::Vec4, std::int16_t> {};

// 16-bit unsigned integer vectors.
template<>
struct ElementTraits<glm::u16vec2> : ElementTraitsBase<glm::u16vec2, AccessorType::Vec2, std::uint16_t> {};
template<>
struct ElementTraits<glm::u16vec3> : ElementTraitsBase<glm::u16vec3, AccessorType::Vec3, std::uint16_t> {};
template<>
struct ElementTraits<glm::u16vec4> : ElementTraitsBase<glm::u16vec4, AccessorType::Vec4, std::uint16_t> {};

// 32-bit unsigned integer vectors. (No signed 32-bit variants are provided here.)
template<>
struct ElementTraits<glm::u32vec2> : ElementTraitsBase<glm::u32vec2, AccessorType::Vec2, std::uint32_t> {};
template<>
struct ElementTraits<glm::u32vec3> : ElementTraitsBase<glm::u32vec3, AccessorType::Vec3, std::uint32_t> {};
template<>
struct ElementTraits<glm::u32vec4> : ElementTraitsBase<glm::u32vec4, AccessorType::Vec4, std::uint32_t> {};

// Square float matrices.
template<>
struct ElementTraits<glm::mat2> : ElementTraitsBase<glm::mat2, AccessorType::Mat2, float> {};
template<>
struct ElementTraits<glm::mat3> : ElementTraitsBase<glm::mat3, AccessorType::Mat3, float> {};
template<>
struct ElementTraits<glm::mat4> : ElementTraitsBase<glm::mat4, AccessorType::Mat4, float> {};
} // namespace fastgltf

View File

@@ -0,0 +1,755 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <memory>
#include <tuple>
#include "types.hpp"
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 5030) // attribute 'x' is not recognized
#pragma warning(disable : 4514) // unreferenced inline function has been removed
#endif
// fwd
#if defined(__ANDROID__)
struct AAssetManager;
#endif
namespace simdjson::dom {
class array;
class object;
class parser;
} // namespace simdjson::dom
namespace fastgltf {
enum class Error : std::uint64_t;
template <typename T>
class Expected;
} // namespace fastgltf
// std::tuple_size / std::tuple_element specializations so that
// fastgltf::Expected<T> can be unpacked with structured bindings:
//     auto [error, asset] = parser.loadGLTF(...);
// Element 0 is the fastgltf::Error code, element 1 the contained value
// (matching Expected<T>::get<I>() below).
namespace std {
template <typename T>
struct tuple_size<fastgltf::Expected<T>> : std::integral_constant<std::size_t, 2> {};
template <typename T>
struct tuple_element<0, fastgltf::Expected<T>> { using type = fastgltf::Error; };
template <typename T>
struct tuple_element<1, fastgltf::Expected<T>> { using type = T; };
} // namespace std
namespace fastgltf {
struct BinaryGltfChunk;
class GltfDataBuffer;
/**
 * Error codes returned by the parser and related free functions.
 * Use getErrorName() / getErrorMessage() below for string representations.
 */
enum class Error : std::uint64_t {
    None = 0,
    InvalidPath = 1, ///< The glTF directory passed to load*GLTF is invalid.
    MissingExtensions = 2, ///< One or more extensions are required by the glTF but not enabled in the Parser.
    UnknownRequiredExtension = 3, ///< An extension required by the glTF is not supported by fastgltf.
    InvalidJson = 4, ///< An error occurred while parsing the JSON.
    InvalidGltf = 5, ///< The glTF is either missing something or has invalid data.
    InvalidOrMissingAssetField = 6, ///< The glTF asset object is missing or invalid.
    InvalidGLB = 7, ///< The GLB container is invalid.
    /**
     * A field is missing in the JSON.
     * @note This is only used internally.
     */
    MissingField = 8,
    MissingExternalBuffer = 9, ///< With Options::LoadExternalBuffers, an external buffer was not found.
    UnsupportedVersion = 10, ///< The glTF version is not supported by fastgltf.
    InvalidURI = 11, ///< A URI from a buffer or image failed to be parsed.
};
/**
 * Returns the enumerator name of the given error code, e.g. "InvalidGltf".
 */
inline std::string_view getErrorName(Error error) {
    if (error == Error::None)                       return "None";
    if (error == Error::InvalidPath)                return "InvalidPath";
    if (error == Error::MissingExtensions)          return "MissingExtensions";
    if (error == Error::UnknownRequiredExtension)   return "UnknownRequiredExtension";
    if (error == Error::InvalidJson)                return "InvalidJson";
    if (error == Error::InvalidGltf)                return "InvalidGltf";
    if (error == Error::InvalidOrMissingAssetField) return "InvalidOrMissingAssetField";
    if (error == Error::InvalidGLB)                 return "InvalidGLB";
    if (error == Error::MissingField)               return "MissingField";
    if (error == Error::MissingExternalBuffer)      return "MissingExternalBuffer";
    if (error == Error::UnsupportedVersion)         return "UnsupportedVersion";
    if (error == Error::InvalidURI)                 return "InvalidURI";
    // Any other value is not a valid Error.
    FASTGLTF_UNREACHABLE
}
/**
 * Returns a human-readable description of the given error code. Some internal
 * codes (None, MissingField) intentionally map to an empty string.
 */
inline std::string_view getErrorMessage(Error error) {
    if (error == Error::None)                       return "";
    if (error == Error::InvalidPath)                return "The glTF directory passed to load*GLTF is invalid";
    if (error == Error::MissingExtensions)          return "One or more extensions are required by the glTF but not enabled in the Parser.";
    if (error == Error::UnknownRequiredExtension)   return "An extension required by the glTF is not supported by fastgltf.";
    if (error == Error::InvalidJson)                return "An error occurred while parsing the JSON.";
    if (error == Error::InvalidGltf)                return "The glTF is either missing something or has invalid data.";
    if (error == Error::InvalidOrMissingAssetField) return "The glTF asset object is missing or invalid.";
    if (error == Error::InvalidGLB)                 return "The GLB container is invalid.";
    if (error == Error::MissingField)               return "";
    if (error == Error::MissingExternalBuffer)      return "An external buffer was not found.";
    if (error == Error::UnsupportedVersion)         return "The glTF version is not supported by fastgltf.";
    if (error == Error::InvalidURI)                 return "A URI from a buffer or image failed to be parsed.";
    // Any other value is not a valid Error.
    FASTGLTF_UNREACHABLE
}
// clang-format off
/**
 * Bitflags identifying every glTF 2.0 extension fastgltf knows about. A
 * combination of these is passed to the Parser constructor to enable parsing
 * of the corresponding extension data.
 */
enum class Extensions : std::uint64_t {
    None = 0,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_texture_transform/README.md
    KHR_texture_transform = 1 << 1,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_texture_basisu/README.md
    KHR_texture_basisu = 1 << 2,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Vendor/MSFT_texture_dds/README.md
    MSFT_texture_dds = 1 << 3,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_mesh_quantization/README.md
    KHR_mesh_quantization = 1 << 4,

    // See https://github.com/KhronosGroup/glTF/tree/main/extensions/2.0/Vendor/EXT_meshopt_compression/README.md
    EXT_meshopt_compression = 1 << 5,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_lights_punctual/README.md
    KHR_lights_punctual = 1 << 6,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Vendor/EXT_texture_webp/README.md
    EXT_texture_webp = 1 << 8,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_specular/README.md
    KHR_materials_specular = 1 << 9,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_ior/README.md
    KHR_materials_ior = 1 << 10,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_iridescence/README.md
    KHR_materials_iridescence = 1 << 11,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_volume/README.md
    KHR_materials_volume = 1 << 12,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_transmission/README.md
    KHR_materials_transmission = 1 << 13,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_clearcoat/README.md
    KHR_materials_clearcoat = 1 << 14,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_emissive_strength/README.md
    KHR_materials_emissive_strength = 1 << 15,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_sheen/README.md
    KHR_materials_sheen = 1 << 16,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_unlit/README.md
    KHR_materials_unlit = 1 << 17,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_anisotropy/README.md
    KHR_materials_anisotropy = 1 << 18,

    // See https://github.com/KhronosGroup/glTF/tree/main/extensions/2.0/Vendor/EXT_mesh_gpu_instancing/README.md
    EXT_mesh_gpu_instancing = 1 << 19,

#if FASTGLTF_ENABLE_DEPRECATED_EXT
    // See https://github.com/KhronosGroup/glTF/tree/main/extensions/2.0/Archived/KHR_materials_pbrSpecularGlossiness/README.md
    KHR_materials_pbrSpecularGlossiness = 1 << 20,
#endif

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Vendor/MSFT_packing_normalRoughnessMetallic/README.md
    MSFT_packing_normalRoughnessMetallic = 1 << 21,

    // See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Vendor/MSFT_packing_occlusionRoughnessMetallic/README.md
    MSFT_packing_occlusionRoughnessMetallic = 1 << 22,
};
// clang-format on

// Bitwise operators (|, &, |=, &=, ~) so Extensions can be combined and tested
// like a set of flags.
FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(Extensions, Extensions, |)
FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(Extensions, Extensions, &)
FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(Extensions, Extensions, |)
FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(Extensions, Extensions, &)
FASTGLTF_UNARY_OP_TEMPLATE_MACRO(Extensions, ~)
// clang-format off
/**
 * Bitflags that alter the behaviour of the parser's load functions.
 */
enum class Options : std::uint64_t {
    None = 0,

    /**
     * This allows 5130 as an accessor component type. 5130 is the OpenGL constant GL_DOUBLE,
     * which is by default not listed as an allowed component type in the glTF spec.
     *
     * The glTF normally only allows these component types:
     * https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#accessor-data-types
     */
    AllowDouble = 1 << 0,

    /**
     * This skips validating the asset field, as it is usually there and not used anyway.
     */
    DontRequireValidAssetMember = 1 << 1,

    /**
     * Loads all the GLB buffers into CPU memory. If disabled, fastgltf will only provide
     * a byte offset and length into the GLB file, which can be useful when using APIs like
     * DirectStorage or Metal IO.
     */
    LoadGLBBuffers = 1 << 3,

    /**
     * Loads all external buffers into CPU memory. If disabled, fastgltf will only provide
     * a full file path to the file holding the buffer, which can be useful when using APIs
     * like DirectStorage or Metal IO. For images, LoadExternalImages has to be explicitly
     * specified, too, if required.
     */
    LoadExternalBuffers = 1 << 4,

    /**
     * This option makes fastgltf automatically decompose the transformation matrices of nodes
     * into the translation, rotation, and scale components. This might be useful to have only
     * TRS components, instead of matrices or TRS, which should simplify working with nodes,
     * especially with animations.
     */
    DecomposeNodeMatrices = 1 << 5,

    /**
     * This option makes fastgltf minimise the JSON file before parsing. In most cases,
     * minimising it beforehand actually reduces the time spent. However, there are plenty
     * of cases where this option slows down parsing drastically, which from my testing seem
     * to all be glTFs which contain embedded buffers and/or are already minimised. Note that
     * fastgltf only minimises the string if the data was loaded using GltfDataBuffer::loadFromFile
     * or GltfDataBuffer::copyBytes, and that the bytes will also be overwritten.
     */
    MinimiseJsonBeforeParsing = 1 << 6,

    /**
     * Loads all external images into CPU memory. It does not decode any texture data. Complementary
     * to LoadExternalBuffers.
     */
    LoadExternalImages = 1 << 7,

    /**
     * Lets fastgltf generate indices for all mesh primitives without indices. This currently
     * does not de-duplicate the vertices. This is entirely for compatibility and simplifying the
     * loading process.
     */
    GenerateMeshIndices = 1 << 8,
};
// clang-format on

// Bitwise operators (|, &, |=, &=, ~) so Options can be combined and tested
// like a set of flags.
FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(Options, Options, |)
FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(Options, Options, &)
FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(Options, Options, |)
FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(Options, Options, &)
FASTGLTF_UNARY_OP_TEMPLATE_MACRO(Options, ~)
// String representations of glTF 2.0 extension identifiers.
// These are the exact identifiers that appear in a glTF's extensionsUsed /
// extensionsRequired arrays, so comparisons against parsed JSON can reuse them.
namespace extensions {
constexpr std::string_view EXT_mesh_gpu_instancing = "EXT_mesh_gpu_instancing";
constexpr std::string_view EXT_meshopt_compression = "EXT_meshopt_compression";
constexpr std::string_view EXT_texture_webp = "EXT_texture_webp";
constexpr std::string_view KHR_lights_punctual = "KHR_lights_punctual";
constexpr std::string_view KHR_materials_anisotropy = "KHR_materials_anisotropy";
constexpr std::string_view KHR_materials_clearcoat = "KHR_materials_clearcoat";
constexpr std::string_view KHR_materials_emissive_strength = "KHR_materials_emissive_strength";
constexpr std::string_view KHR_materials_ior = "KHR_materials_ior";
constexpr std::string_view KHR_materials_iridescence = "KHR_materials_iridescence";
constexpr std::string_view KHR_materials_sheen = "KHR_materials_sheen";
constexpr std::string_view KHR_materials_specular = "KHR_materials_specular";
constexpr std::string_view KHR_materials_transmission = "KHR_materials_transmission";
constexpr std::string_view KHR_materials_unlit = "KHR_materials_unlit";
constexpr std::string_view KHR_materials_volume = "KHR_materials_volume";
constexpr std::string_view KHR_mesh_quantization = "KHR_mesh_quantization";
constexpr std::string_view KHR_texture_basisu = "KHR_texture_basisu";
constexpr std::string_view KHR_texture_transform = "KHR_texture_transform";
constexpr std::string_view MSFT_packing_normalRoughnessMetallic = "MSFT_packing_normalRoughnessMetallic";
constexpr std::string_view MSFT_packing_occlusionRoughnessMetallic = "MSFT_packing_occlusionRoughnessMetallic";
constexpr std::string_view MSFT_texture_dds = "MSFT_texture_dds";
#if FASTGLTF_ENABLE_DEPRECATED_EXT
constexpr std::string_view KHR_materials_pbrSpecularGlossiness = "KHR_materials_pbrSpecularGlossiness";
#endif
} // namespace extensions
// clang-format off
// An array of pairs of string representations of extension identifiers and their respective enum
// value used for enabling/disabling the loading of it. This also represents all extensions that
// fastgltf supports and understands.
// NOTE: SUPPORTED_EXTENSION_COUNT must always equal the number of entries in
// extensionStrings below; update both together when adding an extension.
#if FASTGLTF_ENABLE_DEPRECATED_EXT
static constexpr size_t SUPPORTED_EXTENSION_COUNT = 21;
#else
static constexpr size_t SUPPORTED_EXTENSION_COUNT = 20;
#endif
static constexpr std::array<std::pair<std::string_view, Extensions>, SUPPORTED_EXTENSION_COUNT> extensionStrings = {{
    { extensions::EXT_mesh_gpu_instancing, Extensions::EXT_mesh_gpu_instancing },
    { extensions::EXT_meshopt_compression, Extensions::EXT_meshopt_compression },
    { extensions::EXT_texture_webp, Extensions::EXT_texture_webp },
    { extensions::KHR_lights_punctual, Extensions::KHR_lights_punctual },
    { extensions::KHR_materials_anisotropy, Extensions::KHR_materials_anisotropy },
    { extensions::KHR_materials_clearcoat, Extensions::KHR_materials_clearcoat },
    { extensions::KHR_materials_emissive_strength, Extensions::KHR_materials_emissive_strength },
    { extensions::KHR_materials_ior, Extensions::KHR_materials_ior },
    { extensions::KHR_materials_iridescence, Extensions::KHR_materials_iridescence },
    { extensions::KHR_materials_sheen, Extensions::KHR_materials_sheen },
    { extensions::KHR_materials_specular, Extensions::KHR_materials_specular },
    { extensions::KHR_materials_transmission, Extensions::KHR_materials_transmission },
    { extensions::KHR_materials_unlit, Extensions::KHR_materials_unlit },
    { extensions::KHR_materials_volume, Extensions::KHR_materials_volume },
    { extensions::KHR_mesh_quantization, Extensions::KHR_mesh_quantization },
    { extensions::KHR_texture_basisu, Extensions::KHR_texture_basisu },
    { extensions::KHR_texture_transform, Extensions::KHR_texture_transform },
    { extensions::MSFT_packing_normalRoughnessMetallic, Extensions::MSFT_packing_normalRoughnessMetallic },
    { extensions::MSFT_packing_occlusionRoughnessMetallic, Extensions::MSFT_packing_occlusionRoughnessMetallic },
    { extensions::MSFT_texture_dds, Extensions::MSFT_texture_dds },
#if FASTGLTF_ENABLE_DEPRECATED_EXT
    { extensions::KHR_materials_pbrSpecularGlossiness,Extensions::KHR_materials_pbrSpecularGlossiness },
#endif
}};
// clang-format on
/**
 * Returns the name of the passed glTF extension.
 *
 * @note If \p extensions has more than one bit set (multiple extensions), this
 * will return the name of the first (lowest) set bit. Returns an empty string
 * when no known extension bit is set.
 */
#if FASTGLTF_CPP_20
constexpr
#else
inline
#endif
std::string_view stringifyExtension(Extensions extensions) {
    // Find the first set bit and mask the value to that. The shifted constant
    // must be of the (64-bit) underlying type: the previous `1 << position`
    // shifted a plain int, which is incorrect (and UB pre-C++20) for bit
    // positions >= 31.
    std::uint8_t position = 0;
    while (position < std::numeric_limits<std::underlying_type_t<Extensions>>::digits) {
        if (((to_underlying(extensions) >> position) & 1) != 0) {
            extensions &= static_cast<Extensions>(static_cast<std::underlying_type_t<Extensions>>(1) << position);
            break;
        }
        ++position;
    }

    // Look the isolated flag up in the supported-extension table.
    for (const auto& extensionString : extensionStrings)
        if (extensionString.second == extensions)
            return extensionString.first;
    return "";
}
#if !FASTGLTF_DISABLE_CUSTOM_MEMORY_POOL
/**
 * A simple bump ("arena") allocator. Memory is handed out sequentially from
 * blocks; nothing is ever reclaimed until the whole resource is destroyed
 * (see do_deallocate). Not thread-safe.
 */
class ChunkMemoryResource : public std::pmr::memory_resource {
    /**
     * The default size of the individual blocks we allocate.
     */
    constexpr static std::size_t blockSize = 2048;

    struct Block {
        std::unique_ptr<std::byte[]> data;
        std::size_t size;
        std::byte* dataPointer; // Points at the first unused byte within data.
    };
    SmallVector<Block, 4> blocks;
    std::size_t blockIdx = 0; // Index of the block allocations are currently served from.

public:
    explicit ChunkMemoryResource() {
        allocateNewBlock();
    }

    /**
     * Appends a fresh block. \p size allows creating an oversized block for
     * requests larger than the default block size; callers that don't care
     * get the default.
     */
    void allocateNewBlock(std::size_t size = blockSize) {
        auto& block = blocks.emplace_back();
        block.data = std::unique_ptr<std::byte[]>(new std::byte[size]);
        block.dataPointer = block.data.get();
        block.size = size;
    }

    [[nodiscard]] void* do_allocate(std::size_t bytes, std::size_t alignment) override {
        auto& block = blocks[blockIdx];
        const auto usedSize = static_cast<std::size_t>(block.dataPointer - block.data.get());
        std::size_t remaining = block.size - usedSize;
        void* alloc = block.dataPointer;
        // std::align bumps `alloc` up to `alignment` and subtracts the padding
        // from `remaining`; it returns nullptr if `bytes` don't fit afterwards.
        // (The previous code passed the *used* byte count as both the size and
        // space arguments, so any allocation needing padding spilled into a new
        // block, and requests larger than blockSize recursed forever.)
        if (std::align(alignment, bytes, alloc, remaining) == nullptr) {
            // The current block can't fit the aligned allocation. Open a new
            // block, sized generously enough that the recursive retry below is
            // guaranteed to succeed even for oversized or strongly aligned requests.
            allocateNewBlock(bytes + alignment > blockSize ? bytes + alignment : blockSize);
            ++blockIdx;
            return do_allocate(bytes, alignment);
        }
        block.dataPointer = static_cast<std::byte*>(alloc) + bytes;
        return alloc;
    }

    void do_deallocate([[maybe_unused]] void* p, [[maybe_unused]] std::size_t bytes, [[maybe_unused]] std::size_t alignment) override {
        // We currently do nothing, as we don't keep track of what portions of the blocks are still used.
        // Therefore, we keep all blocks alive until the destruction of this resource (parser).
    }

    [[nodiscard]] bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override {
        return this == std::addressof(other);
    }
};
#endif
/**
 * A type that stores an error together with an expected value.
 * To use this type, first call error() to inspect if any errors have occurred.
 * If error() is not fastgltf::Error::None,
 * calling get(), operator->(), and operator*() is undefined behaviour.
 */
template <typename T>
class Expected {
    static_assert(std::is_default_constructible_v<T>);
    static_assert(!std::is_same_v<Error, T>);

    Error err;
    T value;

public:
    explicit Expected(Error error) : err(error) {}
    explicit Expected(T&& value) : err(Error::None), value(std::move(value)) {}

    // Move-only: the contained value may be expensive (or impossible) to copy.
    Expected(const Expected<T>& other) = delete;
    Expected(Expected<T>&& other) noexcept : err(other.err), value(std::move(other.value)) {}
    Expected<T>& operator=(const Expected<T>& other) = delete;
    Expected<T>& operator=(Expected<T>&& other) noexcept {
        err = other.err;
        value = std::move(other.value);
        return *this;
    }

    /** Returns the stored error code; Error::None means the value is valid. */
    [[nodiscard]] Error error() const noexcept {
        return err;
    }

    /**
     * Returns a reference to the value of T.
     * When error() returns anything but Error::None, the returned value is undefined.
     */
    [[nodiscard]] T& get() noexcept {
        assert(err == Error::None);
        return value;
    }

    /**
     * Const overload of get(); same contract as the non-const overload.
     */
    [[nodiscard]] const T& get() const noexcept {
        assert(err == Error::None);
        return value;
    }

    /**
     * Returns the address of the value of T, or nullptr if error() returns anything but Error::None.
     */
    [[nodiscard]] T* get_if() noexcept {
        if (err != Error::None)
            return nullptr;
        return std::addressof(value);
    }

    /**
     * Const overload of get_if().
     */
    [[nodiscard]] const T* get_if() const noexcept {
        if (err != Error::None)
            return nullptr;
        return std::addressof(value);
    }

    // get<I> overloads power the structured-binding support declared in the
    // std::tuple_size/tuple_element specializations above.
    template <std::size_t I>
    auto& get() noexcept {
        if constexpr (I == 0) return err;
        else if constexpr (I == 1) return value;
    }

    template <std::size_t I>
    const auto& get() const noexcept {
        if constexpr (I == 0) return err;
        else if constexpr (I == 1) return value;
    }

    /**
     * Returns the address of the value of T.
     * When error() returns anything but Error::None, the returned value is undefined.
     */
    T* operator->() noexcept {
        assert(err == Error::None);
        return std::addressof(value);
    }

    /**
     * Returns the address of the const value of T.
     * When error() returns anything but Error::None, the returned value is undefined.
     */
    const T* operator->() const noexcept {
        assert(err == Error::None);
        return std::addressof(value);
    }

    // Lvalue overloads of operator* so the value can be inspected without
    // moving it out; previously only the rvalue overload existed, so a const
    // or lvalue Expected could not be dereferenced at all.
    T& operator*() & noexcept {
        assert(err == Error::None);
        return value;
    }

    const T& operator*() const& noexcept {
        assert(err == Error::None);
        return value;
    }

    T&& operator*() && noexcept {
        assert(err == Error::None);
        return std::move(value);
    }

    /** True when no error occurred and the value may be accessed. */
    operator bool() const noexcept {
        return err == Error::None;
    }
};
/**
 * Describes a user-mapped memory region together with a user-defined buffer ID.
 * Returned by a BufferMapCallback.
 */
struct BufferInfo {
    void* mappedMemory;      // Destination memory the user mapped for fastgltf to write into.
    CustomBufferId customId; // Opaque, user-chosen identifier for this buffer.
};
// Signatures of the user-supplied callbacks stored in ParserInternalConfig.
// In each case `userPointer` is the value of ParserInternalConfig::userPointer,
// forwarded verbatim.
using BufferMapCallback = BufferInfo(std::uint64_t bufferSize, void* userPointer);
using BufferUnmapCallback = void(BufferInfo* bufferInfo, void* userPointer);
using Base64DecodeCallback = void(std::string_view base64, std::uint8_t* dataOutput, std::size_t padding, std::size_t dataOutputSize, void* userPointer);
/**
 * Enum to represent the type of a glTF file. glTFs can either be the standard JSON file with
 * paths to buffers or with a base64 embedded buffers, or they can be in a so called GLB
 * container format which has two or more chunks of binary data, where one represents buffers
 * and the other contains the JSON string.
 */
enum class GltfType {
    glTF,    ///< Standard JSON-based glTF.
    GLB,     ///< Binary GLB container.
    Invalid, ///< The buffer does not appear to be either format.
};
/**
 * This function starts reading into the buffer and tries to determine what type of glTF container it is.
 * This should be used to know whether to call Parser::loadGLTF or Parser::loadBinaryGLTF.
 *
 * @return The type of the glTF file, either glTF, GLB, or Invalid if it was not determinable. If this function
 * returns Invalid it is highly likely that the buffer does not actually represent a valid glTF file.
 */
GltfType determineGltfFileType(GltfDataBuffer* buffer);
/**
 * Gets the amount of byte padding required on the GltfDataBuffer, as simdjson requires to be
 * able to overflow as it uses SIMD to load N bytes at a time.
 */
std::size_t getGltfBufferPadding() noexcept;
/**
 * This class holds a chunk of data that makes up a JSON string that the glTF parser will use
 * and read from.
 */
class GltfDataBuffer {
    friend class Parser;
    friend GltfType determineGltfFileType(GltfDataBuffer* buffer);

protected:
    // Capacity of the allocation behind bufferPointer, including padding bytes.
    std::size_t allocatedSize = 0;
    // Size of the actual data, excluding padding (what getBufferSize reports).
    std::size_t dataSize = 0;
    // Points at the data. May reference `buffer` below, or externally owned
    // memory when fromByteView saved a pointer without copying.
    std::byte* bufferPointer = nullptr;
    // Owning storage; only used when the data was copied or loaded from a file.
    std::unique_ptr<std::byte[]> buffer;
    // Path the data was loaded from, if any — presumably used to resolve
    // relative URIs during parsing; confirm against Parser's implementation.
    std::filesystem::path filePath = {};

public:
    explicit GltfDataBuffer() noexcept;

    /**
     * Constructs a new GltfDataBuffer from a span object, copying its data as there
     * is no guarantee for the allocation size to have the adequate padding.
     */
    explicit GltfDataBuffer(span<std::byte> data) noexcept;
    virtual ~GltfDataBuffer() noexcept;

    /**
     * Saves the pointer including its range. Does not copy any data. This requires the
     * original allocation to outlive the parsing of the glTF, so after the last relevant
     * call to fastgltf::Parser::loadGLTF. However, this function asks for a capacity size, as
     * the JSON parsing requires some padding. See fastgltf::getGltfBufferPadding for more information.
     * If the capacity does not have enough padding, the function will instead copy the bytes
     * with the copyBytes method. Also, it will set the padding bytes all to 0, so be sure to
     * not use that for any other data.
     */
    bool fromByteView(std::uint8_t* bytes, std::size_t byteCount, std::size_t capacity) noexcept;

    /**
     * This will create a copy of the passed bytes and allocate an adequately sized buffer.
     */
    bool copyBytes(const std::uint8_t* bytes, std::size_t byteCount) noexcept;

    /**
     * Loads the file with a optional byte offset into a memory buffer.
     */
    bool loadFromFile(const std::filesystem::path& path, std::uint64_t byteOffset = 0) noexcept;

    /**
     * Returns the size, in bytes, of the data held by this buffer, excluding
     * any padding added for the JSON parser.
     * @return The value of dataSize.
     */
    [[nodiscard]] inline std::size_t getBufferSize() const noexcept {
        return dataSize;
    }

    // Non-owning view over the held data (excluding padding).
    [[nodiscard]] explicit operator span<std::byte>() {
        return span<std::byte>(bufferPointer, dataSize);
    }
};
#if defined(__ANDROID__)
/**
 * A GltfDataBuffer that can additionally load files from inside an Android APK
 * through the NDK's AAssetManager.
 */
class AndroidGltfDataBuffer : public GltfDataBuffer {
    AAssetManager* assetManager; // Non-owning; supplied by the application and must outlive this buffer.

public:
    explicit AndroidGltfDataBuffer(AAssetManager* assetManager) noexcept;
    ~AndroidGltfDataBuffer() noexcept = default;

    /**
     * Loads a file from within an Android APK
     */
    bool loadFromAndroidAsset(const std::filesystem::path& path, std::uint64_t byteOffset = 0) noexcept;
};
#endif
/**
 * This function further validates all the input more strictly that is parsed from the glTF.
 * Realistically, this should not be necessary in Release applications, but could be helpful
 * when debugging an asset related issue.
 */
[[nodiscard]] Error validate(const Asset& asset);

/**
 * Some internals the parser passes on to each glTF instance.
 */
struct ParserInternalConfig {
    // User callbacks for custom buffer memory and base64 decoding; all optional
    // (nullptr = use fastgltf's defaults). See the callback aliases above.
    BufferMapCallback* mapCallback = nullptr;
    BufferUnmapCallback* unmapCallback = nullptr;
    Base64DecodeCallback* decodeCallback = nullptr;
    // Forwarded verbatim to every callback invocation.
    void* userPointer = nullptr;
    // The set of extensions the Parser was constructed with.
    Extensions extensions = Extensions::None;
};
/**
 * A parser for one or more glTF files. It uses a SIMD based JSON parser to maximize efficiency
 * and performance at runtime.
 *
 * @note This class is not thread-safe.
 */
class Parser {
// The simdjson parser object. We want to share it between runs, so it does not need to
// reallocate over and over again. We're hiding it here to not leak the simdjson header.
std::unique_ptr<simdjson::dom::parser> jsonParser;
// Callback and extension configuration applied by the setters below.
ParserInternalConfig config = {};
// NOTE(review): presumably holds the binary chunk while loadBinaryGLTF runs — confirm.
DataSource glbBuffer;
#if !FASTGLTF_DISABLE_CUSTOM_MEMORY_POOL
std::shared_ptr<ChunkMemoryResource> resourceAllocator;
#endif
// Directory passed to loadGLTF/loadBinaryGLTF; presumably used to resolve relative URIs.
std::filesystem::path directory;
Options options;
static auto getMimeTypeFromString(std::string_view mime) -> MimeType;
static void fillCategories(Category& inputCategories) noexcept;
[[nodiscard]] auto decodeDataUri(URIView& uri) const noexcept -> Expected<DataSource>;
[[nodiscard]] auto loadFileFromUri(URIView& uri) const noexcept -> Expected<DataSource>;
Error generateMeshIndices(Asset& asset) const;
// Per-category parse routines; each consumes the corresponding JSON array/object and
// fills the matching arrays of the Asset.
Error parseAccessors(simdjson::dom::array& array, Asset& asset);
Error parseAnimations(simdjson::dom::array& array, Asset& asset);
Error parseBuffers(simdjson::dom::array& array, Asset& asset);
Error parseBufferViews(simdjson::dom::array& array, Asset& asset);
Error parseCameras(simdjson::dom::array& array, Asset& asset);
Error parseExtensions(simdjson::dom::object& extensionsObject, Asset& asset);
Error parseImages(simdjson::dom::array& array, Asset& asset);
Error parseLights(simdjson::dom::array& array, Asset& asset);
Error parseMaterials(simdjson::dom::array& array, Asset& asset);
Error parseMeshes(simdjson::dom::array& array, Asset& asset);
Error parseNodes(simdjson::dom::array& array, Asset& asset);
Error parseSamplers(simdjson::dom::array& array, Asset& asset);
Error parseScenes(simdjson::dom::array& array, Asset& asset);
Error parseSkins(simdjson::dom::array& array, Asset& asset);
Error parseTextures(simdjson::dom::array& array, Asset& asset);
Expected<Asset> parse(simdjson::dom::object root, Category categories);
public:
explicit Parser(Extensions extensionsToLoad = Extensions::None) noexcept;
// Copying is deleted; Parser is move-only.
explicit Parser(const Parser& parser) = delete;
Parser(Parser&& parser) noexcept;
Parser& operator=(const Parser& parser) = delete;
Parser& operator=(Parser&& other) noexcept;
~Parser();
/**
 * Loads a glTF file from pre-loaded bytes representing a JSON file.
 *
 * @return An Asset wrapped in an Expected type, which may contain an error if one occurred.
 */
[[nodiscard]] Expected<Asset> loadGLTF(GltfDataBuffer* buffer, std::filesystem::path directory, Options options = Options::None, Category categories = Category::All);
/**
 * Loads a glTF file embedded within a GLB container, which may contain the first buffer of the glTF asset.
 *
 * @return An Asset wrapped in an Expected type, which may contain an error if one occurred.
 */
[[nodiscard]] Expected<Asset> loadBinaryGLTF(GltfDataBuffer* buffer, std::filesystem::path directory, Options options = Options::None, Category categories = Category::All);
/**
 * This function can be used to set callbacks so that you can control memory allocation for
 * large buffers and images that are loaded from a glTF file. For example, one could use
 * the callbacks to map a GPU buffer through Vulkan or DirectX so that fastgltf can write
 * the buffer directly to the GPU to avoid a copy into RAM first. To remove the callbacks
 * for a specific load, call this method with both parameters as nullptr before load*GLTF.
 * Using Parser::setUserPointer you can also set a user pointer to access your
 * own class or other data you may need.
 *
 * @param mapCallback function called when the parser requires a buffer to write data
 * embedded in a GLB file or decoded from a base64 URI, cannot be nullptr.
 * @param unmapCallback function called when the parser is done with writing into a
 * buffer, can be nullptr.
 * @note This is likely only useful for advanced users who know what they're doing.
 */
void setBufferAllocationCallback(BufferMapCallback* mapCallback, BufferUnmapCallback* unmapCallback = nullptr) noexcept;
/**
 * Allows setting callbacks for base64 decoding.
 * This can be useful if you have another base64 decoder optimised for a certain platform or architecture,
 * or want to use your own scheduler to schedule multiple threads for working on decoding individual chunks of the data.
 * Using Parser::setUserPointer you can also set a user pointer to access your own class or other data you may need.
 *
 * It is still recommended to use fastgltf's base64 decoding features as they're highly optimised
 * for SSE4, AVX2, and ARM Neon.
 *
 * @param decodeCallback function called when the parser tries to decode a base64 buffer
 */
void setBase64DecodeCallback(Base64DecodeCallback* decodeCallback) noexcept;
// Sets the user pointer that is forwarded to the map/unmap/decode callbacks.
void setUserPointer(void* pointer) noexcept;
};
} // namespace fastgltf
#ifdef _MSC_VER
#pragma warning(pop)
#endif

View File

@@ -0,0 +1,653 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <cstring>
#include <iterator>
#include "types.hpp"
namespace fastgltf {
// Maps a C++ arithmetic type to the matching glTF ComponentType enum value. Only the
// specializations below are valid component types; any other type fails to compile via
// the undefined primary template.
template <typename>
struct ComponentTypeConverter;
template<>
struct ComponentTypeConverter<std::int8_t> {
static constexpr auto type = ComponentType::Byte;
};
template<>
struct ComponentTypeConverter<std::uint8_t> {
static constexpr auto type = ComponentType::UnsignedByte;
};
template<>
struct ComponentTypeConverter<std::int16_t> {
static constexpr auto type = ComponentType::Short;
};
template<>
struct ComponentTypeConverter<std::uint16_t> {
static constexpr auto type = ComponentType::UnsignedShort;
};
template<>
struct ComponentTypeConverter<std::int32_t> {
static constexpr auto type = ComponentType::Int;
};
template<>
struct ComponentTypeConverter<std::uint32_t> {
static constexpr auto type = ComponentType::UnsignedInt;
};
template<>
struct ComponentTypeConverter<float> {
static constexpr auto type = ComponentType::Float;
};
template<>
struct ComponentTypeConverter<double> {
static constexpr auto type = ComponentType::Double;
};
// Common base for ElementTraits specializations: records the element type, its accessor
// type (Scalar/VecN/MatN), and the component type both as a C++ type and as the
// ComponentType enum value (derived through ComponentTypeConverter).
template <typename ElementType, AccessorType EnumAccessorType, typename ComponentType = ElementType>
struct ElementTraitsBase {
using element_type = ElementType;
using component_type = ComponentType;
static constexpr auto type = EnumAccessorType;
static constexpr auto enum_component_type = ComponentTypeConverter<ComponentType>::type;
};
// Describes how an element type maps onto glTF accessor data. Users can specialize this
// for their own math types; the specializations below cover the scalar arithmetic types.
template <typename>
struct ElementTraits;
template<>
struct ElementTraits<std::int8_t> : ElementTraitsBase<std::int8_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::uint8_t> : ElementTraitsBase<std::uint8_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::int16_t> : ElementTraitsBase<std::int16_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::uint16_t> : ElementTraitsBase<std::uint16_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::int32_t> : ElementTraitsBase<std::int32_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::uint32_t> : ElementTraitsBase<std::uint32_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<float> : ElementTraitsBase<float, AccessorType::Scalar> {};
template<>
struct ElementTraits<double> : ElementTraitsBase<double, AccessorType::Scalar> {};
#if FASTGLTF_HAS_CONCEPTS
/**
 * Constrains types usable as accessor elements: the type must have ElementTraits with an
 * arithmetic component type, a valid accessor/component type pair, and must be default-
 * constructible, constructible, and move-assignable so the accessor helpers can build it.
 */
template <typename ElementType>
concept Element = std::is_arithmetic_v<typename ElementTraits<ElementType>::component_type>
&& ElementTraits<ElementType>::type != AccessorType::Invalid
&& ElementTraits<ElementType>::enum_component_type != ComponentType::Invalid
&& std::is_default_constructible_v<ElementType>
&& std::is_constructible_v<ElementType>
&& std::is_move_assignable_v<ElementType>;
#endif
namespace internal {
/**
 * Converts a single accessor component from SourceType to DestType.
 *
 * When 'normalized' is set, glTF's normalized-integer rules are applied between integer
 * and floating-point types: floats are scaled into the integer's full range and rounded,
 * and integers are divided by the source type's maximum (clamped so that e.g. both -128
 * and -127 map to -1.0 for a signed byte). Any other combination is a plain numeric cast.
 */
template <typename DestType, typename SourceType>
constexpr DestType convertComponent(const SourceType& source, bool normalized) {
	if (normalized) {
		if constexpr (std::is_floating_point_v<SourceType> && std::is_integral_v<DestType>) {
			// Normalized float -> integer: scale into the destination range, round to nearest.
			const auto scaled = source * static_cast<SourceType>(std::numeric_limits<DestType>::max());
			return static_cast<DestType>(std::round(scaled));
		} else if constexpr (std::is_integral_v<SourceType> && std::is_floating_point_v<DestType>) {
			// Normalized integer -> float: divide by the source type's maximum value.
			const auto ratio = static_cast<DestType>(source) / static_cast<DestType>(std::numeric_limits<SourceType>::max());
			// Clamp to -1.0 for signed destinations (so -128 and -127 both yield -1.0 for a
			// byte) and to 0.0 for unsigned ones.
			const auto lowest = std::is_signed_v<DestType> ? static_cast<DestType>(-1.0) : static_cast<DestType>(0.0);
			return ratio > lowest ? ratio : lowest;
		}
	}
	// Non-normalized, or same-kind conversions: a straight numeric cast suffices.
	return static_cast<DestType>(source);
}
// Reads the Index-th component of type SourceType from the raw byte stream and converts it
// to DestType. NOTE(review): the reinterpret_cast assumes 'bytes' is suitably aligned for
// SourceType — confirm callers guarantee this.
template <typename SourceType, typename DestType, std::size_t Index>
constexpr DestType convertComponent(const std::byte* bytes, bool normalized) {
return convertComponent<DestType>(reinterpret_cast<const SourceType*>(bytes)[Index], normalized);
}
// Builds one whole element from raw bytes by expanding the index sequence, reading and
// converting each component individually from SourceType to the traits' component type.
template <typename ElementType, typename SourceType, std::size_t... I>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
constexpr ElementType convertAccessorElement(const std::byte* bytes, bool normalized, std::index_sequence<I...>) {
using DestType = typename ElementTraits<ElementType>::component_type;
static_assert(std::is_arithmetic_v<DestType>, "Accessor traits must provide a valid component type");
// Aggregates are brace-initialized directly; other types go through list-initialization
// of the element type.
if constexpr (std::is_aggregate_v<ElementType>) {
return {convertComponent<SourceType, DestType, I>(bytes, normalized)...};
} else {
return ElementType{convertComponent<SourceType, DestType, I>(bytes, normalized)...};
}
}
// Reads one element starting at 'bytes' and converts it to ElementType, dispatching on the
// runtime componentType to choose the correct source type for the per-component conversion.
// An invalid component type yields a value-initialized element.
template <typename ElementType,
typename Seq = std::make_index_sequence<getNumComponents(ElementTraits<ElementType>::type)>>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
ElementType getAccessorElementAt(ComponentType componentType, const std::byte* bytes, bool normalized = false) {
switch (componentType) {
case ComponentType::Byte:
return convertAccessorElement<ElementType, std::int8_t>(bytes, normalized, Seq{});
case ComponentType::UnsignedByte:
return convertAccessorElement<ElementType, std::uint8_t>(bytes, normalized, Seq{});
case ComponentType::Short:
return convertAccessorElement<ElementType, std::int16_t>(bytes, normalized, Seq{});
case ComponentType::UnsignedShort:
return convertAccessorElement<ElementType, std::uint16_t>(bytes, normalized, Seq{});
case ComponentType::Int:
return convertAccessorElement<ElementType, std::int32_t>(bytes, normalized, Seq{});
case ComponentType::UnsignedInt:
return convertAccessorElement<ElementType, std::uint32_t>(bytes, normalized, Seq{});
case ComponentType::Float:
return convertAccessorElement<ElementType, float>(bytes, normalized, Seq{});
case ComponentType::Double:
return convertAccessorElement<ElementType, double>(bytes, normalized, Seq{});
case ComponentType::Invalid:
default:
return ElementType{};
}
}
// Performs a binary search for the index into the sparse index list whose value matches the desired index
template <typename ElementType>
bool findSparseIndex(const std::byte* bytes, std::size_t indexCount, std::size_t desiredIndex,
std::size_t& resultIndex) {
auto* elements = reinterpret_cast<const ElementType*>(bytes);
auto count = indexCount;
resultIndex = 0;
while (count > 0) {
auto step = count / 2;
auto index = resultIndex + step;
if (elements[index] < static_cast<ElementType>(desiredIndex)) {
resultIndex = index + 1;
count -= step + 1;
} else {
count = step;
}
}
return resultIndex < indexCount && elements[resultIndex] == static_cast<ElementType>(desiredIndex);
}
// Dispatches to the typed findSparseIndex implementation matching the runtime component
// type of the sparse indices. Float, Double, and Invalid are not valid sparse index
// component types and always fail the lookup.
inline bool findSparseIndex(ComponentType componentType, const std::byte* bytes, std::size_t indexCount,
                            std::size_t desiredIndex, std::size_t& resultIndex) {
	switch (componentType) {
		case ComponentType::Byte:
			return findSparseIndex<std::int8_t>(bytes, indexCount, desiredIndex, resultIndex);
		case ComponentType::UnsignedByte:
			return findSparseIndex<std::uint8_t>(bytes, indexCount, desiredIndex, resultIndex);
		case ComponentType::Short:
			return findSparseIndex<std::int16_t>(bytes, indexCount, desiredIndex, resultIndex);
		case ComponentType::UnsignedShort:
			return findSparseIndex<std::uint16_t>(bytes, indexCount, desiredIndex, resultIndex);
		case ComponentType::Int:
			return findSparseIndex<std::int32_t>(bytes, indexCount, desiredIndex, resultIndex);
		case ComponentType::UnsignedInt:
			return findSparseIndex<std::uint32_t>(bytes, indexCount, desiredIndex, resultIndex);
		default:
			// Float, Double, Invalid: never valid for sparse indices.
			return false;
	}
}
} // namespace internal
/**
 * Default adapter mapping a fastgltf Buffer to a pointer over its raw bytes. Only vector
 * and byte-view data sources yield a valid pointer; every other source returns nullptr.
 */
struct DefaultBufferDataAdapter {
const std::byte* operator()(const Buffer& buffer) const {
return std::visit(visitor {
// Fallback for all data sources that do not hold CPU-accessible bytes.
[](auto&) -> const std::byte* {
return nullptr;
},
[&](const sources::Vector& vec) {
return reinterpret_cast<const std::byte*>(vec.bytes.data());
},
[&](const sources::ByteView& bv) {
return bv.bytes.data();
},
}, buffer.data);
}
};
template <typename ElementType, typename BufferDataAdapter>
class IterableAccessor;
/**
 * Iterator over the elements of an IterableAccessor. Sparse accessors are handled
 * transparently: whenever the current element index equals the next sparse index, the
 * value is read from the sparse values buffer instead of the dense buffer.
 */
template <typename ElementType, typename BufferDataAdapter = DefaultBufferDataAdapter>
class AccessorIterator {
protected:
const IterableAccessor<ElementType, BufferDataAdapter>* accessor;
std::size_t idx; // Current element index within the accessor.
std::size_t sparseIdx = 0; // Current position within the sparse index/value arrays.
std::size_t nextSparseIndex = 0; // Element index the next sparse override applies to.
public:
using value_type = ElementType;
using reference = ElementType&;
using pointer = ElementType*;
using difference_type = std::ptrdiff_t;
// This iterator isn't truly random access (as per the C++ definition), but we do want to support
// some things that these come with (e.g. std::distance using operator-).
using iterator_category = std::random_access_iterator_tag;
AccessorIterator(const IterableAccessor<ElementType, BufferDataAdapter>* accessor, std::size_t idx = 0)
: accessor(accessor), idx(idx) {
if (accessor->accessor.sparse.has_value()) {
// Get the first sparse index.
nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(accessor->indexComponentType,
accessor->indicesBytes + accessor->indexStride * sparseIdx);
}
}
AccessorIterator& operator++() noexcept {
++idx;
return *this;
}
AccessorIterator operator++(int) noexcept {
auto x = *this;
++(*this);
return x;
}
[[nodiscard]] difference_type operator-(const AccessorIterator& other) const noexcept {
return static_cast<difference_type>(idx - other.idx);
}
[[nodiscard]] bool operator==(const AccessorIterator& iterator) const noexcept {
// We don't compare sparse properties
return idx == iterator.idx &&
accessor->bufferBytes == iterator.accessor->bufferBytes &&
accessor->stride == iterator.accessor->stride &&
accessor->componentType == iterator.accessor->componentType;
}
[[nodiscard]] bool operator!=(const AccessorIterator& iterator) const noexcept {
return !(*this == iterator);
}
// Dereferencing returns the element by value; advancing sparse bookkeeping is a side
// effect, so dereferencing the same position twice at a sparse index is not idempotent.
[[nodiscard]] ElementType operator*() noexcept {
if (accessor->accessor.sparse.has_value()) {
if (idx == nextSparseIndex) {
// Get the sparse value for this index
auto value = internal::getAccessorElementAt<ElementType>(accessor->componentType,
accessor->valuesBytes + accessor->valueStride * sparseIdx,
accessor->accessor.normalized);
// Find the next sparse index.
++sparseIdx;
if (sparseIdx < accessor->sparseCount) {
nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(accessor->indexComponentType,
accessor->indicesBytes + accessor->indexStride * sparseIdx);
}
return value;
}
}
return internal::getAccessorElementAt<ElementType>(accessor->componentType,
accessor->bufferBytes + idx * accessor->stride,
accessor->accessor.normalized);
}
};
/**
 * Wraps an accessor, resolving its buffer pointers and strides once up front, so that its
 * elements can be iterated with a range-based for loop via AccessorIterator.
 */
template <typename ElementType, typename BufferDataAdapter = DefaultBufferDataAdapter>
class IterableAccessor {
friend class AccessorIterator<ElementType, BufferDataAdapter>;
const Asset& asset;
const Accessor& accessor;
const std::byte* bufferBytes; // Start of dense element data (view offset + accessor offset applied).
std::size_t stride; // Byte distance between elements; defaults to the packed element size.
fastgltf::ComponentType componentType;
// Data needed for sparse accessors
fastgltf::ComponentType indexComponentType;
const std::byte* indicesBytes;
const std::byte* valuesBytes;
std::size_t indexStride;
std::size_t valueStride;
std::size_t sparseCount;
public:
using iterator = AccessorIterator<ElementType, BufferDataAdapter>;
// NOTE(review): accessor.bufferViewIndex is dereferenced unchecked below — the
// constructor assumes the accessor has a buffer view; confirm callers guarantee this.
explicit IterableAccessor(const Asset& asset, const Accessor& accessor, const BufferDataAdapter& adapter) : asset(asset), accessor(accessor) {
componentType = accessor.componentType;
const auto& view = asset.bufferViews[*accessor.bufferViewIndex];
stride = view.byteStride ? *view.byteStride : getElementByteSize(accessor.type, accessor.componentType);
bufferBytes = adapter(asset.buffers[view.bufferIndex]);
bufferBytes += view.byteOffset + accessor.byteOffset;
if (accessor.sparse.has_value()) {
const auto& indicesView = asset.bufferViews[accessor.sparse->indicesBufferView];
indicesBytes = adapter(asset.buffers[indicesView.bufferIndex])
+ indicesView.byteOffset + accessor.sparse->indicesByteOffset;
indexStride = getElementByteSize(AccessorType::Scalar, accessor.sparse->indexComponentType);
const auto& valuesView = asset.bufferViews[accessor.sparse->valuesBufferView];
valuesBytes = adapter(asset.buffers[valuesView.bufferIndex])
+ valuesView.byteOffset + accessor.sparse->valuesByteOffset;
// "The index of the bufferView with sparse values. The referenced buffer view MUST NOT
// have its target or byteStride properties defined."
valueStride = getElementByteSize(accessor.type, accessor.componentType);
indexComponentType = accessor.sparse->indexComponentType;
sparseCount = accessor.sparse->count;
}
}
[[nodiscard]] iterator begin() const noexcept {
return iterator(this, 0);
}
[[nodiscard]] iterator end() const noexcept {
return iterator(this, accessor.count);
}
};
/**
 * Returns the element at 'index' from the accessor. A sparse override, if present, is
 * located first by binary search over the sparse indices; an accessor without a buffer
 * view yields a zero-initialized element as required by glTF 5.1.1.
 */
template <typename ElementType, typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
ElementType getAccessorElement(const Asset& asset, const Accessor& accessor, size_t index,
const BufferDataAdapter& adapter = {}) {
using Traits = ElementTraits<ElementType>;
static_assert(Traits::type != AccessorType::Invalid, "Accessor traits must provide a valid Accessor Type");
static_assert(std::is_default_constructible_v<ElementType>, "Element type must be default constructible");
static_assert(std::is_constructible_v<ElementType>, "Element type must be constructible");
static_assert(std::is_move_assignable_v<ElementType>, "Element type must be move-assignable");
if (accessor.sparse) {
const auto& indicesView = asset.bufferViews[accessor.sparse->indicesBufferView];
auto* indicesBytes = adapter(asset.buffers[indicesView.bufferIndex])
+ indicesView.byteOffset + accessor.sparse->indicesByteOffset;
const auto& valuesView = asset.bufferViews[accessor.sparse->valuesBufferView];
auto* valuesBytes = adapter(asset.buffers[valuesView.bufferIndex])
+ valuesView.byteOffset + accessor.sparse->valuesByteOffset;
// "The index of the bufferView with sparse values. The referenced buffer view MUST NOT
// have its target or byteStride properties defined."
auto valueStride = getElementByteSize(accessor.type, accessor.componentType);
std::size_t sparseIndex{};
if (internal::findSparseIndex(accessor.sparse->indexComponentType, indicesBytes, accessor.sparse->count,
index, sparseIndex)) {
return internal::getAccessorElementAt<ElementType>(accessor.componentType,
valuesBytes + valueStride * sparseIndex,
accessor.normalized);
}
}
// 5.1.1. accessor.bufferView
// The index of the buffer view. When undefined, the accessor MUST be initialized with zeros; sparse
// property or extensions MAY override zeros with actual values.
if (!accessor.bufferViewIndex) {
if constexpr (std::is_aggregate_v<ElementType>) {
return {};
} else {
return ElementType{};
}
}
const auto& view = asset.bufferViews[*accessor.bufferViewIndex];
auto stride = view.byteStride ? *view.byteStride : getElementByteSize(accessor.type, accessor.componentType);
auto* bytes = adapter(asset.buffers[view.bufferIndex]);
bytes += view.byteOffset + accessor.byteOffset;
return internal::getAccessorElementAt<ElementType>(accessor.componentType, bytes + index * stride, accessor.normalized);
}
/**
 * Creates an IterableAccessor over the given accessor so its elements can be visited with
 * a range-based for loop.
 */
template<typename ElementType, typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
IterableAccessor<ElementType, BufferDataAdapter> iterateAccessor(const Asset& asset, const Accessor& accessor, const BufferDataAdapter& adapter = {}) {
return IterableAccessor<ElementType, BufferDataAdapter>(asset, accessor, adapter);
}
/**
 * Invokes 'func' once per accessor element, in index order. Sparse accessors are resolved
 * by overriding the dense value at each sparse index; an accessor without a buffer view
 * yields zero-initialized elements (glTF 5.1.1). Returns without calling 'func' at all if
 * the accessor's type does not match ElementType's traits.
 */
template <typename ElementType, typename Functor, typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
void iterateAccessor(const Asset& asset, const Accessor& accessor, Functor&& func,
const BufferDataAdapter& adapter = {}) {
using Traits = ElementTraits<ElementType>;
static_assert(Traits::type != AccessorType::Invalid, "Accessor traits must provide a valid accessor type");
static_assert(Traits::enum_component_type != ComponentType::Invalid, "Accessor traits must provide a valid component type");
static_assert(std::is_default_constructible_v<ElementType>, "Element type must be default constructible");
static_assert(std::is_constructible_v<ElementType>, "Element type must be constructible");
static_assert(std::is_move_assignable_v<ElementType>, "Element type must be move-assignable");
if (accessor.type != Traits::type) {
return;
}
if (accessor.sparse && accessor.sparse->count > 0) {
// Resolve the sparse index and value buffers once up front.
auto& indicesView = asset.bufferViews[accessor.sparse->indicesBufferView];
auto* indicesBytes = adapter(asset.buffers[indicesView.bufferIndex])
+ indicesView.byteOffset + accessor.sparse->indicesByteOffset;
auto indexStride = getElementByteSize(AccessorType::Scalar, accessor.sparse->indexComponentType);
auto& valuesView = asset.bufferViews[accessor.sparse->valuesBufferView];
auto* valuesBytes = adapter(asset.buffers[valuesView.bufferIndex])
+ valuesView.byteOffset + accessor.sparse->valuesByteOffset;
// "The index of the bufferView with sparse values. The referenced buffer view MUST NOT
// have its target or byteStride properties defined."
auto valueStride = getElementByteSize(accessor.type, accessor.componentType);
const std::byte* srcBytes = nullptr;
std::size_t srcStride = 0;
// 5.1.1. accessor.bufferView
// The index of the buffer view. When undefined, the accessor MUST be initialized with zeros; sparse
// property or extensions MAY override zeros with actual values.
if (accessor.bufferViewIndex) {
auto& view = asset.bufferViews[*accessor.bufferViewIndex];
srcBytes = adapter(asset.buffers[view.bufferIndex]) + view.byteOffset + accessor.byteOffset;
srcStride = view.byteStride ? *view.byteStride
: getElementByteSize(accessor.type, accessor.componentType);
}
// Walk the dense range while merging the (sorted) sparse overrides in a single pass.
auto nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(
accessor.sparse->indexComponentType, indicesBytes);
std::size_t sparseIndexCount = 0;
for (std::size_t i = 0; i < accessor.count; ++i) {
if (i == nextSparseIndex) {
func(internal::getAccessorElementAt<ElementType>(accessor.componentType,
valuesBytes + valueStride * sparseIndexCount,
accessor.normalized));
++sparseIndexCount;
if (sparseIndexCount < accessor.sparse->count) {
nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(
accessor.sparse->indexComponentType, indicesBytes + indexStride * sparseIndexCount);
}
} else if (accessor.bufferViewIndex) {
func(internal::getAccessorElementAt<ElementType>(accessor.componentType,
srcBytes + srcStride * i,
accessor.normalized));
} else {
func(ElementType{});
}
}
return;
}
// 5.1.1. accessor.bufferView
// The index of the buffer view. When undefined, the accessor MUST be initialized with zeros; sparse
// property or extensions MAY override zeros with actual values.
if (!accessor.bufferViewIndex) {
for (std::size_t i = 0; i < accessor.count; ++i) {
func(ElementType{});
}
}
else {
auto& view = asset.bufferViews[*accessor.bufferViewIndex];
auto stride = view.byteStride ? *view.byteStride : getElementByteSize(accessor.type, accessor.componentType);
auto* bytes = adapter(asset.buffers[view.bufferIndex]);
bytes += view.byteOffset + accessor.byteOffset;
for (std::size_t i = 0; i < accessor.count; ++i) {
func(internal::getAccessorElementAt<ElementType>(accessor.componentType, bytes + i * stride, accessor.normalized));
}
}
}
/**
 * Like iterateAccessor, but additionally passes the element index (starting at 0) as the
 * functor's second argument.
 */
template <typename ElementType, typename Functor, typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
void iterateAccessorWithIndex(const Asset& asset, const Accessor& accessor, Functor&& func,
const BufferDataAdapter& adapter = {}) {
std::size_t idx = 0;
iterateAccessor<ElementType>(asset, accessor, [&](auto&& elementType) {
func(std::forward<ElementType>(elementType), idx++);
}, adapter);
}
/**
 * Copies every element of the accessor into 'dest', writing one element every TargetStride
 * bytes. A single memcpy fast path is used when the element is trivially copyable, the
 * data is not normalized, the component type already matches, and the strides line up;
 * otherwise each element is converted individually. Sparse accessors are delegated to
 * iterateAccessorWithIndex, and accessors without a buffer view are written as zeros
 * (glTF 5.1.1). Does nothing if the accessor's type does not match ElementType's traits.
 */
template <typename ElementType, std::size_t TargetStride = sizeof(ElementType),
typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
void copyFromAccessor(const Asset& asset, const Accessor& accessor, void* dest,
const BufferDataAdapter& adapter = {}) {
using Traits = ElementTraits<ElementType>;
static_assert(Traits::type != AccessorType::Invalid, "Accessor traits must provide a valid accessor type");
static_assert(Traits::enum_component_type != ComponentType::Invalid, "Accessor traits must provide a valid component type");
static_assert(std::is_default_constructible_v<ElementType>, "Element type must be default constructible");
static_assert(std::is_constructible_v<ElementType>, "Element type must be constructible");
static_assert(std::is_move_assignable_v<ElementType>, "Element type must be move-assignable");
if (accessor.type != Traits::type) {
return;
}
auto* dstBytes = reinterpret_cast<std::byte*>(dest);
if (accessor.sparse && accessor.sparse->count > 0) {
return iterateAccessorWithIndex<ElementType>(asset, accessor, [&](auto&& value, std::size_t index) {
auto* pDest = reinterpret_cast<ElementType*>(dstBytes + TargetStride * index);
*pDest = std::forward<ElementType>(value);
}, adapter);
}
auto elemSize = getElementByteSize(accessor.type, accessor.componentType);
// 5.1.1. accessor.bufferView
// The index of the buffer view. When undefined, the accessor MUST be initialized with zeros; sparse
// property or extensions MAY override zeros with actual values.
if (!accessor.bufferViewIndex) {
if constexpr (std::is_trivially_copyable_v<ElementType>) {
if (TargetStride == elemSize) {
std::memset(dest, 0, elemSize * accessor.count);
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
std::memset(dstBytes + i * TargetStride, 0, elemSize);
}
}
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
auto* pDest = reinterpret_cast<ElementType*>(dstBytes + TargetStride * i);
if constexpr (std::is_aggregate_v<ElementType>) {
*pDest = {};
} else {
*pDest = ElementType{};
}
}
}
return;
}
auto& view = asset.bufferViews[*accessor.bufferViewIndex];
auto srcStride = view.byteStride ? *view.byteStride
: getElementByteSize(accessor.type, accessor.componentType);
auto* srcBytes = adapter(asset.buffers[view.bufferIndex]) + view.byteOffset + accessor.byteOffset;
// We have to perform normalization if the accessor is marked as containing normalized data, which is why
// we can't just memcpy then.
if (std::is_trivially_copyable_v<ElementType> && !accessor.normalized && accessor.componentType == Traits::enum_component_type) {
if (srcStride == elemSize && srcStride == TargetStride) {
std::memcpy(dest, srcBytes, elemSize * accessor.count);
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
std::memcpy(dstBytes + TargetStride * i, srcBytes + srcStride * i, elemSize);
}
}
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
auto* pDest = reinterpret_cast<ElementType*>(dstBytes + TargetStride * i);
*pDest = internal::getAccessorElementAt<ElementType>(accessor.componentType, srcBytes + srcStride * i);
}
}
}
} // namespace fastgltf

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,334 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <array>
#include <cmath>
#include <limits>
#include <string_view>
#include <type_traits>
// Macros to determine C++ standard version
#if (!defined(_MSVC_LANG) && __cplusplus >= 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
#define FASTGLTF_CPP_17 1
#else
#error "fastgltf requires C++17"
#endif
#if (!defined(_MSVC_LANG) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
#define FASTGLTF_CPP_20 1
#else
#define FASTGLTF_CPP_20 0
#endif
#if (!defined(_MSVC_LANG) && __cplusplus >= 202302L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202302L)
#define FASTGLTF_CPP_23 1
#else
#define FASTGLTF_CPP_23 0
#endif
#if FASTGLTF_CPP_20 && defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L
#define FASTGLTF_HAS_BIT 1
#include <bit>
#else
#define FASTGLTF_HAS_BIT 0
#endif
#if FASTGLTF_CPP_20 && defined(__cpp_concepts) && __cpp_concepts >= 202002L
#define FASTGLTF_HAS_CONCEPTS 1
#include <concepts>
#else
#define FASTGLTF_HAS_CONCEPTS 0
#endif
#if FASTGLTF_CPP_23
#define FASTGLTF_UNREACHABLE std::unreachable();
#elif defined(__GNUC__) || defined(__clang__)
#define FASTGLTF_UNREACHABLE __builtin_unreachable();
#elif defined(_MSC_VER)
#define FASTGLTF_UNREACHABLE __assume(false);
#else
#define FASTGLTF_UNREACHABLE assert(0);
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 5030) // attribute 'x' is not recognized
#pragma warning(disable : 4514) // unreferenced inline function has been removed
#endif
namespace fastgltf {
/**
 * Backport of C++23 std::to_underlying: converts an enum value to its underlying
 * integer type.
 */
template<typename T>
#if FASTGLTF_HAS_CONCEPTS
requires std::is_enum_v<T>
#endif
[[nodiscard]] constexpr std::underlying_type_t<T> to_underlying(T t) noexcept {
#if !FASTGLTF_HAS_CONCEPTS
	static_assert(std::is_enum_v<T>, "to_underlying only works with enum types.");
#endif
	using underlying = std::underlying_type_t<T>;
	return static_cast<underlying>(t);
}
/**
 * Returns true when every bit of 'bit' is set within 'flags'. T must be an integer or an
 * enum with an integral underlying type that supports operator& with U.
 */
template <typename T, typename U>
#if FASTGLTF_HAS_CONCEPTS
requires ((std::is_enum_v<T> && std::integral<std::underlying_type_t<T>>) || std::integral<T>) && requires (T t, U u) {
	{ t & u } -> std::same_as<U>;
}
#endif
[[nodiscard]] constexpr bool hasBit(T flags, U bit) {
#if !FASTGLTF_HAS_CONCEPTS
	static_assert((std::is_enum_v<T> && std::is_integral_v<std::underlying_type_t<T>>) || std::is_integral_v<T>);
#endif
	// All requested bits must survive the mask for the test to pass.
	const auto masked = flags & bit;
	return masked == bit;
}
/**
 * Rounds 'base' up to the nearest multiple of 'alignment'.
 * NOTE(review): the bit trick below is only correct for power-of-two alignments — confirm
 * all callers pass one.
 */
template <typename T>
[[nodiscard]] constexpr T alignUp(T base, T alignment) {
	static_assert(std::is_signed_v<T>, "alignUp requires type T to be signed.");
	const T bumped = base + alignment - 1;
	return bumped & -alignment;
}
/**
 * Rounds `base` down to the nearest multiple of `alignment`.
 */
template <typename T>
[[nodiscard]] constexpr T alignDown(T base, T alignment) {
    // Equivalent to base - (base % alignment) by the C++ division identity.
    return (base / alignment) * alignment;
}
/**
 * Returns the greater of the two values; `a` wins ties.
 */
template <typename T>
#if FASTGLTF_HAS_CONCEPTS
requires requires (T t) {
    { t > t } -> std::same_as<bool>;
}
#endif
[[nodiscard]] constexpr T max(T a, T b) noexcept {
    if (a > b) {
        return a;
    }
    return b;
}
/**
 * Decomposes a column-major 4x4 transform matrix (glTF convention) into the
 * translation, rotation, and scale components. This function does not support
 * skew, shear, or perspective. This currently uses a quick algorithm to
 * calculate the quaternion from the rotation matrix, which might occasionally
 * lose some precision, though we try to use doubles here.
 *
 * @param matrix Column-major 4x4 matrix, taken by value since it is modified in place.
 * @param scale Receives the per-axis scale (euclidean length of each basis column).
 * @param rotation Receives the rotation quaternion ordered x,y,z,w as glTF expects.
 * @param translation Receives the translation (elements 12..14 of the matrix).
 *
 * @note If any basis column has zero length the divisions below divide by
 * zero — callers are expected to pass a well-formed TRS matrix.
 */
inline void decomposeTransformMatrix(std::array<float, 16> matrix, std::array<float, 3>& scale, std::array<float, 4>& rotation, std::array<float, 3>& translation) {
    // Extract the translation. We zero the translation out, as we reuse the matrix as
    // the rotation matrix at the end.
    translation = {matrix[12], matrix[13], matrix[14]};
    matrix[12] = matrix[13] = matrix[14] = 0;
    // Extract the scale. We calculate the euclidean length of the columns. We then
    // construct a vector with those lengths. My gcc's stdlib doesn't include std::sqrtf
    // for some reason...
    auto s1 = sqrtf(matrix[0] * matrix[0] + matrix[1] * matrix[1] + matrix[2] * matrix[2]);
    auto s2 = sqrtf(matrix[4] * matrix[4] + matrix[5] * matrix[5] + matrix[6] * matrix[6]);
    auto s3 = sqrtf(matrix[8] * matrix[8] + matrix[9] * matrix[9] + matrix[10] * matrix[10]);
    scale = {s1, s2, s3};
    // Remove the scaling from the matrix, leaving only the rotation. matrix is now the
    // rotation matrix.
    matrix[0] /= s1; matrix[1] /= s1; matrix[2] /= s1;
    matrix[4] /= s2; matrix[5] /= s2; matrix[6] /= s2;
    matrix[8] /= s3; matrix[9] /= s3; matrix[10] /= s3;
    // Construct the quaternion. This algo is copied from here:
    // https://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/christian.htm.
    // glTF orders the components as x,y,z,w.
    // The max() clamps guard against tiny negative values produced by float
    // rounding before the square root below.
    rotation = {
        max(.0f, 1 + matrix[0] - matrix[5] - matrix[10]),
        max(.0f, 1 - matrix[0] + matrix[5] - matrix[10]),
        max(.0f, 1 - matrix[0] - matrix[5] + matrix[10]),
        max(.0f, 1 + matrix[0] + matrix[5] + matrix[10]),
    };
    for (auto& x : rotation) {
        // sqrt in double to keep a little extra precision before narrowing.
        x = static_cast<float>(std::sqrt(static_cast<double>(x)) / 2);
    }
    // Recover the signs of x,y,z from the off-diagonal differences of the rotation matrix.
    rotation[0] = std::copysignf(rotation[0], matrix[6] - matrix[9]);
    rotation[1] = std::copysignf(rotation[1], matrix[8] - matrix[2]);
    rotation[2] = std::copysignf(rotation[2], matrix[1] - matrix[4]);
}
/**
 * Byte-wise CRC-32C lookup table. Constants generated using the reflected
 * 0x82f63b79u CRC polynomial (Castagnoli), as used by the crc32c functions below.
 */
static constexpr std::array<std::uint32_t, 256> crcHashTable = {{
    0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
    0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
    0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
    0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
    0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
    0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
    0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
    0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
    0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
    0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
    0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
    0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
    0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
    0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
    0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
    0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
    0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
    0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
    0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
    0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
    0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
    0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
    0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
    0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
    0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
    0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
    0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
    0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
    0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
    0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
    0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
    0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351,
}};
/**
 * Computes the CRC-32C (Castagnoli) checksum of the given string using a
 * byte-wise table lookup. Note that this variant neither initialises the CRC
 * to all-ones nor inverts the final value.
 */
[[gnu::hot, gnu::const]] constexpr std::uint32_t crc32c(std::string_view str) noexcept {
    std::uint32_t checksum = 0;
    for (std::size_t i = 0; i < str.size(); ++i) {
        const auto byte = static_cast<std::uint8_t>(str[i]);
        checksum = (checksum >> 8) ^ crcHashTable[(checksum ^ byte) & 0xff];
    }
    return checksum;
}
/**
 * Computes the CRC-32C (Castagnoli) checksum over a raw byte buffer using a
 * byte-wise table lookup. Same parameters as the string_view overload, but
 * suitable for arbitrary binary data.
 */
[[gnu::hot, gnu::const]] constexpr std::uint32_t crc32c(const std::uint8_t* d, std::size_t len) noexcept {
    std::uint32_t checksum = 0;
    for (const std::uint8_t* it = d; it != d + len; ++it)
        checksum = (checksum >> 8) ^ crcHashTable[(checksum ^ *it) & 0xff];
    return checksum;
}
#if defined(__x86_64__) || defined(_M_AMD64) || defined(_M_IX86)
/**
 * Variant of crc32 that uses SSE4.2 instructions to increase performance. Note that this does not
 * check for availability of said instructions.
 */
[[gnu::hot, gnu::const]] std::uint32_t hwcrc32c(std::string_view str) noexcept;
// Raw-buffer overload of the hardware-accelerated CRC-32C above; same caveat
// about SSE4.2 availability applies.
// NOTE(review): this guard duplicates an x86 check that presumably matches a
// FASTGLTF_IS_X86-style macro defined earlier in the project — verify and unify.
[[gnu::hot, gnu::const]] std::uint32_t hwcrc32c(const std::uint8_t* d, std::size_t len) noexcept;
#endif
/**
 * Helper to force evaluation of constexpr functions at compile-time in C++17. One example of
 * this is with crc32: force_consteval<crc32("string")>. No matter the context, this will
 * always be evaluated to a constant.
 *
 * Works because a non-type template argument must be a constant expression,
 * so V is computed by the compiler before instantiation.
 */
template <auto V>
static constexpr auto force_consteval = V;
/**
 * Counts the leading zero bits, starting from the most significant bit.
 * Returns a std::uint8_t, as even a 64-bit type can have at most 64 leading
 * zeros. For value == 0 this returns the number of digits in T, matching
 * std::countl_zero's behaviour.
 */
template <typename T>
#if FASTGLTF_HAS_CONCEPTS
requires std::integral<T>
#endif
[[gnu::const]] inline std::uint8_t clz(T value) {
    static_assert(std::is_integral_v<T>);
#if FASTGLTF_HAS_BIT
    return static_cast<std::uint8_t>(std::countl_zero(value));
#else
    // Very naive but working implementation of counting zero bits. Any sane compiler will
    // optimise this away, like instead use the bsr x86 instruction.
    // Every bit is a leading zero. The previous hard-coded 64 was only correct
    // for 64-bit types; std::countl_zero returns the digit count of T instead.
    if (value == 0) return std::numeric_limits<T>::digits;
    std::uint8_t count = 0;
    // (value >> i) == 1 exactly when bit i is the most significant set bit.
    for (auto i = std::numeric_limits<T>::digits - 1; i > 0; --i) {
        if ((value >> i) == 1) {
            return count;
        }
        ++count;
    }
    return count;
#endif
}
/**
 * Counts the number of set bits (1s) in the given integer. Negative signed
 * values are counted by their two's complement bit pattern.
 */
template <typename T>
[[gnu::const]] inline std::uint8_t popcount(T value) {
    static_assert(std::is_integral_v<T>);
    // Work on the unsigned representation: std::popcount only accepts unsigned
    // types, and right-shifting a negative signed value is an arithmetic shift
    // that keeps the sign bit set, which made the fallback loop below never
    // terminate for negative inputs.
    auto bits_left = static_cast<std::make_unsigned_t<T>>(value);
#if FASTGLTF_HAS_BIT
    return static_cast<std::uint8_t>(std::popcount(bits_left));
#else
    std::uint8_t bits = 0;
    while (bits_left) {
        if (bits_left & 1)
            ++bits;
        bits_left >>= 1;
    }
    return bits;
#endif
}
/**
 * Essentially the same as std::is_same<T, U> but it accepts multiple different types for U,
 * checking if T is any of Ts...
 */
template <typename T, typename... Ts>
using is_any = std::disjunction<std::is_same<T, Ts>...>;
/**
 * Simple function to check if the given string starts with a given set of characters.
 * Returns true for an empty prefix and false when the prefix is longer than the string.
 */
inline bool startsWith(std::string_view str, std::string_view search) {
    // substr clamps to str's length, so a too-long prefix simply compares unequal.
    return str.substr(0, search.size()) == search;
}
/**
 * Helper type in order to allow building a visitor out of multiple lambdas within a call to
 * std::visit. Inherits each lambda and pulls all their operator()s into one
 * overload set.
 */
template<class... Ts>
struct visitor : Ts... {
    using Ts::operator()...;
};
// C++17 deduction guide so the lambda pack can be deduced: visitor{l1, l2, ...}.
template<class... Ts> visitor(Ts...) -> visitor<Ts...>;
// For simple ops like &, |, +, - taking a left and right operand.
// Defines `constexpr T1 operator op(T1, T2)` for two enum types by applying
// `op` to their underlying integer values.
#define FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(T1, T2, op) \
constexpr T1 operator op(const T1& a, const T2& b) noexcept { \
    static_assert(std::is_enum_v<T1> && std::is_enum_v<T2>); \
    return static_cast<T1>(to_underlying(a) op to_underlying(b)); \
}
// For any ops like |=, &=, +=, -=
// Defines the compound-assignment counterpart; returns the updated lhs by reference.
#define FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(T1, T2, op) \
constexpr T1& operator op##=(T1& a, const T2& b) noexcept { \
    static_assert(std::is_enum_v<T1> && std::is_enum_v<T2>); \
    return a = static_cast<T1>(to_underlying(a) op to_underlying(b)), a; \
}
// For unary +, unary -, and bitwise NOT
#define FASTGLTF_UNARY_OP_TEMPLATE_MACRO(T, op) \
constexpr T operator op(const T& a) noexcept { \
    static_assert(std::is_enum_v<T>); \
    return static_cast<T>(op to_underlying(a)); \
}
} // namespace fastgltf
#ifdef _MSC_VER
#pragma warning(pop)
#endif