initial commit - moved from vulkan_guide

2025-10-10 22:53:54 +09:00
commit 8853429937
2484 changed files with 973414 additions and 0 deletions


@@ -0,0 +1,60 @@
name: CI arm
on:
push:
branches: [ "main" ]
paths:
- '.github/workflows/**'
- '**.cpp'
- '**.hpp'
- '**.txt'
pull_request:
branches: [ "main" ]
env:
BUILD_TYPE: Release
jobs:
build_windows:
runs-on: windows-latest
strategy:
fail-fast: false
matrix:
arch: [ARM64]
steps:
- uses: actions/checkout@v3
- name: Install Python dependencies
run: pip3 install Jinja2
- name: Download dependencies
run: python3 fetch_test_deps.py
- name: Configure CMake
run: cmake -A ${{ matrix.arch }} -DCMAKE_CROSSCOMPILING=1 -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DFASTGLTF_ENABLE_TESTS=ON
- name: Build
run: cmake --build ${{ github.workspace }}/build --config ${{ env.BUILD_TYPE }} --verbose
build_windows_deprecated_extensions:
runs-on: windows-latest
strategy:
fail-fast: false
matrix:
arch: [ARM64]
steps:
- uses: actions/checkout@v3
- name: Install Python dependencies
run: pip3 install Jinja2
- name: Download dependencies
run: python3 fetch_test_deps.py
- name: Configure CMake
run: cmake -A ${{ matrix.arch }} -DCMAKE_CROSSCOMPILING=1 -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DFASTGLTF_ENABLE_TESTS=ON -DFASTGLTF_ENABLE_DEPRECATED_EXT=ON
- name: Build
run: cmake --build ${{ github.workspace }}/build --config ${{ env.BUILD_TYPE }} --verbose


@@ -0,0 +1,173 @@
name: CI x64
on:
push:
branches: [ "main" ]
paths:
- '.github/workflows/**'
- '**.cpp'
- '**.hpp'
- '**.txt'
pull_request:
branches: [ "main" ]
env:
BUILD_TYPE: Release
SAMPLE_MODELS_LOCATION: tests/gltf/glTF-Sample-Models
jobs:
build_windows:
runs-on: windows-latest
steps:
- uses: actions/checkout@v3
- uses: actions/cache@v3
id: sample-models-cache
with:
path: ${{ github.workspace }}/${{ env.SAMPLE_MODELS_LOCATION }}
key: gltf-sample-models
- name: Install Python dependencies
run: pip3 install Jinja2
- name: Download dependencies
run: python3 fetch_test_deps.py
- name: Clone glTF-Sample-Models
if: steps.sample-models-cache.outputs.cache-hit != 'true'
run: git clone https://github.com/KhronosGroup/glTF-Sample-Models ${{ github.workspace }}/${{ env.SAMPLE_MODELS_LOCATION }}
- name: Configure CMake
run: cmake -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DFASTGLTF_ENABLE_TESTS=ON
- name: Build (Windows)
run: cmake --build ${{ github.workspace }}/build --config ${{ env.BUILD_TYPE }} --target tests/fastgltf_tests --verbose
- name: Run tests
run: ${{ github.workspace }}\build\tests\Release\fastgltf_tests.exe -d yes --order lex [base64],[gltf-loader],[gltf-tools],[uri-tests],[vector-tests]
build_windows_deprecated_extensions:
runs-on: windows-latest
steps:
- uses: actions/checkout@v3
- uses: actions/cache@v3
id: sample-models-cache
with:
path: ${{ github.workspace }}/${{ env.SAMPLE_MODELS_LOCATION }}
key: gltf-sample-models
- name: Install Python dependencies
run: pip3 install Jinja2
- name: Download dependencies
run: python3 fetch_test_deps.py
- name: Clone glTF-Sample-Models
if: steps.sample-models-cache.outputs.cache-hit != 'true'
run: git clone https://github.com/KhronosGroup/glTF-Sample-Models ${{ github.workspace }}/${{ env.SAMPLE_MODELS_LOCATION }}
- name: Configure CMake
run: cmake -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DFASTGLTF_ENABLE_TESTS=ON -DFASTGLTF_ENABLE_DEPRECATED_EXT=ON
- name: Build (Windows)
run: cmake --build ${{ github.workspace }}/build --config ${{ env.BUILD_TYPE }} --target tests/fastgltf_tests --verbose
- name: Run tests
run: ${{ github.workspace }}\build\tests\Release\fastgltf_tests.exe -d yes --order lex [base64],[gltf-loader],[gltf-tools],[uri-tests],[vector-tests]
build_linux:
strategy:
matrix:
c_compiler: [gcc-9, gcc-10, clang-13]
include:
- cxx_compiler: g++-9
c_compiler: gcc-9
- cxx_compiler: g++-10
c_compiler: gcc-10
- cxx_compiler: clang++-13
c_compiler: clang-13
runs-on: ubuntu-latest
env:
CC: ${{ matrix.c_compiler }}
CXX: ${{ matrix.cxx_compiler }}
steps:
- uses: actions/checkout@v3
- uses: actions/cache@v3
id: sample-models-cache
with:
path: ${{ github.workspace }}/${{ env.SAMPLE_MODELS_LOCATION }}
key: gltf-sample-models
- name: Install Python dependencies
run: pip3 install Jinja2
- name: Download dependencies
run: python3 fetch_test_deps.py
- name: Clone glTF-Sample-Models
if: steps.sample-models-cache.outputs.cache-hit != 'true'
run: git clone https://github.com/KhronosGroup/glTF-Sample-Models ${{ github.workspace }}/${{ env.SAMPLE_MODELS_LOCATION }}
# GLFW requires these libs to be present so that configuring succeeds.
- name: Install X11 dependencies
run: sudo apt-get install -y libxrandr-dev libxinerama-dev libx11-dev libxcursor-dev libxi-dev
- name: Configure CMake
run: cmake -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DFASTGLTF_ENABLE_TESTS=ON
- name: Build
run: cmake --build ${{ github.workspace }}/build --config ${{ env.BUILD_TYPE }} --target fastgltf_tests --verbose
- name: Run tests
run: ${{ github.workspace }}/build/tests/fastgltf_tests -d yes --order lex [base64],[gltf-loader],[gltf-tools],[uri-tests],[vector-tests]
build_linux_deprecated_extensions:
strategy:
matrix:
c_compiler: [gcc-9, gcc-10, clang-13]
include:
- cxx_compiler: g++-9
c_compiler: gcc-9
- cxx_compiler: g++-10
c_compiler: gcc-10
- cxx_compiler: clang++-13
c_compiler: clang-13
runs-on: ubuntu-latest
env:
CC: ${{ matrix.c_compiler }}
CXX: ${{ matrix.cxx_compiler }}
steps:
- uses: actions/checkout@v3
- uses: actions/cache@v3
id: sample-models-cache
with:
path: ${{ github.workspace }}/${{ env.SAMPLE_MODELS_LOCATION }}
key: gltf-sample-models
- name: Install Python dependencies
run: pip3 install Jinja2
- name: Download dependencies
run: python3 fetch_test_deps.py
- name: Clone glTF-Sample-Models
if: steps.sample-models-cache.outputs.cache-hit != 'true'
run: git clone https://github.com/KhronosGroup/glTF-Sample-Models ${{ github.workspace }}/${{ env.SAMPLE_MODELS_LOCATION }}
# GLFW requires these libs to be present so that configuring succeeds.
- name: Install X11 dependencies
run: sudo apt-get install -y libxrandr-dev libxinerama-dev libx11-dev libxcursor-dev libxi-dev
- name: Configure CMake
run: cmake -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DFASTGLTF_ENABLE_TESTS=ON -DFASTGLTF_ENABLE_DEPRECATED_EXT=ON
- name: Build
run: cmake --build ${{ github.workspace }}/build --config ${{ env.BUILD_TYPE }} --target fastgltf_tests --verbose
- name: Run tests
run: ${{ github.workspace }}/build/tests/fastgltf_tests -d yes --order lex [base64],[gltf-loader],[gltf-tools],[uri-tests],[vector-tests]


@@ -0,0 +1,50 @@
name: Deploy documentation
on:
push:
branches:
- main
paths:
- 'docs/*'
- 'include/fastgltf/*.hpp'
pull_request:
paths:
- 'docs/*'
- 'include/fastgltf/*.hpp'
workflow_dispatch:
jobs:
deploy:
runs-on: ubuntu-latest
permissions:
pages: write
id-token: write
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
steps:
- uses: actions/checkout@v3
- name: Install CMake dependencies
run: sudo apt-get install doxygen
- name: Install Python dependencies
run: pip3 install -U Sphinx breathe sphinx_rtd_theme
- name: Add ~/.local/bin to PATH
run: PATH=$HOME/.local/bin:$PATH
- name: Configure CMake project
run: cmake -B ${{ github.workspace }}/build -DFASTGLTF_ENABLE_DOCS=ON
- name: Build docs CMake target
run: cmake --build ${{ github.workspace }}/build --target generate_sphinx --verbose
- name: Upload pages artifact
uses: actions/upload-pages-artifact@v2
if: github.ref == 'refs/heads/main'
with:
path: ${{ github.workspace }}/build/docs/sphinx
- name: Deploy site
uses: actions/deploy-pages@v2
if: github.ref == 'refs/heads/main'

third_party/fastgltf/.gitignore vendored Normal file

@@ -0,0 +1,46 @@
# CMake and build folders.
bin/*
build/*
lib/*
out/*
CMakeSettings.json
CMakeUserPresets.json
vcpkg_installed/*
cmake-build-*/
# IDE specific folders
.idea/*
.vscode/*
.vs/*
# Binary files
*.exe
*.dll
*.lib
*.a
*.dylib
**/.DS_Store
# Dependencies
simdjson/
deps/
# Test files
tests/gltf/intel_sponza/
tests/gltf/sample-models/
tests/gltf/good-froge/
tests/gltf/deccer-cubes/
tests/gltf_loaders/
# gltf Rust wrapper
tests/gltf-rs/target
tests/gltf-rs/Cargo.lock
# Example files
*.ngfx-proj
SpirvTemp/
# Docs
docs/xml/
docs/Doxyfile
docs/conf.py

third_party/fastgltf/.gitmodules vendored Normal file

third_party/fastgltf/CMakeLists.txt vendored Normal file

@@ -0,0 +1,156 @@
cmake_minimum_required(VERSION 3.12)
cmake_policy(SET CMP0077 NEW)
if ("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" VERSION_GREATER_EQUAL "3.24")
cmake_policy(SET CMP0135 NEW)
endif()
project(fastgltf VERSION 0.6.1 LANGUAGES C CXX)
option(FASTGLTF_DOWNLOAD_SIMDJSON "Downloads a copy of simdjson itself to satisfy the dependency" ON)
option(FASTGLTF_USE_CUSTOM_SMALLVECTOR "Uses a custom SmallVector type optimised for small arrays" OFF)
option(FASTGLTF_ENABLE_TESTS "Enables test targets for fastgltf" OFF)
option(FASTGLTF_ENABLE_EXAMPLES "Enables example targets for fastgltf" OFF)
option(FASTGLTF_ENABLE_DOCS "Enables the configuration of targets that build/generate documentation" OFF)
option(FASTGLTF_ENABLE_GLTF_RS "Enables the benchmark usage of gltf-rs" OFF)
option(FASTGLTF_ENABLE_ASSIMP "Enables the benchmark usage of assimp" OFF)
option(FASTGLTF_ENABLE_DEPRECATED_EXT "Enables support for deprecated extensions" OFF)
option(FASTGLTF_DISABLE_CUSTOM_MEMORY_POOL "Disables the memory allocation algorithm based on polymorphic resources" OFF)
option(FASTGLTF_USE_64BIT_FLOAT "Default to 64-bit double precision floats for everything" OFF)
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/add_source_directory.cmake)
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/compiler_flags.cmake)
if (FASTGLTF_DOWNLOAD_SIMDJSON)
# Download and configure simdjson
set(SIMDJSON_TARGET_VERSION "3.3.0")
set(SIMDJSON_DL_DIR "${CMAKE_CURRENT_SOURCE_DIR}/deps/simdjson")
file(MAKE_DIRECTORY ${SIMDJSON_DL_DIR})
set(SIMDJSON_HEADER_FILE "${SIMDJSON_DL_DIR}/simdjson.h")
set(SIMDJSON_SOURCE_FILE "${SIMDJSON_DL_DIR}/simdjson.cpp")
macro(download_simdjson)
file(DOWNLOAD "https://raw.githubusercontent.com/simdjson/simdjson/v${SIMDJSON_TARGET_VERSION}/singleheader/simdjson.h" ${SIMDJSON_HEADER_FILE})
file(DOWNLOAD "https://raw.githubusercontent.com/simdjson/simdjson/v${SIMDJSON_TARGET_VERSION}/singleheader/simdjson.cpp" ${SIMDJSON_SOURCE_FILE})
endmacro()
if (EXISTS ${SIMDJSON_HEADER_FILE})
# Look for the SIMDJSON_VERSION define in the header to check the version.
file(STRINGS ${SIMDJSON_HEADER_FILE} SIMDJSON_HEADER_VERSION_LINE REGEX "^#define SIMDJSON_VERSION ")
string(REGEX MATCHALL "[0-9.]+" SIMDJSON_HEADER_VERSION "${SIMDJSON_HEADER_VERSION_LINE}")
message(STATUS "fastgltf: Found simdjson (Version ${SIMDJSON_HEADER_VERSION})")
if (SIMDJSON_HEADER_VERSION VERSION_LESS SIMDJSON_TARGET_VERSION)
message(STATUS "fastgltf: simdjson outdated, downloading...")
download_simdjson()
endif()
else()
message(STATUS "fastgltf: Did not find simdjson, downloading...")
download_simdjson()
if (NOT EXISTS "${SIMDJSON_HEADER_FILE}")
message(FATAL_ERROR "fastgltf: Failed to download simdjson.")
endif()
endif()
add_library(fastgltf_simdjson ${SIMDJSON_HEADER_FILE} ${SIMDJSON_SOURCE_FILE})
target_compile_features(fastgltf_simdjson PRIVATE cxx_std_17)
target_include_directories(fastgltf_simdjson PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/deps/simdjson> $<INSTALL_INTERFACE:include>)
fastgltf_compiler_flags(fastgltf_simdjson)
fastgltf_enable_debug_inlining(fastgltf_simdjson)
install(
FILES deps/simdjson/simdjson.h
DESTINATION include
)
install(
TARGETS fastgltf_simdjson
EXPORT fastgltf_simdjson-targets
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
RUNTIME DESTINATION bin
INCLUDES DESTINATION include
)
install(
EXPORT fastgltf_simdjson-targets
FILE fastgltf_simdjsonTargets.cmake
NAMESPACE fastgltf::
DESTINATION lib/cmake/fastgltf
)
endif()
# Create the library target
add_library(fastgltf
"src/fastgltf.cpp" "src/base64.cpp"
"include/fastgltf/base64.hpp" "include/fastgltf/glm_element_traits.hpp" "include/fastgltf/parser.hpp" "include/fastgltf/tools.hpp" "include/fastgltf/types.hpp" "include/fastgltf/util.hpp")
add_library(fastgltf::fastgltf ALIAS fastgltf)
fastgltf_compiler_flags(fastgltf)
fastgltf_enable_debug_inlining(fastgltf)
target_compile_features(fastgltf PUBLIC cxx_std_17)
target_include_directories(fastgltf PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/include> $<INSTALL_INTERFACE:include>)
set_target_properties(fastgltf PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS YES)
set_target_properties(fastgltf PROPERTIES VERSION ${PROJECT_VERSION})
if (TARGET fastgltf_simdjson)
target_link_libraries(fastgltf PRIVATE fastgltf_simdjson)
elseif(TARGET simdjson::simdjson)
target_link_libraries(fastgltf PRIVATE simdjson::simdjson)
endif()
if (SIMDJSON_TARGET_VERSION)
target_compile_definitions(fastgltf PRIVATE SIMDJSON_TARGET_VERSION="${SIMDJSON_TARGET_VERSION}")
endif()
target_compile_definitions(fastgltf PUBLIC "FASTGLTF_USE_CUSTOM_SMALLVECTOR=$<BOOL:${FASTGLTF_USE_CUSTOM_SMALLVECTOR}>")
target_compile_definitions(fastgltf PUBLIC "FASTGLTF_ENABLE_DEPRECATED_EXT=$<BOOL:${FASTGLTF_ENABLE_DEPRECATED_EXT}>")
target_compile_definitions(fastgltf PUBLIC "FASTGLTF_DISABLE_CUSTOM_MEMORY_POOL=$<BOOL:${FASTGLTF_DISABLE_CUSTOM_MEMORY_POOL}>")
target_compile_definitions(fastgltf PUBLIC "FASTGLTF_USE_64BIT_FLOAT=$<BOOL:${FASTGLTF_USE_64BIT_FLOAT}>")
if (ANDROID)
target_link_libraries(fastgltf PRIVATE android)
endif()
install(
FILES "include/fastgltf/base64.hpp" "include/fastgltf/glm_element_traits.hpp" "include/fastgltf/parser.hpp" "include/fastgltf/tools.hpp" "include/fastgltf/types.hpp" "include/fastgltf/util.hpp"
DESTINATION include/fastgltf
)
install(
TARGETS fastgltf
EXPORT fastgltf-targets
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
RUNTIME DESTINATION bin
INCLUDES DESTINATION include
)
install(
EXPORT fastgltf-targets
FILE fastgltfConfig.cmake
NAMESPACE fastgltf::
DESTINATION lib/cmake/fastgltf
)
if (FASTGLTF_ENABLE_TESTS OR FASTGLTF_ENABLE_EXAMPLES)
# This is required so that Catch2 compiles with C++17, enabling various features we use in tests.
if (NOT DEFINED CMAKE_CXX_STANDARD OR CMAKE_CXX_STANDARD STREQUAL "" OR CMAKE_CXX_STANDARD LESS 17)
set(CMAKE_CXX_STANDARD "17" CACHE STRING "C++ standard" FORCE)
endif()
add_subdirectory(deps)
endif()
if (FASTGLTF_ENABLE_EXAMPLES)
add_subdirectory(examples)
endif()
if (FASTGLTF_ENABLE_TESTS)
add_subdirectory(tests)
endif()
if (FASTGLTF_ENABLE_DOCS)
add_subdirectory(docs)
endif()

third_party/fastgltf/LICENSE.md vendored Normal file

@@ -0,0 +1,22 @@
The MIT License
Copyright (c) 2022 spnda.
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

third_party/fastgltf/README.md vendored Normal file

@@ -0,0 +1,33 @@
# fastgltf
![vcpkg](https://img.shields.io/vcpkg/v/fastgltf?style=flat-square)
![conan center](https://img.shields.io/conan/v/fastgltf?style=flat-square)
![CI_x64 workflow status](https://img.shields.io/github/actions/workflow/status/spnda/fastgltf/ci_x64.yml?label=CI%20x64&style=flat-square)
![CI_arm workflow status](https://img.shields.io/github/actions/workflow/status/spnda/fastgltf/ci_arm.yml?label=CI%20ARM&style=flat-square)
**fastgltf** is a speed and usability focused glTF 2.0 parser written in modern C++17 with minimal dependencies.
It uses SIMD in various areas to decrease the time the application spends parsing and loading glTF data.
By taking advantage of modern C++17 (and optionally C++20) it also provides easy and safe access to the properties and data.
The parser supports the entirety of the glTF 2.0 specification, including many extensions.
By default, fastgltf will only do the absolute minimum to work with a glTF model.
However, it brings many additional features to ease working with the data,
including accessor tools, the ability to directly write to mapped GPU buffers, and decomposing transform matrices.
To learn more about fastgltf, its features, performance and API you can read [the docs](https://spnda.github.io/fastgltf).
## License
The **fastgltf** library is licensed under the MIT License.
----
Libraries embedded in fastgltf:
- [simdjson](https://github.com/simdjson/simdjson): Licensed under Apache 2.0.
Libraries used in examples and tests:
- [Catch2](https://github.com/catchorg/Catch2): Licensed under BSL-1.0.
- [glad](https://github.com/Dav1dde/glad): Licensed under MIT.
- [glfw](https://github.com/glfw/glfw): Licensed under Zlib.
- [glm](https://github.com/g-truc/glm): Licensed under MIT.


@@ -0,0 +1,16 @@
# This will search for all C/C++/ObjC source and header files in the given folder (but not its
# subdirectories) and add them as sources to the target.
function(fastgltf_add_source_directory)
cmake_parse_arguments(PARAM "" "TARGET;FOLDER" "CONDITIONAL" ${ARGN})
# Generic C/C++/ObjC file extensions
file(GLOB TARGET_SOURCES ${PARAM_FOLDER}/*.c ${PARAM_FOLDER}/*.cpp ${PARAM_FOLDER}/*.cc ${PARAM_FOLDER}/*.cxx ${PARAM_FOLDER}/*.m ${PARAM_FOLDER}/*.mm)
file(GLOB TARGET_HEADERS ${PARAM_FOLDER}/*.h ${PARAM_FOLDER}/*.hpp ${PARAM_FOLDER}/*.hh)
foreach(SOURCE ${TARGET_SOURCES})
target_sources(${PARAM_TARGET} PRIVATE ${SOURCE})
endforeach()
foreach(HEADER ${TARGET_HEADERS})
target_sources(${PARAM_TARGET} PRIVATE ${HEADER})
endforeach()
endfunction()


@@ -0,0 +1,31 @@
macro(fastgltf_compiler_flags TARGET)
if (NOT ${TARGET} STREQUAL "" AND TARGET ${TARGET})
# Note that simdjson automatically figures out which SIMD intrinsics to use at runtime based on
# cpuid, meaning no architecture flags or other compile flags need to be passed.
# See https://github.com/simdjson/simdjson/blob/master/doc/implementation-selection.md.
if (MSVC)
target_compile_options(${TARGET} PRIVATE /EHsc /utf-8 $<$<CONFIG:RELEASE>:/O2 /Ob3 /Ot>)
if (MSVC_VERSION GREATER 1929)
target_compile_options(${TARGET} PRIVATE /external:W0 /external:anglebrackets)
endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
target_compile_options(${TARGET} PRIVATE $<$<CONFIG:RELEASE>:-O3>)
# Issue with MinGW: https://github.com/simdjson/simdjson/issues/1963
target_compile_options(${TARGET} PUBLIC $<$<CONFIG:DEBUG>:-Og>)
# https://github.com/simdjson/simdjson/blob/master/doc/basics.md#performance-tips
target_compile_options(${TARGET} PRIVATE $<$<CONFIG:RELEASE>:-DNDEBUG>)
endif()
endif()
endmacro()
macro(fastgltf_enable_debug_inlining TARGET)
if (NOT ${TARGET} STREQUAL "" AND TARGET ${TARGET})
if (MSVC)
target_compile_options(${TARGET} PRIVATE $<$<CONFIG:DEBUG>:/Ob2>)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
target_compile_options(${TARGET} PRIVATE $<$<CONFIG:DEBUG>:-finline-functions>)
endif()
endif()
endmacro()


@@ -0,0 +1,64 @@
find_package(Doxygen)
if (NOT DOXYGEN_FOUND)
message(STATUS "fastgltf: Doxygen not found; docs will not be built")
return()
endif()
# Get the sources and create proper absolute paths
get_target_property(FASTGLTF_SOURCES fastgltf SOURCES)
set(FASTGLTF_ABS_SOURCES "")
foreach (SOURCE_FILE ${FASTGLTF_SOURCES})
list(APPEND FASTGLTF_ABS_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/../${SOURCE_FILE}")
endforeach()
# For the Doxygen file we replace the semicolons with spaces
list(JOIN FASTGLTF_ABS_SOURCES " " DOXYGEN_INPUT)
set(DOXYGEN_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/doxygen)
set(DOXYGEN_INDEX_FILE ${DOXYGEN_OUTPUT}/xml/index.xml)
set(DOXYGEN_FILE ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in ${DOXYGEN_FILE} @ONLY)
# Generate doxygen XML
add_custom_command(
OUTPUT ${DOXYGEN_INDEX_FILE}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDS ${FASTGLTF_ABS_SOURCES}
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_FILE}
MAIN_DEPENDENCY ${DOXYGEN_FILE} Doxyfile.in
COMMENT "Generating docs"
)
add_custom_target(generate_docs DEPENDS ${DOXYGEN_INDEX_FILE})
# Find sphinx-build
find_program(SPHINX_EXECUTABLE
NAMES sphinx-build
DOC "Path to sphinx-build executable")
if (NOT SPHINX_EXECUTABLE)
message(STATUS "fastgltf: Sphinx not found; docs website will not be built")
return()
endif()
set(SPHINX_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/sphinx)
set(SPHINX_INDEX_FILE ${SPHINX_OUTPUT}/index.html)
set(SPHINX_CONF ${CMAKE_CURRENT_SOURCE_DIR}/conf.py)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in ${SPHINX_CONF} @ONLY)
# Let Sphinx generate our site's HTML
add_custom_command(
OUTPUT ${SPHINX_INDEX_FILE}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS ${DOXYGEN_INDEX_FILE} ${CMAKE_CURRENT_SOURCE_DIR}/index.rst ${CMAKE_CURRENT_SOURCE_DIR}/overview.rst
${CMAKE_CURRENT_SOURCE_DIR}/api.rst ${CMAKE_CURRENT_SOURCE_DIR}/tools.rst ${CMAKE_CURRENT_SOURCE_DIR}/options.rst
COMMAND ${SPHINX_EXECUTABLE} -b html
-Dbreathe_projects.fastgltf=\"${DOXYGEN_OUTPUT}/xml\"
${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_OUTPUT}
MAIN_DEPENDENCY ${SPHINX_CONF}
COMMENT "Generating docs with Sphinx"
)
add_custom_target(generate_sphinx DEPENDS ${SPHINX_INDEX_FILE})

third_party/fastgltf/docs/Doxyfile.in vendored Normal file

@@ -0,0 +1,18 @@
PROJECT_NAME = "fastgltf"
PROJECT_NUMBER = "@PROJECT_VERSION@"
DOXYFILE_ENCODING = UTF-8
GENERATE_LATEX = NO
GENERATE_MAN = NO
GENERATE_RTF = NO
GENERATE_HTML = NO
GENERATE_XML = YES
ENABLE_PREPROCESSING = YES
QUIET = YES
JAVADOC_AUTOBRIEF = YES
MACRO_EXPANSION = YES
INPUT = @DOXYGEN_INPUT@
OUTPUT_DIRECTORY = @DOXYGEN_OUTPUT@

third_party/fastgltf/docs/api.rst vendored Normal file

@@ -0,0 +1,268 @@
***
API
***
.. contents:: Table of Contents
glTF structs
============
This section contains all types fastgltf provides to represent data from a glTF asset.
DataSource
----------
.. doxygentypedef:: fastgltf::DataSource
AssetInfo
---------
.. doxygenstruct:: fastgltf::AssetInfo
:members:
:undoc-members:
Accessor
---------
.. doxygenstruct:: fastgltf::Accessor
:members:
:undoc-members:
Animation
---------
.. doxygenstruct:: fastgltf::Animation
:members:
:undoc-members:
Buffer
------
.. doxygenstruct:: fastgltf::Buffer
:members:
:undoc-members:
BufferView
----------
.. doxygenstruct:: fastgltf::BufferView
:members:
:undoc-members:
Camera
------
.. doxygenstruct:: fastgltf::Camera
:members:
:undoc-members:
Image
-----
.. doxygenstruct:: fastgltf::Image
:members:
:undoc-members:
Light
-----
.. doxygenstruct:: fastgltf::Light
:members:
:undoc-members:
Material
--------
.. doxygenstruct:: fastgltf::Material
:members:
:undoc-members:
Mesh
----
.. doxygenstruct:: fastgltf::Mesh
:members:
:undoc-members:
Node
----
.. doxygenstruct:: fastgltf::Node
:members:
:undoc-members:
Sampler
-------
.. doxygenstruct:: fastgltf::Sampler
:members:
:undoc-members:
Scene
-----
.. doxygenstruct:: fastgltf::Scene
:members:
:undoc-members:
Skin
----
.. doxygenstruct:: fastgltf::Skin
:members:
:undoc-members:
Texture
-------
.. doxygenstruct:: fastgltf::Texture
:members:
:undoc-members:
Asset
-----
.. doxygenclass:: fastgltf::Asset
:members:
:undoc-members:
Parser
======
This section contains all types that one requires to load a glTF file using fastgltf.
This includes the Parser class, options, and data buffers.
Error
-----
.. doxygenenum:: fastgltf::Error
Extensions
----------
.. doxygenenum:: fastgltf::Extensions
.. doxygenfunction:: fastgltf::stringifyExtension
Category
--------
.. doxygenenum:: fastgltf::Category
.. _options:
Options
-------
.. doxygenenum:: fastgltf::Options
Expected
--------
.. doxygenclass:: fastgltf::Expected
:members:
:undoc-members:
GltfDataBuffer
--------------
.. doxygenfunction:: fastgltf::getGltfBufferPadding
.. doxygenclass:: fastgltf::GltfDataBuffer
:members:
:undoc-members:
Parser
------
.. doxygenclass:: fastgltf::Parser
:members:
:undoc-members:
.. doxygenfunction:: fastgltf::determineGltfFileType
.. doxygenstruct:: fastgltf::BufferInfo
:members:
Utility
=======
This section contains various types used by fastgltf to simplify and process glTF data,
as well as types used to enhance performance and minimize memory usage.
URIView
-------
.. doxygenclass:: fastgltf::URIView
:members:
:undoc-members:
URI
---
.. doxygenclass:: fastgltf::URI
:members:
:undoc-members:
span
----
.. doxygenclass:: fastgltf::span
:members:
:undoc-members:
SmallVector
-----------
.. doxygenclass:: fastgltf::SmallVector
:members:
:undoc-members:
Optional
--------
.. doxygentypedef:: fastgltf::Optional
OptionalFlagValue
-----------------
.. doxygenstruct:: fastgltf::OptionalFlagValue
:members:
:undoc-members:
OptionalWithFlagValue
---------------------
.. doxygenclass:: fastgltf::OptionalWithFlagValue
:members:
:undoc-members:

third_party/fastgltf/docs/conf.py.in vendored Normal file

@@ -0,0 +1,22 @@
project = 'fastgltf'
copyright = '2023, spnda'
author = 'spnda'
release = '@PROJECT_VERSION@'
extensions = ["breathe", "sphinx_rtd_theme"]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'sphinx_rtd_theme'
# html_static_path = ['_static']
html_theme_options = {
'display_version': True,
'titles_only': True,
'navigation_depth': 5,
}
fastgltf_sources = "@DOXYGEN_INPUT@".split()
breathe_default_project = "fastgltf"
breathe_projects = {"fastgltf": "@DOXYGEN_OUTPUT@/xml"}

third_party/fastgltf/docs/index.rst vendored Normal file

@@ -0,0 +1,31 @@
fastgltf
========
**fastgltf** is a speed and usability focused glTF 2.0 parser written in modern C++17 with minimal dependencies.
It uses SIMD in various areas to decrease the time the application spends parsing and loading glTF data.
By taking advantage of modern C++17 (and optionally C++20) it also provides easy and safe access to the properties and data.
The parser supports the entirety of the glTF 2.0 specification, including many extensions.
By default, fastgltf will only do the absolute minimum to work with a glTF model.
However, it brings many additional features to ease working with the data,
including accessor tools, the ability to directly write to mapped GPU buffers, and decomposing transform matrices.
Indices and tables
------------------
* :doc:`overview`
* :doc:`tools`
* :doc:`options`
* :doc:`api`
.. toctree::
:caption: Documentation
:hidden:
:maxdepth: 2
overview
tools
options
api

third_party/fastgltf/docs/options.rst vendored Normal file

@@ -0,0 +1,67 @@
*******
Options
*******
.. contents:: Table of Contents
CMake options
=============
FASTGLTF_DOWNLOAD_SIMDJSON
--------------------------
A ``BOOL`` option that tells fastgltf's CMake script whether it should download the simdjson sources itself.
* If set to ``YES`` the script will automatically download the amalgamated simdjson header and source files and link them into fastgltf.
* If set to ``NO`` the CMake script expects a ``simdjson::simdjson`` target to exist against which it tries to link.
This target needs to also use the amalgamated simdjson.h header.
FASTGLTF_USE_CUSTOM_SMALLVECTOR
-------------------------------
While fastgltf uses its custom ``SmallVector`` class in various areas by default, it might be useful to enable it in more places.
FASTGLTF_ENABLE_TESTS
---------------------
To build and run the tests and benchmarks you need to set this ``BOOL`` option to ``YES``.
When this option is set, the ``fastgltf_tests`` target will be configured.
The test target has several dependencies, which need to be downloaded with ``fetch_test_deps.py`` before configuring CMake.
FASTGLTF_ENABLE_EXAMPLES
------------------------
To build and run the examples you need to set this ``BOOL`` option to ``YES``.
When this option is set, all example targets will be configured.
These targets have several dependencies, which need to be downloaded with ``fetch_test_deps.py`` before configuring CMake.
FASTGLTF_ENABLE_DOCS
--------------------
.. _doxygen: https://www.doxygen.nl/
.. _sphinx: https://github.com/sphinx-doc/sphinx
.. _breathe: https://github.com/breathe-doc/breathe
This ``BOOL`` option controls whether the targets and commands related to the documentation should be configured.
Setting this to ``YES`` requires `Doxygen`_, `Sphinx`_ and `breathe`_ to be installed.
FASTGLTF_ENABLE_GLTF_RS
-----------------------
.. _corrosion: https://github.com/corrosion-rs/corrosion/
.. _gltf-rs: https://github.com/gltf-rs/gltf
When this ``BOOL`` option is set to ``YES`` fastgltf will use `corrosion`_, which is downloaded using ``fetch_test_deps.py``,
to link against the `gltf-rs`_ Rust library for comparison within the benchmarks.
Note that this option has no effect when ``FASTGLTF_ENABLE_TESTS`` is set to ``NO``.
Parsing options
===============
For more information about the options when parsing a file, see :ref:`the API reference<options>`.

third_party/fastgltf/docs/overview.rst vendored Normal file

@@ -0,0 +1,222 @@
********
Overview
********
.. contents:: Table of Contents
**fastgltf** is a speed and usability focused glTF 2.0 parser written in modern C++17 with minimal dependencies.
It uses SIMD in various areas to decrease the time the application spends parsing and loading glTF data.
By taking advantage of modern C++17 (and optionally C++20) it also provides easy and safe access to the properties and data.
The parser supports the entirety of the glTF 2.0 specification, including many extensions.
By default, fastgltf will only do the absolute minimum to work with a glTF model.
However, it brings many additional features to ease working with the data,
including accessor tools, the ability to directly write to mapped GPU buffers, and decomposing transform matrices.
.. _why:
Why use fastgltf?
=================
There are many other options for working with glTF in C and C++, including the two most popular libraries tinygltf_ and cgltf_.
These have been around for years and support virtually everything you need, so why would you even switch?
.. _tinygltf: https://github.com/syoyo/tinygltf
.. _cgltf: https://github.com/jkuhlmann/cgltf
The following table gives a quick comparison of the general quality-of-life features of these popular glTF libraries.
.. list-table::
:header-rows: 1
* -
- cgltf
- tinygltf
- fastgltf
* - glTF 2.0 reading
- ✔️
- ✔️
- ✔️
* - glTF 2.0 writing
- ✔️
- ✔️
- ❌
* - Extension support
- ✔️
- 🟡¹
- ✔️
* - Image decoding (PNG, JPEG, ...)
- ✔️
- ✔️
- ❌
* - Built-in Draco decompression
- ❌
- ✔️
- ❌
* - Memory callbacks
- ✔️
- ❌
- 🟡²
* - Android asset functionality
- ❌
- ✔️
- ✔️
* - Accessor utilities
- ✔️
- ❌
- ✔️
* - Sparse accessor utilities
- 🟡³
- ❌
- ✔️
* - Matrix accessor utilities
- 🟡³
- ❌
- ✔️
* - Node transform utilities
- ✔️
- ❌
- ✔️
¹ tinygltf does provide the JSON structure for extension data, but leaves the deserialization for you to do.
² fastgltf allows the user to allocate memory for buffers and images.
It does not provide any mechanism for controlling all the heap allocations the library performs.
³ cgltf supports sparse accessors and matrix data only with some accessor functions, but not all.
You can read more about the accessor utilities from fastgltf :ref:`here <accessor-tools>`.
fastgltf follows C++'s concept of "you don't pay for what you don't use" by only doing the absolute minimum by default.
Without specifying any options, fastgltf will only parse the specified parts of the glTF JSON.
For buffers and images, fastgltf will by default either give you the raw byte buffers, when the buffer or image data is embedded within the glTF, or simply the URIs.
Still, fastgltf offers various options that will let the library load buffers and images into memory,
which can be controlled with the memory map/unmap callbacks.
These can also be used for mapping GPU buffers so that fastgltf will write or decode base64 data directly into GPU memory.
By using modern C++ features, the code that reads data and properties from the glTF becomes simpler and vastly more descriptive,
which is a big aspect of guaranteeing code-correctness.
A big factor in this improvement is the use of types which enforce certain properties about the data, such as ``std::variant`` and ``std::optional``.
Compared with tinygltf, where optional values are represented by a plain boolean or a ``-1`` index, this is a considerable improvement.
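As a brief, hedged sketch (assuming an already-loaded ``fastgltf::Asset`` named ``asset``; the member names below follow the glTF object model), this is what such access looks like in practice:

.. code:: c++

    // Minimal sketch, assuming an already-loaded fastgltf::Asset named asset.
    for (const fastgltf::Node& node : asset.nodes) {
        // meshIndex is an optional index; there is no -1 sentinel to remember.
        if (node.meshIndex.has_value()) {
            const fastgltf::Mesh& mesh = asset.meshes[node.meshIndex.value()];
            // ... work with the mesh ...
        }
        // The node transform is a variant: either a matrix or decomposed TRS values.
        if (const auto* trs = std::get_if<fastgltf::Node::TRS>(&node.transform)) {
            // ... use trs->translation, trs->rotation and trs->scale ...
        }
    }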
The biggest difference, which may not be as relevant to everyone, is the drastic increase in deserialization speed.
In some cases, fastgltf is at least 2 times quicker than its competitors, while in others it can be as much as 20 times.
You can read more about fastgltf's performance in the :ref:`performance chapter <performance>`.
.. _usage:
Usage
=====
.. _vcpkg: https://github.com/microsoft/vcpkg
.. _conan: https://conan.io/
fastgltf is a pure C++17 library and only depends on simdjson.
By default, the included CMake script (which requires CMake 3.12 or newer) automatically downloads simdjson while configuring.
The library is tested on GCC 9, GCC 10, Clang 13, and MSVC 14 (Visual Studio 2022) using CI.
fastgltf is also available from vcpkg_ and conan_.
The following snippet illustrates how to use fastgltf to load a glTF file.
.. code:: c++
#include <filesystem>
#include <fastgltf/parser.hpp>
#include <fastgltf/types.hpp>
void load(std::filesystem::path path) {
// Creates a Parser instance. Optimally, you should reuse this across loads, but don't use it
// across threads. To enable extensions, you have to pass them into the parser's constructor.
fastgltf::Parser parser;
// The GltfDataBuffer class is designed for re-usability of the same JSON string. It contains
// utility functions to load data from a std::filesystem::path, copy from an existing buffer,
// or re-use an already existing allocation. Note that it has to outlive the process of every
// parsing function you call.
fastgltf::GltfDataBuffer data;
data.loadFromFile(path);
// This loads the glTF file into the gltf object and parses the JSON. For GLB files, use
// Parser::loadBinaryGLTF instead.
// You can detect the type of glTF using fastgltf::determineGltfFileType.
auto asset = parser.loadGLTF(&data, path.parent_path(), fastgltf::Options::None);
if (auto error = asset.error(); error != fastgltf::Error::None) {
// Some error occurred while reading the buffer, parsing the JSON, or validating the data.
}
// The glTF 2.0 asset is now ready to be used. Simply call asset.get(), asset.get_if() or
// asset-> to get a direct reference to the Asset class. You can then access the glTF data
// structures, like, for example, with buffers:
for (auto& buffer : asset->buffers) {
// Process the buffers.
}
// Optionally, you can now also call the fastgltf::validate method. This will more strictly
// enforce the glTF spec and is not needed most of the time, though I would certainly
// recommend it in a development environment or when debugging to avoid mishaps.
// fastgltf::validate(asset.get());
}
All the nodes, meshes, buffers, textures, ... can now be accessed through the ``fastgltf::Asset`` type.
References between objects are done with a single ``size_t``,
which is used to index into the various vectors in the asset.
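For example, resolving a primitive's base color image means following a few of these indices. The sketch below is illustrative only and assumes a ``fastgltf::Asset`` named ``asset`` and a ``fastgltf::Primitive`` named ``primitive`` taken from one of its meshes:

.. code:: c++

    // Follow index references from a primitive to its base color image.
    if (primitive.materialIndex.has_value()) {
        const fastgltf::Material& material = asset.materials[primitive.materialIndex.value()];
        if (material.pbrData.baseColorTexture.has_value()) {
            const fastgltf::Texture& texture =
                asset.textures[material.pbrData.baseColorTexture->textureIndex];
            if (texture.imageIndex.has_value()) {
                const fastgltf::Image& image = asset.images[texture.imageIndex.value()];
                // ... decode or upload the image ...
            }
        }
    }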
.. _accessor-tools:
Accessor tools
==============
fastgltf provides a utility header for working with accessors.
The header contains various functions and utilities for reading, copying, and converting accessor data.
All of these tools also directly support sparse accessors, letting you support them without having to understand how they work.
These utilities are meant to drastically simplify using glTF accessors and buffers.
You can learn more about this feature of fastgltf in the dedicated chapter: :doc:`tools`.
However, to give a quick overview this is a simple example of how to load the indices of a primitive:
.. code:: c++
fastgltf::Primitive& primitive = ...;
std::vector<std::uint32_t> indices;
if (primitive.indicesAccessor.has_value()) {
auto& accessor = asset->accessors[primitive.indicesAccessor.value()];
indices.resize(accessor.count);
fastgltf::iterateAccessorWithIndex<std::uint32_t>(
asset.get(), accessor, [&](std::uint32_t index, std::size_t idx) {
indices[idx] = index;
});
}
.. _performance:
Performance
===========
.. _spreadsheet-link: https://docs.google.com/spreadsheets/d/1ocdHGoty-rF0N46ZlAlswzcPHVRsqG_tncy8paD3iMY/edit?usp=sharing
.. _two-cylinder-engine: https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0/2CylinderEngine
.. _bistro: https://developer.nvidia.com/orca/amazon-lumberyard-bistro
In this chapter, I'll show some graphs on how fastgltf compares to the two most used glTF libraries, cgltf and tinygltf.
I've disabled loading of images and buffers to only compare the JSON parsing and deserialization of the glTF data.
The values and the graphs themselves can be found in `this spreadsheet <spreadsheet-link_>`_.
These numbers were benchmarked using Catch2's benchmark tool on a Ryzen 5800X (with AVX2) with 32GB of RAM using Clang 16,
as Clang showed a significant performance improvement over MSVC in every test.
First, I compared the performance with embedded buffers that are encoded with base64.
This uses the `2CylinderEngine asset <two-cylinder-engine_>`_, which contains a 1.7MB embedded buffer.
fastgltf includes an optimised base64 decoding algorithm that can take advantage of AVX2, SSE4, and ARM Neon.
With this asset, fastgltf is **20.56 times faster** than tinygltf using RapidJSON and **6.5 times faster** than cgltf.
.. image:: https://cdn.discordapp.com/attachments/442748131898032138/1088470860333060207/Mean_time_parsing_2CylinderEngine_ms_8.png
`Amazon's Bistro <bistro_>`_ (converted to glTF 2.0 using Blender) is another excellent test subject, as its JSON is 148k lines long.
This shows the raw deserialization speed of all the parsers.
In this case fastgltf is **2.1 times faster** than tinygltf and **5.6 times faster** than cgltf.
.. image:: https://cdn.discordapp.com/attachments/442748131898032138/1088470983024840754/Bistro_load_from_memory_without_images_and_buffer_load_1.png

third_party/fastgltf/docs/tools.rst vendored Normal file

@@ -0,0 +1,125 @@
**************
Accessor tools
**************
.. contents:: Table of Contents
fastgltf provides a utility header for working with accessors. The header contains various functions
and utilities for reading, copying, and converting accessor data. All of these tools also directly
support sparse accessors, letting you support them without having to understand how they work.
This header was written by `forenoonwatch <https://github.com/forenoonwatch>`_ with the help of
`Eearslya <https://github.com/Eearslya>`_ and me.
All related functions are templated and take ``T`` as an argument.
This type has to have an ``ElementTraits`` specialization, which provides information about its vector and data properties.
Using this information, fastgltf can convert the accessor data into your preferred format.
For example, ``glm::vec3`` would be a vector of 3 floats, which would be defined like this:
.. code:: c++
template <>
struct fastgltf::ElementTraits<glm::vec3> : fastgltf::ElementTraitsBase<glm::vec3, AccessorType::Vec3, float> {};
Note that, for glm types, there is a header with all pre-defined types shipped with fastgltf: ``fastgltf/glm_element_traits.hpp``.
This header includes the ElementTraits definition for all relevant glm types.
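The same pattern works for your own types. The following is a hedged sketch for a hypothetical ``MyTexCoord`` struct (not part of fastgltf or glm), mapping it to a two-component float accessor:

.. code:: c++

    // Hypothetical vertex type, used here purely for illustration.
    struct MyTexCoord {
        float u;
        float v;
    };

    // Describe the type to fastgltf: a Vec2 accessor with float components.
    template <>
    struct fastgltf::ElementTraits<MyTexCoord>
        : fastgltf::ElementTraitsBase<MyTexCoord, fastgltf::AccessorType::Vec2, float> {};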
.. warning::
Note that, by default, these functions will only be able to load from buffers where the source is either a ``sources::ByteView`` or a ``sources::Vector``.
For other data sources, you'll need to provide a functor similar to the already provided ``DefaultBufferDataAdapter`` to the last parameter of each function.
For more detailed documentation about this see :ref:`this section <bufferdataadapter>`.
getAccessorElement
==================
This function can be used to retrieve a single element from an accessor using an index.
It handles sparse accessors and can properly convert the type.
.. doxygenfunction:: getAccessorElement(const Asset& asset, const Accessor& accessor, size_t index, const BufferDataAdapter& adapter) -> ElementType
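A short, hedged usage sketch, assuming a loaded ``fastgltf::Asset`` named ``asset``, an ``accessor`` containing ``VEC3`` float data, and the glm traits header from above:

.. code:: c++

    // Read a single element (index 0), converted to glm::vec3.
    // Sparse accessors are resolved transparently.
    glm::vec3 first = fastgltf::getAccessorElement<glm::vec3>(asset, accessor, 0);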
iterateAccessor
===============
Using ``iterateAccessor`` you can iterate over the data of an accessor using a lambda, similarly to ``std::for_each``.
.. doxygenfunction:: iterateAccessor(const Asset &asset, const Accessor &accessor, Functor &&func, const BufferDataAdapter &adapter) -> void
.. code:: c++
fastgltf::Primitive& primitive = ...;
std::vector<std::uint32_t> indices;
if (primitive.indicesAccessor.has_value()) {
auto& accessor = asset->accessors[primitive.indicesAccessor.value()];
indices.resize(accessor.count);
std::size_t idx = 0;
fastgltf::iterateAccessor<std::uint32_t>(asset.get(), accessor, [&](std::uint32_t index) {
indices[idx++] = index;
});
}
iterateAccessorWithIndex
========================
Functionally identical to ``iterateAccessor``, but provides you with the current index as the second parameter to the lambda.
.. doxygenfunction:: iterateAccessorWithIndex(const Asset &asset, const Accessor &accessor, Functor &&func, const BufferDataAdapter &adapter) -> void
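For example (a hedged sketch mirroring the indices snippet above, but reading ``VEC3`` positions; ``asset`` and ``positionAccessor`` are assumed to exist):

.. code:: c++

    // Each element is converted to glm::vec3 and written to the slot given by idx.
    std::vector<glm::vec3> positions(positionAccessor.count);
    fastgltf::iterateAccessorWithIndex<glm::vec3>(asset, positionAccessor,
        [&](glm::vec3 position, std::size_t idx) {
            positions[idx] = position;
        });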
copyFromAccessor
================
This function essentially does a ``memcpy`` of the accessor's contents.
In cases where the ``ElementType`` is default-constructible and the accessor type allows direct copying, this is a single direct ``memcpy``.
Otherwise, this function properly respects normalization and sparse accessors while copying and converting the data.
.. doxygenfunction:: copyFromAccessor(const Asset &asset, const Accessor &accessor, void *dest, const BufferDataAdapter &adapter = {}) -> void
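A hedged usage sketch, again assuming a loaded ``asset`` and a ``VEC3`` ``positionAccessor``:

.. code:: c++

    // Copy (and convert, if necessary) the whole accessor into one tightly packed vector.
    std::vector<glm::vec3> positions(positionAccessor.count);
    fastgltf::copyFromAccessor<glm::vec3>(asset, positionAccessor, positions.data());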
Accessor iterators
==================
fastgltf also provides C++ iterators over accessor data to support the syntactic sugar of C++11's range-based for-loops.
These iterators can be obtained using ``iterateAccessor``, and can be used like so:
.. doxygenfunction:: iterateAccessor(const Asset& asset, const Accessor& accessor, const BufferDataAdapter& adapter = {}) -> IterableAccessor<ElementType, BufferDataAdapter>
.. code:: c++
std::size_t idx = 0;
for (auto element : fastgltf::iterateAccessor(asset.get(), accessor)) {
array[idx++] = element;
}
.. _bufferdataadapter:
BufferDataAdapter interface
===========================
The accessor tools acquire the binary data through this functional interface.
By default, fastgltf provides a ``DefaultBufferDataAdapter`` struct.
The accessor functions also default to using this class;
however, this default interface only works with buffers or images that have a ``sources::Vector`` or a ``sources::ByteView`` in the ``DataSource`` member.
.. doxygenstruct:: fastgltf::DefaultBufferDataAdapter
:members:
:undoc-members:
If you do not provide Options::LoadExternalBuffers to the Parser while loading the glTF,
external buffers will be available as ``sources::URI`` and will not work with the ``DefaultBufferDataAdapter``.
Therefore, you'll either have to set that option or provide a custom functional interface that properly returns a pointer to the memory.
As this is a functional interface it is possible to also use lambdas for this:
.. code:: c++
std::vector<std::byte> fileBytes;
std::vector<std::uint8_t> accessorData(accessor.count);
fastgltf::copyFromAccessor<std::uint8_t>(asset.get(), accessor, accessorData.data(), [&](const fastgltf::Buffer& buffer) {
return fileBytes.data();
});


@@ -0,0 +1,3 @@
set_directory_properties(PROPERTIES EXCLUDE_FROM_ALL TRUE)
add_subdirectory(gl_viewer)


@@ -0,0 +1,6 @@
add_executable(fastgltf_gl_viewer EXCLUDE_FROM_ALL)
target_compile_features(fastgltf_gl_viewer PUBLIC cxx_std_17)
target_link_libraries(fastgltf_gl_viewer PRIVATE fastgltf fg_glad_gl46)
target_link_libraries(fastgltf_gl_viewer PRIVATE glfw::glfw glm::glm stb)
fastgltf_add_source_directory(TARGET fastgltf_gl_viewer FOLDER ".")


@@ -0,0 +1,3 @@
# gl_viewer
A fairly simple GL 4.6 glTF viewer made using fastgltf.


@@ -0,0 +1,751 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <chrono>
#include <iostream>
#include <glad/gl.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtx/quaternion.hpp>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include <fastgltf/parser.hpp>
#include <fastgltf/types.hpp>
constexpr std::string_view vertexShaderSource = R"(
#version 460 core
layout(location = 0) in vec3 position;
layout(location = 1) in vec2 inTexCoord;
uniform mat4 modelMatrix;
uniform mat4 viewProjectionMatrix;
out vec2 texCoord;
void main() {
gl_Position = viewProjectionMatrix * modelMatrix * vec4(position, 1.0);
texCoord = inTexCoord;
}
)";
constexpr std::string_view fragmentShaderSource = R"(
#version 460 core
in vec2 texCoord;
out vec4 finalColor;
const uint HAS_BASE_COLOR_TEXTURE = 1;
layout(location = 0) uniform sampler2D albedoTexture;
layout(location = 0, std140) uniform MaterialUniforms {
vec4 baseColorFactor;
float alphaCutoff;
uint flags;
} material;
float rand(vec2 co){
return fract(sin(dot(co, vec2(12.9898, 78.233))) * 43758.5453);
}
void main() {
vec4 color = material.baseColorFactor;
if ((material.flags & HAS_BASE_COLOR_TEXTURE) == HAS_BASE_COLOR_TEXTURE) {
color *= texture(albedoTexture, texCoord);
}
float factor = (rand(gl_FragCoord.xy) - 0.5) / 8;
if (color.a < material.alphaCutoff + factor)
discard;
finalColor = color;
}
)";
void glMessageCallback(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam) {
if (severity == GL_DEBUG_SEVERITY_HIGH) {
std::cerr << message << '\n';
} else {
std::cout << message << '\n';
}
}
bool checkGlCompileErrors(GLuint shader) {
GLint success;
constexpr int length = 1024;
std::string log;
log.resize(length);
glGetShaderiv(shader, GL_COMPILE_STATUS, &success);
if (success != GL_TRUE) {
glGetShaderInfoLog(shader, length, nullptr, log.data());
std::cout << "Shader compilation error: " << "\n"
<< log << "\n -- --------------------------------------------------- -- " << '\n';
return false;
}
return true;
}
bool checkGlLinkErrors(GLuint target) {
GLint success;
constexpr int length = 1024;
std::string log;
log.resize(length);
glGetProgramiv(target, GL_LINK_STATUS, &success);
if (success != GL_TRUE) {
glGetProgramInfoLog(target, length, nullptr, log.data());
std::cout << "Shader program linking error: " << "\n"
<< log << "\n -- --------------------------------------------------- -- " << '\n';
return false;
}
return true;
}
struct IndirectDrawCommand {
uint32_t count;
uint32_t instanceCount;
uint32_t firstIndex;
int32_t baseVertex;
uint32_t baseInstance;
};
struct Primitive {
IndirectDrawCommand draw;
GLenum primitiveType;
GLenum indexType;
GLuint vertexArray;
size_t materialUniformsIndex;
GLuint albedoTexture;
};
struct Mesh {
GLuint drawsBuffer;
std::vector<Primitive> primitives;
};
struct Texture {
GLuint texture;
};
enum MaterialUniformFlags : uint32_t {
None = 0 << 0,
HasBaseColorTexture = 1 << 0,
};
struct MaterialUniforms {
glm::fvec4 baseColorFactor;
float alphaCutoff;
uint32_t flags;
};
struct Viewer {
fastgltf::Asset asset;
std::vector<GLuint> buffers;
std::vector<GLuint> bufferAllocations;
std::vector<Mesh> meshes;
std::vector<Texture> textures;
std::vector<MaterialUniforms> materials;
std::vector<GLuint> materialBuffers;
glm::mat4 viewMatrix = glm::mat4(1.0f);
glm::mat4 projectionMatrix = glm::mat4(1.0f);
GLint viewProjectionMatrixUniform = GL_NONE;
GLint modelMatrixUniform = GL_NONE;
float lastFrame = 0.0f;
float deltaTime = 0.0f;
glm::vec3 accelerationVector = glm::vec3(0.0f);
glm::vec3 velocity = glm::vec3(0.0f);
glm::vec3 position = glm::vec3(0.0f, 0.0f, 0.0f);
glm::dvec2 lastCursorPosition = glm::dvec2(0.0f);
glm::vec3 direction = glm::vec3(0.0f, 0.0f, -1.0f);
float yaw = -90.0f;
float pitch = 0.0f;
bool firstMouse = true;
};
void updateCameraMatrix(Viewer* viewer) {
glm::mat4 viewProjection = viewer->projectionMatrix * viewer->viewMatrix;
glUniformMatrix4fv(viewer->viewProjectionMatrixUniform, 1, GL_FALSE, &viewProjection[0][0]);
}
void windowSizeCallback(GLFWwindow* window, int width, int height) {
void* ptr = glfwGetWindowUserPointer(window);
auto* viewer = static_cast<Viewer*>(ptr);
viewer->projectionMatrix = glm::perspective(glm::radians(75.0f),
static_cast<float>(width) / static_cast<float>(height),
0.01f, 1000.0f);
glViewport(0, 0, width, height);
}
void cursorCallback(GLFWwindow* window, double xpos, double ypos) {
void* ptr = glfwGetWindowUserPointer(window);
auto* viewer = static_cast<Viewer*>(ptr);
if (viewer->firstMouse) {
viewer->lastCursorPosition = { xpos, ypos };
viewer->firstMouse = false;
}
auto offset = glm::vec2(xpos - viewer->lastCursorPosition.x, viewer->lastCursorPosition.y - ypos);
viewer->lastCursorPosition = { xpos, ypos };
offset *= 0.1f;
viewer->yaw += offset.x;
viewer->pitch += offset.y;
viewer->pitch = glm::clamp(viewer->pitch, -89.0f, 89.0f);
auto& direction = viewer->direction;
direction.x = cos(glm::radians(viewer->yaw)) * cos(glm::radians(viewer->pitch));
direction.y = sin(glm::radians(viewer->pitch));
direction.z = sin(glm::radians(viewer->yaw)) * cos(glm::radians(viewer->pitch));
direction = glm::normalize(direction);
}
void keyCallback(GLFWwindow* window, int key, int scancode, int action, int mods) {
void* ptr = glfwGetWindowUserPointer(window);
auto* viewer = static_cast<Viewer*>(ptr);
constexpr glm::vec3 cameraUp = glm::vec3(0.0f, 1.0f, 0.0f);
auto& acceleration = viewer->accelerationVector;
switch (key) {
case GLFW_KEY_W:
acceleration += viewer->direction;
break;
case GLFW_KEY_S:
acceleration -= viewer->direction;
break;
case GLFW_KEY_D:
acceleration += glm::normalize(glm::cross(viewer->direction, cameraUp));
break;
case GLFW_KEY_A:
acceleration -= glm::normalize(glm::cross(viewer->direction, cameraUp));
break;
default:
break;
}
}
glm::mat4 getTransformMatrix(const fastgltf::Node& node, glm::mat4x4& base) {
/** Both a matrix and TRS values are not allowed
* to exist at the same time according to the spec */
if (const auto* pMatrix = std::get_if<fastgltf::Node::TransformMatrix>(&node.transform)) {
return base * glm::mat4x4(glm::make_mat4x4(pMatrix->data()));
}
if (const auto* pTransform = std::get_if<fastgltf::Node::TRS>(&node.transform)) {
// Warning: The quaternion to mat4x4 conversion here is not correct with all versions of glm.
// glTF provides the quaternion as (x, y, z, w), which is the same layout glm used up to version 0.9.9.8.
// However, with commit 59ddeb7 (May 2021) the default order was changed to (w, x, y, z).
// You could either define GLM_FORCE_QUAT_DATA_XYZW to return to the old layout,
// or you could use the recently added static factory constructor glm::quat::wxyz(w, x, y, z),
// which guarantees the parameter order.
return base
* glm::translate(glm::mat4(1.0f), glm::make_vec3(pTransform->translation.data()))
* glm::toMat4(glm::make_quat(pTransform->rotation.data()))
* glm::scale(glm::mat4(1.0f), glm::make_vec3(pTransform->scale.data()));
}
return base;
}
bool loadGltf(Viewer* viewer, std::string_view cPath) {
if (!std::filesystem::exists(cPath)) {
std::cout << "Failed to find " << cPath << '\n';
return false;
}
std::cout << "Loading " << cPath << '\n';
// Parse the glTF file and get the constructed asset
{
fastgltf::Parser parser(fastgltf::Extensions::KHR_mesh_quantization);
auto path = std::filesystem::path{cPath};
constexpr auto gltfOptions =
fastgltf::Options::DontRequireValidAssetMember |
fastgltf::Options::AllowDouble |
fastgltf::Options::LoadGLBBuffers |
fastgltf::Options::LoadExternalBuffers |
fastgltf::Options::LoadExternalImages |
fastgltf::Options::GenerateMeshIndices;
fastgltf::GltfDataBuffer data;
data.loadFromFile(path);
auto type = fastgltf::determineGltfFileType(&data);
fastgltf::Expected<fastgltf::Asset> asset(fastgltf::Error::None);
if (type == fastgltf::GltfType::glTF) {
asset = parser.loadGLTF(&data, path.parent_path(), gltfOptions);
} else if (type == fastgltf::GltfType::GLB) {
asset = parser.loadBinaryGLTF(&data, path.parent_path(), gltfOptions);
} else {
std::cerr << "Failed to determine glTF container" << '\n';
return false;
}
if (asset.error() != fastgltf::Error::None) {
std::cerr << "Failed to load glTF: " << fastgltf::getErrorMessage(asset.error()) << '\n';
return false;
}
viewer->asset = std::move(asset.get());
}
// Some buffers are already allocated during parsing of the glTF, like e.g. base64 buffers
// through our callback functions. Therefore, we only resize our output buffer vector, but
// create our buffer handles later on.
auto& buffers = viewer->asset.buffers;
viewer->buffers.reserve(buffers.size());
for (auto& buffer : buffers) {
constexpr GLuint bufferUsage = GL_STATIC_DRAW;
std::visit(fastgltf::visitor {
[](auto& arg) {}, // Covers FilePathWithOffset, BufferView, ... which are all not possible
[&](fastgltf::sources::Vector& vector) {
GLuint glBuffer;
glCreateBuffers(1, &glBuffer);
glNamedBufferData(glBuffer, static_cast<int64_t>(buffer.byteLength),
vector.bytes.data(), bufferUsage);
viewer->buffers.emplace_back(glBuffer);
},
[&](fastgltf::sources::CustomBuffer& customBuffer) {
// We don't need to do anything special here, the buffer has already been created.
viewer->buffers.emplace_back(static_cast<GLuint>(customBuffer.id));
},
}, buffer.data);
}
return true;
}
bool loadMesh(Viewer* viewer, fastgltf::Mesh& mesh) {
auto& asset = viewer->asset;
Mesh outMesh = {};
outMesh.primitives.resize(mesh.primitives.size());
for (auto it = mesh.primitives.begin(); it != mesh.primitives.end(); ++it) {
auto* positionIt = it->findAttribute("POSITION");
// A mesh primitive is required to hold the POSITION attribute.
assert(positionIt != it->attributes.end());
// We only support indexed geometry.
if (!it->indicesAccessor.has_value()) {
return false;
}
// Generate the VAO
GLuint vao = GL_NONE;
glCreateVertexArrays(1, &vao);
// Get the output primitive
auto index = std::distance(mesh.primitives.begin(), it);
auto& primitive = outMesh.primitives[index];
primitive.primitiveType = fastgltf::to_underlying(it->type);
primitive.vertexArray = vao;
if (it->materialIndex.has_value()) {
primitive.materialUniformsIndex = it->materialIndex.value() + 1; // Adjust for default material
auto& material = viewer->asset.materials[it->materialIndex.value()];
if (material.pbrData.baseColorTexture.has_value()) {
auto& texture = viewer->asset.textures[material.pbrData.baseColorTexture->textureIndex];
if (!texture.imageIndex.has_value())
return false;
primitive.albedoTexture = viewer->textures[texture.imageIndex.value()].texture;
}
} else {
primitive.materialUniformsIndex = 0;
}
{
// Position
auto& positionAccessor = asset.accessors[positionIt->second];
if (!positionAccessor.bufferViewIndex.has_value())
continue;
glEnableVertexArrayAttrib(vao, 0);
glVertexArrayAttribFormat(vao, 0,
static_cast<GLint>(fastgltf::getNumComponents(positionAccessor.type)),
fastgltf::getGLComponentType(positionAccessor.componentType),
GL_FALSE, 0);
glVertexArrayAttribBinding(vao, 0, 0);
auto& positionView = asset.bufferViews[positionAccessor.bufferViewIndex.value()];
auto offset = positionView.byteOffset + positionAccessor.byteOffset;
if (positionView.byteStride.has_value()) {
glVertexArrayVertexBuffer(vao, 0, viewer->buffers[positionView.bufferIndex],
static_cast<GLintptr>(offset),
static_cast<GLsizei>(positionView.byteStride.value()));
} else {
glVertexArrayVertexBuffer(vao, 0, viewer->buffers[positionView.bufferIndex],
static_cast<GLintptr>(offset),
static_cast<GLsizei>(fastgltf::getElementByteSize(positionAccessor.type, positionAccessor.componentType)));
}
}
{
// Tex coord
auto texcoord0 = it->findAttribute("TEXCOORD_0");
// Skip the remaining setup when the primitive provides no texture coordinates, mirroring the
// handling of a missing buffer view below.
if (texcoord0 == it->attributes.end())
continue;
auto& texCoordAccessor = asset.accessors[texcoord0->second];
if (!texCoordAccessor.bufferViewIndex.has_value())
continue;
glEnableVertexArrayAttrib(vao, 1);
glVertexArrayAttribFormat(vao, 1, static_cast<GLint>(fastgltf::getNumComponents(texCoordAccessor.type)),
fastgltf::getGLComponentType(texCoordAccessor.componentType),
GL_FALSE, 0);
glVertexArrayAttribBinding(vao, 1, 1);
auto& texCoordView = asset.bufferViews[texCoordAccessor.bufferViewIndex.value()];
auto offset = texCoordView.byteOffset + texCoordAccessor.byteOffset;
if (texCoordView.byteStride.has_value()) {
glVertexArrayVertexBuffer(vao, 1, viewer->buffers[texCoordView.bufferIndex],
static_cast<GLintptr>(offset),
static_cast<GLsizei>(texCoordView.byteStride.value()));
} else {
glVertexArrayVertexBuffer(vao, 1, viewer->buffers[texCoordView.bufferIndex],
static_cast<GLintptr>(offset),
static_cast<GLsizei>(fastgltf::getElementByteSize(texCoordAccessor.type, texCoordAccessor.componentType)));
}
}
// Generate the indirect draw command
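// For reference: the fields filled in below mirror the layout OpenGL expects for
// glDrawElementsIndirect (count, instanceCount, firstIndex, baseVertex, baseInstance),
// with firstIndex expressed in indices rather than bytes.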
auto& draw = primitive.draw;
draw.instanceCount = 1;
draw.baseInstance = 0;
draw.baseVertex = 0;
auto& indices = asset.accessors[it->indicesAccessor.value()];
if (!indices.bufferViewIndex.has_value())
return false;
draw.count = static_cast<uint32_t>(indices.count);
auto& indicesView = asset.bufferViews[indices.bufferViewIndex.value()];
draw.firstIndex = static_cast<uint32_t>(indices.byteOffset + indicesView.byteOffset) / fastgltf::getElementByteSize(indices.type, indices.componentType);
primitive.indexType = getGLComponentType(indices.componentType);
glVertexArrayElementBuffer(vao, viewer->buffers[indicesView.bufferIndex]);
}
// Create the buffer holding all of our primitive structs.
glCreateBuffers(1, &outMesh.drawsBuffer);
glNamedBufferData(outMesh.drawsBuffer, static_cast<GLsizeiptr>(outMesh.primitives.size() * sizeof(Primitive)),
outMesh.primitives.data(), GL_STATIC_DRAW);
viewer->meshes.emplace_back(outMesh);
return true;
}
bool loadImage(Viewer* viewer, fastgltf::Image& image) {
auto getLevelCount = [](int width, int height) -> GLsizei {
return static_cast<GLsizei>(1 + floor(log2(width > height ? width : height)));
};
GLuint texture;
glCreateTextures(GL_TEXTURE_2D, 1, &texture);
std::visit(fastgltf::visitor {
[](auto& arg) {},
[&](fastgltf::sources::URI& filePath) {
assert(filePath.fileByteOffset == 0); // We don't support offsets with stbi.
assert(filePath.uri.isLocalPath()); // We're only capable of loading local files.
int width, height, nrChannels;
const std::string path(filePath.uri.path().begin(), filePath.uri.path().end()); // Thanks C++.
unsigned char *data = stbi_load(path.c_str(), &width, &height, &nrChannels, 4);
glTextureStorage2D(texture, getLevelCount(width, height), GL_RGBA8, width, height);
glTextureSubImage2D(texture, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
stbi_image_free(data);
},
[&](fastgltf::sources::Vector& vector) {
int width, height, nrChannels;
unsigned char *data = stbi_load_from_memory(vector.bytes.data(), static_cast<int>(vector.bytes.size()), &width, &height, &nrChannels, 4);
glTextureStorage2D(texture, getLevelCount(width, height), GL_RGBA8, width, height);
glTextureSubImage2D(texture, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
stbi_image_free(data);
},
[&](fastgltf::sources::BufferView& view) {
auto& bufferView = viewer->asset.bufferViews[view.bufferViewIndex];
auto& buffer = viewer->asset.buffers[bufferView.bufferIndex];
// Yes, we've already loaded every buffer into some GL buffer. However, with GL it's simpler
// to just copy the buffer data again for the texture. Besides, this is just an example.
std::visit(fastgltf::visitor {
// We only care about sources::Vector here, because we specify LoadExternalBuffers, meaning
// all buffers are already loaded into a vector.
[](auto& arg) {},
[&](fastgltf::sources::Vector& vector) {
int width, height, nrChannels;
unsigned char* data = stbi_load_from_memory(vector.bytes.data() + bufferView.byteOffset, static_cast<int>(bufferView.byteLength), &width, &height, &nrChannels, 4);
glTextureStorage2D(texture, getLevelCount(width, height), GL_RGBA8, width, height);
glTextureSubImage2D(texture, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
stbi_image_free(data);
}
}, buffer.data);
},
}, image.data);
glGenerateTextureMipmap(texture);
viewer->textures.emplace_back(Texture { texture });
return true;
}
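// A small illustrative sketch (assumption, not used by the example): the glTF sampler settings
// are ignored above, so every texture falls back to the driver defaults. If desired, filtering
// and wrapping could be configured explicitly with DSA calls like these before rendering.
void setDefaultSamplerParameters(GLuint texture) {
    glTextureParameteri(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTextureParameteri(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTextureParameteri(texture, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTextureParameteri(texture, GL_TEXTURE_WRAP_T, GL_REPEAT);
}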
bool loadMaterial(Viewer* viewer, fastgltf::Material& material) {
MaterialUniforms uniforms = {};
uniforms.alphaCutoff = material.alphaCutoff;
uniforms.baseColorFactor = glm::make_vec4(material.pbrData.baseColorFactor.data());
if (material.pbrData.baseColorTexture.has_value()) {
uniforms.flags |= MaterialUniformFlags::HasBaseColorTexture;
}
viewer->materials.emplace_back(uniforms);
return true;
}
void drawMesh(Viewer* viewer, size_t meshIndex, glm::mat4 matrix) {
auto& mesh = viewer->meshes[meshIndex];
glBindBuffer(GL_DRAW_INDIRECT_BUFFER, mesh.drawsBuffer);
glUniformMatrix4fv(viewer->modelMatrixUniform, 1, GL_FALSE, &matrix[0][0]);
for (auto i = 0U; i < mesh.primitives.size(); ++i) {
auto& prim = mesh.primitives[i];
auto& material = viewer->materialBuffers[prim.materialUniformsIndex];
glBindTextureUnit(0, prim.albedoTexture);
glBindBufferBase(GL_UNIFORM_BUFFER, 0, material);
glBindVertexArray(prim.vertexArray);
glDrawElementsIndirect(prim.primitiveType, prim.indexType,
reinterpret_cast<const void*>(i * sizeof(Primitive)));
}
}
void drawNode(Viewer* viewer, size_t nodeIndex, glm::mat4 matrix) {
auto& node = viewer->asset.nodes[nodeIndex];
matrix = getTransformMatrix(node, matrix);
if (node.meshIndex.has_value()) {
drawMesh(viewer, node.meshIndex.value(), matrix);
}
for (auto& child : node.children) {
drawNode(viewer, child, matrix);
}
}
int main(int argc, char* argv[]) {
if (argc < 2) {
std::cerr << "No glTF file specified." << '\n';
return -1;
}
auto gltfFile = std::string_view { argv[1] };
Viewer viewer;
if (glfwInit() != GLFW_TRUE) {
std::cerr << "Failed to initialize glfw." << '\n';
return -1;
}
auto* mainMonitor = glfwGetPrimaryMonitor();
const auto* vidMode = glfwGetVideoMode(mainMonitor);
glfwWindowHint(GLFW_SAMPLES, 4);
GLFWwindow* window = glfwCreateWindow(static_cast<int>(static_cast<float>(vidMode->width) * 0.9f), static_cast<int>(static_cast<float>(vidMode->height) * 0.9f), "gl_viewer", nullptr, nullptr);
if (window == nullptr) {
std::cerr << "Failed to create window" << '\n';
return -1;
}
glfwSetWindowUserPointer(window, &viewer);
glfwMakeContextCurrent(window);
glfwSetKeyCallback(window, keyCallback);
glfwSetCursorPosCallback(window, cursorCallback);
glfwSetWindowSizeCallback(window, windowSizeCallback);
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
if (!gladLoadGL(glfwGetProcAddress)) {
std::cerr << "Failed to initialize OpenGL context." << '\n';
return -1;
}
const auto *glRenderer = glGetString(GL_RENDERER);
const auto *glVersion = glGetString(GL_VERSION);
std::cout << "GL Renderer: " << glRenderer << "\nGL Version: " << glVersion << '\n';
if (GLAD_GL_VERSION_4_6 != 1) {
std::cerr << "Missing support for GL 4.6" << '\n';
return -1;
}
glEnable(GL_DEBUG_OUTPUT);
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
glDebugMessageCallback(glMessageCallback, nullptr);
// Compile the shaders
GLuint program = GL_NONE;
{
const GLuint fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
const GLuint vertexShader = glCreateShader(GL_VERTEX_SHADER);
const auto* frag = fragmentShaderSource.data();
const auto* vert = vertexShaderSource.data();
auto fragSize = static_cast<GLint>(fragmentShaderSource.size());
auto vertSize = static_cast<GLint>(vertexShaderSource.size());
glShaderSource(fragmentShader, 1, &frag, &fragSize);
glShaderSource(vertexShader, 1, &vert, &vertSize);
glCompileShader(fragmentShader);
glCompileShader(vertexShader);
if (!checkGlCompileErrors(fragmentShader))
return -1;
if (!checkGlCompileErrors(vertexShader))
return -1;
program = glCreateProgram();
glAttachShader(program, fragmentShader);
glAttachShader(program, vertexShader);
glLinkProgram(program);
if (!checkGlLinkErrors(program))
return -1;
glDeleteShader(fragmentShader);
glDeleteShader(vertexShader);
}
// Load the glTF file
auto start = std::chrono::high_resolution_clock::now();
if (!loadGltf(&viewer, gltfFile)) {
std::cerr << "Failed to parse glTF" << '\n';
return -1;
}
// Add a default material
auto& defaultMaterial = viewer.materials.emplace_back();
defaultMaterial.baseColorFactor = glm::vec4(1.0f);
defaultMaterial.alphaCutoff = 0.0f;
defaultMaterial.flags = 0;
// We load images first.
auto& asset = viewer.asset;
for (auto& image : asset.images) {
loadImage(&viewer, image);
}
for (auto& material : asset.materials) {
loadMaterial(&viewer, material);
}
for (auto& mesh : asset.meshes) {
loadMesh(&viewer, mesh);
}
auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - start);
std::cout << "Loaded glTF file in " << diff.count() << "ms." << '\n';
// Create the material uniform buffer
viewer.materialBuffers.resize(viewer.materials.size(), GL_NONE);
glCreateBuffers(static_cast<GLsizei>(viewer.materials.size()), viewer.materialBuffers.data());
for (auto i = 0UL; i < viewer.materialBuffers.size(); ++i) {
glNamedBufferStorage(viewer.materialBuffers[i], static_cast<GLsizeiptr>(sizeof(MaterialUniforms)),
&viewer.materials[i], GL_MAP_WRITE_BIT);
}
viewer.modelMatrixUniform = glGetUniformLocation(program, "modelMatrix");
viewer.viewProjectionMatrixUniform = glGetUniformLocation(program, "viewProjectionMatrix");
glUseProgram(program);
{
// We just emulate the initial sizing of the window with a manual call.
int width, height;
glfwGetWindowSize(window, &width, &height);
windowSizeCallback(window, width, height);
}
glEnable(GL_BLEND);
glEnable(GL_MULTISAMPLE);
glEnable(GL_DEPTH_TEST);
viewer.lastFrame = static_cast<float>(glfwGetTime());
while (glfwWindowShouldClose(window) != GLFW_TRUE) {
auto currentFrame = static_cast<float>(glfwGetTime());
viewer.deltaTime = currentFrame - viewer.lastFrame;
viewer.lastFrame = currentFrame;
// Reset the acceleration
viewer.accelerationVector = glm::vec3(0.0f);
// Updates the acceleration vector and direction vectors.
glfwPollEvents();
// Factor the deltaTime into the amount of acceleration
viewer.velocity += (viewer.accelerationVector * 50.0f) * viewer.deltaTime;
// Lerp the velocity to 0, adding deceleration.
viewer.velocity = viewer.velocity + (2.0f * viewer.deltaTime) * (glm::vec3(0.0f) - viewer.velocity);
// Add the velocity into the position
viewer.position += viewer.velocity * viewer.deltaTime;
viewer.viewMatrix = glm::lookAt(viewer.position, viewer.position + viewer.direction, glm::vec3(0.0f, 1.0f, 0.0f));
updateCameraMatrix(&viewer);
glClearColor(0.1f, 0.2f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
std::size_t sceneIndex = 0;
if (viewer.asset.defaultScene.has_value())
sceneIndex = viewer.asset.defaultScene.value();
auto& scene = viewer.asset.scenes[sceneIndex];
for (auto& node : scene.nodeIndices) {
drawNode(&viewer, node, glm::mat4(1.0f));
}
glfwSwapBuffers(window);
}
for (auto& mesh : viewer.meshes) {
glDeleteBuffers(1, &mesh.drawsBuffer);
for (auto& prim : mesh.primitives) {
glDeleteVertexArrays(1, &prim.vertexArray);
}
}
glDeleteProgram(program);
glDeleteBuffers(static_cast<GLint>(viewer.buffers.size()), viewer.buffers.data());
glfwDestroyWindow(window);
glfwTerminate();
}

60
third_party/fastgltf/fetch_test_deps.py vendored Normal file
View File

@@ -0,0 +1,60 @@
#!/usr/bin/env python3
import os
import shutil
import sys
import urllib.error
import urllib.request
import zipfile
example_deps_urls = {
'glfw': "https://github.com/glfw/glfw/releases/download/3.3.8/glfw-3.3.8.zip",
'glm': "https://github.com/g-truc/glm/releases/download/0.9.9.8/glm-0.9.9.8.zip",
'stb': "https://github.com/nothings/stb/archive/refs/heads/master.zip",
'glad': "https://github.com/Dav1dde/glad/archive/refs/heads/glad2.zip",
}
test_deps_urls = {
'catch2': "https://github.com/catchorg/Catch2/archive/refs/tags/v3.3.2.zip",
'corrosion': "https://github.com/corrosion-rs/corrosion/archive/refs/heads/master.zip",
}
deps_folder = "deps/"
def download_zip_and_extract(url, output_folder, name):
output = f'{output_folder}{name}'
file_path, _ = urllib.request.urlretrieve(url, f'{output}.zip')
with zipfile.ZipFile(file_path, "r") as zip_ref:
names = zip_ref.namelist()
if len(names) == 0:
return
# Remove any old versions of the downloaded dependency
if os.path.isdir(output):
shutil.rmtree(output)
zip_ref.extractall(output_folder)
# The zip archive extracts to a single top-level folder with a similar name; rename it to the expected dependency name.
if name.lower() in names[0].lower():
os.rename(f'{output_folder}{names[0]}', output)
os.remove(file_path)
def main():
for name, url in example_deps_urls.items():
try:
download_zip_and_extract(url, deps_folder, name)
print(f'Finished downloading {name}')
except urllib.error.HTTPError:
print(f'Could not download {url}.', file=sys.stderr)
break
for name, url in test_deps_urls.items():
try:
download_zip_and_extract(url, deps_folder, name)
print(f'Finished downloading {name}')
except urllib.error.HTTPError:
print(f'Could not download {url}.', file=sys.stderr)
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,91 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string_view>
#include <vector>
#ifdef _MSC_VER
#pragma warning(push) // attribute 'x' is not recognized
#pragma warning(disable : 5030)
#endif
#if defined(__x86_64__) || defined(_M_AMD64) || defined(_M_IX86)
#define FASTGLTF_IS_X86
#elif defined(_M_ARM64) || defined(__aarch64__)
// __ARM_NEON is only for general Neon availability. It does not guarantee the full A64 instruction set.
#define FASTGLTF_IS_A64
#endif
namespace fastgltf::base64 {
/**
* Calculates the amount of base64 padding chars ('=') at the end of the encoded string.
* @note There's at most 2 padding chars, and this function expects that the input string
* points to the original string that has a size that is a multiple of 4 and is at least
* 4 chars long.
*/
[[gnu::always_inline]] constexpr std::size_t getPadding(std::string_view string) {
assert(string.size() >= 4 && string.size() % 4 == 0);
const auto size = string.size();
for (auto i = 1; i < 4; ++i)
if (string[size - i] != '=')
return i - 1;
return 0;
}
/**
* Calculates the size of the decoded string based on the size of the base64 encoded string and
* the amount of padding the encoded data contains.
*/
[[gnu::always_inline]] constexpr std::size_t getOutputSize(std::size_t encodedSize, std::size_t padding) noexcept {
assert(encodedSize % 4 == 0);
return (encodedSize / 4) * 3 - padding;
}
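// Illustrative worked example (not part of the original header): "SGVsbG8=" is 8 characters
// long with a single '=' of padding, so it decodes to (8 / 4) * 3 - 1 == 5 bytes ("Hello").
static_assert(getPadding("SGVsbG8=") == 1);
static_assert(getOutputSize(8, 1) == 5);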
#if defined(FASTGLTF_IS_X86)
void sse4_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
void avx2_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
[[nodiscard]] std::vector<std::uint8_t> sse4_decode(std::string_view encoded);
[[nodiscard]] std::vector<std::uint8_t> avx2_decode(std::string_view encoded);
#elif defined(FASTGLTF_IS_A64)
void neon_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
[[nodiscard]] std::vector<std::uint8_t> neon_decode(std::string_view encoded);
#endif
void fallback_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
void decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding);
[[nodiscard]] std::vector<std::uint8_t> fallback_decode(std::string_view encoded);
[[nodiscard]] std::vector<std::uint8_t> decode(std::string_view encoded);
} // namespace fastgltf::base64
#ifdef _MSC_VER
#pragma warning(pop)
#endif

View File

@@ -0,0 +1,76 @@
#pragma once
#include <fastgltf/tools.hpp>
// If we find glm in the default include path, we'll also include it ourselves.
// However, it is generally expected that the user includes glm before including this header.
#if __has_include(<glm/glm.hpp>)
#include <glm/glm.hpp>
#endif
namespace fastgltf {
template<>
struct ElementTraits<glm::vec2> : ElementTraitsBase<glm::vec2, AccessorType::Vec2, float> {};
template<>
struct ElementTraits<glm::vec3> : ElementTraitsBase<glm::vec3, AccessorType::Vec3, float> {};
template<>
struct ElementTraits<glm::vec4> : ElementTraitsBase<glm::vec4, AccessorType::Vec4, float> {};
template<>
struct ElementTraits<glm::i8vec2> : ElementTraitsBase<glm::i8vec2, AccessorType::Vec2, std::int8_t> {};
template<>
struct ElementTraits<glm::i8vec3> : ElementTraitsBase<glm::i8vec3, AccessorType::Vec3, std::int8_t> {};
template<>
struct ElementTraits<glm::i8vec4> : ElementTraitsBase<glm::i8vec4, AccessorType::Vec4, std::int8_t> {};
template<>
struct ElementTraits<glm::u8vec2> : ElementTraitsBase<glm::u8vec2, AccessorType::Vec2, std::uint8_t> {};
template<>
struct ElementTraits<glm::u8vec3> : ElementTraitsBase<glm::u8vec3, AccessorType::Vec3, std::uint8_t> {};
template<>
struct ElementTraits<glm::u8vec4> : ElementTraitsBase<glm::u8vec4, AccessorType::Vec4, std::uint8_t> {};
template<>
struct ElementTraits<glm::i16vec2> : ElementTraitsBase<glm::i16vec2, AccessorType::Vec2, std::int16_t> {};
template<>
struct ElementTraits<glm::i16vec3> : ElementTraitsBase<glm::i16vec3, AccessorType::Vec3, std::int16_t> {};
template<>
struct ElementTraits<glm::i16vec4> : ElementTraitsBase<glm::i16vec4, AccessorType::Vec4, std::int16_t> {};
template<>
struct ElementTraits<glm::u16vec2> : ElementTraitsBase<glm::u16vec2, AccessorType::Vec2, std::uint16_t> {};
template<>
struct ElementTraits<glm::u16vec3> : ElementTraitsBase<glm::u16vec3, AccessorType::Vec3, std::uint16_t> {};
template<>
struct ElementTraits<glm::u16vec4> : ElementTraitsBase<glm::u16vec4, AccessorType::Vec4, std::uint16_t> {};
template<>
struct ElementTraits<glm::u32vec2> : ElementTraitsBase<glm::u32vec2, AccessorType::Vec2, std::uint32_t> {};
template<>
struct ElementTraits<glm::u32vec3> : ElementTraitsBase<glm::u32vec3, AccessorType::Vec3, std::uint32_t> {};
template<>
struct ElementTraits<glm::u32vec4> : ElementTraitsBase<glm::u32vec4, AccessorType::Vec4, std::uint32_t> {};
template<>
struct ElementTraits<glm::mat2> : ElementTraitsBase<glm::mat2, AccessorType::Mat2, float> {};
template<>
struct ElementTraits<glm::mat3> : ElementTraitsBase<glm::mat3, AccessorType::Mat3, float> {};
template<>
struct ElementTraits<glm::mat4> : ElementTraitsBase<glm::mat4, AccessorType::Mat4, float> {};
} // namespace fastgltf

View File

@@ -0,0 +1,755 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <memory>
#include <tuple>
#include "types.hpp"
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 5030) // attribute 'x' is not recognized
#pragma warning(disable : 4514) // unreferenced inline function has been removed
#endif
// fwd
#if defined(__ANDROID__)
struct AAssetManager;
#endif
namespace simdjson::dom {
class array;
class object;
class parser;
} // namespace simdjson::dom
namespace fastgltf {
enum class Error : std::uint64_t;
template <typename T>
class Expected;
} // namespace fastgltf
namespace std {
template <typename T>
struct tuple_size<fastgltf::Expected<T>> : std::integral_constant<std::size_t, 2> {};
template <typename T>
struct tuple_element<0, fastgltf::Expected<T>> { using type = fastgltf::Error; };
template <typename T>
struct tuple_element<1, fastgltf::Expected<T>> { using type = T; };
} // namespace std
namespace fastgltf {
struct BinaryGltfChunk;
class GltfDataBuffer;
enum class Error : std::uint64_t {
None = 0,
InvalidPath = 1, ///< The glTF directory passed to load*GLTF is invalid.
MissingExtensions = 2, ///< One or more extensions are required by the glTF but not enabled in the Parser.
UnknownRequiredExtension = 3, ///< An extension required by the glTF is not supported by fastgltf.
InvalidJson = 4, ///< An error occurred while parsing the JSON.
InvalidGltf = 5, ///< The glTF is either missing something or has invalid data.
InvalidOrMissingAssetField = 6, ///< The glTF asset object is missing or invalid.
InvalidGLB = 7, ///< The GLB container is invalid.
/**
* A field is missing in the JSON.
* @note This is only used internally.
*/
MissingField = 8,
MissingExternalBuffer = 9, ///< With Options::LoadExternalBuffers, an external buffer was not found.
UnsupportedVersion = 10, ///< The glTF version is not supported by fastgltf.
InvalidURI = 11, ///< A URI from a buffer or image failed to be parsed.
};
inline std::string_view getErrorName(Error error) {
switch (error) {
case Error::None: return "None";
case Error::InvalidPath: return "InvalidPath";
case Error::MissingExtensions: return "MissingExtensions";
case Error::UnknownRequiredExtension: return "UnknownRequiredExtension";
case Error::InvalidJson: return "InvalidJson";
case Error::InvalidGltf: return "InvalidGltf";
case Error::InvalidOrMissingAssetField: return "InvalidOrMissingAssetField";
case Error::InvalidGLB: return "InvalidGLB";
case Error::MissingField: return "MissingField";
case Error::MissingExternalBuffer: return "MissingExternalBuffer";
case Error::UnsupportedVersion: return "UnsupportedVersion";
case Error::InvalidURI: return "InvalidURI";
default: FASTGLTF_UNREACHABLE
}
}
inline std::string_view getErrorMessage(Error error) {
switch (error) {
case Error::None: return "";
case Error::InvalidPath: return "The glTF directory passed to load*GLTF is invalid.";
case Error::MissingExtensions: return "One or more extensions are required by the glTF but not enabled in the Parser.";
case Error::UnknownRequiredExtension: return "An extension required by the glTF is not supported by fastgltf.";
case Error::InvalidJson: return "An error occurred while parsing the JSON.";
case Error::InvalidGltf: return "The glTF is either missing something or has invalid data.";
case Error::InvalidOrMissingAssetField: return "The glTF asset object is missing or invalid.";
case Error::InvalidGLB: return "The GLB container is invalid.";
case Error::MissingField: return "";
case Error::MissingExternalBuffer: return "An external buffer was not found.";
case Error::UnsupportedVersion: return "The glTF version is not supported by fastgltf.";
case Error::InvalidURI: return "A URI from a buffer or image failed to be parsed.";
default: FASTGLTF_UNREACHABLE
}
}
// clang-format off
enum class Extensions : std::uint64_t {
None = 0,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_texture_transform/README.md
KHR_texture_transform = 1 << 1,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_texture_basisu/README.md
KHR_texture_basisu = 1 << 2,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Vendor/MSFT_texture_dds/README.md
MSFT_texture_dds = 1 << 3,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_mesh_quantization/README.md
KHR_mesh_quantization = 1 << 4,
// See https://github.com/KhronosGroup/glTF/tree/main/extensions/2.0/Vendor/EXT_meshopt_compression/README.md
EXT_meshopt_compression = 1 << 5,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_lights_punctual/README.md
KHR_lights_punctual = 1 << 6,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Vendor/EXT_texture_webp/README.md
EXT_texture_webp = 1 << 8,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_specular/README.md
KHR_materials_specular = 1 << 9,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_ior/README.md
KHR_materials_ior = 1 << 10,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_iridescence/README.md
KHR_materials_iridescence = 1 << 11,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_volume/README.md
KHR_materials_volume = 1 << 12,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_transmission/README.md
KHR_materials_transmission = 1 << 13,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_clearcoat/README.md
KHR_materials_clearcoat = 1 << 14,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_emissive_strength/README.md
KHR_materials_emissive_strength = 1 << 15,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_sheen/README.md
KHR_materials_sheen = 1 << 16,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_unlit/README.md
KHR_materials_unlit = 1 << 17,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_anisotropy/README.md
KHR_materials_anisotropy = 1 << 18,
// See https://github.com/KhronosGroup/glTF/tree/main/extensions/2.0/Vendor/EXT_mesh_gpu_instancing/README.md
EXT_mesh_gpu_instancing = 1 << 19,
#if FASTGLTF_ENABLE_DEPRECATED_EXT
// See https://github.com/KhronosGroup/glTF/tree/main/extensions/2.0/Archived/KHR_materials_pbrSpecularGlossiness/README.md
KHR_materials_pbrSpecularGlossiness = 1 << 20,
#endif
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Vendor/MSFT_packing_normalRoughnessMetallic/README.md
MSFT_packing_normalRoughnessMetallic = 1 << 21,
// See https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Vendor/MSFT_packing_occlusionRoughnessMetallic/README.md
MSFT_packing_occlusionRoughnessMetallic = 1 << 22,
};
// clang-format on
FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(Extensions, Extensions, |)
FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(Extensions, Extensions, &)
FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(Extensions, Extensions, |)
FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(Extensions, Extensions, &)
FASTGLTF_UNARY_OP_TEMPLATE_MACRO(Extensions, ~)
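// Illustrative usage (not part of the original header): extensions are bit flags, so multiple
// can be combined with the operators generated above and passed to the Parser constructor, e.g.
//
//     fastgltf::Parser parser(fastgltf::Extensions::KHR_texture_transform
//                             | fastgltf::Extensions::KHR_mesh_quantization);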
// clang-format off
enum class Options : std::uint64_t {
None = 0,
/**
* This allows 5130 as an accessor component type. 5130 is the OpenGL constant GL_DOUBLE,
* which is by default not listed as an allowed component type in the glTF spec.
*
* The glTF normally only allows these component types:
* https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#accessor-data-types
*/
AllowDouble = 1 << 0,
/**
* This skips validating the asset field, as it is usually there and not used anyway.
*/
DontRequireValidAssetMember = 1 << 1,
/**
* Loads all the GLB buffers into CPU memory. If disabled, fastgltf will only provide
* a byte offset and length into the GLB file, which can be useful when using APIs like
* DirectStorage or Metal IO.
*/
LoadGLBBuffers = 1 << 3,
/**
* Loads all external buffers into CPU memory. If disabled, fastgltf will only provide
* a full file path to the file holding the buffer, which can be useful when using APIs
* like DirectStorage or Metal IO. For images, LoadExternalImages has to be explicitly
* specified, too, if required.
*/
LoadExternalBuffers = 1 << 4,
/**
* This option makes fastgltf automatically decompose the transformation matrices of nodes
* into their translation, rotation, and scale components. This can be useful if you only want
* to work with TRS components instead of a mix of matrices and TRS, which should simplify
* working with nodes, especially with animations.
*/
DecomposeNodeMatrices = 1 << 5,
/**
* This option makes fastgltf minimise the JSON file before parsing. In most cases,
* minimising it beforehand actually reduces the time spent. However, there are plenty
* of cases where this option slows down parsing drastically, which from my testing seem
* to all be glTFs which contain embedded buffers and/or are already minimised. Note that
* fastgltf only minimises the string if the data was loaded using GltfDataBuffer::loadFromFile
* or GltfDataBuffer::copyBytes, and that the bytes will also be overwritten.
*/
MinimiseJsonBeforeParsing = 1 << 6,
/**
* Loads all external images into CPU memory. It does not decode any texture data. Complementary
* to LoadExternalBuffers.
*/
LoadExternalImages = 1 << 7,
/**
* Lets fastgltf generate indices for all mesh primitives without indices. This currently
* does not de-duplicate the vertices. This is entirely for compatibility and simplifying the
* loading process.
*/
GenerateMeshIndices = 1 << 8,
};
// clang-format on
FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(Options, Options, |)
FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(Options, Options, &)
FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(Options, Options, |)
FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(Options, Options, &)
FASTGLTF_UNARY_OP_TEMPLATE_MACRO(Options, ~)
// String representations of glTF 2.0 extension identifiers.
namespace extensions {
constexpr std::string_view EXT_mesh_gpu_instancing = "EXT_mesh_gpu_instancing";
constexpr std::string_view EXT_meshopt_compression = "EXT_meshopt_compression";
constexpr std::string_view EXT_texture_webp = "EXT_texture_webp";
constexpr std::string_view KHR_lights_punctual = "KHR_lights_punctual";
constexpr std::string_view KHR_materials_anisotropy = "KHR_materials_anisotropy";
constexpr std::string_view KHR_materials_clearcoat = "KHR_materials_clearcoat";
constexpr std::string_view KHR_materials_emissive_strength = "KHR_materials_emissive_strength";
constexpr std::string_view KHR_materials_ior = "KHR_materials_ior";
constexpr std::string_view KHR_materials_iridescence = "KHR_materials_iridescence";
constexpr std::string_view KHR_materials_sheen = "KHR_materials_sheen";
constexpr std::string_view KHR_materials_specular = "KHR_materials_specular";
constexpr std::string_view KHR_materials_transmission = "KHR_materials_transmission";
constexpr std::string_view KHR_materials_unlit = "KHR_materials_unlit";
constexpr std::string_view KHR_materials_volume = "KHR_materials_volume";
constexpr std::string_view KHR_mesh_quantization = "KHR_mesh_quantization";
constexpr std::string_view KHR_texture_basisu = "KHR_texture_basisu";
constexpr std::string_view KHR_texture_transform = "KHR_texture_transform";
constexpr std::string_view MSFT_packing_normalRoughnessMetallic = "MSFT_packing_normalRoughnessMetallic";
constexpr std::string_view MSFT_packing_occlusionRoughnessMetallic = "MSFT_packing_occlusionRoughnessMetallic";
constexpr std::string_view MSFT_texture_dds = "MSFT_texture_dds";
#if FASTGLTF_ENABLE_DEPRECATED_EXT
constexpr std::string_view KHR_materials_pbrSpecularGlossiness = "KHR_materials_pbrSpecularGlossiness";
#endif
} // namespace extensions
// clang-format off
// An array of pairs of string representations of extension identifiers and their respective enum
// value used for enabling/disabling the loading of it. This also represents all extensions that
// fastgltf supports and understands.
#if FASTGLTF_ENABLE_DEPRECATED_EXT
static constexpr size_t SUPPORTED_EXTENSION_COUNT = 21;
#else
static constexpr size_t SUPPORTED_EXTENSION_COUNT = 20;
#endif
static constexpr std::array<std::pair<std::string_view, Extensions>, SUPPORTED_EXTENSION_COUNT> extensionStrings = {{
{ extensions::EXT_mesh_gpu_instancing, Extensions::EXT_mesh_gpu_instancing },
{ extensions::EXT_meshopt_compression, Extensions::EXT_meshopt_compression },
{ extensions::EXT_texture_webp, Extensions::EXT_texture_webp },
{ extensions::KHR_lights_punctual, Extensions::KHR_lights_punctual },
{ extensions::KHR_materials_anisotropy, Extensions::KHR_materials_anisotropy },
{ extensions::KHR_materials_clearcoat, Extensions::KHR_materials_clearcoat },
{ extensions::KHR_materials_emissive_strength, Extensions::KHR_materials_emissive_strength },
{ extensions::KHR_materials_ior, Extensions::KHR_materials_ior },
{ extensions::KHR_materials_iridescence, Extensions::KHR_materials_iridescence },
{ extensions::KHR_materials_sheen, Extensions::KHR_materials_sheen },
{ extensions::KHR_materials_specular, Extensions::KHR_materials_specular },
{ extensions::KHR_materials_transmission, Extensions::KHR_materials_transmission },
{ extensions::KHR_materials_unlit, Extensions::KHR_materials_unlit },
{ extensions::KHR_materials_volume, Extensions::KHR_materials_volume },
{ extensions::KHR_mesh_quantization, Extensions::KHR_mesh_quantization },
{ extensions::KHR_texture_basisu, Extensions::KHR_texture_basisu },
{ extensions::KHR_texture_transform, Extensions::KHR_texture_transform },
{ extensions::MSFT_packing_normalRoughnessMetallic, Extensions::MSFT_packing_normalRoughnessMetallic },
{ extensions::MSFT_packing_occlusionRoughnessMetallic, Extensions::MSFT_packing_occlusionRoughnessMetallic },
{ extensions::MSFT_texture_dds, Extensions::MSFT_texture_dds },
#if FASTGLTF_ENABLE_DEPRECATED_EXT
{ extensions::KHR_materials_pbrSpecularGlossiness, Extensions::KHR_materials_pbrSpecularGlossiness },
#endif
}};
// clang-format on
/**
* Returns the name of the passed glTF extension.
*
* @note If \p extensions has more than one bit set (multiple extensions), this
* will return the name of the first set bit.
*/
#if FASTGLTF_CPP_20
constexpr
#else
inline
#endif
std::string_view stringifyExtension(Extensions extensions) {
// Find the first set bit and mask the value to that
std::uint8_t position = 0;
while (position < std::numeric_limits<std::underlying_type_t<Extensions>>::digits) {
if (((to_underlying(extensions) >> position) & 1) != 0) {
extensions &= static_cast<Extensions>(std::uint64_t(1) << position);
break;
}
++position;
}
for (const auto& extensionString : extensionStrings)
if (extensionString.second == extensions)
return extensionString.first;
return "";
}
#if !FASTGLTF_DISABLE_CUSTOM_MEMORY_POOL
class ChunkMemoryResource : public std::pmr::memory_resource {
/**
* The default size of the individual blocks we allocate.
*/
constexpr static std::size_t blockSize = 2048;
struct Block {
std::unique_ptr<std::byte[]> data;
std::size_t size;
std::byte* dataPointer;
};
SmallVector<Block, 4> blocks;
std::size_t blockIdx = 0;
public:
explicit ChunkMemoryResource() {
allocateNewBlock();
}
void allocateNewBlock() {
auto& block = blocks.emplace_back();
block.data = std::unique_ptr<std::byte[]>(new std::byte[blockSize]);
block.dataPointer = block.data.get();
block.size = blockSize;
}
[[nodiscard]] void* do_allocate(std::size_t bytes, std::size_t alignment) override {
auto& block = blocks[blockIdx];
auto availableSize = static_cast<std::size_t>(block.dataPointer - block.data.get());
if ((availableSize + bytes) > block.size) {
// The block can't fit the new allocation. We'll just create a new block and use that.
allocateNewBlock();
++blockIdx;
return do_allocate(bytes, alignment);
}
void* alloc = block.dataPointer;
std::size_t space = availableSize;
if (std::align(alignment, availableSize, alloc, space) == nullptr) {
// Not enough space after alignment
allocateNewBlock();
++blockIdx;
return do_allocate(bytes, alignment);
}
// Get the number of bytes used for padding, and calculate the new offset using that
block.dataPointer = block.dataPointer + (availableSize - space) + bytes;
return alloc;
}
void do_deallocate([[maybe_unused]] void* p, [[maybe_unused]] std::size_t bytes, [[maybe_unused]] std::size_t alignment) override {
// We currently do nothing, as we don't keep track of what portions of the blocks are still used.
// Therefore, we keep all blocks alive until the destruction of this resource (parser).
}
[[nodiscard]] bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override {
return this == std::addressof(other);
}
};
#endif
/**
* A type that stores an error together with an expected value.
* To use this type, first call error() to inspect if any errors have occurred.
* If error() is not fastgltf::Error::None,
* calling get(), operator->(), and operator*() is undefined behaviour.
*/
template <typename T>
class Expected {
static_assert(std::is_default_constructible_v<T>);
static_assert(!std::is_same_v<Error, T>);
Error err;
T value;
public:
explicit Expected(Error error) : err(error) {}
explicit Expected(T&& value) : err(Error::None), value(std::move(value)) {}
Expected(const Expected<T>& other) = delete;
Expected(Expected<T>&& other) noexcept : err(other.err), value(std::move(other.value)) {}
Expected<T>& operator=(const Expected<T>& other) = delete;
Expected<T>& operator=(Expected<T>&& other) noexcept {
err = other.err;
value = std::move(other.value);
return *this;
}
[[nodiscard]] Error error() const noexcept {
return err;
}
/**
* Returns a reference to the value of T.
* When error() returns anything but Error::None, the returned value is undefined.
*/
[[nodiscard]] T& get() noexcept {
assert(err == Error::None);
return value;
}
/**
* Returns the address of the value of T, or nullptr if error() returns anything but Error::None.
*/
[[nodiscard]] T* get_if() noexcept {
if (err != Error::None)
return nullptr;
return std::addressof(value);
}
template <std::size_t I>
auto& get() noexcept {
if constexpr (I == 0) return err;
else if constexpr (I == 1) return value;
}
template <std::size_t I>
const auto& get() const noexcept {
if constexpr (I == 0) return err;
else if constexpr (I == 1) return value;
}
/**
* Returns the address of the value of T.
* When error() returns anything but Error::None, the returned value is undefined.
*/
T* operator->() noexcept {
assert(err == Error::None);
return std::addressof(value);
}
/**
* Returns the address of the const value of T.
* When error() returns anything but Error::None, the returned value is undefined.
*/
const T* operator->() const noexcept {
assert(err == Error::None);
return std::addressof(value);
}
T&& operator*() && noexcept {
assert(err == Error::None);
return std::move(value);
}
operator bool() const noexcept {
return err == Error::None;
}
};
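// Illustrative usage note (not part of the original header): thanks to the std::tuple_size and
// std::tuple_element specialisations above, an Expected<T> can also be consumed via structured
// bindings, e.g.
//
//     auto [error, asset] = parser.loadGLTF(&data, directory, options);
//     if (error != fastgltf::Error::None) { /* handle the error */ }
//
// where `parser`, `data`, `directory`, and `options` are assumed to exist in the caller's code.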
struct BufferInfo {
void* mappedMemory;
CustomBufferId customId;
};
using BufferMapCallback = BufferInfo(std::uint64_t bufferSize, void* userPointer);
using BufferUnmapCallback = void(BufferInfo* bufferInfo, void* userPointer);
using Base64DecodeCallback = void(std::string_view base64, std::uint8_t* dataOutput, std::size_t padding, std::size_t dataOutputSize, void* userPointer);
/**
* Enum to represent the type of a glTF file. glTFs can either be the standard JSON file with
* paths to external buffers or with base64-embedded buffers, or they can be packed into a
* so-called GLB container format, which has two or more chunks of binary data, where one
* contains the JSON string and another the buffer data.
*/
enum class GltfType {
glTF,
GLB,
Invalid,
};
/**
* This function starts reading into the buffer and tries to determine what type of glTF container it is.
* This should be used to know whether to call Parser::loadGLTF or Parser::loadBinaryGLTF.
*
* @return The type of the glTF file, either glTF, GLB, or Invalid if it was not determinable. If this function
* returns Invalid it is highly likely that the buffer does not actually represent a valid glTF file.
*/
GltfType determineGltfFileType(GltfDataBuffer* buffer);
/**
* Gets the amount of byte padding required on the GltfDataBuffer, as simdjson requires the
* buffer to be padded so it can safely read past the end while loading N bytes at a time with SIMD.
*/
std::size_t getGltfBufferPadding() noexcept;
/**
* This class holds a chunk of data that makes up a JSON string that the glTF parser will use
* and read from.
*/
class GltfDataBuffer {
friend class Parser;
friend GltfType determineGltfFileType(GltfDataBuffer* buffer);
protected:
std::size_t allocatedSize = 0;
std::size_t dataSize = 0;
std::byte* bufferPointer = nullptr;
std::unique_ptr<std::byte[]> buffer;
std::filesystem::path filePath = {};
public:
explicit GltfDataBuffer() noexcept;
/**
* Constructs a new GltfDataBuffer from a span object, copying its data as there
* is no guarantee for the allocation size to have the adequate padding.
*/
explicit GltfDataBuffer(span<std::byte> data) noexcept;
virtual ~GltfDataBuffer() noexcept;
/**
* Saves the pointer including its range. Does not copy any data. This requires the
* original allocation to outlive the parsing of the glTF, i.e. to remain valid until after the
* last relevant call to fastgltf::Parser::loadGLTF. However, this function asks for a capacity size, as
* the JSON parsing requires some padding. See fastgltf::getGltfBufferPadding for more information.
* If the capacity does not have enough padding, the function will instead copy the bytes
* with the copyBytes method. Also, it will set the padding bytes all to 0, so be sure to
* not use that for any other data.
*/
bool fromByteView(std::uint8_t* bytes, std::size_t byteCount, std::size_t capacity) noexcept;
/**
* This will create a copy of the passed bytes and allocate an adequately sized buffer.
*/
bool copyBytes(const std::uint8_t* bytes, std::size_t byteCount) noexcept;
/**
* Loads the file at the given path, with an optional byte offset, into a memory buffer.
*/
bool loadFromFile(const std::filesystem::path& path, std::uint64_t byteOffset = 0) noexcept;
/**
* Returns the size, in bytes, of the data currently held by this buffer.
* @return The data size in bytes.
*/
[[nodiscard]] inline std::size_t getBufferSize() const noexcept {
return dataSize;
}
[[nodiscard]] explicit operator span<std::byte>() {
return span<std::byte>(bufferPointer, dataSize);
}
};
#if defined(__ANDROID__)
class AndroidGltfDataBuffer : public GltfDataBuffer {
AAssetManager* assetManager;
public:
explicit AndroidGltfDataBuffer(AAssetManager* assetManager) noexcept;
~AndroidGltfDataBuffer() noexcept = default;
/**
* Loads a file from within an Android APK
*/
bool loadFromAndroidAsset(const std::filesystem::path& path, std::uint64_t byteOffset = 0) noexcept;
};
#endif
/**
* This function performs additional, stricter validation of all the data parsed from the glTF.
* Realistically, this should not be necessary in Release applications, but it can be helpful
* when debugging an asset-related issue.
*/
[[nodiscard]] Error validate(const Asset& asset);
/**
* Some internals the parser passes on to each glTF instance.
*/
struct ParserInternalConfig {
BufferMapCallback* mapCallback = nullptr;
BufferUnmapCallback* unmapCallback = nullptr;
Base64DecodeCallback* decodeCallback = nullptr;
void* userPointer = nullptr;
Extensions extensions = Extensions::None;
};
/**
* A parser for one or more glTF files. It uses a SIMD based JSON parser to maximize efficiency
* and performance at runtime.
*
* @note This class is not thread-safe.
*/
class Parser {
// The simdjson parser object. We want to share it between runs, so it does not need to
// reallocate over and over again. We're hiding it here to not leak the simdjson header.
std::unique_ptr<simdjson::dom::parser> jsonParser;
ParserInternalConfig config = {};
DataSource glbBuffer;
#if !FASTGLTF_DISABLE_CUSTOM_MEMORY_POOL
std::shared_ptr<ChunkMemoryResource> resourceAllocator;
#endif
std::filesystem::path directory;
Options options;
static auto getMimeTypeFromString(std::string_view mime) -> MimeType;
static void fillCategories(Category& inputCategories) noexcept;
[[nodiscard]] auto decodeDataUri(URIView& uri) const noexcept -> Expected<DataSource>;
[[nodiscard]] auto loadFileFromUri(URIView& uri) const noexcept -> Expected<DataSource>;
Error generateMeshIndices(Asset& asset) const;
Error parseAccessors(simdjson::dom::array& array, Asset& asset);
Error parseAnimations(simdjson::dom::array& array, Asset& asset);
Error parseBuffers(simdjson::dom::array& array, Asset& asset);
Error parseBufferViews(simdjson::dom::array& array, Asset& asset);
Error parseCameras(simdjson::dom::array& array, Asset& asset);
Error parseExtensions(simdjson::dom::object& extensionsObject, Asset& asset);
Error parseImages(simdjson::dom::array& array, Asset& asset);
Error parseLights(simdjson::dom::array& array, Asset& asset);
Error parseMaterials(simdjson::dom::array& array, Asset& asset);
Error parseMeshes(simdjson::dom::array& array, Asset& asset);
Error parseNodes(simdjson::dom::array& array, Asset& asset);
Error parseSamplers(simdjson::dom::array& array, Asset& asset);
Error parseScenes(simdjson::dom::array& array, Asset& asset);
Error parseSkins(simdjson::dom::array& array, Asset& asset);
Error parseTextures(simdjson::dom::array& array, Asset& asset);
Expected<Asset> parse(simdjson::dom::object root, Category categories);
public:
explicit Parser(Extensions extensionsToLoad = Extensions::None) noexcept;
explicit Parser(const Parser& parser) = delete;
Parser(Parser&& parser) noexcept;
Parser& operator=(const Parser& parser) = delete;
Parser& operator=(Parser&& other) noexcept;
~Parser();
/**
* Loads a glTF file from pre-loaded bytes representing a JSON file.
*
* @return An Asset wrapped in an Expected type, which may contain an error if one occurred.
*/
[[nodiscard]] Expected<Asset> loadGLTF(GltfDataBuffer* buffer, std::filesystem::path directory, Options options = Options::None, Category categories = Category::All);
/**
* Loads a glTF file embedded within a GLB container, which may contain the first buffer of the glTF asset.
*
* @return An Asset wrapped in an Expected type, which may contain an error if one occurred.
*/
[[nodiscard]] Expected<Asset> loadBinaryGLTF(GltfDataBuffer* buffer, std::filesystem::path directory, Options options = Options::None, Category categories = Category::All);
/**
* This function can be used to set callbacks so that you can control memory allocation for
* large buffers and images that are loaded from a glTF file. For example, one could use
* the callbacks to map a GPU buffer through Vulkan or DirectX so that fastgltf can write
* the buffer directly to the GPU to avoid a copy into RAM first. To remove the callbacks
* for a specific load, call this method with both parameters as nullptr before load*GLTF.
* Using Parser::setUserPointer you can also set a user pointer to access your
* own class or other data you may need.
*
* @param mapCallback function called when the parser requires a buffer to write data
* embedded in a GLB file or decoded from a base64 URI, cannot be nullptr.
* @param unmapCallback function called when the parser is done with writing into a
* buffer, can be nullptr.
* @note This is likely only useful for advanced users who know what they're doing.
*/
void setBufferAllocationCallback(BufferMapCallback* mapCallback, BufferUnmapCallback* unmapCallback = nullptr) noexcept;
/**
* Allows setting callbacks for base64 decoding.
* This can be useful if you have another base64 decoder optimised for a certain platform or architecture,
* or want to use your own scheduler to schedule multiple threads for working on decoding individual chunks of the data.
* Using Parser::setUserPointer you can also set a user pointer to access your own class or other data you may need.
*
* It is still recommended to use fastgltf's base64 decoding features as they're highly optimised
* for SSE4, AVX2, and ARM Neon.
*
* @param decodeCallback function called when the parser tries to decode a base64 buffer
*/
void setBase64DecodeCallback(Base64DecodeCallback* decodeCallback) noexcept;
void setUserPointer(void* pointer) noexcept;
};
} // namespace fastgltf
#ifdef _MSC_VER
#pragma warning(pop)
#endif

View File

@@ -0,0 +1,653 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <cstring>
#include <iterator>
#include "types.hpp"
namespace fastgltf {
template <typename>
struct ComponentTypeConverter;
template<>
struct ComponentTypeConverter<std::int8_t> {
static constexpr auto type = ComponentType::Byte;
};
template<>
struct ComponentTypeConverter<std::uint8_t> {
static constexpr auto type = ComponentType::UnsignedByte;
};
template<>
struct ComponentTypeConverter<std::int16_t> {
static constexpr auto type = ComponentType::Short;
};
template<>
struct ComponentTypeConverter<std::uint16_t> {
static constexpr auto type = ComponentType::UnsignedShort;
};
template<>
struct ComponentTypeConverter<std::int32_t> {
static constexpr auto type = ComponentType::Int;
};
template<>
struct ComponentTypeConverter<std::uint32_t> {
static constexpr auto type = ComponentType::UnsignedInt;
};
template<>
struct ComponentTypeConverter<float> {
static constexpr auto type = ComponentType::Float;
};
template<>
struct ComponentTypeConverter<double> {
static constexpr auto type = ComponentType::Double;
};
template <typename ElementType, AccessorType EnumAccessorType, typename ComponentType = ElementType>
struct ElementTraitsBase {
using element_type = ElementType;
using component_type = ComponentType;
static constexpr auto type = EnumAccessorType;
static constexpr auto enum_component_type = ComponentTypeConverter<ComponentType>::type;
};
template <typename>
struct ElementTraits;
template<>
struct ElementTraits<std::int8_t> : ElementTraitsBase<std::int8_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::uint8_t> : ElementTraitsBase<std::uint8_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::int16_t> : ElementTraitsBase<std::int16_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::uint16_t> : ElementTraitsBase<std::uint16_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::int32_t> : ElementTraitsBase<std::int32_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<std::uint32_t> : ElementTraitsBase<std::uint32_t, AccessorType::Scalar> {};
template<>
struct ElementTraits<float> : ElementTraitsBase<float, AccessorType::Scalar> {};
template<>
struct ElementTraits<double> : ElementTraitsBase<double, AccessorType::Scalar> {};
#if FASTGLTF_HAS_CONCEPTS
template <typename ElementType>
concept Element = std::is_arithmetic_v<typename ElementTraits<ElementType>::component_type>
&& ElementTraits<ElementType>::type != AccessorType::Invalid
&& ElementTraits<ElementType>::enum_component_type != ComponentType::Invalid
&& std::is_default_constructible_v<ElementType>
&& std::is_constructible_v<ElementType>
&& std::is_move_assignable_v<ElementType>;
#endif
namespace internal {
template <typename DestType, typename SourceType>
constexpr DestType convertComponent(const SourceType& source, bool normalized) {
if (normalized) {
if constexpr (std::is_floating_point_v<SourceType> && std::is_integral_v<DestType>) {
// float -> int conversion
return static_cast<DestType>(std::round(source * static_cast<SourceType>(std::numeric_limits<DestType>::max())));
} else if constexpr (std::is_integral_v<SourceType> && std::is_floating_point_v<DestType>) {
// int -> float conversion
DestType minValue;
if constexpr (std::is_signed_v<DestType>) {
minValue = static_cast<DestType>(-1.0);
} else {
minValue = static_cast<DestType>(0.0);
}
// We have to use max here because, e.g. for int8_t -> float, -128 would otherwise map to slightly
// less than -1.0, while -1.0 should represent -127; per the glTF spec, -128 and -127 both
// equate to -1.0.
return fastgltf::max(static_cast<DestType>(source) / static_cast<DestType>(std::numeric_limits<SourceType>::max()),
minValue);
}
}
return static_cast<DestType>(source);
}
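// Worked example (illustrative): converting a normalized float of 0.5f to std::uint8_t yields
// static_cast<std::uint8_t>(std::round(0.5f * 255.0f)) == 128, while a normalized std::uint8_t
// of 255 converted to float yields 255 / 255.0f == 1.0f.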
template <typename SourceType, typename DestType, std::size_t Index>
constexpr DestType convertComponent(const std::byte* bytes, bool normalized) {
return convertComponent<DestType>(reinterpret_cast<const SourceType*>(bytes)[Index], normalized);
}
template <typename ElementType, typename SourceType, std::size_t... I>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
constexpr ElementType convertAccessorElement(const std::byte* bytes, bool normalized, std::index_sequence<I...>) {
using DestType = typename ElementTraits<ElementType>::component_type;
static_assert(std::is_arithmetic_v<DestType>, "Accessor traits must provide a valid component type");
if constexpr (std::is_aggregate_v<ElementType>) {
return {convertComponent<SourceType, DestType, I>(bytes, normalized)...};
} else {
return ElementType{convertComponent<SourceType, DestType, I>(bytes, normalized)...};
}
}
template <typename ElementType,
typename Seq = std::make_index_sequence<getNumComponents(ElementTraits<ElementType>::type)>>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
ElementType getAccessorElementAt(ComponentType componentType, const std::byte* bytes, bool normalized = false) {
switch (componentType) {
case ComponentType::Byte:
return convertAccessorElement<ElementType, std::int8_t>(bytes, normalized, Seq{});
case ComponentType::UnsignedByte:
return convertAccessorElement<ElementType, std::uint8_t>(bytes, normalized, Seq{});
case ComponentType::Short:
return convertAccessorElement<ElementType, std::int16_t>(bytes, normalized, Seq{});
case ComponentType::UnsignedShort:
return convertAccessorElement<ElementType, std::uint16_t>(bytes, normalized, Seq{});
case ComponentType::Int:
return convertAccessorElement<ElementType, std::int32_t>(bytes, normalized, Seq{});
case ComponentType::UnsignedInt:
return convertAccessorElement<ElementType, std::uint32_t>(bytes, normalized, Seq{});
case ComponentType::Float:
return convertAccessorElement<ElementType, float>(bytes, normalized, Seq{});
case ComponentType::Double:
return convertAccessorElement<ElementType, double>(bytes, normalized, Seq{});
case ComponentType::Invalid:
default:
return ElementType{};
}
}
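// Illustrative example (not part of the original header): with the glm traits from
// fastgltf/glm_element_traits.hpp, getAccessorElementAt<glm::vec3>(ComponentType::Float, bytes)
// reinterprets the first 12 bytes at `bytes` as three consecutive floats and returns them as a
// glm::vec3; for integer component types the values are converted (and optionally normalized).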
// Performs a binary search for the index into the sparse index list whose value matches the desired index
template <typename ElementType>
bool findSparseIndex(const std::byte* bytes, std::size_t indexCount, std::size_t desiredIndex,
std::size_t& resultIndex) {
auto* elements = reinterpret_cast<const ElementType*>(bytes);
auto count = indexCount;
resultIndex = 0;
while (count > 0) {
auto step = count / 2;
auto index = resultIndex + step;
if (elements[index] < static_cast<ElementType>(desiredIndex)) {
resultIndex = index + 1;
count -= step + 1;
} else {
count = step;
}
}
return resultIndex < indexCount && elements[resultIndex] == static_cast<ElementType>(desiredIndex);
}
// Dispatches on the index component type and looks up the sparse index entry that matches the desired index
inline bool findSparseIndex(ComponentType componentType, const std::byte* bytes, std::size_t indexCount,
std::size_t desiredIndex, std::size_t& resultIndex) {
switch (componentType) {
case ComponentType::Byte:
return findSparseIndex<std::int8_t>(bytes, indexCount, desiredIndex, resultIndex);
case ComponentType::UnsignedByte:
return findSparseIndex<std::uint8_t>(bytes, indexCount, desiredIndex, resultIndex);
case ComponentType::Short:
return findSparseIndex<std::int16_t>(bytes, indexCount, desiredIndex, resultIndex);
case ComponentType::UnsignedShort:
return findSparseIndex<std::uint16_t>(bytes, indexCount, desiredIndex, resultIndex);
case ComponentType::Int:
return findSparseIndex<std::int32_t>(bytes, indexCount, desiredIndex, resultIndex);
case ComponentType::UnsignedInt:
return findSparseIndex<std::uint32_t>(bytes, indexCount, desiredIndex, resultIndex);
case ComponentType::Float:
case ComponentType::Double:
case ComponentType::Invalid:
return false;
}
return false;
}
} // namespace internal
struct DefaultBufferDataAdapter {
const std::byte* operator()(const Buffer& buffer) const {
return std::visit(visitor {
[](auto&) -> const std::byte* {
return nullptr;
},
[&](const sources::Vector& vec) {
return reinterpret_cast<const std::byte*>(vec.bytes.data());
},
[&](const sources::ByteView& bv) {
return bv.bytes.data();
},
}, buffer.data);
}
};
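DefaultBufferDataAdapter above only resolves buffers whose bytes already live in memory (sources::Vector and sources::ByteView) and yields nullptr for anything else. Callers that keep buffer contents elsewhere can pass their own functor with the same call signature to the accessor helpers below. A minimal sketch of such an adapter, assuming <unordered_map> is available; MappedBufferAdapter and its lookup table are illustrative names, not part of fastgltf:
// Hypothetical adapter: resolves buffer bytes from an externally owned mapping
// (e.g. memory-mapped files) and falls back to the default adapter otherwise.
struct MappedBufferAdapter {
    const std::unordered_map<const fastgltf::Buffer*, const std::byte*>* mapped;

    const std::byte* operator()(const fastgltf::Buffer& buffer) const {
        if (auto it = mapped->find(&buffer); it != mapped->end())
            return it->second;
        return fastgltf::DefaultBufferDataAdapter{}(buffer);
    }
};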
template <typename ElementType, typename BufferDataAdapter>
class IterableAccessor;
template <typename ElementType, typename BufferDataAdapter = DefaultBufferDataAdapter>
class AccessorIterator {
protected:
const IterableAccessor<ElementType, BufferDataAdapter>* accessor;
std::size_t idx;
std::size_t sparseIdx = 0;
std::size_t nextSparseIndex = 0;
public:
using value_type = ElementType;
using reference = ElementType&;
using pointer = ElementType*;
using difference_type = std::ptrdiff_t;
// This iterator isn't truly random access as the C++ standard defines it, but we still want
// to support some of the operations that category enables (e.g. std::distance via operator-).
using iterator_category = std::random_access_iterator_tag;
AccessorIterator(const IterableAccessor<ElementType, BufferDataAdapter>* accessor, std::size_t idx = 0)
: accessor(accessor), idx(idx) {
if (accessor->accessor.sparse.has_value()) {
// Get the first sparse index.
nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(accessor->indexComponentType,
accessor->indicesBytes + accessor->indexStride * sparseIdx);
}
}
AccessorIterator& operator++() noexcept {
++idx;
return *this;
}
AccessorIterator operator++(int) noexcept {
auto x = *this;
++(*this);
return x;
}
[[nodiscard]] difference_type operator-(const AccessorIterator& other) const noexcept {
return static_cast<difference_type>(idx - other.idx);
}
[[nodiscard]] bool operator==(const AccessorIterator& iterator) const noexcept {
// We don't compare sparse properties
return idx == iterator.idx &&
accessor->bufferBytes == iterator.accessor->bufferBytes &&
accessor->stride == iterator.accessor->stride &&
accessor->componentType == iterator.accessor->componentType;
}
[[nodiscard]] bool operator!=(const AccessorIterator& iterator) const noexcept {
return !(*this == iterator);
}
[[nodiscard]] ElementType operator*() noexcept {
if (accessor->accessor.sparse.has_value()) {
if (idx == nextSparseIndex) {
// Get the sparse value for this index
auto value = internal::getAccessorElementAt<ElementType>(accessor->componentType,
accessor->valuesBytes + accessor->valueStride * sparseIdx,
accessor->accessor.normalized);
// Find the next sparse index.
++sparseIdx;
if (sparseIdx < accessor->sparseCount) {
nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(accessor->indexComponentType,
accessor->indicesBytes + accessor->indexStride * sparseIdx);
}
return value;
}
}
return internal::getAccessorElementAt<ElementType>(accessor->componentType,
accessor->bufferBytes + idx * accessor->stride,
accessor->accessor.normalized);
}
};
template <typename ElementType, typename BufferDataAdapter = DefaultBufferDataAdapter>
class IterableAccessor {
friend class AccessorIterator<ElementType, BufferDataAdapter>;
const Asset& asset;
const Accessor& accessor;
const std::byte* bufferBytes;
std::size_t stride;
fastgltf::ComponentType componentType;
// Data needed for sparse accessors
fastgltf::ComponentType indexComponentType;
const std::byte* indicesBytes;
const std::byte* valuesBytes;
std::size_t indexStride;
std::size_t valueStride;
std::size_t sparseCount;
public:
using iterator = AccessorIterator<ElementType, BufferDataAdapter>;
explicit IterableAccessor(const Asset& asset, const Accessor& accessor, const BufferDataAdapter& adapter) : asset(asset), accessor(accessor) {
componentType = accessor.componentType;
const auto& view = asset.bufferViews[*accessor.bufferViewIndex];
stride = view.byteStride ? *view.byteStride : getElementByteSize(accessor.type, accessor.componentType);
bufferBytes = adapter(asset.buffers[view.bufferIndex]);
bufferBytes += view.byteOffset + accessor.byteOffset;
if (accessor.sparse.has_value()) {
const auto& indicesView = asset.bufferViews[accessor.sparse->indicesBufferView];
indicesBytes = adapter(asset.buffers[indicesView.bufferIndex])
+ indicesView.byteOffset + accessor.sparse->indicesByteOffset;
indexStride = getElementByteSize(AccessorType::Scalar, accessor.sparse->indexComponentType);
const auto& valuesView = asset.bufferViews[accessor.sparse->valuesBufferView];
valuesBytes = adapter(asset.buffers[valuesView.bufferIndex])
+ valuesView.byteOffset + accessor.sparse->valuesByteOffset;
// "The index of the bufferView with sparse values. The referenced buffer view MUST NOT
// have its target or byteStride properties defined."
valueStride = getElementByteSize(accessor.type, accessor.componentType);
indexComponentType = accessor.sparse->indexComponentType;
sparseCount = accessor.sparse->count;
}
}
[[nodiscard]] iterator begin() const noexcept {
return iterator(this, 0);
}
[[nodiscard]] iterator end() const noexcept {
return iterator(this, accessor.count);
}
};
template <typename ElementType, typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
ElementType getAccessorElement(const Asset& asset, const Accessor& accessor, size_t index,
const BufferDataAdapter& adapter = {}) {
using Traits = ElementTraits<ElementType>;
static_assert(Traits::type != AccessorType::Invalid, "Accessor traits must provide a valid Accessor Type");
static_assert(std::is_default_constructible_v<ElementType>, "Element type must be default constructible");
static_assert(std::is_constructible_v<ElementType>, "Element type must be constructible");
static_assert(std::is_move_assignable_v<ElementType>, "Element type must be move-assignable");
if (accessor.sparse) {
const auto& indicesView = asset.bufferViews[accessor.sparse->indicesBufferView];
auto* indicesBytes = adapter(asset.buffers[indicesView.bufferIndex])
+ indicesView.byteOffset + accessor.sparse->indicesByteOffset;
const auto& valuesView = asset.bufferViews[accessor.sparse->valuesBufferView];
auto* valuesBytes = adapter(asset.buffers[valuesView.bufferIndex])
+ valuesView.byteOffset + accessor.sparse->valuesByteOffset;
// "The index of the bufferView with sparse values. The referenced buffer view MUST NOT
// have its target or byteStride properties defined."
auto valueStride = getElementByteSize(accessor.type, accessor.componentType);
std::size_t sparseIndex{};
if (internal::findSparseIndex(accessor.sparse->indexComponentType, indicesBytes, accessor.sparse->count,
index, sparseIndex)) {
return internal::getAccessorElementAt<ElementType>(accessor.componentType,
valuesBytes + valueStride * sparseIndex,
accessor.normalized);
}
}
// 5.1.1. accessor.bufferView
// The index of the buffer view. When undefined, the accessor MUST be initialized with zeros; sparse
// property or extensions MAY override zeros with actual values.
if (!accessor.bufferViewIndex) {
if constexpr (std::is_aggregate_v<ElementType>) {
return {};
} else {
return ElementType{};
}
}
const auto& view = asset.bufferViews[*accessor.bufferViewIndex];
auto stride = view.byteStride ? *view.byteStride : getElementByteSize(accessor.type, accessor.componentType);
auto* bytes = adapter(asset.buffers[view.bufferIndex]);
bytes += view.byteOffset + accessor.byteOffset;
return internal::getAccessorElementAt<ElementType>(accessor.componentType, bytes + index * stride, accessor.normalized);
}
template<typename ElementType, typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
IterableAccessor<ElementType, BufferDataAdapter> iterateAccessor(const Asset& asset, const Accessor& accessor, const BufferDataAdapter& adapter = {}) {
return IterableAccessor<ElementType, BufferDataAdapter>(asset, accessor, adapter);
}
template <typename ElementType, typename Functor, typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
void iterateAccessor(const Asset& asset, const Accessor& accessor, Functor&& func,
const BufferDataAdapter& adapter = {}) {
using Traits = ElementTraits<ElementType>;
static_assert(Traits::type != AccessorType::Invalid, "Accessor traits must provide a valid accessor type");
static_assert(Traits::enum_component_type != ComponentType::Invalid, "Accessor traits must provide a valid component type");
static_assert(std::is_default_constructible_v<ElementType>, "Element type must be default constructible");
static_assert(std::is_constructible_v<ElementType>, "Element type must be constructible");
static_assert(std::is_move_assignable_v<ElementType>, "Element type must be move-assignable");
if (accessor.type != Traits::type) {
return;
}
if (accessor.sparse && accessor.sparse->count > 0) {
auto& indicesView = asset.bufferViews[accessor.sparse->indicesBufferView];
auto* indicesBytes = adapter(asset.buffers[indicesView.bufferIndex])
+ indicesView.byteOffset + accessor.sparse->indicesByteOffset;
auto indexStride = getElementByteSize(AccessorType::Scalar, accessor.sparse->indexComponentType);
auto& valuesView = asset.bufferViews[accessor.sparse->valuesBufferView];
auto* valuesBytes = adapter(asset.buffers[valuesView.bufferIndex])
+ valuesView.byteOffset + accessor.sparse->valuesByteOffset;
// "The index of the bufferView with sparse values. The referenced buffer view MUST NOT
// have its target or byteStride properties defined."
auto valueStride = getElementByteSize(accessor.type, accessor.componentType);
const std::byte* srcBytes = nullptr;
std::size_t srcStride = 0;
// 5.1.1. accessor.bufferView
// The index of the buffer view. When undefined, the accessor MUST be initialized with zeros; sparse
// property or extensions MAY override zeros with actual values.
if (accessor.bufferViewIndex) {
auto& view = asset.bufferViews[*accessor.bufferViewIndex];
srcBytes = adapter(asset.buffers[view.bufferIndex]) + view.byteOffset + accessor.byteOffset;
srcStride = view.byteStride ? *view.byteStride
: getElementByteSize(accessor.type, accessor.componentType);
}
auto nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(
accessor.sparse->indexComponentType, indicesBytes);
std::size_t sparseIndexCount = 0;
for (std::size_t i = 0; i < accessor.count; ++i) {
if (i == nextSparseIndex) {
func(internal::getAccessorElementAt<ElementType>(accessor.componentType,
valuesBytes + valueStride * sparseIndexCount,
accessor.normalized));
++sparseIndexCount;
if (sparseIndexCount < accessor.sparse->count) {
nextSparseIndex = internal::getAccessorElementAt<std::uint32_t>(
accessor.sparse->indexComponentType, indicesBytes + indexStride * sparseIndexCount);
}
} else if (accessor.bufferViewIndex) {
func(internal::getAccessorElementAt<ElementType>(accessor.componentType,
srcBytes + srcStride * i,
accessor.normalized));
} else {
func(ElementType{});
}
}
return;
}
// 5.1.1. accessor.bufferView
// The index of the buffer view. When undefined, the accessor MUST be initialized with zeros; sparse
// property or extensions MAY override zeros with actual values.
if (!accessor.bufferViewIndex) {
for (std::size_t i = 0; i < accessor.count; ++i) {
func(ElementType{});
}
}
else {
auto& view = asset.bufferViews[*accessor.bufferViewIndex];
auto stride = view.byteStride ? *view.byteStride : getElementByteSize(accessor.type, accessor.componentType);
auto* bytes = adapter(asset.buffers[view.bufferIndex]);
bytes += view.byteOffset + accessor.byteOffset;
for (std::size_t i = 0; i < accessor.count; ++i) {
func(internal::getAccessorElementAt<ElementType>(accessor.componentType, bytes + i * stride, accessor.normalized));
}
}
}
template <typename ElementType, typename Functor, typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
void iterateAccessorWithIndex(const Asset& asset, const Accessor& accessor, Functor&& func,
const BufferDataAdapter& adapter = {}) {
std::size_t idx = 0;
iterateAccessor<ElementType>(asset, accessor, [&](auto&& elementType) {
func(std::forward<ElementType>(elementType), idx++);
}, adapter);
}
template <typename ElementType, std::size_t TargetStride = sizeof(ElementType),
typename BufferDataAdapter = DefaultBufferDataAdapter>
#if FASTGLTF_HAS_CONCEPTS
requires Element<ElementType>
#endif
void copyFromAccessor(const Asset& asset, const Accessor& accessor, void* dest,
const BufferDataAdapter& adapter = {}) {
using Traits = ElementTraits<ElementType>;
static_assert(Traits::type != AccessorType::Invalid, "Accessor traits must provide a valid accessor type");
static_assert(Traits::enum_component_type != ComponentType::Invalid, "Accessor traits must provide a valid component type");
static_assert(std::is_default_constructible_v<ElementType>, "Element type must be default constructible");
static_assert(std::is_constructible_v<ElementType>, "Element type must be constructible");
static_assert(std::is_move_assignable_v<ElementType>, "Element type must be move-assignable");
if (accessor.type != Traits::type) {
return;
}
auto* dstBytes = reinterpret_cast<std::byte*>(dest);
if (accessor.sparse && accessor.sparse->count > 0) {
return iterateAccessorWithIndex<ElementType>(asset, accessor, [&](auto&& value, std::size_t index) {
auto* pDest = reinterpret_cast<ElementType*>(dstBytes + TargetStride * index);
*pDest = std::forward<ElementType>(value);
}, adapter);
}
auto elemSize = getElementByteSize(accessor.type, accessor.componentType);
// 5.1.1. accessor.bufferView
// The index of the buffer view. When undefined, the accessor MUST be initialized with zeros; sparse
// property or extensions MAY override zeros with actual values.
if (!accessor.bufferViewIndex) {
if constexpr (std::is_trivially_copyable_v<ElementType>) {
if (TargetStride == elemSize) {
std::memset(dest, 0, elemSize * accessor.count);
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
std::memset(dstBytes + i * TargetStride, 0, elemSize);
}
}
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
auto* pDest = reinterpret_cast<ElementType*>(dstBytes + TargetStride * i);
if constexpr (std::is_aggregate_v<ElementType>) {
*pDest = {};
} else {
*pDest = ElementType{};
}
}
}
return;
}
auto& view = asset.bufferViews[*accessor.bufferViewIndex];
auto srcStride = view.byteStride ? *view.byteStride
: getElementByteSize(accessor.type, accessor.componentType);
auto* srcBytes = adapter(asset.buffers[view.bufferIndex]) + view.byteOffset + accessor.byteOffset;
// If the accessor is marked as normalized, or its component type doesn't match the destination's
// component type, every element has to be converted individually, so we can't simply memcpy.
if (std::is_trivially_copyable_v<ElementType> && !accessor.normalized && accessor.componentType == Traits::enum_component_type) {
if (srcStride == elemSize && srcStride == TargetStride) {
std::memcpy(dest, srcBytes, elemSize * accessor.count);
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
std::memcpy(dstBytes + TargetStride * i, srcBytes + srcStride * i, elemSize);
}
}
} else {
for (std::size_t i = 0; i < accessor.count; ++i) {
auto* pDest = reinterpret_cast<ElementType*>(dstBytes + TargetStride * i);
*pDest = internal::getAccessorElementAt<ElementType>(accessor.componentType, srcBytes + srcStride * i);
}
}
}
} // namespace fastgltf
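The accessor helpers above (getAccessorElement, iterateAccessor, iterateAccessorWithIndex, copyFromAccessor) form the public surface of this header. A minimal usage sketch under stated assumptions: the asset has already been parsed, the accessor is a VEC3 accessor with float components (e.g. a POSITION attribute), and Vec3/readPositions are illustrative caller-side names; the ElementTraits specialization mirrors the one used in the accessor tests further below.
// Caller-defined element type; requires <vector> and <cstddef>.
struct Vec3 { float x, y, z; };

// Tell fastgltf how Vec3 maps onto a VEC3 accessor with float components.
template <>
struct fastgltf::ElementTraits<Vec3>
    : fastgltf::ElementTraitsBase<Vec3, fastgltf::AccessorType::Vec3, float> {};

std::vector<Vec3> readPositions(const fastgltf::Asset& asset, const fastgltf::Accessor& accessor) {
    std::vector<Vec3> positions(accessor.count);
    // Visit every element in index order; sparse substitution and normalization
    // are handled internally by the helpers.
    fastgltf::iterateAccessorWithIndex<Vec3>(asset, accessor,
        [&](Vec3 value, std::size_t idx) { positions[idx] = value; });
    // Equivalent bulk copy into tightly packed storage:
    //   fastgltf::copyFromAccessor<Vec3>(asset, accessor, positions.data());
    // Random access to a single element:
    //   Vec3 first = fastgltf::getAccessorElement<Vec3>(asset, accessor, 0);
    return positions;
}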

File diff suppressed because it is too large

View File

@@ -0,0 +1,334 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <array>
#include <cmath>
#include <limits>
#include <string_view>
#include <type_traits>
// Macros to determine C++ standard version
#if (!defined(_MSVC_LANG) && __cplusplus >= 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
#define FASTGLTF_CPP_17 1
#else
#error "fastgltf requires C++17"
#endif
#if (!defined(_MSVC_LANG) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
#define FASTGLTF_CPP_20 1
#else
#define FASTGLTF_CPP_20 0
#endif
#if (!defined(_MSVC_LANG) && __cplusplus >= 202302L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202302L)
#define FASTGLTF_CPP_23 1
#else
#define FASTGLTF_CPP_23 0
#endif
#if FASTGLTF_CPP_20 && defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L
#define FASTGLTF_HAS_BIT 1
#include <bit>
#else
#define FASTGLTF_HAS_BIT 0
#endif
#if FASTGLTF_CPP_20 && defined(__cpp_concepts) && __cpp_concepts >= 202002L
#define FASTGLTF_HAS_CONCEPTS 1
#include <concepts>
#else
#define FASTGLTF_HAS_CONCEPTS 0
#endif
#if FASTGLTF_CPP_23
#define FASTGLTF_UNREACHABLE std::unreachable();
#elif defined(__GNUC__) || defined(__clang__)
#define FASTGLTF_UNREACHABLE __builtin_unreachable();
#elif defined(_MSC_VER)
#define FASTGLTF_UNREACHABLE __assume(false);
#else
#define FASTGLTF_UNREACHABLE assert(0);
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 5030) // attribute 'x' is not recognized
#pragma warning(disable : 4514) // unreferenced inline function has been removed
#endif
namespace fastgltf {
template<typename T>
#if FASTGLTF_HAS_CONCEPTS
requires std::is_enum_v<T>
#endif
[[nodiscard]] constexpr std::underlying_type_t<T> to_underlying(T t) noexcept {
#if !FASTGLTF_HAS_CONCEPTS
static_assert(std::is_enum_v<T>, "to_underlying only works with enum types.");
#endif
return static_cast<std::underlying_type_t<T>>(t);
}
template <typename T, typename U>
#if FASTGLTF_HAS_CONCEPTS
requires ((std::is_enum_v<T> && std::integral<std::underlying_type_t<T>>) || std::integral<T>) && requires (T t, U u) {
{ t & u } -> std::same_as<U>;
}
#endif
[[nodiscard]] constexpr bool hasBit(T flags, U bit) {
#if !FASTGLTF_HAS_CONCEPTS
static_assert((std::is_enum_v<T> && std::is_integral_v<std::underlying_type_t<T>>) || std::is_integral_v<T>);
#endif
return (flags & bit) == bit;
}
template <typename T>
[[nodiscard]] constexpr T alignUp(T base, T alignment) {
static_assert(std::is_signed_v<T>, "alignUp requires type T to be signed.");
return (base + alignment - 1) & -alignment;
}
template <typename T>
[[nodiscard]] constexpr T alignDown(T base, T alignment) {
return base - (base % alignment);
}
template <typename T>
#if FASTGLTF_HAS_CONCEPTS
requires requires (T t) {
{ t > t } -> std::same_as<bool>;
}
#endif
[[nodiscard]] constexpr T max(T a, T b) noexcept {
return (a > b) ? a : b;
}
/**
* Decomposes a transform matrix into the translation, rotation, and scale components. This
* function does not support skew, shear, or perspective. This currently uses a quick algorithm
* to calculate the quaternion from the rotation matrix, which might occasionally lose some
* precision, though we try to use doubles here.
*/
inline void decomposeTransformMatrix(std::array<float, 16> matrix, std::array<float, 3>& scale, std::array<float, 4>& rotation, std::array<float, 3>& translation) {
// Extract the translation. We zero the translation out, as we reuse the matrix as
// the rotation matrix at the end.
translation = {matrix[12], matrix[13], matrix[14]};
matrix[12] = matrix[13] = matrix[14] = 0;
// Extract the scale. We calculate the Euclidean length of each column and then
// construct a vector from those lengths. (std::sqrtf isn't available in every
// standard library implementation, which is why the global sqrtf is used here.)
auto s1 = sqrtf(matrix[0] * matrix[0] + matrix[1] * matrix[1] + matrix[2] * matrix[2]);
auto s2 = sqrtf(matrix[4] * matrix[4] + matrix[5] * matrix[5] + matrix[6] * matrix[6]);
auto s3 = sqrtf(matrix[8] * matrix[8] + matrix[9] * matrix[9] + matrix[10] * matrix[10]);
scale = {s1, s2, s3};
// Remove the scaling from the matrix, leaving only the rotation. matrix is now the
// rotation matrix.
matrix[0] /= s1; matrix[1] /= s1; matrix[2] /= s1;
matrix[4] /= s2; matrix[5] /= s2; matrix[6] /= s2;
matrix[8] /= s3; matrix[9] /= s3; matrix[10] /= s3;
// Construct the quaternion. This algo is copied from here:
// https://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/christian.htm.
// glTF orders the components as x,y,z,w
rotation = {
max(.0f, 1 + matrix[0] - matrix[5] - matrix[10]),
max(.0f, 1 - matrix[0] + matrix[5] - matrix[10]),
max(.0f, 1 - matrix[0] - matrix[5] + matrix[10]),
max(.0f, 1 + matrix[0] + matrix[5] + matrix[10]),
};
for (auto& x : rotation) {
x = static_cast<float>(std::sqrt(static_cast<double>(x)) / 2);
}
rotation[0] = std::copysignf(rotation[0], matrix[6] - matrix[9]);
rotation[1] = std::copysignf(rotation[1], matrix[8] - matrix[2]);
rotation[2] = std::copysignf(rotation[2], matrix[1] - matrix[4]);
}
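A brief usage sketch of the decomposition above, placed here purely for illustration. The matrix is column-major, as glTF stores node matrices; the identity matrix and the function name are hypothetical sample input, not part of fastgltf:
inline void decomposeTransformExample() {
    // Column-major 4x4 matrix; the identity is used purely as sample input.
    std::array<float, 16> matrix = {
        1.f, 0.f, 0.f, 0.f,
        0.f, 1.f, 0.f, 0.f,
        0.f, 0.f, 1.f, 0.f,
        0.f, 0.f, 0.f, 1.f,
    };
    std::array<float, 3> scale{}, translation{};
    std::array<float, 4> rotation{};
    decomposeTransformMatrix(matrix, scale, rotation, translation);
    // Expected results: translation == {0,0,0}, rotation == {0,0,0,1} (x,y,z,w),
    // scale == {1,1,1}.
}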
/**
* Constants generated using 0x82f63b79u CRC poly.
*/
static constexpr std::array<std::uint32_t, 256> crcHashTable = {{
0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351,
}};
[[gnu::hot, gnu::const]] constexpr std::uint32_t crc32c(std::string_view str) noexcept {
std::uint32_t crc = 0;
for (auto c : str)
crc = (crc >> 8) ^ crcHashTable[(crc ^ static_cast<std::uint8_t>(c)) & 0xff];
return crc;
}
[[gnu::hot, gnu::const]] constexpr std::uint32_t crc32c(const std::uint8_t* d, std::size_t len) noexcept {
std::uint32_t crc = 0;
for (std::size_t i = 0; i < len; ++i)
crc = (crc >> 8) ^ crcHashTable[(crc ^ d[i]) & 0xff];
return crc;
}
#if defined(__x86_64__) || defined(_M_AMD64) || defined(_M_IX86)
/**
* Variant of crc32 that uses SSE4.2 instructions to increase performance. Note that this does not
* check for availability of said instructions.
*/
[[gnu::hot, gnu::const]] std::uint32_t hwcrc32c(std::string_view str) noexcept;
[[gnu::hot, gnu::const]] std::uint32_t hwcrc32c(const std::uint8_t* d, std::size_t len) noexcept;
#endif
/**
* Helper to force evaluation of constexpr functions at compile time in C++17. One example of
* this is crc32c: force_consteval<crc32c("string")>. No matter the context, this will
* always be evaluated as a constant expression.
*/
template <auto V>
static constexpr auto force_consteval = V;
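A small sketch of the pattern this enables: hashing known strings at compile time while hashing runtime input with the same constexpr function. The function name and the extension string below are illustrative only and not an excerpt of the parser:
// Compare a runtime string against a compile-time hash without storing the string itself.
inline bool isLightsPunctual(std::string_view extensionName) {
    switch (crc32c(extensionName)) {
        case force_consteval<crc32c("KHR_lights_punctual")>:
            return true;
        default:
            return false;
    }
}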
/**
* Counts the leading zeros starting from the most significant bit. Returns a std::uint8_t, as
* even with 64-bit types there can be at most 64 (2^6) leading zeros.
*/
template <typename T>
#if FASTGLTF_HAS_CONCEPTS
requires std::integral<T>
#endif
[[gnu::const]] inline std::uint8_t clz(T value) {
static_assert(std::is_integral_v<T>);
#if FASTGLTF_HAS_BIT
return static_cast<std::uint8_t>(std::countl_zero(value));
#else
// Very naive but working implementation of counting leading zero bits. Any decent compiler
// will recognise this pattern and optimise it, e.g. into a single bsr/lzcnt instruction on x86.
// With no bits set, the number of leading zeros equals the bit width of T.
if (value == 0) return static_cast<std::uint8_t>(std::numeric_limits<T>::digits);
std::uint8_t count = 0;
for (auto i = std::numeric_limits<T>::digits - 1; i > 0; --i) {
if ((value >> i) == 1) {
return count;
}
++count;
}
return count;
#endif
}
template <typename T>
[[gnu::const]] inline std::uint8_t popcount(T value) {
static_assert(std::is_integral_v<T>);
#if FASTGLTF_HAS_BIT
return static_cast<std::uint8_t>(std::popcount(value));
#else
std::uint8_t bits = 0;
while (value) {
if (value & 1)
++bits;
value >>= 1;
}
return bits;
#endif
}
/**
* Essentially the same as std::is_same<T, U>, but it accepts multiple types for U and
* checks whether T is any of them.
*/
template <typename T, typename... Ts>
using is_any = std::disjunction<std::is_same<T, Ts>...>;
/**
* Simple function to check if the given string starts with a given set of characters.
*/
inline bool startsWith(std::string_view str, std::string_view search) {
return str.rfind(search, 0) == 0;
}
/**
* Helper type that allows building a visitor out of multiple lambdas within a call to
* std::visit.
*/
template<class... Ts>
struct visitor : Ts... {
using Ts::operator()...;
};
template<class... Ts> visitor(Ts...) -> visitor<Ts...>;
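A compact sketch of how the helper is typically used (the DefaultBufferDataAdapter earlier in this changeset applies the same pattern to Buffer::data). The variant and the function name are hypothetical, and <variant> plus <string> would need to be included:
// One lambda per variant alternative, listed directly inside the braces thanks to
// the deduction guide above.
inline std::size_t alternativeSize(const std::variant<std::monostate, std::string>& v) {
    return std::visit(visitor {
        [](std::monostate) -> std::size_t { return 0; },
        [](const std::string& s) -> std::size_t { return s.size(); },
    }, v);
}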
// For simple ops like &, |, +, - taking a left and right operand.
#define FASTGLTF_ARITHMETIC_OP_TEMPLATE_MACRO(T1, T2, op) \
constexpr T1 operator op(const T1& a, const T2& b) noexcept { \
static_assert(std::is_enum_v<T1> && std::is_enum_v<T2>); \
return static_cast<T1>(to_underlying(a) op to_underlying(b)); \
}
// For any ops like |=, &=, +=, -=
#define FASTGLTF_ASSIGNMENT_OP_TEMPLATE_MACRO(T1, T2, op) \
constexpr T1& operator op##=(T1& a, const T2& b) noexcept { \
static_assert(std::is_enum_v<T1> && std::is_enum_v<T2>); \
return a = static_cast<T1>(to_underlying(a) op to_underlying(b)), a; \
}
// For unary +, unary -, and bitwise NOT
#define FASTGLTF_UNARY_OP_TEMPLATE_MACRO(T, op) \
constexpr T operator op(const T& a) noexcept { \
static_assert(std::is_enum_v<T>); \
return static_cast<T>(op to_underlying(a)); \
}
} // namespace fastgltf
#ifdef _MSC_VER
#pragma warning(pop)
#endif

third_party/fastgltf/src/base64.cpp vendored Normal file
View File

@@ -0,0 +1,452 @@
/*
* Copyright (C) 2022 - 2023 spnda
* This file is part of fastgltf <https://github.com/spnda/fastgltf>.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#if !defined(__cplusplus) || (!defined(_MSVC_LANG) && __cplusplus < 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG < 201703L)
#error "fastgltf requires C++17"
#endif
#include <array>
#include <cmath>
#include <functional>
#include "simdjson.h"
#include <fastgltf/base64.hpp>
#if defined(FASTGLTF_IS_X86)
#if defined(__clang__) || defined(__GNUC__)
// The idea behind manually including all headers with the required intrinsics
// is that the usual intrin.h will only include these under Clang when -mavx or
// -mavx2 is specified, which in turn would have the entire program be compiled
// with these instructions used in optimisations.
#include <immintrin.h>
#include <smmintrin.h>
#include <avxintrin.h>
#include <avx2intrin.h>
#else
#include <intrin.h>
#endif
#elif defined(FASTGLTF_IS_A64)
#include <arm_neon.h> // Includes arm64_neon.h on MSVC
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 5030)
#endif
namespace fg = fastgltf;
#if defined(_MSC_VER)
#define FORCEINLINE __forceinline
#else
// On other compilers we need the inline specifier, so that the functions in this compilation unit
// can be properly inlined without the "function body can be overwritten at link time" error.
#define FORCEINLINE inline
#endif
namespace fastgltf::base64 {
using DecodeFunctionInplace = std::function<void(std::string_view, std::uint8_t*, std::size_t)>;
using DecodeFunction = std::function<std::vector<std::uint8_t>(std::string_view)>;
struct DecodeFunctionGetter {
DecodeFunction func;
DecodeFunctionInplace inplace;
explicit DecodeFunctionGetter() {
// We use simdjson's helper functions to determine which SIMD intrinsics are available at runtime.
// The different implementations, because they're SIMD-based, require a minimum number of
// characters, as they load multiple characters at once.
const auto& impls = simdjson::get_available_implementations();
#if defined(FASTGLTF_IS_X86)
if (const auto* avx2 = impls["haswell"]; avx2 != nullptr && avx2->supported_by_runtime_system()) {
func = avx2_decode;
inplace = avx2_decode_inplace;
} else if (const auto* sse4 = impls["westmere"]; sse4 != nullptr && sse4->supported_by_runtime_system()) {
func = sse4_decode;
inplace = sse4_decode_inplace;
}
#elif defined(FASTGLTF_IS_A64)
// _M_ARM64 always guarantees 64-bit ARM processors that support NEON, defined by MSVC.
// __aarch64__ always guarantees 64-bit ARMv8 processors that support NEON, defined by GCC and Clang.
// __ARM_NEON always guarantees NEON support, defined by Clang and GCC.
if (const auto* neon = impls["arm64"]; neon && neon->supported_by_runtime_system()) {
func = neon_decode;
inplace = neon_decode_inplace;
}
#else
if (false) {}
#endif
else {
func = fallback_decode;
inplace = fallback_decode_inplace;
}
}
static DecodeFunctionGetter* get() {
static DecodeFunctionGetter getter;
return &getter;
}
};
} // namespace fastgltf::base64
#if defined(FASTGLTF_IS_X86)
// The AVX and SSE decoding functions are based on http://0x80.pl/notesen/2016-01-17-sse-base64-decoding.html.
// It covers various methods of en-/decoding base64 using SSE and AVX and also shows their
// performance metrics.
// TODO: Mark these functions with msvc::forceinline which is available from C++20
[[gnu::target("avx2"), gnu::always_inline]] FORCEINLINE auto avx2_lookup_pshufb_bitmask(const __m256i input) {
const auto higher_nibble = _mm256_and_si256(_mm256_srli_epi32(input, 4), _mm256_set1_epi8(0x0f));
const auto shiftLUT = _mm256_setr_epi8(
0, 0, 19, 4, -65, -65, -71, -71,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 19, 4, -65, -65, -71, -71,
0, 0, 0, 0, 0, 0, 0, 0);
const auto sh = _mm256_shuffle_epi8(shiftLUT, higher_nibble);
const auto eq_2f = _mm256_cmpeq_epi8(input, _mm256_set1_epi8(0x2f));
const auto shift = _mm256_blendv_epi8(sh, _mm256_set1_epi8(16), eq_2f);
return _mm256_add_epi8(input, shift);
}
[[gnu::target("avx2"), gnu::always_inline]] FORCEINLINE auto avx2_pack_ints(__m256i input) {
const auto merge = _mm256_maddubs_epi16(input, _mm256_set1_epi32(0x01400140));
return _mm256_madd_epi16(merge, _mm256_set1_epi32(0x00011000));
}
[[gnu::target("avx2")]] void fg::base64::avx2_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding) {
constexpr auto dataSetSize = 32;
constexpr auto dataOutputSize = 24;
if (encoded.size() < dataSetSize) {
fallback_decode_inplace(encoded, output, padding);
return;
}
// We align the size to the highest size divisible by 32. By doing this, we don't need to
// allocate any new memory to hold the encoded data and let the fallback decoder decode the
// remaining data.
const auto encodedSize = encoded.size();
const auto outputSize = getOutputSize(encodedSize, padding);
const auto alignedSize = outputSize - (outputSize % dataOutputSize);
auto* out = output;
// _mm256_setr_epi8 accepts only 'char' but 0xff would overflow a signed char.
// This gets optimised to the same assembly as a call to the aforementioned intrinsic.
static const std::array<std::uint8_t, 32> shuffleData = {{
2, 1, 0,
6, 5, 4,
10, 9, 8,
14, 13, 12,
0xff, 0xff, 0xff, 0xff,
2, 1, 0,
6, 5, 4,
10, 9, 8,
14, 13, 12,
0xff, 0xff, 0xff, 0xff
}};
__m256i shuffle;
std::memcpy(&shuffle, shuffleData.data(), shuffleData.size());
std::size_t pos = 0;
while ((pos + dataSetSize) < alignedSize) {
auto in = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(&encoded[pos]));
auto values = avx2_lookup_pshufb_bitmask(in);
const auto merged = avx2_pack_ints(values);
const auto shuffled = _mm256_shuffle_epi8(merged, shuffle);
// Beware: This writes 32 bytes, we just discard the top 8 bytes.
_mm_storeu_si128(reinterpret_cast<__m128i*>(out), _mm256_castsi256_si128(shuffled));
_mm_storeu_si128(reinterpret_cast<__m128i*>(out + (dataOutputSize / 2)), _mm256_extracti128_si256(shuffled, 1));
out += dataOutputSize;
pos += dataSetSize;
}
// Decode the last chunk traditionally
fallback_decode_inplace(encoded.substr(pos, encodedSize), out, padding);
}
[[gnu::target("avx2")]] std::vector<std::uint8_t> fg::base64::avx2_decode(std::string_view encoded) {
const auto encodedSize = encoded.size();
const auto padding = getPadding(encoded);
std::vector<std::uint8_t> ret(getOutputSize(encodedSize, padding));
avx2_decode_inplace(encoded, ret.data(), padding);
return ret;
}
[[gnu::target("sse4.1"), gnu::always_inline]] FORCEINLINE auto sse4_lookup_pshufb_bitmask(const __m128i input) {
const auto higher_nibble = _mm_and_si128(_mm_srli_epi32(input, 4), _mm_set1_epi8(0x0f));
const auto shiftLUT = _mm_setr_epi8(
0, 0, 19, 4, -65, -65, -71, -71,
0, 0, 0, 0, 0, 0, 0, 0);
const auto sh = _mm_shuffle_epi8(shiftLUT, higher_nibble);
const auto eq_2f = _mm_cmpeq_epi8(input, _mm_set1_epi8(0x2f));
const auto shift = _mm_blendv_epi8(sh, _mm_set1_epi8(16), eq_2f);
return _mm_add_epi8(input, shift);
}
[[gnu::target("sse4.1"), gnu::always_inline]] FORCEINLINE auto sse4_pack_ints(__m128i input) {
const auto merge = _mm_maddubs_epi16(input, _mm_set1_epi32(0x01400140));
return _mm_madd_epi16(merge, _mm_set1_epi32(0x00011000));
}
[[gnu::target("sse4.1")]] void fg::base64::sse4_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding) {
constexpr auto dataSetSize = 16;
constexpr auto dataOutputSize = 12;
if (encoded.size() < dataSetSize) {
fallback_decode_inplace(encoded, output, padding);
return;
}
// We align the size to the highest size divisible by 16. By doing this, we don't need to
// allocate any new memory to hold the encoded data and let the fallback decoder decode the
// remaining data.
const auto encodedSize = encoded.size();
const auto outputSize = getOutputSize(encodedSize, padding);
const auto alignedSize = outputSize - (outputSize % dataOutputSize);
auto* out = output;
// _mm_setr_epi8 accepts only 'char' but 0xff would overflow a signed char.
// This gets optimised to the same assembly as a call to the aforementioned intrinsic.
static const std::array<std::uint8_t, 16> shuffleData = {{
2, 1, 0,
6, 5, 4,
10, 9, 8,
14, 13, 12,
0xff, 0xff, 0xff, 0xff,
}};
__m128i shuffle;
std::memcpy(&shuffle, shuffleData.data(), shuffleData.size());
std::size_t pos = 0;
while ((pos + dataSetSize) < alignedSize) {
auto in = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&encoded[pos]));
auto values = sse4_lookup_pshufb_bitmask(in);
const auto merged = sse4_pack_ints(values);
const auto shuffled = _mm_shuffle_epi8(merged, shuffle);
// Beware: This writes 16 bytes, we just discard the top 4 bytes.
_mm_storeu_si128(reinterpret_cast<__m128i*>(out), shuffled);
out += dataOutputSize;
pos += dataSetSize;
}
// Decode the last chunk traditionally
fallback_decode_inplace(encoded.substr(pos, encodedSize), out, padding);
}
[[gnu::target("sse4.1")]] std::vector<std::uint8_t> fg::base64::sse4_decode(std::string_view encoded) {
const auto encodedSize = encoded.size();
const auto padding = getPadding(encoded);
std::vector<std::uint8_t> ret(getOutputSize(encodedSize, padding));
sse4_decode_inplace(encoded, ret.data(), padding);
return ret;
}
#elif defined(FASTGLTF_IS_A64)
[[gnu::always_inline]] FORCEINLINE int8x16_t neon_lookup_pshufb_bitmask(const uint8x16_t input) {
// clang-format off
constexpr std::array<int8_t, 16> shiftLUTdata = {
0, 0, 19, 4, -65, -65, -71, -71,
0, 0, 0, 0, 0, 0, 0, 0
};
// clang-format on
const uint64x2_t higher_nibble = vandq_s32(vshlq_u32(vreinterpretq_u32_u8(input), vdupq_n_s32(-4)), vdupq_n_s8(0x0f));
const int8x16_t shiftLUT = vld1q_s8(shiftLUTdata.data());
const int8x16_t sh = vqtbl1q_s8(shiftLUT, vandq_u8(higher_nibble, vdupq_n_u8(0x8F)));
const uint8x16_t eq_2f = vceqq_s8(input, vdupq_n_s8(0x2F));
const uint8x16_t shift = vbslq_u8(vshrq_n_s8(eq_2f, 7), vdupq_n_s8(16), sh);
return vaddq_s8(input, shift);
}
[[gnu::always_inline]] FORCEINLINE int16x8_t neon_pack_ints(const int8x16_t input) {
const uint32x4_t mask = vdupq_n_u32(0x01400140);
const int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(input))), vmovl_s8(vget_low_s8(mask)));
const int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(input))), vmovl_s8(vget_high_s8(mask)));
const int16x8_t merge = vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th));
// Multiply the 8 signed 16-bit integers from a and b and add the n and n + 1 results together,
// resulting in 4 32-bit integers.
const uint32x4_t mergeMask = vdupq_n_u32(0x00011000);
const int32x4_t pl = vmull_s16(vget_low_s16(merge), vget_low_s16(mergeMask));
const int32x4_t ph = vmull_high_s16(merge, mergeMask);
return vpaddq_s32(pl, ph);
}
// clang-format off
[[gnu::aligned(16)]] static constexpr std::array<uint8_t, 16> shuffleData = {
2, 1, 0,
6, 5, 4,
10, 9, 8,
14, 13, 12,
0xff, 0xff, 0xff, 0xff
};
// clang-format on
void fg::base64::neon_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding) {
constexpr auto dataSetSize = 16;
constexpr auto dataOutputSize = 12;
if (encoded.size() < dataSetSize) {
fallback_decode_inplace(encoded, output, padding);
return;
}
// We align the size to the highest size divisible by 16. By doing this, we don't need to
// allocate any new memory to hold the encoded data and let the fallback decoder decode the
// remaining data.
const auto encodedSize = encoded.size();
const auto alignedSize = encodedSize - (encodedSize % dataSetSize);
auto* out = output;
// Decode the first 16 long chunks with Neon intrinsics
const auto shuffle = vld1q_u8(shuffleData.data());
std::size_t pos = 0;
while ((pos + dataSetSize) < alignedSize) {
// Load 16 8-bit values into a 128-bit register.
auto in = vld1q_u8(reinterpret_cast<const std::uint8_t*>(&encoded[pos]));
auto values = neon_lookup_pshufb_bitmask(in);
const auto merged = neon_pack_ints(values);
const auto masked = vandq_u8(shuffle, vdupq_n_u8(0x8F));
const auto shuffled = vqtbl1q_s8(merged, masked);
// Store 16 8-bit values into output pointer
vst1q_u8(out, shuffled);
out += dataOutputSize;
pos += dataSetSize;
}
// Decode the last chunk traditionally
fallback_decode_inplace(encoded.substr(pos, encodedSize), out, padding);
}
std::vector<std::uint8_t> fg::base64::neon_decode(std::string_view encoded) {
const auto encodedSize = encoded.size();
const auto padding = getPadding(encoded);
std::vector<std::uint8_t> ret(getOutputSize(encodedSize, padding));
neon_decode_inplace(encoded, ret.data(), padding);
return ret;
}
#endif
// clang-format off
// ASCII value -> base64 value LUT
static constexpr std::array<std::uint8_t, 128> base64lut = {
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,62,0,0,0,63,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
0,0,0,0,0,0,0,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
0,0,0,0,0,0,
26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,
0,0,0,0,0,
};
// clang-format on
namespace fastgltf::base64 {
[[gnu::always_inline]] FORCEINLINE void decode_block(std::array<std::uint8_t, 4>& sixBitChars, std::uint8_t* output) {
for (std::size_t i = 0; i < 4; i++) {
assert(static_cast<std::size_t>(sixBitChars[i]) < base64lut.size());
sixBitChars[i] = base64lut[sixBitChars[i]];
}
output[0] = (sixBitChars[0] << 2) + ((sixBitChars[1] & 0x30) >> 4);
output[1] = ((sixBitChars[1] & 0xf) << 4) + ((sixBitChars[2] & 0x3c) >> 2);
output[2] = ((sixBitChars[2] & 0x3) << 6) + sixBitChars[3];
}
} // namespace fastgltf::base64
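As a concrete check of the bit arithmetic in decode_block, the four characters "TWFu" decode to the three bytes of "Man". A standalone sketch, assuming <cassert> is available in this translation unit (assert is already used below); the function name is illustrative:
// Worked example: after the LUT, 'T'=19, 'W'=22, 'F'=5, 'u'=46, so
//   out[0] = (19 << 2) + ((22 & 0x30) >> 4)       = 77  = 'M'
//   out[1] = ((22 & 0xf) << 4) + ((5 & 0x3c) >> 2) = 97  = 'a'
//   out[2] = ((5 & 0x3) << 6) + 46                 = 110 = 'n'
inline void decodeBlockExample() {
    std::array<std::uint8_t, 4> chars = {'T', 'W', 'F', 'u'};
    std::array<std::uint8_t, 3> out = {};
    fastgltf::base64::decode_block(chars, out.data());
    assert(out[0] == 'M' && out[1] == 'a' && out[2] == 'n');
}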
void fg::base64::fallback_decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding) {
constexpr std::size_t blockSize = 4 * sizeof(char);
std::array<std::uint8_t, 4> sixBitChars = {};
// Process the input in blocks of four characters, tracking the output write position with 'cursor'.
const auto encodedSize = encoded.size();
std::size_t cursor = 0U;
for (auto pos = 0U; pos + 4 < encodedSize; pos += 4) {
std::memcpy(sixBitChars.data(), &encoded[pos], blockSize);
decode_block(sixBitChars, &output[cursor]);
cursor += 3;
}
// Decode the last (possibly) padded characters
std::memcpy(sixBitChars.data(), &encoded[encodedSize - 4], blockSize);
std::array<std::uint8_t, 4> eightBitChars = {};
decode_block(sixBitChars, eightBitChars.data());
// Write the last characters, making sure not to write over the end.
const std::size_t charsToWrite = 3 - padding;
for (std::size_t j = 0; j < charsToWrite; ++j) {
output[cursor++] = eightBitChars[j];
}
}
std::vector<std::uint8_t> fg::base64::fallback_decode(std::string_view encoded) {
const auto encodedSize = encoded.size();
const auto padding = getPadding(encoded);
std::vector<std::uint8_t> ret(getOutputSize(encodedSize, padding));
fallback_decode_inplace(encoded, ret.data(), padding);
return ret;
}
void fg::base64::decode_inplace(std::string_view encoded, std::uint8_t* output, std::size_t padding) {
assert(encoded.size() % 4 == 0);
return DecodeFunctionGetter::get()->inplace(encoded, output, padding);
}
std::vector<std::uint8_t> fg::base64::decode(std::string_view encoded) {
assert(encoded.size() % 4 == 0);
return DecodeFunctionGetter::get()->func(encoded);
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif

third_party/fastgltf/src/fastgltf.cpp vendored Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,63 @@
set_directory_properties(PROPERTIES EXCLUDE_FROM_ALL TRUE)
# We want these tests to be an optional executable.
add_executable(fastgltf_tests EXCLUDE_FROM_ALL
"base64_tests.cpp" "basic_test.cpp" "benchmarks.cpp" "glb_tests.cpp" "gltf_path.hpp"
"vector_tests.cpp" "uri_tests.cpp" "extension_tests.cpp" "accessor_tests.cpp")
target_compile_features(fastgltf_tests PRIVATE cxx_std_17)
target_link_libraries(fastgltf_tests PRIVATE fastgltf fastgltf_simdjson)
target_link_libraries(fastgltf_tests PRIVATE glm::glm Catch2::Catch2WithMain)
fastgltf_compiler_flags(fastgltf_tests)
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/deps/catch2")
add_subdirectory(deps/catch2)
target_link_libraries(fastgltf_tests PRIVATE Catch2::Catch2)
endif()
# We only use tinygltf to compare against.
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/gltf_loaders/tinygltf/tiny_gltf.h")
message(STATUS "fastgltf: Found tinygltf")
set(TINYGLTF_INSTALL OFF CACHE BOOL "")
set(TINYGLTF_BUILD_LOADER_EXAMPLE OFF CACHE BOOL "")
set(TINYGLTF_HEADER_ONLY ON CACHE BOOL "")
add_subdirectory(gltf_loaders/tinygltf)
target_link_libraries(fastgltf_tests PRIVATE tinygltf)
target_compile_definitions(fastgltf_tests PRIVATE HAS_TINYGLTF=1)
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/gltf_loaders/RapidJSON")
# RapidJSON's CMake is weird
message(STATUS "fastgltf: Found RapidJSON")
target_include_directories(fastgltf_tests PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/gltf_loaders/RapidJSON/include")
target_compile_definitions(fastgltf_tests PRIVATE HAS_RAPIDJSON=1 TINYGLTF_USE_RAPIDJSON=1 TINYGLTF_NO_INCLUDE_RAPIDJSON)
endif()
endif()
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/gltf_loaders/cgltf/cgltf.h")
message(STATUS "fastgltf: Found cgltf")
target_include_directories(fastgltf_tests PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/gltf_loaders/cgltf")
target_compile_definitions(fastgltf_tests PRIVATE HAS_CGLTF=1)
endif()
if (FASTGLTF_ENABLE_GLTF_RS AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/gltf-rs/src/lib.rs")
message(STATUS "fastgltf: Found gltf-rs")
corrosion_import_crate(MANIFEST_PATH gltf-rs/Cargo.toml)
corrosion_add_cxxbridge(gltf-rs-bridge CRATE gltf_rs MANIFEST_PATH gltf-rs FILES lib.rs)
target_link_libraries(fastgltf_tests PUBLIC gltf-rs-bridge)
target_compile_definitions(fastgltf_tests PRIVATE HAS_GLTFRS=1)
endif()
if(FASTGLTF_ENABLE_ASSIMP AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/gltf_loaders/assimp")
message(STATUS "fastgltf: Found assimp")
# Only enable glTF importer
set(ASSIMP_NO_EXPORT ON CACHE BOOL "")
set(ASSIMP_BUILD_TESTS OFF CACHE BOOL "")
set(ASSIMP_BUILD_ALL_IMPORTERS_BY_DEFAULT OFF CACHE BOOL "")
set(ASSIMP_BUILD_GLTF_IMPORTER ON CACHE BOOL "")
add_subdirectory(gltf_loaders/assimp)
target_link_libraries(fastgltf_tests PRIVATE assimp::assimp)
target_compile_definitions(fastgltf_tests PRIVATE HAS_ASSIMP=1)
endif()
fastgltf_add_source_directory(TARGET fastgltf_tests FOLDER ".")

third_party/fastgltf/tests/README.md vendored Normal file
View File

@@ -0,0 +1,4 @@
# tests
The tests are written in C++20 with [Catch2](https://github.com/catchorg/Catch2). To run them, one also
needs to initialise the submodules in order to download the [glTF-Sample-Models](https://github.com/KhronosGroup/glTF-Sample-Models/).

View File

@@ -0,0 +1,218 @@
#include <catch2/catch_test_macros.hpp>
#include <glm/vec3.hpp>
#include <glm/gtc/epsilon.hpp>
#include <glm/ext/scalar_constants.hpp>
#include <fastgltf/parser.hpp>
#include <fastgltf/tools.hpp>
#include "gltf_path.hpp"
template<>
struct fastgltf::ElementTraits<glm::vec3> : fastgltf::ElementTraitsBase<glm::vec3, AccessorType::Vec3, float> {};
static const std::byte* getBufferData(const fastgltf::Buffer& buffer) {
const std::byte* result = nullptr;
std::visit(fastgltf::visitor {
[](auto&) {},
[&](const fastgltf::sources::Vector& vec) {
result = reinterpret_cast<const std::byte*>(vec.bytes.data());
},
[&](const fastgltf::sources::ByteView& bv) {
result = bv.bytes.data();
},
}, buffer.data);
return result;
}
TEST_CASE("Test data type conversion", "[gltf-tools]") {
// normalized int-to-float and normalized float-to-int
for (auto i = std::numeric_limits<std::int8_t>::min(); i < std::numeric_limits<std::int8_t>::max(); ++i) {
auto converted = fastgltf::internal::convertComponent<float>(i, true);
REQUIRE(glm::epsilonEqual<float>(converted, fastgltf::max<float>(i / 127.0f, -1), glm::epsilon<float>()));
REQUIRE(fastgltf::internal::convertComponent<std::int8_t>(converted, true) == std::round(converted * 127.0f));
}
for (auto i = std::numeric_limits<std::uint8_t>::min(); i < std::numeric_limits<std::uint8_t>::max(); ++i) {
auto converted = fastgltf::internal::convertComponent<float>(i, true);
REQUIRE(glm::epsilonEqual<float>(converted, i / 255.0f, glm::epsilon<float>()));
REQUIRE(fastgltf::internal::convertComponent<std::uint8_t>(converted, true) == std::round(converted * 255.0f));
}
for (auto i = std::numeric_limits<std::int16_t>::min(); i < std::numeric_limits<std::int16_t>::max(); ++i) {
auto converted = fastgltf::internal::convertComponent<float>(i, true);
REQUIRE(glm::epsilonEqual<float>(converted, fastgltf::max<float>(i / 32767.0f, -1), glm::epsilon<float>()));
REQUIRE(fastgltf::internal::convertComponent<std::int16_t>(converted, true) == std::round(converted * 32767.0f));
}
for (auto i = std::numeric_limits<std::uint16_t>::min(); i < std::numeric_limits<std::uint16_t>::max(); ++i) {
auto converted = fastgltf::internal::convertComponent<float>(i, true);
REQUIRE(glm::epsilonEqual<float>(converted, i / 65535.0f, glm::epsilon<float>()));
REQUIRE(fastgltf::internal::convertComponent<std::uint16_t>(converted, true) == std::round(converted * 65535.0f));
}
}
TEST_CASE("Test accessor", "[gltf-tools]") {
auto lightsLamp = sampleModels / "2.0" / "LightsPunctualLamp" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(lightsLamp / "LightsPunctualLamp.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::KHR_lights_punctual);
auto asset = parser.loadGLTF(&jsonData, lightsLamp, fastgltf::Options::LoadExternalBuffers,
fastgltf::Category::Buffers | fastgltf::Category::BufferViews | fastgltf::Category::Accessors);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(asset->accessors.size() == 15);
auto& accessors = asset->accessors;
SECTION("getAccessorElement<std::uint16_t>") {
auto& firstAccessor = accessors[0];
REQUIRE(firstAccessor.type == fastgltf::AccessorType::Scalar);
REQUIRE(firstAccessor.componentType == fastgltf::ComponentType::UnsignedShort);
REQUIRE(firstAccessor.bufferViewIndex.has_value());
auto& view = asset->bufferViews[*firstAccessor.bufferViewIndex];
auto* bufferData = getBufferData(asset->buffers[view.bufferIndex]);
REQUIRE(bufferData != nullptr);
auto* checkData = reinterpret_cast<const std::uint16_t*>(bufferData + view.byteOffset
+ firstAccessor.byteOffset);
REQUIRE(*checkData == fastgltf::getAccessorElement<std::uint16_t>(asset.get(), firstAccessor, 0));
}
{
auto& secondAccessor = accessors[1];
REQUIRE(secondAccessor.type == fastgltf::AccessorType::Vec3);
REQUIRE(secondAccessor.componentType == fastgltf::ComponentType::Float);
REQUIRE(secondAccessor.bufferViewIndex.has_value());
auto& view = asset->bufferViews[*secondAccessor.bufferViewIndex];
auto* bufferData = getBufferData(asset->buffers[view.bufferIndex]);
REQUIRE(bufferData != nullptr);
auto* checkData = reinterpret_cast<const glm::vec3*>(bufferData + view.byteOffset
+ secondAccessor.byteOffset);
SECTION("getAccessorElement<glm::vec3>") {
REQUIRE(*checkData == fastgltf::getAccessorElement<glm::vec3>(asset.get(), secondAccessor, 0));
}
SECTION("iterateAccessor") {
auto dstCopy = std::make_unique<glm::vec3[]>(secondAccessor.count);
std::size_t i = 0;
fastgltf::iterateAccessor<glm::vec3>(asset.get(), secondAccessor, [&](auto&& v3) {
dstCopy[i++] = std::forward<glm::vec3>(v3);
});
REQUIRE(std::memcmp(dstCopy.get(), checkData, secondAccessor.count * sizeof(glm::vec3)) == 0);
}
SECTION("copyFromAccessor") {
auto dstCopy = std::make_unique<glm::vec3[]>(secondAccessor.count);
fastgltf::copyFromAccessor<glm::vec3>(asset.get(), secondAccessor, dstCopy.get());
REQUIRE(std::memcmp(dstCopy.get(), checkData, secondAccessor.count * sizeof(glm::vec3)) == 0);
}
SECTION("Iterator test") {
auto dstCopy = std::make_unique<glm::vec3[]>(secondAccessor.count);
auto accessor = fastgltf::iterateAccessor<glm::vec3>(asset.get(), secondAccessor);
for (auto it = accessor.begin(); it != accessor.end(); ++it) {
dstCopy[std::distance(accessor.begin(), it)] = *it;
}
REQUIRE(std::memcmp(dstCopy.get(), checkData, secondAccessor.count * sizeof(glm::vec3)) == 0);
}
}
}
TEST_CASE("Test sparse accessor", "[gltf-tools]") {
auto simpleSparseAccessor = sampleModels / "2.0" / "SimpleSparseAccessor" / "glTF";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(simpleSparseAccessor / "SimpleSparseAccessor.gltf"));
fastgltf::Parser parser;
auto asset = parser.loadGLTF(jsonData.get(), simpleSparseAccessor, fastgltf::Options::LoadExternalBuffers,
fastgltf::Category::Buffers | fastgltf::Category::BufferViews | fastgltf::Category::Accessors);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(asset->accessors.size() == 2);
REQUIRE(!asset->accessors[0].sparse.has_value());
REQUIRE(asset->accessors[1].sparse.has_value());
auto& sparse = asset->accessors[1].sparse.value();
REQUIRE(sparse.count == 3);
REQUIRE(sparse.indicesBufferView == 2);
REQUIRE(sparse.indicesByteOffset == 0);
REQUIRE(sparse.valuesBufferView == 3);
REQUIRE(sparse.valuesByteOffset == 0);
REQUIRE(sparse.indexComponentType == fastgltf::ComponentType::UnsignedShort);
auto& secondAccessor = asset->accessors[1];
auto& viewIndices = asset->bufferViews[secondAccessor.sparse->indicesBufferView];
auto& viewValues = asset->bufferViews[secondAccessor.sparse->valuesBufferView];
auto& viewData = asset->bufferViews[*secondAccessor.bufferViewIndex];
auto* bufferData = getBufferData(asset->buffers[viewData.bufferIndex]) + viewData.byteOffset
+ secondAccessor.byteOffset;
auto dataStride = viewData.byteStride ? *viewData.byteStride
: fastgltf::getElementByteSize(secondAccessor.type, secondAccessor.componentType);
auto* dataIndices = reinterpret_cast<const std::uint16_t*>(getBufferData(asset->buffers[viewIndices.bufferIndex])
+ viewIndices.byteOffset + secondAccessor.sparse->indicesByteOffset);
auto* dataValues = reinterpret_cast<const glm::vec3*>(getBufferData(asset->buffers[viewValues.bufferIndex])
+ viewValues.byteOffset + secondAccessor.sparse->valuesByteOffset);
auto checkValues = std::make_unique<glm::vec3[]>(secondAccessor.count);
for (std::size_t i = 0, sparseIndex = 0; i < secondAccessor.count; ++i) {
if (sparseIndex < secondAccessor.sparse->count && dataIndices[sparseIndex] == i) {
checkValues[i] = dataValues[sparseIndex];
++sparseIndex;
} else {
checkValues[i] = *reinterpret_cast<const glm::vec3*>(bufferData + dataStride * i);
}
}
SECTION("getAccessorElement") {
for (std::size_t i = 0; i < secondAccessor.count; ++i) {
REQUIRE(checkValues[i] == fastgltf::getAccessorElement<glm::vec3>(asset.get(), secondAccessor, i));
}
}
SECTION("iterateAccessor") {
auto dstCopy = std::make_unique<glm::vec3[]>(secondAccessor.count);
std::size_t i = 0;
fastgltf::iterateAccessor<glm::vec3>(asset.get(), secondAccessor, [&](auto&& v3) {
dstCopy[i++] = std::forward<glm::vec3>(v3);
});
REQUIRE(std::memcmp(dstCopy.get(), checkValues.get(), secondAccessor.count * sizeof(glm::vec3)) == 0);
}
SECTION("iterateAccessor with idx") {
auto dstCopy = std::make_unique<glm::vec3[]>(secondAccessor.count);
fastgltf::iterateAccessorWithIndex<glm::vec3>(asset.get(), secondAccessor, [&](auto&& v3, std::size_t i) {
dstCopy[i] = std::forward<glm::vec3>(v3);
});
REQUIRE(std::memcmp(dstCopy.get(), checkValues.get(), secondAccessor.count * sizeof(glm::vec3)) == 0);
}
SECTION("copyFromAccessor") {
auto dstCopy = std::make_unique<glm::vec3[]>(secondAccessor.count);
fastgltf::copyFromAccessor<glm::vec3>(asset.get(), secondAccessor, dstCopy.get());
REQUIRE(std::memcmp(dstCopy.get(), checkValues.get(), secondAccessor.count * sizeof(glm::vec3)) == 0);
}
SECTION("Iterator test") {
auto dstCopy = std::make_unique<glm::vec3[]>(secondAccessor.count);
auto accessor = fastgltf::iterateAccessor<glm::vec3>(asset.get(), secondAccessor);
for (auto it = accessor.begin(); it != accessor.end(); ++it) {
dstCopy[std::distance(accessor.begin(), it)] = *it;
}
REQUIRE(std::memcmp(dstCopy.get(), checkValues.get(), secondAccessor.count * sizeof(glm::vec3)) == 0);
}
}

View File

@@ -0,0 +1,115 @@
#include <fstream>
#include <sstream>
#include <catch2/catch_test_macros.hpp>
#include <catch2/benchmark/catch_benchmark.hpp>
#include <fastgltf/base64.hpp>
#include <fastgltf/types.hpp>
#include <fastgltf/parser.hpp>
#include "gltf_path.hpp"
constexpr std::string_view testBase64 = "SGVsbG8gV29ybGQuIEhlbGxvIFdvcmxkLiBIZWxsbyBXb3JsZC4=";
TEST_CASE("Check base64 utility functions", "[base64]") {
REQUIRE(fastgltf::base64::getPadding("Li==") == 2);
REQUIRE(fastgltf::base64::getPadding("Li4=") == 1);
REQUIRE(fastgltf::base64::getPadding("Li4u") == 0);
REQUIRE(fastgltf::base64::getOutputSize(4, 0) == 3); // Li4u
REQUIRE(fastgltf::base64::getOutputSize(4, 1) == 2); // Li4=
REQUIRE(fastgltf::base64::getOutputSize(4, 2) == 1); // Li==
}
TEST_CASE("Check base64 decoding", "[base64]") {
// This is "Hello World. Hello World.". The decode function
// uses the best possible SIMD version of the algorithm.
auto bytes = fastgltf::base64::decode(testBase64);
std::string strings(bytes.begin(), bytes.end());
REQUIRE(strings == "Hello World. Hello World. Hello World.");
}
TEST_CASE("Check all base64 decoders", "[base64]") {
// Checks that the base64 decoders return the same.
auto bytes = fastgltf::base64::fallback_decode(testBase64);
std::string strings(bytes.begin(), bytes.end());
REQUIRE(strings == "Hello World. Hello World. Hello World.");
#if defined(__x86_64__) || defined(_M_AMD64) || defined(_M_IX86)
REQUIRE(bytes == fastgltf::base64::avx2_decode(testBase64));
REQUIRE(bytes == fastgltf::base64::sse4_decode(testBase64));
#endif
#if defined(__aarch64__)
REQUIRE(bytes == fastgltf::base64::neon_decode(testBase64));
#endif
}
TEST_CASE("Check big base64 data decoding", "[base64]") {
std::ifstream file(path / "base64.txt");
REQUIRE(file.is_open());
std::stringstream buffer;
buffer << file.rdbuf();
auto encodedBytes = buffer.str();
auto bytes = fastgltf::base64::decode(encodedBytes);
REQUIRE(!bytes.empty());
std::ifstream output(path / "base64.txt.out", std::ios::binary | std::ios::ate);
REQUIRE(output.is_open());
std::vector<uint8_t> decodedBytes(output.tellg());
output.seekg(0);
output.read(reinterpret_cast<char*>(decodedBytes.data()), static_cast<std::streamsize>(decodedBytes.size()));
REQUIRE(bytes == decodedBytes);
}
TEST_CASE("Test base64 buffer decoding", "[base64]") {
fastgltf::Parser parser;
fastgltf::Image texture;
std::string bufferData;
auto cylinderEngine = sampleModels / "2.0" / "2CylinderEngine" / "glTF-Embedded";
auto boxTextured = sampleModels / "2.0" / "BoxTextured" / "glTF-Embedded";
auto tceJsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(tceJsonData->loadFromFile(cylinderEngine / "2CylinderEngine.gltf"));
auto btJsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(btJsonData->loadFromFile(boxTextured / "BoxTextured.gltf"));
SECTION("Validate large buffer load from glTF") {
auto asset = parser.loadGLTF(tceJsonData.get(), cylinderEngine, fastgltf::Options::None, fastgltf::Category::Buffers);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(asset->buffers.size() == 1);
// Load the buffer from the parsed glTF file.
auto& buffer = asset->buffers.front();
REQUIRE(buffer.byteLength == 1794612);
auto bufferVector = std::get_if<fastgltf::sources::Vector>(&buffer.data);
REQUIRE(bufferVector != nullptr);
REQUIRE(bufferVector->mimeType == fastgltf::MimeType::OctetStream);
REQUIRE(!bufferVector->bytes.empty());
}
SECTION("Validate base64 buffer and image load from glTF") {
auto asset = parser.loadGLTF(btJsonData.get(), boxTextured, fastgltf::Options::None, fastgltf::Category::Images | fastgltf::Category::Buffers);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(asset->buffers.size() == 1);
REQUIRE(asset->images.size() == 1);
auto& buffer = asset->buffers.front();
REQUIRE(buffer.byteLength == 840);
auto bufferVector = std::get_if<fastgltf::sources::Vector>(&buffer.data);
REQUIRE(bufferVector != nullptr);
REQUIRE(bufferVector->mimeType == fastgltf::MimeType::OctetStream);
REQUIRE(!bufferVector->bytes.empty());
auto& image = asset->images.front();
auto imageVector = std::get_if<fastgltf::sources::Vector>(&image.data);
REQUIRE(imageVector != nullptr);
REQUIRE(imageVector->mimeType == fastgltf::MimeType::PNG);
REQUIRE(!imageVector->bytes.empty());
}
}

View File

@@ -0,0 +1,583 @@
#include <algorithm>
#include <cstdlib>
#include <random>
#include <catch2/catch_approx.hpp>
#include <catch2/catch_test_macros.hpp>
#include <catch2/benchmark/catch_benchmark.hpp>
#include <glm/glm.hpp>
#include <glm/gtc/epsilon.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/matrix_decompose.hpp>
#include <fastgltf/base64.hpp>
#include <fastgltf/parser.hpp>
#include <fastgltf/types.hpp>
#include "gltf_path.hpp"
constexpr auto noOptions = fastgltf::Options::None;
TEST_CASE("Component type tests", "[gltf-loader]") {
using namespace fastgltf;
// clang-format off
REQUIRE(fastgltf::getNumComponents(AccessorType::Scalar) == 1);
REQUIRE(fastgltf::getNumComponents(AccessorType::Vec2) == 2);
REQUIRE(fastgltf::getNumComponents(AccessorType::Vec3) == 3);
REQUIRE(fastgltf::getNumComponents(AccessorType::Vec4) == 4);
REQUIRE(fastgltf::getNumComponents(AccessorType::Mat2) == 4);
REQUIRE(fastgltf::getNumComponents(AccessorType::Mat3) == 9);
REQUIRE(fastgltf::getNumComponents(AccessorType::Mat4) == 16);
REQUIRE(fastgltf::getComponentBitSize(ComponentType::Byte) == 8);
REQUIRE(fastgltf::getComponentBitSize(ComponentType::UnsignedByte) == 8);
REQUIRE(fastgltf::getComponentBitSize(ComponentType::Short) == 16);
REQUIRE(fastgltf::getComponentBitSize(ComponentType::UnsignedShort) == 16);
REQUIRE(fastgltf::getComponentBitSize(ComponentType::UnsignedInt) == 32);
REQUIRE(fastgltf::getComponentBitSize(ComponentType::Float) == 32);
REQUIRE(fastgltf::getComponentBitSize(ComponentType::Double) == 64);
REQUIRE(fastgltf::getComponentBitSize(ComponentType::Invalid) == 0);
REQUIRE(fastgltf::getElementByteSize(AccessorType::Scalar, ComponentType::Byte) == 1);
REQUIRE(fastgltf::getElementByteSize(AccessorType::Vec4, ComponentType::Byte) == 4);
REQUIRE(fastgltf::getElementByteSize(AccessorType::Vec4, ComponentType::Short) == 8);
REQUIRE(fastgltf::getComponentType(5120) == ComponentType::Byte);
REQUIRE(fastgltf::getComponentType(5121) == ComponentType::UnsignedByte);
REQUIRE(fastgltf::getComponentType(5122) == ComponentType::Short);
REQUIRE(fastgltf::getComponentType(5123) == ComponentType::UnsignedShort);
REQUIRE(fastgltf::getComponentType(5125) == ComponentType::UnsignedInt);
REQUIRE(fastgltf::getComponentType(5126) == ComponentType::Float);
REQUIRE(fastgltf::getComponentType(5130) == ComponentType::Double);
REQUIRE(fastgltf::getComponentType(5131) == ComponentType::Invalid);
// clang-format on
}
TEST_CASE("Test all variants of CRC32-C hashing", "[gltf-loader]") {
// TODO: Determine SSE4.2 support here.
for (std::size_t i = 0; i < 256; ++i) {
// Generate a random string up to 256 chars long.
static constexpr std::string_view chars =
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
static std::mt19937 rng(std::random_device{}());
static std::uniform_int_distribution<std::string::size_type> pick(0, chars.size() - 1);
std::string str(i, '\0');
for (std::size_t j = 0; j < i; ++j)
str[j] = chars[pick(rng)];
#if defined(__x86_64__) || defined(_M_AMD64) || defined(_M_IX86)
// We'll try and test if the hardware accelerated version generates the same, correct results.
REQUIRE(fastgltf::crc32c(str) == fastgltf::hwcrc32c(str));
#endif
}
}
TEST_CASE("Test extension stringification", "[gltf-loader]") {
auto stringified = stringifyExtension(fastgltf::Extensions::EXT_meshopt_compression);
REQUIRE(stringified == fastgltf::extensions::EXT_meshopt_compression);
stringified = stringifyExtension(fastgltf::Extensions::EXT_meshopt_compression | fastgltf::Extensions::EXT_texture_webp);
REQUIRE(stringified == fastgltf::extensions::EXT_meshopt_compression);
}
TEST_CASE("Test if glTF type detection works", "[gltf-loader]") {
fastgltf::Parser parser;
SECTION("glTF") {
auto gltfPath = sampleModels / "2.0" / "ABeautifulGame" / "glTF";
REQUIRE(std::filesystem::exists(gltfPath));
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(gltfPath / "ABeautifulGame.gltf"));
REQUIRE(fastgltf::determineGltfFileType(&jsonData) == fastgltf::GltfType::glTF);
auto model = parser.loadGLTF(&jsonData, gltfPath);
REQUIRE(model.error() == fastgltf::Error::None);
REQUIRE(model.get_if() != nullptr);
REQUIRE(fastgltf::validate(model.get()) == fastgltf::Error::None);
}
SECTION("GLB") {
auto glbPath = sampleModels / "2.0" / "BoomBox" / "glTF-Binary";
REQUIRE(std::filesystem::exists(glbPath));
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(glbPath / "BoomBox.glb"));
REQUIRE(fastgltf::determineGltfFileType(&jsonData) == fastgltf::GltfType::GLB);
auto model = parser.loadBinaryGLTF(&jsonData, glbPath);
REQUIRE(model.error() == fastgltf::Error::None);
REQUIRE(model.get_if() != nullptr);
}
SECTION("Invalid") {
auto gltfPath = path / "base64.txt"; // Random file in the test directory that's not a glTF file.
REQUIRE(std::filesystem::exists(gltfPath));
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(gltfPath));
REQUIRE(fastgltf::determineGltfFileType(&jsonData) == fastgltf::GltfType::Invalid);
}
}
TEST_CASE("Loading some basic glTF", "[gltf-loader]") {
fastgltf::Parser parser;
SECTION("Loading basic invalid glTF files") {
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(path / "empty_json.gltf"));
auto emptyGltf = parser.loadGLTF(jsonData.get(), path);
REQUIRE(emptyGltf.error() == fastgltf::Error::InvalidOrMissingAssetField);
}
SECTION("Load basic glTF file") {
auto basicJsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(basicJsonData->loadFromFile(path / "basic_gltf.gltf"));
auto basicGltf = parser.loadGLTF(basicJsonData.get(), path);
REQUIRE(basicGltf.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(basicGltf.get()) == fastgltf::Error::None);
}
SECTION("Loading basic Cube.gltf") {
auto cubePath = sampleModels / "2.0" / "Cube" / "glTF";
auto cubeJsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(cubeJsonData->loadFromFile(cubePath / "Cube.gltf"));
auto cube = parser.loadGLTF(cubeJsonData.get(), cubePath, noOptions, fastgltf::Category::OnlyRenderable);
REQUIRE(cube.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(cube.get()) == fastgltf::Error::None);
REQUIRE(cube->scenes.size() == 1);
REQUIRE(cube->scenes.front().nodeIndices.size() == 1);
REQUIRE(cube->scenes.front().nodeIndices.front() == 0);
REQUIRE(cube->nodes.size() == 1);
REQUIRE(cube->nodes.front().name == "Cube");
REQUIRE(std::holds_alternative<fastgltf::Node::TRS>(cube->nodes.front().transform));
REQUIRE(cube->accessors.size() == 5);
REQUIRE(cube->accessors[0].type == fastgltf::AccessorType::Scalar);
REQUIRE(cube->accessors[0].componentType == fastgltf::ComponentType::UnsignedShort);
REQUIRE(cube->accessors[1].type == fastgltf::AccessorType::Vec3);
REQUIRE(cube->accessors[1].componentType == fastgltf::ComponentType::Float);
REQUIRE(cube->bufferViews.size() == 5);
REQUIRE(cube->buffers.size() == 1);
REQUIRE(cube->materials.size() == 1);
auto& material = cube->materials.front();
REQUIRE(material.name == "Cube");
REQUIRE(material.pbrData.baseColorTexture.has_value());
REQUIRE(material.pbrData.baseColorTexture->textureIndex == 0);
REQUIRE(material.pbrData.metallicRoughnessTexture.has_value());
REQUIRE(material.pbrData.metallicRoughnessTexture->textureIndex == 1);
REQUIRE(!material.normalTexture.has_value());
REQUIRE(!material.emissiveTexture.has_value());
REQUIRE(!material.occlusionTexture.has_value());
}
SECTION("Loading basic Box.gltf") {
auto boxPath = sampleModels / "2.0" / "Box" / "glTF";
auto boxJsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(boxJsonData->loadFromFile(boxPath / "Box.gltf"));
auto box = parser.loadGLTF(boxJsonData.get(), boxPath, noOptions, fastgltf::Category::OnlyRenderable);
REQUIRE(box.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(box.get()) == fastgltf::Error::None);
REQUIRE(box->defaultScene.has_value());
REQUIRE(box->defaultScene.value() == 0);
REQUIRE(box->nodes.size() == 2);
REQUIRE(box->nodes[0].children.size() == 1);
REQUIRE(box->nodes[0].children[0] == 1);
REQUIRE(box->nodes[1].children.empty());
REQUIRE(box->nodes[1].meshIndex.has_value());
REQUIRE(box->nodes[1].meshIndex.value() == 0);
REQUIRE(box->materials.size() == 1);
REQUIRE(box->materials[0].name == "Red");
REQUIRE(box->materials[0].pbrData.baseColorFactor[3] == 1.0f);
REQUIRE(box->materials[0].pbrData.metallicFactor == 0.0f);
}
}
TEST_CASE("Loading glTF animation", "[gltf-loader]") {
auto animatedCube = sampleModels / "2.0" / "AnimatedCube" / "glTF";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(animatedCube / "AnimatedCube.gltf"));
fastgltf::Parser parser;
auto asset = parser.loadGLTF(jsonData.get(), animatedCube, noOptions, fastgltf::Category::OnlyAnimations);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(!asset->animations.empty());
auto& animation = asset->animations.front();
REQUIRE(animation.name == "animation_AnimatedCube");
REQUIRE(!animation.channels.empty());
REQUIRE(animation.channels.front().nodeIndex == 0);
REQUIRE(animation.channels.front().samplerIndex == 0);
REQUIRE(animation.channels.front().path == fastgltf::AnimationPath::Rotation);
REQUIRE(!animation.samplers.empty());
REQUIRE(animation.samplers.front().interpolation == fastgltf::AnimationInterpolation::Linear);
REQUIRE(animation.samplers.front().inputAccessor == 0);
REQUIRE(animation.samplers.front().outputAccessor == 1);
}
TEST_CASE("Loading glTF skins", "[gltf-loader]") {
auto simpleSkin = sampleModels / "2.0" / "SimpleSkin" / "glTF";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(simpleSkin / "SimpleSkin.gltf"));
fastgltf::Parser parser;
auto asset = parser.loadGLTF(jsonData.get(), simpleSkin, noOptions, fastgltf::Category::Skins | fastgltf::Category::Nodes);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(!asset->skins.empty());
auto& skin = asset->skins.front();
REQUIRE(skin.joints.size() == 2);
REQUIRE(skin.joints[0] == 1);
REQUIRE(skin.joints[1] == 2);
REQUIRE(skin.inverseBindMatrices.has_value());
REQUIRE(skin.inverseBindMatrices.value() == 4);
REQUIRE(!asset->nodes.empty());
auto& node = asset->nodes.front();
REQUIRE(node.skinIndex.has_value());
REQUIRE(node.skinIndex == 0);
}
TEST_CASE("Loading glTF cameras", "[gltf-loader]") {
auto cameras = sampleModels / "2.0" / "Cameras" / "glTF";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(cameras / "Cameras.gltf"));
fastgltf::Parser parser;
auto asset = parser.loadGLTF(jsonData.get(), cameras, noOptions, fastgltf::Category::Cameras);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->cameras.size() == 2);
REQUIRE(std::holds_alternative<fastgltf::Camera::Perspective>(asset->cameras[0].camera));
REQUIRE(std::holds_alternative<fastgltf::Camera::Orthographic>(asset->cameras[1].camera));
const auto* pPerspective = std::get_if<fastgltf::Camera::Perspective>(&asset->cameras[0].camera);
REQUIRE(pPerspective != nullptr);
REQUIRE(pPerspective->aspectRatio == 1.0f);
REQUIRE(pPerspective->yfov == 0.7f);
REQUIRE(pPerspective->zfar == 100);
REQUIRE(pPerspective->znear == 0.01f);
const auto* pOrthographic = std::get_if<fastgltf::Camera::Orthographic>(&asset->cameras[1].camera);
REQUIRE(pOrthographic != nullptr);
REQUIRE(pOrthographic->xmag == 1.0f);
REQUIRE(pOrthographic->ymag == 1.0f);
REQUIRE(pOrthographic->zfar == 100);
REQUIRE(pOrthographic->znear == 0.01f);
}
TEST_CASE("Validate whole glTF", "[gltf-loader]") {
auto sponza = sampleModels / "2.0" / "Sponza" / "glTF";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(sponza / "Sponza.gltf"));
fastgltf::Parser parser;
auto model = parser.loadGLTF(jsonData.get(), sponza);
REQUIRE(model.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(model.get()) == fastgltf::Error::None);
auto brainStem = sampleModels / "2.0" / "BrainStem" / "glTF";
jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(brainStem / "BrainStem.gltf"));
auto model2 = parser.loadGLTF(jsonData.get(), brainStem);
REQUIRE(model2.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(model2.get()) == fastgltf::Error::None);
}
TEST_CASE("Test allocation callbacks for embedded buffers", "[gltf-loader]") {
auto boxPath = sampleModels / "2.0" / "Box" / "glTF-Embedded";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(boxPath / "Box.gltf"));
std::vector<void*> allocations;
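// The allocation callback hands fastgltf a caller-owned block of memory together with a
// custom ID; the parsed buffer is then exposed as sources::CustomBuffer carrying that ID,
// which the checks further below rely on.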
auto mapCallback = [](uint64_t bufferSize, void* userPointer) -> fastgltf::BufferInfo {
REQUIRE(userPointer != nullptr);
auto* allocations = static_cast<std::vector<void*>*>(userPointer);
allocations->emplace_back(std::malloc(bufferSize));
return fastgltf::BufferInfo {
allocations->back(),
allocations->size() - 1,
};
};
fastgltf::Parser parser;
parser.setUserPointer(&allocations);
parser.setBufferAllocationCallback(mapCallback, nullptr);
auto asset = parser.loadGLTF(jsonData.get(), boxPath, noOptions, fastgltf::Category::Buffers);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(allocations.size() == 1);
REQUIRE(asset->buffers.size() == 1);
auto& buffer = asset->buffers.front();
const auto* customBuffer = std::get_if<fastgltf::sources::CustomBuffer>(&buffer.data);
REQUIRE(customBuffer != nullptr);
REQUIRE(customBuffer->id == 0);
for (auto& allocation : allocations) {
REQUIRE(allocation != nullptr);
std::free(allocation);
}
}
TEST_CASE("Test base64 decoding callbacks", "[gltf-loader]") {
auto boxPath = sampleModels / "2.0" / "Box" / "glTF-Embedded";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(boxPath / "Box.gltf"));
size_t decodeCounter = 0;
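// The decode callback is invoked whenever the parser decodes an embedded base64 buffer.
// It receives the encoded string, a pre-sized output buffer and the padding count; here it
// simply forwards to fastgltf's in-place decoder while counting invocations.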
auto decodeCallback = [](std::string_view encodedData, uint8_t* outputData, size_t padding, size_t outputSize, void* userPointer) {
(*static_cast<size_t*>(userPointer))++;
fastgltf::base64::decode_inplace(encodedData, outputData, padding);
};
fastgltf::Parser parser;
parser.setUserPointer(&decodeCounter);
parser.setBase64DecodeCallback(decodeCallback);
auto model = parser.loadGLTF(jsonData.get(), boxPath, noOptions, fastgltf::Category::Buffers);
REQUIRE(model.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(model.get()) == fastgltf::Error::None);
REQUIRE(decodeCounter != 0);
}
TEST_CASE("Test TRS parsing and optional decomposition", "[gltf-loader]") {
SECTION("Test decomposition on glTF asset") {
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(path / "transform_matrices.gltf"));
// Parse once without decomposing, once with decomposing the matrix.
fastgltf::Parser parser;
auto assetWithMatrix = parser.loadGLTF(jsonData.get(), path, noOptions, fastgltf::Category::Nodes | fastgltf::Category::Cameras);
REQUIRE(assetWithMatrix.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(assetWithMatrix.get()) == fastgltf::Error::None);
auto assetDecomposed = parser.loadGLTF(jsonData.get(), path, fastgltf::Options::DecomposeNodeMatrices, fastgltf::Category::Nodes | fastgltf::Category::Cameras);
REQUIRE(assetDecomposed.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(assetDecomposed.get()) == fastgltf::Error::None);
REQUIRE(assetWithMatrix->cameras.size() == 1);
REQUIRE(assetDecomposed->cameras.size() == 1);
REQUIRE(assetWithMatrix->nodes.size() == 2);
REQUIRE(assetDecomposed->nodes.size() == 2);
REQUIRE(std::holds_alternative<fastgltf::Node::TransformMatrix>(assetWithMatrix->nodes.back().transform));
REQUIRE(std::holds_alternative<fastgltf::Node::TRS>(assetDecomposed->nodes.back().transform));
// Get the TRS components from the first node and use them as the test data for decomposing.
const auto* pDefaultTRS = std::get_if<fastgltf::Node::TRS>(&assetWithMatrix->nodes.front().transform);
REQUIRE(pDefaultTRS != nullptr);
auto translation = glm::make_vec3(pDefaultTRS->translation.data());
auto rotation = glm::make_quat(pDefaultTRS->rotation.data());
auto scale = glm::make_vec3(pDefaultTRS->scale.data());
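// glTF composes node transforms as translation * rotation * scale, so the reference
// matrix is assembled in that exact order before comparing it with the parsed matrix.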
auto rotationMatrix = glm::toMat4(rotation);
auto transform = glm::translate(glm::mat4(1.0f), translation) * rotationMatrix * glm::scale(glm::mat4(1.0f), scale);
// Check if the parsed matrix is correct.
const auto* pMatrix = std::get_if<fastgltf::Node::TransformMatrix>(&assetWithMatrix->nodes.back().transform);
REQUIRE(pMatrix != nullptr);
REQUIRE(glm::make_mat4x4(pMatrix->data()) == transform);
// Check if the decomposed components equal the original components.
const auto* pDecomposedTRS = std::get_if<fastgltf::Node::TRS>(&assetDecomposed->nodes.back().transform);
REQUIRE(pDecomposedTRS != nullptr);
REQUIRE(glm::make_vec3(pDecomposedTRS->translation.data()) == translation);
REQUIRE(glm::make_quat(pDecomposedTRS->rotation.data()) == rotation);
REQUIRE(glm::make_vec3(pDecomposedTRS->scale.data()) == scale);
}
SECTION("Test decomposition against glm decomposition") {
// Some random complex transform matrix from one of the glTF sample models.
std::array<float, 16> matrix = {
-0.4234085381031037F,
-0.9059388637542724F,
-7.575183536001616e-11F,
0.0F,
-0.9059388637542724F,
0.4234085381031037F,
-4.821281221478735e-11F,
0.0F,
7.575183536001616e-11F,
4.821281221478735e-11F,
-1.0F,
0.0F,
-90.59386444091796F,
-24.379817962646489F,
-40.05522918701172F,
1.0F
};
std::array<float, 3> translation = {}, scale = {};
std::array<float, 4> rotation = {};
fastgltf::decomposeTransformMatrix(matrix, scale, rotation, translation);
auto glmMatrix = glm::make_mat4x4(matrix.data());
glm::vec3 glmScale, glmTranslation, glmSkew;
glm::quat glmRotation;
glm::vec4 glmPerspective;
glm::decompose(glmMatrix, glmScale, glmRotation, glmTranslation, glmSkew, glmPerspective);
// I use glm::epsilon<float>() * 10 here because some matrices I tested this with resulted
// in an error margin greater than the normal epsilon value. I will investigate this in the
// future, but I suspect using double in the decompose functions should help mitigate most
// of it.
REQUIRE(glm::make_vec3(translation.data()) == glmTranslation);
REQUIRE(glm::all(glm::epsilonEqual(glm::make_quat(rotation.data()), glmRotation, glm::epsilon<float>() * 10)));
REQUIRE(glm::all(glm::epsilonEqual(glm::make_vec3(scale.data()), glmScale, glm::epsilon<float>())));
}
}
TEST_CASE("Validate sparse accessor parsing", "[gltf-loader]") {
auto simpleSparseAccessor = sampleModels / "2.0" / "SimpleSparseAccessor" / "glTF";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(simpleSparseAccessor / "SimpleSparseAccessor.gltf"));
fastgltf::Parser parser;
auto asset = parser.loadGLTF(jsonData.get(), simpleSparseAccessor, noOptions, fastgltf::Category::Accessors);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->accessors.size() == 2);
REQUIRE(!asset->accessors[0].sparse.has_value());
REQUIRE(asset->accessors[1].sparse.has_value());
auto& sparse = asset->accessors[1].sparse.value();
REQUIRE(sparse.count == 3);
REQUIRE(sparse.indicesBufferView == 2);
REQUIRE(sparse.indicesByteOffset == 0);
REQUIRE(sparse.valuesBufferView == 3);
REQUIRE(sparse.valuesByteOffset == 0);
REQUIRE(sparse.indexComponentType == fastgltf::ComponentType::UnsignedShort);
}
TEST_CASE("Validate morph target parsing", "[gltf-loader]") {
auto simpleMorph = sampleModels / "2.0" / "SimpleMorph" / "glTF";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(simpleMorph / "SimpleMorph.gltf"));
fastgltf::Parser parser;
auto asset = parser.loadGLTF(jsonData.get(), simpleMorph, noOptions, fastgltf::Category::Meshes);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->meshes.size() == 1);
REQUIRE(asset->meshes.front().weights.size() == 2);
REQUIRE(asset->meshes.front().primitives.size() == 1);
auto& primitive = asset->meshes.front().primitives.front();
auto position = primitive.findAttribute("POSITION");
REQUIRE(position != primitive.attributes.end());
REQUIRE((*position).second == 1);
REQUIRE(primitive.targets.size() == 2);
auto positionTarget0 = primitive.findTargetAttribute(0, "POSITION");
REQUIRE(positionTarget0 != primitive.targets[0].end());
REQUIRE((*positionTarget0).second == 2);
auto positionTarget1 = primitive.findTargetAttribute(1, "POSITION");
REQUIRE(positionTarget1 != primitive.targets[1].end());
REQUIRE((*positionTarget1).second == 3);
}
TEST_CASE("Test accessors min/max", "[gltf-loader]") {
auto lightsLamp = sampleModels / "2.0" / "LightsPunctualLamp" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(lightsLamp / "LightsPunctualLamp.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::KHR_lights_punctual);
auto asset = parser.loadGLTF(&jsonData, lightsLamp, noOptions, fastgltf::Category::Accessors);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(std::find_if(asset->extensionsUsed.begin(), asset->extensionsUsed.end(), [](auto& string) {
return string == fastgltf::extensions::KHR_lights_punctual;
}) != asset->extensionsUsed.end());
REQUIRE(asset->accessors.size() == 15);
auto& accessors = asset->accessors;
{
auto& firstAccessor = accessors[0];
const auto* max = std::get_if<FASTGLTF_STD_PMR_NS::vector<std::int64_t>>(&firstAccessor.max);
const auto* min = std::get_if<FASTGLTF_STD_PMR_NS::vector<std::int64_t>>(&firstAccessor.min);
REQUIRE(max != nullptr);
REQUIRE(min != nullptr);
REQUIRE(max->size() == fastgltf::getNumComponents(firstAccessor.type));
REQUIRE(max->size() == 1);
REQUIRE(min->size() == 1);
REQUIRE(max->front() == 3211);
REQUIRE(min->front() == 0);
}
{
auto& secondAccessor = accessors[1];
const auto* max = std::get_if<FASTGLTF_STD_PMR_NS::vector<double>>(&secondAccessor.max);
const auto* min = std::get_if<FASTGLTF_STD_PMR_NS::vector<double>>(&secondAccessor.min);
REQUIRE(max != nullptr);
REQUIRE(min != nullptr);
REQUIRE(max->size() == fastgltf::getNumComponents(secondAccessor.type));
REQUIRE(max->size() == 3);
REQUIRE(min->size() == 3);
REQUIRE(glm::epsilonEqual(max->at(0), 0.81497824192047119, glm::epsilon<double>()));
REQUIRE(glm::epsilonEqual(max->at(1), 1.8746249675750732, glm::epsilon<double>()));
REQUIRE(glm::epsilonEqual(max->at(2), 0.32295516133308411, glm::epsilon<double>()));
REQUIRE(glm::epsilonEqual(min->at(0), -0.12269512563943863, glm::epsilon<double>()));
REQUIRE(glm::epsilonEqual(min->at(1), 0.013025385327637196, glm::epsilon<double>()));
REQUIRE(glm::epsilonEqual(min->at(2), -0.32393229007720947, glm::epsilon<double>()));
}
{
auto& fifthAccessor = accessors[4];
const auto* max = std::get_if<FASTGLTF_STD_PMR_NS::vector<double>>(&fifthAccessor.max);
const auto* min = std::get_if<FASTGLTF_STD_PMR_NS::vector<double>>(&fifthAccessor.min);
REQUIRE(max != nullptr);
REQUIRE(min != nullptr);
REQUIRE(max->size() == fastgltf::getNumComponents(fifthAccessor.type));
REQUIRE(max->size() == 4);
REQUIRE(min->size() == 4);
REQUIRE(max->back() == 1.0);
}
}
TEST_CASE("Test unicode characters", "[gltf-loader]") {
auto lightsLamp = sampleModels / "2.0" / std::filesystem::u8path(u8"Unicode❤♻Test") / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(lightsLamp / std::filesystem::u8path(u8"Unicode❤♻Test.gltf")));
fastgltf::Parser parser;
auto asset = parser.loadGLTF(&jsonData, lightsLamp);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(!asset->materials.empty());
REQUIRE(asset->materials[0].name == u8"Unicode❤♻Material");
REQUIRE(!asset->buffers.empty());
auto bufferUri = std::get<fastgltf::sources::URI>(asset->buffers[0].data);
REQUIRE(bufferUri.uri.path() == u8"Unicode❤♻Binary.bin");
}

View File

@@ -0,0 +1,423 @@
#include <fstream>
#include <random>
#include <catch2/benchmark/catch_benchmark.hpp>
#include <catch2/catch_test_macros.hpp>
#include "simdjson.h"
#include <fastgltf/parser.hpp>
#include <fastgltf/base64.hpp>
#include "gltf_path.hpp"
constexpr auto benchmarkOptions = fastgltf::Options::DontRequireValidAssetMember;
#ifdef HAS_RAPIDJSON
#include "rapidjson/document.h"
#include "rapidjson/prettywriter.h"
#include "rapidjson/rapidjson.h"
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"
#endif
#ifdef HAS_TINYGLTF
// We don't want tinygltf to load/write images.
#define TINYGLTF_NO_STB_IMAGE_WRITE
#define TINYGLTF_NO_STB_IMAGE
#define TINYGLTF_NO_FS
#define TINYGLTF_IMPLEMENTATION
#include <tiny_gltf.h>
bool tinygltf_FileExistsFunction([[maybe_unused]] const std::string& filename, [[maybe_unused]] void* user) {
return true;
}
std::string tinygltf_ExpandFilePathFunction(const std::string& path, [[maybe_unused]] void* user) {
return path;
}
bool tinygltf_ReadWholeFileFunction(std::vector<unsigned char>* data, std::string*, const std::string&, void*) {
// tinygltf checks if size == 1. It also checks if the size is correct for glb files, but
// we'll ignore that for now.
data->resize(1);
return true;
}
bool tinygltf_LoadImageData(tinygltf::Image *image, const int image_idx, std::string *err,
std::string *warn, int req_width, int req_height,
const unsigned char *bytes, int size, void *user_data) {
return true;
}
void setTinyGLTFCallbacks(tinygltf::TinyGLTF& gltf) {
gltf.SetFsCallbacks({
tinygltf_FileExistsFunction,
tinygltf_ExpandFilePathFunction,
tinygltf_ReadWholeFileFunction,
nullptr, nullptr,
});
gltf.SetImageLoader(tinygltf_LoadImageData, nullptr);
}
#endif
#ifdef HAS_CGLTF
#define CGLTF_IMPLEMENTATION
#include <cgltf.h>
#endif
#ifdef HAS_GLTFRS
#include "rust/cxx.h"
#include "gltf-rs-bridge/lib.h"
#endif
#ifdef HAS_ASSIMP
#include <assimp/cimport.h>
#include <assimp/scene.h>
#include <assimp/Base64.hpp>
#endif
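// Reads a whole file into memory and appends fastgltf's required buffer padding so the
// bytes can be handed straight to GltfDataBuffer::fromByteView below.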
std::vector<uint8_t> readFileAsBytes(std::filesystem::path path) {
std::ifstream file(path, std::ios::ate | std::ios::binary);
if (!file.is_open())
throw std::runtime_error(std::string { "Failed to open file: " } + path.string());
auto fileSize = file.tellg();
std::vector<uint8_t> bytes(static_cast<size_t>(fileSize) + fastgltf::getGltfBufferPadding());
file.seekg(0, std::ifstream::beg);
file.read(reinterpret_cast<char*>(bytes.data()), fileSize);
file.close();
return bytes;
}
TEST_CASE("Benchmark loading of NewSponza", "[gltf-benchmark]") {
if (!std::filesystem::exists(intelSponza / "NewSponza_Main_glTF_002.gltf")) {
// NewSponza is not part of glTF-Sample-Models, and therefore not always available.
SKIP("Intel's NewSponza (GLTF) is required for this benchmark.");
}
fastgltf::Parser parser;
#ifdef HAS_TINYGLTF
tinygltf::TinyGLTF tinygltf;
tinygltf::Model model;
std::string warn, err;
#endif
auto bytes = readFileAsBytes(intelSponza / "NewSponza_Main_glTF_002.gltf");
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->fromByteView(bytes.data(), bytes.size() - fastgltf::getGltfBufferPadding(), bytes.size()));
BENCHMARK("Parse NewSponza") {
return parser.loadGLTF(jsonData.get(), intelSponza, benchmarkOptions);
};
#ifdef HAS_TINYGLTF
setTinyGLTFCallbacks(tinygltf);
BENCHMARK("Parse NewSponza with tinygltf") {
return tinygltf.LoadASCIIFromString(&model, &err, &warn, reinterpret_cast<char*>(bytes.data()), bytes.size(), intelSponza.string());
};
#endif
#ifdef HAS_CGLTF
BENCHMARK("Parse NewSponza with cgltf") {
cgltf_options options = {};
cgltf_data* data = nullptr;
cgltf_result result = cgltf_parse(&options, bytes.data(), bytes.size(), &data);
REQUIRE(result == cgltf_result_success);
cgltf_free(data);
return result;
};
#endif
#ifdef HAS_GLTFRS
auto padding = fastgltf::getGltfBufferPadding();
BENCHMARK("Parse NewSponza with gltf-rs") {
auto slice = rust::Slice<const std::uint8_t>(reinterpret_cast<std::uint8_t*>(bytes.data()), bytes.size() - padding);
return rust::gltf::run(slice);
};
#endif
#ifdef HAS_ASSIMP
BENCHMARK("Parse NewSponza with assimp") {
return aiImportFileFromMemory(reinterpret_cast<const char*>(bytes.data()), jsonData->getBufferSize(), 0, nullptr);
};
#endif
}
TEST_CASE("Benchmark base64 decoding from glTF file", "[gltf-benchmark]") {
fastgltf::Parser parser;
#ifdef HAS_TINYGLTF
tinygltf::TinyGLTF tinygltf;
tinygltf::Model model;
std::string warn, err;
#endif
auto cylinderEngine = sampleModels / "2.0" / "2CylinderEngine" / "glTF-Embedded";
auto bytes = readFileAsBytes(cylinderEngine / "2CylinderEngine.gltf");
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->fromByteView(bytes.data(), bytes.size() - fastgltf::getGltfBufferPadding(), bytes.size()));
BENCHMARK("Parse 2CylinderEngine and decode base64") {
return parser.loadGLTF(jsonData.get(), cylinderEngine, benchmarkOptions);
};
#ifdef HAS_TINYGLTF
setTinyGLTFCallbacks(tinygltf);
BENCHMARK("2CylinderEngine decode with tinygltf") {
return tinygltf.LoadASCIIFromString(&model, &err, &warn, reinterpret_cast<char*>(bytes.data()), bytes.size(), cylinderEngine.string());
};
#endif
#ifdef HAS_CGLTF
BENCHMARK("2CylinderEngine decode with cgltf") {
cgltf_options options = {};
cgltf_data* data = nullptr;
auto filePath = cylinderEngine.string();
cgltf_result result = cgltf_parse(&options, bytes.data(), bytes.size(), &data);
REQUIRE(result == cgltf_result_success);
result = cgltf_load_buffers(&options, data, filePath.c_str());
cgltf_free(data);
return result;
};
#endif
#ifdef HAS_GLTFRS
auto padding = fastgltf::getGltfBufferPadding();
BENCHMARK("2CylinderEngine with gltf-rs") {
auto slice = rust::Slice<const std::uint8_t>(reinterpret_cast<std::uint8_t*>(bytes.data()), bytes.size() - padding);
return rust::gltf::run(slice);
};
#endif
#ifdef HAS_ASSIMP
BENCHMARK("2CylinderEngine with assimp") {
const auto* scene = aiImportFileFromMemory(reinterpret_cast<const char*>(bytes.data()), jsonData->getBufferSize(), 0, nullptr);
REQUIRE(scene != nullptr);
return scene;
};
#endif
}
TEST_CASE("Benchmark raw JSON parsing", "[gltf-benchmark]") {
fastgltf::Parser parser;
#ifdef HAS_TINYGLTF
tinygltf::TinyGLTF tinygltf;
tinygltf::Model model;
std::string warn, err;
#endif
auto buggyPath = sampleModels / "2.0" / "Buggy" / "glTF";
auto bytes = readFileAsBytes(buggyPath / "Buggy.gltf");
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->fromByteView(bytes.data(), bytes.size() - fastgltf::getGltfBufferPadding(), bytes.size()));
BENCHMARK("Parse Buggy.gltf") {
return parser.loadGLTF(jsonData.get(), buggyPath, benchmarkOptions);
};
#ifdef HAS_TINYGLTF
setTinyGLTFCallbacks(tinygltf);
BENCHMARK("Parse Buggy.gltf with tinygltf") {
return tinygltf.LoadASCIIFromString(&model, &err, &warn, reinterpret_cast<char*>(bytes.data()), bytes.size(), buggyPath.string());
};
#endif
#ifdef HAS_CGLTF
BENCHMARK("Parse Buggy.gltf with cgltf") {
cgltf_options options = {};
cgltf_data* data = nullptr;
auto filePath = buggyPath.string();
cgltf_result result = cgltf_parse(&options, bytes.data(), bytes.size(), &data);
REQUIRE(result == cgltf_result_success);
cgltf_free(data);
return result;
};
#endif
#ifdef HAS_GLTFRS
auto padding = fastgltf::getGltfBufferPadding();
BENCHMARK("Parse Buggy.gltf with gltf-rs") {
auto slice = rust::Slice<const std::uint8_t>(reinterpret_cast<std::uint8_t*>(bytes.data()), bytes.size() - padding);
return rust::gltf::run(slice);
};
#endif
#ifdef HAS_ASSIMP
BENCHMARK("Parse Buggy.gltf with assimp") {
return aiImportFileFromMemory(reinterpret_cast<const char*>(bytes.data()), jsonData->getBufferSize(), 0, nullptr);
};
#endif
}
TEST_CASE("Benchmark massive gltf file", "[gltf-benchmark]") {
if (!std::filesystem::exists(bistroPath / "bistro.gltf")) {
// Bistro is not part of glTF-Sample-Models, and therefore not always available.
SKIP("Amazon's Bistro (GLTF) is required for this benchmark.");
}
fastgltf::Parser parser(fastgltf::Extensions::KHR_mesh_quantization);
#ifdef HAS_TINYGLTF
tinygltf::TinyGLTF tinygltf;
tinygltf::Model model;
std::string warn, err;
#endif
auto bytes = readFileAsBytes(bistroPath / "bistro.gltf");
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->fromByteView(bytes.data(), bytes.size() - fastgltf::getGltfBufferPadding(), bytes.size()));
BENCHMARK("Parse Bistro") {
return parser.loadGLTF(jsonData.get(), bistroPath, benchmarkOptions);
};
#ifdef HAS_TINYGLTF
setTinyGLTFCallbacks(tinygltf);
BENCHMARK("Parse Bistro with tinygltf") {
return tinygltf.LoadASCIIFromString(&model, &err, &warn, reinterpret_cast<char*>(bytes.data()), bytes.size(), bistroPath.string());
};
#endif
#ifdef HAS_CGLTF
BENCHMARK("Parse Bistro with cgltf") {
cgltf_options options = {};
cgltf_data* data = nullptr;
auto filePath = bistroPath.string();
cgltf_result result = cgltf_parse(&options, bytes.data(), bytes.size(), &data);
REQUIRE(result == cgltf_result_success);
cgltf_free(data);
return result;
};
#endif
#ifdef HAS_GLTFRS
auto padding = fastgltf::getGltfBufferPadding();
BENCHMARK("Parse Bistro with gltf-rs") {
auto slice = rust::Slice<const std::uint8_t>(reinterpret_cast<std::uint8_t*>(bytes.data()), bytes.size() - padding);
return rust::gltf::run(slice);
};
#endif
#ifdef HAS_ASSIMP
BENCHMARK("Parse Bistro with assimp") {
return aiImportFileFromMemory(reinterpret_cast<const char*>(bytes.data()), jsonData->getBufferSize(), 0, nullptr);
};
#endif
}
TEST_CASE("Compare parsing performance with minified documents", "[gltf-benchmark]") {
auto buggyPath = sampleModels / "2.0" / "Buggy" / "glTF";
auto bytes = readFileAsBytes(buggyPath / "Buggy.gltf");
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->fromByteView(bytes.data(), bytes.size() - fastgltf::getGltfBufferPadding(), bytes.size()));
// Create a minified JSON string
std::vector<uint8_t> minified(bytes.size());
size_t dstLen = 0;
auto result = simdjson::minify(reinterpret_cast<const char*>(bytes.data()), bytes.size(),
reinterpret_cast<char*>(minified.data()), dstLen);
REQUIRE(result == simdjson::SUCCESS);
minified.resize(dstLen);
// For completeness, benchmark minifying the JSON
BENCHMARK("Minify Buggy.gltf") {
auto result = simdjson::minify(reinterpret_cast<const char*>(bytes.data()), bytes.size(),
reinterpret_cast<char*>(minified.data()), dstLen);
REQUIRE(result == simdjson::SUCCESS);
return result;
};
auto minifiedJsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(minifiedJsonData->fromByteView(minified.data(), minified.size() - fastgltf::getGltfBufferPadding(), minified.size()));
fastgltf::Parser parser;
BENCHMARK("Parse Buggy.gltf with normal JSON") {
return parser.loadGLTF(jsonData.get(), buggyPath, benchmarkOptions);
};
BENCHMARK("Parse Buggy.gltf with minified JSON") {
return parser.loadGLTF(minifiedJsonData.get(), buggyPath, benchmarkOptions);
};
}
#if defined(FASTGLTF_IS_X86)
TEST_CASE("Small CRC32-C benchmark", "[gltf-benchmark]") {
static constexpr std::string_view test = "abcdefghijklmnopqrstuvwxyz";
BENCHMARK("Default 1-byte tabular algorithm") {
return fastgltf::crc32c(reinterpret_cast<const std::uint8_t*>(test.data()), test.size());
};
BENCHMARK("SSE4 hardware algorithm") {
return fastgltf::hwcrc32c(reinterpret_cast<const std::uint8_t*>(test.data()), test.size());
};
}
#endif
TEST_CASE("Compare base64 decoding performance", "[gltf-benchmark]") {
std::string base64Characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
constexpr std::size_t bufferSize = 2 * 1024 * 1024;
// We'll generate a random base64 buffer
std::random_device device;
std::mt19937 gen(device());
std::uniform_int_distribution<std::size_t> distribution(0, base64Characters.size() - 1);
std::string generatedData;
generatedData.reserve(bufferSize);
for (std::size_t i = 0; i < bufferSize; ++i) {
generatedData.push_back(base64Characters[distribution(gen)]);
}
#ifdef HAS_TINYGLTF
BENCHMARK("Run tinygltf's base64 decoder") {
return tinygltf::base64_decode(generatedData);
};
#endif
#ifdef HAS_CGLTF
cgltf_options options {};
BENCHMARK("Run cgltf's base64 decoder") {
auto padding = fastgltf::base64::getPadding(generatedData);
auto outputSize = fastgltf::base64::getOutputSize(generatedData.size(), padding);
std::string output;
output.resize(outputSize);
auto* outputData = output.data();
return cgltf_load_buffer_base64(&options, generatedData.size(), generatedData.data(), reinterpret_cast<void**>(&outputData));
};
#endif
#ifdef HAS_GLTFRS
BENCHMARK("Run base64 Rust library decoder") {
auto slice = rust::Slice<const std::uint8_t>(reinterpret_cast<std::uint8_t*>(generatedData.data()), generatedData.size());
return rust::gltf::run_base64(slice);
};
#endif
#ifdef HAS_ASSIMP
BENCHMARK("Run Assimp's base64 decoder") {
return Assimp::Base64::Decode(generatedData);
};
#endif
BENCHMARK("Run fastgltf's fallback base64 decoder") {
return fastgltf::base64::fallback_decode(generatedData);
};
#if defined(FASTGLTF_IS_X86)
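// simdjson's implementation registry is reused here as a runtime check for SSE4/AVX2
// (and Neon below) support before benchmarking the matching decoder.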
const auto& impls = simdjson::get_available_implementations();
if (const auto* sse4 = impls["westmere"]; sse4 != nullptr && sse4->supported_by_runtime_system()) {
BENCHMARK("Run fastgltf's SSE4 base64 decoder") {
return fastgltf::base64::sse4_decode(generatedData);
};
}
if (const auto* avx2 = impls["haswell"]; avx2 != nullptr && avx2->supported_by_runtime_system()) {
BENCHMARK("Run fastgltf's AVX2 base64 decoder") {
return fastgltf::base64::avx2_decode(generatedData);
};
}
#elif defined(FASTGLTF_IS_A64)
const auto& impls = simdjson::get_available_implementations();
if (const auto* neon = impls["arm64"]; avx2 != nullptr && neon->supported_by_runtime_system()) {
BENCHMARK("Run fastgltf's Neon base64 decoder") {
return fastgltf::base64::neon_decode(generatedData);
};
}
#endif
}

View File

@@ -0,0 +1,262 @@
#include <catch2/catch_approx.hpp>
#include <catch2/catch_test_macros.hpp>
#include <catch2/benchmark/catch_benchmark.hpp>
#include <glm/gtc/epsilon.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <fastgltf/parser.hpp>
#include "gltf_path.hpp"
TEST_CASE("Loading KHR_texture_basisu glTF files", "[gltf-loader]") {
auto stainedLamp = sampleModels / "2.0" / "StainedGlassLamp" / "glTF-KTX-BasisU";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(stainedLamp / "StainedGlassLamp.gltf"));
SECTION("Loading KHR_texture_basisu") {
fastgltf::Parser parser(fastgltf::Extensions::KHR_texture_basisu);
auto asset = parser.loadGLTF(jsonData.get(), path, fastgltf::Options::DontRequireValidAssetMember,
fastgltf::Category::Textures | fastgltf::Category::Images);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->textures.size() == 19);
REQUIRE(!asset->images.empty());
auto& texture = asset->textures[1];
REQUIRE(!texture.imageIndex.has_value());
REQUIRE(texture.samplerIndex == 0);
REQUIRE(texture.basisuImageIndex.has_value());
REQUIRE(texture.basisuImageIndex.value() == 1);
auto& image = asset->images.front();
auto* filePath = std::get_if<fastgltf::sources::URI>(&image.data);
REQUIRE(filePath != nullptr);
REQUIRE(filePath->uri.valid());
REQUIRE(filePath->uri.isLocalPath());
REQUIRE(filePath->mimeType == fastgltf::MimeType::KTX2);
}
SECTION("Testing requiredExtensions") {
// We specify no extensions, yet the StainedGlassLamp requires KHR_texture_basisu.
fastgltf::Parser parser(fastgltf::Extensions::None);
auto stainedGlassLamp = parser.loadGLTF(jsonData.get(), path, fastgltf::Options::DontRequireValidAssetMember);
REQUIRE(stainedGlassLamp.error() == fastgltf::Error::MissingExtensions);
}
}
TEST_CASE("Loading KHR_texture_transform glTF files", "[gltf-loader]") {
auto transformTest = sampleModels / "2.0" / "TextureTransformMultiTest" / "glTF";
auto jsonData = std::make_unique<fastgltf::GltfDataBuffer>();
REQUIRE(jsonData->loadFromFile(transformTest / "TextureTransformMultiTest.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::KHR_texture_transform);
auto asset = parser.loadGLTF(jsonData.get(), transformTest, fastgltf::Options::DontRequireValidAssetMember, fastgltf::Category::Materials);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(!asset->materials.empty());
auto& material = asset->materials.front();
REQUIRE(material.pbrData.baseColorTexture.has_value());
REQUIRE(material.pbrData.baseColorTexture->transform != nullptr);
REQUIRE(material.pbrData.baseColorTexture->transform->uvOffset[0] == 0.705f);
REQUIRE(material.pbrData.baseColorTexture->transform->rotation == Catch::Approx(1.5707963705062866f));
}
TEST_CASE("Test KHR_lights_punctual", "[gltf-loader]") {
auto lightsLamp = sampleModels / "2.0" / "LightsPunctualLamp" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(lightsLamp / "LightsPunctualLamp.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::KHR_lights_punctual);
auto asset = parser.loadGLTF(&jsonData, lightsLamp, fastgltf::Options::None, fastgltf::Category::Nodes);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->lights.size() == 5);
REQUIRE(asset->nodes.size() > 4);
auto& nodes = asset->nodes;
REQUIRE(nodes[3].lightIndex.has_value());
REQUIRE(nodes[3].lightIndex.value() == 0);
auto& lights = asset->lights;
REQUIRE(lights[0].name == "Point");
REQUIRE(lights[0].type == fastgltf::LightType::Point);
REQUIRE(lights[0].intensity == 15.0f);
REQUIRE(glm::epsilonEqual(lights[0].color[0], 1.0f, glm::epsilon<float>()));
REQUIRE(glm::epsilonEqual(lights[0].color[1], 0.63187497854232788f, glm::epsilon<float>()));
REQUIRE(glm::epsilonEqual(lights[0].color[2], 0.23909975588321689f, glm::epsilon<float>()));
}
TEST_CASE("Test KHR_materials_specular", "[gltf-loader]") {
auto specularTest = sampleModels / "2.0" / "SpecularTest" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(specularTest / "SpecularTest.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::KHR_materials_specular);
auto asset = parser.loadGLTF(&jsonData, specularTest, fastgltf::Options::None, fastgltf::Category::Materials);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->materials.size() >= 13);
auto& materials = asset->materials;
REQUIRE(materials[1].specular != nullptr);
REQUIRE(materials[1].specular->specularFactor == 0.0f);
REQUIRE(materials[2].specular != nullptr);
REQUIRE(glm::epsilonEqual(materials[2].specular->specularFactor, 0.051269f, glm::epsilon<float>()));
REQUIRE(materials[8].specular != nullptr);
REQUIRE(glm::epsilonEqual(materials[8].specular->specularColorFactor[0], 0.051269f, glm::epsilon<float>()));
REQUIRE(glm::epsilonEqual(materials[8].specular->specularColorFactor[1], 0.051269f, glm::epsilon<float>()));
REQUIRE(glm::epsilonEqual(materials[8].specular->specularColorFactor[2], 0.051269f, glm::epsilon<float>()));
REQUIRE(materials[12].specular != nullptr);
REQUIRE(materials[12].specular->specularColorTexture.has_value());
REQUIRE(materials[12].specular->specularColorTexture.value().textureIndex == 2);
}
TEST_CASE("Test KHR_materials_ior and KHR_materials_iridescence", "[gltf-loader]") {
auto specularTest = sampleModels / "2.0" / "IridescenceDielectricSpheres" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(specularTest / "IridescenceDielectricSpheres.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::KHR_materials_iridescence | fastgltf::Extensions::KHR_materials_ior);
auto asset = parser.loadGLTF(&jsonData, specularTest, fastgltf::Options::None, fastgltf::Category::Materials);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->materials.size() >= 51);
auto& materials = asset->materials;
REQUIRE(materials[0].iridescence != nullptr);
REQUIRE(materials[0].iridescence->iridescenceFactor == 1.0f);
REQUIRE(materials[0].iridescence->iridescenceIor == 1.0f);
REQUIRE(materials[0].iridescence->iridescenceThicknessMaximum == 100.0f);
REQUIRE(materials[0].ior.has_value());
REQUIRE(materials[0].ior.value() == 1.0f);
REQUIRE(materials[7].ior.has_value());
REQUIRE(materials[7].ior.value() == 1.17f);
REQUIRE(materials[50].iridescence != nullptr);
REQUIRE(materials[50].iridescence->iridescenceFactor == 1.0f);
REQUIRE(materials[50].iridescence->iridescenceIor == 1.17f);
REQUIRE(materials[50].iridescence->iridescenceThicknessMaximum == 200.0f);
}
TEST_CASE("Test KHR_materials_volume and KHR_materials_transmission", "[gltf-loader]") {
auto beautifulGame = sampleModels / "2.0" / "ABeautifulGame" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(beautifulGame / "ABeautifulGame.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::KHR_materials_volume | fastgltf::Extensions::KHR_materials_transmission);
auto asset = parser.loadGLTF(&jsonData, beautifulGame, fastgltf::Options::None, fastgltf::Category::Materials);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->materials.size() >= 6);
auto& materials = asset->materials;
REQUIRE(materials[5].volume != nullptr);
REQUIRE(glm::epsilonEqual(materials[5].volume->thicknessFactor, 0.2199999988079071f, glm::epsilon<float>()));
REQUIRE(glm::epsilonEqual(materials[5].volume->attenuationColor[0], 0.800000011920929f, glm::epsilon<float>()));
REQUIRE(glm::epsilonEqual(materials[5].volume->attenuationColor[1], 0.800000011920929f, glm::epsilon<float>()));
REQUIRE(glm::epsilonEqual(materials[5].volume->attenuationColor[2], 0.800000011920929f, glm::epsilon<float>()));
REQUIRE(materials[5].transmission != nullptr);
REQUIRE(materials[5].transmission->transmissionFactor == 1.0f);
}
TEST_CASE("Test KHR_materials_clearcoat", "[gltf-loader]") {
auto clearcoatTest = sampleModels / "2.0" / "ClearCoatTest" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(clearcoatTest / "ClearCoatTest.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::KHR_materials_clearcoat);
auto asset = parser.loadGLTF(&jsonData, clearcoatTest, fastgltf::Options::None, fastgltf::Category::Materials);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->materials.size() >= 8);
auto& materials = asset->materials;
REQUIRE(materials[1].clearcoat != nullptr);
REQUIRE(materials[1].clearcoat->clearcoatFactor == 1.0f);
REQUIRE(materials[1].clearcoat->clearcoatRoughnessFactor == 0.03f);
REQUIRE(materials[7].clearcoat != nullptr);
REQUIRE(materials[7].clearcoat->clearcoatFactor == 1.0f);
REQUIRE(materials[7].clearcoat->clearcoatRoughnessFactor == 1.0f);
REQUIRE(materials[7].clearcoat->clearcoatRoughnessTexture.has_value());
REQUIRE(materials[7].clearcoat->clearcoatRoughnessTexture->textureIndex == 2);
REQUIRE(materials[7].clearcoat->clearcoatRoughnessTexture->texCoordIndex == 0);
}
TEST_CASE("Test EXT_mesh_gpu_instancing", "[gltf-loader]") {
auto simpleInstancingTest = sampleModels / "2.0" / "SimpleInstancing" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(simpleInstancingTest / "SimpleInstancing.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::EXT_mesh_gpu_instancing);
auto asset = parser.loadGLTF(&jsonData, simpleInstancingTest, fastgltf::Options::None, fastgltf::Category::Accessors | fastgltf::Category::Nodes);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->accessors.size() >= 6);
REQUIRE(asset->nodes.size() >= 1);
auto& nodes = asset->nodes;
REQUIRE(nodes[0].instancingAttributes.size() == 3u);
REQUIRE(nodes[0].findInstancingAttribute("TRANSLATION") != nodes[0].instancingAttributes.cend());
REQUIRE(nodes[0].findInstancingAttribute("SCALE") != nodes[0].instancingAttributes.cend());
REQUIRE(nodes[0].findInstancingAttribute("ROTATION") != nodes[0].instancingAttributes.cend());
}
#if FASTGLTF_ENABLE_DEPRECATED_EXT
TEST_CASE("Test KHR_materials_pbrSpecularGlossiness", "[gltf-loader]") {
auto specularGlossinessTest = sampleModels / "2.0" / "SpecGlossVsMetalRough" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(specularGlossinessTest / "SpecGlossVsMetalRough.gltf"));
fastgltf::Parser parser(fastgltf::Extensions::KHR_materials_pbrSpecularGlossiness | fastgltf::Extensions::KHR_materials_specular);
auto asset = parser.loadGLTF(&jsonData, specularGlossinessTest);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->materials.size() == 4);
auto& materials = asset->materials;
REQUIRE(materials[0].specularGlossiness != nullptr);
REQUIRE(materials[0].specularGlossiness->diffuseFactor[0] == 1.0f);
REQUIRE(materials[0].specularGlossiness->diffuseFactor[1] == 1.0f);
REQUIRE(materials[0].specularGlossiness->diffuseFactor[2] == 1.0f);
REQUIRE(materials[0].specularGlossiness->diffuseFactor[3] == 1.0f);
REQUIRE(materials[0].specularGlossiness->specularFactor[0] == 1.0f);
REQUIRE(materials[0].specularGlossiness->specularFactor[1] == 1.0f);
REQUIRE(materials[0].specularGlossiness->specularFactor[2] == 1.0f);
REQUIRE(materials[0].specularGlossiness->glossinessFactor == 1.0f);
REQUIRE(materials[0].specularGlossiness->diffuseTexture.has_value());
REQUIRE(materials[0].specularGlossiness->diffuseTexture.value().textureIndex == 5);
REQUIRE(materials[0].specularGlossiness->specularGlossinessTexture.has_value());
REQUIRE(materials[0].specularGlossiness->specularGlossinessTexture.value().textureIndex == 6);
REQUIRE(materials[3].specularGlossiness != nullptr);
REQUIRE(materials[3].specularGlossiness->diffuseFactor[0] == 1.0f);
REQUIRE(materials[3].specularGlossiness->diffuseFactor[1] == 1.0f);
REQUIRE(materials[3].specularGlossiness->diffuseFactor[2] == 1.0f);
REQUIRE(materials[3].specularGlossiness->diffuseFactor[3] == 1.0f);
REQUIRE(materials[3].specularGlossiness->specularFactor[0] == 0.0f);
REQUIRE(materials[3].specularGlossiness->specularFactor[1] == 0.0f);
REQUIRE(materials[3].specularGlossiness->specularFactor[2] == 0.0f);
REQUIRE(materials[3].specularGlossiness->glossinessFactor == 0.0f);
REQUIRE(materials[3].specularGlossiness->diffuseTexture.has_value());
REQUIRE(materials[3].specularGlossiness->diffuseTexture.value().textureIndex == 7);
}
#endif

View File

@@ -0,0 +1,57 @@
#include <fstream>
#include <catch2/catch_test_macros.hpp>
#include <fastgltf/parser.hpp>
#include <fastgltf/types.hpp>
#include "gltf_path.hpp"
TEST_CASE("Load basic GLB file", "[gltf-loader]") {
fastgltf::Parser parser;
auto folder = sampleModels / "2.0" / "Box" / "glTF-Binary";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(folder / "Box.glb"));
SECTION("Load basic Box.glb") {
auto asset = parser.loadBinaryGLTF(&jsonData, folder, fastgltf::Options::None, fastgltf::Category::Buffers);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->buffers.size() == 1);
auto& buffer = asset->buffers.front();
auto* bufferView = std::get_if<fastgltf::sources::ByteView>(&buffer.data);
REQUIRE(bufferView != nullptr);
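// Without Options::LoadGLBBuffers the embedded buffer is exposed as a ByteView into the original GLB data; check its byte offset within that data and the overall data size.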
auto jsonSpan = fastgltf::span<std::byte>(jsonData);
REQUIRE(bufferView->bytes.data() - jsonSpan.data() == 1016);
REQUIRE(jsonSpan.size() == 1664);
}
SECTION("Load basic Box.glb and load buffers") {
auto asset = parser.loadBinaryGLTF(&jsonData, folder, fastgltf::Options::LoadGLBBuffers, fastgltf::Category::Buffers);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->buffers.size() == 1);
auto& buffer = asset->buffers.front();
auto* bufferVector = std::get_if<fastgltf::sources::Vector>(&buffer.data);
REQUIRE(bufferVector != nullptr);
REQUIRE(!bufferVector->bytes.empty());
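// The GLB binary chunk may carry a few alignment padding bytes beyond the buffer's declared byteLength.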
REQUIRE(static_cast<uint64_t>(bufferVector->bytes.size() - buffer.byteLength) < 3);
}
SECTION("Load GLB by bytes") {
std::ifstream file(folder / "Box.glb", std::ios::binary | std::ios::ate);
auto length = static_cast<size_t>(file.tellg());
file.seekg(0, std::ifstream::beg);
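// fastgltf wants externally supplied buffers to be over-allocated by getGltfBufferPadding(); fromByteView is given both the data size and the padded capacity.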
std::vector<uint8_t> bytes(length + fastgltf::getGltfBufferPadding());
file.read(reinterpret_cast<char*>(bytes.data()), static_cast<std::streamsize>(length));
fastgltf::GltfDataBuffer byteBuffer;
REQUIRE(byteBuffer.fromByteView(bytes.data(), length, length + fastgltf::getGltfBufferPadding()));
auto asset = parser.loadBinaryGLTF(&byteBuffer, folder, fastgltf::Options::LoadGLBBuffers, fastgltf::Category::Buffers);
REQUIRE(asset.error() == fastgltf::Error::None);
}
}

View File

@@ -0,0 +1,16 @@
[package]
name = "gltf_rs"
version = "0.1.0"
edition = "2021"
[dependencies]
cxx = "1.0"
gltf = "1.1.0"
base64 = "0.21.2"
[build-dependencies]
cxx-build = "1.0"
[lib]
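# Build a static library so the Rust code can be linked into a C/C++ binary through the cxx bridge in src/lib.rs.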
crate-type = ["staticlib"]
path = "src/lib.rs"

View File

@@ -0,0 +1,54 @@
use base64::Engine;
use base64::{alphabet, engine};
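// cxx bridge exposing the Rust helpers below to C++ under the rust::gltf namespace.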
#[cxx::bridge(namespace = "rust::gltf")]
mod ffi {
extern "Rust" {
fn run(data: &[u8]) -> i32;
fn run_base64(data: &[u8]) -> Vec<u8>;
}
}
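// Parses the given glTF with gltf-rs and decodes every base64 data URI found on its buffers, returning how many were decoded.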
fn run(data: &[u8]) -> i32 {
// TODO: Decode URIs and data URIs?
let gltf = gltf::Gltf::from_slice(data)
.unwrap();
// Decode URIs
let json = gltf.document.into_json();
let mut uri_count = 0;
for x in json.buffers {
// gltf-rs doesn't automatically decode base64. Using its "import" feature won't work,
// because we're not interested in file-loaded buffer/image data.
if x.uri.is_some() {
let uri = x.uri.unwrap();
if let Some(rest) = uri.strip_prefix("data:") {
let mut it = rest.split(";base64,");
let data = match (it.next(), it.next()) {
(_, Some(data)) => Some(data),
(Some(data), _) => Some(data),
_ => None
};
if data.is_none() {
continue;
}
base64::engine::GeneralPurpose::new(
&base64::alphabet::STANDARD,
base64::engine::general_purpose::PAD)
.decode(data.unwrap()).expect("Decoded bytes");
uri_count += 1;
}
}
}
uri_count
}
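// Decodes a standalone base64 payload using the standard alphabet; panics if the input is not valid base64.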
fn run_base64(data: &[u8]) -> Vec<u8> {
engine::GeneralPurpose::new(&alphabet::STANDARD, engine::general_purpose::PAD)
.decode(data)
.expect("Decoded bytes")
}

View File

@@ -0,0 +1 @@
AAAAAAAAAAAAAIA/AAAAAAAAAAAAAIA/AAAAAAAAAAAAAIA/AAAAAAAAAAAAAIA/AAAAAAAAgL8AAAAAAAAAAAAAgL8AAAAAAAAAAAAAgL8AAAAAAAAAAAAAgL8AAAAAAACAPwAAAAAAAAAAAACAPwAAAAAAAAAAAACAPwAAAAAAAAAAAACAPwAAAAAAAAAAAAAAAAAAgD8AAAAAAAAAAAAAgD8AAAAAAAAAAAAAgD8AAAAAAAAAAAAAgD8AAAAAAACAvwAAAAAAAAAAAACAvwAAAAAAAAAAAACAvwAAAAAAAAAAAACAvwAAAAAAAAAAAAAAAAAAAAAAAIC/AAAAAAAAAAAAAIC/AAAAAAAAAAAAAIC/AAAAAAAAAAAAAIC/AAAAvwAAAL8AAAA/AAAAPwAAAL8AAAA/AAAAvwAAAD8AAAA/AAAAPwAAAD8AAAA/AAAAPwAAAL8AAAA/AAAAvwAAAL8AAAA/AAAAPwAAAL8AAAC/AAAAvwAAAL8AAAC/AAAAPwAAAD8AAAA/AAAAPwAAAL8AAAA/AAAAPwAAAD8AAAC/AAAAPwAAAL8AAAC/AAAAvwAAAD8AAAA/AAAAPwAAAD8AAAA/AAAAvwAAAD8AAAC/AAAAPwAAAD8AAAC/AAAAvwAAAL8AAAA/AAAAvwAAAD8AAAA/AAAAvwAAAL8AAAC/AAAAvwAAAD8AAAC/AAAAvwAAAL8AAAC/AAAAvwAAAD8AAAC/AAAAPwAAAL8AAAC/AAAAPwAAAD8AAAC/AAABAAIAAwACAAEABAAFAAYABwAGAAUACAAJAAoACwAKAAkADAANAA4ADwAOAA0AEAARABIAEwASABEAFAAVABYAFwAWABUA

Binary file not shown.

View File

@@ -0,0 +1,5 @@
{
"asset": {
"version": "2.0"
}
}

View File

@@ -0,0 +1 @@
{}

View File

@@ -0,0 +1,40 @@
{
"asset": {
"version": "2.0"
},
"cameras": [
{
"perspective": {
"yfov": 1.0,
"zfar": 1.0,
"znear": 0.001
},
"type": "perspective"
}
],
"nodes": [
{
"name": "TRS components",
"camera": 0,
"translation": [
1.0, 1.0, 1.0
],
"rotation": [
0.0, 1.0, 0.0, 0.0
],
"scale": [
2.0, 0.5, 1.0
]
},
{
"name": "Matrix",
"camera": 0,
"matrix": [
-2.0, 0.0, 0.0, 0.0,
0.0, 0.5, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
1.0, 1.0, 1.0, 1.0
]
}
]
}

View File

@@ -0,0 +1,9 @@
#pragma once
#include <filesystem>
// We need to use the __FILE__ macro so that we have access to test glTF files in this
// directory. As Clang does not yet fully support std::source_location, we cannot use that.
inline auto path = std::filesystem::path { __FILE__ }.parent_path() / "gltf";
inline auto sampleModels = std::filesystem::path { __FILE__ }.parent_path() / "gltf" / "glTF-Sample-Models";
inline auto intelSponza = std::filesystem::path { __FILE__ }.parent_path() / "gltf" / "intel_sponza";
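// Left empty here; presumably meant to be pointed at a local copy of the Bistro scene when that data is available.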
inline auto bistroPath = std::filesystem::path {};

third_party/fastgltf/tests/uri_tests.cpp (vendored, new file, 153 lines)
View File

@@ -0,0 +1,153 @@
#include <catch2/catch_test_macros.hpp>
#include <fastgltf/types.hpp>
#include <fastgltf/parser.hpp>
#include "gltf_path.hpp"
TEST_CASE("Test basic URIs", "[uri-tests]") {
const fastgltf::URI uri1(std::string_view(""));
REQUIRE(uri1.scheme().empty());
REQUIRE(uri1.path().empty());
std::string_view path = "path/somewhere.xyz";
SECTION("Basic local path") {
const fastgltf::URI uri2(path);
REQUIRE(uri2.scheme().empty());
REQUIRE(uri2.path() == path);
REQUIRE(uri2.isLocalPath());
REQUIRE(uri2.fspath() == path);
}
std::string_view abspath = "/path/somewhere.xyz";
SECTION("File scheme path") {
const std::string_view filePath = "file:/path/somewhere.xyz";
const fastgltf::URI uri3(filePath);
REQUIRE(uri3.scheme() == "file");
REQUIRE(uri3.isLocalPath());
REQUIRE(uri3.path() == abspath);
}
SECTION("File scheme localhost path") {
const std::string_view localhostPath = "file://localhost/path/somewhere.xyz";
const fastgltf::URI uri4(localhostPath);
REQUIRE(uri4.scheme() == "file");
REQUIRE(uri4.path() == abspath);
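// Unlike the plain file:/ form above, the presence of an authority ("localhost") means this URI is not reported as a local path.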
REQUIRE(!uri4.isLocalPath());
}
}
TEST_CASE("Test generic URIs", "[uri-tests]") {
// These are a bunch of example URIs from https://en.wikipedia.org/wiki/Uniform_Resource_Identifier#Example_URIs
const fastgltf::URI uri(std::string_view("https://john.doe@www.example.com:123/forum/questions/?tag=networking&order=newest#top"));
REQUIRE(uri.scheme() == "https");
REQUIRE(uri.userinfo() == "john.doe");
REQUIRE(uri.host() == "www.example.com");
REQUIRE(uri.port() == "123");
REQUIRE(uri.path() == "/forum/questions/");
REQUIRE(uri.query() == "tag=networking&order=newest");
REQUIRE(uri.fragment() == "top");
const fastgltf::URI uri1(std::string_view("ldap://[2001:db8::7]/c=GB?objectClass?one"));
REQUIRE(uri1.scheme() == "ldap");
REQUIRE(uri1.host() == "2001:db8::7");
REQUIRE(uri1.path() == "/c=GB");
REQUIRE(uri1.query() == "objectClass?one");
const fastgltf::URI uri2(std::string_view("mailto:John.Doe@example.com"));
REQUIRE(uri2.scheme() == "mailto");
REQUIRE(uri2.path() == "John.Doe@example.com");
const fastgltf::URI uri3(std::string_view("telnet://192.0.2.16:80/"));
REQUIRE(uri3.scheme() == "telnet");
REQUIRE(uri3.host() == "192.0.2.16");
REQUIRE(uri3.port() == "80");
REQUIRE(uri3.path() == "/");
}
TEST_CASE("Test percent decoding", "[uri-tests]") {
std::string test = "%22 %25";
fastgltf::URI::decodePercents(test);
REQUIRE(test == "\" %");
}
TEST_CASE("Test data URI parsing", "[uri-tests]") {
// This base64 payload is taken from the example on https://en.wikipedia.org/wiki/Data_URI_scheme.
const std::string_view data = "data:image/png;base64,iVBORw0KGgoAAA"
"ANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4"
"//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU"
"5ErkJggg==";
const fastgltf::URI uri(data);
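// For a data URI, the path is everything after the "data:" prefix (hence the substr(5)).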
REQUIRE(uri.scheme() == "data");
REQUIRE(uri.path() == data.substr(5));
}
TEST_CASE("Validate URI copying/moving", "[uri-tests]") {
const std::string_view data = "test.bin";
SECTION("Copy semantics") {
fastgltf::URI uri(data);
REQUIRE(uri.path() == data);
fastgltf::URI uri2(uri);
REQUIRE(uri2.string().data() != uri.string().data());
REQUIRE(uri2.path() == data);
}
SECTION("Move semantics") {
fastgltf::URI uri;
{
fastgltf::URI uri2(data);
uri = std::move(uri2);
REQUIRE(uri2.string().empty());
}
// Test that the values were moved over and that the string views still point at valid storage.
REQUIRE(uri.string() == data);
REQUIRE(uri.path() == uri.string());
}
}
TEST_CASE("Validate escaped/percent-encoded URI", "[uri-tests]") {
const std::string_view gltfString = R"({"images": [{"uri": "grande_sph\u00E8re.png"}]})";
fastgltf::GltfDataBuffer dataBuffer;
dataBuffer.copyBytes((uint8_t*) gltfString.data(), gltfString.size());
fastgltf::Parser parser;
auto asset = parser.loadGLTF(&dataBuffer, "", fastgltf::Options::DontRequireValidAssetMember);
REQUIRE(asset.error() == fastgltf::Error::None);
auto escaped = std::get<fastgltf::sources::URI>(asset->images.front().data);
// This only tests whether the fastgltf::URI constructor handles percent-encoding correctly.
const fastgltf::URI original(std::string_view("grande_sphère.png"));
const fastgltf::URI encoded(std::string_view("grande_sph%C3%A8re.png"));
REQUIRE(original.string() == escaped.uri.string());
REQUIRE(original.string() == encoded.string());
}
TEST_CASE("Test percent-encoded URIs in glTF", "[uri-tests]") {
auto boxWithSpaces = sampleModels / "2.0" / "Box With Spaces" / "glTF";
fastgltf::GltfDataBuffer jsonData;
REQUIRE(jsonData.loadFromFile(boxWithSpaces / "Box With Spaces.gltf"));
fastgltf::Parser parser;
auto asset = parser.loadGLTF(&jsonData, boxWithSpaces);
REQUIRE(asset.error() == fastgltf::Error::None);
REQUIRE(fastgltf::validate(asset.get()) == fastgltf::Error::None);
REQUIRE(asset->images.size() == 3);
auto* image0 = std::get_if<fastgltf::sources::URI>(&asset->images[0].data);
REQUIRE(image0 != nullptr);
REQUIRE(image0->uri.path() == "Normal Map.png");
auto* image1 = std::get_if<fastgltf::sources::URI>(&asset->images[1].data);
REQUIRE(image1 != nullptr);
REQUIRE(image1->uri.path() == "glTF Logo With Spaces.png");
auto* image2 = std::get_if<fastgltf::sources::URI>(&asset->images[2].data);
REQUIRE(image2 != nullptr);
REQUIRE(image2->uri.path() == "Roughness Metallic.png");
auto* buffer0 = std::get_if<fastgltf::sources::URI>(&asset->buffers[0].data);
REQUIRE(buffer0 != nullptr);
REQUIRE(buffer0->uri.path() == "Box With Spaces.bin");
}

View File

@@ -0,0 +1,115 @@
#include <catch2/catch_test_macros.hpp>
#include <fastgltf/types.hpp>
TEST_CASE("Verify clz", "[vector-tests]") {
REQUIRE(fastgltf::clz<std::uint8_t>(0b00000001) == 7);
REQUIRE(fastgltf::clz<std::uint8_t>(0b00000010) == 6);
REQUIRE(fastgltf::clz<std::uint8_t>(0b00000100) == 5);
REQUIRE(fastgltf::clz<std::uint8_t>(0b00001000) == 4);
REQUIRE(fastgltf::clz<std::uint8_t>(0b00010000) == 3);
REQUIRE(fastgltf::clz<std::uint8_t>(0b00100000) == 2);
REQUIRE(fastgltf::clz<std::uint8_t>(0b01000000) == 1);
REQUIRE(fastgltf::clz<std::uint8_t>(0b10000000) == 0);
}
TEST_CASE("Test resize/reserve", "[vector-tests]") {
fastgltf::SmallVector<uint32_t, 4> vec = {1, 2, 3};
REQUIRE(vec[0] == 1);
REQUIRE(vec[1] == 2);
REQUIRE(vec[2] == 3);
vec.resize(5);
REQUIRE(vec.size() == 5);
REQUIRE(vec[3] == 0);
REQUIRE(vec[4] == 0);
vec.resize(2);
REQUIRE(vec.size() == 2);
REQUIRE(vec[0] == 1);
REQUIRE(vec[1] == 2);
vec.resize(6, 4);
REQUIRE(vec.size() == 6);
for (std::size_t i = 2; i < vec.size(); ++i) {
REQUIRE(vec[i] == 4);
}
vec.reserve(8);
REQUIRE(vec.size() == 6);
REQUIRE(vec.capacity() == 8);
vec.shrink_to_fit();
REQUIRE(vec.capacity() == 6);
}
TEST_CASE("Test constructors", "[vector-tests]") {
fastgltf::SmallVector<uint32_t, 4> vec = {0, 1, 2, 3};
for (std::size_t i = 0; i < vec.size(); ++i) {
REQUIRE(vec[i] == i);
}
fastgltf::SmallVector<uint32_t, 4> vec2(vec);
for (std::size_t i = 0; i < vec2.size(); ++i) {
REQUIRE(vec2[i] == i);
}
fastgltf::SmallVector<uint32_t, 4> vec3 = std::move(vec2);
REQUIRE(vec2.empty());
vec3.resize(6);
for (std::size_t i = 0; i < 4; ++i) {
REQUIRE(vec3[i] == i);
}
REQUIRE(vec3[4] == 0);
REQUIRE(vec3[5] == 0);
}
TEST_CASE("Nested SmallVector", "[vector-tests]") {
fastgltf::SmallVector<fastgltf::SmallVector<uint32_t, 2>, 4> vectors(6, {4}); // This should heap allocate straight away.
REQUIRE(vectors.size() == 6);
for (auto& vector : vectors) {
REQUIRE(vector.size() == 1);
REQUIRE(vector.front() == 4);
vector.reserve(6);
}
}
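// Helper that counts how many instances are currently alive, used to check that SmallVector constructs and destroys elements the expected number of times.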
struct RefCountedObject {
static inline std::size_t aliveObjects = 0;
RefCountedObject() {
++aliveObjects;
}
RefCountedObject(const RefCountedObject& other) {
++aliveObjects;
}
RefCountedObject(RefCountedObject&& other) = delete;
~RefCountedObject() {
--aliveObjects;
}
};
TEST_CASE("Test shrinking vectors", "[vector-tests]") {
fastgltf::SmallVector<RefCountedObject, 4> objects;
for (std::size_t i = 0; i < 4; ++i) {
objects.emplace_back();
}
REQUIRE(RefCountedObject::aliveObjects == 4);
objects.emplace_back();
REQUIRE(RefCountedObject::aliveObjects == 5);
objects.resize(4);
REQUIRE(RefCountedObject::aliveObjects == 4);
}
TEST_CASE("Test vectors with polymorphic allocators", "[vector-tests]") {
fastgltf::pmr::SmallVector<std::uint32_t, 4> ints;
ints.assign(10, 5);
REQUIRE(ints.size() == 10);
REQUIRE(ints.data() != nullptr);
for (auto& i : ints) {
REQUIRE(i == 5);
}
}