commit 6dc0eb0fcfae684ec2c7c08eb88c60cd7c22c9d6 Author: Ivan Date: Tue Apr 5 11:42:28 2022 +0300 v01 diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..1d57c03 --- /dev/null +++ b/.clang-format @@ -0,0 +1,7 @@ +--- +Language: Cpp +BasedOnStyle: Google +IndentWidth: 2 +IncludeBlocks: Preserve +... + diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000..42864a0 --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,80 @@ +Checks: 'readability-*, -readability-magic-numbers, -readability-function-cognitive-complexity, -readability-else-after-return, -readability-redundant-access-specifiers, performance-*, modernize-*, -modernize-use-trailing-return-type, -modernize-avoid-c-arrays, -modernize-use-nodiscard, -modernize-use-auto, -modernize-pass-by-value, misc-assert-side-effect, -clang-analyzer-osx.*, -clang-analyzer-cplusplus.Move, -clang-analyzer-core.uninitialized.UndefReturn, -clang-analyzer-optin.portability.UnixAPI, -clang-analyzer-unix.Malloc' +WarningsAsErrors: '*' +HeaderFilterRegex: '.*/thirdparty/basalt-headers/include/basalt/*' + +CheckOptions: +# Classes, structs, ... + - key: readability-identifier-naming.NamespaceCase + value: lower_case + - key: readability-identifier-naming.ClassCase + value: CamelCase + - key: readability-identifier-naming.StructCase + value: CamelCase + - key: readability-identifier-naming.EnumCase + value: CamelCase + - key: readability-identifier-naming.UnionCase + value: CamelCase + - key: readability-identifier-naming.TypedefCase + value: CamelCase + +# Variables, member variables, ... 
+ - key: readability-identifier-naming.ParameterCase + value: lower_case + - key: readability-identifier-naming.ParameterIgnoredRegexp + value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*' + - key: readability-identifier-naming.VariableCase + value: lower_case + - key: readability-identifier-naming.VariableIgnoredRegexp + value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*' + - key: readability-identifier-naming.MemberCase + value: lower_case + - key: readability-identifier-naming.PublicMemberCase + value: lower_case + - key: readability-identifier-naming.PublicMemberIgnoredRegexp + value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*' + - key: readability-identifier-naming.ProtectedMemberCase + value: lower_case + - key: readability-identifier-naming.ProtectedMemberSuffix + value: _ + - key: readability-identifier-naming.ProtectedMemberIgnoredRegexp + value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*' + - key: readability-identifier-naming.PrivateMemberCase + value: lower_case + - key: readability-identifier-naming.PrivateMemberIgnoredRegexp + value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*' + - key: readability-identifier-naming.PrivateMemberSuffix + value: _ + +# Functions, methods, ... 
+ - key: readability-identifier-naming.FunctionCase + value: camelBack + - key: readability-identifier-naming.MethodCase + value: camelBack + +# Constants + - key: readability-identifier-naming.ConstantPrefix + value: '' + - key: readability-identifier-naming.ConstantCase + value: UPPER_CASE + - key: readability-identifier-naming.ConstantMemberPrefix + value: '' + - key: readability-identifier-naming.ConstantMemberCase + value: lower_case + - key: readability-identifier-naming.ConstantMemberIgnoredRegexp + value: '^.*_$' + - key: readability-identifier-naming.ConstantParameterPrefix + value: '' + - key: readability-identifier-naming.ConstantParameterCase + value: lower_case + - key: readability-identifier-naming.ConstantParameterIgnoredRegexp + value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*' + - key: readability-identifier-naming.LocalConstantParameterPrefix + value: '' + - key: readability-identifier-naming.LocalConstantCase + value: lower_case + - key: readability-identifier-naming.LocalConstantIgnoredRegexp + value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*' + - key: readability-identifier-naming.ConstexprVariablePrefix + value: '' + - key: readability-identifier-naming.ConstexprVariableCase + value: UPPER_CASE diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a208c1a --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +cmake-build* +.idea +CMakeLists.txt.user +build* +scripts/eval/eval_* +scripts/eval_full/eval_* +stats_*.*json +__pycache__ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..2a9d109 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,474 @@ +image: vladyslavusenko/b_image_focal:latest + +variables: + GIT_SUBMODULE_STRATEGY: recursive + BUILD_TYPE: Release + CXX_MARCH: native + CMAKE_INSTALL_PREFIX: /usr/ + DEB_DIR: deb + +stages: + - docker + - build + - test + - eval + - results + - deploy + - repository + +# template for docker builds with ccache +.prepare_docker_template: 
&prepare_docker_definition + tags: + - docker + before_script: + - mkdir -p ccache + - export CCACHE_BASEDIR=${PWD} + - export CCACHE_DIR=${PWD}/ccache + - ccache -s + cache: + paths: + - ccache/ + key: ${CI_JOB_NAME} + +# template for build & unit test & make deb configurations +.compile_test_package_template: &compile_test_package_definition + stage: build + script: + - mkdir build + - cd build + - cmake .. -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCXX_MARCH=${CXX_MARCH} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} + - make -j3 + - ctest + - make package + - cd .. + - mkdir $DEB_DIR + - mv build/*.deb $DEB_DIR/ + - dpkg -i $DEB_DIR/*.deb + # smoke test to see if all executables at least start up + - basalt_calibrate --help + - basalt_calibrate_imu --help + - basalt_mapper --help + - basalt_mapper_sim --help + - basalt_mapper_sim_naive --help + - basalt_opt_flow --help + - basalt_vio --help + - basalt_vio_sim --help + +# template for build & unit test configurations (no deb) +.compile_test_template: &compile_test_definition + stage: build + script: + - mkdir build + - cd build + - cmake .. 
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCXX_MARCH=${CXX_MARCH} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} + - make -j3 + - ctest + # smoke test to see if all executables at least start up + - ./basalt_calibrate --help + - ./basalt_calibrate_imu --help + - ./basalt_mapper --help + - ./basalt_mapper_sim --help + - ./basalt_mapper_sim_naive --help + - ./basalt_opt_flow --help + - ./basalt_vio --help + - ./basalt_vio_sim --help + +# template to test debian +.test_deb_template: &test_deb_definition + stage: test + tags: + - docker + script: + - dpkg -i $DEB_DIR/*.deb + # smoke test to see if all executables at least start up + - basalt_calibrate --help + - basalt_calibrate_imu --help + - basalt_mapper --help + - basalt_mapper_sim --help + - basalt_mapper_sim_naive --help + - basalt_opt_flow --help + - basalt_vio --help + - basalt_vio_sim --help + +# template to evaluate on EuRoC sequences +.eval_euroc_template: &eval_euroc_definition + stage: eval + parallel: 10 + tags: [docker, dataset-eval] + variables: + GIT_STRATEGY: none + artifacts: + paths: + - scripts/eval_full/eval_results/* + expire_in: 1 week + script: + - dpkg -i deb_focal/*.deb + - cd scripts/eval_full + - ./run_evaluations.sh + +# template to evaluate on TUM-VI sequences +.eval_tumvi_template: &eval_tumvi_definition + stage: eval + parallel: 4 + tags: [docker, dataset-eval] + variables: + GIT_STRATEGY: none + artifacts: + paths: + - scripts/eval_full/eval_results_tumvi/* + expire_in: 1 week + script: + - dpkg -i deb_focal/*.deb + - cd scripts/eval_full + - ./run_evaluations_tumvi.sh + + +.eval_kitti_template: &eval_kitti_definition + stage: eval + parallel: 10 + tags: [docker, dataset-eval] + variables: + GIT_STRATEGY: none + artifacts: + paths: + - scripts/eval_full/eval_results_kitti/* + expire_in: 1 week + script: + - dpkg -i deb_focal/*.deb + - cd scripts/eval_full + - ./run_evaluations_kitti.sh + +bionic-release-compile: + <<: *prepare_docker_definition + <<: *compile_test_package_definition + image: 
vladyslavusenko/b_image_bionic:latest +# only: +# - master + variables: + # compile w/ clang; GCC 7.5 has multiple issues: magic_enum is not + # supported, Eigen hast false positive maybe-uninitialized + # warnings, structured bindings give false positive + # "unused-variable" warnings. Using GCC 9 from the Ubuntu's + # toolchain testing PPA would make the built debian package depend + # on the updatd libstdc++. So we use clang instead, which works + # with the default libstdc++ on bionic and doesn't lead to + # additional runtime dependencies for the debian package. + CC: clang-12 + CXX: clang++-12 + CXX_MARCH: 'haswell' + DEB_DIR: deb_bionic + artifacts: + paths: + - deb_bionic/*.deb + expire_in: 1 week + +focal-release-compile: + <<: *prepare_docker_definition + <<: *compile_test_package_definition + image: vladyslavusenko/b_image_focal:latest + variables: + CXX_MARCH: 'haswell' + DEB_DIR: deb_focal + artifacts: + paths: + - deb_focal/*.deb + - scripts/eval_full/* + expire_in: 1 week + +focal-debug-compile: + <<: *prepare_docker_definition + <<: *compile_test_package_definition + image: vladyslavusenko/b_image_focal:latest + only: + - master + variables: + BUILD_TYPE: CiDebug + +focal-relwithdebinfo-compile: + <<: *prepare_docker_definition + <<: *compile_test_package_definition + image: vladyslavusenko/b_image_focal:latest + variables: + BUILD_TYPE: CiRelWithDebInfo + only: + - master + +focal-gcc10-relwithdebinfo-compile: + <<: *prepare_docker_definition + image: vladyslavusenko/b_image_focal:latest + variables: + BUILD_TYPE: CiRelWithDebInfo + CC: gcc-10 + CXX: g++-10 + only: +# - master + stage: build + script: + - add-apt-repository ppa:ubuntu-toolchain-r/ppa -y + - apt-get update + - apt-get install -y g++-10 + - mkdir build + - cd build + - cmake .. 
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCXX_MARCH=${CXX_MARCH} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} + - make -j3 + - ctest + # smoke test to see if all executables at least start up + - ./basalt_calibrate --help + - ./basalt_calibrate_imu --help + - ./basalt_mapper --help + - ./basalt_mapper_sim --help + - ./basalt_mapper_sim_naive --help + - ./basalt_opt_flow --help + - ./basalt_vio --help + - ./basalt_vio_sim --help + +# Compilation with GCC 11 is broken due to a bug that is fixed only in future relases 11.3 and 12 +# See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100438 +# See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99744#c15 +# focal-gcc11-relwithdebinfo-compile: +# <<: *prepare_docker_definition +# image: vladyslavusenko/b_image_focal:latest +# variables: +# BUILD_TYPE: CiRelWithDebInfo +# CC: gcc-11 +# CXX: g++-11 +# only: +# # - master +# stage: build +# script: +# - add-apt-repository ppa:ubuntu-toolchain-r/test -y +# - apt-get update +# - apt-get install -y g++-11 +# - mkdir build +# - cd build +# - cmake .. 
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCXX_MARCH=${CXX_MARCH} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} +# - make -j3 +# - ctest +# # smoke test to see if all executables at least start up +# - ./basalt_calibrate --help +# - ./basalt_calibrate_imu --help +# - ./basalt_mapper --help +# - ./basalt_mapper_sim --help +# - ./basalt_mapper_sim_naive --help +# - ./basalt_opt_flow --help +# - ./basalt_vio --help +# - ./basalt_vio_sim --help + +catalina-relwithdebinfo-compile: + <<: *compile_test_definition + tags: [macos, "10.15"] + only: +# - master + variables: + BUILD_TYPE: CiRelWithDebInfo + +catalina-brewclang-relwithdebinfo-compile: + <<: *compile_test_definition + tags: [macos, "10.15"] + only: +# - master + variables: + BUILD_TYPE: CiRelWithDebInfo + CC: /usr/local/opt/llvm/bin/clang + CXX: /usr/local/opt/llvm/bin/clang++ + +bigsur-relwithdebinfo-compile: + <<: *compile_test_definition + tags: [macos, "11", x86_64] + only: +# - master + variables: + BUILD_TYPE: CiRelWithDebInfo + +monterey-arm-relwithdebinfo-compile: + <<: *compile_test_definition + tags: [macos, "12", arm64] + only: +# - master + variables: + BUILD_TYPE: CiRelWithDebInfo + + +# check if clang-format would make any changes +clang-format: + tags: + - docker + stage: build + variables: + GIT_SUBMODULE_STRATEGY: none + script: + - ./scripts/clang-format-all.sh + # check if any files are now modified and error if yes + - (if [ -n "`git diff --name-only --diff-filter=M --ignore-submodules`" ]; then echo $'\n Some files are not properly formatted. 
You can use "./scripts/clang-format-all.sh".\n'; git diff --diff-filter=M; false; fi) + +bionic-test: + <<: *test_deb_definition + image: vladyslavusenko/b_image_bionic:latest + only: + - master + variables: + DEB_DIR: deb_bionic + GIT_STRATEGY: none + +focal-test: + <<: *test_deb_definition + image: vladyslavusenko/b_image_focal:latest + only: + - master + variables: + DEB_DIR: deb_focal + GIT_STRATEGY: none + +# evaluate on EuRoC sequences +eval_euroc_master: + <<: *eval_euroc_definition + only: + - master + +# evaluate on EuRoC sequences +eval_euroc: + <<: *eval_euroc_definition + when: manual + except: + - master + allow_failure: false + +# evaluate on TUM-VI sequences +eval_tumvi_master: + <<: *eval_tumvi_definition + only: + - master + +# evaluate on TUM-VI sequences +eval_tumvi: + <<: *eval_tumvi_definition + when: manual + except: + - master + allow_failure: false + +# evaluate on KITTI sequences +eval_kitti_master: + <<: *eval_kitti_definition + only: + - master + +# evaluate on KITTI sequences +eval_kitti: + <<: *eval_kitti_definition + when: manual + except: + - master + allow_failure: false + +# aggregate results for all EuRoC sequences +gen_results: + stage: results + variables: + GIT_STRATEGY: none + tags: + - docker + when: on_success + artifacts: + paths: + - euroc_results.txt + - kitti_results.txt + - tumvi_results.txt + - scripts/eval_full/eval_results/* + script: + - cd scripts/eval_full + - ./gen_results.py eval_results > euroc_results.txt + - cat euroc_results.txt + - ./gen_results_kitti.py eval_results_kitti > kitti_results.txt + - cat kitti_results.txt + - ./gen_results_tumvi.py eval_results_tumvi > tumvi_results.txt + - cat tumvi_results.txt + - mv euroc_results.txt ../../ + - mv kitti_results.txt ../../ + - mv tumvi_results.txt ../../ + +# deploy deb packages +deploy: + stage: deploy + when: manual + variables: + GIT_STRATEGY: none + tags: + - docker + only: + - master + before_script: + - eval $(ssh-agent -s) + - echo "$SSH_PRIVATE_KEY" | 
tr -d '\r' | ssh-add - > /dev/null + - mkdir -p ~/.ssh + - chmod 700 ~/.ssh + - echo "$SSH_KNOWN_HOSTS" > ~/.ssh/known_hosts + - chmod 644 ~/.ssh/known_hosts + script: + - scp $SCP_FLAGS deb_bionic/*.deb $REPOSITORY_URL/bionic/ + - scp $SCP_FLAGS deb_focal/*.deb $REPOSITORY_URL/focal/ + + +.docker_build_template: &docker_build_definition + image: docker:stable + stage: docker + when: manual + only: + - master + services: + - docker:stable-dind + tags: + - docker-dind + before_script: + - echo "$CI_REGISTRY_PASSWORD" | docker login -u "$CI_REGISTRY_USER" --password-stdin $CI_REGISTRY + script: + - cd docker/"$B_IMAGE_NAME" + - docker build -t "$CI_REGISTRY_IMAGE"/"$B_IMAGE_NAME" . + - docker push "$CI_REGISTRY_IMAGE"/"$B_IMAGE_NAME" + + +docker-build-bionic: + <<: *docker_build_definition + variables: + DOCKER_TLS_CERTDIR: '/certs' + GIT_SUBMODULE_STRATEGY: none + B_IMAGE_NAME: b_image_bionic + +docker-build-focal: + <<: *docker_build_definition + variables: + DOCKER_TLS_CERTDIR: '/certs' + GIT_SUBMODULE_STRATEGY: none + B_IMAGE_NAME: b_image_focal + + + +.repository_check_template: &repository_check_definition + stage: repository + when: manual + tags: + - docker + only: + - master + script: + - apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y gnupg2 rsync linux-generic + - apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 0AD9A3000D97B6C9 + - echo "deb [arch=amd64] http://packages.usenko.eu/ubuntu $REPO_NAME $REPO_NAME/main" > /etc/apt/sources.list.d/basalt.list + - apt-get update && apt-get dist-upgrade -y + - DEBIAN_FRONTEND=noninteractive apt-get install -y basalt + - basalt_rs_t265_vio --help + - basalt_rs_t265_record --help + +bionic-repository-check: + image: ubuntu:18.04 + <<: *repository_check_definition + variables: + GIT_STRATEGY: none + REPO_NAME: bionic + +focal-repository-check: + image: ubuntu:20.04 + <<: *repository_check_definition + variables: + GIT_STRATEGY: none + REPO_NAME: focal diff --git a/.gitmodules 
b/.gitmodules new file mode 100644 index 0000000..fb57be6 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,25 @@ +[submodule "thirdparty/CLI11"] + path = thirdparty/CLI11 + url = https://github.com/CLIUtils/CLI11.git +[submodule "thirdparty/opengv"] + path = thirdparty/opengv + url = https://github.com/laurentkneip/opengv.git +[submodule "thirdparty/Pangolin"] + path = thirdparty/Pangolin + url = https://github.com/stevenlovegrove/Pangolin.git +[submodule "thirdparty/ros_comm"] + path = thirdparty/ros/ros_comm + url = https://github.com/ros/ros_comm.git +[submodule "thirdparty/roscpp_core"] + path = thirdparty/ros/roscpp_core +# url = https://github.com/ros/roscpp_core.git + url = https://github.com/NikolausDemmel/roscpp_core.git +[submodule "thirdparty/console_bridge"] + path = thirdparty/ros/console_bridge + url = https://github.com/ros/console_bridge.git +[submodule "thirdparty/basalt-headers"] + path = thirdparty/basalt-headers + url = https://gitlab.com/VladyslavUsenko/basalt-headers.git +[submodule "thirdparty/magic_enum"] + path = thirdparty/magic_enum + url = https://github.com/Neargye/magic_enum.git diff --git a/.style.yapf b/.style.yapf new file mode 100644 index 0000000..b3d849f --- /dev/null +++ b/.style.yapf @@ -0,0 +1,3 @@ +[style] +based_on_style = google +column_limit = 120 diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..a8a15b9 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,411 @@ +cmake_minimum_required(VERSION 3.10...3.18) + +project(basalt) + +set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake_modules/" ${CMAKE_MODULE_PATH}) + +if(NOT EIGEN_ROOT) + set(EIGEN_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/basalt-headers/thirdparty/eigen") +endif() + +string(TOLOWER "${PROJECT_NAME}" PROJECT_NAME_LOWERCASE) +find_program(DPKG_PROGRAM dpkg DOC "dpkg program of Debian-based systems") +if(DPKG_PROGRAM) + execute_process( + COMMAND ${DPKG_PROGRAM} --print-architecture + OUTPUT_VARIABLE CPACK_DEBIAN_PACKAGE_ARCHITECTURE + 
OUTPUT_STRIP_TRAILING_WHITESPACE) +endif(DPKG_PROGRAM) + + +find_program(LSB_RELEASE_PROGRAM lsb_release DOC "lsb_release program of Debian-based systems") +if(LSB_RELEASE_PROGRAM) + execute_process(COMMAND ${LSB_RELEASE_PROGRAM} -rs + OUTPUT_VARIABLE LSB_RELEASE_ID_SHORT + OUTPUT_STRIP_TRAILING_WHITESPACE) + + if(${LSB_RELEASE_ID_SHORT} EQUAL "20.04") + set(DEBIAN_DEPENDS "libtbb2, liblz4-1, libbz2-1.0, libboost-filesystem1.71.0, libboost-date-time1.71.0, libboost-program-options1.71.0, libboost-regex1.71.0, libopencv-dev, libglew2.1, libjpeg8, libpng16-16, librealsense2, librealsense2-dkms, librealsense2-gl, librealsense2-utils") + + elseif(${LSB_RELEASE_ID_SHORT} EQUAL "18.04") + set(DEBIAN_DEPENDS "libtbb2, liblz4-1, libbz2-1.0, libboost-filesystem1.65.1, libboost-date-time1.65.1, libboost-program-options1.65.1, libboost-regex1.65.1, libopencv-dev, libglew2.0, libjpeg8, libpng16-16, librealsense2, librealsense2-dkms, librealsense2-gl, librealsense2-utils") + + elseif(${LSB_RELEASE_ID_SHORT} EQUAL "16.04") + set(DEBIAN_DEPENDS "libtbb2, liblz4-1, libbz2-1.0, libboost-filesystem1.58.0, libboost-date-time1.58.0, libboost-program-options1.58.0, libboost-regex1.58.0, libopencv-dev, libglew1.13, libjpeg8, libpng12-0, libstdc++6, librealsense2, librealsense2-dkms, librealsense2-gl, librealsense2-utils") + endif() + +endif(LSB_RELEASE_PROGRAM) + +string(TIMESTAMP PROJECT_VERSION_REVISION "%Y%m%d%H%M") + +set(CPACK_GENERATOR "DEB") +set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Vladyslav Usenko ") +set(CPACK_PACKAGE_VERSION_MAJOR "0") +set(CPACK_PACKAGE_VERSION_MINOR "1") +set(CPACK_PACKAGE_VERSION_PATCH "0-${PROJECT_VERSION_REVISION}~${LSB_RELEASE_ID_SHORT}") +set(CPACK_DEBIAN_PACKAGE_DEPENDS ${DEBIAN_DEPENDS}) +set(CPACK_PACKAGE_FILE_NAME "${PROJECT_NAME_LOWERCASE}_${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}_${CPACK_DEBIAN_PACKAGE_ARCHITECTURE}") +set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") 
+include(CPack) + + +# Configure CCache if available +if (NOT CMAKE_C_COMPILER_LAUNCHER AND NOT CMAKE_CXX_COMPILER_LAUNCHER) + find_program(CCACHE_PROGRAM ccache) + if(CCACHE_PROGRAM) + message(STATUS "Found ccache: ${CCACHE_PROGRAM}") + set(CMAKE_C_COMPILER_LAUNCHER ${CCACHE_PROGRAM}) + set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_PROGRAM}) + else() + message(STATUS "Dind't find ccache") + endif() +else() + message(STATUS "Compiler launcher already set. Not configuring ccache.") + message(STATUS "CMAKE_C_COMPILER_LAUNCHER: ${CMAKE_C_COMPILER_LAUNCHER}") + message(STATUS "CMAKE_CXX_COMPILER_LAUNCHER: ${CMAKE_CXX_COMPILER_LAUNCHER}") +endif() + +if( NOT CMAKE_BUILD_TYPE ) + set( CMAKE_BUILD_TYPE Release) +endif() + +if(NOT CXX_MARCH) + set(CXX_MARCH native) +endif() + +IF(NOT APPLE OR NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64") + set(BASALT_MARCH_FLAGS "-march=${CXX_MARCH}") +ELSE() + message(STATUS "Running on Apple ${CMAKE_SYSTEM_PROCESSOR}. Disabled -march flag.") +ENDIF() + + + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) +set(CMAKE_POSITION_INDEPENDENT_CODE ON) + + +# Flags used for CHECK_CXX_SOURCE_COMPILES +set(CMAKE_REQUIRED_FLAGS "-Wno-error") + + +# save flags passed by user +set(BASALT_PASSED_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + +set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -DEIGEN_INITIALIZE_MATRICES_BY_NAN") # cmake default: "-g" +set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DEIGEN_INITIALIZE_MATRICES_BY_NAN") # cmake default: "-O2 -g -DNDEBUG" +set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG") # cmake default: "-O3 -DNDEBUG" +set(CMAKE_CXX_FLAGS_CIDEBUG "-O0 -DEIGEN_INITIALIZE_MATRICES_BY_NAN") # CI version with no debug symbols +set(CMAKE_CXX_FLAGS_CIRELWITHDEBINFO "-O3 -DEIGEN_INITIALIZE_MATRICES_BY_NAN") # CI version with no debug symbols + +# base set of compile flags +set(BASALT_CXX_FLAGS "-Wall -Wextra -Werror -Wno-error=unused-parameter -ftemplate-backtrace-limit=0") + +# clang-specific compile flags 
+if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") + set(BASALT_CXX_FLAGS "${BASALT_CXX_FLAGS} -Wno-exceptions -fcolor-diagnostics -frelaxed-template-template-args -Wno-error=deprecated-declarations") + + if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10) + # These are disabled to avoid lot's of warnings in Eigen code with clang 10 + set(BASALT_CXX_FLAGS "${BASALT_CXX_FLAGS} -Wno-misleading-indentation -Wno-error=deprecated-copy") + endif() + + # - Added TBB_USE_GLIBCXX_VERSION macro to specify the version of GNU + # libstdc++ when it cannot be properly recognized, e.g. when used + # with Clang on Linux* OS. Adopted from https://github.com/wjakob/tbb + if(NOT TBB_USE_GLIBCXX_VERSION AND UNIX AND NOT APPLE) + if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + string(REPLACE "." "0" TBB_USE_GLIBCXX_VERSION ${CMAKE_CXX_COMPILER_VERSION}) + endif() + add_definitions(-DTBB_USE_GLIBCXX_VERSION=${TBB_USE_GLIBCXX_VERSION}) + endif() +else() + set(BASALT_CXX_FLAGS "${BASALT_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=implicit-fallthrough") + + if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9) + # These are disabled to avoid lot's of warnings in Eigen code with gcc-9 + set(BASALT_CXX_FLAGS "${BASALT_CXX_FLAGS} -Wno-error=deprecated-copy") + endif() + if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10.0) + # These are disabled due to warnings in pangolin + #set(BASALT_CXX_FLAGS "${BASALT_CXX_FLAGS} -Wno-error=parentheses") + set(BASALT_CXX_FLAGS "${BASALT_CXX_FLAGS} -Wno-parentheses") + endif() +endif() + + +# Set platform / compiler specific compile flags and checks +if(APPLE) + # Need to investigate how to reliably detect and use OpenMP on macOS... +# set(USE_OPENMP_DEFAULT OFF) + + # Among others, setting CMAKE_FIND_FRAMEWORK to LAST fixed issues + # with installed Mono that contains old headers (libpng, ...). 
+ # See: https://github.com/openMVG/openMVG/issues/1349#issuecomment-401492811 + set(CMAKE_FIND_FRAMEWORK LAST) + + if(CMAKE_SYSTEM_VERSION VERSION_LESS 19.0.0) + # use brewed llvm's libc++ + include_directories("/usr/local/opt/llvm/include/c++/v1") + link_directories("/usr/local/opt/llvm/lib") + add_compile_options("-nostdinc++") + #set(STD_CXX_FS c++fs) + + # Workaround for cmake not to filter the manually added standard include path + # See: https://gitlab.kitware.com/cmake/cmake/issues/19227#note_669894 + list(REMOVE_ITEM CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES "/usr/local/opt/llvm/include/c++/v1") + endif() + + if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + message(STATUS "Detected macOS with non-Apple clang") + + elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") + message(STATUS "Detected macOS with Apple clang") + # Apple clang on macOS < 10.14 Mojave is too old + if(CMAKE_SYSTEM_VERSION VERSION_LESS 18.0.0) + message(WARNING "Detected Darwin version ${CMAKE_SYSTEM_VERSION}, which is earlier than macos 10.14 Mojave. Apple clang is too old and not supported. Use clang from homebrew.") + endif() + + else() + message(WARNING "Detected macOS with unsupported compiler ${CMAKE_CXX_COMPILER_ID}") + endif() + +elseif(UNIX) +# set(USE_OPENMP_DEFAULT ON) + + # assume libstdc++ + set(STD_CXX_FS stdc++fs) + + if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + message(STATUS "Detected Linux with clang.") + + # Note: Whatever the issue was, this seems to not be an issue any more, at least with more recent clang and Ubuntu versions (tested clang-12 and Ubuntu 18.04) + #message(WARNING "Clang on Linux is currently not fully supported. 
You'll likely need to get a recent version of TBB.") + #set(LINUX_CLANG 1) + + elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + message(STATUS "Detected Linux with gcc.") + + else() + message(WARNING "Detected Linux with unsupported compiler ${CMAKE_CXX_COMPILER_ID}") + endif() + +else() + message(WARNING "Only Linux and macOS are currently supported") +endif() + + +# OpenMP option and compile flags +# +# Note: OpenMP and TBB don't mix well, so we disable Eigen's parallelization. +# It's trying to paralellize matrix products during SC, which we run in a parallel_reduce using TBB. +# Turns out using OpenMP can slow down the computby factor 10-100x! So for now we discable it completely. +# One place where Eigen's parallelization could still have been useful is the CG solver in the mapper. +# We could in the future investiagte other implementations (paralellized with TBB) or selectively enabling +# Eigen's parallelization just for CG, setting number of threads to 1 everywhere else. +# Another way to ensure Eigen doesn't use OpenMP regardless of how it was built is setting the environment +# variable OMP_NUM_THREADS=1 beofre running the application. +# +# See: https://eigen.tuxfamily.org/dox/TopicMultiThreading.html +# +# If we enable BLAS / LAPACK either directly or via thirdparty libs like ceres, +# make sure to disable OpenMP for the linked BLAS library. In particular on Ubuntu it seems OpenBLAS is often installed, +# and it can have similar issues in multithreaded applications if it's own parallelization with OpenMP is enabled. +# You can set the environment varaibles OPENBLAS_NUM_THREADS=1 or OMP_NUM_THREADS=1. This is also mentioned in the ceres +# installation documentation. +# +# See also: https://github.com/xianyi/OpenBLAS/wiki/faq#multi-threaded +# +# Set EIGEN_DONT_PARALLELIZE to be sure it doesn't use openmp, +# just in case some dependency enables openmp without us noticing. 
+set(BASALT_CXX_FLAGS "${BASALT_CXX_FLAGS} -DEIGEN_DONT_PARALLELIZE") + +#option(USE_OPENMP "Use OpenMP (e.g. for parallel computation in Eigen)" ${USE_OPENMP_DEFAULT}) +#if(USE_OPENMP) +# message(STATUS "OpenMP Enabled") +# set(BASALT_CXX_FLAGS "${BASALT_CXX_FLAGS} -fopenmp") +#else() +# message(STATUS "OpenMP Disabled") +#endif() + +# Make specialization for double / float optional. +# Disabling them can be useful for better compile times during development. +option(BASALT_INSTANTIATIONS_DOUBLE "Instatiate templates for Scalar=double." ON) +option(BASALT_INSTANTIATIONS_FLOAT "Instatiate templates for Scalar=float." ON) + +if(BASALT_INSTANTIATIONS_DOUBLE) + list(APPEND BASALT_COMPILE_DEFINITIONS BASALT_INSTANTIATIONS_DOUBLE) +endif() +if(BASALT_INSTANTIATIONS_FLOAT) + list(APPEND BASALT_COMPILE_DEFINITIONS BASALT_INSTANTIATIONS_FLOAT) +endif() + + +# setup combined compiler flags +set(CMAKE_CXX_FLAGS "${BASALT_CXX_FLAGS} ${BASALT_MARCH_FLAGS} ${BASALT_PASSED_CXX_FLAGS}") + + +set(EIGEN_INCLUDE_DIR_HINTS ${EIGEN_ROOT}) +find_package(Eigen3 3.4.0 EXACT REQUIRED MODULE) +message(STATUS "Found Eigen headers in: ${EIGEN3_INCLUDE_DIR}") +if(NOT EIGEN3_INCLUDE_DIR MATCHES "^${EIGEN_ROOT}") + message(WARNING "Found Eigen headers are outside of specified EIGEN_ROOT '${EIGEN_ROOT}'") +endif() + +find_package(TBB REQUIRED) +message(STATUS "Found TBB ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR} (interface version ${TBB_INTERFACE_VERSION}) headers in: ${TBB_INCLUDE_DIRS}") +if (TBB_INTERFACE_VERSION LESS 11004) + # enable global_control header for earlier TBB versions (Ubuntu 16.04, 18.04) + add_definitions(-DTBB_PREVIEW_GLOBAL_CONTROL) +endif() + +# NOTE: not specifying version, since 2, 3 or 4 is fine +find_package(OpenCV REQUIRED COMPONENTS core imgproc calib3d highgui) +message(STATUS "Found OpenCV ${OpenCV_VERSION} headers in: ${OpenCV_INCLUDE_DIRS}") +message(STATUS "Found OpenCV_LIBS: ${OpenCV_LIBS}") + +# Add our own custom scoped opencv target since none is provided by 
OpenCV itself +add_library(basalt::opencv INTERFACE IMPORTED) +set_property(TARGET basalt::opencv PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${OpenCV_INCLUDE_DIRS}) +set_property(TARGET basalt::opencv PROPERTY INTERFACE_LINK_LIBRARIES ${OpenCV_LIBS}) + +find_package(fmt REQUIRED) +message(STATUS "Found {fmt} ${fmt_VERSION} in: ${fmt_DIR}") + +add_subdirectory(thirdparty) + +# custom scoped cli11 target +add_library(basalt::cli11 INTERFACE IMPORTED) +set_property(TARGET basalt::cli11 PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/CLI11/include) + +# custom scoped magic_enum target +add_library(basalt::magic_enum INTERFACE IMPORTED) +set_property(TARGET basalt::magic_enum PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/magic_enum/include) + + +add_library(basalt SHARED + src/io/dataset_io.cpp + src/io/marg_data_io.cpp + src/calibration/aprilgrid.cpp + src/calibration/calibraiton_helper.cpp + src/calibration/vignette.cpp + src/optical_flow/optical_flow.cpp + src/linearization/landmark_block.cpp + src/linearization/linearization_base.cpp + src/linearization/linearization_abs_qr.cpp + src/linearization/linearization_abs_sc.cpp + src/linearization/linearization_rel_sc.cpp + src/utils/vio_config.cpp + src/utils/system_utils.cpp + src/utils/time_utils.cpp + src/utils/keypoints.cpp + src/vi_estimator/marg_helper.cpp + src/vi_estimator/sqrt_keypoint_vio.cpp + src/vi_estimator/sqrt_keypoint_vo.cpp + src/vi_estimator/vio_estimator.cpp + src/vi_estimator/ba_base.cpp + src/vi_estimator/sqrt_ba_base.cpp + src/vi_estimator/sc_ba_base.cpp + src/vi_estimator/nfr_mapper.cpp + src/vi_estimator/landmark_database.cpp) + + +target_link_libraries(basalt + PUBLIC ${STD_CXX_FS} basalt::opencv basalt::basalt-headers TBB::tbb + PRIVATE basalt::magic_enum rosbag apriltag opengv nlohmann::json fmt::fmt) +target_include_directories(basalt PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_compile_definitions(basalt PUBLIC 
${BASALT_COMPILE_DEFINITIONS}) +#target_compile_definitions(basalt PUBLIC BASALT_DISABLE_ASSERTS) + + +add_executable(basalt_calibrate src/calibrate.cpp src/calibration/cam_calib.cpp) +target_link_libraries(basalt_calibrate basalt pangolin basalt::cli11) + +add_executable(euler2RotTest test/ivan-tests/euler2RotTest.cpp) + +add_executable(basalt_calibrate_imu src/calibrate_imu.cpp src/calibration/cam_imu_calib.cpp) +target_link_libraries(basalt_calibrate_imu basalt pangolin basalt::cli11) + + +add_executable(basalt_vio_sim src/vio_sim.cpp) +target_link_libraries(basalt_vio_sim basalt pangolin basalt::cli11) + +add_executable(basalt_mapper_sim src/mapper_sim.cpp) +target_link_libraries(basalt_mapper_sim basalt pangolin basalt::cli11) + +# mapper sim native doesn't use template free interface +if(BASALT_INSTANTIATIONS_DOUBLE) + add_executable(basalt_mapper_sim_naive src/mapper_sim_naive.cpp) + target_link_libraries(basalt_mapper_sim_naive basalt pangolin basalt::cli11) +endif() + +add_executable(basalt_mapper src/mapper.cpp) +target_link_libraries(basalt_mapper basalt pangolin basalt::cli11) + + +add_executable(basalt_opt_flow src/opt_flow.cpp) +target_link_libraries(basalt_opt_flow basalt pangolin basalt::cli11) + +add_executable(basalt_vio src/vio.cpp) +target_link_libraries(basalt_vio basalt pangolin basalt::cli11) + +add_executable(basalt_time_alignment src/time_alignment.cpp) +target_link_libraries(basalt_time_alignment basalt pangolin basalt::cli11) + +add_executable(basalt_kitti_eval src/kitti_eval.cpp) +target_link_libraries(basalt_kitti_eval basalt::basalt-headers basalt::cli11) + +find_package(realsense2 QUIET) +if(realsense2_FOUND) + add_executable(basalt_rs_t265_record src/rs_t265_record.cpp src/device/rs_t265.cpp) + target_link_libraries(basalt_rs_t265_record basalt realsense2::realsense2 pangolin basalt::cli11) + + add_executable(basalt_rs_t265_vio src/rs_t265_vio.cpp src/device/rs_t265.cpp) + target_link_libraries(basalt_rs_t265_vio basalt 
realsense2::realsense2 pangolin basalt::cli11) +endif() + + + +install(TARGETS basalt_calibrate basalt_calibrate_imu basalt_vio_sim basalt_mapper_sim basalt_mapper basalt_opt_flow basalt_vio basalt_kitti_eval basalt_time_alignment basalt + EXPORT BasaltTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/bin + LIBRARY DESTINATION ${CMAKE_INSTALL_PREFIX}/lib + ARCHIVE DESTINATION ${CMAKE_INSTALL_PREFIX}/lib) + +if(BASALT_INSTANTIATIONS_DOUBLE) + install(TARGETS basalt_mapper_sim_naive + EXPORT BasaltTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/bin + LIBRARY DESTINATION ${CMAKE_INSTALL_PREFIX}/lib + ARCHIVE DESTINATION ${CMAKE_INSTALL_PREFIX}/lib) +endif() + +if(realsense2_FOUND) + install(TARGETS basalt_rs_t265_record basalt_rs_t265_vio + EXPORT BasaltTargets + RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/bin + LIBRARY DESTINATION ${CMAKE_INSTALL_PREFIX}/lib + ARCHIVE DESTINATION ${CMAKE_INSTALL_PREFIX}/lib) +endif() + +file(GLOB SCRIPTS_TO_INSTALL "${CMAKE_CURRENT_SOURCE_DIR}/scripts/basalt_*.py") +install(PROGRAMS ${SCRIPTS_TO_INSTALL} DESTINATION ${CMAKE_INSTALL_PREFIX}/bin) + +file(GLOB CONFIG_FILES "${CMAKE_CURRENT_SOURCE_DIR}/data/*.json") +install(FILES ${CONFIG_FILES} + DESTINATION ${CMAKE_INSTALL_PREFIX}/etc/basalt) + + +# Replace install() to do-nothing macro. +macro(install) +endmacro() +# Include subproject (or any other CMake code) with "disabled" install(). +enable_testing() +add_subdirectory(thirdparty/basalt-headers/test) +add_subdirectory(test) +# Restore original install() behavior. +macro(install) + _install(${ARGN}) +endmacro() diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..03fd6f3 --- /dev/null +++ b/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..ee2d7d3 --- /dev/null +++ b/README.md @@ -0,0 +1,75 @@ +[![pipeline status](https://gitlab.com/VladyslavUsenko/basalt/badges/master/pipeline.svg)](https://gitlab.com/VladyslavUsenko/basalt/commits/master) + +## Basalt +For more information see https://vision.in.tum.de/research/vslam/basalt + +![teaser](doc/img/teaser.png) + +This project contains tools for: +* Camera, IMU and motion capture calibration. +* Visual-inertial odometry and mapping. 
+* Simulated environment to test different components of the system. + +Some reusable components of the system are available as a separate [header-only library](https://gitlab.com/VladyslavUsenko/basalt-headers) ([Documentation](https://vladyslavusenko.gitlab.io/basalt-headers/)). + +There is also a [Github mirror](https://github.com/VladyslavUsenko/basalt-mirror) of this project to enable easy forking. + +## Related Publications +Visual-Inertial Odometry and Mapping: +* **Visual-Inertial Mapping with Non-Linear Factor Recovery**, V. Usenko, N. Demmel, D. Schubert, J. Stückler, D. Cremers, In IEEE Robotics and Automation Letters (RA-L) [[DOI:10.1109/LRA.2019.2961227]](https://doi.org/10.1109/LRA.2019.2961227) [[arXiv:1904.06504]](https://arxiv.org/abs/1904.06504). + +Calibration (explains implemented camera models): +* **The Double Sphere Camera Model**, V. Usenko and N. Demmel and D. Cremers, In 2018 International Conference on 3D Vision (3DV), [[DOI:10.1109/3DV.2018.00069]](https://doi.org/10.1109/3DV.2018.00069), [[arXiv:1807.08957]](https://arxiv.org/abs/1807.08957). + +Calibration (demonstrates how these tools can be used for dataset calibration): +* **The TUM VI Benchmark for Evaluating Visual-Inertial Odometry**, D. Schubert, T. Goll, N. Demmel, V. Usenko, J. Stückler, D. Cremers, In 2018 International Conference on Intelligent Robots and Systems (IROS), [[DOI:10.1109/IROS.2018.8593419]](https://doi.org/10.1109/IROS.2018.8593419), [[arXiv:1804.06120]](https://arxiv.org/abs/1804.06120). + +Calibration (describes B-spline trajectory representation used in camera-IMU calibration): +* **Efficient Derivative Computation for Cumulative B-Splines on Lie Groups**, C. Sommer, V. Usenko, D. Schubert, N. Demmel, D. Cremers, In 2020 Conference on Computer Vision and Pattern Recognition (CVPR), [[DOI:10.1109/CVPR42600.2020.01116]](https://doi.org/10.1109/CVPR42600.2020.01116), [[arXiv:1911.08860]](https://arxiv.org/abs/1911.08860). 
+ +Optimization (describes square-root optimization and marginalization used in VIO/VO): +* **Square Root Marginalization for Sliding-Window Bundle Adjustment**, N. Demmel, D. Schubert, C. Sommer, D. Cremers, V. Usenko, In 2021 International Conference on Computer Vision (ICCV), [[arXiv:2109.02182]](https://arxiv.org/abs/2109.02182) + + +## Installation +### APT installation for Ubuntu 20.04 and 18.04 (Fast) +Set up keys, add the repository to the sources list, update the Ubuntu package index and install Basalt: +``` +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 0AD9A3000D97B6C9 +sudo sh -c 'echo "deb [arch=amd64] http://packages.usenko.eu/ubuntu $(lsb_release -sc) $(lsb_release -sc)/main" > /etc/apt/sources.list.d/basalt.list' +sudo apt-get update +sudo apt-get dist-upgrade +sudo apt-get install basalt +``` + +### Source installation for Ubuntu >= 18.04 and MacOS >= 10.14 Mojave +Clone the source code for the project and build it. For MacOS you should have [Homebrew](https://brew.sh/) installed. +``` +git clone --recursive https://gitlab.com/VladyslavUsenko/basalt.git +cd basalt +./scripts/install_deps.sh +mkdir build +cd build +cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo +make -j8 +``` + +## Usage +* [Camera, IMU and Mocap calibration. (TUM-VI, Euroc, UZH-FPV and Kalibr datasets)](doc/Calibration.md) +* [Visual-inertial odometry and mapping. (TUM-VI and Euroc datasets)](doc/VioMapping.md) +* [Visual odometry (no IMU). (KITTI dataset)](doc/Vo.md) +* [Simulation tools to test different components of the system.](doc/Simulation.md) +* [Batch evaluation tutorial (ICCV'21 experiments)](doc/BatchEvaluation.md) + +## Device support +* [Tutorial on Camera-IMU and Motion capture calibration with Realsense T265.](doc/Realsense.md) + +## Development +* [Development environment setup.](doc/DevSetup.md) + +## Licence +The code is provided under a BSD 3-clause license. See the LICENSE file for details. 
+Note also the different licenses of thirdparty submodules. + +Some improvements are ported back from the fork +[granite](https://github.com/DLR-RM/granite) (MIT license). diff --git a/cmake_modules/FindEigen3.cmake b/cmake_modules/FindEigen3.cmake new file mode 100644 index 0000000..9826d69 --- /dev/null +++ b/cmake_modules/FindEigen3.cmake @@ -0,0 +1,82 @@ +# - Try to find Eigen3 lib +# +# This module supports requiring a minimum version, e.g. you can do +# find_package(Eigen3 3.1.2) +# to require version 3.1.2 or newer of Eigen3. +# +# Once done this will define +# +# EIGEN3_FOUND - system has eigen lib with correct version +# EIGEN3_INCLUDE_DIR - the eigen include directory +# EIGEN3_VERSION - eigen version + +# Copyright (c) 2006, 2007 Montel Laurent, +# Copyright (c) 2008, 2009 Gael Guennebaud, +# Copyright (c) 2009 Benoit Jacob +# Redistribution and use is allowed according to the terms of the 2-clause BSD license. + +# Adaptations (c) Nikolaus Demmel 2019 +# - pass NO_DEFAULT_PATH --> only works when passing exact HINTS + + +if(NOT Eigen3_FIND_VERSION) + if(NOT Eigen3_FIND_VERSION_MAJOR) + set(Eigen3_FIND_VERSION_MAJOR 2) + endif(NOT Eigen3_FIND_VERSION_MAJOR) + if(NOT Eigen3_FIND_VERSION_MINOR) + set(Eigen3_FIND_VERSION_MINOR 91) + endif(NOT Eigen3_FIND_VERSION_MINOR) + if(NOT Eigen3_FIND_VERSION_PATCH) + set(Eigen3_FIND_VERSION_PATCH 0) + endif(NOT Eigen3_FIND_VERSION_PATCH) + + set(Eigen3_FIND_VERSION "${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}") +endif(NOT Eigen3_FIND_VERSION) + +macro(_eigen3_check_version) + file(READ "${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h" _eigen3_version_header) + + string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen3_world_version_match "${_eigen3_version_header}") + set(EIGEN3_WORLD_VERSION "${CMAKE_MATCH_1}") + string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen3_major_version_match "${_eigen3_version_header}") + 
set(EIGEN3_MAJOR_VERSION "${CMAKE_MATCH_1}") + string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen3_minor_version_match "${_eigen3_version_header}") + set(EIGEN3_MINOR_VERSION "${CMAKE_MATCH_1}") + + set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION}) + if(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) + set(EIGEN3_VERSION_OK FALSE) + else(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) + set(EIGEN3_VERSION_OK TRUE) + endif(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) + + if(NOT EIGEN3_VERSION_OK) + + message(STATUS "Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, " + "but at least version ${Eigen3_FIND_VERSION} is required") + endif(NOT EIGEN3_VERSION_OK) +endmacro(_eigen3_check_version) + +if (EIGEN3_INCLUDE_DIR) + + # in cache already + _eigen3_check_version() + set(EIGEN3_FOUND ${EIGEN3_VERSION_OK}) + +else (EIGEN3_INCLUDE_DIR) + + find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library + HINTS ${EIGEN_INCLUDE_DIR_HINTS} + NO_DEFAULT_PATH + ) + + if(EIGEN3_INCLUDE_DIR) + _eigen3_check_version() + endif(EIGEN3_INCLUDE_DIR) + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK) + + mark_as_advanced(EIGEN3_INCLUDE_DIR) + +endif(EIGEN3_INCLUDE_DIR) diff --git a/cmake_modules/FindOpenGV.cmake b/cmake_modules/FindOpenGV.cmake new file mode 100644 index 0000000..ad244c6 --- /dev/null +++ b/cmake_modules/FindOpenGV.cmake @@ -0,0 +1,85 @@ + +# This is FindOPENGV.cmake +# CMake module to locate the OPENGV package +# +# The following cache variables may be set before calling this script: +# +# OPENGV_DIR (or OPENGV_ROOT): (Optional) The install prefix OR source tree of opengv (e.g. /usr/local or src/opengv) +# OPENGV_BUILD_NAME: (Optional) If compiling against a source tree, the name of the build directory +# within it (e.g build-debug). 
Without this defined, this script tries to +# intelligently find the build directory based on the project's build directory name +# or based on the build type (Debug/Release/etc). +# +# The following variables will be defined: +# +# OPENGV_FOUND : TRUE if the package has been successfully found +# OPENGV_INCLUDE_DIR : paths to OPENGV's INCLUDE directories +# OPENGV_LIBS : paths to OPENGV's libraries +# +# NOTES on compiling against an uninstalled OPENGV build tree: +# - A OPENGV source tree will be automatically searched for in the directory +# 'opengv' next to your project directory, after searching +# CMAKE_INSTALL_PREFIX and $HOME, but before searching /usr/local and /usr. +# - The build directory will be searched first with the same name as your +# project's build directory, e.g. if you build from 'MyProject/build-optimized', +# 'opengv/build-optimized' will be searched first. Next, a build directory for +# your project's build type, e.g. if CMAKE_BUILD_TYPE in your project is +# 'Release', then 'opengv/build-release' will be searched next. Finally, plain +# 'opengv/build' will be searched. +# - You can control the opengv build directory name directly by defining the CMake +# cache variable 'OPENGV_BUILD_NAME', then only 'opengv/${OPENGV_BUILD_NAME} will +# be searched. +# - Use the standard CMAKE_PREFIX_PATH, or OPENGV_DIR, to find a specific opengv +# directory. 
+ +# Get path suffixes to help look for opengv +if(OPENGV_BUILD_NAME) + set(opengv_build_names "${OPENGV_BUILD_NAME}/opengv") +else() + # lowercase build type + string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type_suffix) + # build suffix of this project + get_filename_component(my_build_name "${CMAKE_BINARY_DIR}" NAME) + + set(opengv_build_names "${my_build_name}/opengv" "build-${build_type_suffix}/opengv" "build/opengv" "build/lib") +endif() + +# Use OPENGV_ROOT or OPENGV_DIR equivalently +if(OPENGV_ROOT AND NOT OPENGV_DIR) + set(OPENGV_DIR "${OPENGV_ROOT}") +endif() + +if(OPENGV_DIR) + # Find include dirs + find_path(OPENGV_INCLUDE_DIR opengv/types.hpp + PATHS "${OPENGV_DIR}/include" "${OPENGV_DIR}" NO_DEFAULT_PATH + DOC "OPENGV include directories") + + # Find libraries + find_library(OPENGV_LIBS NAMES opengv + HINTS "${OPENGV_DIR}/lib" "${OPENGV_DIR}" NO_DEFAULT_PATH + PATH_SUFFIXES ${opengv_build_names} + DOC "OPENGV libraries") +else() + # Find include dirs + set(extra_include_paths ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" "${PROJECT_SOURCE_DIR}/../opengv" /usr/local/include /usr/include) + find_path(OPENGV_INCLUDE_DIR opengv/types.hpp + PATHS ${extra_include_paths} + DOC "OPENGV include directories") + if(NOT OPENGV_INCLUDE_DIR) + message(STATUS "Searched for opengv headers in default paths plus ${extra_include_paths}") + endif() + + # Find libraries + find_library(OPENGV_LIBS NAMES opengv + HINTS ${CMAKE_INSTALL_PREFIX}/lib "$ENV{HOME}/lib" "${PROJECT_SOURCE_DIR}/../opengv" /usr/local/lib /usr/lib + PATH_SUFFIXES ${opengv_build_names} + DOC "OPENGV libraries") +endif() + +# handle the QUIETLY and REQUIRED arguments and set OPENGV_FOUND to TRUE +# if all listed variables are TRUE +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(OPENGV DEFAULT_MSG + OPENGV_LIBS OPENGV_INCLUDE_DIR) + diff --git a/cmake_modules/FindTBB.cmake b/cmake_modules/FindTBB.cmake new file mode 100644 index 0000000..10e540d --- /dev/null +++ 
b/cmake_modules/FindTBB.cmake @@ -0,0 +1,468 @@ +# - Find ThreadingBuildingBlocks include dirs and libraries +# Use this module by invoking find_package with the form: +# find_package(TBB +# [REQUIRED] # Fail with error if TBB is not found +# ) # +# Once done, this will define +# +# TBB_FOUND - system has TBB +# TBB_INCLUDE_DIRS - the TBB include directories +# TBB_LIBRARIES - TBB libraries to be lined, doesn't include malloc or +# malloc proxy +# TBB::tbb - imported target for the TBB library +# +# TBB_VERSION_MAJOR - Major Product Version Number +# TBB_VERSION_MINOR - Minor Product Version Number +# TBB_INTERFACE_VERSION - Engineering Focused Version Number +# TBB_COMPATIBLE_INTERFACE_VERSION - The oldest major interface version +# still supported. This uses the engineering +# focused interface version numbers. +# +# TBB_MALLOC_FOUND - system has TBB malloc library +# TBB_MALLOC_INCLUDE_DIRS - the TBB malloc include directories +# TBB_MALLOC_LIBRARIES - The TBB malloc libraries to be lined +# TBB::malloc - imported target for the TBB malloc library +# +# TBB_MALLOC_PROXY_FOUND - system has TBB malloc proxy library +# TBB_MALLOC_PROXY_INCLUDE_DIRS = the TBB malloc proxy include directories +# TBB_MALLOC_PROXY_LIBRARIES - The TBB malloc proxy libraries to be lined +# TBB::malloc_proxy - imported target for the TBB malloc proxy library +# +# +# This module reads hints about search locations from variables: +# ENV TBB_ARCH_PLATFORM - for eg. set it to "mic" for Xeon Phi builds +# ENV TBB_ROOT or just TBB_ROOT - root directory of tbb installation +# ENV TBB_BUILD_PREFIX - specifies the build prefix for user built tbb +# libraries. Should be specified with ENV TBB_ROOT +# and optionally... 
+# ENV TBB_BUILD_DIR - if build directory is different than ${TBB_ROOT}/build +# +# +# Modified by Robert Maynard from the original OGRE source +# +#------------------------------------------------------------------- +# This file is part of the CMake build system for OGRE +# (Object-oriented Graphics Rendering Engine) +# For the latest info, see http://www.ogre3d.org/ +# +# The contents of this file are placed in the public domain. Feel +# free to make use of it in any way you like. +#------------------------------------------------------------------- +# +# ========================================================================= +# Taken from Copyright.txt in the root of the VTK source tree as per +# instructions to substitute the full license in place of the summary +# reference when distributing outside of VTK +# ========================================================================= +# +# Program: Visualization Toolkit +# Module: Copyright.txt +# +# Copyright (c) 1993-2015 Ken Martin, Will Schroeder, Bill Lorensen +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither name of Ken Martin, Will Schroeder, or Bill Lorensen nor the names +# of any contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# =========================================================================*/ + +#============================================================================= +# FindTBB helper functions and macros +# + +#==================================================== +# Fix the library path in case it is a linker script +#==================================================== +function(tbb_extract_real_library library real_library) + if(NOT UNIX OR NOT EXISTS ${library}) + set(${real_library} "${library}" PARENT_SCOPE) + return() + endif() + + #Read in the first 4 bytes and see if they are the ELF magic number + set(_elf_magic "7f454c46") + file(READ ${library} _hex_data OFFSET 0 LIMIT 4 HEX) + if(_hex_data STREQUAL _elf_magic) + #we have opened a elf binary so this is what + #we should link to + set(${real_library} "${library}" PARENT_SCOPE) + return() + endif() + + file(READ ${library} _data OFFSET 0 LIMIT 1024) + if("${_data}" MATCHES "INPUT \\(([^(]+)\\)") + #extract out the .so name from REGEX MATCH command + set(_proper_so_name "${CMAKE_MATCH_1}") + + #construct path to the real .so which is presumed to be in the same directory + #as the input file + get_filename_component(_so_dir "${library}" 
DIRECTORY) + set(${real_library} "${_so_dir}/${_proper_so_name}" PARENT_SCOPE) + else() + #unable to determine what this library is so just hope everything works + #and pass it unmodified. + set(${real_library} "${library}" PARENT_SCOPE) + endif() +endfunction() + +#=============================================== +# Do the final processing for the package find. +#=============================================== +macro(findpkg_finish PREFIX TARGET_NAME) + if (${PREFIX}_INCLUDE_DIR AND ${PREFIX}_LIBRARY) + set(${PREFIX}_FOUND TRUE) + set (${PREFIX}_INCLUDE_DIRS ${${PREFIX}_INCLUDE_DIR}) + set (${PREFIX}_LIBRARIES ${${PREFIX}_LIBRARY}) + else () + if (${PREFIX}_FIND_REQUIRED AND NOT ${PREFIX}_FIND_QUIETLY) + message(FATAL_ERROR "Required library ${PREFIX} not found.") + endif () + endif () + + if (NOT TARGET "TBB::${TARGET_NAME}") + if (${PREFIX}_LIBRARY_RELEASE) + tbb_extract_real_library(${${PREFIX}_LIBRARY_RELEASE} real_release) + endif () + if (${PREFIX}_LIBRARY_DEBUG) + tbb_extract_real_library(${${PREFIX}_LIBRARY_DEBUG} real_debug) + endif () + add_library(TBB::${TARGET_NAME} UNKNOWN IMPORTED) + set_target_properties(TBB::${TARGET_NAME} PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${${PREFIX}_INCLUDE_DIR}") + if (${PREFIX}_LIBRARY_DEBUG AND ${PREFIX}_LIBRARY_RELEASE) + set_target_properties(TBB::${TARGET_NAME} PROPERTIES + IMPORTED_LOCATION "${real_release}" + IMPORTED_LOCATION_DEBUG "${real_debug}" + IMPORTED_LOCATION_RELEASE "${real_release}") + elseif (${PREFIX}_LIBRARY_RELEASE) + set_target_properties(TBB::${TARGET_NAME} PROPERTIES + IMPORTED_LOCATION "${real_release}") + elseif (${PREFIX}_LIBRARY_DEBUG) + set_target_properties(TBB::${TARGET_NAME} PROPERTIES + IMPORTED_LOCATION "${real_debug}") + endif () + endif () + + #mark the following variables as internal variables + mark_as_advanced(${PREFIX}_INCLUDE_DIR + ${PREFIX}_LIBRARY + ${PREFIX}_LIBRARY_DEBUG + ${PREFIX}_LIBRARY_RELEASE) +endmacro() + +#=============================================== +# Generate 
debug names from given release names +#=============================================== +macro(get_debug_names PREFIX) + foreach(i ${${PREFIX}}) + set(${PREFIX}_DEBUG ${${PREFIX}_DEBUG} ${i}d ${i}D ${i}_d ${i}_D ${i}_debug ${i}) + endforeach() +endmacro() + +#=============================================== +# See if we have env vars to help us find tbb +#=============================================== +macro(getenv_path VAR) + set(ENV_${VAR} $ENV{${VAR}}) + # replace won't work if var is blank + if (ENV_${VAR}) + string( REGEX REPLACE "\\\\" "/" ENV_${VAR} ${ENV_${VAR}} ) + endif () +endmacro() + +#=============================================== +# Couple a set of release AND debug libraries +#=============================================== +macro(make_library_set PREFIX) + if (${PREFIX}_RELEASE AND ${PREFIX}_DEBUG) + set(${PREFIX} optimized ${${PREFIX}_RELEASE} debug ${${PREFIX}_DEBUG}) + elseif (${PREFIX}_RELEASE) + set(${PREFIX} ${${PREFIX}_RELEASE}) + elseif (${PREFIX}_DEBUG) + set(${PREFIX} ${${PREFIX}_DEBUG}) + endif () +endmacro() + +#=============================================== +# Ensure that the release & debug libraries found are from the same installation. +#=============================================== +macro(find_tbb_library_verifying_release_debug_locations PREFIX) + find_library(${PREFIX}_RELEASE + NAMES ${${PREFIX}_NAMES} + HINTS ${TBB_LIB_SEARCH_PATH}) + if (${PREFIX}_RELEASE) + # To avoid finding a mismatched set of release & debug libraries from + # different installations if the first found does not have debug libraries + # by forcing the search for debug to only occur within the detected release + # library directory (if found). Although this would break detection if the + # release & debug libraries were shipped in different directories, this is + # not the case in the official TBB releases for any platform. 
+ get_filename_component( + FOUND_RELEASE_LIB_DIR "${${PREFIX}_RELEASE}" DIRECTORY) + find_library(${PREFIX}_DEBUG + NAMES ${${PREFIX}_NAMES_DEBUG} + HINTS ${FOUND_RELEASE_LIB_DIR} + NO_DEFAULT_PATH) + else() + find_library(${PREFIX}_DEBUG + NAMES ${${PREFIX}_NAMES_DEBUG} + HINTS ${TBB_LIB_SEARCH_PATH}) + endif() +endmacro() + +#============================================================================= +# Now to actually find TBB +# + +# Get path, convert backslashes as ${ENV_${var}} +getenv_path(TBB_ROOT) + +# initialize search paths +set(TBB_PREFIX_PATH ${TBB_ROOT} ${ENV_TBB_ROOT}) +set(TBB_INC_SEARCH_PATH "") +set(TBB_LIB_SEARCH_PATH "") + + +# If user built from sources +set(TBB_BUILD_PREFIX $ENV{TBB_BUILD_PREFIX}) +if (TBB_BUILD_PREFIX AND ENV_TBB_ROOT) + getenv_path(TBB_BUILD_DIR) + if (NOT ENV_TBB_BUILD_DIR) + set(ENV_TBB_BUILD_DIR ${ENV_TBB_ROOT}/build) + endif () + + # include directory under ${ENV_TBB_ROOT}/include + list(APPEND TBB_LIB_SEARCH_PATH + ${ENV_TBB_BUILD_DIR}/${TBB_BUILD_PREFIX}_release + ${ENV_TBB_BUILD_DIR}/${TBB_BUILD_PREFIX}_debug) +endif () + + +# For Windows, let's assume that the user might be using the precompiled +# TBB packages from the main website. These use a rather awkward directory +# structure (at least for automatically finding the right files) depending +# on platform and compiler, but we'll do our best to accommodate it. +# Not adding the same effort for the precompiled linux builds, though. Those +# have different versions for CC compiler versions and linux kernels which +# will never adequately match the user's setup, so there is no feasible way +# to detect the "best" version to use. The user will have to manually +# select the right files. (Chances are the distributions are shipping their +# custom version of tbb, anyway, so the problem is probably nonexistent.) 
+if (WIN32 AND MSVC) + set(COMPILER_PREFIX "vc7.1") + if (MSVC_VERSION EQUAL 1400) + set(COMPILER_PREFIX "vc8") + elseif(MSVC_VERSION EQUAL 1500) + set(COMPILER_PREFIX "vc9") + elseif(MSVC_VERSION EQUAL 1600) + set(COMPILER_PREFIX "vc10") + elseif(MSVC_VERSION EQUAL 1700) + set(COMPILER_PREFIX "vc11") + elseif(MSVC_VERSION EQUAL 1800) + set(COMPILER_PREFIX "vc12") + elseif(MSVC_VERSION GREATER_EQUAL 1900) + set(COMPILER_PREFIX "vc14") + endif () + + # for each prefix path, add ia32/64\${COMPILER_PREFIX}\lib to the lib search path + foreach (dir IN LISTS TBB_PREFIX_PATH) + if (CMAKE_CL_64) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/ia64/${COMPILER_PREFIX}/lib) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/ia64/${COMPILER_PREFIX}) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/intel64/${COMPILER_PREFIX}/lib) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/intel64/${COMPILER_PREFIX}) + else () + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/ia32/${COMPILER_PREFIX}/lib) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/ia32/${COMPILER_PREFIX}) + endif () + endforeach () +endif () + +# For OS X binary distribution, choose libc++ based libraries for Mavericks (10.9) +# and above and AppleClang +if (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND + NOT CMAKE_SYSTEM_VERSION VERSION_LESS 13.0) + set (USE_LIBCXX OFF) + cmake_policy(GET CMP0025 POLICY_VAR) + + if (POLICY_VAR STREQUAL "NEW") + if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") + set (USE_LIBCXX ON) + endif () + else () + if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + set (USE_LIBCXX ON) + endif () + endif () + + if (USE_LIBCXX) + foreach (dir IN LISTS TBB_PREFIX_PATH) + list (APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/libc++ ${dir}/libc++/lib) + endforeach () + endif () +endif () + +# check compiler ABI +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set(COMPILER_PREFIX) + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8) + list(APPEND COMPILER_PREFIX "gcc4.8") + endif() + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.7) + list(APPEND 
COMPILER_PREFIX "gcc4.7") + endif() + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.4) + list(APPEND COMPILER_PREFIX "gcc4.4") + endif() + list(APPEND COMPILER_PREFIX "gcc4.1") +elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + set(COMPILER_PREFIX) + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.0) # Complete guess + list(APPEND COMPILER_PREFIX "gcc4.8") + endif() + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.6) + list(APPEND COMPILER_PREFIX "gcc4.7") + endif() + list(APPEND COMPILER_PREFIX "gcc4.4") +else() # Assume compatibility with 4.4 for other compilers + list(APPEND COMPILER_PREFIX "gcc4.4") +endif () + +# if platform architecture is explicitly specified +set(TBB_ARCH_PLATFORM $ENV{TBB_ARCH_PLATFORM}) +if (TBB_ARCH_PLATFORM) + foreach (dir IN LISTS TBB_PREFIX_PATH) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/${TBB_ARCH_PLATFORM}/lib) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/${TBB_ARCH_PLATFORM}) + endforeach () +endif () + +foreach (dir IN LISTS TBB_PREFIX_PATH) + foreach (prefix IN LISTS COMPILER_PREFIX) + if (CMAKE_SIZEOF_VOID_P EQUAL 8) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/intel64) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/intel64/${prefix}) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/intel64/lib) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/intel64/${prefix}/lib) + else () + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/ia32) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/ia32/${prefix}) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/ia32/lib) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/ia32/${prefix}/lib) + endif () + endforeach() +endforeach () + +# add general search paths +foreach (dir IN LISTS TBB_PREFIX_PATH) + list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib ${dir}/Lib ${dir}/lib/tbb + ${dir}/Libs) + list(APPEND TBB_INC_SEARCH_PATH ${dir}/include ${dir}/Include + ${dir}/include/tbb) +endforeach () + +set(TBB_LIBRARY_NAMES tbb) +get_debug_names(TBB_LIBRARY_NAMES) + +find_path(TBB_INCLUDE_DIR + NAMES tbb/tbb.h + HINTS ${TBB_INC_SEARCH_PATH}) 
+find_tbb_library_verifying_release_debug_locations(TBB_LIBRARY) +make_library_set(TBB_LIBRARY) + +findpkg_finish(TBB tbb) + +#if we haven't found TBB no point on going any further +if (NOT TBB_FOUND) + return() +endif () + +#============================================================================= +# Look for TBB's malloc package +set(TBB_MALLOC_LIBRARY_NAMES tbbmalloc) +get_debug_names(TBB_MALLOC_LIBRARY_NAMES) + +find_path(TBB_MALLOC_INCLUDE_DIR + NAMES tbb/tbb.h + HINTS ${TBB_INC_SEARCH_PATH}) +find_tbb_library_verifying_release_debug_locations(TBB_MALLOC_LIBRARY) +make_library_set(TBB_MALLOC_LIBRARY) + +findpkg_finish(TBB_MALLOC tbbmalloc) + +#============================================================================= +# Look for TBB's malloc proxy package +set(TBB_MALLOC_PROXY_LIBRARY_NAMES tbbmalloc_proxy) +get_debug_names(TBB_MALLOC_PROXY_LIBRARY_NAMES) + +find_path(TBB_MALLOC_PROXY_INCLUDE_DIR + NAMES tbb/tbbmalloc_proxy.h + HINTS ${TBB_INC_SEARCH_PATH}) +find_tbb_library_verifying_release_debug_locations(TBB_MALLOC_PROXY_LIBRARY) +make_library_set(TBB_MALLOC_PROXY_LIBRARY) + +findpkg_finish(TBB_MALLOC_PROXY tbbmalloc_proxy) + + +#============================================================================= +#parse all the version numbers from tbb +if(NOT TBB_VERSION) + set(TBB_VERSION_FILE_PRIOR_TO_TBB_2021_1 + "${TBB_INCLUDE_DIR}/tbb/tbb_stddef.h") + set(TBB_VERSION_FILE_AFTER_TBB_2021_1 + "${TBB_INCLUDE_DIR}/oneapi/tbb/version.h") + + if (EXISTS "${TBB_VERSION_FILE_PRIOR_TO_TBB_2021_1}") + set(TBB_VERSION_FILE "${TBB_VERSION_FILE_PRIOR_TO_TBB_2021_1}") + elseif (EXISTS "${TBB_VERSION_FILE_AFTER_TBB_2021_1}") + set(TBB_VERSION_FILE "${TBB_VERSION_FILE_AFTER_TBB_2021_1}") + else() + message(FATAL_ERROR "Found TBB installation: ${TBB_INCLUDE_DIR} " + "missing version header.") + endif() + + #only read the start of the file + file(STRINGS + "${TBB_VERSION_FILE}" + TBB_VERSION_CONTENTS + REGEX "VERSION") + + string(REGEX REPLACE + ".*#define 
TBB_VERSION_MAJOR ([0-9]+).*" "\\1" + TBB_VERSION_MAJOR "${TBB_VERSION_CONTENTS}") + + string(REGEX REPLACE + ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1" + TBB_VERSION_MINOR "${TBB_VERSION_CONTENTS}") + + string(REGEX REPLACE + ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1" + TBB_INTERFACE_VERSION "${TBB_VERSION_CONTENTS}") + + string(REGEX REPLACE + ".*#define TBB_COMPATIBLE_INTERFACE_VERSION ([0-9]+).*" "\\1" + TBB_COMPATIBLE_INTERFACE_VERSION "${TBB_VERSION_CONTENTS}") + +endif() diff --git a/data/aprilgrid_5x4_uzh.json b/data/aprilgrid_5x4_uzh.json new file mode 100644 index 0000000..6d2ba4c --- /dev/null +++ b/data/aprilgrid_5x4_uzh.json @@ -0,0 +1,6 @@ +{ + "tagCols": 5, + "tagRows": 4, + "tagSize": 0.075, + "tagSpacing": 0.2 +} diff --git a/data/aprilgrid_6x6.json b/data/aprilgrid_6x6.json new file mode 100644 index 0000000..1361c77 --- /dev/null +++ b/data/aprilgrid_6x6.json @@ -0,0 +1,6 @@ +{ + "tagCols": 6, + "tagRows": 6, + "tagSize": 0.088, + "tagSpacing": 0.3 +} diff --git a/data/euroc_config.json b/data/euroc_config.json new file mode 100644 index 0000000..0cf10f2 --- /dev/null +++ b/data/euroc_config.json @@ -0,0 +1,56 @@ +{ + "value0": { + "config.optical_flow_type": "frame_to_frame", + "config.optical_flow_detection_grid_size": 50, + "config.optical_flow_max_recovered_dist2": 0.04, + "config.optical_flow_pattern": 51, + "config.optical_flow_max_iterations": 5, + "config.optical_flow_epipolar_error": 0.005, + "config.optical_flow_levels": 3, + "config.optical_flow_skip_frames": 1, + "config.vio_linearization_type": "ABS_QR", + "config.vio_sqrt_marg": true, + "config.vio_max_states": 3, + "config.vio_max_kfs": 7, + "config.vio_min_frames_after_kf": 5, + "config.vio_new_kf_keypoints_thresh": 0.7, + "config.vio_debug": false, + "config.vio_extended_logging": false, + "config.vio_obs_std_dev": 0.5, + "config.vio_obs_huber_thresh": 1.0, + "config.vio_min_triangulation_dist": 0.05, + "config.vio_outlier_threshold": 3.0, + 
"config.vio_filter_iteration": 4, + "config.vio_max_iterations": 7, + "config.vio_enforce_realtime": false, + "config.vio_use_lm": true, + "config.vio_lm_lambda_initial": 1e-4, + "config.vio_lm_lambda_min": 1e-6, + "config.vio_lm_lambda_max": 1e2, + "config.vio_lm_landmark_damping_variant": 1, + "config.vio_lm_pose_damping_variant": 1, + "config.vio_scale_jacobian": false, + "config.vio_init_pose_weight": 1e8, + "config.vio_init_ba_weight": 1e1, + "config.vio_init_bg_weight": 1e2, + "config.vio_marg_lost_landmarks": true, + "config.vio_kf_marg_feature_ratio": 0.1, + "config.mapper_obs_std_dev": 0.25, + "config.mapper_obs_huber_thresh": 1.5, + "config.mapper_detection_num_points": 800, + "config.mapper_num_frames_to_match": 30, + "config.mapper_frames_to_match_threshold": 0.04, + "config.mapper_min_matches": 20, + "config.mapper_ransac_threshold": 5e-5, + "config.mapper_min_track_length": 5, + "config.mapper_max_hamming_distance": 70, + "config.mapper_second_best_test_ratio": 1.2, + "config.mapper_bow_num_bits": 16, + "config.mapper_min_triangulation_dist": 0.07, + "config.mapper_no_factor_weights": false, + "config.mapper_use_factors": true, + "config.mapper_use_lm": true, + "config.mapper_lm_lambda_min": 1e-32, + "config.mapper_lm_lambda_max": 1e3 + } +} diff --git a/data/euroc_config_no_factors.json b/data/euroc_config_no_factors.json new file mode 100644 index 0000000..9ed26d2 --- /dev/null +++ b/data/euroc_config_no_factors.json @@ -0,0 +1,56 @@ +{ + "value0": { + "config.optical_flow_type": "frame_to_frame", + "config.optical_flow_detection_grid_size": 50, + "config.optical_flow_max_recovered_dist2": 0.04, + "config.optical_flow_pattern": 51, + "config.optical_flow_max_iterations": 5, + "config.optical_flow_epipolar_error": 0.005, + "config.optical_flow_levels": 3, + "config.optical_flow_skip_frames": 1, + "config.vio_linearization_type": "ABS_QR", + "config.vio_sqrt_marg": true, + "config.vio_max_states": 3, + "config.vio_max_kfs": 7, + 
"config.vio_min_frames_after_kf": 5, + "config.vio_new_kf_keypoints_thresh": 0.7, + "config.vio_debug": false, + "config.vio_extended_logging": false, + "config.vio_obs_std_dev": 0.5, + "config.vio_obs_huber_thresh": 1.0, + "config.vio_min_triangulation_dist": 0.05, + "config.vio_outlier_threshold": 3.0, + "config.vio_filter_iteration": 4, + "config.vio_max_iterations": 7, + "config.vio_enforce_realtime": false, + "config.vio_use_lm": false, + "config.vio_lm_lambda_initial": 1e-8, + "config.vio_lm_lambda_min": 1e-32, + "config.vio_lm_lambda_max": 1e2, + "config.vio_lm_landmark_damping_variant": 1, + "config.vio_lm_pose_damping_variant": 1, + "config.vio_scale_jacobian": false, + "config.vio_init_pose_weight": 1e8, + "config.vio_init_ba_weight": 1e1, + "config.vio_init_bg_weight": 1e2, + "config.vio_marg_lost_landmarks": true, + "config.vio_kf_marg_feature_ratio": 0.1, + "config.mapper_obs_std_dev": 0.25, + "config.mapper_obs_huber_thresh": 1.5, + "config.mapper_detection_num_points": 800, + "config.mapper_num_frames_to_match": 30, + "config.mapper_frames_to_match_threshold": 0.04, + "config.mapper_min_matches": 20, + "config.mapper_ransac_threshold": 5e-5, + "config.mapper_min_track_length": 5, + "config.mapper_max_hamming_distance": 70, + "config.mapper_second_best_test_ratio": 1.2, + "config.mapper_bow_num_bits": 16, + "config.mapper_min_triangulation_dist": 0.07, + "config.mapper_no_factor_weights": false, + "config.mapper_use_factors": false, + "config.mapper_use_lm": true, + "config.mapper_lm_lambda_min": 1e-32, + "config.mapper_lm_lambda_max": 1e3 + } +} diff --git a/data/euroc_config_no_weights.json b/data/euroc_config_no_weights.json new file mode 100644 index 0000000..c4962b5 --- /dev/null +++ b/data/euroc_config_no_weights.json @@ -0,0 +1,56 @@ +{ + "value0": { + "config.optical_flow_type": "frame_to_frame", + "config.optical_flow_detection_grid_size": 50, + "config.optical_flow_max_recovered_dist2": 0.04, + "config.optical_flow_pattern": 51, + 
"config.optical_flow_max_iterations": 5, + "config.optical_flow_epipolar_error": 0.005, + "config.optical_flow_levels": 3, + "config.optical_flow_skip_frames": 1, + "config.vio_linearization_type": "ABS_QR", + "config.vio_sqrt_marg": true, + "config.vio_max_states": 3, + "config.vio_max_kfs": 7, + "config.vio_min_frames_after_kf": 5, + "config.vio_new_kf_keypoints_thresh": 0.7, + "config.vio_debug": false, + "config.vio_extended_logging": false, + "config.vio_obs_std_dev": 0.5, + "config.vio_obs_huber_thresh": 1.0, + "config.vio_min_triangulation_dist": 0.05, + "config.vio_outlier_threshold": 3.0, + "config.vio_filter_iteration": 4, + "config.vio_max_iterations": 7, + "config.vio_enforce_realtime": false, + "config.vio_use_lm": false, + "config.vio_lm_lambda_initial": 1e-8, + "config.vio_lm_lambda_min": 1e-32, + "config.vio_lm_lambda_max": 1e2, + "config.vio_lm_landmark_damping_variant": 1, + "config.vio_lm_pose_damping_variant": 1, + "config.vio_scale_jacobian": false, + "config.vio_init_pose_weight": 1e8, + "config.vio_init_ba_weight": 1e1, + "config.vio_init_bg_weight": 1e2, + "config.vio_marg_lost_landmarks": true, + "config.vio_kf_marg_feature_ratio": 0.1, + "config.mapper_obs_std_dev": 0.25, + "config.mapper_obs_huber_thresh": 1.5, + "config.mapper_detection_num_points": 800, + "config.mapper_num_frames_to_match": 30, + "config.mapper_frames_to_match_threshold": 0.04, + "config.mapper_min_matches": 20, + "config.mapper_ransac_threshold": 5e-5, + "config.mapper_min_track_length": 5, + "config.mapper_max_hamming_distance": 70, + "config.mapper_second_best_test_ratio": 1.2, + "config.mapper_bow_num_bits": 16, + "config.mapper_min_triangulation_dist": 0.07, + "config.mapper_no_factor_weights": true, + "config.mapper_use_factors": true, + "config.mapper_use_lm": true, + "config.mapper_lm_lambda_min": 1e-32, + "config.mapper_lm_lambda_max": 1e3 + } +} diff --git a/data/euroc_config_vo.json b/data/euroc_config_vo.json new file mode 100644 index 0000000..c11c0b4 --- 
/dev/null +++ b/data/euroc_config_vo.json @@ -0,0 +1,56 @@ +{ + "value0": { + "config.optical_flow_type": "frame_to_frame", + "config.optical_flow_detection_grid_size": 40, + "config.optical_flow_max_recovered_dist2": 0.04, + "config.optical_flow_pattern": 51, + "config.optical_flow_max_iterations": 5, + "config.optical_flow_epipolar_error": 0.005, + "config.optical_flow_levels": 3, + "config.optical_flow_skip_frames": 1, + "config.vio_linearization_type": "ABS_QR", + "config.vio_sqrt_marg": true, + "config.vio_max_states": 3, + "config.vio_max_kfs": 7, + "config.vio_min_frames_after_kf": 1, + "config.vio_new_kf_keypoints_thresh": 0.8, + "config.vio_debug": false, + "config.vio_extended_logging": false, + "config.vio_obs_std_dev": 0.5, + "config.vio_obs_huber_thresh": 1.0, + "config.vio_min_triangulation_dist": 0.05, + "config.vio_outlier_threshold": 3.0, + "config.vio_filter_iteration": 4, + "config.vio_max_iterations": 7, + "config.vio_enforce_realtime": false, + "config.vio_use_lm": true, + "config.vio_lm_lambda_initial": 1e-4, + "config.vio_lm_lambda_min": 1e-5, + "config.vio_lm_lambda_max": 1e2, + "config.vio_lm_landmark_damping_variant": 1, + "config.vio_lm_pose_damping_variant": 1, + "config.vio_scale_jacobian": false, + "config.vio_init_pose_weight": 1e8, + "config.vio_init_ba_weight": 1e1, + "config.vio_init_bg_weight": 1e2, + "config.vio_marg_lost_landmarks": true, + "config.vio_kf_marg_feature_ratio": 0.2, + "config.mapper_obs_std_dev": 0.25, + "config.mapper_obs_huber_thresh": 1.5, + "config.mapper_detection_num_points": 800, + "config.mapper_num_frames_to_match": 30, + "config.mapper_frames_to_match_threshold": 0.04, + "config.mapper_min_matches": 20, + "config.mapper_ransac_threshold": 5e-5, + "config.mapper_min_track_length": 5, + "config.mapper_max_hamming_distance": 70, + "config.mapper_second_best_test_ratio": 1.2, + "config.mapper_bow_num_bits": 16, + "config.mapper_min_triangulation_dist": 0.07, + "config.mapper_no_factor_weights": false, + 
"config.mapper_use_factors": true, + "config.mapper_use_lm": true, + "config.mapper_lm_lambda_min": 1e-32, + "config.mapper_lm_lambda_max": 1e3 + } +} diff --git a/data/euroc_ds_calib.json b/data/euroc_ds_calib.json new file mode 100644 index 0000000..25c22a0 --- /dev/null +++ b/data/euroc_ds_calib.json @@ -0,0 +1,229 @@ +{ + "value0": { + "T_imu_cam": [ + { + "px": -0.016774788924641534, + "py": -0.068938940687127, + "pz": 0.005139123188382424, + "qx": -0.007239825785317818, + "qy": 0.007541278561558601, + "qz": 0.7017845426564943, + "qw": 0.7123125505904486 + }, + { + "px": -0.01507436282032619, + "py": 0.0412627204046637, + "pz": 0.00316287258752953, + "qx": -0.0023360576185881625, + "qy": 0.013000769689092388, + "qz": 0.7024677108343111, + "qw": 0.7115930283929829 + } + ], + "intrinsics": [ + { + "camera_type": "ds", + "intrinsics": { + "fx": 349.7560023050409, + "fy": 348.72454229977037, + "cx": 365.89440762590149, + "cy": 249.32995565708704, + "xi": -0.2409573942178872, + "alpha": 0.566996899163044 + } + }, + { + "camera_type": "ds", + "intrinsics": { + "fx": 361.6713883800533, + "fy": 360.5856493689301, + "cx": 379.40818394080869, + "cy": 255.9772968522045, + "xi": -0.21300835384809328, + "alpha": 0.5767008625037023 + } + } + ], + "resolution": [ + [ + 752, + 480 + ], + [ + 752, + 480 + ] + ], + "vignette": [ + { + "value0": 0, + "value1": 50000000000, + "value2": [ + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 0.977014799737916 + ], + [ + 0.963099657065011 + ], + [ + 0.922192868484681 + ], + [ + 0.854947649264748 + ], + [ + 0.775979681731632 + ], + [ + 0.722888431826212 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ] + ] + }, + { + "value0": 0, + "value1": 50000000000, + "value2": [ + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 0.977014799737916 + ], + [ + 0.963099657065011 + ], + [ + 0.922192868484681 + ], + [ + 
0.854947649264748 + ], + [ + 0.775979681731632 + ], + [ + 0.722888431826212 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ] + ] + } + ], + "calib_accel_bias": [ + -0.003025405479279035, + 0.1200005286487319, + 0.06708820471592454, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "calib_gyro_bias": [ + -0.002186848441668376, + 0.020427823167917037, + 0.07668367023977922, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "imu_update_rate": 200.0, + "accel_noise_std": [ + 0.016, + 0.016, + 0.016 + ], + "gyro_noise_std": [ + 0.000282, + 0.000282, + 0.000282 + ], + "accel_bias_std": [ + 0.001, + 0.001, + 0.001 + ], + "gyro_bias_std": [ + 0.0001, + 0.0001, + 0.0001 + ], + "T_mocap_world": { + "px": 0.0, + "py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + }, + "T_imu_marker": { + "px": 0.0, + "py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + }, + "mocap_time_offset_ns": 0, + "mocap_to_imu_offset_ns": 140763258159875, + "cam_time_offset_ns": 0 + } +} diff --git a/data/euroc_eucm_calib.json b/data/euroc_eucm_calib.json new file mode 100644 index 0000000..347d6b0 --- /dev/null +++ b/data/euroc_eucm_calib.json @@ -0,0 +1,213 @@ +{ + "value0": { + "T_imu_cam": [ + { + "px": -0.016774788924641534, + "py": -0.068938940687127, + "pz": 0.005139123188382424, + "qx": -0.007239825785317818, + "qy": 0.007541278561558601, + "qz": 0.7017845426564943, + "qw": 0.7123125505904486 + }, + { + "px": -0.01507436282032619, + "py": 0.0412627204046637, + "pz": 0.00316287258752953, + "qx": -0.0023360576185881625, + "qy": 0.013000769689092388, + "qz": 0.7024677108343111, + "qw": 0.7115930283929829 + } + ], + "intrinsics": [ + { + "camera_type": "eucm", + "intrinsics": { + "fx": 460.76484651566468, + "fy": 459.4051018049483, + "cx": 365.8937161309615, + "cy": 249.33499869752445, + "alpha": 0.5903365915227143, + "beta": 1.127468196965374 + } + }, + { + 
"camera_type": "eucm", + "intrinsics": { + "fx": 459.55216904505178, + "fy": 458.17181312352059, + "cx": 379.4066773637502, + "cy": 255.98301446219285, + "alpha": 0.6049889282227827, + "beta": 1.0907289821146678 + } + } + ], + "resolution": [ + [ + 752, + 480 + ], + [ + 752, + 480 + ] + ], + "vignette": [ + { + "value0": 0, + "value1": 50000000000, + "value2": [ + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 0.977014799737916 + ], + [ + 0.963099657065011 + ], + [ + 0.922192868484681 + ], + [ + 0.854947649264748 + ], + [ + 0.775979681731632 + ], + [ + 0.722888431826212 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ] + ] + }, + { + "value0": 0, + "value1": 50000000000, + "value2": [ + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 1.0 + ], + [ + 0.977014799737916 + ], + [ + 0.963099657065011 + ], + [ + 0.922192868484681 + ], + [ + 0.854947649264748 + ], + [ + 0.775979681731632 + ], + [ + 0.722888431826212 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ], + [ + 0.0686113338332548 + ] + ] + } + ], + "calib_accel_bias": [ + -0.003025405479279035, + 0.1200005286487319, + 0.06708820471592454, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "calib_gyro_bias": [ + -0.002186848441668376, + 0.020427823167917037, + 0.07668367023977922, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "imu_update_rate": 200.0, + "accel_noise_std": [0.016, 0.016, 0.016], + "gyro_noise_std": [0.000282, 0.000282, 0.000282], + "accel_bias_std": [0.001, 0.001, 0.001], + "gyro_bias_std": [0.0001, 0.0001, 0.0001], + "T_mocap_world": { + "px": 0.0, + "py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + }, + "T_imu_marker": { + "px": 0.0, + "py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + }, + "mocap_time_offset_ns": 0, + "mocap_to_imu_offset_ns": 140763258159875, 
+ "cam_time_offset_ns": 0 + } +} diff --git a/data/iccv21/basalt_batch_config.toml b/data/iccv21/basalt_batch_config.toml new file mode 100644 index 0000000..a5c51f2 --- /dev/null +++ b/data/iccv21/basalt_batch_config.toml @@ -0,0 +1,424 @@ + +############################################################################## +## Base config that ends up in basalt_config.json +## These values are overwritten by the named config elements below. +############################################################################## + +# config read by Basalt +[value0] + +"config.optical_flow_type" = "frame_to_frame" +"config.optical_flow_detection_grid_size" = 50 +"config.optical_flow_max_recovered_dist2" = 0.04 +"config.optical_flow_pattern" = 51 +"config.optical_flow_max_iterations" = 5 +"config.optical_flow_epipolar_error" = 0.005 +"config.optical_flow_levels" = 3 +"config.optical_flow_skip_frames" = 1 + +"config.vio_linearization_type" = "ABS_QR" +"config.vio_sqrt_marg" = true +"config.vio_max_states" = 3 +"config.vio_max_kfs" = 7 +"config.vio_min_frames_after_kf" = 5 +"config.vio_new_kf_keypoints_thresh" = 0.7 +"config.vio_debug" = false +"config.vio_extended_logging" = false +"config.vio_obs_std_dev" = 0.5 +"config.vio_obs_huber_thresh" = 1.0 +"config.vio_min_triangulation_dist" = 0.05 +"config.vio_outlier_threshold" = 3.0 +"config.vio_filter_iteration" = 4 +"config.vio_max_iterations" = 7 +"config.vio_enforce_realtime" = false +"config.vio_use_lm" = true +"config.vio_lm_lambda_initial" = 1e-4 +"config.vio_lm_lambda_min" = 1e-7 +"config.vio_lm_lambda_max" = 1e2 +"config.vio_lm_landmark_damping_variant" = 1 +"config.vio_lm_pose_damping_variant" = 1 +"config.vio_scale_jacobian" = false +"config.vio_init_pose_weight" = 1e8 +"config.vio_init_ba_weight" = 1e1 +"config.vio_init_bg_weight" = 1e2 +"config.vio_marg_lost_landmarks" = true +"config.vio_kf_marg_feature_ratio" = 0.1 + +"config.mapper_obs_std_dev" = 0.25 +"config.mapper_obs_huber_thresh" = 1.5 
+"config.mapper_detection_num_points" = 800 +"config.mapper_num_frames_to_match" = 30 +"config.mapper_frames_to_match_threshold" = 0.04 +"config.mapper_min_matches" = 20 +"config.mapper_ransac_threshold" = 5e-5 +"config.mapper_min_track_length" = 5 +"config.mapper_max_hamming_distance" = 70 +"config.mapper_second_best_test_ratio" = 1.2 +"config.mapper_bow_num_bits" = 16 +"config.mapper_min_triangulation_dist" = 0.07 +"config.mapper_no_factor_weights" = false +"config.mapper_use_factors" = true +"config.mapper_use_lm" = true +"config.mapper_lm_lambda_min" = 1e-32 +"config.mapper_lm_lambda_max" = 1e3 + +# which executable to run +[batch_run] +executable = "basalt_vio" + +# environment variables set for the executable +[batch_run.env] +OPENBLAS_NUM_THREADS = 1 +OMP_NUM_THREADS = 1 + +# command line arguments to the executable ("--" is prepended) +[batch_run.args] +"config-path" = "basalt_config.json" +"show-gui" = "0" +"step-by-step" = "false" +#"max-frames" = "1000" +"use-imu" = "false" +"use-double" = "false" +"num-threads" = "0" + + +############################################################################## +## Named config elements to overwrite the base config: batch run script options +############################################################################## + +[_batch.config.vio] +batch_run.args."use-imu" = "true" + +[_batch.config.vo] +batch_run.args."use-imu" = "false" + + + +[_batch.config.double] +batch_run.args."use-double" = "true" + +[_batch.config.float] +batch_run.args."use-double" = "false" + + + +[_batch.config.threads0] +batch_run.args."num-threads" = "0" + +[_batch.config.threads1] +batch_run.args."num-threads" = "1" + + + +[_batch.config.savetum] +batch_run.args."save-trajectory" = "tum" + +# saving gt produces quite large output, but is needed for trajectory plots +[_batch.config.savetumgt] +batch_run.args."save-trajectory" = "tum" +batch_run.args."save-groundtruth" = "1" + + + +# run each experiment twice to make sure file system caches 
are hot +[_batch.config.runtwice] +batch_run.num_runs = 2 + + + +############################################################################## +## Named config elements to overwrite the base config: datasets +############################################################################## + +# NOTE: Replace the absolute paths in "dataset-path" entries with the +# path to your download of EuRoC, TUMVI, Kitti. The "cam-calib" paths +# are relative and point to locations in the basalt repository. This +# assumes a folder setup like: +# +# parent-folder/ +# ├─ basalt/ +# │ ├─ data/ +# │ │ ├─ euroc_ds_calib.json +# │ │ ├─ ... +# │ ├─ ... +# ├─ experiments/ +# │ ├─ iccv_tutorial/ +# │ │ ├─ basalt_batch_config.toml +# │ │ ├─ ... +# +# The calibration for Kitti is expected in basalt format (you can use the +# basalt_convert_kitti_calib.py script) + +[_batch.config.eurocMH01] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/MH_01_easy" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocMH02] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/MH_02_easy" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocMH03] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/MH_03_medium" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocMH04] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/MH_04_difficult" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocMH05] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/MH_05_difficult" +batch_run.args."cam-calib" = 
"../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocV101] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/V1_01_easy" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocV102] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/V1_02_medium" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocV103] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/V1_03_difficult" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocV201] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/V2_01_easy" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocV202] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/V2_02_medium" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.eurocV203] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/euroc/V2_03_difficult" +batch_run.args."cam-calib" = "../../../../../../basalt/data/euroc_ds_calib.json" +batch_run.args."dataset-type" = "euroc" + + + +[_batch.config.tumvi-corridor1] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-corridor1_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-corridor2] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-corridor2_512_16" 
+batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-corridor3] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-corridor3_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-corridor4] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-corridor4_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-corridor5] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-corridor5_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-magistrale1] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-magistrale1_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-magistrale2] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-magistrale2_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-magistrale3] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-magistrale3_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-magistrale4] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-magistrale4_512_16" +batch_run.args."cam-calib" = 
"../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-magistrale5] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-magistrale5_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-magistrale6] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-magistrale6_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-room1] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-room1_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-room2] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-room2_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-room3] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-room3_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-room4] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-room4_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-room5] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-room5_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" 
+[_batch.config.tumvi-room6] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-room6_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-slides1] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-slides1_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-slides2] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-slides2_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" +[_batch.config.tumvi-slides3] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/tumvi/512_16/dataset-slides3_512_16" +batch_run.args."cam-calib" = "../../../../../../basalt/data/tumvi_512_eucm_calib.json" +batch_run.args."dataset-type" = "euroc" + + + +[_batch.config.kitti00] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/00" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/00/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti01] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/01" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/01/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti02] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/02" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/02/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti03] +batch_run.args."dataset-path" 
= "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/03" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/03/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti04] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/04" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/04/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti05] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/05" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/05/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti06] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/06" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/06/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti07] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/07" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/07/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti08] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/08" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/08/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti09] +batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/09" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/09/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" +[_batch.config.kitti10] 
+batch_run.args."dataset-path" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/10" +batch_run.args."cam-calib" = "/storage/local/ssd/demmeln/tsm-exclude/kitti_odom_grey/sequences/10/basalt_calib.json" +batch_run.args."dataset-type" = "kitti" + + + +############################################################################## +## Named config elements to overwrite the base config: basalt config +############################################################################## + +[_batch.config.debug] +value0."config.vio_debug" = true +value0."config.vio_extended_logging" = true + +[_batch.config.nodebug] +value0."config.vio_debug" = false +value0."config.vio_extended_logging" = false + + + +[_batch.config.extlog] +value0."config.vio_extended_logging" = true + +[_batch.config.noextlog] +value0."config.vio_extended_logging" = false + + + +[_batch.config.eurocvo] +value0."config.optical_flow_detection_grid_size" = 40 +value0."config.vio_min_frames_after_kf" = 1 +value0."config.vio_new_kf_keypoints_thresh" = 0.8 +value0."config.vio_kf_marg_feature_ratio" = 0.2 + +[_batch.config.tumvivio] +value0."config.optical_flow_detection_grid_size" = 40 + +[_batch.config.kittivo] +value0."config.optical_flow_detection_grid_size" = 30 +value0."config.optical_flow_epipolar_error" = 0.001 +value0."config.optical_flow_levels" = 4 +value0."config.vio_min_frames_after_kf" = 1 + + + +[_batch.config.sq-sc-sc] +value0."config.vio_sqrt_marg" = false +value0."config.vio_linearization_type" = "ABS_SC" + +[_batch.config.sqrt-sc-sc] +value0."config.vio_sqrt_marg" = true +value0."config.vio_linearization_type" = "ABS_SC" + +[_batch.config.sqrt-nsllt-qr] +value0."config.vio_sqrt_marg" = true +value0."config.vio_linearization_type" = "ABS_QR" + + + +############################################################################## +## Named alternatives, which generate a different experiment for each entry ("or" relation) 
+############################################################################## + +[_batch.alternatives] +all_euroc = ["eurocMH01", "eurocMH02", "eurocMH03", "eurocMH04", "eurocMH05", "eurocV101", "eurocV102", "eurocV103", "eurocV201", "eurocV202"] +some_euroc = ["eurocMH01", "eurocMH02", "eurocV101", "eurocV103", "eurocV201", "eurocV202"] + +some_tumvi = ["tumvi-corridor1", "tumvi-magistrale1", "tumvi-room1", "tumvi-slides1"] +more_tumvi = ["tumvi-corridor1", "tumvi-corridor2", "tumvi-magistrale1", "tumvi-magistrale2", "tumvi-room1", "tumvi-room2", "tumvi-slides1", "tumvi-slides2"] +some_more_tumvi = ["tumvi-corridor1", "tumvi-corridor2", "tumvi-corridor3", "tumvi-corridor4", "tumvi-corridor5", "tumvi-magistrale1", "tumvi-magistrale2", "tumvi-magistrale3", "tumvi-magistrale4", "tumvi-magistrale5", "tumvi-magistrale6", "tumvi-room1", "tumvi-room2", "tumvi-room3", "tumvi-room4", "tumvi-room5", "tumvi-room6", "tumvi-slides1", "tumvi-slides2", "tumvi-slides3"] + +all_kitti = ["kitti00", "kitti01", "kitti02", "kitti03", "kitti04", "kitti05", "kitti06", "kitti07", "kitti08", "kitti09", "kitti10"] + +all_imu = ["vio", "vo"] +all_double = ["double", "float"] + +all_meth = ["sq-sc-sc", "sqrt-sc-sc", "sqrt-nsllt-qr"] + + + +############################################################################## +## Config combinations, which composing named configs and alternatives ("and" relation) +############################################################################## + +[_batch.combinations] +vio_euroc = ["vio", "savetumgt", "extlog", "runtwice", "all_meth", "all_double", "all_euroc"] +vio_tumvi = ["vio", "tumvivio", "savetumgt", "extlog", "runtwice", "all_meth", "all_double", "more_tumvi"] +vo_kitti = ["vo", "kittivo", "savetumgt", "extlog", "runtwice", "all_meth", "all_double", "all_kitti"] + +#vio_euroc = ["vio", "runtwice", "all_meth", "all_double", "all_euroc"] +#vio_tumvi = ["vio", "tumvivio", "runtwice", "all_meth", "all_double", "more_tumvi"] +#vo_kitti = ["vo", 
"kittivo", "runtwice", "all_meth", "all_double", "all_kitti"] + + + +############################################################################## +## Revision is the primary subfolder name where generated configs are placed +############################################################################## + +[_batch] +revision = "01_iccv_all" +#revision = "02_iccv_runtime" diff --git a/data/iccv21/experiments-iccv.toml b/data/iccv21/experiments-iccv.toml new file mode 100644 index 0000000..6e1d66f --- /dev/null +++ b/data/iccv21/experiments-iccv.toml @@ -0,0 +1,593 @@ +[options] +base_path = "$config_dir" +output_path = "./tables/experiments-iccv" +cache_dir = "./tables/cache" +show_values_failed_runs = false +screenread = true +#overwrite_cache = true + + +################### +## abbreviate some sequence names for more compact tables / legends +[seq_displayname_mapping] +"tumvi-corridor1" = "tumvi-corr1" +"tumvi-corridor2" = "tumvi-corr2" +"tumvi-magistrale1" = "tumvi-mag1" +"tumvi-magistrale2" = "tumvi-mag2" + + +################### +## define method names in table headers / legends globally +[[substitutions]] +DISPLAY_NAME_SQRTVIO64 = "$\\sqrt{VIO}$-64" +DISPLAY_NAME_SQRTVIO32 = "$\\sqrt{VIO}$-32" +DISPLAY_NAME_SQRTVO64 = "$\\sqrt{VO}$-64" +DISPLAY_NAME_SQRTVO32 = "$\\sqrt{VO}$-32" +DISPLAY_NAME_SQVIO64 = "$VIO$-64" +DISPLAY_NAME_SQVIO32 = "$VIO$-32" +DISPLAY_NAME_SQVO64 = "$VO$-64" +DISPLAY_NAME_SQVO32 = "$VO$-32" + + +################### +## where to find experimental runs +[[substitutions]] + +EXP_PATTERN_VIO = "01_iccv_all/*-*/vio_*/" +EXP_PATTERN_VO = "01_iccv_all/*-*/vo_*/" + +#EXP_PATTERN_VIO = "02_iccv_runtime/*-*/vio_*/" +#EXP_PATTERN_VO = "02_iccv_runtime/*-*/vo_*/" + + +################### +## which kind of plots to show +[[substitutions]] +SHOW_TRAJECTORY_PLOTS = true +SHOW_EIGENVALUE_PLOTS = true +SHOW_NULLSPACE_PLOTS = true + + +################### +## define which sequences to show in plots +[[substitutions]] +SEQUENCES_EUROC = [ + "eurocMH01", + 
"eurocMH02", + "eurocMH03", + "eurocMH04", + "eurocMH05", + "eurocV101", + "eurocV102", + "eurocV103", + "eurocV201", + "eurocV202", +] +SEQUENCES_TUMVI = [ + "tumvi-corridor1", + "tumvi-corridor2", + "tumvi-magistrale1", + "tumvi-magistrale2", + "tumvi-room1", + "tumvi-room2", + "tumvi-slides1", + "tumvi-slides2", +] +SEQUENCES_KITTI = [ + "kitti00", + "kitti02", + "kitti03", + "kitti04", + "kitti05", + "kitti06", + "kitti07", + "kitti08", + "kitti09", + "kitti10", +] + + +################### +## VIO experiments +[[experiments]] +name = "vio_sqrt-nsllt-qr-64" +display_name = "${DISPLAY_NAME_SQRTVIO64}${}^{\\textrm{NS}}_{\\textrm{QR}}$" +pattern = "${EXP_PATTERN_VIO}/*_sqrt-nsllt-qr_double_*" +[[experiments]] +name = "vio_sqrt-nsllt-qr-32" +display_name = "${DISPLAY_NAME_SQRTVIO32}${}^{\\textrm{NS}}_{\\textrm{QR}}$" +pattern = "${EXP_PATTERN_VIO}/*_sqrt-nsllt-qr_float_*" +[[experiments]] +name = "vio_sqrt-sc-sc-64" +display_name = "${DISPLAY_NAME_SQRTVIO64}${}^{\\textrm{SC}}_{\\textrm{SC}}$" +pattern = "${EXP_PATTERN_VIO}/*_sqrt-sc-sc_double_*" +[[experiments]] +name = "vio_sqrt-sc-sc-32" +display_name = "${DISPLAY_NAME_SQRTVIO32}${}^{\\textrm{SC}}_{\\textrm{SC}}$" +pattern = "${EXP_PATTERN_VIO}/*_sqrt-sc-sc_float_*" +[[experiments]] +name = "vio_sq-sc-sc-64" +display_name = "${DISPLAY_NAME_SQVIO64}${}^{\\textrm{SC}}_{\\textrm{SC}}$" +pattern = "${EXP_PATTERN_VIO}/*_sq-sc-sc_double_*" +[[experiments]] +name = "vio_sq-sc-sc-32" +display_name = "${DISPLAY_NAME_SQVIO32}${}^{\\textrm{SC}}_{\\textrm{SC}}$" +pattern = "${EXP_PATTERN_VIO}/*_sq-sc-sc_float_*" + +################### +## VIO experiments -- aliases with shorter display names for plots +[[experiments]] +name = "vio_sqrt-64" +display_name = "${DISPLAY_NAME_SQRTVIO64}" +extend = "vio_sqrt-nsllt-qr-64" +[[experiments]] +name = "vio_sqrt-32" +display_name = "${DISPLAY_NAME_SQRTVIO32}" +extend = "vio_sqrt-nsllt-qr-32" +[[experiments]] +name = "vio_sq-64" +display_name = "${DISPLAY_NAME_SQVIO64}" +extend = 
"vio_sq-sc-sc-64" +[[experiments]] +name = "vio_sq-32" +display_name = "${DISPLAY_NAME_SQVIO32}" +extend = "vio_sq-sc-sc-32" + + + +################### +## VO experiments +[[experiments]] +name = "vo_sqrt-nsllt-qr-64" +display_name = "${DISPLAY_NAME_SQRTVO64}${}^{\\textrm{NS}}_{\\textrm{QR}}$" +pattern = "${EXP_PATTERN_VO}/*_sqrt-nsllt-qr_double_*" +[[experiments]] +name = "vo_sqrt-nsllt-qr-32" +display_name = "${DISPLAY_NAME_SQRTVO32}${}^{\\textrm{NS}}_{\\textrm{QR}}$" +pattern = "${EXP_PATTERN_VO}/*_sqrt-nsllt-qr_float_*" +[[experiments]] +name = "vo_sqrt-sc-sc-64" +display_name = "${DISPLAY_NAME_SQRTVO64}${}^{\\textrm{SC}}_{\\textrm{SC}}$" +pattern = "${EXP_PATTERN_VO}/*_sqrt-sc-sc_double_*" +[[experiments]] +name = "vo_sqrt-sc-sc-32" +display_name = "${DISPLAY_NAME_SQRTVO32}${}^{\\textrm{SC}}_{\\textrm{SC}}$" +pattern = "${EXP_PATTERN_VO}/*_sqrt-sc-sc_float_*" +[[experiments]] +name = "vo_sq-sc-sc-64" +display_name = "${DISPLAY_NAME_SQVO64}${}^{\\textrm{SC}}_{\\textrm{SC}}$" +pattern = "${EXP_PATTERN_VO}/*_sq-sc-sc_double_*" +[[experiments]] +name = "vo_sq-sc-sc-32" +display_name = "${DISPLAY_NAME_SQVO32}${}^{\\textrm{SC}}_{\\textrm{SC}}$" +pattern = "${EXP_PATTERN_VO}/*_sq-sc-sc_float_*" + +################### +## VIO experiments -- aliases with shorter display names for plots +[[experiments]] +name = "vo_sqrt-64" +display_name = "${DISPLAY_NAME_SQRTVO64}" +extend = "vo_sqrt-nsllt-qr-64" +[[experiments]] +name = "vo_sqrt-32" +display_name = "${DISPLAY_NAME_SQRTVO32}" +extend = "vo_sqrt-nsllt-qr-32" +[[experiments]] +name = "vo_sq-64" +display_name = "${DISPLAY_NAME_SQVO64}" +extend = "vo_sq-sc-sc-64" +[[experiments]] +name = "vo_sq-32" +display_name = "${DISPLAY_NAME_SQVO32}" +extend = "vo_sq-sc-sc-32" + + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Accuracy VIO & VO" +show = true + 
+################### +## accuracy results table for all VIO sequences +[[results]] +class = "results_table" +name = "vio ate" +show = true +export_latex = "vio_ate" +metrics_legend = false +rotate_header = true +escape_latex_header = false +vertical_bars = false +color_failed = "" +filter_regex = "euroc|tumvi.*1|tumvi.*2" +experiments = [ + "vio_sqrt-nsllt-qr-64", + "vio_sqrt-nsllt-qr-32", + "vio_sq-sc-sc-64", + "vio_sq-sc-sc-32", +] +metrics = [ + "ate_rmse", +] + +################### +## accuracy results table for all VO sequences +[[results]] +class = "results_table" +name = "vo ate" +show = true +export_latex = "vo_ate" +metrics_legend = false +rotate_header = true +escape_latex_header = false +vertical_bars = false +color_failed = "" +#filter_regex = "kitti00|kitti0[2-9]|kitti10" +#filter_regex = "kitti" +override_as_failed = ["kitti01"] +experiments = [ + "vo_sqrt-nsllt-qr-64", + "vo_sqrt-nsllt-qr-32", + "vo_sq-sc-sc-64", + "vo_sq-sc-sc-32", +] +metrics = [ + {name = "ate_rmse", failed_threshold = 100, decimals = 2}, +] + + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Runtime VIO & VO" +show = true + +################### +## runtime results table for all VIO sequences +[[results]] +class = "results_table" +name = "vio time optimize / marginalize" +show = true +export_latex = "vio_runtime" +metrics_legend = false +rotate_header = true +escape_latex_header = false +vertical_bars = false +color_failed = "" +multirow = false +filter_regex = "euroc|tumvi.*1|tumvi.*2" +experiments = [ + "vio_sqrt-nsllt-qr-64", + "vio_sqrt-nsllt-qr-32", + "vio_sq-sc-sc-64", + "vio_sq-sc-sc-32", +] +metrics = [ + {name = "time_opt", decimals = 1}, + {name = "time_marg", decimals = 1}, +] + +################### +## runtime results table for all VO sequences +[[results]] +class = "results_table" +name = "vo time optimize / 
marginalize" +show = true +export_latex = "vo_runtime" +metrics_legend = false +rotate_header = true +escape_latex_header = false +vertical_bars = false +color_failed = "" +multirow = false +#filter_regex = "kitti00|kitti0[2-9]|kitti10" +#filter_regex = "kitti" +override_as_failed = ["kitti01"] +experiments = [ + "vo_sqrt-nsllt-qr-64", + "vo_sqrt-nsllt-qr-32", + "vo_sq-sc-sc-64", + "vo_sq-sc-sc-32", +] +metrics = [ + "time_opt", + "time_marg", +] + + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Ablation VIO & VO" +show = true + +################### +## ablation study table for VIO summarized over all EuRoC sequences +[[results]] +class = "summarize_sequences_table" +name = "summary euroc ablation" +export_latex = "vio_euroc_ablation" +escape_latex_header = false +filter_regex = "euroc" +experiments = [ + "vio_sqrt-nsllt-qr-64", + "vio_sqrt-nsllt-qr-32", + "vio_sqrt-sc-sc-64", + "vio_sqrt-sc-sc-32", +] +metrics = [ + {name = "ate_rmse", display_name = "ATE [m]", geometric_mean = false, decimals = 3}, + {name = "time_exec_realtimefactor", display_name = "real-time", geometric_mean = true, format_string = "{:.1f}x"}, + {name = "time_exec", display_name = "t total [s]", geometric_mean = false, decimals = 1}, + {name = "time_opt", display_name = "t opt [s]", geometric_mean = false, decimals = 1}, + {name = "time_marg", display_name = "t marg [s]", geometric_mean = false, decimals = 1}, +# {name = "time_marg", display_name = "t marg (frac. of meas)", relative_to_metric = "time_measure", geometric_mean = true, format_string = "{:.2f}"}, +# {name = "time_opt", display_name = "t opt (frac. of meas)", relative_to_metric = "time_measure", geometric_mean = true, format_string = "{:.2f}"}, +# {name = "time_marg", display_name = "t marg (frac. 
of total)", relative_to_metric = "time_exec", geometric_mean = true, format_string = "{:.2f}"}, +# {name = "time_opt", display_name = "t opt (frac. of total)", relative_to_metric = "time_exec", geometric_mean = true, format_string = "{:.2f}"}, + {name = "avg_num_it", display_name = "avg #it", geometric_mean = false, decimals = 1}, + {name = "avg_num_it_failed", display_name = "avg #it-failed", geometric_mean = false, decimals = 1}, +] + +################### +## ablation study table for VO summarized over all Kitti sequences (w/o kitti01) +[[results]] +class = "summarize_sequences_table" +name = "summary kitti (w/o kitti01) ablation" +show = true +export_latex = "vo_kitti_ablation" +escape_latex_header = false +filter_regex = "kitti00|kitti0[2-9]|kitti10" +experiments = [ + "vo_sqrt-nsllt-qr-64", + "vo_sqrt-nsllt-qr-32", + "vo_sqrt-sc-sc-64", + "vo_sqrt-sc-sc-32", +] +metrics = [ + {name = "ate_rmse", display_name = "ATE [m]", geometric_mean = false, decimals = 3}, + {name = "time_exec_realtimefactor", display_name = "real-time", geometric_mean = true, format_string = "{:.1f}x"}, + {name = "time_exec", display_name = "t total [s]", geometric_mean = false, decimals = 1}, + {name = "time_opt", display_name = "t opt [s]", geometric_mean = false, decimals = 1}, + {name = "time_marg", display_name = "t marg [s]", geometric_mean = false, decimals = 1}, + {name = "avg_num_it", display_name = "avg #it", geometric_mean = false, decimals = 1}, + {name = "avg_num_it_failed", display_name = "avg #it-failed", geometric_mean = false, decimals = 1}, +] + + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Trajectories EuRoC" +show = "" + +################### +## template for trajectory plots for a set of sequences +[[templates]] +_name = "TRAJECTORY" +_arguments = ["SEQUENCE", "EXPERIMENTS", "AXES"] +class = "plot" +name = 
"trajectory" +type = "trajectory" +figsize = [4.0,2.5] +trajectory_axes = "" +sequence = "" +experiments = "" + +################### +## instantiate trajectory plot template for EuRoC +[[results]] + [results._template] + _name = "TRAJECTORY" + AXES = "xy" + SEQUENCE = {_argument = "product", _value = ""} + EXPERIMENTS = [ + "vio_sqrt-32", + "vio_sq-32", + ] + + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Eigenvalues EuRoC" +show = "" + +################### +## template for eigenvalue plots for a set of sequences +[[templates]] +_name = "EIGENVALUES" +_arguments = ["SEQUENCE", "EXPERIMENTS"] +class = "plot" +type = "eigenvalues" +name = "vio" +#legend_loc = "upper left" +#ylim.top = 1e6 +#ylim.bottom = -1e6 +sequence = "" +experiments = "" + +################### +## instantiate eigenvalue plot template for EuRoC +[[results]] + [results._template] + _name = "EIGENVALUES" + SEQUENCE = {_argument = "product", _value = ""} + EXPERIMENTS = [ + "vio_sqrt-64", + "vio_sqrt-32", + "vio_sq-64", + "vio_sq-32", + ] + + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Nullspace EuRoC" +show = "" + +################### +## template for nullspace plots for a set of sequences +[[templates]] +_name = "NULLSPACE" +_arguments = ["SEQUENCE", "EXPERIMENTS"] +class = "plot" +type = "nullspace" +name = "vio" +#legend_loc = "lower right" +sequence = "" +experiments = "" + +################### +## instantiate nullspace plot template for EuRoC +[[results]] + [results._template] + _name = "NULLSPACE" + SEQUENCE = {_argument = "product", _value = ""} + EXPERIMENTS = [ + "vio_sqrt-64", + "vio_sqrt-32", + "vio_sq-64", + "vio_sq-32", + ] + + 
+################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Trajectories TUMVI" +show = "" + +################### +## instantiate trajectory plot template for TUMVI +[[results]] + [results._template] + _name = "TRAJECTORY" + AXES = "xy" + SEQUENCE = {_argument = "product", _value = ""} + EXPERIMENTS = [ + "vio_sqrt-32", + "vio_sq-32", + ] + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Eigenvalues TUMVI" +show = "" + +################### +## instantiate eigenvalue plot template for TUMVI +[[results]] + [results._template] + _name = "EIGENVALUES" + SEQUENCE = {_argument = "product", _value = ""} + EXPERIMENTS = [ + "vio_sqrt-64", + "vio_sqrt-32", + "vio_sq-64", + "vio_sq-32", + ] + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Nullspace TUMVI" +show = "" + +################### +## instantiate nullspace plot template for TUMVI +[[results]] + [results._template] + _name = "NULLSPACE" + SEQUENCE = {_argument = "product", _value = ""} + EXPERIMENTS = [ + "vio_sqrt-64", + "vio_sqrt-32", + "vio_sq-64", + "vio_sq-32", + ] + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Trajectories Kitti" +show = "" + +################### +## instantiate trajectory plot template for Kitti +[[results]] + [results._template] + _name = "TRAJECTORY" + AXES = "xz" + SEQUENCE = {_argument = "product", _value = ""} + EXPERIMENTS = [ + "vo_sqrt-32", + "vo_sq-32", + ] + + 
+################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Eigenvalues Kitti" +show = "" + +################### +## instantiate eigenvalue plot template for Kitti +[[results]] + [results._template] + _name = "EIGENVALUES" + SEQUENCE = {_argument = "product", _value = ""} + EXPERIMENTS = [ + "vo_sqrt-64", + "vo_sqrt-32", + "vo_sq-64", + "vo_sq-32", + ] + + +################################################################################ +################################################################################ +[[results]] +class = "section" +name = "Nullspace Kitti" +show = "" + +################### +## instantiate nullspace plot template for Kitti +[[results]] + [results._template] + _name = "NULLSPACE" + SEQUENCE = {_argument = "product", _value = ""} + EXPERIMENTS = [ + "vo_sqrt-64", + "vo_sqrt-32", + "vo_sq-64", + "vo_sq-32", + ] + + + diff --git a/data/kitti_config.json b/data/kitti_config.json new file mode 100644 index 0000000..6200044 --- /dev/null +++ b/data/kitti_config.json @@ -0,0 +1,56 @@ +{ + "value0": { + "config.optical_flow_type": "frame_to_frame", + "config.optical_flow_detection_grid_size": 30, + "config.optical_flow_max_recovered_dist2": 0.04, + "config.optical_flow_pattern": 51, + "config.optical_flow_max_iterations": 5, + "config.optical_flow_epipolar_error": 0.001, + "config.optical_flow_levels": 4, + "config.optical_flow_skip_frames": 1, + "config.vio_linearization_type": "ABS_QR", + "config.vio_sqrt_marg": true, + "config.vio_max_states": 3, + "config.vio_max_kfs": 7, + "config.vio_min_frames_after_kf": 1, + "config.vio_new_kf_keypoints_thresh": 0.7, + "config.vio_debug": false, + "config.vio_extended_logging": false, + "config.vio_obs_std_dev": 0.5, + "config.vio_obs_huber_thresh": 1.0, + "config.vio_min_triangulation_dist": 0.05, + "config.vio_outlier_threshold": 3.0, + 
"config.vio_filter_iteration": 4, + "config.vio_max_iterations": 7, + "config.vio_enforce_realtime": false, + "config.vio_use_lm": false, + "config.vio_lm_lambda_initial": 1e-4, + "config.vio_lm_lambda_min": 1e-6, + "config.vio_lm_lambda_max": 1e2, + "config.vio_lm_landmark_damping_variant": 1, + "config.vio_lm_pose_damping_variant": 1, + "config.vio_scale_jacobian": false, + "config.vio_init_pose_weight": 1e8, + "config.vio_init_ba_weight": 1e1, + "config.vio_init_bg_weight": 1e2, + "config.vio_marg_lost_landmarks": true, + "config.vio_kf_marg_feature_ratio": 0.1, + "config.mapper_obs_std_dev": 0.25, + "config.mapper_obs_huber_thresh": 1.5, + "config.mapper_detection_num_points": 800, + "config.mapper_num_frames_to_match": 30, + "config.mapper_frames_to_match_threshold": 0.04, + "config.mapper_min_matches": 20, + "config.mapper_ransac_threshold": 5e-5, + "config.mapper_min_track_length": 5, + "config.mapper_max_hamming_distance": 70, + "config.mapper_second_best_test_ratio": 1.2, + "config.mapper_bow_num_bits": 16, + "config.mapper_min_triangulation_dist": 0.07, + "config.mapper_no_factor_weights": false, + "config.mapper_use_factors": true, + "config.mapper_use_lm": true, + "config.mapper_lm_lambda_min": 1e-32, + "config.mapper_lm_lambda_max": 1e3 + } +} diff --git a/data/tumvi_512_config.json b/data/tumvi_512_config.json new file mode 100644 index 0000000..08a4be7 --- /dev/null +++ b/data/tumvi_512_config.json @@ -0,0 +1,56 @@ +{ + "value0": { + "config.optical_flow_type": "frame_to_frame", + "config.optical_flow_detection_grid_size": 40, + "config.optical_flow_max_recovered_dist2": 0.04, + "config.optical_flow_pattern": 51, + "config.optical_flow_max_iterations": 5, + "config.optical_flow_epipolar_error": 0.005, + "config.optical_flow_levels": 3, + "config.optical_flow_skip_frames": 1, + "config.vio_linearization_type": "ABS_QR", + "config.vio_sqrt_marg": true, + "config.vio_max_states": 3, + "config.vio_max_kfs": 7, + "config.vio_min_frames_after_kf": 5, + 
"config.vio_new_kf_keypoints_thresh": 0.7, + "config.vio_debug": false, + "config.vio_extended_logging": false, + "config.vio_obs_std_dev": 0.5, + "config.vio_obs_huber_thresh": 1.0, + "config.vio_min_triangulation_dist": 0.05, + "config.vio_outlier_threshold": 3.0, + "config.vio_filter_iteration": 4, + "config.vio_max_iterations": 7, + "config.vio_enforce_realtime": false, + "config.vio_use_lm": false, + "config.vio_lm_lambda_initial": 1e-7, + "config.vio_lm_lambda_min": 1e-6, + "config.vio_lm_lambda_max": 1e2, + "config.vio_lm_landmark_damping_variant": 1, + "config.vio_lm_pose_damping_variant": 1, + "config.vio_scale_jacobian": false, + "config.vio_init_pose_weight": 1e8, + "config.vio_init_ba_weight": 1e1, + "config.vio_init_bg_weight": 1e2, + "config.vio_marg_lost_landmarks": true, + "config.vio_kf_marg_feature_ratio": 0.1, + "config.mapper_obs_std_dev": 0.25, + "config.mapper_obs_huber_thresh": 1.5, + "config.mapper_detection_num_points": 800, + "config.mapper_num_frames_to_match": 30, + "config.mapper_frames_to_match_threshold": 0.04, + "config.mapper_min_matches": 20, + "config.mapper_ransac_threshold": 5e-5, + "config.mapper_min_track_length": 5, + "config.mapper_max_hamming_distance": 70, + "config.mapper_second_best_test_ratio": 1.2, + "config.mapper_bow_num_bits": 16, + "config.mapper_min_triangulation_dist": 0.07, + "config.mapper_no_factor_weights": false, + "config.mapper_use_factors": true, + "config.mapper_use_lm": false, + "config.mapper_lm_lambda_min": 1e-32, + "config.mapper_lm_lambda_max": 1e3 + } +} diff --git a/data/tumvi_512_ds_calib.json b/data/tumvi_512_ds_calib.json new file mode 100644 index 0000000..a9a441c --- /dev/null +++ b/data/tumvi_512_ds_calib.json @@ -0,0 +1,381 @@ +{ + "value0": { + "T_imu_cam": [ + { + "px": 0.04548094812071685, + "py": -0.07145370002838907, + "pz": -0.046315428444919249, + "qx": -0.013392900690257393, + "qy": -0.6945866755293793, + "qz": 0.7192437840259219, + "qw": 0.007639340823570553 + }, + { + "px": 
-0.05546984222234079, + "py": -0.06999334244486549, + "pz": -0.049092582974927929, + "qx": -0.01340980138125811, + "qy": -0.7115668842588793, + "qz": 0.7024477338114514, + "qw": 0.007741299385907546 + } + ], + "intrinsics": [ + { + "camera_type": "ds", + "intrinsics": { + "fx": 158.28600034966977, + "fy": 158.2743455478755, + "cx": 254.96116578191653, + "cy": 256.8894394501779, + "xi": -0.17213086034353243, + "alpha": 0.5931177593944744 + } + }, + { + "camera_type": "ds", + "intrinsics": { + "fx": 157.91830144176309, + "fy": 157.8901286125632, + "cx": 252.56547609702953, + "cy": 255.02489416194656, + "xi": -0.17114780716007858, + "alpha": 0.5925543396658507 + } + } + ], + "resolution": [ + [ + 512, + 512 + ], + [ + 512, + 512 + ] + ], + "calib_accel_bias": [ + -0.0011791549110128743, + -0.007714616665106584, + 0.00006733124206236036, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "calib_gyro_bias": [ + -0.00007403817898158015, + 0.0000024090695654577384, + -0.00001847724206614636, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "imu_update_rate": 200.0, + "accel_noise_std": [0.016, 0.016, 0.016], + "gyro_noise_std": [0.000282, 0.000282, 0.000282], + "accel_bias_std": [0.001, 0.001, 0.001], + "gyro_bias_std": [0.0001, 0.0001, 0.0001], + "T_mocap_world": { + "px": 0.0, + "py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + }, + "T_imu_marker": { + "px": 0.0, + "py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + }, + "mocap_time_offset_ns": 0, + "mocap_to_imu_offset_ns": 0, + "cam_time_offset_ns": 0, + "vignette": [ + { + "value0": 0, + "value1": 10000000000, + "value2": [ + [ + 0.9899372881888493 + ], + [ + 0.9931178795891834 + ], + [ + 0.9984407803536122 + ], + [ + 0.996964178110498 + ], + [ + 0.9956605833036875 + ], + [ + 0.9973489553595134 + ], + [ + 0.9985691891331739 + ], + [ + 1.0 + ], + [ + 0.9966866746094087 + ], + [ + 0.9958706368580655 + ], + [ + 0.9957807702286541 + ], + [ + 
0.9927911027519979 + ], + [ + 0.9838733297580375 + ], + [ + 0.9719198145611155 + ], + [ + 0.9566058020047663 + ], + [ + 0.9378032130699772 + ], + [ + 0.9194004905192558 + ], + [ + 0.9024065312353705 + ], + [ + 0.8845584821495792 + ], + [ + 0.8624817202873822 + ], + [ + 0.8449163722596605 + ], + [ + 0.8239771250783265 + ], + [ + 0.802936456716557 + ], + [ + 0.7811632337206126 + ], + [ + 0.758539495055499 + ], + [ + 0.7341063700839284 + ], + [ + 0.7100276770866111 + ], + [ + 0.6874554816035845 + ], + [ + 0.6641863547515791 + ], + [ + 0.6412571843680321 + ], + [ + 0.6228323769487529 + ], + [ + 0.6030940673078892 + ], + [ + 0.5598212055840929 + ], + [ + 0.4377741720108287 + ], + [ + 0.24026566347390075 + ], + [ + 0.08222347033252997 + ], + [ + 0.03489917830551274 + ], + [ + 0.029153547460757366 + ], + [ + 0.030477276736751079 + ], + [ + 0.01 + ], + [ + 0.01 + ], + [ + 0.01 + ], + [ + 0.01 + ] + ] + }, + { + "value0": 0, + "value1": 10000000000, + "value2": [ + [ + 0.9982144512705363 + ], + [ + 0.9961165266369948 + ], + [ + 0.9892083034957775 + ], + [ + 0.9870647327777502 + ], + [ + 0.9863145153017516 + ], + [ + 0.9847218081828388 + ], + [ + 0.9828536767530198 + ], + [ + 0.9808724309018916 + ], + [ + 0.9797280962140972 + ], + [ + 0.9784762156394231 + ], + [ + 0.9771015659455632 + ], + [ + 0.9730709135912151 + ], + [ + 0.9650117828320545 + ], + [ + 0.9512319166574508 + ], + [ + 0.9346333455674182 + ], + [ + 0.9182278021221385 + ], + [ + 0.9025351121100457 + ], + [ + 0.8845788263796353 + ], + [ + 0.8668369304290567 + ], + [ + 0.8469326512989164 + ], + [ + 0.8280417962134851 + ], + [ + 0.8072181976634909 + ], + [ + 0.788496496511515 + ], + [ + 0.7691952807935 + ], + [ + 0.7459299901795546 + ], + [ + 0.7240742868053557 + ], + [ + 0.7009088004583106 + ], + [ + 0.6799474150660547 + ], + [ + 0.6586062723412073 + ], + [ + 0.6406388063878976 + ], + [ + 0.6239758367746359 + ], + [ + 0.6026729560290718 + ], + [ + 0.5455288113795859 + ], + [ + 0.3945979030583888 + ], + [ + 
0.1981919324270445 + ], + [ + 0.06754460426558173 + ], + [ + 0.028910567836990938 + ], + [ + 0.02842995279817033 + ], + [ + 0.0299618927515648 + ], + [ + 0.01 + ], + [ + 0.01 + ], + [ + 0.01 + ], + [ + 0.01 + ] + ] + } + ] + } +} diff --git a/data/tumvi_512_eucm_calib.json b/data/tumvi_512_eucm_calib.json new file mode 100644 index 0000000..98d2cff --- /dev/null +++ b/data/tumvi_512_eucm_calib.json @@ -0,0 +1,381 @@ +{ + "value0": { + "T_imu_cam": [ + { + "px": 0.04548094812071685, + "py": -0.07145370002838907, + "pz": -0.046315428444919249, + "qx": -0.013392900690257393, + "qy": -0.6945866755293793, + "qz": 0.7192437840259219, + "qw": 0.007639340823570553 + }, + { + "px": -0.05546984222234079, + "py": -0.06999334244486549, + "pz": -0.049092582974927929, + "qx": -0.01340980138125811, + "qy": -0.7115668842588793, + "qz": 0.7024477338114514, + "qw": 0.007741299385907546 + } + ], + "intrinsics": [ + { + "camera_type": "eucm", + "intrinsics": { + "fx": 191.14799836282189, + "fy": 191.13150963902818, + "cx": 254.9585771534443, + "cy": 256.88154645599448, + "alpha": 0.6291060881178562, + "beta": 1.0418067381860868 + } + }, + { + "camera_type": "eucm", + "intrinsics": { + "fx": 190.47905769226575, + "fy": 190.44567561523216, + "cx": 252.55882115024333, + "cy": 255.02104780344699, + "alpha": 0.6281040684983363, + "beta": 1.041250259119081 + } + } + ], + "resolution": [ + [ + 512, + 512 + ], + [ + 512, + 512 + ] + ], + "calib_accel_bias": [ + -0.0011791549110128743, + -0.007714616665106584, + 0.00006733124206236036, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "calib_gyro_bias": [ + -0.00007403817898158015, + 0.0000024090695654577384, + -0.00001847724206614636, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "imu_update_rate": 200.0, + "accel_noise_std": [0.016, 0.016, 0.016], + "gyro_noise_std": [0.000282, 0.000282, 0.000282], + "accel_bias_std": [0.001, 0.001, 0.001], + "gyro_bias_std": [0.0001, 0.0001, 0.0001], + "T_mocap_world": { + "px": 0.0, + 
"py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + }, + "T_imu_marker": { + "px": 0.0, + "py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + }, + "mocap_time_offset_ns": 0, + "mocap_to_imu_offset_ns": 0, + "cam_time_offset_ns": 0, + "vignette": [ + { + "value0": 0, + "value1": 10000000000, + "value2": [ + [ + 0.9899372881888493 + ], + [ + 0.9931178795891834 + ], + [ + 0.9984407803536122 + ], + [ + 0.996964178110498 + ], + [ + 0.9956605833036875 + ], + [ + 0.9973489553595134 + ], + [ + 0.9985691891331739 + ], + [ + 1.0 + ], + [ + 0.9966866746094087 + ], + [ + 0.9958706368580655 + ], + [ + 0.9957807702286541 + ], + [ + 0.9927911027519979 + ], + [ + 0.9838733297580375 + ], + [ + 0.9719198145611155 + ], + [ + 0.9566058020047663 + ], + [ + 0.9378032130699772 + ], + [ + 0.9194004905192558 + ], + [ + 0.9024065312353705 + ], + [ + 0.8845584821495792 + ], + [ + 0.8624817202873822 + ], + [ + 0.8449163722596605 + ], + [ + 0.8239771250783265 + ], + [ + 0.802936456716557 + ], + [ + 0.7811632337206126 + ], + [ + 0.758539495055499 + ], + [ + 0.7341063700839284 + ], + [ + 0.7100276770866111 + ], + [ + 0.6874554816035845 + ], + [ + 0.6641863547515791 + ], + [ + 0.6412571843680321 + ], + [ + 0.6228323769487529 + ], + [ + 0.6030940673078892 + ], + [ + 0.5598212055840929 + ], + [ + 0.4377741720108287 + ], + [ + 0.24026566347390075 + ], + [ + 0.08222347033252997 + ], + [ + 0.03489917830551274 + ], + [ + 0.029153547460757366 + ], + [ + 0.030477276736751079 + ], + [ + 0.01 + ], + [ + 0.01 + ], + [ + 0.01 + ], + [ + 0.01 + ] + ] + }, + { + "value0": 0, + "value1": 10000000000, + "value2": [ + [ + 0.9982144512705363 + ], + [ + 0.9961165266369948 + ], + [ + 0.9892083034957775 + ], + [ + 0.9870647327777502 + ], + [ + 0.9863145153017516 + ], + [ + 0.9847218081828388 + ], + [ + 0.9828536767530198 + ], + [ + 0.9808724309018916 + ], + [ + 0.9797280962140972 + ], + [ + 0.9784762156394231 + ], + [ + 0.9771015659455632 + ], + [ + 0.9730709135912151 
+ ], + [ + 0.9650117828320545 + ], + [ + 0.9512319166574508 + ], + [ + 0.9346333455674182 + ], + [ + 0.9182278021221385 + ], + [ + 0.9025351121100457 + ], + [ + 0.8845788263796353 + ], + [ + 0.8668369304290567 + ], + [ + 0.8469326512989164 + ], + [ + 0.8280417962134851 + ], + [ + 0.8072181976634909 + ], + [ + 0.788496496511515 + ], + [ + 0.7691952807935 + ], + [ + 0.7459299901795546 + ], + [ + 0.7240742868053557 + ], + [ + 0.7009088004583106 + ], + [ + 0.6799474150660547 + ], + [ + 0.6586062723412073 + ], + [ + 0.6406388063878976 + ], + [ + 0.6239758367746359 + ], + [ + 0.6026729560290718 + ], + [ + 0.5455288113795859 + ], + [ + 0.3945979030583888 + ], + [ + 0.1981919324270445 + ], + [ + 0.06754460426558173 + ], + [ + 0.028910567836990938 + ], + [ + 0.02842995279817033 + ], + [ + 0.0299618927515648 + ], + [ + 0.01 + ], + [ + 0.01 + ], + [ + 0.01 + ], + [ + 0.01 + ] + ] + } + ] + } +} diff --git a/doc/BatchEvaluation.md b/doc/BatchEvaluation.md new file mode 100644 index 0000000..d3788ea --- /dev/null +++ b/doc/BatchEvaluation.md @@ -0,0 +1,261 @@ +# Batch Evaluation of Square Root Optimization and Marginalization + +In this tutorial we detail how you can use the batch evaluation +scripts to reproduce the results of the ICCV'21 paper Demmel et al., +"Square Root Marginalization for Sliding-Window Bundle Adjustment". +In the paper we discuss how square root estimation techniques can be +used in Basalt's optimization-based sliding-window odometry to make +optimization faster and marginalization numerially more stable. See +the [project page](https://go.vision.in.tum.de/rootvo) for further +details. + +Basalt's VIO/VO now runs with single-precision floating point numbers +by default, using the new square root formulation. The conventional +squared (Hessian-based) formualtion is still available via config +options. 
For manual testing, you can pass `--use-double true` or +`--use-double false` (default) as command line arguments to +`basalt_vio`, and change `config.vio_sqrt_marg` in the config file, +which controls if the marginalization prior is stored in Hessian or +Jacobian form (default: `true`), as well as +`config.vio_linearization_type` in the config file, which controls +whether to use Schur complement, or nullspace projection and +QR-decomposition for optimization and marginalization (`"ABS_SC"` or +`"ABS_QR"` (default)). + +In the following tutorial we systematically compare the different +formulations in single and double precision to reproduce the results +from the ICCV'21 paper. You can of course adjust the correspondig +config files to evaluate other aspects of the system. + +## Prerequisites + + 1. **Source installation of Basalt:** The batch evaluation scripts + by default assume that the `build` folder is directly inside the + source checkout `basalt`. See + [README.md](../README.md#source-installation-for-ubuntu-1804-and-macos-1014-mojave) + for instructions. + 2. **Downloads of the datasets:** We evaluate EuRoC (all 11 + sequneces), TUMVI (euroc format in 512x512 resultion; sequences: + corridor1-2, magistrale1-2, room1-2, slides1-2), and Kitti + Odometry (sequences 00-10). It's recommended to store the data + locally on an SSD to ensure that reading the images is not the + bottleneck during evaluation (on a multicore desktop Basalt runs + many times faster than real-time). There are instructions for + downloading these dataset: [EuRoC](VioMapping.md#euroc-dataset), + [TUMVI](VioMapping.md#tum-vi-dataset), + [KITTI](Vo.md#kitti-dataset). Calibration for EuRoC and TUMVI is + provided in the `data` folder. For KITTI you can use the + `basalt_convert_kitti_calib.py` script to convert the provided + calibration to a Basalt-compatible format (see + [KITTI](Vo.md#kitti-dataset)). + 3. 
**Dependencies of evaluation scripts:** You need pip packages + `py_ubjson`, `matplotlib`, `numpy`, `munch`, `scipy`, `pylatex`, + `toml`. How to install depends on your Python setup (virtualenv, + conda, ...). To just install for the local user with pip you can + use the command `python3 -m pip install --user -U py_ubjson + matplotlib numpy munch scipy pylatex toml`. For generating result + tables and plots you additionally need latexmk and a LaTeX + distribution (Ubuntu: `sudo apt install texlive-latex-extra + latexmk`; macOS with Homebrew: `brew install --cask mactex`). + +## Folder Structure + +The batch evaluation scripts and config files assume a certain folder +structure inside a "parent" folder, since relative paths are used to +find the compiled executable and calibration files. So **it's +important to follow the folder structure**. + +``` +parent-folder/ +├─ basalt/ +│ ├─ build/ +│ │ ├─ basalt_vio +│ │ ├─ ... +│ ├─ data/ +│ │ ├─ euroc_ds_calib.json +│ │ ├─ ... +│ ├─ ... +├─ experiments/ +│ ├─ iccv_tutorial/ +│ │ ├─ basalt_batch_config.toml +│ │ ├─ experiments-iccv.toml +│ │ ├─ 01_iccv_all/ +│ │ │ ├─ ... +│ │ ├─ 02_iccv_runtime/ +│ │ │ ├─ ... +``` + +As a sibling of the `basalt` source checkout we'll have an +`experiments` folder, and inside, a folder `iccv_tutorial` for this +tutorial. Into that folder, we copy the provided +`basalt_batch_config.toml` file that defines the configurations we +want to evaluate and from which we generate individual config files +for each VIO / VO run. We also copy the provided +`experiments-iccv.toml` config file, which defines the results tables +and plots that we generate from the experiments' logs. + +> *Note:* Commands in this tutorial are assumed to be executed from +> within `parent-folder` unless specified otherwise. 
+ +```bash +mkdir -p experiments/iccv_tutorial +cp basalt/data/iccv21/basalt_batch_config.toml experiments/iccv_tutorial/ +cp basalt/data/iccv21/experiments-iccv.toml experiments/iccv_tutorial/ +``` + +## Generate Experimental Configs + +First, edit the copied configuration file +`experiments/iccv_tutorial/basalt_batch_config.toml` and modify all +`"dataset-path"` lines to point to the locations where you downloaded +the datasets to. + +Now, we can generate per-experiment config files: + +```bash +cd experiments/iccv_tutorial/ +../../basalt/scripts/batch/generate-batch-configs.py . +``` + +This will create subfolder `01_iccv_all` containing folders +`vio_euroc`, `vio_tumvi`, and `vo_kitti`, which in turn contain all +generated `basalt_config_...json` files, one for each experiment we +will run. + +## Run Experiments + +We can now run all experiments for those generate configs. Each config +/ sequence combination will automatically be run twice and only the +second run is evaluated, which is meant to ensure that file system +caches are hot. + +Since we also evaluate runtimes, we recommend that you don't use the +machine running the experiments for anything else and also ensure no +expensive background tasks are running during the evaluation. On one +of our desktops with 12 (virtual) cores the total evaluation of all +sequences takes aroudn 3:30h. Your milage may vary of course depending +on the hardware. + +```bash +cd experiments/iccv_tutorial/ +time ../../basalt/scripts/batch/run-all-in.sh 01_iccv_all/ +``` + +Inside `01_iccv_all`, a new folder with the start-time of the +experimental run is created, e.g., `20211006-143137`, and inside that +you can again see the same per-dataset subfolders `vio_euroc`, +`vio_tumvi`, and `vo_kitti`, inside of which there is a folder for +each config / run. Inside these per-run folders you can find log files +including the command line output, which you can inspect in case +something doesn't work. 
+ +In a second terminal, you can check the status of evaluation while it +is running (adjust the argument to match the actual folder name). + +```bash +cd experiments/iccv_tutorial/ +../../basalt/scripts/batch/list-jobs.sh 01_iccv_all/20211006-143137 +``` + +If you see failed experiments for the square root solver in single +precision, don't worry, that is expected. + +## Generate Results Tables and Plots + +After all experimental runs have completed, you can generate a PDF +file with tabulated results and plots, similar to those in the ICCV'21 +paper. + +```bash +cd experiments/iccv_tutorial/ +../../basalt/scripts/batch/generate-tables.py --config experiments-iccv.toml --open +``` + +The results are in the generated `tables/experiments-iccv.pdf` file +(and with the `--open` argument should automatically open with the +default PDF reader). + +## Better Runtime Evaluation + +The experiments above have the extended logging of eigenvalue and +nullspace information enabled, which does cost a little extra +runtime. To get a better runtime comparison, you can re-run the +experiments without this extended logging. The downside is, that you +can only generate the results tables, but not the plots. + +We assume that you have already followed the tutorial above, including +the initial folder setup. For these modified experiments, we redo all +three steps (generating config files; running experiments; generating +results) with slight modifications. + +First, edit the `experiments/iccv_tutorial/basalt_batch_config.toml` +file at the bottom, and uncomment the commented entries in +`_batch.combinations` as well as the commented `revision`. At the same +time, comment out the initially uncommented lines. 
It should look +something like this after the modifications: + +```toml +[_batch.combinations] +#vio_euroc = ["vio", "savetumgt", "extlog", "runtwice", "all_meth", "all_double", "all_euroc"] +#vio_tumvi = ["vio", "tumvivio", "savetumgt", "extlog", "runtwice", "all_meth", "all_double", "more_tumvi"] +#vo_kitti = ["vo", "kittivo", "savetumgt", "extlog", "runtwice", "all_meth", "all_double", "all_kitti"] + +vio_euroc = ["vio", "runtwice", "all_meth", "all_double", "all_euroc"] +vio_tumvi = ["vio", "tumvivio", "runtwice", "all_meth", "all_double", "more_tumvi"] +vo_kitti = ["vo", "kittivo", "runtwice", "all_meth", "all_double", "all_kitti"] +``` + +```toml +[_batch] +#revision = "01_iccv_all" +revision = "02_iccv_runtime" +``` + +You can see that we removed the `savetumgt` and `extlog` named config +elements and that generated config files and results for this second +run of experiments will be placed in `02_iccv_runtime`. + +Now generate config files and start the experimental runs: + +``` +cd experiments/iccv_tutorial/ +../../basalt/scripts/batch/generate-batch-configs.py . +time ../../basalt/scripts/batch/run-all-in.sh 02_iccv_runtime/ +``` + +Before generating the results PDF you need to now edit the +`experiments-iccv.toml` file, point it to the new location for +experimental logs and disable the generation of plots. Check the place +towards the start of the file where substitutions for +`EXP_PATTERN_VIO` and `EXP_PATTERN_VO` are defined, as well as +`SHOW_TRAJECTORY_PLOTS`, `SHOW_EIGENVALUE_PLOTS`, and +`SHOW_NULLSPACE_PLOTS`. 
After your modifications, that section should +look something like: + +```toml +################### +## where to find experimental runs +[[substitutions]] + +#EXP_PATTERN_VIO = "01_iccv_all/*-*/vio_*/" +#EXP_PATTERN_VO = "01_iccv_all/*-*/vo_*/" + +EXP_PATTERN_VIO = "02_iccv_runtime/*-*/vio_*/" +EXP_PATTERN_VO = "02_iccv_runtime/*-*/vo_*/" + + +################### +## which kind of plots to show +[[substitutions]] +SHOW_TRAJECTORY_PLOTS = false +SHOW_EIGENVALUE_PLOTS = false +SHOW_NULLSPACE_PLOTS = false +``` + +Now we can generate the results tables for the new experimental runs +with the same command as before: + +```bash +cd experiments/iccv_tutorial/ +../../basalt/scripts/batch/generate-tables.py --config experiments-iccv.toml --open +``` diff --git a/doc/Calibration.md b/doc/Calibration.md new file mode 100644 index 0000000..3e4f272 --- /dev/null +++ b/doc/Calibration.md @@ -0,0 +1,174 @@ +# Calibration + +Here, we explain how to use the calibration tools with [TUM-VI](https://vision.in.tum.de/data/datasets/visual-inertial-dataset), [EuRoC](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets), [UZH-FPV](http://rpg.ifi.uzh.ch/uzh-fpv.html) and [Kalibr](https://github.com/ethz-asl/kalibr/wiki/downloads) datasets as an example. + + +## TUM-VI dataset +Download the datasets for camera and camera-IMU calibration: +``` +mkdir ~/tumvi_calib_data +cd ~/tumvi_calib_data +wget http://vision.in.tum.de/tumvi/calibrated/512_16/dataset-calib-cam3_512_16.bag +wget http://vision.in.tum.de/tumvi/calibrated/512_16/dataset-calib-imu1_512_16.bag +``` + +### Camera calibration +Run the camera calibration: +``` +basalt_calibrate --dataset-path ~/tumvi_calib_data/dataset-calib-cam3_512_16.bag --dataset-type bag --aprilgrid /usr/etc/basalt/aprilgrid_6x6.json --result-path ~/tumvi_calib_result/ --cam-types ds ds +``` +The command line options have the following meaning: +* `--dataset-path` path to the dataset. +* `--dataset-type` type of the datset. 
Currently only `bag` and `euroc` formats of the datasets are supported. +* `--result-path` path to the folder where the resulting calibration and intermediate results will be stored. +* `--aprilgrid` path to the configuration file for the aprilgrid. +* `--cam-types` camera models for the image streams in the dataset. For more details see [arXiv:1807.08957](https://arxiv.org/abs/1807.08957). + +After that, you should see the calibration GUI: +![tumvi_cam_calib](/doc/img/tumvi_cam_calib.png) + +The buttons in the GUI are located in the order which you should follow to calibrate the camera. After pressing a button the system will print the output to the command line: +* `load_dataset` loads the dataset. +* `detect_corners` starts corner detection in the background thread. Since it is the most time consuming part of the calibration process, the detected corners are cached and loaded if you run the executable again pointing to the same result folder path. +* `init_cam_intr` computes an initial guess for camera intrinsics. +* `init_cam_poses` computes an initial guess for camera poses given the current intrinsics. +* `init_cam_extr` computes an initial transformation between the cameras. +* `init_opt` initializes optimization and shows the projected points given the current calibration and camera poses. +* `optimize` runs an iteration of the optimization and visualizes the result. You should press this button until the error printed in the console output stops decreasing and the optimization converges. Alternatively, you can use the `opt_until_converge` checkbox that will run the optimization until it converges automatically. +* `save_calib` saves the current calibration as `calibration.json` in the result folder. +* `compute_vign` **(Experimental)** computes a radially-symmetric vignetting for the cameras. For the algorithm to work, **the calibration pattern should be static (camera moving around it) and have a constant lighting throughout the calibration sequence**. 
If you run `compute_vign` you should press `save_calib` afterwards. The png images with vignetting will also be stored in the result folder. + +You can also control the process using the following buttons: +* `show_frame` slider to switch between the frames in the sequence. +* `show_corners` toggles the visibility of the detected corners shown in red. +* `show_corners_rejected` toggles the visibility of rejected corners. Works only when `show_corners` is enabled. +* `show_init_reproj` shows the initial reprojections computed by the `init_cam_poses` step. +* `show_opt` shows reprojected corners with the current estimate of the intrinsics and poses. +* `show_vign` toggles the visibility of the points used for vignetting estimation. The points are distributed across white areas of the pattern. +* `show_ids` toggles the ID visualization for every point. +* `huber_thresh` controls the threshold for the huber norm in pixels for the optimization. +* `opt_intr` controls if the optimization can change the intrinsics. For some datasets it might be helpful to disable this option for several first iterations of the optimization. +* `opt_until_converge` runs the optimization until convergence. +* `stop_thresh` defines the stopping criteria. Optimization will stop when the maximum increment is smaller than this value. + + +### Camera + IMU + Mocap calibration +After calibrating the camera you can run the camera + IMU + Mocap calibration. The result path should point to the **same folder as before**: +``` +basalt_calibrate_imu --dataset-path ~/tumvi_calib_data/dataset-calib-imu1_512_16.bag --dataset-type bag --aprilgrid /usr/etc/basalt/aprilgrid_6x6.json --result-path ~/tumvi_calib_result/ --gyro-noise-std 0.000282 --accel-noise-std 0.016 --gyro-bias-std 0.0001 --accel-bias-std 0.001 +``` +The command line options for the IMU noise are continous-time and defined as in [Kalibr](https://github.com/ethz-asl/kalibr/wiki/IMU-Noise-Model): +* `--gyro-noise-std` gyroscope white noise. 
+* `--accel-noise-std` accelerometer white noise. +* `--gyro-bias-std` gyroscope random walk. +* `--accel-bias-std` accelerometer random walk. + +![tumvi_imu_calib](/doc/img/tumvi_imu_calib.png) + +The buttons in the GUI are located in the order which you need to follow to calibrate the camera-IMU setup: +* `load_dataset`, `detect_corners`, `init_cam_poses` same as above. +* `init_cam_imu` initializes the rotation between camera and IMU by aligning rotation velocities of the camera to the gyro data. +* `init_opt` initializes the optimization. Shows reprojected corners in magenta and the estimated values from the spline as solid lines below. +* `optimize` runs an iteration of the optimization. You should press it several times until convergence before proceeding to next steps. Alternatively, you can use the `opt_until_converge` checkbox that will run the optimization until it converges automatically. +* `init_mocap` initializes the transformation from the Aprilgrid calibration pattern to the Mocap coordinate system. +* `save_calib` save the current calibration as `calibration.json` in the result folder. +* `save_mocap_calib` save the current Mocap to IMU calibration as `mocap_calibration.json` in the result folder. + +You can also control the visualization using the following buttons: +* `show_frame` - `show_ids` the same as above. +* `show_spline` toggles the visibility of enabled measurements (accel, gyro, position, velocity) generated from the spline that we optimize. +* `show_data` toggles the visibility of raw data contained in the dataset. +* `show_accel` shows accelerometer data. +* `show_gyro` shows gyroscope data. +* `show_pos` shows spline position for `show_spline` and positions generated from camera pose initialization transformed into IMU coordinate frame for `show_data`. +* `show_rot_error` shows the rotation error between spline and camera pose initializations transformed into IMU coordinate frame. 
+* `show_mocap` shows the mocap marker position transformed to the IMU frame. +* `show_mocap_rot_error` shows rotation between the spline and Mocap measurements. +* `show_mocap_rot_vel` shows the rotation velocity computed from the Mocap. + +The following options control the optimization process: +* `opt_intr` enables optimization of intrinsics. Usually should be disabled for the camera-IMU calibration. +* `opt_poses` enables optimization based camera pose initialization. Sometimes helps to better initialize the spline before running optimization with `opt_corners`. +* `opt_corners` enables optimization based on reprojection corner positions **(should be used by default)**. +* `opt_cam_time_offset` computes the time offset between camera and the IMU. This option should be used only for refinement when the optimization already converged. +* `opt_imu_scale` enables IMU axis scaling, rotation and misalignment calibration. This option should be used only for refinement when the optimization already converged. +* `opt_mocap` enables Mocap optimization. You should run it only after pressing `init_mocap`. +* `huber_thresh` controls the threshold for the huber norm in pixels for the optimization. +* `opt_until_convg` runs the optimization until convergence. +* `stop_thresh` defines the stopping criteria. Optimization will stop when the maximum increment is smaller than this value. + + +**NOTE:** In this case we use a pre-calibrated sequence, so most of refinements or Mocap to IMU calibration will not have any visible effect. If you want to test this functionality use the "raw" sequences, for example `http://vision.in.tum.de/tumvi/raw/dataset-calib-cam3.bag` and `http://vision.in.tum.de/tumvi/raw/dataset-calib-imu1.bag`. 
+ +## EuRoC dataset +Download the datasets for camera and camera-IMU calibration: +``` +mkdir ~/euroc_calib_data +cd ~/euroc_calib_data +wget http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/calibration_datasets/cam_april/cam_april.bag +wget http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/calibration_datasets/imu_april/imu_april.bag +``` + +### Camera calibration +Run the camera calibration: +``` +basalt_calibrate --dataset-path ~/euroc_calib_data/cam_april.bag --dataset-type bag --aprilgrid /usr/etc/basalt/aprilgrid_6x6.json --result-path ~/euroc_calib_result/ --cam-types ds ds +``` +![euroc_cam_calib](/doc/img/euroc_cam_calib.png) + +### Camera + IMU calibration +After calibrating the camera you can run the camera + IMU calibration. The result-path should point to the same folder as before: +``` +basalt_calibrate_imu --dataset-path ~/euroc_calib_data/imu_april.bag --dataset-type bag --aprilgrid /usr/etc/basalt/aprilgrid_6x6.json --result-path ~/euroc_calib_result/ --gyro-noise-std 0.000282 --accel-noise-std 0.016 --gyro-bias-std 0.0001 --accel-bias-std 0.001 +``` +![euroc_imu_calib](/doc/img/euroc_imu_calib.png) + + +## UZH dataset +Download the datasets for camera and camera-IMU calibration: +``` +mkdir ~/uzh_calib_data +cd ~/uzh_calib_data +wget http://rpg.ifi.uzh.ch/datasets/uzh-fpv/calib/indoor_forward_calib_snapdragon_cam.bag +wget http://rpg.ifi.uzh.ch/datasets/uzh-fpv/calib/indoor_forward_calib_snapdragon_imu.bag +``` + +### Camera calibration +Run the camera calibration: +``` +basalt_calibrate --dataset-path ~/uzh_calib_data/indoor_forward_calib_snapdragon_cam.bag --dataset-type bag --aprilgrid /usr/etc/basalt/aprilgrid_5x4_uzh.json --result-path ~/uzh_calib_result/ --cam-types ds ds +``` +![uzh_cam_calib](/doc/img/uzh_cam_calib.png) + +### Camera + IMU calibration +After calibrating the camera you can run the camera + IMU calibration. 
The result-path should point to the same folder as before: +``` +basalt_calibrate_imu --dataset-path ~/uzh_calib_data/indoor_forward_calib_snapdragon_imu.bag --dataset-type bag --aprilgrid /usr/etc/basalt/aprilgrid_5x4_uzh.json --result-path ~/uzh_calib_result/ --gyro-noise-std 0.05 --accel-noise-std 0.1 --gyro-bias-std 4e-5 --accel-bias-std 0.002 +``` +![uzh_imu_calib](/doc/img/uzh_imu_calib.png) + + +## Kalibr dataset +Download the datasets for camera and camera-IMU calibration from [here (Sample datasets)](https://github.com/ethz-asl/kalibr/wiki/downloads): +``` +mkdir ~/kalibr_calib_data +cd ~/kalibr_calib_data +# Download data +tar xvf static.tar.gz +tar xvf dynamic.tar.gz +``` + +### Camera calibration +Run the camera calibration: +``` +basalt_calibrate --dataset-path ~/kalibr_calib_data/static/static.bag --dataset-type bag --aprilgrid /usr/etc/basalt/aprilgrid_6x6.json --result-path ~/kalibr_calib_result/ --cam-types ds ds ds ds +``` +![kalibr_cam_calib](/doc/img/kalibr_cam_calib.png) + + +### Camera + IMU calibration +After calibrating the camera you can run the camera + IMU calibration. The result-path should point to the same folder as before: +``` +basalt_calibrate_imu --dataset-path ~/kalibr_calib_data/dynamic/dynamic.bag --dataset-type bag --aprilgrid /usr/etc/basalt/aprilgrid_6x6.json --result-path ~/kalibr_calib_result/ --gyro-noise-std 0.005 --accel-noise-std 0.01 --gyro-bias-std 4.0e-06 --accel-bias-std 0.0002 +``` +![kalibr_imu_calib](/doc/img/kalibr_imu_calib.png) \ No newline at end of file diff --git a/doc/DevSetup.md b/doc/DevSetup.md new file mode 100644 index 0000000..c698247 --- /dev/null +++ b/doc/DevSetup.md @@ -0,0 +1,71 @@ + + +### Clang-format +We use clang-format to maintain a consistent formating of the code. Since there are small differences between different version of clang-format we use version 11 on all platforms. 
+ +On **Ubuntu 20.04 or 18.04** run the following commands to install clang-format-11 +``` +wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add - +sudo sh -c 'echo "deb http://apt.llvm.org/$(lsb_release -sc)/ llvm-toolchain-$(lsb_release -sc)-11 main" > /etc/apt/sources.list.d/llvm11.list' +sudo apt-get update +sudo apt-get install clang-format-11 +``` + +On **MacOS** [Homebrew](https://brew.sh/) should install the right version of clang-format: +``` +brew install clang-format +``` + +### Realsense Drivers (Optional) +If you want to use the code with Realsense T265 cameras you should install the realsense library. + +On **Ubuntu 20.04 or 18.04** run the following commands: +``` +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key C8B3A55A6F3EFCDE +sudo sh -c 'echo "deb http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo bionic main" > /etc/apt/sources.list.d/realsense.list' +sudo apt-get update +sudo apt-get install librealsense2-dev librealsense2-gl-dev librealsense2-dkms librealsense2-utils librealsense2-dkms +``` + +On **MacOS** run: +``` +brew install librealsense +``` + +### Install and configure QtCreator +Download and install QtCreator. On **Ubuntu 20.04 or 18.04** run: +``` +wget https://download.qt.io/official_releases/qtcreator/4.10/4.10.0/qt-creator-opensource-linux-x86_64-4.10.0.run +chmod +x qt-creator-opensource-linux-x86_64-4.10.0.run +./qt-creator-opensource-linux-x86_64-4.10.0.run +``` + +On **MacOS** run: +``` +brew cask install qt-creator +``` + +After installation, go to `Help` -> `About plugins...` in the menu and enable Beautifier plugin (formats the code automatically on save): + +![qt_creator_plugins](/doc/img/qt_creator_plugins.png) + +Go to `Tools` -> `Options` and select the Beautifier tab. There select ClangFormat as the tool in `General` tab. + +![qt_creator_beautifier_general](/doc/img/qt_creator_beautifier_general.png) + +Select file as predefined style in `Clang Format` tab. 
Also select `None` as the fallback style. For **Ubuntu 20.04 or 18.04** change the executable name to `/usr/bin/clang-format-11`. + +![qt_creator_beautifier_clang_format](/doc/img/qt_creator_beautifier_clang_format.png) + +### Build project +First, clone the project repository. +``` +git clone --recursive https://gitlab.com/VladyslavUsenko/basalt.git +``` + +After that, in QtCreator open to the `CMakeLists.txt` in the `basalt` folder and configure the project with `Release with Debug Info` configuration. The build directory should point to `//basalt/build`. + +![qt_creator_configure_project](/doc/img/qt_creator_configure_project.png) + +Finally, you should be able to build and run the project. + diff --git a/doc/Realsense.md b/doc/Realsense.md new file mode 100644 index 0000000..e1b5647 --- /dev/null +++ b/doc/Realsense.md @@ -0,0 +1,159 @@ +# Tutorial on Camera-IMU and Motion Capture Calibration with Realsense T265 + +![Realsense](/doc/img/realsense_setup.jpg) + +In this tutorial we explain how to perform photometric and geometric calibration of the multi-camera setup and then calibrate the transformations between cameras, IMU and the motion capture marker setup. To make sure the calibration is successful we recommend to rigidly attach markers to the camera as shown on the figure above. + +## Dataset +We provide a set of example datasets for the calibration. 
Even if you plan to record your own datasets (covered in the next section), we recommend to download and try the provided examples: +``` +mkdir ~/t265_calib_data +cd ~/t265_calib_data +wget http://vision.in.tum.de/tumvi/t265/response_calib.zip +wget http://vision.in.tum.de/tumvi/t265/cam_calib.zip +wget http://vision.in.tum.de/tumvi/t265/imu_calib.zip +wget http://vision.in.tum.de/tumvi/t265/sequence0.zip +unzip response_calib +unzip cam_calib +unzip imu_calib +unzip sequence0 +``` + +## Recording Own Dataset +You can record your own sequences using the `basalt_rs_t265_record` executable: +``` +basalt_rs_t265_record --dataset-path ~/t265_calib_data/ --manual-exposure +``` +* `--dataset-path` specifies the location where the recorded dataset will be stored. In this case it will be stored in `~/t265_calib_data//`. +* `--manual-exposure` disables the autoexposure. In this tutorial the autoexposure is disabled for all calibration sequences, but for the VIO sequence (sequence0) we enable it. + +![t265_record](/doc/img/t265_record.png) + +The GUI elements have the following meaning: +* `webp_quality` compression quality. The highest value (101) means lossless compression. For photometric calibration it is important not to have any compression artifacts, so we record these calibration sequences with lossless compression. +* `skip_frames` reduces the framerate of the recorded dataset by skipping frames. +* `exposure` controls the exposure time of the cameras. +* `record` starts/stops the recoding of the dataset. If you run the system on the slow PC pay attention to the number of messages in the queue. If it goes beyond the limit the recorder will start dropping frames. +* `export_calibration` exports factory calibration in the basalt format. + +After recoding the dataset it is a good practice to verify the quality of the dataset. 
You can do this by running: +``` +basalt_verify_dataset.py -d ~/t265_calib_data// +``` +It will report the actual frequencies of the recorded sensor messages and warn you if any files with image data are missing. + +Every sequence required for the calibration should have certain properties to enable successful calibration. Pay attention to the **Important for recording the dataset** subsections and inspect the provided examples before recoding your own sequences. + +## Response calibration +In this project we assume a camera has a linear response function (intensity of the pixel is linearly proportional to the amount of light captured by the pixel). In this section we will verify this for the Realsense T265 cameras. We will need to record a static scene with different exposure times. + +**Important for recording the dataset:** +* Keep the camera static and make sure that nothing in the scene moves during the recording. +* Move `webp_quality` slider to the highest value to enable lossless compression. +* Optionally set the `skip_frames` slider to 3-5 to speed up the dataset recoding. +* Start recoding and slowly move the exposure slider up and down. Stop recoding. +* Rename the dataset to `response_calib`. + +Run the response function calibration: +``` +basalt_response_calib.py -d ~/t265_calib_data/response_calib +``` +You should see the response function and the irradiance image similar to the one shown below. For the details of the algorithm see Section 2.3.1 of [[arXiv:1607.02555]](https://arxiv.org/abs/1607.02555). The results suggest that the response function used in the camera is linear. +![t265_inv_resp_irradiance](/doc/img/t265_inv_resp_irradiance.png) + +## Multi-Camera Geometric and Vignette Calibration +For the camera calibration we need to record a dataset with a static aprilgrid pattern. + +**Important for recording the dataset:** +* Move `webp_quality` slider to the highest value to enable lossless compression (important for vignette calibration). 
+* Set the `skip_frames` slider to 5 to speed up the dataset recoding. +* Move the camera slowly to reduce the motion blur. +* Cover the entire field of view of the camera with the calibration pattern. Try to observe the pattern from different angles. +* Make sure you do not cast shadows at the pattern (important for vignette calibration). +* Rename the dataset to `cam_calib` + +Run the calibration executable: +``` +basalt_calibrate --dataset-path ~/t265_calib_data/cam_calib --dataset-type euroc --result-path ~/t265_calib_results/ --aprilgrid /usr/etc/basalt/aprilgrid_6x6.json --cam-types kb4 kb4 +``` +To perform the calibration follow these steps: +* `load_dataset` load the dataset. +* `detect_corners` detect corners. If the corners were detected before the cached detections will be loaded at the previous step so there is no need to re-run the detection. +* `init_cam_intr` initialize camera intrinsics. +* `init_cam_poses` initialize camera poses using the current intrinsics estimate. +* `init_cam_extr` initialize transformations between multiple cameras. +* `init_opt` initialize optimization. +* `opt_until_converge` optimize until convergence. +* `init_cam_poses` some initial poses computed from the initialized intrinsics can be far from optimum and not converge to the right minimum. To improve the final result we can re-initialize poses with optimized intrinsics. +* `init_opt` initialize optimization with new initial poses. +* `opt_until_converge` optimize until convergence. +* `compute_vign` after optimizing geometric models compute the vignetting of the cameras. +* `save_calib` save calibration file to the `~/t265_calib_results/calibration.json`. + +![t265_cam_calib](/doc/img/t265_cam_calib.png) + + +## IMU and Motion Capture Calibration +After calibrating cameras we can proceed to geometric and time calibration of the cameras, IMU and motion capture system. Setting up the motion capture system is specific for your setup. 
+ +For the motion capture recording we use [ros_vrpn_client](https://github.com/ethz-asl/ros_vrpn_client) with [basalt_capture_mocap.py](/scripts/basalt_capture_mocap.py). We record the data to the `mocap0` folder and then move it to the `mav0` directory of the camera dataset. This script is provided as an example. Motion capture setup is different in every particular case. + +**Important for recording the dataset:** +* Set the `skip_frames` slider to 1 to use the full framerate. +* Reduce the exposure time to reduce the motion blur. +* Move the setup such that all axes of accelerometer and gyro are excited. This means moving with acceleration along X, Y and Z axes and rotating around those axes. +* Do not forget to simultaneously record motion capture data. +* Rename the dataset to `imu_calib`. + +Run the calibration executable: +``` +basalt_calibrate_imu --dataset-path ~/t265_calib_data/imu_calib --dataset-type euroc --result-path ~/t265_calib_results/ --aprilgrid /usr/etc/basalt/aprilgrid_6x6.json --accel-noise-std 0.00818 --gyro-noise-std 0.00226 --accel-bias-std 0.01 --gyro-bias-std 0.0007 +``` + +To perform the calibration follow these steps: +* `load_dataset` load the dataset. +* `detect_corners` detect corners. If the corners were detected before the cached detections will be loaded at the previous step so there is no need to re-run the detection. +* `init_cam_poses` initialize camera poses. +* `init_cam_imu` initialize the transformation between cameras and the IMU. +* `init_opt` initialize optimization. +* `opt_until_converge` optimize until convergence. +* Enable `opt_cam_time_offset` and `opt_imu_scale` to optimize the time offset between cameras and the IMU and the IMU scale. +* `opt_until_converge` optimize until convergence. +* `init_mocap` initialize transformation between the motion capture marker frame and the IMU and the transformation between the aprilgrid pattern and the motion capture system origin. +* Enable `opt_mocap`. 
+* `opt_until_converge` optimize until convergence. +* `save_calib` save calibration file to the `~/t265_calib_results/calibration.json`. +* `save_mocap_calib` save motion capture system calibration file to the `~/t265_calib_results/mocap_calibration.json`. + +![t265_imu_calib](/doc/img/t265_imu_calib.png) + + +## Generating Time-Aligned Ground Truth +Since motion capture system and the PC where the dataset was recorded might not have the same clock we need to perform the time synchronization. Additionally we need to transform the coordinate frame of the GT data to the IMU frame (originally it is in the coordinate frame attached to the markers). + +The raw motion capture data is stored in the `mav/mocap0/` folder. We can find the time offset by minimizing the error between gyro measurements and rotational velocities computed from the motion capture data. If you press the `save_aligned_dataset` button the resulting trajectory (time aligned and transformed to the IMU frame) will be written to `mav/gt/data.csv` and automatically loaded when available. +``` +basalt_time_alignment --dataset-path ~/t265_calib_data/sequence0 --dataset-type euroc --calibration ~/t265_calib_results/calibration.json --mocap-calibration ~/t265_calib_results/mocap_calibration.json +``` +You should be able to see that, despite some noise, rotational velocity computed from the motion capture data aligns well with gyro measurements. +![t265_time_align_gyro](/doc/img/t265_time_align_gyro.png) + +You can also switch to the error function plot and see that there is a clear minimum corresponding to the computed time offset. +![t265_time_align_error](/doc/img/t265_time_align_error.png) + +**Note:** If you want to run the time alignment again you should delete the `~/t265_calib_data/sequence0/mav/gt` folder first. If GT data already exist you will see the `save_aligned_dataset(disabled)` button which will **NOT** overwrite it. 
+ +## Running Visual-Inertial Odometry +Now we can run the visual-inertial odometry on the recorded dataset: +``` +basalt_vio --dataset-path ~/t265_calib_data/sequence0 --cam-calib ~/t265_calib_results/calibration.json --dataset-type euroc --config-path /usr/etc/basalt/euroc_config.json --show-gui 1 +``` +After the system processes the whole sequence you can use `align_se3` button to align trajectory to the ground-truth data and compute RMS ATE. +![t265_vio](/doc/img/t265_vio.png) + + +## Running Visual-Inertial Odometry Live +It is also possible to run the odometry live with the camera. If no calibration files are provided the factory calibration will be used. +``` +basalt_rs_t265_vio --cam-calib ~/t265_calib_results/calibration.json --config-path /usr/etc/basalt/euroc_config.json +``` \ No newline at end of file diff --git a/doc/Simulation.md b/doc/Simulation.md new file mode 100644 index 0000000..b3ad171 --- /dev/null +++ b/doc/Simulation.md @@ -0,0 +1,51 @@ +## Simulator + +For better evaluation of the system we use the simulated environment where the optical flow and IMU data is generated from the ground truth by adding noise. + +**Note:** The path to calibration and configuration files used here works for the APT installation. If you compile from source specify the appropriate path to the files in [data folder](/data/). + + +### Visual-inertial odometry simulator +``` +basalt_vio_sim --cam-calib /usr/etc/basalt/euroc_ds_calib.json --marg-data sim_marg_data --show-gui 1 +``` + +The command line options have the following meaning: +* `--cam-calib` path to camera calibration file. Check [calibration instructions](doc/Calibration.md) to see how the calibration was generated. +* `--marg-data` folder where the data from keyframe marginalization will be stored. This data can be later used for visual-inertial mapping simulator. +* `--show-gui` enables or disables GUI. + +This opens the GUI and runs the sequence. 
+![SIM_VIO](/doc/img/SIM_VIO.png) + +The buttons in the GUI have the following meaning: +* `show_obs` toggles the visibility of the ground-truth landmarks in the image view. +* `show_obs_noisy` toggles the visibility of the noisy landmarks in the image view. +* `show_obs_vio` toggles the visibility of the landmarks estimated by VIO in the image view. +* `show_ids` toggles the IDs of the landmarks. +* `show_accel` shows noisy accelerometer measurements generated from the ground-truth spline. +* `show_gyro` shows noisy gyroscope measurements generated from the ground-truth spline. +* `show_gt_...` shows ground truth position, velocity and biases. +* `show_est_...` shows VIO estimates of the position, velocity and biases. +* `next_step` proceeds to next frame. +* `continue` plays the sequence. +* `align_se3` performs SE(3) alignment with ground-truth trajectory and prints the RMS ATE to the console. + + +### Visual-inertial mapping simulator +``` +basalt_mapper_sim --cam-calib /usr/etc/basalt/euroc_ds_calib.json --marg-data sim_marg_data --show-gui 1 +``` +The command line arguments are the same as above. + +This opens the GUI where the map can be processed. +![SIM_MAPPER](/doc/img/SIM_MAPPER.png) + +The system processes the marginalization data and extracts the non-linear factors from them. Roll-pitch and relative-pose factors are initially added to the system. One way to verify that they result in gravity-aligned map is the following +* `optimize` runs the optimization +* `rand_inc` applies a random increment to all frames of the system. If you run the `optimize` until convergence afterwards, and press `align_se3` the alignment transformation should only contain the rotation around Z axis. +* `rand_yaw` applies an increment in yaw to all poses. This should not change the error of the optimization once it has converged. +* `setup_points` triangulates the points and adds them to optimization. You should optimize the system again after adding the points. 
+* `align_se3` performs SE(3) alignment with ground-truth trajectory and prints the RMS ATE to the console. + +For comparison we also provide the `basalt_mapper_sim_naive` executable that has the same parameters. It runs a global bundle-adjustment on keyframe data and inserts pre-integrated IMU measurements between keyframes. This executable is included for comparison only. \ No newline at end of file diff --git a/doc/VioMapping.md b/doc/VioMapping.md new file mode 100644 index 0000000..a8f7f2f --- /dev/null +++ b/doc/VioMapping.md @@ -0,0 +1,121 @@ +## EuRoC dataset + +We demonstrate the usage of the system with the `MH_05_difficult` sequence of the [EuRoC dataset](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) as an example. + +**Note:** The path to calibration and configuration files used here works for the APT installation. If you compile from source specify the appropriate path to the files in [data folder](/data/). + +Download the sequence from the dataset and extract it. +``` +mkdir euroc_data +cd euroc_data +wget http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/machine_hall/MH_05_difficult/MH_05_difficult.zip +mkdir MH_05_difficult +cd MH_05_difficult +unzip ../MH_05_difficult.zip +cd ../ +``` + +### Visual-inertial odometry +To run the visual-inertial odometry execute the following command in `euroc_data` folder where you downloaded the dataset. +``` +basalt_vio --dataset-path MH_05_difficult/ --cam-calib /usr/etc/basalt/euroc_ds_calib.json --dataset-type euroc --config-path /usr/etc/basalt/euroc_config.json --marg-data euroc_marg_data --show-gui 1 +``` +The command line options have the following meaning: +* `--dataset-path` path to the dataset. +* `--dataset-type` type of the dataset. Currently only `bag` and `euroc` formats of the datasets are supported. +* `--cam-calib` path to camera calibration file. Check [calibration instructions](doc/Calibration.md) to see how the calibration was generated. 
+* `--config-path` path to the configuration file. +* `--marg-data` folder where the data from keyframe marginalization will be stored. This data can be later used for visual-inertial mapping. +* `--show-gui` enables or disables GUI. + +This opens the GUI and runs the sequence. The processing happens in the background as fast as possible, and the visualization results are saved in the GUI and can be analysed offline. +![MH_05_VIO](/doc/img/MH_05_VIO.png) + +The buttons in the GUI have the following meaning: +* `show_obs` toggles the visibility of the tracked landmarks in the image view. +* `show_ids` toggles the IDs of the points. +* `show_est_pos` shows the plot of the estimated position. +* `show_est_vel` shows the plot of the estimated velocity. +* `show_est_bg` shows the plot of the estimated gyro bias. +* `show_est_ba` shows the plot of the estimated accel bias. +* `show_gt` shows ground-truth trajectory in the 3D view. + +By default the system starts with `continue_fast` enabled. This option visualizes the latest processed frame until the end of the sequence. Alternatively, the `continue` visualizes every frame without skipping. If both options are disabled the system shows the frame that is selected with the `show_frame` slider and the user can move forward and backward with `next_step` and `prev_step` buttons. The `follow` button changes between the static camera and the camera attached to the current frame. + +For evaluation the button `align_se3` is used. It aligns the GT trajectory with the current estimate using an SE(3) transformation and prints the transformation and the root-mean-squared absolute trajectory error (RMS ATE). + +The button `save_traj` saves the trajectory in one of two formats (`euroc_fmt` or `tum_rgbd_fmt`). In EuRoC format each pose is a line in the file and has the following format `timestamp[ns],tx,ty,tz,qw,qx,qy,qz`. 
TUM RGB-D can be used with [TUM RGB-D](https://vision.in.tum.de/data/datasets/rgbd-dataset/tools) or [UZH](https://github.com/uzh-rpg/rpg_trajectory_evaluation) trajectory evaluation tools and has the following format `timestamp[s] tx ty tz qx qy qz qw`. + + +### Visual-inertial mapping +To run the mapping tool execute the following command: +``` +basalt_mapper --cam-calib /usr/etc/basalt/euroc_ds_calib.json --marg-data euroc_marg_data +``` +Here `--marg-data` is the folder with the results from VIO. + +This opens the GUI and extracts non-linear factors from the marginalization data. +![MH_05_MAPPING](/doc/img/MH_05_MAPPING.png) + +The buttons in the GUI have the following meaning: +* `show_frame1`, `show_cam1`, `show_frame2`, `show_cam2` allows you to assign images to image view 1 and 2 from different timestamps and cameras. +* `show_detected` shows the detected keypoints in the image view window. +* `show_matches` shows feature matching results. +* `show_inliers` shows inlier matches after geometric verification. +* `show_ids` prints point IDs. Can be used to find the same point in two views to check matches and inliers. +* `show_gt` shows the ground-truth trajectory. +* `show_edges` shows the edges from the factors. Relative-pose factors in red, roll-pitch factors in magenta and bundle adjustment co-visibility edges in green. +* `show_points` shows 3D landmarks. + +The workflow for the mapping is the following: +* `detect` detect the keypoints in the keyframe images. +* `match` run the geometric 2D to 2D matching between image frames. +* `tracks` build tracks from 2D matches and triangulate the points. +* `optimize` run the optimization. +* `align_se3` align ground-truth trajectory in SE(3) and print the transformation and the error. + +The `num_opt_iter` slider controls the maximum number of iterations executed when pressing `optimize`. + +The button `save_traj` works similar to the VIO, but saves the keyframe trajectory (subset of frames). 
+ +For more systematic evaluation see the evaluation scripts in the [scripts/eval_full](/scripts/eval_full) folder. + +**NOTE: It appears that only the datasets in ASL Dataset Format (`euroc` dataset type in our notation) contain ground truth that is time-aligned to the IMU and camera images. It is located in the `state_groundtruth_estimate0` folder. Bag files have raw Mocap measurements that are not time aligned and should not be used for evaluations.** + + + +### Optical Flow +The visual-inertial odometry relies on the optical flow results. To enable a better analysis of the system we also provide a separate optical flow executable +``` +basalt_opt_flow --dataset-path MH_05_difficult/ --cam-calib /usr/etc/basalt/euroc_ds_calib.json --dataset-type euroc --config-path /usr/etc/basalt/euroc_config.json --show-gui 1 +``` + +This will run the GUI and print an average track length after the dataset is processed. +![MH_05_OPT_FLOW](/doc/img/MH_05_OPT_FLOW.png) + + +## TUM-VI dataset + +We demonstrate the usage of the system with the `magistrale1` sequence of the [TUM-VI dataset](https://vision.in.tum.de/data/datasets/visual-inertial-dataset) as an example. + +Download the sequence from the dataset and extract it. +``` +mkdir tumvi_data +cd tumvi_data +wget http://vision.in.tum.de/tumvi/exported/euroc/512_16/dataset-magistrale1_512_16.tar +tar -xvf dataset-magistrale1_512_16.tar +``` + +### Visual-inertial odometry +To run the visual-inertial odometry execute the following command in `tumvi_data` folder where you downloaded the dataset. 
+``` +basalt_vio --dataset-path dataset-magistrale1_512_16/ --cam-calib /usr/etc/basalt/tumvi_512_ds_calib.json --dataset-type euroc --config-path /usr/etc/basalt/tumvi_512_config.json --marg-data tumvi_marg_data --show-gui 1 +``` +![magistrale1_vio](/doc/img/magistrale1_vio.png) + +### Visual-inertial mapping +To run the mapping tool execute the following command: +``` +basalt_mapper --cam-calib /usr/etc/basalt/tumvi_512_ds_calib.json --marg-data tumvi_marg_data +``` +![magistrale1_mapping](/doc/img/magistrale1_mapping.png) diff --git a/doc/Vo.md b/doc/Vo.md new file mode 100644 index 0000000..6bd3c9b --- /dev/null +++ b/doc/Vo.md @@ -0,0 +1,25 @@ +## KITTI dataset + +[![teaser](/doc/img/kitti_video.png)](https://www.youtube.com/watch?v=M_ZcNgExUNc) + +We demonstrate the usage of the system with the [KITTI dataset](http://www.cvlibs.net/datasets/kitti/eval_odometry.php) as an example. + +**Note:** The path to calibration and configuration files used here works for the APT installation. If you compile from source specify the appropriate path to the files in [data folder](/data/). + +Download the sequences (`data_odometry_gray.zip`) from the dataset and extract it. +``` +# We assume you have extracted the sequences in ~/dataset_gray/sequences/ +# Convert calibration to the basalt format +basalt_convert_kitti_calib.py ~/dataset_gray/sequences/00/ + +# If you want to convert calibrations for all sequences use the following command +for i in {00..21}; do basalt_convert_kitti_calib.py ~/dataset_gray/sequences/$i/; done +``` +Optionally you can also copy the provided ground-truth poses to `poses.txt` in the corresponding sequence. + +### Visual odometry +To run the visual odometry execute the following command. 
+``` +basalt_vio --dataset-path ~/dataset_gray/sequences/00/ --cam-calib /work/kitti/dataset_gray/sequences/00/basalt_calib.json --dataset-type kitti --config-path /usr/etc/basalt/kitti_config.json --show-gui 1 --use-imu 0 +``` +![magistrale1_vio](/doc/img/kitti.png) diff --git a/doc/img/MH_05_MAPPING.png b/doc/img/MH_05_MAPPING.png new file mode 100644 index 0000000..bc7bd1e Binary files /dev/null and b/doc/img/MH_05_MAPPING.png differ diff --git a/doc/img/MH_05_OPT_FLOW.png b/doc/img/MH_05_OPT_FLOW.png new file mode 100644 index 0000000..a25c340 Binary files /dev/null and b/doc/img/MH_05_OPT_FLOW.png differ diff --git a/doc/img/MH_05_VIO.png b/doc/img/MH_05_VIO.png new file mode 100644 index 0000000..42dc260 Binary files /dev/null and b/doc/img/MH_05_VIO.png differ diff --git a/doc/img/SIM_MAPPER.png b/doc/img/SIM_MAPPER.png new file mode 100644 index 0000000..1caac36 Binary files /dev/null and b/doc/img/SIM_MAPPER.png differ diff --git a/doc/img/SIM_VIO.png b/doc/img/SIM_VIO.png new file mode 100644 index 0000000..cdfcf0d Binary files /dev/null and b/doc/img/SIM_VIO.png differ diff --git a/doc/img/euroc_cam_calib.png b/doc/img/euroc_cam_calib.png new file mode 100644 index 0000000..a22ad58 Binary files /dev/null and b/doc/img/euroc_cam_calib.png differ diff --git a/doc/img/euroc_imu_calib.png b/doc/img/euroc_imu_calib.png new file mode 100644 index 0000000..7506731 Binary files /dev/null and b/doc/img/euroc_imu_calib.png differ diff --git a/doc/img/kalibr_cam_calib.png b/doc/img/kalibr_cam_calib.png new file mode 100644 index 0000000..8833ca3 Binary files /dev/null and b/doc/img/kalibr_cam_calib.png differ diff --git a/doc/img/kalibr_imu_calib.png b/doc/img/kalibr_imu_calib.png new file mode 100644 index 0000000..faf8f6e Binary files /dev/null and b/doc/img/kalibr_imu_calib.png differ diff --git a/doc/img/kitti.png b/doc/img/kitti.png new file mode 100644 index 0000000..febe0a8 Binary files /dev/null and b/doc/img/kitti.png differ diff --git 
a/doc/img/kitti_video.png b/doc/img/kitti_video.png new file mode 100644 index 0000000..14378c7 Binary files /dev/null and b/doc/img/kitti_video.png differ diff --git a/doc/img/magistrale1_mapping.png b/doc/img/magistrale1_mapping.png new file mode 100644 index 0000000..8bc6918 Binary files /dev/null and b/doc/img/magistrale1_mapping.png differ diff --git a/doc/img/magistrale1_vio.png b/doc/img/magistrale1_vio.png new file mode 100644 index 0000000..1b86a7f Binary files /dev/null and b/doc/img/magistrale1_vio.png differ diff --git a/doc/img/qt_creator_beautifier_clang_format.png b/doc/img/qt_creator_beautifier_clang_format.png new file mode 100644 index 0000000..a6d23ea Binary files /dev/null and b/doc/img/qt_creator_beautifier_clang_format.png differ diff --git a/doc/img/qt_creator_beautifier_general.png b/doc/img/qt_creator_beautifier_general.png new file mode 100644 index 0000000..e7291dd Binary files /dev/null and b/doc/img/qt_creator_beautifier_general.png differ diff --git a/doc/img/qt_creator_configure_project.png b/doc/img/qt_creator_configure_project.png new file mode 100644 index 0000000..f4adeab Binary files /dev/null and b/doc/img/qt_creator_configure_project.png differ diff --git a/doc/img/qt_creator_plugins.png b/doc/img/qt_creator_plugins.png new file mode 100644 index 0000000..cc1ba98 Binary files /dev/null and b/doc/img/qt_creator_plugins.png differ diff --git a/doc/img/realsense_setup.jpg b/doc/img/realsense_setup.jpg new file mode 100644 index 0000000..f34b6d2 Binary files /dev/null and b/doc/img/realsense_setup.jpg differ diff --git a/doc/img/t265_cam_calib.png b/doc/img/t265_cam_calib.png new file mode 100644 index 0000000..f8d519e Binary files /dev/null and b/doc/img/t265_cam_calib.png differ diff --git a/doc/img/t265_imu_calib.png b/doc/img/t265_imu_calib.png new file mode 100644 index 0000000..f622008 Binary files /dev/null and b/doc/img/t265_imu_calib.png differ diff --git a/doc/img/t265_inv_resp_irradiance.png 
b/doc/img/t265_inv_resp_irradiance.png new file mode 100644 index 0000000..1f5d5f2 Binary files /dev/null and b/doc/img/t265_inv_resp_irradiance.png differ diff --git a/doc/img/t265_record.png b/doc/img/t265_record.png new file mode 100644 index 0000000..290d032 Binary files /dev/null and b/doc/img/t265_record.png differ diff --git a/doc/img/t265_time_align_error.png b/doc/img/t265_time_align_error.png new file mode 100644 index 0000000..9cbcdca Binary files /dev/null and b/doc/img/t265_time_align_error.png differ diff --git a/doc/img/t265_time_align_gyro.png b/doc/img/t265_time_align_gyro.png new file mode 100644 index 0000000..5eb3d7b Binary files /dev/null and b/doc/img/t265_time_align_gyro.png differ diff --git a/doc/img/t265_vio.png b/doc/img/t265_vio.png new file mode 100644 index 0000000..6249a28 Binary files /dev/null and b/doc/img/t265_vio.png differ diff --git a/doc/img/teaser.png b/doc/img/teaser.png new file mode 100644 index 0000000..a9a1bff Binary files /dev/null and b/doc/img/teaser.png differ diff --git a/doc/img/tumvi_cam_calib.png b/doc/img/tumvi_cam_calib.png new file mode 100644 index 0000000..631c4b5 Binary files /dev/null and b/doc/img/tumvi_cam_calib.png differ diff --git a/doc/img/tumvi_imu_calib.png b/doc/img/tumvi_imu_calib.png new file mode 100644 index 0000000..4fbbe94 Binary files /dev/null and b/doc/img/tumvi_imu_calib.png differ diff --git a/doc/img/uzh_cam_calib.png b/doc/img/uzh_cam_calib.png new file mode 100644 index 0000000..3f91eed Binary files /dev/null and b/doc/img/uzh_cam_calib.png differ diff --git a/doc/img/uzh_imu_calib.png b/doc/img/uzh_imu_calib.png new file mode 100644 index 0000000..760ed9b Binary files /dev/null and b/doc/img/uzh_imu_calib.png differ diff --git a/docker/b_image_bionic/Dockerfile b/docker/b_image_bionic/Dockerfile new file mode 100644 index 0000000..4e6b631 --- /dev/null +++ b/docker/b_image_bionic/Dockerfile @@ -0,0 +1,19 @@ +FROM ubuntu:18.04 + +RUN apt-get update && apt-get install -y wget gnupg 
lsb-release software-properties-common && apt-get clean all + +RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key| apt-key add - +RUN echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-11 main" > /etc/apt/sources.list.d/llvm11.list +RUN echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-12 main" > /etc/apt/sources.list.d/llvm12.list + +RUN add-apt-repository ppa:ubuntu-toolchain-r/test + +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE +RUN add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" -u + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y cmake git libtbb-dev libeigen3-dev libglew-dev ccache libjpeg-dev libpng-dev openssh-client liblz4-dev libbz2-dev libboost-regex-dev libboost-filesystem-dev libboost-date-time-dev libboost-program-options-dev libopencv-dev libpython2.7-dev libgtest-dev lsb-core gcovr ggcov lcov librealsense2-dev librealsense2-gl-dev librealsense2-dkms librealsense2-utils doxygen graphviz libsuitesparse-dev clang-11 clang-format-11 clang-tidy-11 clang-12 clang-format-12 clang-tidy-12 g++-9 libfmt-dev && apt-get clean all + +RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 60 \ + --slave /usr/bin/g++ g++ /usr/bin/g++-9 +RUN gcc --version +RUN g++ --version \ No newline at end of file diff --git a/docker/b_image_focal/Dockerfile b/docker/b_image_focal/Dockerfile new file mode 100644 index 0000000..157f518 --- /dev/null +++ b/docker/b_image_focal/Dockerfile @@ -0,0 +1,12 @@ +FROM ubuntu:20.04 + +RUN apt-get update && apt-get install -y wget gnupg lsb-release software-properties-common && apt-get clean all + +RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key| apt-key add - +RUN echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-11 main" > /etc/apt/sources.list.d/llvm11.list +RUN echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-12 main" > 
/etc/apt/sources.list.d/llvm12.list + +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE +RUN add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" -u + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y cmake git libtbb-dev libeigen3-dev libglew-dev ccache libjpeg-dev libpng-dev openssh-client liblz4-dev libbz2-dev libboost-regex-dev libboost-filesystem-dev libboost-date-time-dev libboost-program-options-dev libopencv-dev libpython2.7-dev libgtest-dev lsb-core gcovr ggcov lcov librealsense2-dev librealsense2-gl-dev librealsense2-dkms librealsense2-utils doxygen graphviz libsuitesparse-dev clang-11 clang-format-11 clang-tidy-11 clang-12 clang-format-12 clang-tidy-12 libfmt-dev && apt-get clean all diff --git a/include/basalt/calibration/aprilgrid.h b/include/basalt/calibration/aprilgrid.h new file mode 100644 index 0000000..019a136 --- /dev/null +++ b/include/basalt/calibration/aprilgrid.h @@ -0,0 +1,57 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +namespace basalt { + +struct AprilGrid { + AprilGrid(const std::string &config_path); + + Eigen::aligned_vector aprilgrid_corner_pos_3d; + Eigen::aligned_vector aprilgrid_vignette_pos_3d; + + inline int getTagCols() const { return tagCols; } + inline int getTagRows() const { return tagRows; } + + private: + int tagCols; // number of apriltags + int tagRows; // number of apriltags + double tagSize; // size of apriltag, edge to edge [m] + double tagSpacing; // ratio of space between tags to tagSize +}; + +} // namespace basalt diff --git a/include/basalt/calibration/calibration_helper.h b/include/basalt/calibration/calibration_helper.h new file mode 100644 index 0000000..95aac3a --- /dev/null +++ b/include/basalt/calibration/calibration_helper.h @@ -0,0 +1,131 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include +#include +#include +#include + +#include + +namespace basalt { + +struct CalibCornerData { + Eigen::aligned_vector corners; + std::vector corner_ids; + std::vector radii; //!< threshold used for maximum displacement + //! during sub-pix refinement; Search region is + //! slightly larger. 
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +struct ProjectedCornerData { + Eigen::aligned_vector corners_proj; + std::vector corners_proj_success; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +struct CalibInitPoseData { + Sophus::SE3d T_a_c; + size_t num_inliers; + + Eigen::aligned_vector reprojected_corners; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +using CalibCornerMap = tbb::concurrent_unordered_map>; + +using CalibInitPoseMap = + tbb::concurrent_unordered_map>; + +class CalibHelper { + public: + static void detectCorners(const VioDatasetPtr& vio_data, + CalibCornerMap& calib_corners, + CalibCornerMap& calib_corners_rejected); + + static void initCamPoses( + const Calibration::Ptr& calib, + const Eigen::aligned_vector& aprilgrid_corner_pos_3d, + CalibCornerMap& calib_corners, CalibInitPoseMap& calib_init_poses); + + static bool initializeIntrinsics( + const Eigen::aligned_vector& corners, + const std::vector& corner_ids, const AprilGrid& aprilgrid, int cols, + int rows, Eigen::Vector4d& init_intr); + + static bool initializeIntrinsicsPinhole( + const std::vector pinhole_corners, + const AprilGrid& aprilgrid, int cols, int rows, + Eigen::Vector4d& init_intr); + + private: + inline static double square(double x) { return x * x; } + + inline static double hypot(double a, double b) { + return sqrt(square(a) + square(b)); + } + + static void computeInitialPose( + const Calibration::Ptr& calib, size_t cam_id, + const Eigen::aligned_vector& aprilgrid_corner_pos_3d, + const basalt::CalibCornerData& cd, basalt::CalibInitPoseData& cp); + + static size_t computeReprojectionError( + const UnifiedCamera& cam_calib, + const Eigen::aligned_vector& corners, + const std::vector& corner_ids, + const Eigen::aligned_vector& aprilgrid_corner_pos_3d, + const Sophus::SE3d& T_target_camera, double& error); +}; + +} // namespace basalt + +namespace cereal { +template +void serialize(Archive& ar, basalt::CalibCornerData& c) { + ar(c.corners, c.corner_ids, c.radii); +} + +template +void 
serialize(Archive& ar, basalt::CalibInitPoseData& c) { + ar(c.T_a_c, c.num_inliers, c.reprojected_corners); +} +} // namespace cereal diff --git a/include/basalt/calibration/cam_calib.h b/include/basalt/calibration/cam_calib.h new file mode 100644 index 0000000..34e4524 --- /dev/null +++ b/include/basalt/calibration/cam_calib.h @@ -0,0 +1,176 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace basalt { + +class PosesOptimization; + +class CamCalib { + public: + CamCalib(const std::string &dataset_path, const std::string &dataset_type, + const std::string &aprilgrid_path, const std::string &cache_path, + const std::string &cache_dataset_name, int skip_images, + const std::vector &cam_types, bool show_gui = true); + + ~CamCalib(); + + void initGui(); + + void computeVign(); + + void setNumCameras(size_t n); + + void renderingLoop(); + + void computeProjections(); + + void detectCorners(); + + void initCamIntrinsics(); + + void initCamPoses(); + + void initCamExtrinsics(); + + void initOptimization(); + + void loadDataset(); + + void optimize(); + + bool optimizeWithParam(bool print_info, + std::map *stats = nullptr); + + void saveCalib(); + + void drawImageOverlay(pangolin::View &v, size_t cam_id); + + bool hasCorners() const; + + void setOptIntrinsics(bool opt) { opt_intr = opt; } + + private: + static constexpr int UI_WIDTH = 300; + + static constexpr size_t RANSAC_THRESHOLD = 10; + + // typedef Calibration::Ptr CalibrationPtr; + + VioDatasetPtr vio_dataset; + // CalibrationPtr calib; + + CalibCornerMap calib_corners; + CalibCornerMap calib_corners_rejected; + CalibInitPoseMap calib_init_poses; + + std::shared_ptr processing_thread; + + std::shared_ptr 
calib_opt; + + std::map reprojected_corners; + std::map reprojected_vignette; + std::map> reprojected_vignette_error; + + std::string dataset_path; + std::string dataset_type; + + AprilGrid april_grid; + + std::string cache_path; + std::string cache_dataset_name; + + int skip_images; + + std::vector cam_types; + + bool show_gui; + + const size_t MIN_CORNERS = 15; + + ////////////////////// + + pangolin::Var show_frame; + + pangolin::Var show_corners; + pangolin::Var show_corners_rejected; + pangolin::Var show_init_reproj; + pangolin::Var show_opt; + pangolin::Var show_vign; + pangolin::Var show_ids; + + pangolin::Var huber_thresh; + + pangolin::Var opt_intr; + + pangolin::Var opt_until_convg; + pangolin::Var stop_thresh; + + std::shared_ptr vign_plotter; + std::shared_ptr polar_plotter; + std::shared_ptr azimuth_plotter; + + std::vector cam_colors; + + pangolin::View *img_view_display; + + std::vector> img_view; + + pangolin::DataLog vign_data_log; + + std::vector> polar_data_log, + azimuth_data_log; +}; + +} // namespace basalt diff --git a/include/basalt/calibration/cam_imu_calib.h b/include/basalt/calibration/cam_imu_calib.h new file mode 100644 index 0000000..94e8d1c --- /dev/null +++ b/include/basalt/calibration/cam_imu_calib.h @@ -0,0 +1,182 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include +#include +#include + +namespace basalt { + +template +class SplineOptimization; + +class CamImuCalib { + public: + CamImuCalib(const std::string &dataset_path, const std::string &dataset_type, + const std::string &aprilgrid_path, const std::string &cache_path, + const std::string &cache_dataset_name, int skip_images, + const std::vector &imu_noise, bool show_gui = true); + + ~CamImuCalib(); + + void initGui(); + + void setNumCameras(size_t n); + + void renderingLoop(); + + void computeProjections(); + + void detectCorners(); + + void initCamPoses(); + + void initCamImuTransform(); + + void initOptimization(); + + void initMocap(); + + void loadDataset(); + + void optimize(); + + bool optimizeWithParam(bool print_info, + std::map *stats = nullptr); + + void saveCalib(); + + void saveMocapCalib(); + + void drawImageOverlay(pangolin::View &v, size_t cam_id); + + void 
recomputeDataLog(); + + void drawPlots(); + + bool hasCorners() const; + + void setOptIntrinsics(bool opt) { opt_intr = opt; } + + private: + static constexpr int UI_WIDTH = 300; + + VioDatasetPtr vio_dataset; + + CalibCornerMap calib_corners; + CalibCornerMap calib_corners_rejected; + CalibInitPoseMap calib_init_poses; + + std::shared_ptr processing_thread; + + std::shared_ptr> calib_opt; + + std::map reprojected_corners; + + std::string dataset_path; + std::string dataset_type; + + AprilGrid april_grid; + + std::string cache_path; + std::string cache_dataset_name; + + int skip_images; + + bool show_gui; + + const size_t MIN_CORNERS = 15; + + std::vector imu_noise; + + ////////////////////// + + pangolin::Var show_frame; + + pangolin::Var show_corners; + pangolin::Var show_corners_rejected; + pangolin::Var show_init_reproj; + pangolin::Var show_opt; + pangolin::Var show_ids; + + pangolin::Var show_accel; + pangolin::Var show_gyro; + pangolin::Var show_pos; + pangolin::Var show_rot_error; + + pangolin::Var show_mocap; + pangolin::Var show_mocap_rot_error; + pangolin::Var show_mocap_rot_vel; + + pangolin::Var show_spline; + pangolin::Var show_data; + + pangolin::Var opt_intr; + pangolin::Var opt_poses; + pangolin::Var opt_corners; + pangolin::Var opt_cam_time_offset; + pangolin::Var opt_imu_scale; + + pangolin::Var opt_mocap; + + pangolin::Var huber_thresh; + + pangolin::Var opt_until_convg; + pangolin::Var stop_thresh; + + pangolin::Plotter *plotter; + pangolin::View *img_view_display; + + std::vector> img_view; + + pangolin::DataLog imu_data_log, pose_data_log, mocap_data_log, vign_data_log; +}; + +} // namespace basalt diff --git a/include/basalt/calibration/vignette.h b/include/basalt/calibration/vignette.h new file mode 100644 index 0000000..8f1ed20 --- /dev/null +++ b/include/basalt/calibration/vignette.h @@ -0,0 +1,89 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. 
+https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#pragma once + +#include +#include + +#include + +namespace basalt { + +class VignetteEstimator { + public: + static const int SPLINE_N = 4; + static const int64_t knot_spacing = 1e10; + static const int border_size = 2; + + VignetteEstimator( + const VioDatasetPtr &vio_dataset, + const Eigen::aligned_vector &optical_centers, + const Eigen::aligned_vector &resolutions, + const std::map> + &reprojected_vignette, + const AprilGrid &april_grid); + + void compute_error(std::map> + *reprojected_vignette_error = nullptr); + + void opt_irradience(); + + void opt_vign(); + + void optimize(); + + void compute_data_log(std::vector> &vign_data_log); + + void save_vign_png(const std::string &path); + + inline const std::vector> &get_vign_param() { + return vign_param; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + private: + const VioDatasetPtr vio_dataset; + Eigen::aligned_vector optical_centers; + Eigen::aligned_vector resolutions; + std::map> + reprojected_vignette; + const AprilGrid &april_grid; + + size_t vign_size; + std::vector irradiance; + std::vector> vign_param; +}; +} // namespace basalt diff --git a/include/basalt/device/rs_t265.h b/include/basalt/device/rs_t265.h new file mode 100644 index 0000000..462c6d2 --- /dev/null +++ b/include/basalt/device/rs_t265.h @@ -0,0 +1,118 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko, Michael Loipführer and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include + +#include +#include +#include + +namespace basalt { + +struct RsIMUData { + double timestamp; + Eigen::Vector3d data; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +struct RsPoseData { + int64_t t_ns; + Sophus::SE3d data; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +class RsT265Device { + public: + using Ptr = std::shared_ptr; + + static constexpr int IMU_RATE = 200; + static constexpr int NUM_CAMS = 2; + + RsT265Device(bool manual_exposure, int skip_frames, int webp_quality, + double exposure_value = 10.0); + + void start(); + void stop(); + + bool setExposure(double exposure); // in milliseconds + void setSkipFrames(int skip); + void setWebpQuality(int quality); + + std::shared_ptr> exportCalibration(); + + OpticalFlowInput::Ptr last_img_data; + tbb::concurrent_bounded_queue* image_data_queue = + nullptr; + tbb::concurrent_bounded_queue::Ptr>* imu_data_queue = nullptr; + tbb::concurrent_bounded_queue* pose_data_queue = nullptr; + + private: + bool manual_exposure; + int skip_frames; + int webp_quality; + + int frame_counter = 0; + + Eigen::aligned_deque gyro_data_queue; + std::shared_ptr prev_accel_data; + + std::shared_ptr> calib; + + rs2::context context; + rs2::config config; + rs2::pipeline pipe; + rs2::sensor sensor; + + rs2::pipeline_profile profile; +}; + +} // namespace basalt diff --git a/include/basalt/hash_bow/hash_bow.h b/include/basalt/hash_bow/hash_bow.h new file mode 100644 index 0000000..c034edd --- /dev/null +++ b/include/basalt/hash_bow/hash_bow.h @@ -0,0 +1,167 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace basalt { + +template +class HashBow { + public: + HashBow(size_t num_bits) : num_bits(num_bits < 32 ? 
num_bits : 32) { + static_assert(N < 512, + "This implementation of HashBow only supports the descriptor " + "length below 512."); + } + + inline FeatureHash compute_hash(const std::bitset& descriptor) const { + FeatureHash res; + for (size_t i = 0; i < num_bits; ++i) { + res[i] = descriptor[word_bit_permutation[i]]; + } + return res; + } + + inline void compute_bow(const std::vector>& descriptors, + std::vector& hashes, + HashBowVector& bow_vector) const { + size_t descriptors_size = descriptors.size(); + hashes.resize(descriptors_size); + + std::unordered_map bow_map; + bow_map.clear(); + bow_map.reserve(descriptors_size); + + for (size_t i = 0; i < descriptors_size; i++) { + hashes[i] = compute_hash(descriptors[i]); + bow_map[hashes[i]] += 1.0; + } + + bow_vector.clear(); + + double l1_sum = 0; + for (const auto& kv : bow_map) { + bow_vector.emplace_back(kv); + l1_sum += std::abs(kv.second); + } + + for (auto& kv : bow_vector) { + kv.second /= l1_sum; + } + } + + inline void add_to_database(const TimeCamId& tcid, + const HashBowVector& bow_vector) { + for (const auto& kv : bow_vector) { + // std::pair p = std::make_pair(tcid, kv.second); + inverted_index[kv.first].emplace_back(tcid, kv.second); + } + } + + inline void querry_database( + const HashBowVector& bow_vector, size_t num_results, + std::vector>& results, + const int64_t* max_t_ns = nullptr) const { + results.clear(); + + std::unordered_map scores; + + for (const auto& kv : bow_vector) { + const auto range_it = inverted_index.find(kv.first); + + if (range_it != inverted_index.end()) + for (const auto& v : range_it->second) { + // if there is a maximum query time select only the frames that have + // timestamp below max_t_ns + if (!max_t_ns || v.first.frame_id < (*max_t_ns)) + scores[v.first] += std::abs(kv.second - v.second) - + std::abs(kv.second) - std::abs(v.second); + } + } + + results.reserve(scores.size()); + + for (const auto& kv : scores) + results.emplace_back(kv.first, -kv.second / 2.0); + + if 
(results.size() > num_results) { + std::partial_sort( + results.begin(), results.begin() + num_results, results.end(), + [](const auto& a, const auto& b) { return a.second > b.second; }); + + results.resize(num_results); + } + } + + protected: + constexpr static const size_t random_bit_permutation[512] = { + 484, 458, 288, 170, 215, 424, 41, 38, 293, 96, 172, 428, 508, 52, 370, + 1, 182, 472, 89, 339, 273, 234, 98, 217, 73, 195, 307, 306, 113, 429, + 161, 443, 364, 439, 301, 247, 325, 24, 490, 366, 75, 7, 464, 232, 49, + 196, 144, 69, 470, 387, 3, 86, 361, 313, 396, 356, 94, 201, 291, 360, + 107, 251, 413, 393, 296, 124, 308, 146, 298, 160, 121, 302, 151, 345, 336, + 26, 63, 238, 79, 267, 262, 437, 433, 350, 53, 134, 194, 452, 114, 54, + 82, 214, 191, 242, 482, 37, 432, 311, 130, 460, 422, 221, 271, 192, 474, + 46, 289, 34, 20, 95, 463, 499, 159, 272, 481, 129, 448, 173, 323, 258, + 416, 229, 334, 510, 461, 263, 362, 346, 39, 500, 381, 401, 492, 299, 33, + 169, 241, 11, 254, 449, 199, 486, 400, 365, 70, 436, 108, 19, 233, 505, + 152, 6, 480, 468, 278, 426, 253, 471, 328, 327, 139, 29, 27, 488, 332, + 290, 412, 164, 259, 352, 222, 186, 32, 319, 410, 211, 405, 187, 213, 507, + 205, 395, 62, 178, 36, 140, 87, 491, 351, 450, 314, 77, 342, 132, 133, + 477, 103, 389, 206, 197, 324, 485, 425, 297, 231, 123, 447, 126, 9, 64, + 181, 40, 14, 5, 261, 431, 333, 223, 4, 138, 220, 76, 44, 300, 331, + 78, 193, 497, 403, 435, 275, 147, 66, 368, 141, 451, 225, 250, 61, 18, + 444, 208, 380, 109, 255, 337, 372, 212, 359, 457, 31, 398, 354, 219, 117, + 248, 392, 203, 88, 479, 509, 149, 120, 145, 51, 15, 367, 190, 163, 417, + 454, 329, 183, 390, 83, 404, 249, 81, 264, 445, 317, 179, 244, 473, 71, + 111, 118, 209, 171, 224, 459, 446, 104, 13, 377, 200, 414, 198, 420, 226, + 153, 384, 25, 441, 305, 338, 316, 483, 184, 402, 48, 131, 502, 252, 469, + 12, 167, 243, 373, 35, 127, 341, 455, 379, 210, 340, 128, 430, 57, 434, + 330, 415, 494, 142, 355, 282, 322, 65, 105, 421, 68, 409, 466, 245, 
59, + 269, 112, 386, 257, 256, 93, 174, 16, 60, 143, 343, 115, 506, 276, 10, + 496, 489, 235, 47, 136, 22, 165, 204, 42, 465, 440, 498, 312, 504, 116, + 419, 185, 303, 218, 353, 283, 374, 2, 177, 137, 240, 102, 309, 292, 85, + 453, 388, 397, 438, 281, 279, 442, 110, 55, 101, 100, 150, 375, 406, 157, + 23, 0, 237, 376, 236, 216, 8, 154, 91, 456, 423, 176, 427, 284, 30, + 84, 349, 335, 56, 270, 227, 286, 168, 239, 122, 478, 162, 475, 166, 17, + 348, 285, 175, 155, 266, 382, 304, 268, 180, 295, 125, 371, 467, 277, 294, + 58, 347, 72, 280, 50, 287, 511, 80, 260, 326, 495, 45, 106, 399, 369, + 503, 357, 315, 418, 487, 99, 43, 320, 188, 407, 246, 501, 119, 158, 274, + 408, 230, 358, 90, 148, 363, 207, 344, 265, 462, 189, 310, 385, 67, 28, + 383, 378, 156, 394, 97, 476, 493, 321, 411, 228, 21, 391, 202, 92, 318, + 74, 135}; + + constexpr static std::array + compute_permutation() { + std::array res{}; + size_t j = 0; + for (size_t i = 0; i < 512 && j < FEATURE_HASH_MAX_SIZE; ++i) { + if (random_bit_permutation[i] < N) { + res[j] = random_bit_permutation[i]; + j++; + } + } + + return res; + } + + constexpr static const std::array + word_bit_permutation = compute_permutation(); + + size_t num_bits; + + tbb::concurrent_unordered_map< + FeatureHash, tbb::concurrent_vector>, + std::hash> + inverted_index; +}; + +} // namespace basalt diff --git a/include/basalt/io/dataset_io.h b/include/basalt/io/dataset_io.h new file mode 100644 index 0000000..af4776a --- /dev/null +++ b/include/basalt/io/dataset_io.h @@ -0,0 +1,178 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include + +namespace basalt { + +struct ImageData { + ImageData() : exposure(0) {} + + ManagedImage::Ptr img; + double exposure; +}; + +struct Observations { + Eigen::aligned_vector pos; + std::vector id; +}; + +struct GyroData { + int64_t timestamp_ns; + Eigen::Vector3d data; + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +struct AccelData { + int64_t timestamp_ns; + Eigen::Vector3d data; + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +struct PoseData { + int64_t timestamp_ns; + Sophus::SE3d data; + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +struct MocapPoseData { + int64_t timestamp_ns; + Sophus::SE3d data; + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +struct AprilgridCornersData { + int64_t timestamp_ns; + int cam_id; + + Eigen::aligned_vector corner_pos; + std::vector corner_id; +}; + +class VioDataset { + public: + virtual ~VioDataset(){}; + + virtual size_t get_num_cams() const = 0; + + virtual std::vector &get_image_timestamps() = 0; + + virtual const Eigen::aligned_vector &get_accel_data() const = 0; + virtual const Eigen::aligned_vector &get_gyro_data() const = 0; + virtual const std::vector &get_gt_timestamps() const = 0; + virtual const Eigen::aligned_vector &get_gt_pose_data() + const = 0; + virtual int64_t get_mocap_to_imu_offset_ns() const = 0; + virtual std::vector get_image_data(int64_t t_ns) = 0; + virtual std::vector get_image_data_cv_mat(int64_t t_ns) = 0; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +typedef std::shared_ptr VioDatasetPtr; + +class DatasetIoInterface { + public: + virtual void read(const std::string &path) = 0; + virtual void reset() = 0; + virtual VioDatasetPtr get_data() = 0; + + virtual ~DatasetIoInterface(){}; +}; + +typedef std::shared_ptr DatasetIoInterfacePtr; + +class 
DatasetIoFactory { + public: + static DatasetIoInterfacePtr getDatasetIo(const std::string &dataset_type, + bool load_mocap_as_gt = false); +}; + +} // namespace basalt + +namespace cereal { + +template +void serialize(Archive &archive, basalt::ManagedImage &m) { + archive(m.w); + archive(m.h); + + m.Reinitialise(m.w, m.h); + + archive(binary_data(m.ptr, m.size())); +} + +template +void serialize(Archive &ar, basalt::GyroData &c) { + ar(c.timestamp_ns, c.data); +} + +template +void serialize(Archive &ar, basalt::AccelData &c) { + ar(c.timestamp_ns, c.data); +} + +} // namespace cereal diff --git a/include/basalt/io/dataset_io_euroc.h b/include/basalt/io/dataset_io_euroc.h new file mode 100644 index 0000000..cf60a7c --- /dev/null +++ b/include/basalt/io/dataset_io_euroc.h @@ -0,0 +1,336 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef DATASET_IO_EUROC_H +#define DATASET_IO_EUROC_H + +#include +#include + +#include + +namespace basalt { + +class EurocVioDataset : public VioDataset { + size_t num_cams; + + std::string path; + + std::vector image_timestamps; + std::unordered_map image_path; + + // vector of images for every timestamp + // assumes vectors size is num_cams for every timestamp with null pointers for + // missing frames + // std::unordered_map> image_data; + + Eigen::aligned_vector accel_data; + Eigen::aligned_vector gyro_data; + + std::vector gt_timestamps; // ordered gt timestamps + Eigen::aligned_vector + gt_pose_data; // TODO: change to eigen aligned + + int64_t mocap_to_imu_offset_ns = 0; + + std::vector> exposure_times; + + public: + ~EurocVioDataset(){}; + + size_t get_num_cams() const { return num_cams; } + + std::vector &get_image_timestamps() { return image_timestamps; } + + const Eigen::aligned_vector &get_accel_data() const { + return accel_data; + } + const Eigen::aligned_vector &get_gyro_data() const { + return gyro_data; + } + const std::vector &get_gt_timestamps() const { + return gt_timestamps; + } + const Eigen::aligned_vector &get_gt_pose_data() const { + return gt_pose_data; + } + + int64_t get_mocap_to_imu_offset_ns() const { return mocap_to_imu_offset_ns; } + + std::vector get_image_data(int64_t t_ns) { + std::vector res(num_cams); + + const std::vector folder = {"/mav0/cam0/", "/mav0/cam1/"}; + + for (size_t i = 0; i < num_cams; i++) { + 
std::string full_image_path = + path + folder[i] + "data/" + image_path[t_ns]; + + if (fs::exists(full_image_path)) { + cv::Mat img = cv::imread(full_image_path, cv::IMREAD_UNCHANGED); + + if (img.type() == CV_8UC1) { + res[i].img.reset(new ManagedImage(img.cols, img.rows)); + + const uint8_t *data_in = img.ptr(); + uint16_t *data_out = res[i].img->ptr; + + size_t full_size = img.cols * img.rows; + for (size_t i = 0; i < full_size; i++) { + int val = data_in[i]; + val = val << 8; + data_out[i] = val; + } + } else if (img.type() == CV_8UC3) { + res[i].img.reset(new ManagedImage(img.cols, img.rows)); + + const uint8_t *data_in = img.ptr(); + uint16_t *data_out = res[i].img->ptr; + + size_t full_size = img.cols * img.rows; + for (size_t i = 0; i < full_size; i++) { + int val = data_in[i * 3]; + val = val << 8; + data_out[i] = val; + } + } else if (img.type() == CV_16UC1) { + res[i].img.reset(new ManagedImage(img.cols, img.rows)); + std::memcpy(res[i].img->ptr, img.ptr(), + img.cols * img.rows * sizeof(uint16_t)); + + } else { + std::cerr << "img.fmt.bpp " << img.type() << std::endl; + std::abort(); + } + + auto exp_it = exposure_times[i].find(t_ns); + if (exp_it != exposure_times[i].end()) { + res[i].exposure = exp_it->second; + } + } + } + + return res; + } + + // returns a vector of 2 images: for the first and the second camera at the defined timestamp. + std::vector get_image_data_cv_mat(int64_t t_ns) { + std::vector res(num_cams); + + const std::vector folder = {"/mav0/cam0/", "/mav0/cam1/"}; + + for (size_t i = 0; i < num_cams; i++) { + std::string full_image_path = + path + folder[i] + "data/" + image_path[t_ns]; + + if (fs::exists(full_image_path)) { + cv::Mat img = cv::imread(full_image_path, cv::IMREAD_COLOR); + + res.push_back(img); + // The rest of the body from get_image_data() was deleted. 
+ } + } + return res; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + friend class EurocIO; +}; + +class EurocIO : public DatasetIoInterface { + public: + EurocIO(bool load_mocap_as_gt) : load_mocap_as_gt(load_mocap_as_gt) {} + + void read(const std::string &path) { + if (!fs::exists(path)) + std::cerr << "No dataset found in " << path << std::endl; + + data.reset(new EurocVioDataset); + + data->num_cams = 2; + data->path = path; + + read_image_timestamps(path + "/mav0/cam0/"); + + read_imu_data(path + "/mav0/imu0/"); + + if (!load_mocap_as_gt && + fs::exists(path + "/mav0/state_groundtruth_estimate0/data.csv")) { + read_gt_data_state(path + "/mav0/state_groundtruth_estimate0/"); + } else if (!load_mocap_as_gt && fs::exists(path + "/mav0/gt/data.csv")) { + read_gt_data_pose(path + "/mav0/gt/"); + } else if (fs::exists(path + "/mav0/mocap0/data.csv")) { + read_gt_data_pose(path + "/mav0/mocap0/"); + } + + data->exposure_times.resize(data->num_cams); + if (fs::exists(path + "/mav0/cam0/exposure.csv")) { + std::cout << "Loading exposure times for cam0" << std::endl; + read_exposure(path + "/mav0/cam0/", data->exposure_times[0]); + } + if (fs::exists(path + "/mav0/cam1/exposure.csv")) { + std::cout << "Loading exposure times for cam1" << std::endl; + read_exposure(path + "/mav0/cam1/", data->exposure_times[1]); + } + } + + void reset() { data.reset(); } + + VioDatasetPtr get_data() { return data; } + + private: + void read_exposure(const std::string &path, + std::unordered_map &exposure_data) { + exposure_data.clear(); + + std::ifstream f(path + "exposure.csv"); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + + std::stringstream ss(line); + + char tmp; + int64_t timestamp, exposure_int; + Eigen::Vector3d gyro, accel; + + ss >> timestamp >> tmp >> exposure_int; + + exposure_data[timestamp] = exposure_int * 1e-9; + } + } + + void read_image_timestamps(const std::string &path) { + std::ifstream f(path + "data.csv"); + std::string line; 
+ while (std::getline(f, line)) { + if (line[0] == '#') continue; + std::stringstream ss(line); + char tmp; + int64_t t_ns; + std::string path; + ss >> t_ns >> tmp >> path; + + data->image_timestamps.emplace_back(t_ns); + data->image_path[t_ns] = path; + } + } + + void read_imu_data(const std::string &path) { + data->accel_data.clear(); + data->gyro_data.clear(); + + std::ifstream f(path + "data.csv"); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + + std::stringstream ss(line); + + char tmp; + uint64_t timestamp; + Eigen::Vector3d gyro, accel; + + ss >> timestamp >> tmp >> gyro[0] >> tmp >> gyro[1] >> tmp >> gyro[2] >> + tmp >> accel[0] >> tmp >> accel[1] >> tmp >> accel[2]; + + data->accel_data.emplace_back(); + data->accel_data.back().timestamp_ns = timestamp; + data->accel_data.back().data = accel; + + data->gyro_data.emplace_back(); + data->gyro_data.back().timestamp_ns = timestamp; + data->gyro_data.back().data = gyro; + } + } + + void read_gt_data_state(const std::string &path) { + data->gt_timestamps.clear(); + data->gt_pose_data.clear(); + + std::ifstream f(path + "data.csv"); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + + std::stringstream ss(line); + + char tmp; + uint64_t timestamp; + Eigen::Quaterniond q; + Eigen::Vector3d pos, vel, accel_bias, gyro_bias; + + ss >> timestamp >> tmp >> pos[0] >> tmp >> pos[1] >> tmp >> pos[2] >> + tmp >> q.w() >> tmp >> q.x() >> tmp >> q.y() >> tmp >> q.z() >> tmp >> + vel[0] >> tmp >> vel[1] >> tmp >> vel[2] >> tmp >> accel_bias[0] >> + tmp >> accel_bias[1] >> tmp >> accel_bias[2] >> tmp >> gyro_bias[0] >> + tmp >> gyro_bias[1] >> tmp >> gyro_bias[2]; + + data->gt_timestamps.emplace_back(timestamp); + data->gt_pose_data.emplace_back(q, pos); + } + } + + void read_gt_data_pose(const std::string &path) { + data->gt_timestamps.clear(); + data->gt_pose_data.clear(); + + std::ifstream f(path + "data.csv"); + std::string line; + while 
(std::getline(f, line)) { + if (line[0] == '#') continue; + + std::stringstream ss(line); + + char tmp; + uint64_t timestamp; + Eigen::Quaterniond q; + Eigen::Vector3d pos; + + ss >> timestamp >> tmp >> pos[0] >> tmp >> pos[1] >> tmp >> pos[2] >> + tmp >> q.w() >> tmp >> q.x() >> tmp >> q.y() >> tmp >> q.z(); + + data->gt_timestamps.emplace_back(timestamp); + data->gt_pose_data.emplace_back(q, pos); + } + } + + std::shared_ptr data; + bool load_mocap_as_gt; +}; // namespace basalt + +} // namespace basalt + +#endif // DATASET_IO_H diff --git a/include/basalt/io/dataset_io_kitti.h b/include/basalt/io/dataset_io_kitti.h new file mode 100644 index 0000000..7fc4357 --- /dev/null +++ b/include/basalt/io/dataset_io_kitti.h @@ -0,0 +1,220 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#pragma once + +#include +#include + +#include + +namespace basalt { + +class KittiVioDataset : public VioDataset { + size_t num_cams; + + std::string path; + + std::vector image_timestamps; + std::unordered_map image_path; + + // vector of images for every timestamp + // assumes vectors size is num_cams for every timestamp with null pointers for + // missing frames + // std::unordered_map> image_data; + + Eigen::aligned_vector accel_data; + Eigen::aligned_vector gyro_data; + + std::vector gt_timestamps; // ordered gt timestamps + Eigen::aligned_vector + gt_pose_data; // TODO: change to eigen aligned + + int64_t mocap_to_imu_offset_ns; + + public: + ~KittiVioDataset(){}; + + size_t get_num_cams() const { return num_cams; } + + std::vector &get_image_timestamps() { return image_timestamps; } + + const Eigen::aligned_vector &get_accel_data() const { + return accel_data; + } + const Eigen::aligned_vector &get_gyro_data() const { + return gyro_data; + } + const std::vector &get_gt_timestamps() const { + return gt_timestamps; + } + const Eigen::aligned_vector &get_gt_pose_data() const { + return gt_pose_data; + } + int64_t get_mocap_to_imu_offset_ns() const { return mocap_to_imu_offset_ns; } + + std::vector get_image_data(int64_t t_ns) { + std::vector res(num_cams); + + const std::vector folder = {"/image_0/", "/image_1/"}; + + for (size_t i = 0; i < num_cams; i++) { + std::string full_image_path = path + folder[i] + image_path[t_ns]; + + if 
(fs::exists(full_image_path)) { + cv::Mat img = cv::imread(full_image_path, cv::IMREAD_UNCHANGED); + + if (img.type() == CV_8UC1) { + res[i].img.reset(new ManagedImage(img.cols, img.rows)); + + const uint8_t *data_in = img.ptr(); + uint16_t *data_out = res[i].img->ptr; + + size_t full_size = img.cols * img.rows; + for (size_t i = 0; i < full_size; i++) { + int val = data_in[i]; + val = val << 8; + data_out[i] = val; + } + } else { + std::cerr << "img.fmt.bpp " << img.type() << std::endl; + std::abort(); + } + } + } + + return res; + } + + std::vector get_image_data_cv_mat(int64_t t_ns) { + std::vector res(num_cams); + + const std::vector folder = {"/image_0/", "/image_1/"}; + + for (size_t i = 0; i < num_cams; i++) { + std::string full_image_path = + path + folder[i] + image_path[t_ns]; + + if (fs::exists(full_image_path)) { + cv::Mat img = cv::imread(full_image_path, cv::IMREAD_GRAYSCALE); + + res.push_back(img); + // The rest of the body from get_image_data() was deleted. + } + } + return res; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + friend class KittiIO; +}; + +class KittiIO : public DatasetIoInterface { + public: + KittiIO() {} + + void read(const std::string &path) { + if (!fs::exists(path)) + std::cerr << "No dataset found in " << path << std::endl; + + data.reset(new KittiVioDataset); + + data->num_cams = 2; + data->path = path; + + read_image_timestamps(path + "/times.txt"); + + if (fs::exists(path + "/poses.txt")) { + read_gt_data_pose(path + "/poses.txt"); + } + } + + void reset() { data.reset(); } + + VioDatasetPtr get_data() { return data; } + + private: + void read_image_timestamps(const std::string &path) { + std::ifstream f(path); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + std::stringstream ss(line); + + double t_s; + ss >> t_s; + + int64_t t_ns = t_s * 1e9; + + std::stringstream ss1; + ss1 << std::setfill('0') << std::setw(6) << data->image_timestamps.size() + << ".png"; + + 
data->image_timestamps.emplace_back(t_ns); + data->image_path[t_ns] = ss1.str(); + } + } + + void read_gt_data_pose(const std::string &path) { + data->gt_timestamps.clear(); + data->gt_pose_data.clear(); + + int i = 0; + + std::ifstream f(path); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + + std::stringstream ss(line); + + Eigen::Matrix3d rot; + Eigen::Vector3d pos; + + ss >> rot(0, 0) >> rot(0, 1) >> rot(0, 2) >> pos[0] >> rot(1, 0) >> + rot(1, 1) >> rot(1, 2) >> pos[1] >> rot(2, 0) >> rot(2, 1) >> + rot(2, 2) >> pos[2]; + + data->gt_timestamps.emplace_back(data->image_timestamps[i]); + data->gt_pose_data.emplace_back(Eigen::Quaterniond(rot), pos); + i++; + } + } + + std::shared_ptr data; +}; + +} // namespace basalt diff --git a/include/basalt/io/dataset_io_rosbag.h b/include/basalt/io/dataset_io_rosbag.h new file mode 100644 index 0000000..f68eba1 --- /dev/null +++ b/include/basalt/io/dataset_io_rosbag.h @@ -0,0 +1,406 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef DATASET_IO_ROSBAG_H +#define DATASET_IO_ROSBAG_H + +#include +#include + +#include + +// Hack to access private functions +#define private public +#include +#include +#undef private + +#include +#include +#include +#include +#include + +#include + +namespace basalt { + +class RosbagVioDataset : public VioDataset { + std::shared_ptr bag; + std::mutex m; + + size_t num_cams; + + std::vector image_timestamps; + + // vector of images for every timestamp + // assumes vectors size is num_cams for every timestamp with null pointers for + // missing frames + std::unordered_map>> + image_data_idx; + + Eigen::aligned_vector accel_data; + Eigen::aligned_vector gyro_data; + + std::vector gt_timestamps; // ordered gt timestamps + Eigen::aligned_vector + gt_pose_data; // TODO: change to eigen aligned + + int64_t mocap_to_imu_offset_ns; + + public: + ~RosbagVioDataset() {} + + size_t get_num_cams() const { return num_cams; } + + std::vector &get_image_timestamps() { return image_timestamps; } + + const Eigen::aligned_vector &get_accel_data() const { + return accel_data; + } + const Eigen::aligned_vector &get_gyro_data() const { + return gyro_data; + } + const std::vector &get_gt_timestamps() const { + return 
gt_timestamps; + } + const Eigen::aligned_vector &get_gt_pose_data() const { + return gt_pose_data; + } + + int64_t get_mocap_to_imu_offset_ns() const { return mocap_to_imu_offset_ns; } + + std::vector get_image_data(int64_t t_ns) { + std::vector res(num_cams); + + auto it = image_data_idx.find(t_ns); + + if (it != image_data_idx.end()) + for (size_t i = 0; i < num_cams; i++) { + ImageData &id = res[i]; + + if (!it->second[i].has_value()) continue; + + m.lock(); + sensor_msgs::ImageConstPtr img_msg = + bag->instantiateBuffer(*it->second[i]); + m.unlock(); + + // std::cerr << "img_msg->width " << img_msg->width << " + // img_msg->height " + // << img_msg->height << std::endl; + + id.img.reset( + new ManagedImage(img_msg->width, img_msg->height)); + + if (!img_msg->header.frame_id.empty() && + std::isdigit(img_msg->header.frame_id[0])) { + id.exposure = std::stol(img_msg->header.frame_id) * 1e-9; + } else { + id.exposure = -1; + } + + if (img_msg->encoding == "mono8") { + const uint8_t *data_in = img_msg->data.data(); + uint16_t *data_out = id.img->ptr; + + for (size_t i = 0; i < img_msg->data.size(); i++) { + int val = data_in[i]; + val = val << 8; + data_out[i] = val; + } + + } else if (img_msg->encoding == "mono16") { + std::memcpy(id.img->ptr, img_msg->data.data(), img_msg->data.size()); + } else { + std::cerr << "Encoding " << img_msg->encoding << " is not supported." 
+ << std::endl; + std::abort(); + } + } + + return res; + } + + std::vector get_image_data_cv_mat(int64_t t_ns) { + cv::Mat mat = cv::Mat(1, 10, CV_32F, 1); + return mat; + } + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + friend class RosbagIO; +}; + +class RosbagIO : public DatasetIoInterface { + public: + RosbagIO() {} + + void read(const std::string &path) { + if (!fs::exists(path)) + std::cerr << "No dataset found in " << path << std::endl; + + data.reset(new RosbagVioDataset); + + data->bag.reset(new rosbag::Bag); + data->bag->open(path, rosbag::bagmode::Read); + + rosbag::View view(*data->bag); + + // get topics + std::vector connection_infos = + view.getConnections(); + + std::set cam_topics; + std::string imu_topic; + std::string mocap_topic; + std::string point_topic; + + for (const rosbag::ConnectionInfo *info : connection_infos) { + // if (info->topic.substr(0, 4) == std::string("/cam")) { + // cam_topics.insert(info->topic); + // } else if (info->topic.substr(0, 4) == std::string("/imu")) { + // imu_topic = info->topic; + // } else if (info->topic.substr(0, 5) == std::string("/vrpn") || + // info->topic.substr(0, 6) == std::string("/vicon")) { + // mocap_topic = info->topic; + // } + + if (info->datatype == std::string("sensor_msgs/Image")) { + cam_topics.insert(info->topic); + } else if (info->datatype == std::string("sensor_msgs/Imu") && + info->topic.rfind("/fcu", 0) != 0) { + imu_topic = info->topic; + } else if (info->datatype == + std::string("geometry_msgs/TransformStamped") || + info->datatype == std::string("geometry_msgs/PoseStamped")) { + mocap_topic = info->topic; + } else if (info->datatype == std::string("geometry_msgs/PointStamped")) { + point_topic = info->topic; + } + } + + std::cout << "imu_topic: " << imu_topic << std::endl; + std::cout << "mocap_topic: " << mocap_topic << std::endl; + std::cout << "cam_topics: "; + for (const std::string &s : cam_topics) std::cout << s << " "; + std::cout << std::endl; + + std::map topic_to_id; + int idx = 0; 
+ for (const std::string &s : cam_topics) { + topic_to_id[s] = idx; + idx++; + } + + data->num_cams = cam_topics.size(); + + int num_msgs = 0; + + int64_t min_time = std::numeric_limits::max(); + int64_t max_time = std::numeric_limits::min(); + + std::vector mocap_msgs; + std::vector point_msgs; + + std::vector + system_to_imu_offset_vec; // t_imu = t_system + system_to_imu_offset + std::vector system_to_mocap_offset_vec; // t_mocap = t_system + + // system_to_mocap_offset + + std::set image_timestamps; + + for (const rosbag::MessageInstance &m : view) { + const std::string &topic = m.getTopic(); + + if (cam_topics.find(topic) != cam_topics.end()) { + sensor_msgs::ImageConstPtr img_msg = + m.instantiate(); + int64_t timestamp_ns = img_msg->header.stamp.toNSec(); + + auto &img_vec = data->image_data_idx[timestamp_ns]; + if (img_vec.size() == 0) img_vec.resize(data->num_cams); + + img_vec[topic_to_id.at(topic)] = m.index_entry_; + image_timestamps.insert(timestamp_ns); + + min_time = std::min(min_time, timestamp_ns); + max_time = std::max(max_time, timestamp_ns); + } + + if (imu_topic == topic) { + sensor_msgs::ImuConstPtr imu_msg = m.instantiate(); + int64_t time = imu_msg->header.stamp.toNSec(); + + data->accel_data.emplace_back(); + data->accel_data.back().timestamp_ns = time; + data->accel_data.back().data = Eigen::Vector3d( + imu_msg->linear_acceleration.x, imu_msg->linear_acceleration.y, + imu_msg->linear_acceleration.z); + + data->gyro_data.emplace_back(); + data->gyro_data.back().timestamp_ns = time; + data->gyro_data.back().data = Eigen::Vector3d( + imu_msg->angular_velocity.x, imu_msg->angular_velocity.y, + imu_msg->angular_velocity.z); + + min_time = std::min(min_time, time); + max_time = std::max(max_time, time); + + int64_t msg_arrival_time = m.getTime().toNSec(); + system_to_imu_offset_vec.push_back(time - msg_arrival_time); + } + + if (mocap_topic == topic) { + geometry_msgs::TransformStampedConstPtr mocap_msg = + m.instantiate(); + + // Try different 
message type if instantiate did not work + if (!mocap_msg) { + geometry_msgs::PoseStampedConstPtr mocap_pose_msg = + m.instantiate(); + + geometry_msgs::TransformStampedPtr mocap_new_msg( + new geometry_msgs::TransformStamped); + mocap_new_msg->header = mocap_pose_msg->header; + mocap_new_msg->transform.rotation = mocap_pose_msg->pose.orientation; + mocap_new_msg->transform.translation.x = + mocap_pose_msg->pose.position.x; + mocap_new_msg->transform.translation.y = + mocap_pose_msg->pose.position.y; + mocap_new_msg->transform.translation.z = + mocap_pose_msg->pose.position.z; + + mocap_msg = mocap_new_msg; + } + + int64_t time = mocap_msg->header.stamp.toNSec(); + + mocap_msgs.push_back(mocap_msg); + + int64_t msg_arrival_time = m.getTime().toNSec(); + system_to_mocap_offset_vec.push_back(time - msg_arrival_time); + } + + if (point_topic == topic) { + geometry_msgs::PointStampedConstPtr mocap_msg = + m.instantiate(); + + int64_t time = mocap_msg->header.stamp.toNSec(); + + point_msgs.push_back(mocap_msg); + + int64_t msg_arrival_time = m.getTime().toNSec(); + system_to_mocap_offset_vec.push_back(time - msg_arrival_time); + } + + num_msgs++; + } + + data->image_timestamps.clear(); + data->image_timestamps.insert(data->image_timestamps.begin(), + image_timestamps.begin(), + image_timestamps.end()); + + if (system_to_mocap_offset_vec.size() > 0) { + int64_t system_to_imu_offset = + system_to_imu_offset_vec[system_to_imu_offset_vec.size() / 2]; + + int64_t system_to_mocap_offset = + system_to_mocap_offset_vec[system_to_mocap_offset_vec.size() / 2]; + + data->mocap_to_imu_offset_ns = + system_to_imu_offset - system_to_mocap_offset; + } + + data->gt_pose_data.clear(); + data->gt_timestamps.clear(); + + if (!mocap_msgs.empty()) + for (size_t i = 0; i < mocap_msgs.size() - 1; i++) { + auto mocap_msg = mocap_msgs[i]; + + int64_t time = mocap_msg->header.stamp.toNSec(); + + Eigen::Quaterniond q( + mocap_msg->transform.rotation.w, mocap_msg->transform.rotation.x, + 
mocap_msg->transform.rotation.y, mocap_msg->transform.rotation.z); + + Eigen::Vector3d t(mocap_msg->transform.translation.x, + mocap_msg->transform.translation.y, + mocap_msg->transform.translation.z); + + int64_t timestamp_ns = time + data->mocap_to_imu_offset_ns; + data->gt_timestamps.emplace_back(timestamp_ns); + data->gt_pose_data.emplace_back(q, t); + } + + if (!point_msgs.empty()) + for (size_t i = 0; i < point_msgs.size() - 1; i++) { + auto point_msg = point_msgs[i]; + + int64_t time = point_msg->header.stamp.toNSec(); + + Eigen::Vector3d t(point_msg->point.x, point_msg->point.y, + point_msg->point.z); + + int64_t timestamp_ns = time; // + data->mocap_to_imu_offset_ns; + data->gt_timestamps.emplace_back(timestamp_ns); + data->gt_pose_data.emplace_back(Sophus::SO3d(), t); + } + + std::cout << "Total number of messages: " << num_msgs << std::endl; + std::cout << "Image size: " << data->image_data_idx.size() << std::endl; + + std::cout << "Min time: " << min_time << " max time: " << max_time + << " mocap to imu offset: " << data->mocap_to_imu_offset_ns + << std::endl; + + std::cout << "Number of mocap poses: " << data->gt_timestamps.size() + << std::endl; + } + + void reset() { data.reset(); } + + VioDatasetPtr get_data() { + // return std::dynamic_pointer_cast(data); + return data; + } + + private: + std::shared_ptr data; +}; + +} // namespace basalt + +#endif // DATASET_IO_ROSBAG_H diff --git a/include/basalt/io/dataset_io_uzh.h b/include/basalt/io/dataset_io_uzh.h new file mode 100644 index 0000000..ed426b4 --- /dev/null +++ b/include/basalt/io/dataset_io_uzh.h @@ -0,0 +1,316 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#pragma once + +#include +#include + +#include + +namespace basalt { + +class UzhVioDataset : public VioDataset { + size_t num_cams; + + std::string path; + + std::vector image_timestamps; + std::unordered_map left_image_path; + std::unordered_map right_image_path; + + // vector of images for every timestamp + // assumes vectors size is num_cams for every timestamp with null pointers for + // missing frames + // std::unordered_map> image_data; + + Eigen::aligned_vector accel_data; + Eigen::aligned_vector gyro_data; + + std::vector gt_timestamps; // ordered gt timestamps + Eigen::aligned_vector + gt_pose_data; // TODO: change to eigen aligned + + int64_t mocap_to_imu_offset_ns = 0; + + std::vector> exposure_times; + + public: + ~UzhVioDataset(){}; + + size_t get_num_cams() const { return num_cams; } + + std::vector &get_image_timestamps() { return image_timestamps; } + + const Eigen::aligned_vector &get_accel_data() const { + return accel_data; + } + const Eigen::aligned_vector &get_gyro_data() const { + return gyro_data; + } + const std::vector &get_gt_timestamps() const { + return gt_timestamps; + } + const Eigen::aligned_vector &get_gt_pose_data() const { + return gt_pose_data; + } + + int64_t get_mocap_to_imu_offset_ns() const { return mocap_to_imu_offset_ns; } + + std::vector get_image_data(int64_t t_ns) { + std::vector res(num_cams); + + for (size_t i = 0; i < num_cams; i++) { + std::string full_image_path = + path + "/" + + (i == 0 ? 
left_image_path.at(t_ns) : right_image_path.at(t_ns)); + + if (fs::exists(full_image_path)) { + cv::Mat img = cv::imread(full_image_path, cv::IMREAD_UNCHANGED); + + if (img.type() == CV_8UC1) { + res[i].img.reset(new ManagedImage(img.cols, img.rows)); + + const uint8_t *data_in = img.ptr(); + uint16_t *data_out = res[i].img->ptr; + + size_t full_size = img.cols * img.rows; + for (size_t i = 0; i < full_size; i++) { + int val = data_in[i]; + val = val << 8; + data_out[i] = val; + } + } else if (img.type() == CV_8UC3) { + res[i].img.reset(new ManagedImage(img.cols, img.rows)); + + const uint8_t *data_in = img.ptr(); + uint16_t *data_out = res[i].img->ptr; + + size_t full_size = img.cols * img.rows; + for (size_t i = 0; i < full_size; i++) { + int val = data_in[i * 3]; + val = val << 8; + data_out[i] = val; + } + } else if (img.type() == CV_16UC1) { + res[i].img.reset(new ManagedImage(img.cols, img.rows)); + std::memcpy(res[i].img->ptr, img.ptr(), + img.cols * img.rows * sizeof(uint16_t)); + + } else { + std::cerr << "img.fmt.bpp " << img.type() << std::endl; + std::abort(); + } + + auto exp_it = exposure_times[i].find(t_ns); + if (exp_it != exposure_times[i].end()) { + res[i].exposure = exp_it->second; + } + } + } + + return res; + } + + std::vector get_image_data_cv_mat(int64_t t_ns) { + cv::Mat mat = cv::Mat(1, 10, CV_32F, 1); + return mat; + } + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + friend class UzhIO; +}; + +class UzhIO : public DatasetIoInterface { + public: + UzhIO() {} + + void read(const std::string &path) { + if (!fs::exists(path)) + std::cerr << "No dataset found in " << path << std::endl; + + data.reset(new UzhVioDataset); + + data->num_cams = 2; + data->path = path; + + read_image_timestamps(path); + + std::cout << "Loaded " << data->get_image_timestamps().size() + << " timestamps, " << data->left_image_path.size() + << " left images and " << data->right_image_path.size() + << std::endl; + + // { + // int64_t t_ns = data->get_image_timestamps()[0]; + // 
std::cout << t_ns << " " << data->left_image_path.at(t_ns) << " " + // << data->right_image_path.at(t_ns) << std::endl; + // } + + read_imu_data(path + "/imu.txt"); + + std::cout << "Loaded " << data->get_gyro_data().size() << " imu msgs." + << std::endl; + + if (fs::exists(path + "/groundtruth.txt")) { + read_gt_data_pose(path + "/groundtruth.txt"); + } + + data->exposure_times.resize(data->num_cams); + } + + void reset() { data.reset(); } + + VioDatasetPtr get_data() { return data; } + + private: + void read_exposure(const std::string &path, + std::unordered_map &exposure_data) { + exposure_data.clear(); + + std::ifstream f(path + "exposure.csv"); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + + std::stringstream ss(line); + + char tmp; + int64_t timestamp, exposure_int; + Eigen::Vector3d gyro, accel; + + ss >> timestamp >> tmp >> exposure_int; + + exposure_data[timestamp] = exposure_int * 1e-9; + } + } + + void read_image_timestamps(const std::string &path) { + { + std::ifstream f(path + "/left_images.txt"); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + std::stringstream ss(line); + int tmp; + double t_s; + std::string path; + ss >> tmp >> t_s >> path; + + int64_t t_ns = t_s * 1e9; + + data->image_timestamps.emplace_back(t_ns); + data->left_image_path[t_ns] = path; + } + } + + { + std::ifstream f(path + "/right_images.txt"); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + std::stringstream ss(line); + int tmp; + double t_s; + std::string path; + ss >> tmp >> t_s >> path; + + int64_t t_ns = t_s * 1e9; + + data->right_image_path[t_ns] = path; + } + } + } + + void read_imu_data(const std::string &path) { + data->accel_data.clear(); + data->gyro_data.clear(); + + std::ifstream f(path); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + + std::stringstream ss(line); + + int tmp; + double timestamp; + 
Eigen::Vector3d gyro, accel; + + ss >> tmp >> timestamp >> gyro[0] >> gyro[1] >> gyro[2] >> accel[0] >> + accel[1] >> accel[2]; + + int64_t t_ns = timestamp * 1e9; + + data->accel_data.emplace_back(); + data->accel_data.back().timestamp_ns = t_ns; + data->accel_data.back().data = accel; + + data->gyro_data.emplace_back(); + data->gyro_data.back().timestamp_ns = t_ns; + data->gyro_data.back().data = gyro; + } + } + + void read_gt_data_pose(const std::string &path) { + data->gt_timestamps.clear(); + data->gt_pose_data.clear(); + + std::ifstream f(path); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + + std::stringstream ss(line); + + int tmp; + double timestamp; + Eigen::Quaterniond q; + Eigen::Vector3d pos; + + ss >> tmp >> timestamp >> pos[0] >> pos[1] >> pos[2] >> q.x() >> q.y() >> + q.z() >> q.w(); + + int64_t t_ns = timestamp * 1e9; + + data->gt_timestamps.emplace_back(t_ns); + data->gt_pose_data.emplace_back(q, pos); + } + } + + std::shared_ptr data; +}; + +} // namespace basalt diff --git a/include/basalt/io/marg_data_io.h b/include/basalt/io/marg_data_io.h new file mode 100644 index 0000000..aae0e71 --- /dev/null +++ b/include/basalt/io/marg_data_io.h @@ -0,0 +1,78 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include +#include + +#include + +namespace basalt { + +class MargDataSaver { + public: + using Ptr = std::shared_ptr; + + MargDataSaver(const std::string& path); + ~MargDataSaver() { + saving_thread->join(); + saving_img_thread->join(); + } + tbb::concurrent_bounded_queue in_marg_queue; + + private: + std::shared_ptr saving_thread; + std::shared_ptr saving_img_thread; + + tbb::concurrent_bounded_queue save_image_queue; +}; + +class MargDataLoader { + public: + using Ptr = std::shared_ptr; + + MargDataLoader(); + + void start(const std::string& path); + ~MargDataLoader() { + if (processing_thread) processing_thread->join(); + } + + tbb::concurrent_bounded_queue* out_marg_queue; + + private: + std::shared_ptr processing_thread; +}; +} // namespace basalt diff --git a/include/basalt/linearization/block_diagonal.hpp b/include/basalt/linearization/block_diagonal.hpp new file mode 100644 index 0000000..2353e77 --- /dev/null +++ b/include/basalt/linearization/block_diagonal.hpp @@ -0,0 +1,89 @@ +#pragma 
once + +#include + +#include + +#include + +namespace basalt { + +// TODO: expand IndexedBlocks to small class / struct that also holds info on +// block size and number of blocks, so we don't have to pass it around all the +// time and we can directly implement things link adding diagonal and matrix +// vector products in this sparse block diagonal matrix. + +// map of camera index to block; used to represent sparse block diagonal matrix +template +using IndexedBlocks = + std::unordered_map>; + +// scale dimensions of JTJ as you would do for jacobian scaling of J beforehand, +// with diagonal scaling matrix D: For jacobain we would use JD, so for JTJ we +// use DJTJD. +template +void scale_jacobians( + IndexedBlocks& block_diagonal, size_t num_blocks, size_t block_size, + const Eigen::Matrix& scaling_vector) { + BASALT_ASSERT(num_blocks * block_size == + unsigned_cast(scaling_vector.size())); + for (auto& [cam_idx, block] : block_diagonal) { + auto D = + scaling_vector.segment(block_size * cam_idx, block_size).asDiagonal(); + block = D * block * D; + } +} + +// add diagonal to block-diagonal matrix; missing blocks are assumed to be 0 +template +void add_diagonal(IndexedBlocks& block_diagonal, size_t num_blocks, + size_t block_size, + const Eigen::Matrix& diagonal) { + BASALT_ASSERT(num_blocks * block_size == unsigned_cast(diagonal.size())); + for (size_t idx = 0; idx < num_blocks; ++idx) { + auto [it, is_new] = block_diagonal.try_emplace(idx); + if (is_new) { + it->second = diagonal.segment(block_size * idx, block_size).asDiagonal(); + } else { + it->second += diagonal.segment(block_size * idx, block_size).asDiagonal(); + } + } +} + +// sum up diagonal blocks in hash map for parallel reduction +template +class BlockDiagonalAccumulator { + public: + using VecX = Eigen::Matrix; + using MatX = Eigen::Matrix; + + inline void add(size_t idx, MatX&& block) { + auto [it, is_new] = block_diagonal_.try_emplace(idx); + if (is_new) { + it->second = std::move(block); + } else 
{ + it->second += block; + } + } + + inline void add_diag(size_t num_blocks, size_t block_size, + const VecX& diagonal) { + add_diagonal(block_diagonal_, num_blocks, block_size, diagonal); + } + + inline void join(BlockDiagonalAccumulator& b) { + for (auto& [k, v] : b.block_diagonal_) { + auto [it, is_new] = block_diagonal_.try_emplace(k); + if (is_new) { + it->second = std::move(v); + } else { + it->second += v; + } + } + } + + IndexedBlocks block_diagonal_; +}; + +} // namespace basalt diff --git a/include/basalt/linearization/imu_block.hpp b/include/basalt/linearization/imu_block.hpp new file mode 100644 index 0000000..5707943 --- /dev/null +++ b/include/basalt/linearization/imu_block.hpp @@ -0,0 +1,235 @@ +#pragma once + +#include +#include +#include + +namespace basalt { + +template +class ImuBlock { + public: + using Scalar = Scalar_; + + using Vec3 = Eigen::Matrix; + using VecX = Eigen::Matrix; + + using MatX = Eigen::Matrix; + + ImuBlock(const IntegratedImuMeasurement* meas, + const ImuLinData* imu_lin_data, const AbsOrderMap& aom) + : imu_meas(meas), imu_lin_data(imu_lin_data), aom(aom) { + Jp.resize(POSE_VEL_BIAS_SIZE, 2 * POSE_VEL_BIAS_SIZE); + r.resize(POSE_VEL_BIAS_SIZE); + } + + Scalar linearizeImu( + const Eigen::aligned_map>& + frame_states) { + Jp.setZero(); + r.setZero(); + + const int64_t start_t = imu_meas->get_start_t_ns(); + const int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns(); + + const size_t start_idx = 0; + const size_t end_idx = POSE_VEL_BIAS_SIZE; + + PoseVelBiasStateWithLin start_state = frame_states.at(start_t); + PoseVelBiasStateWithLin end_state = frame_states.at(end_t); + + typename IntegratedImuMeasurement::MatNN d_res_d_start, d_res_d_end; + typename IntegratedImuMeasurement::MatN3 d_res_d_bg, d_res_d_ba; + + typename PoseVelState::VecN res = imu_meas->residual( + start_state.getStateLin(), imu_lin_data->g, end_state.getStateLin(), + start_state.getStateLin().bias_gyro, + start_state.getStateLin().bias_accel, 
&d_res_d_start, &d_res_d_end, + &d_res_d_bg, &d_res_d_ba); + + if (start_state.isLinearized() || end_state.isLinearized()) { + res = imu_meas->residual( + start_state.getState(), imu_lin_data->g, end_state.getState(), + start_state.getState().bias_gyro, start_state.getState().bias_accel); + } + + // error + Scalar imu_error = + Scalar(0.5) * (imu_meas->get_sqrt_cov_inv() * res).squaredNorm(); + + // imu residual linearization + Jp.template block<9, 9>(0, start_idx) = + imu_meas->get_sqrt_cov_inv() * d_res_d_start; + Jp.template block<9, 9>(0, end_idx) = + imu_meas->get_sqrt_cov_inv() * d_res_d_end; + + Jp.template block<9, 3>(0, start_idx + 9) = + imu_meas->get_sqrt_cov_inv() * d_res_d_bg; + Jp.template block<9, 3>(0, start_idx + 12) = + imu_meas->get_sqrt_cov_inv() * d_res_d_ba; + + r.template segment<9>(0) = imu_meas->get_sqrt_cov_inv() * res; + + // difference between biases + Scalar dt = imu_meas->get_dt_ns() * Scalar(1e-9); + + Vec3 gyro_bias_weight_dt = + imu_lin_data->gyro_bias_weight_sqrt / std::sqrt(dt); + Vec3 res_bg = + start_state.getState().bias_gyro - end_state.getState().bias_gyro; + + Jp.template block<3, 3>(9, start_idx + 9) = + gyro_bias_weight_dt.asDiagonal(); + Jp.template block<3, 3>(9, end_idx + 9) = + (-gyro_bias_weight_dt).asDiagonal(); + + r.template segment<3>(9) += gyro_bias_weight_dt.asDiagonal() * res_bg; + + Scalar bg_error = + Scalar(0.5) * (gyro_bias_weight_dt.asDiagonal() * res_bg).squaredNorm(); + + Vec3 accel_bias_weight_dt = + imu_lin_data->accel_bias_weight_sqrt / std::sqrt(dt); + Vec3 res_ba = + start_state.getState().bias_accel - end_state.getState().bias_accel; + + Jp.template block<3, 3>(12, start_idx + 12) = + accel_bias_weight_dt.asDiagonal(); + Jp.template block<3, 3>(12, end_idx + 12) = + (-accel_bias_weight_dt).asDiagonal(); + + r.template segment<3>(12) += accel_bias_weight_dt.asDiagonal() * res_ba; + + Scalar ba_error = + Scalar(0.5) * + (accel_bias_weight_dt.asDiagonal() * res_ba).squaredNorm(); + + return imu_error 
+ bg_error + ba_error; + } + + void add_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r, size_t row_start_idx) const { + int64_t start_t = imu_meas->get_start_t_ns(); + int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns(); + + const size_t start_idx = aom.abs_order_map.at(start_t).first; + const size_t end_idx = aom.abs_order_map.at(end_t).first; + + Q2Jp.template block(row_start_idx, + start_idx) += + Jp.template topLeftCorner(); + + Q2Jp.template block(row_start_idx, + end_idx) += + Jp.template topRightCorner(); + + Q2r.template segment(row_start_idx) += r; + } + + void add_dense_H_b(DenseAccumulator& accum) const { + int64_t start_t = imu_meas->get_start_t_ns(); + int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns(); + + const size_t start_idx = aom.abs_order_map.at(start_t).first; + const size_t end_idx = aom.abs_order_map.at(end_t).first; + + const MatX H = Jp.transpose() * Jp; + const VecX b = Jp.transpose() * r; + + accum.template addH( + start_idx, start_idx, + H.template block(0, 0)); + + accum.template addH( + end_idx, start_idx, + H.template block( + POSE_VEL_BIAS_SIZE, 0)); + + accum.template addH( + start_idx, end_idx, + H.template block( + 0, POSE_VEL_BIAS_SIZE)); + + accum.template addH( + end_idx, end_idx, + H.template block( + POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE)); + + accum.template addB( + start_idx, b.template segment(0)); + accum.template addB( + end_idx, b.template segment(POSE_VEL_BIAS_SIZE)); + } + + void scaleJp_cols(const VecX& jacobian_scaling) { + int64_t start_t = imu_meas->get_start_t_ns(); + int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns(); + + const size_t start_idx = aom.abs_order_map.at(start_t).first; + const size_t end_idx = aom.abs_order_map.at(end_t).first; + + Jp.template topLeftCorner() *= + jacobian_scaling.template segment(start_idx) + .asDiagonal(); + + Jp.template topRightCorner() *= + jacobian_scaling.template segment(end_idx) + .asDiagonal(); + } + + void addJp_diag2(VecX& res) 
const { + int64_t start_t = imu_meas->get_start_t_ns(); + int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns(); + + const size_t start_idx = aom.abs_order_map.at(start_t).first; + const size_t end_idx = aom.abs_order_map.at(end_t).first; + + res.template segment(start_idx) += + Jp.template topLeftCorner() + .colwise() + .squaredNorm(); + + res.template segment(end_idx) += + Jp.template topRightCorner() + .colwise() + .squaredNorm(); + } + + void backSubstitute(const VecX& pose_inc, Scalar& l_diff) { + int64_t start_t = imu_meas->get_start_t_ns(); + int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns(); + + const size_t start_idx = aom.abs_order_map.at(start_t).first; + const size_t end_idx = aom.abs_order_map.at(end_t).first; + + VecX pose_inc_reduced(2 * POSE_VEL_BIAS_SIZE); + pose_inc_reduced.template head() = + pose_inc.template segment(start_idx); + pose_inc_reduced.template tail() = + pose_inc.template segment(end_idx); + + // We want to compute the model cost change. The model function is + // + // L(inc) = F(x) + incT JT r + 0.5 incT JT J inc + // + // and thus the expect decrease in cost for the computed increment is + // + // l_diff = L(0) - L(inc) + // = - incT JT r - 0.5 incT JT J inc. 
+ // = - incT JT (r + 0.5 J inc) + // = - (J inc)T (r + 0.5 (J inc)) + + VecX Jinc = Jp * pose_inc_reduced; + l_diff -= Jinc.transpose() * (Scalar(0.5) * Jinc + r); + } + + protected: + std::array frame_ids; + MatX Jp; + VecX r; + + const IntegratedImuMeasurement* imu_meas; + const ImuLinData* imu_lin_data; + const AbsOrderMap& aom; +}; // namespace basalt + +} // namespace basalt diff --git a/include/basalt/linearization/landmark_block.hpp b/include/basalt/linearization/landmark_block.hpp new file mode 100644 index 0000000..a0710b1 --- /dev/null +++ b/include/basalt/linearization/landmark_block.hpp @@ -0,0 +1,140 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace basalt { + +template +struct RelPoseLin { + using Mat4 = Eigen::Matrix; + using Mat6 = Eigen::Matrix; + + Mat4 T_t_h; + Mat6 d_rel_d_h; + Mat6 d_rel_d_t; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +template +class LandmarkBlock { + public: + struct Options { + // use Householder instead of Givens for marginalization + bool use_householder = true; + + // use_valid_projections_only: if true, set invalid projection's + // residual and jacobian to 0; invalid means z <= 0 + bool use_valid_projections_only = true; + + // if > 0, use huber norm with given threshold, else squared norm + Scalar huber_parameter = 0; + + // Standard deviation of reprojection error to weight visual measurements + Scalar obs_std_dev = 1; + + // ceres uses 1.0 / (1.0 + sqrt(SquaredColumnNorm)) + // we use 1.0 / (eps + sqrt(SquaredColumnNorm)) + Scalar jacobi_scaling_eps = 1e-6; + }; + + enum State { + Uninitialized = 0, + Allocated, + NumericalFailure, + Linearized, + Marginalized + }; + + using Vec2 = Eigen::Matrix; + using Vec3 = Eigen::Matrix; + using Vec4 = Eigen::Matrix; + + using VecX = Eigen::Matrix; + + using Mat36 = Eigen::Matrix; + + using MatX = Eigen::Matrix; + using RowMatX = + Eigen::Matrix; + + using RowMat3 = Eigen::Matrix; + + virtual ~LandmarkBlock(){}; + + virtual bool 
isNumericalFailure() const = 0; + virtual void allocateLandmark( + Keypoint& lm, + const Eigen::aligned_unordered_map, + RelPoseLin>& relative_pose_lin, + const Calibration& calib, const AbsOrderMap& aom, + const Options& options, + const std::map* rel_order = nullptr) = 0; + + // may set state to NumericalFailure --> linearization at this state is + // unusable. Numeric check is only performed for residuals that were + // considered to be used (valid), which depends on + // use_valid_projections_only setting. + virtual Scalar linearizeLandmark() = 0; + virtual void performQR() = 0; + + // Sets damping and maintains upper triangular matrix for landmarks. + virtual void setLandmarkDamping(Scalar lambda) = 0; + + // lambda < 0 means computing exact model cost change + virtual void backSubstitute(const VecX& pose_inc, Scalar& l_diff) = 0; + + virtual void addQ2JpTQ2Jp_mult_x(VecX& res, const VecX& x_pose) const = 0; + + virtual void addQ2JpTQ2r(VecX& res) const = 0; + + virtual void addJp_diag2(VecX& res) const = 0; + + virtual void addQ2JpTQ2Jp_blockdiag( + BlockDiagonalAccumulator& accu) const = 0; + + virtual void scaleJl_cols() = 0; + virtual void scaleJp_cols(const VecX& jacobian_scaling) = 0; + virtual void printStorage(const std::string& filename) const = 0; + virtual State getState() const = 0; + + virtual size_t numReducedCams() const = 0; + + virtual void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r, + size_t start_idx) const = 0; + + virtual void get_dense_Q2Jp_Q2r_rel( + MatX& Q2Jp, VecX& Q2r, size_t start_idx, + const std::map& rel_order) const = 0; + + virtual void add_dense_H_b(DenseAccumulator& accum) const = 0; + + virtual void add_dense_H_b(MatX& H, VecX& b) const = 0; + + virtual void add_dense_H_b_rel( + MatX& H_rel, VecX& b_rel, + const std::map& rel_order) const = 0; + + virtual const Eigen::PermutationMatrix& get_rel_permutation() + const = 0; + + virtual Eigen::PermutationMatrix compute_rel_permutation( + const std::map& rel_order) const = 0; + + 
virtual void add_dense_H_b_rel_2(MatX& H_rel, VecX& b_rel) const = 0; + + virtual TimeCamId getHostKf() const = 0; + + virtual size_t numQ2rows() const = 0; + + // factory method + template + static std::unique_ptr> createLandmarkBlock(); +}; + +} // namespace basalt diff --git a/include/basalt/linearization/landmark_block_abs_dynamic.hpp b/include/basalt/linearization/landmark_block_abs_dynamic.hpp new file mode 100644 index 0000000..cc10c99 --- /dev/null +++ b/include/basalt/linearization/landmark_block_abs_dynamic.hpp @@ -0,0 +1,585 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +namespace basalt { + +template +class LandmarkBlockAbsDynamic : public LandmarkBlock { + public: + using Options = typename LandmarkBlock::Options; + using State = typename LandmarkBlock::State; + + inline bool isNumericalFailure() const override { + return state == State::NumericalFailure; + } + + using Vec2 = Eigen::Matrix; + using Vec3 = Eigen::Matrix; + using Vec4 = Eigen::Matrix; + + using VecX = Eigen::Matrix; + + using Mat36 = Eigen::Matrix; + + using MatX = Eigen::Matrix; + using RowMatX = + Eigen::Matrix; + + virtual inline void allocateLandmark( + Keypoint& lm, + const Eigen::aligned_unordered_map, + RelPoseLin>& relative_pose_lin, + const Calibration& calib, const AbsOrderMap& aom, + const Options& options, + const std::map* rel_order = nullptr) override { + // some of the logic assumes the members are at their initial values + BASALT_ASSERT(state == State::Uninitialized); + + UNUSED(rel_order); + + lm_ptr = &lm; + options_ = &options; + calib_ = &calib; + + // TODO: consider for VIO that we have a lot of 0 columns if we just use aom + // --> add option to use different AOM with reduced size and/or just + // involved poses --> when accumulating results, check which case we have; + // if both aom are identical, we don't have to do block-wise operations. 
+ aom_ = &aom; + + pose_lin_vec.clear(); + pose_lin_vec.reserve(lm.obs.size()); + pose_tcid_vec.clear(); + pose_tcid_vec.reserve(lm.obs.size()); + + // LMBs without host frame should not be created + BASALT_ASSERT(aom.abs_order_map.count(lm.host_kf_id.frame_id) > 0); + + for (const auto& [tcid_t, pos] : lm.obs) { + size_t i = pose_lin_vec.size(); + + auto it = relative_pose_lin.find(std::make_pair(lm.host_kf_id, tcid_t)); + BASALT_ASSERT(it != relative_pose_lin.end()); + + if (aom.abs_order_map.count(tcid_t.frame_id) > 0) { + pose_lin_vec.push_back(&it->second); + } else { + // Observation droped for marginalization + pose_lin_vec.push_back(nullptr); + } + pose_tcid_vec.push_back(&it->first); + + res_idx_by_abs_pose_[it->first.first.frame_id].insert(i); // host + res_idx_by_abs_pose_[it->first.second.frame_id].insert(i); // target + } + + // number of pose-jacobian columns is determined by oam + padding_idx = aom_->total_size; + + num_rows = pose_lin_vec.size() * 2 + 3; // residuals and lm damping + + size_t pad = padding_idx % 4; + if (pad != 0) { + padding_size = 4 - pad; + } + + lm_idx = padding_idx + padding_size; + res_idx = lm_idx + 3; + num_cols = res_idx + 1; + + // number of columns should now be multiple of 4 for good memory alignment + // TODO: test extending this to 8 --> 32byte alignment for float? + BASALT_ASSERT(num_cols % 4 == 0); + + storage.resize(num_rows, num_cols); + + damping_rotations.clear(); + damping_rotations.reserve(6); + + state = State::Allocated; + } + + // may set state to NumericalFailure --> linearization at this state is + // unusable. Numeric check is only performed for residuals that were + // considered to be used (valid), which depends on + // use_valid_projections_only setting. 
+ virtual inline Scalar linearizeLandmark() override { + BASALT_ASSERT(state == State::Allocated || + state == State::NumericalFailure || + state == State::Linearized || state == State::Marginalized); + + // storage.setZero(num_rows, num_cols); + storage.setZero(); + damping_rotations.clear(); + damping_rotations.reserve(6); + + bool numerically_valid = true; + + Scalar error_sum = 0; + + size_t i = 0; + for (const auto& [tcid_t, obs] : lm_ptr->obs) { + std::visit( + [&, obs = obs](const auto& cam) { + // TODO: The pose_lin_vec[i] == nullptr is intended to deal with + // dropped measurements during marginalization. However, dropped + // measurements should only occur for the remaining frames, not for + // the marginalized frames. Maybe these are observations bewtween + // two marginalized frames, if more than one is marginalized at the + // same time? But those we would not have to drop... Double check if + // and when this happens and possibly resolve by fixing handling + // here, or else updating the measurements in lmdb before calling + // linearization. Otherwise, check where else we need a `if + // (pose_lin_vec[i])` check or `pose_lin_vec[i] != nullptr` assert + // in this class. 
+ + if (pose_lin_vec[i]) { + size_t obs_idx = i * 2; + size_t abs_h_idx = + aom_->abs_order_map.at(pose_tcid_vec[i]->first.frame_id) + .first; + size_t abs_t_idx = + aom_->abs_order_map.at(pose_tcid_vec[i]->second.frame_id) + .first; + + Vec2 res; + Eigen::Matrix d_res_d_xi; + Eigen::Matrix d_res_d_p; + + using CamT = std::decay_t; + bool valid = linearizePoint( + obs, *lm_ptr, pose_lin_vec[i]->T_t_h, cam, res, &d_res_d_xi, + &d_res_d_p); + + if (!options_->use_valid_projections_only || valid) { + numerically_valid = numerically_valid && + d_res_d_xi.array().isFinite().all() && + d_res_d_p.array().isFinite().all(); + + const Scalar res_squared = res.squaredNorm(); + const auto [weighted_error, weight] = + compute_error_weight(res_squared); + const Scalar sqrt_weight = + std::sqrt(weight) / options_->obs_std_dev; + + error_sum += weighted_error / + (options_->obs_std_dev * options_->obs_std_dev); + + storage.template block<2, 3>(obs_idx, lm_idx) = + sqrt_weight * d_res_d_p; + storage.template block<2, 1>(obs_idx, res_idx) = + sqrt_weight * res; + + d_res_d_xi *= sqrt_weight; + storage.template block<2, 6>(obs_idx, abs_h_idx) += + d_res_d_xi * pose_lin_vec[i]->d_rel_d_h; + storage.template block<2, 6>(obs_idx, abs_t_idx) += + d_res_d_xi * pose_lin_vec[i]->d_rel_d_t; + } + } + + i++; + }, + calib_->intrinsics[tcid_t.cam_id].variant); + } + + if (numerically_valid) { + state = State::Linearized; + } else { + state = State::NumericalFailure; + } + + return error_sum; + } + + virtual inline void performQR() override { + BASALT_ASSERT(state == State::Linearized); + + // Since we use dense matrices Householder QR might be better: + // https://mathoverflow.net/questions/227543/why-householder-reflection-is-better-than-givens-rotation-in-dense-linear-algebr + + if (options_->use_householder) { + performQRHouseholder(); + } else { + performQRGivens(); + } + + state = State::Marginalized; + } + + // Sets damping and maintains upper triangular matrix for landmarks. 
+ virtual inline void setLandmarkDamping(Scalar lambda) override { + BASALT_ASSERT(state == State::Marginalized); + BASALT_ASSERT(lambda >= 0); + + if (hasLandmarkDamping()) { + BASALT_ASSERT(damping_rotations.size() == 6); + + // undo dampening + for (int n = 2; n >= 0; n--) { + for (int m = n; m >= 0; m--) { + storage.applyOnTheLeft(num_rows - 3 + n - m, n, + damping_rotations.back().adjoint()); + damping_rotations.pop_back(); + } + } + } + + if (lambda == 0) { + storage.template block<3, 3>(num_rows - 3, lm_idx).diagonal().setZero(); + } else { + BASALT_ASSERT(Jl_col_scale.array().isFinite().all()); + + storage.template block<3, 3>(num_rows - 3, lm_idx) + .diagonal() + .setConstant(sqrt(lambda)); + + BASALT_ASSERT(damping_rotations.empty()); + + // apply dampening and remember rotations to undo + for (int n = 0; n < 3; n++) { + for (int m = 0; m <= n; m++) { + damping_rotations.emplace_back(); + damping_rotations.back().makeGivens( + storage(n, lm_idx + n), + storage(num_rows - 3 + n - m, lm_idx + n)); + storage.applyOnTheLeft(num_rows - 3 + n - m, n, + damping_rotations.back()); + } + } + } + } + + // lambda < 0 means computing exact model cost change + virtual inline void backSubstitute(const VecX& pose_inc, + Scalar& l_diff) override { + BASALT_ASSERT(state == State::Marginalized); + + // For now we include all columns in LMB + BASALT_ASSERT(pose_inc.size() == signed_cast(padding_idx)); + + const auto Q1Jl = storage.template block<3, 3>(0, lm_idx) + .template triangularView(); + + const auto Q1Jr = storage.col(res_idx).template head<3>(); + const auto Q1Jp = storage.topLeftCorner(3, padding_idx); + + Vec3 inc = -Q1Jl.solve(Q1Jr + Q1Jp * pose_inc); + + // We want to compute the model cost change. 
The model function is + // + // L(inc) = F(x) + inc^T J^T r + 0.5 inc^T J^T J inc + // + // and thus the expected decrease in cost for the computed increment is + // + // l_diff = L(0) - L(inc) + // = - inc^T J^T r - 0.5 inc^T J^T J inc + // = - inc^T J^T (r + 0.5 J inc) + // = - (J inc)^T (r + 0.5 (J inc)). + // + // Here we have J = [Jp, Jl] under the orthogonal projection Q = [Q1, Q2], + // i.e. the linearized system (model cost) is + // + // L(inc) = 0.5 || J inc + r ||^2 = 0.5 || Q^T J inc + Q^T r ||^2 + // + // and below we thus compute + // + // l_diff = - (Q^T J inc)^T (Q^T r + 0.5 (Q^T J inc)). + // + // We have + // | Q1^T | | Q1^T Jp Q1^T Jl | + // Q^T J = | | [Jp, Jl] = | | + // | Q2^T | | Q2^T Jp 0 |. + // + // Note that Q2 is the nullspace of Jl, and Q1^T Jl == R. So with inc = + // [incp^T, incl^T]^T we have + // + // | Q1^T Jp incp + Q1^T Jl incl | + // Q^T J inc = | | + // | Q2^T Jp incp | + // + + // undo damping before we compute the model cost difference + setLandmarkDamping(0); + + // compute "Q^T J incp" + VecX QJinc = storage.topLeftCorner(num_rows - 3, padding_idx) * pose_inc; + + // add "Q1^T Jl incl" to the first 3 rows + QJinc.template head<3>() += Q1Jl * inc; + + auto Qr = storage.col(res_idx).head(num_rows - 3); + l_diff -= QJinc.transpose() * (Scalar(0.5) * QJinc + Qr); + + // TODO: detect and handle case like ceres, allowing a few iterations but + // stopping eventually + if (!inc.array().isFinite().all() || + !lm_ptr->direction.array().isFinite().all() || + !std::isfinite(lm_ptr->inv_dist)) { + std::cerr << "Numerical failure in backsubstitution\n"; + } + + // Note: scale only after computing model cost change + inc.array() *= Jl_col_scale.array(); + + lm_ptr->direction += inc.template head<2>(); + lm_ptr->inv_dist = std::max(Scalar(0), lm_ptr->inv_dist + inc[2]); + } + + virtual inline size_t numReducedCams() const override { + BASALT_LOG_FATAL("check what we mean by numReducedCams for absolute poses"); + return pose_lin_vec.size(); 
+ } + + inline void addQ2JpTQ2Jp_mult_x(VecX& res, + const VecX& x_pose) const override { + UNUSED(res); + UNUSED(x_pose); + BASALT_LOG_FATAL("not implemented"); + } + + virtual inline void addQ2JpTQ2r(VecX& res) const override { + UNUSED(res); + BASALT_LOG_FATAL("not implemented"); + } + + virtual inline void addJp_diag2(VecX& res) const override { + BASALT_ASSERT(state == State::Linearized); + + for (const auto& [frame_id, idx_set] : res_idx_by_abs_pose_) { + const int pose_idx = aom_->abs_order_map.at(frame_id).first; + for (const int i : idx_set) { + const auto block = storage.block(2 * i, pose_idx, 2, POSE_SIZE); + + res.template segment(pose_idx) += + block.colwise().squaredNorm(); + } + } + } + + virtual inline void addQ2JpTQ2Jp_blockdiag( + BlockDiagonalAccumulator& accu) const override { + UNUSED(accu); + BASALT_LOG_FATAL("not implemented"); + } + + virtual inline void scaleJl_cols() override { + BASALT_ASSERT(state == State::Linearized); + + // ceres uses 1.0 / (1.0 + sqrt(SquaredColumnNorm)) + // we use 1.0 / (eps + sqrt(SquaredColumnNorm)) + Jl_col_scale = + (options_->jacobi_scaling_eps + + storage.block(0, lm_idx, num_rows - 3, 3).colwise().norm().array()) + .inverse(); + + storage.block(0, lm_idx, num_rows - 3, 3) *= Jl_col_scale.asDiagonal(); + } + + virtual inline void scaleJp_cols(const VecX& jacobian_scaling) override { + BASALT_ASSERT(state == State::Marginalized); + + // we assume we apply scaling before damping (we exclude the last 3 rows) + BASALT_ASSERT(!hasLandmarkDamping()); + + storage.topLeftCorner(num_rows - 3, padding_idx) *= + jacobian_scaling.asDiagonal(); + } + + inline bool hasLandmarkDamping() const { return !damping_rotations.empty(); } + + virtual inline void printStorage(const std::string& filename) const override { + std::ofstream f(filename); + + Eigen::IOFormat CleanFmt(4, 0, " ", "\n", "", ""); + + f << "Storage (state: " << state + << ", damping: " << (hasLandmarkDamping() ? 
"yes" : "no") + << " Jl_col_scale: " << Jl_col_scale.transpose() << "):\n" + << storage.format(CleanFmt) << std::endl; + + f.close(); + } +#if 0 + virtual inline void stage2( + Scalar lambda, const VecX* jacobian_scaling, VecX* precond_diagonal2, + BlockDiagonalAccumulator* precond_block_diagonal, + VecX& bref) override { + // 1. scale jacobian + if (jacobian_scaling) { + scaleJp_cols(*jacobian_scaling); + } + + // 2. dampen landmarks + setLandmarkDamping(lambda); + + // 3a. compute diagonal preconditioner (SCHUR_JACOBI_DIAGONAL) + if (precond_diagonal2) { + addQ2Jp_diag2(*precond_diagonal2); + } + + // 3b. compute block diagonal preconditioner (SCHUR_JACOBI) + if (precond_block_diagonal) { + addQ2JpTQ2Jp_blockdiag(*precond_block_diagonal); + } + + // 4. compute rhs of reduced camera normal equations + addQ2JpTQ2r(bref); + } +#endif + + inline State getState() const override { return state; } + + virtual inline size_t numQ2rows() const override { return num_rows - 3; } + + protected: + inline void performQRGivens() { + // Based on "Matrix Computations 4th Edition by Golub and Van Loan" + // See page 252, Algorithm 5.2.4 for how these two loops work + Eigen::JacobiRotation gr; + for (size_t n = 0; n < 3; n++) { + for (size_t m = num_rows - 4; m > n; m--) { + gr.makeGivens(storage(m - 1, lm_idx + n), storage(m, lm_idx + n)); + storage.applyOnTheLeft(m, m - 1, gr); + } + } + } + + inline void performQRHouseholder() { + VecX tempVector1(num_cols); + VecX tempVector2(num_rows - 3); + + for (size_t k = 0; k < 3; ++k) { + size_t remainingRows = num_rows - k - 3; + + Scalar beta; + Scalar tau; + storage.col(lm_idx + k) + .segment(k, remainingRows) + .makeHouseholder(tempVector2, tau, beta); + + storage.block(k, 0, remainingRows, num_cols) + .applyHouseholderOnTheLeft(tempVector2, tau, tempVector1.data()); + } + } + + inline std::tuple compute_error_weight( + Scalar res_squared) const { + // Note: Definition of cost is 0.5 ||r(x)||^2 to be in line with ceres + + if 
(options_->huber_parameter > 0) { + // use huber norm + const Scalar huber_weight = + res_squared <= options_->huber_parameter * options_->huber_parameter + ? Scalar(1) + : options_->huber_parameter / std::sqrt(res_squared); + const Scalar error = + Scalar(0.5) * (2 - huber_weight) * huber_weight * res_squared; + return {error, huber_weight}; + } else { + // use squared norm + return {Scalar(0.5) * res_squared, Scalar(1)}; + } + } + + void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r, + size_t start_idx) const override { + Q2r.segment(start_idx, num_rows - 3) = + storage.col(res_idx).tail(num_rows - 3); + + BASALT_ASSERT(Q2Jp.cols() == signed_cast(padding_idx)); + + Q2Jp.block(start_idx, 0, num_rows - 3, padding_idx) = + storage.block(3, 0, num_rows - 3, padding_idx); + } + + void get_dense_Q2Jp_Q2r_rel( + MatX& Q2Jp, VecX& Q2r, size_t start_idx, + const std::map& rel_order) const override { + UNUSED(Q2Jp); + UNUSED(Q2r); + UNUSED(start_idx); + UNUSED(rel_order); + BASALT_LOG_FATAL("not implemented"); + } + + void add_dense_H_b(DenseAccumulator& accum) const override { + UNUSED(accum); + BASALT_LOG_FATAL("not implemented"); + } + + void add_dense_H_b(MatX& H, VecX& b) const override { + const auto r = storage.col(res_idx).tail(num_rows - 3); + const auto J = storage.block(3, 0, num_rows - 3, padding_idx); + + H.noalias() += J.transpose() * J; + b.noalias() += J.transpose() * r; + } + + void add_dense_H_b_rel( + MatX& H_rel, VecX& b_rel, + const std::map& rel_order) const override { + UNUSED(H_rel); + UNUSED(b_rel); + UNUSED(rel_order); + BASALT_LOG_FATAL("not implemented"); + } + + const Eigen::PermutationMatrix& get_rel_permutation() + const override { + BASALT_LOG_FATAL("not implemented"); + } + + Eigen::PermutationMatrix compute_rel_permutation( + const std::map& rel_order) const override { + UNUSED(rel_order); + BASALT_LOG_FATAL("not implemented"); + } + + void add_dense_H_b_rel_2(MatX& H_rel, VecX& b_rel) const override { + UNUSED(H_rel); + UNUSED(b_rel); + 
BASALT_LOG_FATAL("not implemented"); + } + + virtual TimeCamId getHostKf() const override { return lm_ptr->host_kf_id; } + + private: + // Dense storage for pose Jacobians, padding, landmark Jacobians and + // residuals [J_p | pad | J_l | res] + Eigen::Matrix + storage; + + Vec3 Jl_col_scale = Vec3::Ones(); + std::vector> damping_rotations; + + std::vector*> pose_lin_vec; + std::vector*> pose_tcid_vec; + size_t padding_idx = 0; + size_t padding_size = 0; + size_t lm_idx = 0; + size_t res_idx = 0; + + size_t num_cols = 0; + size_t num_rows = 0; + + const Options* options_ = nullptr; + + State state = State::Uninitialized; + + Keypoint* lm_ptr = nullptr; + const Calibration* calib_ = nullptr; + const AbsOrderMap* aom_ = nullptr; + + std::map> res_idx_by_abs_pose_; +}; + +} // namespace basalt diff --git a/include/basalt/linearization/linearization_abs_qr.hpp b/include/basalt/linearization/linearization_abs_qr.hpp new file mode 100644 index 0000000..c05884d --- /dev/null +++ b/include/basalt/linearization/linearization_abs_qr.hpp @@ -0,0 +1,141 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +namespace basalt { + +template +class ImuBlock; + +template +class LinearizationAbsQR : public LinearizationBase { + public: + using Scalar = Scalar_; + static constexpr int POSE_SIZE = POSE_SIZE_; + using Base = LinearizationBase; + + using Vec2 = Eigen::Matrix; + using Vec3 = Eigen::Matrix; + using Vec4 = Eigen::Matrix; + using VecP = Eigen::Matrix; + + using VecX = Eigen::Matrix; + + using Mat36 = Eigen::Matrix; + using Mat4 = Eigen::Matrix; + using Mat6 = Eigen::Matrix; + + using MatX = Eigen::Matrix; + + using LandmarkBlockPtr = std::unique_ptr>; + using ImuBlockPtr = std::unique_ptr>; + + using typename Base::Options; + + LinearizationAbsQR( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, + const MargLinData* marg_lin_data = nullptr, + const ImuLinData* imu_lin_data = nullptr, + const std::set* 
used_frames = nullptr, + const std::unordered_set* lost_landmarks = nullptr, + int64_t last_state_to_marg = std::numeric_limits::max()); + + // destructor defined in cpp b/c of unique_ptr destructor (ImuBlock) + // copy/move constructor/assignment-operator for rule-of-five + ~LinearizationAbsQR(); + LinearizationAbsQR(const LinearizationAbsQR&) = default; + LinearizationAbsQR(LinearizationAbsQR&&) = default; + LinearizationAbsQR& operator=(const LinearizationAbsQR&) = default; + LinearizationAbsQR& operator=(LinearizationAbsQR&&) = default; + + void log_problem_stats(ExecutionStats& stats) const override; + + Scalar linearizeProblem(bool* numerically_valid = nullptr) override; + + void performQR() override; + + void setPoseDamping(const Scalar lambda); + + bool hasPoseDamping() const { return pose_damping_diagonal > 0; } + + Scalar backSubstitute(const VecX& pose_inc) override; + + VecX getJp_diag2() const; + + void scaleJl_cols(); + + void scaleJp_cols(const VecX& jacobian_scaling); + + void setLandmarkDamping(Scalar lambda); + + void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r) const override; + + void get_dense_H_b(MatX& H, VecX& b) const override; + + protected: // types + using PoseLinMapType = + Eigen::aligned_unordered_map, + RelPoseLin>; + using PoseLinMapTypeConstIter = typename PoseLinMapType::const_iterator; + + using HostLandmarkMapType = + std::unordered_map*>>; + + protected: // helper + void get_dense_Q2Jp_Q2r_pose_damping(MatX& Q2Jp, size_t start_idx) const; + + void get_dense_Q2Jp_Q2r_marg_prior(MatX& Q2Jp, VecX& Q2r, + size_t start_idx) const; + + void add_dense_H_b_pose_damping(MatX& H) const; + + void add_dense_H_b_marg_prior(MatX& H, VecX& b) const; + + void add_dense_H_b_imu(DenseAccumulator& accum) const; + + void add_dense_H_b_imu(MatX& H, VecX& b) const; + + protected: + Options options_; + + std::vector landmark_ids; + std::vector landmark_blocks; + std::vector imu_blocks; + + std::unordered_map host_to_idx_; + HostLandmarkMapType 
host_to_landmark_block; + + std::vector landmark_block_idx; + const BundleAdjustmentBase* estimator; + + LandmarkDatabase& lmdb_; + const Eigen::aligned_map>& frame_poses; + + const Calibration& calib; + + const AbsOrderMap& aom; + const std::set* used_frames; + + const MargLinData* marg_lin_data; + const ImuLinData* imu_lin_data; + + PoseLinMapType relative_pose_lin; + std::vector> relative_pose_per_host; + + Scalar pose_damping_diagonal; + Scalar pose_damping_diagonal_sqrt; + + VecX marg_scaling; + + size_t num_cameras; + size_t num_rows_Q2r; +}; + +} // namespace basalt diff --git a/include/basalt/linearization/linearization_abs_sc.hpp b/include/basalt/linearization/linearization_abs_sc.hpp new file mode 100644 index 0000000..c1e5e21 --- /dev/null +++ b/include/basalt/linearization/linearization_abs_sc.hpp @@ -0,0 +1,142 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +namespace basalt { + +template +class ImuBlock; + +template +class LinearizationAbsSC : public LinearizationBase { + public: + using Scalar = Scalar_; + static constexpr int POSE_SIZE = POSE_SIZE_; + using Base = LinearizationBase; + + using Vec2 = Eigen::Matrix; + using Vec3 = Eigen::Matrix; + using Vec4 = Eigen::Matrix; + using VecP = Eigen::Matrix; + + using VecX = Eigen::Matrix; + + using Mat36 = Eigen::Matrix; + using Mat4 = Eigen::Matrix; + using Mat6 = Eigen::Matrix; + + using MatX = Eigen::Matrix; + + using LandmarkBlockPtr = std::unique_ptr>; + using ImuBlockPtr = std::unique_ptr>; + + using AbsLinData = typename ScBundleAdjustmentBase::AbsLinData; + + using typename Base::Options; + + LinearizationAbsSC( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, + const MargLinData* marg_lin_data = nullptr, + const ImuLinData* imu_lin_data = nullptr, + const std::set* used_frames = nullptr, + const std::unordered_set* lost_landmarks = nullptr, + int64_t last_state_to_marg = std::numeric_limits::max()); + + // 
destructor defined in cpp b/c of unique_ptr destructor (ImuBlock) + // copy/move constructor/assignment-operator for rule-of-five + ~LinearizationAbsSC(); + LinearizationAbsSC(const LinearizationAbsSC&) = default; + LinearizationAbsSC(LinearizationAbsSC&&) = default; + LinearizationAbsSC& operator=(const LinearizationAbsSC&) = default; + LinearizationAbsSC& operator=(LinearizationAbsSC&&) = default; + + void log_problem_stats(ExecutionStats& stats) const override; + + Scalar linearizeProblem(bool* numerically_valid = nullptr) override; + + void performQR() override; + + void setPoseDamping(const Scalar lambda); + + bool hasPoseDamping() const { return pose_damping_diagonal > 0; } + + Scalar backSubstitute(const VecX& pose_inc) override; + + VecX getJp_diag2() const; + + void scaleJl_cols(); + + void scaleJp_cols(const VecX& jacobian_scaling); + + void setLandmarkDamping(Scalar lambda); + + void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r) const override; + + void get_dense_H_b(MatX& H, VecX& b) const override; + + protected: // types + using PoseLinMapType = + Eigen::aligned_unordered_map, + RelPoseLin>; + using PoseLinMapTypeConstIter = typename PoseLinMapType::const_iterator; + + using HostLandmarkMapType = + std::unordered_map*>>; + + protected: // helper + void get_dense_Q2Jp_Q2r_pose_damping(MatX& Q2Jp, size_t start_idx) const; + + void get_dense_Q2Jp_Q2r_marg_prior(MatX& Q2Jp, VecX& Q2r, + size_t start_idx) const; + + void add_dense_H_b_pose_damping(MatX& H) const; + + void add_dense_H_b_marg_prior(MatX& H, VecX& b) const; + + void add_dense_H_b_imu(DenseAccumulator& accum) const; + + void add_dense_H_b_imu(MatX& H, VecX& b) const; + + // Transform to abs + static void accumulate_dense_H_b_rel_to_abs( + const MatX& rel_H, const VecX& rel_b, + const std::vector& rpph, const AbsOrderMap& aom, + DenseAccumulator& accum); + + protected: + Options options_; + + std::vector imu_blocks; + + const BundleAdjustmentBase* estimator; + + LandmarkDatabase& lmdb_; + const 
Calibration& calib; + + const AbsOrderMap& aom; + const std::set* used_frames; + + const MargLinData* marg_lin_data; + const ImuLinData* imu_lin_data; + const std::unordered_set* lost_landmarks; + int64_t last_state_to_marg; + + Eigen::aligned_vector ald_vec; + + Scalar pose_damping_diagonal; + Scalar pose_damping_diagonal_sqrt; + + VecX marg_scaling; + + size_t num_rows_Q2r; +}; + +} // namespace basalt diff --git a/include/basalt/linearization/linearization_base.hpp b/include/basalt/linearization/linearization_base.hpp new file mode 100644 index 0000000..5fdf2aa --- /dev/null +++ b/include/basalt/linearization/linearization_base.hpp @@ -0,0 +1,63 @@ +#pragma once + +#include + +#include +#include +#include + +namespace basalt { + +template +class LinearizationBase { + public: + using Scalar = Scalar_; + static constexpr int POSE_SIZE = POSE_SIZE_; + + using VecX = Eigen::Matrix; + using MatX = Eigen::Matrix; + + struct Options { + typename LandmarkBlock::Options lb_options; + LinearizationType linearization_type; + }; + + virtual ~LinearizationBase() = default; + + virtual void log_problem_stats(ExecutionStats& stats) const = 0; + + virtual Scalar linearizeProblem(bool* numerically_valid = nullptr) = 0; + + virtual void performQR() = 0; + + // virtual void setPoseDamping(const Scalar lambda) = 0; + + // virtual bool hasPoseDamping() const = 0; + + virtual Scalar backSubstitute(const VecX& pose_inc) = 0; + + // virtual VecX getJp_diag2() const = 0; + + // virtual void scaleJl_cols() = 0; + + // virtual void scaleJp_cols(const VecX& jacobian_scaling) = 0; + + // virtual void setLandmarkDamping(Scalar lambda) = 0; + + virtual void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r) const = 0; + + virtual void get_dense_H_b(MatX& H, VecX& b) const = 0; + + static std::unique_ptr create( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, + const MargLinData* marg_lin_data = nullptr, + const ImuLinData* imu_lin_data = nullptr, + const std::set* 
used_frames = nullptr, + const std::unordered_set* lost_landmarks = nullptr, + int64_t last_state_to_marg = std::numeric_limits::max()); +}; + +bool isLinearizationSqrt(const LinearizationType& type); + +} // namespace basalt diff --git a/include/basalt/linearization/linearization_rel_sc.hpp b/include/basalt/linearization/linearization_rel_sc.hpp new file mode 100644 index 0000000..aa0fb6e --- /dev/null +++ b/include/basalt/linearization/linearization_rel_sc.hpp @@ -0,0 +1,143 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +namespace basalt { + +template +class ImuBlock; + +template +class LinearizationRelSC : public LinearizationBase { + public: + using Scalar = Scalar_; + static constexpr int POSE_SIZE = POSE_SIZE_; + using Base = LinearizationBase; + + using Vec2 = Eigen::Matrix; + using Vec3 = Eigen::Matrix; + using Vec4 = Eigen::Matrix; + using VecP = Eigen::Matrix; + + using VecX = Eigen::Matrix; + + using Mat36 = Eigen::Matrix; + using Mat4 = Eigen::Matrix; + using Mat6 = Eigen::Matrix; + + using MatX = Eigen::Matrix; + + using LandmarkBlockPtr = std::unique_ptr>; + using ImuBlockPtr = std::unique_ptr>; + + using RelLinData = typename ScBundleAdjustmentBase::RelLinData; + + using typename Base::Options; + + LinearizationRelSC( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, + const MargLinData* marg_lin_data = nullptr, + const ImuLinData* imu_lin_data = nullptr, + const std::set* used_frames = nullptr, + const std::unordered_set* lost_landmarks = nullptr, + int64_t last_state_to_marg = std::numeric_limits::max()); + + // destructor defined in cpp b/c of unique_ptr destructor (ImuBlock) + // copy/move constructor/assignment-operator for rule-of-five + ~LinearizationRelSC(); + LinearizationRelSC(const LinearizationRelSC&) = default; + LinearizationRelSC(LinearizationRelSC&&) = default; + LinearizationRelSC& operator=(const LinearizationRelSC&) = default; + LinearizationRelSC& 
operator=(LinearizationRelSC&&) = default; + + void log_problem_stats(ExecutionStats& stats) const override; + + Scalar linearizeProblem(bool* numerically_valid = nullptr) override; + + void performQR() override; + + void setPoseDamping(const Scalar lambda); + + bool hasPoseDamping() const { return pose_damping_diagonal > 0; } + + Scalar backSubstitute(const VecX& pose_inc) override; + + VecX getJp_diag2() const; + + void scaleJl_cols(); + + void scaleJp_cols(const VecX& jacobian_scaling); + + void setLandmarkDamping(Scalar lambda); + + void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r) const override; + + void get_dense_H_b(MatX& H, VecX& b) const override; + + protected: // types + using PoseLinMapType = + Eigen::aligned_unordered_map, + RelPoseLin>; + using PoseLinMapTypeConstIter = typename PoseLinMapType::const_iterator; + + using HostLandmarkMapType = + std::unordered_map*>>; + + protected: // helper + void get_dense_Q2Jp_Q2r_pose_damping(MatX& Q2Jp, size_t start_idx) const; + + void get_dense_Q2Jp_Q2r_marg_prior(MatX& Q2Jp, VecX& Q2r, + size_t start_idx) const; + + void add_dense_H_b_pose_damping(MatX& H) const; + + void add_dense_H_b_marg_prior(MatX& H, VecX& b) const; + + void add_dense_H_b_imu(DenseAccumulator& accum) const; + + void add_dense_H_b_imu(MatX& H, VecX& b) const; + + // Transform to abs + static void accumulate_dense_H_b_rel_to_abs( + const MatX& rel_H, const VecX& rel_b, + const std::vector& rpph, const AbsOrderMap& aom, + DenseAccumulator& accum); + + protected: + Options options_; + + std::vector imu_blocks; + + const BundleAdjustmentBase* estimator; + + LandmarkDatabase& lmdb_; + + const Calibration& calib; + + const AbsOrderMap& aom; + const std::set* used_frames; + + const MargLinData* marg_lin_data; + const ImuLinData* imu_lin_data; + const std::unordered_set* lost_landmarks; + int64_t last_state_to_marg; + + Eigen::aligned_vector rld_vec; + + Scalar pose_damping_diagonal; + Scalar pose_damping_diagonal_sqrt; + + VecX marg_scaling; + + 
size_t num_rows_Q2r; +}; + +} // namespace basalt diff --git a/include/basalt/optical_flow/frame_to_frame_optical_flow.h b/include/basalt/optical_flow/frame_to_frame_optical_flow.h new file mode 100644 index 0000000..9ede196 --- /dev/null +++ b/include/basalt/optical_flow/frame_to_frame_optical_flow.h @@ -0,0 +1,405 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#pragma once + +#include + +#include + +#include +#include +#include + +#include +#include + +#include +#include + +namespace basalt { + +/// Unlike PatchOpticalFlow, FrameToFrameOpticalFlow always tracks patches +/// against the previous frame, not the initial frame where a track was created. +/// While it might cause more drift of the patch location, it leads to longer +/// tracks in practice. +template typename Pattern> +class FrameToFrameOpticalFlow : public OpticalFlowBase { + public: + typedef OpticalFlowPatch> PatchT; + + typedef Eigen::Matrix Vector2; + typedef Eigen::Matrix Matrix2; + + typedef Eigen::Matrix Vector3; + typedef Eigen::Matrix Matrix3; + + typedef Eigen::Matrix Vector4; + typedef Eigen::Matrix Matrix4; + + typedef Sophus::SE2 SE2; + + FrameToFrameOpticalFlow(const VioConfig& config, + const basalt::Calibration& calib) + : t_ns(-1), frame_counter(0), last_keypoint_id(0), config(config) { + input_queue.set_capacity(10); + + this->calib = calib.cast(); + + patch_coord = PatchT::pattern2.template cast(); + + if (calib.intrinsics.size() > 1) { + Eigen::Matrix4d Ed; + Sophus::SE3d T_i_j = calib.T_i_c[0].inverse() * calib.T_i_c[1]; + computeEssential(T_i_j, Ed); + E = Ed.cast(); + } + + processing_thread.reset( + new std::thread(&FrameToFrameOpticalFlow::processingLoop, this)); + } + + ~FrameToFrameOpticalFlow() { processing_thread->join(); } + + void processingLoop() { + OpticalFlowInput::Ptr input_ptr; + + while (true) { + input_queue.pop(input_ptr); + + if (!input_ptr.get()) { + if (output_queue) output_queue->push(nullptr); + break; + } + + processFrame(input_ptr->t_ns, input_ptr); + } + } + + void processFrame(int64_t curr_t_ns, OpticalFlowInput::Ptr& new_img_vec) { + for (const auto& v : new_img_vec->img_data) { + if (!v.img.get()) return; + } + + if (t_ns < 0) { + t_ns = curr_t_ns; + + transforms.reset(new OpticalFlowResult); + transforms->observations.resize(calib.intrinsics.size()); + transforms->t_ns = t_ns; + + pyramid.reset(new 
std::vector>); + pyramid->resize(calib.intrinsics.size()); + + tbb::parallel_for(tbb::blocked_range(0, calib.intrinsics.size()), + [&](const tbb::blocked_range& r) { + for (size_t i = r.begin(); i != r.end(); ++i) { + pyramid->at(i).setFromImage( + *new_img_vec->img_data[i].img, + config.optical_flow_levels); + } + }); + + transforms->input_images = new_img_vec; + + addPoints(); + filterPoints(); + + } else { + t_ns = curr_t_ns; + + old_pyramid = pyramid; + + pyramid.reset(new std::vector>); + pyramid->resize(calib.intrinsics.size()); + tbb::parallel_for(tbb::blocked_range(0, calib.intrinsics.size()), + [&](const tbb::blocked_range& r) { + for (size_t i = r.begin(); i != r.end(); ++i) { + pyramid->at(i).setFromImage( + *new_img_vec->img_data[i].img, + config.optical_flow_levels); + } + }); + + OpticalFlowResult::Ptr new_transforms; + new_transforms.reset(new OpticalFlowResult); + new_transforms->observations.resize(calib.intrinsics.size()); + new_transforms->t_ns = t_ns; + + for (size_t i = 0; i < calib.intrinsics.size(); i++) { + trackPoints(old_pyramid->at(i), pyramid->at(i), + transforms->observations[i], + new_transforms->observations[i]); + } + + transforms = new_transforms; + transforms->input_images = new_img_vec; + + addPoints(); + filterPoints(); + } + + if (output_queue && frame_counter % config.optical_flow_skip_frames == 0) { + output_queue->push(transforms); + } + + frame_counter++; + } + + void trackPoints(const basalt::ManagedImagePyr& pyr_1, + const basalt::ManagedImagePyr& pyr_2, + const Eigen::aligned_map& + transform_map_1, + Eigen::aligned_map& + transform_map_2) const { + size_t num_points = transform_map_1.size(); + + std::vector ids; + Eigen::aligned_vector init_vec; + + ids.reserve(num_points); + init_vec.reserve(num_points); + + for (const auto& kv : transform_map_1) { + ids.push_back(kv.first); + init_vec.push_back(kv.second); + } + + tbb::concurrent_unordered_map> + result; + + auto compute_func = [&](const tbb::blocked_range& range) { + 
for (size_t r = range.begin(); r != range.end(); ++r) { + const KeypointId id = ids[r]; + + const Eigen::AffineCompact2f& transform_1 = init_vec[r]; + Eigen::AffineCompact2f transform_2 = transform_1; + + bool valid = trackPoint(pyr_1, pyr_2, transform_1, transform_2); + + if (valid) { + Eigen::AffineCompact2f transform_1_recovered = transform_2; + + valid = trackPoint(pyr_2, pyr_1, transform_2, transform_1_recovered); + + if (valid) { + Scalar dist2 = (transform_1.translation() - + transform_1_recovered.translation()) + .squaredNorm(); + + if (dist2 < config.optical_flow_max_recovered_dist2) { + result[id] = transform_2; + } + } + } + } + }; + + tbb::blocked_range range(0, num_points); + + tbb::parallel_for(range, compute_func); + // compute_func(range); + + transform_map_2.clear(); + transform_map_2.insert(result.begin(), result.end()); + } + + inline bool trackPoint(const basalt::ManagedImagePyr& old_pyr, + const basalt::ManagedImagePyr& pyr, + const Eigen::AffineCompact2f& old_transform, + Eigen::AffineCompact2f& transform) const { + bool patch_valid = true; + + transform.linear().setIdentity(); + + for (int level = config.optical_flow_levels; level >= 0 && patch_valid; + level--) { + const Scalar scale = 1 << level; + + transform.translation() /= scale; + + PatchT p(old_pyr.lvl(level), old_transform.translation() / scale); + + patch_valid &= p.valid; + if (patch_valid) { + // Perform tracking on current level + patch_valid &= trackPointAtLevel(pyr.lvl(level), p, transform); + } + + transform.translation() *= scale; + } + + transform.linear() = old_transform.linear() * transform.linear(); + + return patch_valid; + } + + inline bool trackPointAtLevel(const Image& img_2, + const PatchT& dp, + Eigen::AffineCompact2f& transform) const { + bool patch_valid = true; + + for (int iteration = 0; + patch_valid && iteration < config.optical_flow_max_iterations; + iteration++) { + typename PatchT::VectorP res; + + typename PatchT::Matrix2P transformed_pat = + 
transform.linear().matrix() * PatchT::pattern2; + transformed_pat.colwise() += transform.translation(); + + patch_valid &= dp.residual(img_2, transformed_pat, res); + + if (patch_valid) { + const Vector3 inc = -dp.H_se2_inv_J_se2_T * res; + + // avoid NaN in increment (leads to SE2::exp crashing) + patch_valid &= inc.array().isFinite().all(); + + // avoid very large increment + patch_valid &= inc.template lpNorm() < 1e6; + + if (patch_valid) { + transform *= SE2::exp(inc).matrix(); + + const int filter_margin = 2; + + patch_valid &= img_2.InBounds(transform.translation(), filter_margin); + } + } + } + + return patch_valid; + } + + void addPoints() { + Eigen::aligned_vector pts0; + + for (const auto& kv : transforms->observations.at(0)) { + pts0.emplace_back(kv.second.translation().cast()); + } + + KeypointsData kd; + + detectKeypoints(pyramid->at(0).lvl(0), kd, + config.optical_flow_detection_grid_size, 1, pts0); + + Eigen::aligned_map new_poses0, + new_poses1; + + for (size_t i = 0; i < kd.corners.size(); i++) { + Eigen::AffineCompact2f transform; + transform.setIdentity(); + transform.translation() = kd.corners[i].cast(); + + transforms->observations.at(0)[last_keypoint_id] = transform; + new_poses0[last_keypoint_id] = transform; + + last_keypoint_id++; + } + + if (calib.intrinsics.size() > 1) { + trackPoints(pyramid->at(0), pyramid->at(1), new_poses0, new_poses1); + + for (const auto& kv : new_poses1) { + transforms->observations.at(1).emplace(kv); + } + } + } + + void filterPoints() { + if (calib.intrinsics.size() < 2) return; + + std::set lm_to_remove; + + std::vector kpid; + Eigen::aligned_vector proj0, proj1; + + for (const auto& kv : transforms->observations.at(1)) { + auto it = transforms->observations.at(0).find(kv.first); + + if (it != transforms->observations.at(0).end()) { + proj0.emplace_back(it->second.translation()); + proj1.emplace_back(kv.second.translation()); + kpid.emplace_back(kv.first); + } + } + + Eigen::aligned_vector p3d0, p3d1; + 
std::vector p3d0_success, p3d1_success; + + calib.intrinsics[0].unproject(proj0, p3d0, p3d0_success); + calib.intrinsics[1].unproject(proj1, p3d1, p3d1_success); + + for (size_t i = 0; i < p3d0_success.size(); i++) { + if (p3d0_success[i] && p3d1_success[i]) { + const double epipolar_error = + std::abs(p3d0[i].transpose() * E * p3d1[i]); + + if (epipolar_error > config.optical_flow_epipolar_error) { + lm_to_remove.emplace(kpid[i]); + } + } else { + lm_to_remove.emplace(kpid[i]); + } + } + + for (int id : lm_to_remove) { + transforms->observations.at(1).erase(id); + } + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + private: + int64_t t_ns; + + size_t frame_counter; + + KeypointId last_keypoint_id; + + VioConfig config; + basalt::Calibration calib; + + OpticalFlowResult::Ptr transforms; + std::shared_ptr>> old_pyramid, + pyramid; + + Matrix4 E; + + std::shared_ptr processing_thread; +}; + +} // namespace basalt diff --git a/include/basalt/optical_flow/multiscale_frame_to_frame_optical_flow.h b/include/basalt/optical_flow/multiscale_frame_to_frame_optical_flow.h new file mode 100644 index 0000000..c3473e3 --- /dev/null +++ b/include/basalt/optical_flow/multiscale_frame_to_frame_optical_flow.h @@ -0,0 +1,468 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +// Original source for multi-scale implementation: +// https://github.com/DLR-RM/granite (MIT license) + +#pragma once + +#include + +#include + +#include +#include +#include + +#include +#include + +#include +#include + +namespace basalt { + +/// MultiscaleFrameToFrameOpticalFlow is the same as FrameToFrameOpticalFlow, +/// but patches can be created at all pyramid levels, not just the lowest +/// pyramid. 
+template typename Pattern> +class MultiscaleFrameToFrameOpticalFlow : public OpticalFlowBase { + public: + typedef OpticalFlowPatch> PatchT; + + typedef Eigen::Matrix Vector2; + typedef Eigen::Matrix Matrix2; + + typedef Eigen::Matrix Vector3; + typedef Eigen::Matrix Matrix3; + + typedef Eigen::Matrix Vector4; + typedef Eigen::Matrix Matrix4; + + typedef Sophus::SE2 SE2; + + MultiscaleFrameToFrameOpticalFlow(const VioConfig& config, + const basalt::Calibration& calib) + : t_ns(-1), frame_counter(0), last_keypoint_id(0), config(config) { + input_queue.set_capacity(10); + + this->calib = calib.cast(); + + patch_coord = PatchT::pattern2.template cast(); + + if (calib.intrinsics.size() > 1) { + Eigen::Matrix4d Ed; + Sophus::SE3d T_i_j = calib.T_i_c[0].inverse() * calib.T_i_c[1]; + computeEssential(T_i_j, Ed); + E = Ed.cast(); + } + + processing_thread.reset(new std::thread( + &MultiscaleFrameToFrameOpticalFlow::processingLoop, this)); + } + + ~MultiscaleFrameToFrameOpticalFlow() { processing_thread->join(); } + + void processingLoop() { + OpticalFlowInput::Ptr input_ptr; + + while (true) { + input_queue.pop(input_ptr); + + if (!input_ptr.get()) { + if (output_queue) output_queue->push(nullptr); + break; + } + + processFrame(input_ptr->t_ns, input_ptr); + } + } + + bool processFrame(int64_t curr_t_ns, OpticalFlowInput::Ptr& new_img_vec) { + for (const auto& v : new_img_vec->img_data) { + if (!v.img.get()) { + std::cout << "Image for " << curr_t_ns << " not present!" 
<< std::endl; + return true; + } + } + if (t_ns < 0) { + t_ns = curr_t_ns; + + transforms.reset(new OpticalFlowResult); + transforms->observations.resize(calib.intrinsics.size()); + transforms->pyramid_levels.resize(calib.intrinsics.size()); + transforms->t_ns = t_ns; + + pyramid.reset(new std::vector>); + pyramid->resize(calib.intrinsics.size()); + + tbb::parallel_for(tbb::blocked_range(0, calib.intrinsics.size()), + [&](const tbb::blocked_range& r) { + for (size_t i = r.begin(); i != r.end(); ++i) { + pyramid->at(i).setFromImage( + *new_img_vec->img_data[i].img, + config.optical_flow_levels); + } + }); + + transforms->input_images = new_img_vec; + + addPoints(); + filterPoints(); + } else { + t_ns = curr_t_ns; + + old_pyramid = pyramid; + + pyramid.reset(new std::vector>); + pyramid->resize(calib.intrinsics.size()); + tbb::parallel_for(tbb::blocked_range(0, calib.intrinsics.size()), + [&](const tbb::blocked_range& r) { + for (size_t i = r.begin(); i != r.end(); ++i) { + pyramid->at(i).setFromImage( + *new_img_vec->img_data[i].img, + config.optical_flow_levels); + } + }); + + OpticalFlowResult::Ptr new_transforms; + new_transforms.reset(new OpticalFlowResult); + new_transforms->observations.resize(calib.intrinsics.size()); + new_transforms->pyramid_levels.resize(calib.intrinsics.size()); + new_transforms->t_ns = t_ns; + + for (size_t i = 0; i < calib.intrinsics.size(); i++) { + trackPoints(old_pyramid->at(i), pyramid->at(i), + transforms->observations[i], transforms->pyramid_levels[i], + new_transforms->observations[i], + new_transforms->pyramid_levels[i]); + } + + // std::cout << t_ns << ": Could track " + // << new_transforms->observations.at(0).size() << " points." 
+ // << std::endl; + + transforms = new_transforms; + transforms->input_images = new_img_vec; + + addPoints(); + filterPoints(); + } + + if (frame_counter % config.optical_flow_skip_frames == 0) { + try { + output_queue->push(transforms); + } catch (const tbb::user_abort&) { + return false; + }; + } + + frame_counter++; + return true; + } + + void trackPoints( + const basalt::ManagedImagePyr& pyr_1, + const basalt::ManagedImagePyr& pyr_2, + const Eigen::aligned_map& + transform_map_1, + const std::map& pyramid_levels_1, + Eigen::aligned_map& transform_map_2, + std::map& pyramid_levels_2) const { + size_t num_points = transform_map_1.size(); + + std::vector ids; + Eigen::aligned_vector init_vec; + std::vector pyramid_level; + + ids.reserve(num_points); + init_vec.reserve(num_points); + pyramid_level.reserve(num_points); + + for (const auto& kv : transform_map_1) { + ids.push_back(kv.first); + init_vec.push_back(kv.second); + pyramid_level.push_back(pyramid_levels_1.at(kv.first)); + } + + tbb::concurrent_unordered_map> + result_transforms; + tbb::concurrent_unordered_map> + result_pyramid_level; + + auto compute_func = [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + const KeypointId id = ids[r]; + + const Eigen::AffineCompact2f& transform_1 = init_vec[r]; + Eigen::AffineCompact2f transform_2 = transform_1; + + bool valid = trackPoint(pyr_1, pyr_2, transform_1, pyramid_level[r], + transform_2); + + if (valid) { + Eigen::AffineCompact2f transform_1_recovered = transform_2; + + valid = trackPoint(pyr_2, pyr_1, transform_2, pyramid_level[r], + transform_1_recovered); + + if (valid) { + const Scalar scale = 1 << pyramid_level[r]; + Scalar dist2 = (transform_1.translation() / scale - + transform_1_recovered.translation() / scale) + .squaredNorm(); + + if (dist2 < config.optical_flow_max_recovered_dist2) { + result_transforms[id] = transform_2; + result_pyramid_level[id] = pyramid_level[r]; + } + } + } + } + }; + + 
tbb::blocked_range range(0, num_points); + + tbb::parallel_for(range, compute_func); + // compute_func(range); + + transform_map_2.clear(); + transform_map_2.insert(result_transforms.begin(), result_transforms.end()); + pyramid_levels_2.clear(); + pyramid_levels_2.insert(result_pyramid_level.begin(), + result_pyramid_level.end()); + } + + inline bool trackPoint(const basalt::ManagedImagePyr& old_pyr, + const basalt::ManagedImagePyr& pyr, + const Eigen::AffineCompact2f& old_transform, + const size_t pyramid_level, + Eigen::AffineCompact2f& transform) const { + bool patch_valid = true; + + transform.linear().setIdentity(); + + for (ssize_t level = config.optical_flow_levels; + level >= static_cast(pyramid_level); level--) { + const Scalar scale = 1 << level; + + Eigen::AffineCompact2f transform_tmp = transform; + + transform_tmp.translation() /= scale; + + PatchT p(old_pyr.lvl(level), old_transform.translation() / scale); + + patch_valid &= p.valid; + if (patch_valid) { + // Perform tracking on current level + patch_valid &= trackPointAtLevel(pyr.lvl(level), p, transform_tmp); + } + + if (level == static_cast(pyramid_level) + 1 && !patch_valid) { + return false; + } + + transform_tmp.translation() *= scale; + + if (patch_valid) { + transform = transform_tmp; + } + } + + transform.linear() = old_transform.linear() * transform.linear(); + + return patch_valid; + } + + inline bool trackPointAtLevel(const Image& img_2, + const PatchT& dp, + Eigen::AffineCompact2f& transform) const { + bool patch_valid = true; + + for (int iteration = 0; + patch_valid && iteration < config.optical_flow_max_iterations; + iteration++) { + typename PatchT::VectorP res; + + typename PatchT::Matrix2P transformed_pat = + transform.linear().matrix() * PatchT::pattern2; + transformed_pat.colwise() += transform.translation(); + + patch_valid &= dp.residual(img_2, transformed_pat, res); + + if (patch_valid) { + const Vector3 inc = -dp.H_se2_inv_J_se2_T * res; + + // avoid NaN in increment (leads to 
SE2::exp crashing) + patch_valid &= inc.array().isFinite().all(); + + // avoid very large increment + patch_valid &= inc.template lpNorm() < 1e6; + + if (patch_valid) { + transform *= SE2::exp(inc).matrix(); + + const int filter_margin = 2; + + patch_valid &= img_2.InBounds(transform.translation(), filter_margin); + } + } + } + + return patch_valid; + } + + void addPoints() { + KeypointsData kd; + + Eigen::aligned_map new_poses_main, + new_poses_stereo; + std::map new_pyramid_levels_main, + new_pyramid_levels_stereo; + + for (ssize_t level = 0; + level < static_cast(config.optical_flow_levels) - 1; + level++) { + Eigen::aligned_vector pts; + + for (const auto& kv : transforms->observations.at(0)) { + const ssize_t point_level = + transforms->pyramid_levels.at(0).at(kv.first); + + // do not create points were already points at similar levels are + if (point_level <= level + 1 && point_level >= level - 1) { + // if (point_level == level) { + const Scalar scale = 1 << point_level; + pts.emplace_back( + (kv.second.translation() / scale).template cast()); + } + } + + detectKeypoints(pyramid->at(0).lvl(level), kd, + config.optical_flow_detection_grid_size, 1, pts); + + const Scalar scale = 1 << level; + + for (size_t i = 0; i < kd.corners.size(); i++) { + Eigen::AffineCompact2f transform; + transform.setIdentity(); + transform.translation() = + kd.corners[i].cast() * scale; // TODO cast float? 
+ + transforms->observations.at(0)[last_keypoint_id] = transform; + transforms->pyramid_levels.at(0)[last_keypoint_id] = level; + new_poses_main[last_keypoint_id] = transform; + new_pyramid_levels_main[last_keypoint_id] = level; + + last_keypoint_id++; + } + + trackPoints(pyramid->at(0), pyramid->at(1), new_poses_main, + new_pyramid_levels_main, new_poses_stereo, + new_pyramid_levels_stereo); + + for (const auto& kv : new_poses_stereo) { + transforms->observations.at(1).emplace(kv); + transforms->pyramid_levels.at(1)[kv.first] = + new_pyramid_levels_stereo.at(kv.first); + } + } + } + + void filterPoints() { + std::set lm_to_remove; + + std::vector kpid; + Eigen::aligned_vector proj0, proj1; + + for (const auto& kv : transforms->observations.at(1)) { + auto it = transforms->observations.at(0).find(kv.first); + + if (it != transforms->observations.at(0).end()) { + proj0.emplace_back(it->second.translation()); + proj1.emplace_back(kv.second.translation()); + kpid.emplace_back(kv.first); + } + } + + Eigen::aligned_vector p3d_main, p3d_stereo; + std::vector p3d_main_success, p3d_stereo_success; + + calib.intrinsics[0].unproject(proj0, p3d_main, p3d_main_success); + calib.intrinsics[1].unproject(proj1, p3d_stereo, p3d_stereo_success); + + for (size_t i = 0; i < p3d_main_success.size(); i++) { + if (p3d_main_success[i] && p3d_stereo_success[i]) { + const double epipolar_error = + std::abs(p3d_main[i].transpose() * E * p3d_stereo[i]); + + const Scalar scale = 1 << transforms->pyramid_levels.at(0).at(kpid[i]); + + if (epipolar_error > config.optical_flow_epipolar_error * scale) { + lm_to_remove.emplace(kpid[i]); + } + } else { + lm_to_remove.emplace(kpid[i]); + } + } + + for (int id : lm_to_remove) { + transforms->observations.at(1).erase(id); + } + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + private: + int64_t t_ns; + + size_t frame_counter; + + KeypointId last_keypoint_id; + + VioConfig config; + basalt::Calibration calib; + + OpticalFlowResult::Ptr transforms; + 
std::shared_ptr>> old_pyramid, + pyramid; + + // map from stereo pair -> essential matrix + Matrix4 E; + + std::shared_ptr processing_thread; +}; + +} // namespace basalt diff --git a/include/basalt/optical_flow/optical_flow.h b/include/basalt/optical_flow/optical_flow.h new file mode 100644 index 0000000..ee46031 --- /dev/null +++ b/include/basalt/optical_flow/optical_flow.h @@ -0,0 +1,91 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +#include + +#include + +#include +#include +#include +#include + +#include + +#include + +namespace basalt { + +using KeypointId = size_t; + +struct OpticalFlowInput { + using Ptr = std::shared_ptr; + + int64_t t_ns; + std::vector img_data; + std::vector img_cv_data; +}; + +struct OpticalFlowResult { + using Ptr = std::shared_ptr; + + int64_t t_ns; + std::vector> + observations; + + std::vector> pyramid_levels; + + OpticalFlowInput::Ptr input_images; +}; + +class OpticalFlowBase { + public: + using Ptr = std::shared_ptr; + + tbb::concurrent_bounded_queue input_queue; + tbb::concurrent_bounded_queue* output_queue = nullptr; + + Eigen::MatrixXf patch_coord; +}; + +class OpticalFlowFactory { + public: + static OpticalFlowBase::Ptr getOpticalFlow(const VioConfig& config, + const Calibration& cam); +}; +} // namespace basalt diff --git a/include/basalt/optical_flow/patch.h b/include/basalt/optical_flow/patch.h new file mode 100644 index 0000000..29a6400 --- /dev/null +++ b/include/basalt/optical_flow/patch.h @@ -0,0 +1,226 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#pragma once + +#include +#include + +#include +#include + +namespace basalt { + +template +struct OpticalFlowPatch { + static constexpr int PATTERN_SIZE = Pattern::PATTERN_SIZE; + + typedef Eigen::Matrix Vector2i; + + typedef Eigen::Matrix Vector2; + typedef Eigen::Matrix Vector2T; + typedef Eigen::Matrix Vector3; + typedef Eigen::Matrix Matrix3; + typedef Eigen::Matrix Matrix4; + typedef Eigen::Matrix VectorP; + + typedef Eigen::Matrix Matrix2P; + typedef Eigen::Matrix MatrixP2; + typedef Eigen::Matrix MatrixP3; + typedef Eigen::Matrix Matrix3P; + typedef Eigen::Matrix MatrixP4; + typedef Eigen::Matrix Matrix2Pi; + + static const Matrix2P pattern2; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + OpticalFlowPatch() = default; + + OpticalFlowPatch(const Image &img, const Vector2 &pos) { + setFromImage(img, pos); + } + + template + static void setData(const ImgT &img, const Vector2 &pos, Scalar &mean, + VectorP &data, const Sophus::SE2 *se2 = nullptr) { + int num_valid_points = 0; + Scalar sum = 0; + + for (int i = 0; i < PATTERN_SIZE; i++) { + Vector2 p; + if (se2) { + p = pos + (*se2) * pattern2.col(i); + } else { + p = pos + pattern2.col(i); + }; + + if (img.InBounds(p, 2)) { + Scalar val = img.template interp(p); + data[i] = val; + sum += val; + num_valid_points++; + } else { + data[i] = -1; + } + } + + mean = sum / num_valid_points; + data /= mean; + } + + template + static void setDataJacSe2(const ImgT &img, const Vector2 &pos, Scalar &mean, + VectorP &data, MatrixP3 &J_se2) { + int num_valid_points = 0; + Scalar sum = 0; + Vector3 grad_sum_se2(0, 0, 0); + + Eigen::Matrix Jw_se2; + Jw_se2.template topLeftCorner<2, 2>().setIdentity(); + + for (int i = 0; i < PATTERN_SIZE; i++) { + Vector2 p = pos + pattern2.col(i); + + // Fill jacobians with respect to SE2 warp + Jw_se2(0, 2) = -pattern2(1, i); + Jw_se2(1, 2) = pattern2(0, i); + + if (img.InBounds(p, 2)) { + Vector3 valGrad = img.template interpGrad(p); + data[i] = valGrad[0]; + sum += valGrad[0]; + J_se2.row(i) = 
valGrad.template tail<2>().transpose() * Jw_se2; + grad_sum_se2 += J_se2.row(i); + num_valid_points++; + } else { + data[i] = -1; + } + } + + mean = sum / num_valid_points; + + const Scalar mean_inv = num_valid_points / sum; + + for (int i = 0; i < PATTERN_SIZE; i++) { + if (data[i] >= 0) { + J_se2.row(i) -= grad_sum_se2.transpose() * data[i] / sum; + data[i] *= mean_inv; + } else { + J_se2.row(i).setZero(); + } + } + J_se2 *= mean_inv; + } + + void setFromImage(const Image &img, const Vector2 &pos) { + this->pos = pos; + + MatrixP3 J_se2; + + setDataJacSe2(img, pos, mean, data, J_se2); + + Matrix3 H_se2 = J_se2.transpose() * J_se2; + Matrix3 H_se2_inv; + H_se2_inv.setIdentity(); + H_se2.ldlt().solveInPlace(H_se2_inv); + + H_se2_inv_J_se2_T = H_se2_inv * J_se2.transpose(); + + // NOTE: while it's very unlikely we get a source patch with all black + // pixels, since points are usually selected at corners, it doesn't cost + // much to be safe here. + + // all-black patch cannot be normalized; will result in mean of "zero" and + // H_se2_inv_J_se2_T will contain "NaN" and data will contain "inf" + valid = mean > std::numeric_limits::epsilon() && + H_se2_inv_J_se2_T.array().isFinite().all() && + data.array().isFinite().all(); + } + + inline bool residual(const Image &img, + const Matrix2P &transformed_pattern, + VectorP &residual) const { + Scalar sum = 0; + int num_valid_points = 0; + + for (int i = 0; i < PATTERN_SIZE; i++) { + if (img.InBounds(transformed_pattern.col(i), 2)) { + residual[i] = img.interp(transformed_pattern.col(i)); + sum += residual[i]; + num_valid_points++; + } else { + residual[i] = -1; + } + } + + // all-black patch cannot be normalized + if (sum < std::numeric_limits::epsilon()) { + residual.setZero(); + return false; + } + + int num_residuals = 0; + + for (int i = 0; i < PATTERN_SIZE; i++) { + if (residual[i] >= 0 && data[i] >= 0) { + const Scalar val = residual[i]; + residual[i] = num_valid_points * val / sum - data[i]; + num_residuals++; + + 
} else { + residual[i] = 0; + } + } + + return num_residuals > PATTERN_SIZE / 2; + } + + Vector2 pos = Vector2::Zero(); + VectorP data = VectorP::Zero(); // negative if the point is not valid + + // MatrixP3 J_se2; // total jacobian with respect to se2 warp + // Matrix3 H_se2_inv; + Matrix3P H_se2_inv_J_se2_T = Matrix3P::Zero(); + + Scalar mean = 0; + + bool valid = false; +}; + +template +const typename OpticalFlowPatch::Matrix2P + OpticalFlowPatch::pattern2 = Pattern::pattern2; + +} // namespace basalt diff --git a/include/basalt/optical_flow/patch_optical_flow.h b/include/basalt/optical_flow/patch_optical_flow.h new file mode 100644 index 0000000..72a2466 --- /dev/null +++ b/include/basalt/optical_flow/patch_optical_flow.h @@ -0,0 +1,410 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +#include + +#include +#include +#include + +#include +#include + +#include +#include + +namespace basalt { + +// TODO: patches are currently never erased, so over time memory consumption +// increases +// TODO: some changes from FrameToFrameOpticalFlow could be back-ported +// (adjustments to Scalar=float, tbb parallelization, ...). + +/// PatchOpticalFlow keeps reference patches from the frame where the point was +/// initially created. Should result in more consistent tracks (less drift over +/// time) than frame-to-frame tracking, but it results in shorter tracks in +/// practice. 
+template typename Pattern> +class PatchOpticalFlow : public OpticalFlowBase { + public: + typedef OpticalFlowPatch> PatchT; + + typedef Eigen::Matrix Vector2; + typedef Eigen::Matrix Matrix2; + + typedef Eigen::Matrix Vector3; + typedef Eigen::Matrix Matrix3; + + typedef Eigen::Matrix Vector4; + typedef Eigen::Matrix Matrix4; + + typedef Sophus::SE2 SE2; + + PatchOpticalFlow(const VioConfig& config, + const basalt::Calibration& calib) + : t_ns(-1), + frame_counter(0), + last_keypoint_id(0), + config(config), + calib(calib) { + patches.reserve(3000); + input_queue.set_capacity(10); + + patch_coord = PatchT::pattern2.template cast(); + + if (calib.intrinsics.size() > 1) { + Sophus::SE3d T_i_j = calib.T_i_c[0].inverse() * calib.T_i_c[1]; + computeEssential(T_i_j, E); + } + + processing_thread.reset( + new std::thread(&PatchOpticalFlow::processingLoop, this)); + } + + ~PatchOpticalFlow() { processing_thread->join(); } + + void processingLoop() { + OpticalFlowInput::Ptr input_ptr; + + while (true) { + input_queue.pop(input_ptr); + + if (!input_ptr.get()) { + output_queue->push(nullptr); + break; + } + + processFrame(input_ptr->t_ns, input_ptr); + } + } + + void processFrame(int64_t curr_t_ns, OpticalFlowInput::Ptr& new_img_vec) { + for (const auto& v : new_img_vec->img_data) { + if (!v.img.get()) return; + } + + if (t_ns < 0) { + t_ns = curr_t_ns; + + transforms.reset(new OpticalFlowResult); + transforms->observations.resize(calib.intrinsics.size()); + transforms->t_ns = t_ns; + + pyramid.reset(new std::vector>); + pyramid->resize(calib.intrinsics.size()); + for (size_t i = 0; i < calib.intrinsics.size(); i++) { + pyramid->at(i).setFromImage(*new_img_vec->img_data[i].img, + config.optical_flow_levels); + } + + transforms->input_images = new_img_vec; + + addPoints(); + filterPoints(); + + } else { + t_ns = curr_t_ns; + + old_pyramid = pyramid; + + pyramid.reset(new std::vector>); + pyramid->resize(calib.intrinsics.size()); + for (size_t i = 0; i < 
calib.intrinsics.size(); i++) { + pyramid->at(i).setFromImage(*new_img_vec->img_data[i].img, + config.optical_flow_levels); + } + + OpticalFlowResult::Ptr new_transforms; + new_transforms.reset(new OpticalFlowResult); + new_transforms->observations.resize(new_img_vec->img_data.size()); + new_transforms->t_ns = t_ns; + + for (size_t i = 0; i < calib.intrinsics.size(); i++) { + trackPoints(old_pyramid->at(i), pyramid->at(i), + transforms->observations[i], + new_transforms->observations[i]); + } + + transforms = new_transforms; + transforms->input_images = new_img_vec; + + addPoints(); + filterPoints(); + } + + if (output_queue && frame_counter % config.optical_flow_skip_frames == 0) { + output_queue->push(transforms); + } + frame_counter++; + } + + void trackPoints(const basalt::ManagedImagePyr& pyr_1, + const basalt::ManagedImagePyr& pyr_2, + const Eigen::aligned_map& + transform_map_1, + Eigen::aligned_map& + transform_map_2) const { + size_t num_points = transform_map_1.size(); + + std::vector ids; + Eigen::aligned_vector init_vec; + + ids.reserve(num_points); + init_vec.reserve(num_points); + + for (const auto& kv : transform_map_1) { + ids.push_back(kv.first); + init_vec.push_back(kv.second); + } + + tbb::concurrent_unordered_map> + result; + + auto compute_func = [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + const KeypointId id = ids[r]; + + const Eigen::AffineCompact2f& transform_1 = init_vec[r]; + Eigen::AffineCompact2f transform_2 = transform_1; + + const Eigen::aligned_vector& patch_vec = patches.at(id); + + bool valid = trackPoint(pyr_2, patch_vec, transform_2); + + if (valid) { + Eigen::AffineCompact2f transform_1_recovered = transform_2; + + valid = trackPoint(pyr_1, patch_vec, transform_1_recovered); + + if (valid) { + Scalar dist2 = (transform_1.translation() - + transform_1_recovered.translation()) + .squaredNorm(); + + if (dist2 < config.optical_flow_max_recovered_dist2) { + result[id] = 
transform_2; + } + } + } + } + }; + + tbb::blocked_range range(0, num_points); + + tbb::parallel_for(range, compute_func); + // compute_func(range); + + transform_map_2.clear(); + transform_map_2.insert(result.begin(), result.end()); + } + + inline bool trackPoint(const basalt::ManagedImagePyr& pyr, + const Eigen::aligned_vector& patch_vec, + Eigen::AffineCompact2f& transform) const { + bool patch_valid = true; + + for (int level = config.optical_flow_levels; level >= 0 && patch_valid; + level--) { + const Scalar scale = 1 << level; + + transform.translation() /= scale; + + // TODO: maybe we should better check patch validity when creating points + const auto& p = patch_vec[level]; + patch_valid &= p.valid; + if (patch_valid) { + // Perform tracking on current level + patch_valid &= trackPointAtLevel(pyr.lvl(level), p, transform); + } + + transform.translation() *= scale; + } + + return patch_valid; + } + + inline bool trackPointAtLevel(const Image& img_2, + const PatchT& dp, + Eigen::AffineCompact2f& transform) const { + bool patch_valid = true; + + for (int iteration = 0; + patch_valid && iteration < config.optical_flow_max_iterations; + iteration++) { + typename PatchT::VectorP res; + + typename PatchT::Matrix2P transformed_pat = + transform.linear().matrix() * PatchT::pattern2; + transformed_pat.colwise() += transform.translation(); + + patch_valid &= dp.residual(img_2, transformed_pat, res); + + if (patch_valid) { + const Vector3 inc = -dp.H_se2_inv_J_se2_T * res; + + // avoid NaN in increment (leads to SE2::exp crashing) + patch_valid &= inc.array().isFinite().all(); + + // avoid very large increment + patch_valid &= inc.template lpNorm() < 1e6; + + if (patch_valid) { + transform *= SE2::exp(inc).matrix(); + + const int filter_margin = 2; + + patch_valid &= img_2.InBounds(transform.translation(), filter_margin); + } + } + } + + return patch_valid; + } + + void addPoints() { + Eigen::aligned_vector pts0; + + for (const auto& kv : 
transforms->observations.at(0)) { + pts0.emplace_back(kv.second.translation().cast()); + } + + KeypointsData kd; + + detectKeypoints(pyramid->at(0).lvl(0), kd, + config.optical_flow_detection_grid_size, 1, pts0); + + Eigen::aligned_map new_poses0, + new_poses1; + + for (size_t i = 0; i < kd.corners.size(); i++) { + Eigen::aligned_vector& p = patches[last_keypoint_id]; + + Vector2 pos = kd.corners[i].cast(); + + for (int l = 0; l <= config.optical_flow_levels; l++) { + Scalar scale = 1 << l; + Vector2 pos_scaled = pos / scale; + p.emplace_back(pyramid->at(0).lvl(l), pos_scaled); + } + + Eigen::AffineCompact2f transform; + transform.setIdentity(); + transform.translation() = kd.corners[i].cast(); + + transforms->observations.at(0)[last_keypoint_id] = transform; + new_poses0[last_keypoint_id] = transform; + + last_keypoint_id++; + } + + if (calib.intrinsics.size() > 1) { + trackPoints(pyramid->at(0), pyramid->at(1), new_poses0, new_poses1); + + for (const auto& kv : new_poses1) { + transforms->observations.at(1).emplace(kv); + } + } + } + + void filterPoints() { + if (calib.intrinsics.size() < 2) return; + + std::set lm_to_remove; + + std::vector kpid; + Eigen::aligned_vector proj0, proj1; + + for (const auto& kv : transforms->observations.at(1)) { + auto it = transforms->observations.at(0).find(kv.first); + + if (it != transforms->observations.at(0).end()) { + proj0.emplace_back(it->second.translation().cast()); + proj1.emplace_back(kv.second.translation().cast()); + kpid.emplace_back(kv.first); + } + } + + Eigen::aligned_vector p3d0, p3d1; + std::vector p3d0_success, p3d1_success; + + calib.intrinsics[0].unproject(proj0, p3d0, p3d0_success); + calib.intrinsics[1].unproject(proj1, p3d1, p3d1_success); + + for (size_t i = 0; i < p3d0_success.size(); i++) { + if (p3d0_success[i] && p3d1_success[i]) { + const double epipolar_error = + std::abs(p3d0[i].transpose() * E * p3d1[i]); + + if (epipolar_error > config.optical_flow_epipolar_error) { + 
lm_to_remove.emplace(kpid[i]); + } + } else { + lm_to_remove.emplace(kpid[i]); + } + } + + for (int id : lm_to_remove) { + transforms->observations.at(1).erase(id); + } + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + private: + int64_t t_ns; + + size_t frame_counter; + + KeypointId last_keypoint_id; + + VioConfig config; + basalt::Calibration calib; + + Eigen::aligned_unordered_map> + patches; + + OpticalFlowResult::Ptr transforms; + std::shared_ptr>> old_pyramid, + pyramid; + + Eigen::Matrix4d E; + + std::shared_ptr processing_thread; +}; + +} // namespace basalt diff --git a/include/basalt/optical_flow/patterns.h b/include/basalt/optical_flow/patterns.h new file mode 100644 index 0000000..0b05102 --- /dev/null +++ b/include/basalt/optical_flow/patterns.h @@ -0,0 +1,171 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +namespace basalt { + +template +struct Pattern24 { + // 00 01 + // + // 02 03 04 05 + // + // 06 07 08 09 10 11 + // + // 12 13 14 15 16 17 + // + // 18 19 20 21 + // + // 22 23 + // + // -----> x + // | + // | + // y + + static constexpr Scalar pattern_raw[][2] = { + {-1, 5}, {1, 5}, + + {-3, 3}, {-1, 3}, {1, 3}, {3, 3}, + + {-5, 1}, {-3, 1}, {-1, 1}, {1, 1}, {3, 1}, {5, 1}, + + {-5, -1}, {-3, -1}, {-1, -1}, {1, -1}, {3, -1}, {5, -1}, + + {-3, -3}, {-1, -3}, {1, -3}, {3, -3}, + + {-1, -5}, {1, -5} + + }; + + static constexpr int PATTERN_SIZE = + sizeof(pattern_raw) / (2 * sizeof(Scalar)); + + typedef Eigen::Matrix Matrix2P; + static const Matrix2P pattern2; +}; + +template +const typename Pattern24::Matrix2P Pattern24::pattern2 = + Eigen::Map::Matrix2P>((Scalar *) + Pattern24::pattern_raw); + +template +struct Pattern52 { + // 00 01 02 03 + // + // 04 05 06 07 08 09 + // + // 10 11 12 13 14 15 16 17 + // + // 18 19 20 21 22 23 24 25 + // + // 26 27 28 29 30 31 32 33 + // + // 34 35 36 37 38 39 40 41 + // + // 42 43 44 45 46 47 + // + // 48 49 50 51 + // + // -----> x + // | + // | + // y + + static constexpr Scalar pattern_raw[][2] = { + {-3, 7}, {-1, 7}, {1, 7}, {3, 7}, + + {-5, 5}, {-3, 5}, {-1, 5}, {1, 5}, {3, 5}, {5, 5}, + + {-7, 3}, {-5, 3}, {-3, 3}, {-1, 3}, {1, 3}, {3, 3}, + {5, 3}, {7, 3}, + + {-7, 1}, {-5, 1}, {-3, 1}, {-1, 1}, {1, 1}, {3, 1}, + {5, 1}, {7, 1}, + + {-7, -1}, {-5, -1}, {-3, -1}, {-1, -1}, {1, 
-1}, {3, -1}, + {5, -1}, {7, -1}, + + {-7, -3}, {-5, -3}, {-3, -3}, {-1, -3}, {1, -3}, {3, -3}, + {5, -3}, {7, -3}, + + {-5, -5}, {-3, -5}, {-1, -5}, {1, -5}, {3, -5}, {5, -5}, + + {-3, -7}, {-1, -7}, {1, -7}, {3, -7} + + }; + + static constexpr int PATTERN_SIZE = + sizeof(pattern_raw) / (2 * sizeof(Scalar)); + + typedef Eigen::Matrix Matrix2P; + static const Matrix2P pattern2; +}; + +template +const typename Pattern52::Matrix2P Pattern52::pattern2 = + Eigen::Map::Matrix2P>((Scalar *) + Pattern52::pattern_raw); + +// Same as Pattern52 but twice smaller +template +struct Pattern51 { + static constexpr int PATTERN_SIZE = Pattern52::PATTERN_SIZE; + + typedef Eigen::Matrix Matrix2P; + static const Matrix2P pattern2; +}; + +template +const typename Pattern51::Matrix2P Pattern51::pattern2 = + 0.5 * Pattern52::pattern2; + +// Same as Pattern52 but 0.75 smaller +template +struct Pattern50 { + static constexpr int PATTERN_SIZE = Pattern52::PATTERN_SIZE; + + typedef Eigen::Matrix Matrix2P; + static const Matrix2P pattern2; +}; + +template +const typename Pattern50::Matrix2P Pattern50::pattern2 = + 0.75 * Pattern52::pattern2; + +} // namespace basalt diff --git a/include/basalt/optimization/accumulator.h b/include/basalt/optimization/accumulator.h new file mode 100644 index 0000000..e22a988 --- /dev/null +++ b/include/basalt/optimization/accumulator.h @@ -0,0 +1,281 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#pragma once + +#include +#include + +#include +#include +#include + +#include +#include + +#if defined(BASALT_USE_CHOLMOD) + +#include + +template +using SparseLLT = Eigen::CholmodSupernodalLLT; + +#else + +template +using SparseLLT = Eigen::SimplicialLDLT; + +#endif + +namespace basalt { + +template +class DenseAccumulator { + public: + using Scalar = Scalar_; + + typedef Eigen::Matrix VectorX; + typedef Eigen::Matrix MatrixX; + + template + inline void addH(int i, int j, const Eigen::MatrixBase& data) { + BASALT_ASSERT_STREAM(i >= 0, "i " << i); + BASALT_ASSERT_STREAM(j >= 0, "j " << j); + + BASALT_ASSERT_STREAM(i + ROWS <= H.cols(), "i " << i << " ROWS " << ROWS + << " H.rows() " + << H.rows()); + BASALT_ASSERT_STREAM(j + COLS <= H.rows(), "j " << j << " COLS " << COLS + << " H.cols() " + << H.cols()); + + H.template block(i, j) += data; + } + + template + inline void addB(int i, const Eigen::MatrixBase& data) { + BASALT_ASSERT_STREAM(i >= 0, "i " << i); + + BASALT_ASSERT_STREAM(i + ROWS <= H.cols(), "i " << i << " ROWS " << ROWS + << " H.rows() " + << H.rows()); + + b.template segment(i) += data; + } + + // inline VectorX solve() const { return H.ldlt().solve(b); } + inline VectorX solve(const VectorX* diagonal) const { + if (diagonal == nullptr) { + return H.ldlt().solve(b); + } else { + MatrixX HH = H; + HH.diagonal() += *diagonal; + return HH.ldlt().solve(b); + } + } + + inline void reset(int opt_size) { + H.setZero(opt_size, opt_size); + b.setZero(opt_size); + } + + inline void join(const DenseAccumulator& other) { + H += other.H; + b += other.b; + } + + inline void print() { + Eigen::IOFormat CleanFmt(2); + std::cerr << "H\n" << H.format(CleanFmt) << std::endl; + std::cerr << "b\n" << b.transpose().format(CleanFmt) << std::endl; + } + + inline void setup_solver(){}; + inline VectorX Hdiagonal() const { return H.diagonal(); } + + inline const MatrixX& getH() const { return H; } + inline const VectorX& getB() const { return b; } + + inline MatrixX& 
getH() { return H; } + inline VectorX& getB() { return b; } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + private: + MatrixX H; + VectorX b; +}; + +template +class SparseHashAccumulator { + public: + using Scalar = Scalar_; + + typedef Eigen::Matrix VectorX; + typedef Eigen::Matrix MatrixX; + typedef Eigen::Triplet T; + typedef Eigen::SparseMatrix SparseMatrix; + + template + inline void addH(int si, int sj, const Eigen::MatrixBase& data) { + EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived, ROWS, COLS); + + KeyT id; + id[0] = si; + id[1] = sj; + id[2] = ROWS; + id[3] = COLS; + + auto it = hash_map.find(id); + + if (it == hash_map.end()) { + hash_map.emplace(id, data); + } else { + it->second += data; + } + } + + template + inline void addB(int i, const Eigen::MatrixBase& data) { + b.template segment(i) += data; + } + + inline void setup_solver() { + std::vector triplets; + triplets.reserve(hash_map.size() * 36 + b.rows()); + + for (const auto& kv : hash_map) { + for (int i = 0; i < kv.second.rows(); i++) { + for (int j = 0; j < kv.second.cols(); j++) { + triplets.emplace_back(kv.first[0] + i, kv.first[1] + j, + kv.second(i, j)); + } + } + } + + for (int i = 0; i < b.rows(); i++) { + triplets.emplace_back(i, i, std::numeric_limits::min()); + } + + smm = SparseMatrix(b.rows(), b.rows()); + smm.setFromTriplets(triplets.begin(), triplets.end()); + } + + inline VectorX Hdiagonal() const { return smm.diagonal(); } + + inline VectorX& getB() { return b; } + + inline VectorX solve(const VectorX* diagonal) const { + auto t2 = std::chrono::high_resolution_clock::now(); + + SparseMatrix sm = smm; + if (diagonal) sm.diagonal() += *diagonal; + + VectorX res; + + if (iterative_solver) { + // NOTE: since we have to disable Eigen's parallelization with OpenMP + // (interferes with TBB), the current CG is single-threaded, and we + // can expect a substantial speedup by switching to a parallel + // implementation of CG. 
+ Eigen::ConjugateGradient, + Eigen::Lower | Eigen::Upper> + cg; + + cg.setTolerance(tolerance); + cg.compute(sm); + res = cg.solve(b); + } else { + SparseLLT chol(sm); + res = chol.solve(b); + } + + auto t3 = std::chrono::high_resolution_clock::now(); + + auto elapsed2 = + std::chrono::duration_cast(t3 - t2); + + if (print_info) { + std::cout << "Solving linear system: " << elapsed2.count() * 1e-6 << "s." + << std::endl; + } + + return res; + } + + inline void reset(int opt_size) { + hash_map.clear(); + b.setZero(opt_size); + } + + inline void join(const SparseHashAccumulator& other) { + for (const auto& kv : other.hash_map) { + auto it = hash_map.find(kv.first); + + if (it == hash_map.end()) { + hash_map.emplace(kv.first, kv.second); + } else { + it->second += kv.second; + } + } + + b += other.b; + } + + double tolerance = 1e-4; + bool iterative_solver = false; + bool print_info = false; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + private: + using KeyT = std::array; + + struct KeyHash { + inline size_t operator()(const KeyT& c) const { + size_t seed = 0; + for (int i = 0; i < 4; i++) { + hash_combine(seed, c[i]); + } + return seed; + } + }; + + std::unordered_map hash_map; + + VectorX b; + + SparseMatrix smm; +}; + +} // namespace basalt diff --git a/include/basalt/optimization/linearize.h b/include/basalt/optimization/linearize.h new file mode 100644 index 0000000..465886a --- /dev/null +++ b/include/basalt/optimization/linearize.h @@ -0,0 +1,195 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef BASALT_LINEARIZE_H +#define BASALT_LINEARIZE_H + +#include +#include +#include +#include + +namespace basalt { + +template +struct LinearizeBase { + static const int POSE_SIZE = 6; + + static const int POS_SIZE = 3; + static const int POS_OFFSET = 0; + static const int ROT_SIZE = 3; + static const int ROT_OFFSET = 3; + static const int ACCEL_BIAS_SIZE = 9; + static const int GYRO_BIAS_SIZE = 12; + static const int G_SIZE = 3; + + static const int TIME_SIZE = 1; + + static const int ACCEL_BIAS_OFFSET = 0; + static const int GYRO_BIAS_OFFSET = ACCEL_BIAS_SIZE; + static const int G_OFFSET = GYRO_BIAS_OFFSET + GYRO_BIAS_SIZE; + + typedef Sophus::SE3 SE3; + typedef Eigen::Matrix Vector3; + typedef Eigen::Matrix Vector6; + + typedef Eigen::Matrix Matrix3; + typedef Eigen::Matrix Matrix6; + + typedef Eigen::Matrix VectorX; + typedef Eigen::Matrix MatrixX; + + typedef typename Eigen::aligned_vector::const_iterator PoseDataIter; + typedef typename Eigen::aligned_vector::const_iterator GyroDataIter; + typedef + typename Eigen::aligned_vector::const_iterator AccelDataIter; + typedef typename Eigen::aligned_vector::const_iterator + AprilgridCornersDataIter; + + template + struct PoseCalibH { + Sophus::Matrix6d H_pose_accum; + Sophus::Vector6d b_pose_accum; + Eigen::Matrix H_intr_pose_accum; + Eigen::Matrix H_intr_accum; + Eigen::Matrix b_intr_accum; + + PoseCalibH() { + H_pose_accum.setZero(); + b_pose_accum.setZero(); + H_intr_pose_accum.setZero(); + H_intr_accum.setZero(); + b_intr_accum.setZero(); + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + }; + + struct CalibCommonData { + const Calibration* calibration = nullptr; + const MocapCalibration* mocap_calibration = nullptr; + const Eigen::aligned_vector* aprilgrid_corner_pos_3d = + nullptr; + + // Calib data + const std::vector* offset_T_i_c = nullptr; + const std::vector* offset_intrinsics = nullptr; + + // Cam data + size_t mocap_block_offset; + size_t bias_block_offset; + const std::unordered_map* offset_poses = 
nullptr; + + // Cam-IMU data + const Vector3* g = nullptr; + + bool opt_intrinsics; + + // Cam-IMU options + bool opt_cam_time_offset; + bool opt_g; + bool opt_imu_scale; + + Scalar pose_var_inv; + Vector3 gyro_var_inv; + Vector3 accel_var_inv; + Scalar mocap_var_inv; + + Scalar huber_thresh; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + }; + + template + inline void linearize_point(const Eigen::Vector2d& corner_pos, int corner_id, + const Eigen::Matrix4d& T_c_w, const CamT& cam, + PoseCalibH* cph, double& error, + int& num_points, double& reproj_err) const { + Eigen::Matrix d_r_d_p; + Eigen::Matrix d_r_d_param; + + BASALT_ASSERT_STREAM( + corner_id < int(common_data.aprilgrid_corner_pos_3d->size()), + "corner_id " << corner_id); + + Eigen::Vector4d point3d = + T_c_w * common_data.aprilgrid_corner_pos_3d->at(corner_id); + + Eigen::Vector2d proj; + bool valid; + if (cph) { + valid = cam.project(point3d, proj, &d_r_d_p, &d_r_d_param); + } else { + valid = cam.project(point3d, proj); + } + if (!valid || !proj.array().isFinite().all()) return; + + Eigen::Vector2d residual = proj - corner_pos; + + double e = residual.norm(); + double huber_weight = + e < common_data.huber_thresh ? 
1.0 : common_data.huber_thresh / e; + + if (cph) { + Eigen::Matrix d_point_d_xi; + + d_point_d_xi.topLeftCorner<3, 3>().setIdentity(); + d_point_d_xi.topRightCorner<3, 3>() = + -Sophus::SO3d::hat(point3d.head<3>()); + d_point_d_xi.row(3).setZero(); + + Eigen::Matrix d_r_d_xi = d_r_d_p * d_point_d_xi; + + cph->H_pose_accum += huber_weight * d_r_d_xi.transpose() * d_r_d_xi; + cph->b_pose_accum += huber_weight * d_r_d_xi.transpose() * residual; + + cph->H_intr_pose_accum += + huber_weight * d_r_d_param.transpose() * d_r_d_xi; + cph->H_intr_accum += huber_weight * d_r_d_param.transpose() * d_r_d_param; + cph->b_intr_accum += huber_weight * d_r_d_param.transpose() * residual; + } + + error += huber_weight * e * e * (2 - huber_weight); + reproj_err += e; + num_points++; + } + + CalibCommonData common_data; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +} // namespace basalt + +#endif diff --git a/include/basalt/optimization/poses_linearize.h b/include/basalt/optimization/poses_linearize.h new file mode 100644 index 0000000..4cd814d --- /dev/null +++ b/include/basalt/optimization/poses_linearize.h @@ -0,0 +1,264 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef BASALT_POSES_LINEARIZE_H +#define BASALT_POSES_LINEARIZE_H + +#include +#include +#include + +#include + +#include + +#include + +namespace basalt { + +template +struct LinearizePosesOpt : public LinearizeBase { + static const int POSE_SIZE = LinearizeBase::POSE_SIZE; + + typedef Sophus::SE3 SE3; + typedef Eigen::Matrix Vector2; + typedef Eigen::Matrix Vector3; + typedef Eigen::Matrix Vector4; + typedef Eigen::Matrix Vector6; + + typedef Eigen::Matrix Matrix3; + typedef Eigen::Matrix Matrix6; + + typedef Eigen::Matrix VectorX; + typedef Eigen::Matrix MatrixX; + + typedef typename Eigen::aligned_vector::const_iterator + AprilgridCornersDataIter; + + typedef typename LinearizeBase::CalibCommonData CalibCommonData; + + AccumT accum; + Scalar error; + Scalar reprojection_error; + int num_points; + + size_t opt_size; + + const Eigen::aligned_unordered_map& timestam_to_pose; + + LinearizePosesOpt( + size_t opt_size, + const Eigen::aligned_unordered_map& timestam_to_pose, + const CalibCommonData& common_data) + : 
opt_size(opt_size), timestam_to_pose(timestam_to_pose) { + this->common_data = common_data; + accum.reset(opt_size); + error = 0; + reprojection_error = 0; + num_points = 0; + } + LinearizePosesOpt(const LinearizePosesOpt& other, tbb::split) + : opt_size(other.opt_size), timestam_to_pose(other.timestam_to_pose) { + this->common_data = other.common_data; + accum.reset(opt_size); + error = 0; + reprojection_error = 0; + num_points = 0; + } + + void operator()(const tbb::blocked_range& r) { + for (const AprilgridCornersData& acd : r) { + std::visit( + [&](const auto& cam) { + constexpr int INTRINSICS_SIZE = + std::remove_reference::type::N; + typename LinearizeBase::template PoseCalibH + cph; + + SE3 T_w_i = timestam_to_pose.at(acd.timestamp_ns); + SE3 T_w_c = + T_w_i * this->common_data.calibration->T_i_c[acd.cam_id]; + SE3 T_c_w = T_w_c.inverse(); + Eigen::Matrix4d T_c_w_m = T_c_w.matrix(); + + double err = 0; + double reproj_err = 0; + int num_inliers = 0; + + for (size_t i = 0; i < acd.corner_pos.size(); i++) { + this->linearize_point(acd.corner_pos[i], acd.corner_id[i], + T_c_w_m, cam, &cph, err, num_inliers, + reproj_err); + } + + error += err; + reprojection_error += reproj_err; + num_points += num_inliers; + + const Matrix6 Adj = + -this->common_data.calibration->T_i_c[acd.cam_id] + .inverse() + .Adj(); + + const size_t po = + this->common_data.offset_poses->at(acd.timestamp_ns); + const size_t co = this->common_data.offset_T_i_c->at(acd.cam_id); + const size_t io = + this->common_data.offset_intrinsics->at(acd.cam_id); + + accum.template addH( + po, po, Adj.transpose() * cph.H_pose_accum * Adj); + accum.template addB(po, + Adj.transpose() * cph.b_pose_accum); + + if (acd.cam_id > 0) { + accum.template addH( + co, po, -cph.H_pose_accum * Adj); + accum.template addH(co, co, + cph.H_pose_accum); + + accum.template addB(co, -cph.b_pose_accum); + } + + if (this->common_data.opt_intrinsics) { + accum.template addH( + io, po, cph.H_intr_pose_accum * Adj); + + if 
(acd.cam_id > 0) + accum.template addH( + io, co, -cph.H_intr_pose_accum); + + accum.template addH( + io, io, cph.H_intr_accum); + accum.template addB(io, cph.b_intr_accum); + } + }, + this->common_data.calibration->intrinsics[acd.cam_id].variant); + } + } + + void join(LinearizePosesOpt& rhs) { + accum.join(rhs.accum); + error += rhs.error; + reprojection_error += rhs.reprojection_error; + num_points += rhs.num_points; + } +}; + +template +struct ComputeErrorPosesOpt : public LinearizeBase { + static const int POSE_SIZE = LinearizeBase::POSE_SIZE; + + typedef Sophus::SE3 SE3; + typedef Eigen::Matrix Vector2; + typedef Eigen::Matrix Vector3; + typedef Eigen::Matrix Vector4; + typedef Eigen::Matrix Vector6; + + typedef Eigen::Matrix Matrix3; + typedef Eigen::Matrix Matrix6; + + typedef Eigen::Matrix VectorX; + typedef Eigen::Matrix MatrixX; + + typedef typename Eigen::aligned_vector::const_iterator + AprilgridCornersDataIter; + + typedef typename LinearizeBase::CalibCommonData CalibCommonData; + + Scalar error; + Scalar reprojection_error; + int num_points; + + size_t opt_size; + + const Eigen::aligned_unordered_map& timestam_to_pose; + + ComputeErrorPosesOpt( + size_t opt_size, + const Eigen::aligned_unordered_map& timestam_to_pose, + const CalibCommonData& common_data) + : opt_size(opt_size), timestam_to_pose(timestam_to_pose) { + this->common_data = common_data; + error = 0; + reprojection_error = 0; + num_points = 0; + } + + ComputeErrorPosesOpt(const ComputeErrorPosesOpt& other, tbb::split) + : opt_size(other.opt_size), timestam_to_pose(other.timestam_to_pose) { + this->common_data = other.common_data; + error = 0; + reprojection_error = 0; + num_points = 0; + } + + void operator()(const tbb::blocked_range& r) { + for (const AprilgridCornersData& acd : r) { + std::visit( + [&](const auto& cam) { + SE3 T_w_i = timestam_to_pose.at(acd.timestamp_ns); + SE3 T_w_c = + T_w_i * this->common_data.calibration->T_i_c[acd.cam_id]; + SE3 T_c_w = T_w_c.inverse(); + 
Eigen::Matrix4d T_c_w_m = T_c_w.matrix(); + + double err = 0; + double reproj_err = 0; + int num_inliers = 0; + + for (size_t i = 0; i < acd.corner_pos.size(); i++) { + this->linearize_point(acd.corner_pos[i], acd.corner_id[i], + T_c_w_m, cam, nullptr, err, num_inliers, + reproj_err); + } + + error += err; + reprojection_error += reproj_err; + num_points += num_inliers; + }, + this->common_data.calibration->intrinsics[acd.cam_id].variant); + } + } + + void join(ComputeErrorPosesOpt& rhs) { + error += rhs.error; + reprojection_error += rhs.reprojection_error; + num_points += rhs.num_points; + } +}; // namespace basalt + +} // namespace basalt + +#endif diff --git a/include/basalt/optimization/poses_optimize.h b/include/basalt/optimization/poses_optimize.h new file mode 100644 index 0000000..a4e32fe --- /dev/null +++ b/include/basalt/optimization/poses_optimize.h @@ -0,0 +1,315 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include +#include +#include + +#include + +namespace basalt { + +class PosesOptimization { + static constexpr size_t POSE_SIZE = 6; + + using Scalar = double; + + typedef LinearizePosesOpt> LinearizeT; + + using SE3 = typename LinearizeT::SE3; + using Vector2 = typename LinearizeT::Vector2; + using Vector3 = typename LinearizeT::Vector3; + using Vector4 = typename LinearizeT::Vector4; + using VectorX = typename LinearizeT::VectorX; + + using AprilgridCornersDataIter = + typename Eigen::aligned_vector::const_iterator; + + public: + PosesOptimization() + : lambda(1e-6), min_lambda(1e-12), max_lambda(100), lambda_vee(2) {} + + Vector2 getOpticalCenter(size_t i) { + return calib->intrinsics[i].getParam().template segment<2>(2); + } + + void resetCalib(size_t num_cams, const std::vector &cam_types) { + BASALT_ASSERT(cam_types.size() == num_cams); + + calib.reset(new Calibration); + + for (size_t i = 0; i < cam_types.size(); i++) { + calib->intrinsics.emplace_back( + GenericCamera::fromString(cam_types[i])); + + if (calib->intrinsics.back().getName() != cam_types[i]) { + std::cerr << "Unknown camera type " << cam_types[i] << " default to " + << calib->intrinsics.back().getName() << std::endl; + } 
+ } + calib->T_i_c.resize(num_cams); + } + + void loadCalib(const std::string &p) { + std::string path = p + "calibration.json"; + + std::ifstream is(path); + + if (is.good()) { + cereal::JSONInputArchive archive(is); + calib.reset(new Calibration); + archive(*calib); + std::cout << "Loaded calibration from: " << path << std::endl; + } else { + std::cout << "No calibration found" << std::endl; + } + } + + void saveCalib(const std::string &path) const { + if (calib) { + std::ofstream os(path + "calibration.json"); + cereal::JSONOutputArchive archive(os); + + archive(*calib); + } + } + + bool calibInitialized() const { return calib != nullptr; } + bool initialized() const { return true; } + + // Returns true when converged + bool optimize(bool opt_intrinsics, double huber_thresh, double stop_thresh, + double &error, int &num_points, double &reprojection_error) { + error = 0; + num_points = 0; + + ccd.opt_intrinsics = opt_intrinsics; + ccd.huber_thresh = huber_thresh; + + LinearizePosesOpt> lopt( + problem_size, timestam_to_pose, ccd); + + tbb::blocked_range april_range( + aprilgrid_corners_measurements.begin(), + aprilgrid_corners_measurements.end()); + tbb::parallel_reduce(april_range, lopt); + + error = lopt.error; + num_points = lopt.num_points; + reprojection_error = lopt.reprojection_error; + + std::cout << "[LINEARIZE] Error: " << lopt.error << " num points " + << lopt.num_points << std::endl; + + lopt.accum.setup_solver(); + Eigen::VectorXd Hdiag = lopt.accum.Hdiagonal(); + + bool converged = false; + bool step = false; + int max_iter = 10; + + while (!step && max_iter > 0 && !converged) { + Eigen::aligned_unordered_map + timestam_to_pose_backup = timestam_to_pose; + Calibration calib_backup = *calib; + + Eigen::VectorXd Hdiag_lambda = Hdiag * lambda; + for (int i = 0; i < Hdiag_lambda.size(); i++) + Hdiag_lambda[i] = std::max(Hdiag_lambda[i], min_lambda); + + Eigen::VectorXd inc = -lopt.accum.solve(&Hdiag_lambda); + double max_inc = 
inc.array().abs().maxCoeff(); + if (max_inc < stop_thresh) converged = true; + + for (auto &kv : timestam_to_pose) { + kv.second *= + Sophus::se3_expd(inc.segment(offset_poses[kv.first])); + } + + for (size_t i = 0; i < calib->T_i_c.size(); i++) { + calib->T_i_c[i] *= + Sophus::se3_expd(inc.segment(offset_T_i_c[i])); + } + + for (size_t i = 0; i < calib->intrinsics.size(); i++) { + auto &c = calib->intrinsics[i]; + c.applyInc(inc.segment(offset_cam_intrinsics[i], c.getN())); + } + + ComputeErrorPosesOpt eopt(problem_size, timestam_to_pose, ccd); + tbb::parallel_reduce(april_range, eopt); + + double f_diff = (lopt.error - eopt.error); + double l_diff = 0.5 * inc.dot(inc * lambda - lopt.accum.getB()); + + // std::cout << "f_diff " << f_diff << " l_diff " << l_diff << std::endl; + + double step_quality = f_diff / l_diff; + + if (step_quality < 0) { + std::cout << "\t[REJECTED] lambda:" << lambda + << " step_quality: " << step_quality + << " max_inc: " << max_inc << " Error: " << eopt.error + << " num points " << eopt.num_points << std::endl; + lambda = std::min(max_lambda, lambda_vee * lambda); + lambda_vee *= 2; + + timestam_to_pose = timestam_to_pose_backup; + *calib = calib_backup; + } else { + std::cout << "\t[ACCEPTED] lambda:" << lambda + << " step_quality: " << step_quality + << " max_inc: " << max_inc << " Error: " << eopt.error + << " num points " << eopt.num_points << std::endl; + + lambda = std::max( + min_lambda, + lambda * + std::max(1.0 / 3, 1 - std::pow(2 * step_quality - 1, 3.0))); + lambda_vee = 2; + + error = eopt.error; + num_points = eopt.num_points; + reprojection_error = eopt.reprojection_error; + + step = true; + } + max_iter--; + } + + if (converged) { + std::cout << "[CONVERGED]" << std::endl; + } + + return converged; + } + + void recompute_size() { + offset_poses.clear(); + offset_T_i_c.clear(); + offset_cam_intrinsics.clear(); + + size_t curr_offset = 0; + + for (const auto &kv : timestam_to_pose) { + offset_poses[kv.first] = curr_offset; + 
curr_offset += POSE_SIZE; + } + + offset_T_i_c.emplace_back(curr_offset); + for (size_t i = 0; i < calib->T_i_c.size(); i++) + offset_T_i_c.emplace_back(offset_T_i_c.back() + POSE_SIZE); + + offset_cam_intrinsics.emplace_back(offset_T_i_c.back()); + for (size_t i = 0; i < calib->intrinsics.size(); i++) + offset_cam_intrinsics.emplace_back(offset_cam_intrinsics.back() + + calib->intrinsics[i].getN()); + + problem_size = offset_cam_intrinsics.back(); + } + + Sophus::SE3d getT_w_i(int64_t i) { + auto it = timestam_to_pose.find(i); + + if (it == timestam_to_pose.end()) + return Sophus::SE3d(); + else + return it->second; + } + + void setAprilgridCorners3d(const Eigen::aligned_vector &v) { + aprilgrid_corner_pos_3d = v; + } + + void init() { + recompute_size(); + + ccd.calibration = calib.get(); + ccd.aprilgrid_corner_pos_3d = &aprilgrid_corner_pos_3d; + ccd.offset_poses = &offset_poses; + ccd.offset_T_i_c = &offset_T_i_c; + ccd.offset_intrinsics = &offset_cam_intrinsics; + } + + void addAprilgridMeasurement( + int64_t t_ns, int cam_id, + const Eigen::aligned_vector &corners_pos, + const std::vector &corner_id) { + aprilgrid_corners_measurements.emplace_back(); + + aprilgrid_corners_measurements.back().timestamp_ns = t_ns; + aprilgrid_corners_measurements.back().cam_id = cam_id; + aprilgrid_corners_measurements.back().corner_pos = corners_pos; + aprilgrid_corners_measurements.back().corner_id = corner_id; + } + + void addPoseMeasurement(int64_t t_ns, const Sophus::SE3d &pose) { + timestam_to_pose[t_ns] = pose; + } + + void setVignette(const std::vector> &vign) { + calib->vignette = vign; + } + + void setResolution(const Eigen::aligned_vector &resolution) { + calib->resolution = resolution; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + std::shared_ptr> calib; + + private: + typename LinearizePosesOpt< + Scalar, SparseHashAccumulator>::CalibCommonData ccd; + + Scalar lambda, min_lambda, max_lambda, lambda_vee; + + size_t problem_size; + + std::unordered_map offset_poses; + 
std::vector offset_cam_intrinsics; + std::vector offset_T_i_c; + + // frame poses + Eigen::aligned_unordered_map timestam_to_pose; + + Eigen::aligned_vector aprilgrid_corners_measurements; + + Eigen::aligned_vector aprilgrid_corner_pos_3d; +}; + +} // namespace basalt diff --git a/include/basalt/optimization/spline_linearize.h b/include/basalt/optimization/spline_linearize.h new file mode 100644 index 0000000..f09efb4 --- /dev/null +++ b/include/basalt/optimization/spline_linearize.h @@ -0,0 +1,846 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef BASALT_SPLINE_LINEARIZE_H +#define BASALT_SPLINE_LINEARIZE_H + +#include +#include +#include +#include + +#include + +#include + +#include + +namespace basalt { + +template +struct LinearizeSplineOpt : public LinearizeBase { + static const int POSE_SIZE = LinearizeBase::POSE_SIZE; + static const int POS_SIZE = LinearizeBase::POS_SIZE; + static const int POS_OFFSET = LinearizeBase::POS_OFFSET; + static const int ROT_SIZE = LinearizeBase::ROT_SIZE; + static const int ROT_OFFSET = LinearizeBase::ROT_OFFSET; + static const int ACCEL_BIAS_SIZE = LinearizeBase::ACCEL_BIAS_SIZE; + static const int GYRO_BIAS_SIZE = LinearizeBase::GYRO_BIAS_SIZE; + static const int G_SIZE = LinearizeBase::G_SIZE; + static const int TIME_SIZE = LinearizeBase::TIME_SIZE; + static const int ACCEL_BIAS_OFFSET = LinearizeBase::ACCEL_BIAS_OFFSET; + static const int GYRO_BIAS_OFFSET = LinearizeBase::GYRO_BIAS_OFFSET; + static const int G_OFFSET = LinearizeBase::G_OFFSET; + + typedef Sophus::SE3 SE3; + + typedef Eigen::Matrix Vector2; + typedef Eigen::Matrix Vector3; + typedef Eigen::Matrix Vector4; + typedef Eigen::Matrix Vector6; + + typedef Eigen::Matrix Matrix3; + typedef Eigen::Matrix Matrix6; + + typedef Eigen::Matrix Matrix24; + + typedef Eigen::Matrix VectorX; + typedef Eigen::Matrix MatrixX; + + typedef Se3Spline SplineT; + + typedef typename Eigen::aligned_deque::const_iterator PoseDataIter; + typedef typename Eigen::aligned_deque::const_iterator GyroDataIter; + 
typedef + typename Eigen::aligned_deque::const_iterator AccelDataIter; + typedef typename Eigen::aligned_deque::const_iterator + AprilgridCornersDataIter; + typedef typename Eigen::aligned_deque::const_iterator + MocapPoseDataIter; + + // typedef typename LinearizeBase::PoseCalibH PoseCalibH; + typedef typename LinearizeBase::CalibCommonData CalibCommonData; + + AccumT accum; + Scalar error; + Scalar reprojection_error; + int num_points; + + size_t opt_size; + + const SplineT* spline; + + LinearizeSplineOpt(size_t opt_size, const SplineT* spl, + const CalibCommonData& common_data) + : opt_size(opt_size), spline(spl) { + this->common_data = common_data; + + accum.reset(opt_size); + error = 0; + reprojection_error = 0; + num_points = 0; + + BASALT_ASSERT(spline); + } + + LinearizeSplineOpt(const LinearizeSplineOpt& other, tbb::split) + : opt_size(other.opt_size), spline(other.spline) { + this->common_data = other.common_data; + accum.reset(opt_size); + error = 0; + reprojection_error = 0; + num_points = 0; + } + + void operator()(const tbb::blocked_range& r) { + for (const PoseData& pm : r) { + int64_t start_idx; + + typename SplineT::SO3JacobianStruct J_rot; + typename SplineT::PosJacobianStruct J_pos; + + int64_t time_ns = pm.timestamp_ns; + + // std::cout << "time " << time << std::endl; + // std::cout << "sline.minTime() " << spline.minTime() << std::endl; + + BASALT_ASSERT_STREAM( + time_ns >= spline->minTimeNs(), + "time " << time_ns << " spline.minTimeNs() " << spline->minTimeNs()); + + // Residual from current value of spline + Vector3 residual_pos = + spline->positionResidual(time_ns, pm.data.translation(), &J_pos); + Vector3 residual_rot = + spline->orientationResidual(time_ns, pm.data.so3(), &J_rot); + + // std::cerr << "residual_pos " << residual_pos.transpose() << std::endl; + // std::cerr << "residual_rot " << residual_rot.transpose() << std::endl; + + BASALT_ASSERT(J_pos.start_idx == J_rot.start_idx); + + start_idx = J_pos.start_idx; + + // std::cout 
<< "J_pos.start_idx " << J_pos.start_idx << std::endl; + + const Scalar& pose_var_inv = this->common_data.pose_var_inv; + + error += pose_var_inv * + (residual_pos.squaredNorm() + residual_rot.squaredNorm()); + + for (size_t i = 0; i < N; i++) { + size_t start_i = (start_idx + i) * POSE_SIZE; + + // std::cout << "start_idx " << start_idx << std::endl; + // std::cout << "start_i " << start_i << std::endl; + + BASALT_ASSERT(start_i < opt_size); + + for (size_t j = 0; j <= i; j++) { + size_t start_j = (start_idx + j) * POSE_SIZE; + + BASALT_ASSERT(start_j < opt_size); + + accum.template addH( + start_i + POS_OFFSET, start_j + POS_OFFSET, + this->common_data.pose_var_inv * J_pos.d_val_d_knot[i] * + J_pos.d_val_d_knot[j] * Matrix3::Identity()); + + accum.template addH( + start_i + ROT_OFFSET, start_j + ROT_OFFSET, + this->common_data.pose_var_inv * + J_rot.d_val_d_knot[i].transpose() * J_rot.d_val_d_knot[j]); + } + + accum.template addB( + start_i + POS_OFFSET, + pose_var_inv * J_pos.d_val_d_knot[i] * residual_pos); + accum.template addB( + start_i + ROT_OFFSET, + pose_var_inv * J_rot.d_val_d_knot[i].transpose() * residual_rot); + } + } + } + + void operator()(const tbb::blocked_range& r) { + // size_t num_knots = spline.numKnots(); + // size_t bias_block_offset = POSE_SIZE * num_knots; + + for (const AccelData& pm : r) { + typename SplineT::AccelPosSO3JacobianStruct J; + typename SplineT::Mat39 J_bias; + Matrix3 J_g; + + int64_t t = pm.timestamp_ns; + + // std::cout << "time " << t << std::endl; + // std::cout << "sline.minTime() " << spline.minTime() << std::endl; + + BASALT_ASSERT_STREAM( + t >= spline->minTimeNs(), + "t " << t << " spline.minTime() " << spline->minTimeNs()); + BASALT_ASSERT_STREAM( + t <= spline->maxTimeNs(), + "t " << t << " spline.maxTime() " << spline->maxTimeNs()); + + Vector3 residual = spline->accelResidual( + t, pm.data, this->common_data.calibration->calib_accel_bias, + *(this->common_data.g), &J, &J_bias, &J_g); + + if 
(!this->common_data.opt_imu_scale) { + J_bias.template block<3, 6>(0, 3).setZero(); + } + + // std::cerr << "================================" << std::endl; + // std::cerr << "accel res: " << residual.transpose() << std::endl; + // std::cerr << "accel_bias.transpose(): " + // << this->common_data.calibration->accel_bias.transpose() + // << std::endl; + // std::cerr << "*(this->common_data.g): " + // << this->common_data.g->transpose() << std::endl; + // std::cerr << "pm.data " << pm.data.transpose() << std::endl; + + // std::cerr << "time " << t << std::endl; + // std::cerr << "sline.minTime() " << spline.minTime() << std::endl; + // std::cerr << "sline.maxTime() " << spline.maxTime() << std::endl; + // std::cerr << "================================" << std::endl; + + const Vector3& accel_var_inv = this->common_data.accel_var_inv; + + error += residual.transpose() * accel_var_inv.asDiagonal() * residual; + + size_t start_bias = + this->common_data.bias_block_offset + ACCEL_BIAS_OFFSET; + size_t start_g = this->common_data.bias_block_offset + G_OFFSET; + + for (size_t i = 0; i < N; i++) { + size_t start_i = (J.start_idx + i) * POSE_SIZE; + + BASALT_ASSERT(start_i < opt_size); + + for (size_t j = 0; j <= i; j++) { + size_t start_j = (J.start_idx + j) * POSE_SIZE; + + BASALT_ASSERT(start_j < opt_size); + + accum.template addH( + start_i, start_j, + J.d_val_d_knot[i].transpose() * accel_var_inv.asDiagonal() * + J.d_val_d_knot[j]); + } + accum.template addH( + start_bias, start_i, + J_bias.transpose() * accel_var_inv.asDiagonal() * + J.d_val_d_knot[i]); + + if (this->common_data.opt_g) { + accum.template addH( + start_g, start_i, + J_g.transpose() * accel_var_inv.asDiagonal() * J.d_val_d_knot[i]); + } + + accum.template addB(start_i, J.d_val_d_knot[i].transpose() * + accel_var_inv.asDiagonal() * + residual); + } + + accum.template addH( + start_bias, start_bias, + J_bias.transpose() * accel_var_inv.asDiagonal() * J_bias); + + if (this->common_data.opt_g) { + 
accum.template addH( + start_g, start_bias, + J_g.transpose() * accel_var_inv.asDiagonal() * J_bias); + accum.template addH( + start_g, start_g, + J_g.transpose() * accel_var_inv.asDiagonal() * J_g); + } + + accum.template addB( + start_bias, + J_bias.transpose() * accel_var_inv.asDiagonal() * residual); + + if (this->common_data.opt_g) { + accum.template addB( + start_g, J_g.transpose() * accel_var_inv.asDiagonal() * residual); + } + } + } + + void operator()(const tbb::blocked_range& r) { + // size_t num_knots = spline.numKnots(); + // size_t bias_block_offset = POSE_SIZE * num_knots; + + for (const GyroData& pm : r) { + typename SplineT::SO3JacobianStruct J; + typename SplineT::Mat312 J_bias; + + int64_t t_ns = pm.timestamp_ns; + + BASALT_ASSERT(t_ns >= spline->minTimeNs()); + BASALT_ASSERT(t_ns <= spline->maxTimeNs()); + + const Vector3& gyro_var_inv = this->common_data.gyro_var_inv; + + Vector3 residual = spline->gyroResidual( + t_ns, pm.data, this->common_data.calibration->calib_gyro_bias, &J, + &J_bias); + + if (!this->common_data.opt_imu_scale) { + J_bias.template block<3, 9>(0, 3).setZero(); + } + + // std::cerr << "==============================" << std::endl; + // std::cerr << "residual " << residual.transpose() << std::endl; + // std::cerr << "gyro_bias " + // << this->common_data.calibration->gyro_bias.transpose() + // << std::endl; + // std::cerr << "pm.data " << pm.data.transpose() << std::endl; + // std::cerr << "t_ns " << t_ns << std::endl; + + error += residual.transpose() * gyro_var_inv.asDiagonal() * residual; + + size_t start_bias = + this->common_data.bias_block_offset + GYRO_BIAS_OFFSET; + for (size_t i = 0; i < N; i++) { + size_t start_i = (J.start_idx + i) * POSE_SIZE + ROT_OFFSET; + + // std::cout << "start_idx " << J.start_idx << std::endl; + // std::cout << "start_i " << start_i << std::endl; + + BASALT_ASSERT(start_i < opt_size); + + for (size_t j = 0; j <= i; j++) { + size_t start_j = (J.start_idx + j) * POSE_SIZE + ROT_OFFSET; + + // 
std::cout << "start_j " << start_j << std::endl; + + BASALT_ASSERT(start_i < opt_size); + + accum.template addH( + start_i, start_j, + J.d_val_d_knot[i].transpose() * gyro_var_inv.asDiagonal() * + J.d_val_d_knot[j]); + } + accum.template addH( + start_bias, start_i, + J_bias.transpose() * gyro_var_inv.asDiagonal() * J.d_val_d_knot[i]); + accum.template addB(start_i, J.d_val_d_knot[i].transpose() * + gyro_var_inv.asDiagonal() * + residual); + } + + accum.template addH( + start_bias, start_bias, + J_bias.transpose() * gyro_var_inv.asDiagonal() * J_bias); + accum.template addB( + start_bias, + J_bias.transpose() * gyro_var_inv.asDiagonal() * residual); + } + } + + void operator()(const tbb::blocked_range& r) { + for (const AprilgridCornersData& acd : r) { + std::visit( + [&](const auto& cam) { + constexpr int INTRINSICS_SIZE = + std::remove_reference::type::N; + + typename SplineT::PosePosSO3JacobianStruct J; + + int64_t time_ns = acd.timestamp_ns + + this->common_data.calibration->cam_time_offset_ns; + + if (time_ns < spline->minTimeNs() || time_ns >= spline->maxTimeNs()) + return; + + SE3 T_w_i = spline->pose(time_ns, &J); + + Vector6 d_T_wi_d_time; + spline->d_pose_d_t(time_ns, d_T_wi_d_time); + + SE3 T_w_c = + T_w_i * this->common_data.calibration->T_i_c[acd.cam_id]; + SE3 T_c_w = T_w_c.inverse(); + Eigen::Matrix4d T_c_w_m = T_c_w.matrix(); + + typename LinearizeBase::template PoseCalibH + cph; + + double err = 0; + double reproj_err = 0; + int num_inliers = 0; + + for (size_t i = 0; i < acd.corner_pos.size(); i++) { + this->linearize_point(acd.corner_pos[i], acd.corner_id[i], + T_c_w_m, cam, &cph, err, num_inliers, + reproj_err); + } + + error += err; + reprojection_error += reproj_err; + num_points += num_inliers; + + Matrix6 Adj = -this->common_data.calibration->T_i_c[acd.cam_id] + .inverse() + .Adj(); + Matrix6 A_H_p_A = Adj.transpose() * cph.H_pose_accum * Adj; + + size_t T_i_c_start = this->common_data.offset_T_i_c->at(acd.cam_id); + size_t calib_start = + 
this->common_data.offset_intrinsics->at(acd.cam_id); + size_t start_cam_time_offset = + this->common_data.mocap_block_offset + 2 * POSE_SIZE + 1; + + for (int i = 0; i < N; i++) { + int start_i = (J.start_idx + i) * POSE_SIZE; + + Matrix6 Ji_A_H_p_A = J.d_val_d_knot[i].transpose() * A_H_p_A; + + for (int j = 0; j <= i; j++) { + int start_j = (J.start_idx + j) * POSE_SIZE; + + accum.template addH( + start_i, start_j, Ji_A_H_p_A * J.d_val_d_knot[j]); + } + + accum.template addH( + T_i_c_start, start_i, + -cph.H_pose_accum * Adj * J.d_val_d_knot[i]); + + if (this->common_data.opt_intrinsics) + accum.template addH( + calib_start, start_i, + cph.H_intr_pose_accum * Adj * J.d_val_d_knot[i]); + + if (this->common_data.opt_cam_time_offset) + accum.template addH( + start_cam_time_offset, start_i, + d_T_wi_d_time.transpose() * A_H_p_A * J.d_val_d_knot[i]); + + accum.template addB( + start_i, J.d_val_d_knot[i].transpose() * Adj.transpose() * + cph.b_pose_accum); + } + + accum.template addH(T_i_c_start, T_i_c_start, + cph.H_pose_accum); + accum.template addB(T_i_c_start, -cph.b_pose_accum); + + if (this->common_data.opt_intrinsics) { + accum.template addH( + calib_start, T_i_c_start, -cph.H_intr_pose_accum); + accum.template addH( + calib_start, calib_start, cph.H_intr_accum); + accum.template addB(calib_start, + cph.b_intr_accum); + } + + if (this->common_data.opt_cam_time_offset) { + accum.template addH( + start_cam_time_offset, T_i_c_start, + -d_T_wi_d_time.transpose() * Adj.transpose() * + cph.H_pose_accum); + + if (this->common_data.opt_intrinsics) + accum.template addH( + start_cam_time_offset, calib_start, + d_T_wi_d_time.transpose() * Adj.transpose() * + cph.H_intr_pose_accum.transpose()); + + accum.template addH( + start_cam_time_offset, start_cam_time_offset, + d_T_wi_d_time.transpose() * A_H_p_A * d_T_wi_d_time); + + accum.template addB(start_cam_time_offset, + d_T_wi_d_time.transpose() * + Adj.transpose() * + cph.b_pose_accum); + } + }, + 
this->common_data.calibration->intrinsics[acd.cam_id].variant); + } + } + + void operator()(const tbb::blocked_range& r) { + for (const MocapPoseData& pm : r) { + typename SplineT::PosePosSO3JacobianStruct J_pose; + + int64_t time_ns = + pm.timestamp_ns + + this->common_data.mocap_calibration->mocap_time_offset_ns; + + // std::cout << "time " << time << std::endl; + // std::cout << "sline.minTime() " << spline.minTime() << std::endl; + + if (time_ns < spline->minTimeNs() || time_ns >= spline->maxTimeNs()) + continue; + + BASALT_ASSERT_STREAM( + time_ns >= spline->minTimeNs(), + "time " << time_ns << " spline.minTimeNs() " << spline->minTimeNs()); + + const SE3 T_moc_w = this->common_data.mocap_calibration->T_moc_w; + const SE3 T_i_mark = this->common_data.mocap_calibration->T_i_mark; + + const SE3 T_w_i = spline->pose(time_ns, &J_pose); + const SE3 T_moc_mark = T_moc_w * T_w_i * T_i_mark; + + const SE3 T_mark_moc_meas = pm.data.inverse(); + + Vector6 residual = Sophus::se3_logd(T_mark_moc_meas * T_moc_mark); + + // TODO: check derivatives + Matrix6 d_res_d_T_i_mark; + Sophus::rightJacobianInvSE3Decoupled(residual, d_res_d_T_i_mark); + Matrix6 d_res_d_T_w_i = d_res_d_T_i_mark * T_i_mark.inverse().Adj(); + Matrix6 d_res_d_T_moc_w = + d_res_d_T_i_mark * (T_w_i * T_i_mark).inverse().Adj(); + + Vector6 d_T_wi_d_time; + spline->d_pose_d_t(time_ns, d_T_wi_d_time); + + Vector6 d_res_d_time = d_res_d_T_w_i * d_T_wi_d_time; + + size_t start_idx = J_pose.start_idx; + + size_t start_T_moc_w = this->common_data.mocap_block_offset; + size_t start_T_i_mark = this->common_data.mocap_block_offset + POSE_SIZE; + size_t start_mocap_time_offset = + this->common_data.mocap_block_offset + 2 * POSE_SIZE; + + // std::cout << "J_pos.start_idx " << J_pos.start_idx << std::endl; + + const Scalar& mocap_var_inv = this->common_data.mocap_var_inv; + + error += mocap_var_inv * residual.squaredNorm(); + + Matrix6 H_T_w_i = d_res_d_T_w_i.transpose() * d_res_d_T_w_i; + + for (size_t i = 0; i < N; 
i++) { + size_t start_i = (start_idx + i) * POSE_SIZE; + + // std::cout << "start_idx " << start_idx << std::endl; + // std::cout << "start_i " << start_i << std::endl; + + BASALT_ASSERT(start_i < opt_size); + + Matrix6 Ji_H_T_w_i = J_pose.d_val_d_knot[i].transpose() * H_T_w_i; + + for (size_t j = 0; j <= i; j++) { + size_t start_j = (start_idx + j) * POSE_SIZE; + + BASALT_ASSERT(start_j < opt_size); + + accum.template addH( + start_i, start_j, + mocap_var_inv * Ji_H_T_w_i * J_pose.d_val_d_knot[j]); + } + + accum.template addB( + start_i, mocap_var_inv * J_pose.d_val_d_knot[i].transpose() * + d_res_d_T_w_i.transpose() * residual); + + accum.template addH( + start_T_moc_w, start_i, + mocap_var_inv * d_res_d_T_moc_w.transpose() * d_res_d_T_w_i * + J_pose.d_val_d_knot[i]); + + accum.template addH( + start_T_i_mark, start_i, + mocap_var_inv * d_res_d_T_i_mark.transpose() * d_res_d_T_w_i * + J_pose.d_val_d_knot[i]); + + accum.template addH( + start_mocap_time_offset, start_i, + mocap_var_inv * d_res_d_time.transpose() * d_res_d_T_w_i * + J_pose.d_val_d_knot[i]); + } + + // start_T_moc_w + accum.template addH( + start_T_moc_w, start_T_moc_w, + mocap_var_inv * d_res_d_T_moc_w.transpose() * d_res_d_T_moc_w); + + // start_T_i_mark + accum.template addH( + start_T_i_mark, start_T_moc_w, + mocap_var_inv * d_res_d_T_i_mark.transpose() * d_res_d_T_moc_w); + + accum.template addH( + start_T_i_mark, start_T_i_mark, + mocap_var_inv * d_res_d_T_i_mark.transpose() * d_res_d_T_i_mark); + + // start_mocap_time_offset + accum.template addH( + start_mocap_time_offset, start_T_moc_w, + mocap_var_inv * d_res_d_time.transpose() * d_res_d_T_moc_w); + + accum.template addH( + start_mocap_time_offset, start_T_i_mark, + mocap_var_inv * d_res_d_time.transpose() * d_res_d_T_i_mark); + + accum.template addH( + start_mocap_time_offset, start_mocap_time_offset, + mocap_var_inv * d_res_d_time.transpose() * d_res_d_time); + + // B + accum.template addB( + start_T_moc_w, + mocap_var_inv * 
d_res_d_T_moc_w.transpose() * residual); + + accum.template addB( + start_T_i_mark, + mocap_var_inv * d_res_d_T_i_mark.transpose() * residual); + + accum.template addB( + start_mocap_time_offset, + mocap_var_inv * d_res_d_time.transpose() * residual); + } + } + + void join(LinearizeSplineOpt& rhs) { + accum.join(rhs.accum); + error += rhs.error; + reprojection_error += rhs.reprojection_error; + num_points += rhs.num_points; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +template +struct ComputeErrorSplineOpt : public LinearizeBase { + typedef Sophus::SE3 SE3; + + typedef Eigen::Matrix Vector2; + typedef Eigen::Matrix Vector3; + typedef Eigen::Matrix Vector4; + typedef Eigen::Matrix Vector6; + + typedef Eigen::Matrix Matrix3; + typedef Eigen::Matrix Matrix6; + + typedef Eigen::Matrix Matrix24; + + typedef Eigen::Matrix VectorX; + typedef Eigen::Matrix MatrixX; + + typedef Se3Spline SplineT; + + typedef typename Eigen::aligned_deque::const_iterator PoseDataIter; + typedef typename Eigen::aligned_deque::const_iterator GyroDataIter; + typedef + typename Eigen::aligned_deque::const_iterator AccelDataIter; + typedef typename Eigen::aligned_deque::const_iterator + AprilgridCornersDataIter; + typedef typename Eigen::aligned_deque::const_iterator + MocapPoseDataIter; + + // typedef typename LinearizeBase::PoseCalibH PoseCalibH; + typedef typename LinearizeBase::CalibCommonData CalibCommonData; + + Scalar error; + Scalar reprojection_error; + int num_points; + + size_t opt_size; + + const SplineT* spline; + + ComputeErrorSplineOpt(size_t opt_size, const SplineT* spl, + const CalibCommonData& common_data) + : opt_size(opt_size), spline(spl) { + this->common_data = common_data; + + error = 0; + reprojection_error = 0; + num_points = 0; + + BASALT_ASSERT(spline); + } + + ComputeErrorSplineOpt(const ComputeErrorSplineOpt& other, tbb::split) + : opt_size(other.opt_size), spline(other.spline) { + this->common_data = other.common_data; + error = 0; + reprojection_error = 0; + 
num_points = 0; + } + + void operator()(const tbb::blocked_range& r) { + for (const PoseData& pm : r) { + int64_t time_ns = pm.timestamp_ns; + + BASALT_ASSERT_STREAM( + time_ns >= spline->minTimeNs(), + "time " << time_ns << " spline.minTimeNs() " << spline->minTimeNs()); + + // Residual from current value of spline + Vector3 residual_pos = + spline->positionResidual(time_ns, pm.data.translation()); + Vector3 residual_rot = + spline->orientationResidual(time_ns, pm.data.so3()); + + // std::cout << "J_pos.start_idx " << J_pos.start_idx << std::endl; + + const Scalar& pose_var_inv = this->common_data.pose_var_inv; + + error += pose_var_inv * + (residual_pos.squaredNorm() + residual_rot.squaredNorm()); + } + } + + void operator()(const tbb::blocked_range& r) { + // size_t num_knots = spline.numKnots(); + // size_t bias_block_offset = POSE_SIZE * num_knots; + + for (const AccelData& pm : r) { + int64_t t = pm.timestamp_ns; + + // std::cout << "time " << t << std::endl; + // std::cout << "sline.minTime() " << spline.minTime() << std::endl; + + BASALT_ASSERT_STREAM( + t >= spline->minTimeNs(), + "t " << t << " spline.minTime() " << spline->minTimeNs()); + BASALT_ASSERT_STREAM( + t <= spline->maxTimeNs(), + "t " << t << " spline.maxTime() " << spline->maxTimeNs()); + + Vector3 residual = spline->accelResidual( + t, pm.data, this->common_data.calibration->calib_accel_bias, + *(this->common_data.g)); + + const Vector3& accel_var_inv = this->common_data.accel_var_inv; + + error += residual.transpose() * accel_var_inv.asDiagonal() * residual; + } + } + + void operator()(const tbb::blocked_range& r) { + // size_t num_knots = spline.numKnots(); + // size_t bias_block_offset = POSE_SIZE * num_knots; + + for (const GyroData& pm : r) { + int64_t t_ns = pm.timestamp_ns; + + BASALT_ASSERT(t_ns >= spline->minTimeNs()); + BASALT_ASSERT(t_ns <= spline->maxTimeNs()); + + const Vector3& gyro_var_inv = this->common_data.gyro_var_inv; + + Vector3 residual = spline->gyroResidual( + t_ns, 
pm.data, this->common_data.calibration->calib_gyro_bias); + + error += residual.transpose() * gyro_var_inv.asDiagonal() * residual; + } + } + + void operator()(const tbb::blocked_range& r) { + for (const AprilgridCornersData& acd : r) { + std::visit( + [&](const auto& cam) { + int64_t time_ns = acd.timestamp_ns + + this->common_data.calibration->cam_time_offset_ns; + + if (time_ns < spline->minTimeNs() || time_ns >= spline->maxTimeNs()) + return; + + SE3 T_w_i = spline->pose(time_ns); + SE3 T_w_c = + T_w_i * this->common_data.calibration->T_i_c[acd.cam_id]; + SE3 T_c_w = T_w_c.inverse(); + Eigen::Matrix4d T_c_w_m = T_c_w.matrix(); + + double err = 0; + double reproj_err = 0; + int num_inliers = 0; + + for (size_t i = 0; i < acd.corner_pos.size(); i++) { + this->linearize_point(acd.corner_pos[i], acd.corner_id[i], + T_c_w_m, cam, nullptr, err, num_inliers, + reproj_err); + } + + error += err; + reprojection_error += reproj_err; + num_points += num_inliers; + }, + this->common_data.calibration->intrinsics[acd.cam_id].variant); + } + } + + void operator()(const tbb::blocked_range& r) { + for (const MocapPoseData& pm : r) { + int64_t time_ns = + pm.timestamp_ns + + this->common_data.mocap_calibration->mocap_time_offset_ns; + + if (time_ns < spline->minTimeNs() || time_ns >= spline->maxTimeNs()) + continue; + + BASALT_ASSERT_STREAM( + time_ns >= spline->minTimeNs(), + "time " << time_ns << " spline.minTimeNs() " << spline->minTimeNs()); + + const SE3 T_moc_w = this->common_data.mocap_calibration->T_moc_w; + const SE3 T_i_mark = this->common_data.mocap_calibration->T_i_mark; + + const SE3 T_w_i = spline->pose(time_ns); + const SE3 T_moc_mark = T_moc_w * T_w_i * T_i_mark; + + const SE3 T_mark_moc_meas = pm.data.inverse(); + + Vector6 residual = Sophus::se3_logd(T_mark_moc_meas * T_moc_mark); + + const Scalar& mocap_var_inv = this->common_data.mocap_var_inv; + + error += mocap_var_inv * residual.squaredNorm(); + } + } + + void join(ComputeErrorSplineOpt& rhs) { + error += 
rhs.error; + reprojection_error += rhs.reprojection_error; + num_points += rhs.num_points; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +} // namespace basalt + +#endif diff --git a/include/basalt/optimization/spline_optimize.h b/include/basalt/optimization/spline_optimize.h new file mode 100644 index 0000000..bd0de2c --- /dev/null +++ b/include/basalt/optimization/spline_optimize.h @@ -0,0 +1,612 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include +#include + +#include + +#include + +#include + +#include + +#include + +#include + +namespace basalt { + +template +class SplineOptimization { + public: + typedef LinearizeSplineOpt> + LinearizeT; + + typedef typename LinearizeT::SE3 SE3; + typedef typename LinearizeT::Vector2 Vector2; + typedef typename LinearizeT::Vector3 Vector3; + typedef typename LinearizeT::Vector4 Vector4; + typedef typename LinearizeT::VectorX VectorX; + typedef typename LinearizeT::Matrix24 Matrix24; + + static const int POSE_SIZE = LinearizeT::POSE_SIZE; + static const int POS_SIZE = LinearizeT::POS_SIZE; + static const int POS_OFFSET = LinearizeT::POS_OFFSET; + static const int ROT_SIZE = LinearizeT::ROT_SIZE; + static const int ROT_OFFSET = LinearizeT::ROT_OFFSET; + static const int ACCEL_BIAS_SIZE = LinearizeT::ACCEL_BIAS_SIZE; + static const int GYRO_BIAS_SIZE = LinearizeT::GYRO_BIAS_SIZE; + static const int G_SIZE = LinearizeT::G_SIZE; + + static const int ACCEL_BIAS_OFFSET = LinearizeT::ACCEL_BIAS_OFFSET; + static const int GYRO_BIAS_OFFSET = LinearizeT::GYRO_BIAS_OFFSET; + static const int G_OFFSET = LinearizeT::G_OFFSET; + + const Scalar pose_var; + + Scalar pose_var_inv; + + typedef Eigen::Matrix Matrix3; + + typedef Eigen::Matrix Vector6; + + typedef Eigen::Matrix MatrixX; + + typedef Se3Spline SplineT; + + SplineOptimization(int64_t dt_ns = 1e7, double init_lambda = 1e-12) + : pose_var(1e-4), + mocap_initialized(false), + 
lambda(init_lambda), + min_lambda(1e-18), + max_lambda(100), + lambda_vee(2), + spline(dt_ns), + dt_ns(dt_ns) { + pose_var_inv = 1.0 / pose_var; + + // reasonable default values + // calib.accelerometer_noise_var = 2e-2; + // calib.gyroscope_noise_var = 1e-4; + + g.setZero(); + + min_time_us = std::numeric_limits::max(); + max_time_us = std::numeric_limits::min(); + } + + int64_t getCamTimeOffsetNs() const { return calib->cam_time_offset_ns; } + int64_t getMocapTimeOffsetNs() const { + return mocap_calib->mocap_time_offset_ns; + } + + const SE3& getCamT_i_c(size_t i) const { return calib->T_i_c[i]; } + SE3& getCamT_i_c(size_t i) { return calib->T_i_c[i]; } + + VectorX getIntrinsics(size_t i) const { + return calib->intrinsics[i].getParam(); + } + + const CalibAccelBias& getAccelBias() const { + return calib->calib_accel_bias; + } + const CalibGyroBias& getGyroBias() const { + return calib->calib_gyro_bias; + } + + void resetCalib(size_t num_cams, const std::vector& cam_types) { + BASALT_ASSERT(cam_types.size() == num_cams); + + calib.reset(new Calibration); + + calib->intrinsics.resize(num_cams); + calib->T_i_c.resize(num_cams); + + mocap_calib.reset(new MocapCalibration); + } + + void resetMocapCalib() { mocap_calib.reset(new MocapCalibration); } + + void loadCalib(const std::string& p) { + std::string path = p + "calibration.json"; + + std::ifstream is(path); + + if (is.good()) { + cereal::JSONInputArchive archive(is); + calib.reset(new Calibration); + archive(*calib); + std::cout << "Loaded calibration from: " << path << std::endl; + } else { + std::cerr << "No calibration found. Run camera calibration first!!!" 
+ << std::endl; + } + } + + void saveCalib(const std::string& path) const { + if (calib) { + std::ofstream os(path + "calibration.json"); + cereal::JSONOutputArchive archive(os); + + archive(*calib); + } + } + + void saveMocapCalib(const std::string& path, + int64_t mocap_to_imu_offset_ns = 0) const { + if (calib) { + std::ofstream os(path + "mocap_calibration.json"); + cereal::JSONOutputArchive archive(os); + + mocap_calib->mocap_to_imu_offset_ns = mocap_to_imu_offset_ns; + + archive(*mocap_calib); + } + } + + bool calibInitialized() const { return calib != nullptr; } + + bool initialized() const { return spline.numKnots() > 0; } + + void initSpline(const SE3& pose, int num_knots) { + spline.setKnots(pose, num_knots); + } + + void initSpline(const SplineT& other) { + spline.setKnots(other); + + // std::cerr << "spline.numKnots() " << spline.numKnots() << std::endl; + // std::cerr << "other.numKnots() " << other.numKnots() << std::endl; + + size_t num_knots = spline.numKnots(); + + for (size_t i = 0; i < num_knots; i++) { + Eigen::Vector3d rot_rand_inc = Eigen::Vector3d::Random() / 20; + Eigen::Vector3d trans_rand_inc = Eigen::Vector3d::Random(); + + // std::cerr << "rot_rand_inc " << rot_rand_inc.transpose() << std::endl; + // std::cerr << "trans_rand_inc " << trans_rand_inc.transpose() << + // std::endl; + + spline.getKnotSO3(i) *= Sophus::SO3d::exp(rot_rand_inc); + spline.getKnotPos(i) += trans_rand_inc; + } + } + + const SplineT& getSpline() const { return spline; } + + Vector3 getG() const { return g; } + + void setG(const Vector3& g_a) { g = g_a; } + + // const Calibration& getCalib() const { return calib; } + // void setCalib(const Calibration& c) { calib = c; } + + SE3 getT_w_moc() const { return mocap_calib->T_moc_w.inverse(); } + void setT_w_moc(const SE3& val) { mocap_calib->T_moc_w = val.inverse(); } + + SE3 getT_mark_i() const { return mocap_calib->T_i_mark.inverse(); } + void setT_mark_i(const SE3& val) { mocap_calib->T_i_mark = val.inverse(); } + + 
Eigen::Vector3d getTransAccelWorld(int64_t t_ns) const { + return spline.transAccelWorld(t_ns); + } + + Eigen::Vector3d getRotVelBody(int64_t t_ns) const { + return spline.rotVelBody(t_ns); + } + + SE3 getT_w_i(int64_t t_ns) { return spline.pose(t_ns); } + + void setAprilgridCorners3d(const Eigen::aligned_vector& v) { + aprilgrid_corner_pos_3d = v; + } + + void addPoseMeasurement(int64_t t_ns, const SE3& pose) { + min_time_us = std::min(min_time_us, t_ns); + max_time_us = std::max(max_time_us, t_ns); + + pose_measurements.emplace_back(); + pose_measurements.back().timestamp_ns = t_ns; + pose_measurements.back().data = pose; + } + + void addMocapMeasurement(int64_t t_ns, const SE3& pose) { + mocap_measurements.emplace_back(); + mocap_measurements.back().timestamp_ns = t_ns; + mocap_measurements.back().data = pose; + } + + void addAccelMeasurement(int64_t t_ns, const Vector3& meas) { + min_time_us = std::min(min_time_us, t_ns); + max_time_us = std::max(max_time_us, t_ns); + + accel_measurements.emplace_back(); + accel_measurements.back().timestamp_ns = t_ns; + accel_measurements.back().data = meas; + } + + void addGyroMeasurement(int64_t t_ns, const Vector3& meas) { + min_time_us = std::min(min_time_us, t_ns); + max_time_us = std::max(max_time_us, t_ns); + + gyro_measurements.emplace_back(); + gyro_measurements.back().timestamp_ns = t_ns; + gyro_measurements.back().data = meas; + } + + void addAprilgridMeasurement( + int64_t t_ns, int cam_id, + const Eigen::aligned_vector& corners_pos, + const std::vector& corner_id) { + min_time_us = std::min(min_time_us, t_ns); + max_time_us = std::max(max_time_us, t_ns); + + aprilgrid_corners_measurements.emplace_back(); + aprilgrid_corners_measurements.back().timestamp_ns = t_ns; + aprilgrid_corners_measurements.back().cam_id = cam_id; + aprilgrid_corners_measurements.back().corner_pos = corners_pos; + aprilgrid_corners_measurements.back().corner_id = corner_id; + } + + Scalar getMinTime() const { return min_time_us * 1e-9; } + 
Scalar getMaxTime() const { return max_time_us * 1e-9; } + + int64_t getMinTimeNs() const { return min_time_us; } + int64_t getMaxTimeNs() const { return max_time_us; } + + void init() { + int64_t time_interval_us = max_time_us - min_time_us; + + if (spline.numKnots() == 0) { + spline.setStartTimeNs(min_time_us); + spline.setKnots(pose_measurements.front().data, + time_interval_us / dt_ns + N + 1); + } + + recompute_size(); + + // std::cout << "spline.minTimeNs() " << spline.minTimeNs() << std::endl; + // std::cout << "spline.maxTimeNs() " << spline.maxTimeNs() << std::endl; + + while (!mocap_measurements.empty() && + mocap_measurements.front().timestamp_ns <= + spline.minTimeNs() + spline.getDtNs()) + mocap_measurements.pop_front(); + + while (!mocap_measurements.empty() && + mocap_measurements.back().timestamp_ns >= + spline.maxTimeNs() - spline.getDtNs()) + mocap_measurements.pop_back(); + + ccd.calibration = calib.get(); + ccd.mocap_calibration = mocap_calib.get(); + ccd.aprilgrid_corner_pos_3d = &aprilgrid_corner_pos_3d; + ccd.g = &g; + ccd.offset_intrinsics = &offset_cam_intrinsics; + ccd.offset_T_i_c = &offset_T_i_c; + ccd.bias_block_offset = bias_block_offset; + ccd.mocap_block_offset = mocap_block_offset; + + ccd.opt_g = true; + + ccd.pose_var_inv = pose_var_inv; + ccd.gyro_var_inv = + calib->dicrete_time_gyro_noise_std().array().square().inverse(); + ccd.accel_var_inv = + calib->dicrete_time_accel_noise_std().array().square().inverse(); + ccd.mocap_var_inv = pose_var_inv; + } + + void recompute_size() { + offset_cam_intrinsics.clear(); + + size_t num_knots = spline.numKnots(); + + bias_block_offset = POSE_SIZE * num_knots; + + size_t T_i_c_block_offset = + bias_block_offset + ACCEL_BIAS_SIZE + GYRO_BIAS_SIZE + G_SIZE; + + offset_T_i_c.emplace_back(T_i_c_block_offset); + for (size_t i = 0; i < calib->T_i_c.size(); i++) + offset_T_i_c.emplace_back(offset_T_i_c.back() + POSE_SIZE); + + offset_cam_intrinsics.emplace_back(offset_T_i_c.back()); + for (size_t i 
= 0; i < calib->intrinsics.size(); i++) + offset_cam_intrinsics.emplace_back(offset_cam_intrinsics.back() + + calib->intrinsics[i].getN()); + + mocap_block_offset = offset_cam_intrinsics.back(); + + opt_size = mocap_block_offset + 2 * POSE_SIZE + 2; + + // std::cerr << "bias_block_offset " << bias_block_offset << std::endl; + // std::cerr << "mocap_block_offset " << mocap_block_offset << std::endl; + // std::cerr << "opt_size " << opt_size << std::endl; + // std::cerr << "offset_T_i_c.back() " << offset_T_i_c.back() << + // std::endl; std::cerr << "offset_cam_intrinsics.back() " << + // offset_cam_intrinsics.back() + // << std::endl; + } + + // Returns true when converged + bool optimize(bool use_intr, bool use_poses, bool use_april_corners, + bool opt_cam_time_offset, bool opt_imu_scale, bool use_mocap, + double huber_thresh, double stop_thresh, double& error, + int& num_points, double& reprojection_error, + bool print_info = true) { + // std::cerr << "optimize num_knots " << num_knots << std::endl; + + ccd.opt_intrinsics = use_intr; + ccd.opt_cam_time_offset = opt_cam_time_offset; + ccd.opt_imu_scale = opt_imu_scale; + ccd.huber_thresh = huber_thresh; + + LinearizeT lopt(opt_size, &spline, ccd); + + // auto t1 = std::chrono::high_resolution_clock::now(); + + tbb::blocked_range pose_range(pose_measurements.begin(), + pose_measurements.end()); + tbb::blocked_range april_range( + aprilgrid_corners_measurements.begin(), + aprilgrid_corners_measurements.end()); + + tbb::blocked_range mocap_pose_range( + mocap_measurements.begin(), mocap_measurements.end()); + + tbb::blocked_range accel_range(accel_measurements.begin(), + accel_measurements.end()); + + tbb::blocked_range gyro_range(gyro_measurements.begin(), + gyro_measurements.end()); + + if (use_poses) { + tbb::parallel_reduce(pose_range, lopt); + // lopt(pose_range); + } + + if (use_april_corners) { + tbb::parallel_reduce(april_range, lopt); + // lopt(april_range); + } + + if (use_mocap && mocap_initialized) { + 
tbb::parallel_reduce(mocap_pose_range, lopt); + // lopt(mocap_pose_range); + } else if (use_mocap && !mocap_initialized) { + std::cout << "Mocap residuals are not used. Initialize Mocap first!" + << std::endl; + } + + tbb::parallel_reduce(accel_range, lopt); + tbb::parallel_reduce(gyro_range, lopt); + + error = lopt.error; + num_points = lopt.num_points; + reprojection_error = lopt.reprojection_error; + + if (print_info) + std::cout << "[LINEARIZE] Error: " << lopt.error << " num points " + << lopt.num_points << std::endl; + + lopt.accum.setup_solver(); + Eigen::VectorXd Hdiag = lopt.accum.Hdiagonal(); + + bool converged = false; + bool step = false; + int max_iter = 10; + + while (!step && max_iter > 0 && !converged) { + Eigen::VectorXd Hdiag_lambda = Hdiag * lambda; + for (int i = 0; i < Hdiag_lambda.size(); i++) + Hdiag_lambda[i] = std::max(Hdiag_lambda[i], min_lambda); + + VectorX inc_full = -lopt.accum.solve(&Hdiag_lambda); + double max_inc = inc_full.array().abs().maxCoeff(); + + if (max_inc < stop_thresh) converged = true; + + Calibration calib_backup = *calib; + MocapCalibration mocap_calib_backup = *mocap_calib; + SplineT spline_backup = spline; + Vector3 g_backup = g; + + applyInc(inc_full, offset_cam_intrinsics); + + ComputeErrorSplineOpt eopt(opt_size, &spline, ccd); + if (use_poses) { + tbb::parallel_reduce(pose_range, eopt); + } + + if (use_april_corners) { + tbb::parallel_reduce(april_range, eopt); + } + + if (use_mocap && mocap_initialized) { + tbb::parallel_reduce(mocap_pose_range, eopt); + } else if (use_mocap && !mocap_initialized) { + std::cout << "Mocap residuals are not used. Initialize Mocap first!" 
+ << std::endl; + } + + tbb::parallel_reduce(accel_range, eopt); + tbb::parallel_reduce(gyro_range, eopt); + + double f_diff = (lopt.error - eopt.error); + double l_diff = 0.5 * inc_full.dot(inc_full * lambda - lopt.accum.getB()); + + // std::cout << "f_diff " << f_diff << " l_diff " << l_diff << std::endl; + + double step_quality = f_diff / l_diff; + + if (step_quality < 0) { + if (print_info) + std::cout << "\t[REJECTED] lambda:" << lambda + << " step_quality: " << step_quality + << " max_inc: " << max_inc << " Error: " << eopt.error + << " num points " << eopt.num_points << std::endl; + lambda = std::min(max_lambda, lambda_vee * lambda); + lambda_vee *= 2; + + spline = spline_backup; + *calib = calib_backup; + *mocap_calib = mocap_calib_backup; + g = g_backup; + + } else { + if (print_info) + std::cout << "\t[ACCEPTED] lambda:" << lambda + << " step_quality: " << step_quality + << " max_inc: " << max_inc << " Error: " << eopt.error + << " num points " << eopt.num_points << std::endl; + + lambda = std::max( + min_lambda, + lambda * + std::max(1.0 / 3, 1 - std::pow(2 * step_quality - 1, 3.0))); + lambda_vee = 2; + + error = eopt.error; + num_points = eopt.num_points; + reprojection_error = eopt.reprojection_error; + + step = true; + } + max_iter--; + } + + if (converged && print_info) { + std::cout << "[CONVERGED]" << std::endl; + } + + return converged; + } + + typename Calibration::Ptr calib; + typename MocapCalibration::Ptr mocap_calib; + bool mocap_initialized; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + private: + typedef typename Eigen::aligned_deque::const_iterator PoseDataIter; + typedef typename Eigen::aligned_deque::const_iterator GyroDataIter; + typedef + typename Eigen::aligned_deque::const_iterator AccelDataIter; + typedef typename Eigen::aligned_deque::const_iterator + AprilgridCornersDataIter; + typedef typename Eigen::aligned_deque::const_iterator + MocapPoseDataIter; + + void applyInc(VectorX& inc_full, + const std::vector& offset_cam_intrinsics) { + 
size_t num_knots = spline.numKnots(); + + for (size_t i = 0; i < num_knots; i++) { + Vector6 inc = inc_full.template segment(POSE_SIZE * i); + + // std::cerr << "i: " << i << " inc: " << inc.transpose() << std::endl; + + spline.applyInc(i, inc); + } + + size_t bias_block_offset = POSE_SIZE * num_knots; + calib->calib_accel_bias += inc_full.template segment( + bias_block_offset + ACCEL_BIAS_OFFSET); + + calib->calib_gyro_bias += inc_full.template segment( + bias_block_offset + GYRO_BIAS_OFFSET); + g += inc_full.template segment(bias_block_offset + G_OFFSET); + + size_t T_i_c_block_offset = + bias_block_offset + ACCEL_BIAS_SIZE + GYRO_BIAS_SIZE + G_SIZE; + for (size_t i = 0; i < calib->T_i_c.size(); i++) { + calib->T_i_c[i] *= Sophus::se3_expd(inc_full.template segment( + T_i_c_block_offset + i * POSE_SIZE)); + } + + for (size_t i = 0; i < calib->intrinsics.size(); i++) { + calib->intrinsics[i].applyInc(inc_full.segment( + offset_cam_intrinsics[i], calib->intrinsics[i].getN())); + } + + size_t mocap_block_offset = offset_cam_intrinsics.back(); + + mocap_calib->T_moc_w *= Sophus::se3_expd( + inc_full.template segment(mocap_block_offset)); + mocap_calib->T_i_mark *= Sophus::se3_expd( + inc_full.template segment(mocap_block_offset + POSE_SIZE)); + + mocap_calib->mocap_time_offset_ns += + 1e9 * inc_full[mocap_block_offset + 2 * POSE_SIZE]; + + calib->cam_time_offset_ns += + 1e9 * inc_full[mocap_block_offset + 2 * POSE_SIZE + 1]; + + // std::cout << "bias_block_offset " << bias_block_offset << std::endl; + // std::cout << "mocap_block_offset " << mocap_block_offset << std::endl; + } + + Scalar lambda, min_lambda, max_lambda, lambda_vee; + + int64_t min_time_us, max_time_us; + + Eigen::aligned_deque pose_measurements; + Eigen::aligned_deque gyro_measurements; + Eigen::aligned_deque accel_measurements; + Eigen::aligned_deque aprilgrid_corners_measurements; + Eigen::aligned_deque mocap_measurements; + + typename LinearizeT::CalibCommonData ccd; + + std::vector 
offset_cam_intrinsics; + std::vector offset_T_i_c; + size_t mocap_block_offset; + size_t bias_block_offset; + size_t opt_size; + + SplineT spline; + Vector3 g; + + Eigen::aligned_vector aprilgrid_corner_pos_3d; + + int64_t dt_ns; +}; // namespace basalt + +} // namespace basalt diff --git a/include/basalt/utils/ba_utils.h b/include/basalt/utils/ba_utils.h new file mode 100644 index 0000000..2298d9b --- /dev/null +++ b/include/basalt/utils/ba_utils.h @@ -0,0 +1,146 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +namespace basalt { + +template +Sophus::SE3 computeRelPose( + const Sophus::SE3& T_w_i_h, const Sophus::SE3& T_i_c_h, + const Sophus::SE3& T_w_i_t, const Sophus::SE3& T_i_c_t, + Sophus::Matrix6* d_rel_d_h = nullptr, + Sophus::Matrix6* d_rel_d_t = nullptr) { + Sophus::SE3 tmp2 = (T_i_c_t).inverse(); + + Sophus::SE3 T_t_i_h_i; + T_t_i_h_i.so3() = T_w_i_t.so3().inverse() * T_w_i_h.so3(); + T_t_i_h_i.translation() = + T_w_i_t.so3().inverse() * (T_w_i_h.translation() - T_w_i_t.translation()); + + Sophus::SE3 tmp = tmp2 * T_t_i_h_i; + Sophus::SE3 res = tmp * T_i_c_h; + + if (d_rel_d_h) { + Sophus::Matrix3 R = T_w_i_h.so3().inverse().matrix(); + + Sophus::Matrix6 RR; + RR.setZero(); + RR.template topLeftCorner<3, 3>() = R; + RR.template bottomRightCorner<3, 3>() = R; + + *d_rel_d_h = tmp.Adj() * RR; + } + + if (d_rel_d_t) { + Sophus::Matrix3 R = T_w_i_t.so3().inverse().matrix(); + + Sophus::Matrix6 RR; + RR.setZero(); + RR.template topLeftCorner<3, 3>() = R; + RR.template bottomRightCorner<3, 3>() = R; + + *d_rel_d_t = -tmp2.Adj() * RR; + } + + return res; +} + +template +inline bool linearizePoint( + const Eigen::Matrix& kpt_obs, const Keypoint& kpt_pos, + const Eigen::Matrix& T_t_h, const CamT& cam, + Eigen::Matrix& res, + Eigen::Matrix* d_res_d_xi = nullptr, + Eigen::Matrix* d_res_d_p = nullptr, + Eigen::Matrix* proj = nullptr) { + static_assert(std::is_same_v); + + // Todo implement without jacobians + Eigen::Matrix Jup; + 
Eigen::Matrix p_h_3d; + p_h_3d = StereographicParam::unproject(kpt_pos.direction, &Jup); + p_h_3d[3] = kpt_pos.inv_dist; + + const Eigen::Matrix p_t_3d = T_t_h * p_h_3d; + + Eigen::Matrix Jp; + bool valid = cam.project(p_t_3d, res, &Jp); + valid &= res.array().isFinite().all(); + + if (!valid) { + // std::cerr << " Invalid projection! kpt_pos.dir " + // << kpt_pos.dir.transpose() << " kpt_pos.id " << + // kpt_pos.id + // << " idx " << kpt_obs.kpt_id << std::endl; + + // std::cerr << "T_t_h\n" << T_t_h << std::endl; + // std::cerr << "p_h_3d\n" << p_h_3d.transpose() << std::endl; + // std::cerr << "p_t_3d\n" << p_t_3d.transpose() << std::endl; + + return false; + } + + if (proj) { + proj->template head<2>() = res; + (*proj)[2] = p_t_3d[3] / p_t_3d.template head<3>().norm(); + } + res -= kpt_obs; + + if (d_res_d_xi) { + Eigen::Matrix d_point_d_xi; + d_point_d_xi.template topLeftCorner<3, 3>() = + Eigen::Matrix::Identity() * kpt_pos.inv_dist; + d_point_d_xi.template topRightCorner<3, 3>() = + -Sophus::SO3::hat(p_t_3d.template head<3>()); + d_point_d_xi.row(3).setZero(); + + *d_res_d_xi = Jp * d_point_d_xi; + } + + if (d_res_d_p) { + Eigen::Matrix Jpp; + Jpp.setZero(); + Jpp.template block<3, 2>(0, 0) = T_t_h.template topLeftCorner<3, 4>() * Jup; + Jpp.col(2) = T_t_h.col(3); + + *d_res_d_p = Jp * Jpp; + } + + return true; +} + +} // namespace basalt diff --git a/include/basalt/utils/cast_utils.hpp b/include/basalt/utils/cast_utils.hpp new file mode 100644 index 0000000..91af421 --- /dev/null +++ b/include/basalt/utils/cast_utils.hpp @@ -0,0 +1,64 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2021, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#pragma once + +#include + +namespace basalt { + +template +typename std::make_signed_t signed_cast(I v) { + static_assert(std::is_unsigned_v, "no unsigned"); + return static_cast>(v); +} + +template +typename std::make_unsigned_t unsigned_cast(I v) { + static_assert(std::is_signed_v, "no signed"); + return static_cast>(v); +} + +// copy-assign map while casting Scalar type of values +template +void assign_cast_map_values(M1& a, const M2& b) { + using Scalar = typename M1::mapped_type::Scalar; + a.clear(); + for (const auto& [k, v] : b) { + a.try_emplace(k, v.template cast()); + } +} + +} // namespace basalt diff --git a/include/basalt/utils/common_types.h b/include/basalt/utils/common_types.h new file mode 100644 index 0000000..48678c0 --- /dev/null +++ b/include/basalt/utils/common_types.h @@ -0,0 +1,319 @@ +/** +BSD 3-Clause License + +Copyright (c) 2018, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include + +namespace basalt { + +/// ids for 2D features detected in images +using FeatureId = int; + +/// identifies a frame of multiple images (stereo pair) +using FrameId = int64_t; + +/// identifies the camera (left or right) +using CamId = std::size_t; + +/// pair of image timestamp and camera id identifies an image (imageId) +struct TimeCamId { + TimeCamId() : frame_id(0), cam_id(0) {} + + TimeCamId(const FrameId& frame_id, const CamId& cam_id) + : frame_id(frame_id), cam_id(cam_id) {} + + FrameId frame_id; + CamId cam_id; +}; + +inline std::ostream& operator<<(std::ostream& os, const TimeCamId& tcid) { + os << tcid.frame_id << "_" << tcid.cam_id; + return os; +} + +inline bool operator<(const TimeCamId& o1, const TimeCamId& o2) { + if (o1.frame_id == o2.frame_id) return o1.cam_id < o2.cam_id; + return o1.frame_id < o2.frame_id; +} + +inline bool operator==(const TimeCamId& o1, const TimeCamId& o2) { + return o1.frame_id == o2.frame_id && o1.cam_id == o2.cam_id; +} + +inline bool operator!=(const TimeCamId& o1, const TimeCamId& o2) { + return o1.frame_id != o2.frame_id || o1.cam_id != o2.cam_id; +} + +constexpr static const size_t FEATURE_HASH_MAX_SIZE = 32; +using FeatureHash = std::bitset; +using HashBowVector = std::vector>; + +/// keypoint positions and descriptors for an image +struct KeypointsData { + /// 
collection of 2d corner points (indexed by FeatureId) + std::vector> + corners; + /// collection of feature orientation (in radian) with same index as `corners` + /// (indexed by FeatureId) + std::vector corner_angles; + /// collection of feature descriptors with same index as `corners` (indexed by + /// FeatureId) + std::vector> corner_descriptors; + + Eigen::aligned_vector corners_3d; + + std::vector hashes; + HashBowVector bow_vector; +}; + +/// feature corners is a collection of { imageId => KeypointsData } +using Corners = tbb::concurrent_unordered_map>; + +/// feature matches for an image pair +struct MatchData { + /// estimated transformation (based on inliers or calibration) from the second + /// image's coordinate system to the first image's coordinate system + Sophus::SE3d T_i_j; + /// collection of {featureId_i, featureId_j} pairs of all matches + std::vector> matches; + /// collection of {featureId_i, featureId_j} pairs of inlier matches + std::vector> inliers; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +/// feature matches is a collection of { (imageId, imageId) => MatchData } +using Matches = tbb::concurrent_unordered_map< + std::pair, MatchData, + std::hash>, + std::equal_to>, + Eigen::aligned_allocator< + std::pair, MatchData>>>; + +/// pair of image and feature indices +using ImageFeaturePair = std::pair; + +/// Feature tracks are collections of {ImageId => FeatureId}. +/// I.e. a collection of all images that observed this feature and the +/// corresponding feature index in that image. 
+using FeatureTrack = std::map; + +/// Ids for feature tracks; also used for landmarks created from (some of) the +/// tracks; +using TrackId = int64_t; + +/// FeatureTracks is a collection {TrackId => FeatureTrack} +using FeatureTracks = std::unordered_map; + +/// cameras in the map +struct Camera { + // camera pose (transforms from camera to world) + Sophus::SE3d T_w_c; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +/// landmarks in the map +struct Landmark { + /// 3d position in world coordinates + Eigen::Vector3d p; + + /// Inlier observations in the current map. + /// This is a subset of the original feature track. + FeatureTrack obs; + + /// Outlier observations in the current map. + /// This is a subset of the original feature track. + FeatureTrack outlier_obs; +}; + +/// collection {imageId => Camera} for all cameras in the map +using Cameras = + std::map, + Eigen::aligned_allocator>>; + +/// collection {trackId => Landmark} for all landmarks in the map. +/// trackIds correspond to feature_tracks +using Landmarks = std::unordered_map; + +/// camera candidate to be added to map +struct CameraCandidate { + TimeCamId tcid; + std::vector shared_tracks; + + // keep track of different stages of adding a set of candidate cameras and its + // landmarks to the map + bool tried = false; //!< tried to add to map + bool camera_added = false; //!< succeeded to add to map + bool landmarks_added = false; //!< added new landmarks to map +}; + +/// list of current candidates and some book keeping for the different stages +struct CameraCandidates { + enum Stage { + ComputeCandidates, + AddCameras, + AddLandmarks, + Optimize, + RemoveOutliers, + Done + }; + + std::vector cameras; + Stage current_stage = ComputeCandidates; + int min_localization_inliers = 0; + int max_cameras_to_add = 0; + + int num_cameras_added() { + int num_added = 0; + for (const auto& c : cameras) { + if (c.camera_added) { + ++num_added; + } + } + return num_added; + } + + int num_landmarks_added() { + int 
num_added = 0; + for (const auto& c : cameras) { + if (c.landmarks_added) { + ++num_added; + } + } + return num_added; + } +}; + +/// Flags for different landmark outlier criteria +enum OutlierFlags { + OutlierNone = 0, + // reprojection error much too large + OutlierReprojectionErrorHuge = 1 << 0, + // reprojection error too large + OutlierReprojectionErrorNormal = 1 << 1, + // distance to a camera too small + OutlierCameraDistance = 1 << 2, + // z-coord in some camera frame too small + OutlierZCoordinate = 1 << 3 +}; + +/// info on a single projected landmark +struct ProjectedLandmark { + Eigen::Vector2d point_measured; //!< detected feature location + Eigen::Vector2d point_reprojected; //!< landmark projected into image + Eigen::Vector3d point_3d_c; //!< 3d point in camera coordinates + TrackId track_id = -1; //!< corresponding track_id + double reprojection_error = 0; //!< current reprojection error + unsigned int outlier_flags = OutlierNone; //!< flags for outlier + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +using ProjectedLandmarkPtr = std::shared_ptr; +using ProjectedLandmarkConstPtr = std::shared_ptr; + +/// all landmark projections for inlier and outlier observations for a single +/// image +struct ImageProjection { + std::vector obs; + std::vector outlier_obs; +}; + +/// projections for all images +using ImageProjections = std::map; + +/// inlier projections indexed per track +using TrackProjections = + std::unordered_map>; + +} // namespace basalt + +namespace cereal { + +template +void serialize(Archive& ar, basalt::TimeCamId& c) { + ar(c.frame_id, c.cam_id); +} + +template +void serialize(Archive& ar, basalt::KeypointsData& c) { + ar(c.corners, c.corner_angles, c.corner_descriptors); +} + +template +void serialize(Archive& ar, basalt::MatchData& c) { + ar(c.T_i_j, c.matches, c.inliers); +} + +} // namespace cereal + +namespace std { + +template <> +struct hash { + size_t operator()(const basalt::TimeCamId& x) const { + size_t seed = 0; + 
basalt::hash_combine(seed, x.frame_id); + basalt::hash_combine(seed, x.cam_id); + return seed; + } +}; + +template <> +struct hash> { + size_t operator()( + const std::pair& x) const { + size_t seed = 0; + basalt::hash_combine(seed, x.first.frame_id); + basalt::hash_combine(seed, x.first.cam_id); + basalt::hash_combine(seed, x.second.frame_id); + basalt::hash_combine(seed, x.second.cam_id); + return seed; + } +}; + +} // namespace std diff --git a/include/basalt/utils/filesystem.h b/include/basalt/utils/filesystem.h new file mode 100644 index 0000000..27b6d28 --- /dev/null +++ b/include/basalt/utils/filesystem.h @@ -0,0 +1,70 @@ +#pragma once + +// Check for feature test macro for +#if defined(__cpp_lib_filesystem) +#define INCLUDE_STD_FILESYSTEM_EXPERIMENTAL 0 + +// Check for feature test macro for +#elif defined(__cpp_lib_experimental_filesystem) +#define INCLUDE_STD_FILESYSTEM_EXPERIMENTAL 1 + +// We can't check if headers exist... +// Let's assume experimental to be safe +#elif !defined(__has_include) +#define INCLUDE_STD_FILESYSTEM_EXPERIMENTAL 1 + +// Check if the header "" exists +#elif __has_include() + +// If we're compiling on Visual Studio and are not compiling with C++17, we need +// to use experimental +#ifdef _MSC_VER + +// Check and include header that defines "_HAS_CXX17" +#if __has_include() +#include + +// Check for enabled C++17 support +#if defined(_HAS_CXX17) && _HAS_CXX17 +// We're using C++17, so let's use the normal version +#define INCLUDE_STD_FILESYSTEM_EXPERIMENTAL 0 +#endif +#endif + +// If the macro isn't defined yet, that means any of the other VS specific +// checks failed, so we need to use experimental +#ifndef INCLUDE_STD_FILESYSTEM_EXPERIMENTAL +#define INCLUDE_STD_FILESYSTEM_EXPERIMENTAL 1 +#endif + +// Not on Visual Studio. 
Let's use the normal version +#else  // #ifdef _MSC_VER +#define INCLUDE_STD_FILESYSTEM_EXPERIMENTAL 0 +#endif + +// Check if the header "" exists +#elif __has_include() +#define INCLUDE_STD_FILESYSTEM_EXPERIMENTAL 1 + +// Fail if neither header is available with a nice error message +#else +#error Could not find system header "" or "" +#endif + +// We previously determined that we need the experimental version +#if INCLUDE_STD_FILESYSTEM_EXPERIMENTAL +#include + +namespace basalt { +namespace fs = std::experimental::filesystem; +} + +// We have a decent compiler and can use the normal version +#else +#include + +namespace basalt { +namespace fs = std::filesystem; +} + +#endif diff --git a/include/basalt/utils/imu_types.h b/include/basalt/utils/imu_types.h new file mode 100644 index 0000000..832e878 --- /dev/null +++ b/include/basalt/utils/imu_types.h @@ -0,0 +1,385 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#pragma once + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include + +namespace basalt { + +template +class IntegratedImuMeasurement; + +template +struct PoseStateWithLin; + +namespace constants { +static const Eigen::Vector3d g(0, 0, -9.81); +static const Eigen::Vector3d g_dir(0, 0, -1); +} // namespace constants + +template +struct PoseVelBiasStateWithLin { + using Scalar = Scalar_; + + using VecN = typename PoseVelBiasState::VecN; + using Vec3 = Eigen::Matrix; + using SE3 = Sophus::SE3; + + PoseVelBiasStateWithLin() { + linearized = false; + delta.setZero(); + }; + + PoseVelBiasStateWithLin(int64_t t_ns, const SE3& T_w_i, const Vec3& vel_w_i, + const Vec3& bias_gyro, const Vec3& bias_accel, + bool linearized) + : linearized(linearized), + state_linearized(t_ns, T_w_i, vel_w_i, bias_gyro, bias_accel) { + delta.setZero(); + state_current = state_linearized; + } + + explicit PoseVelBiasStateWithLin(const PoseVelBiasState& other) + : linearized(false), state_linearized(other) { + delta.setZero(); + state_current = other; + } + + template + PoseVelBiasStateWithLin cast() const { + PoseVelBiasStateWithLin a; + a.linearized = linearized; + 
a.delta = delta.template cast(); + a.state_linearized = state_linearized.template cast(); + a.state_current = state_current.template cast(); + return a; + } + + void setLinFalse() { + linearized = false; + delta.setZero(); + } + + void setLinTrue() { + linearized = true; + BASALT_ASSERT(delta.isApproxToConstant(0)); + state_current = state_linearized; + } + + void applyInc(const VecN& inc) { + if (!linearized) { + state_linearized.applyInc(inc); + } else { + delta += inc; + state_current = state_linearized; + state_current.applyInc(delta); + } + } + + inline const PoseVelBiasState& getState() const { + if (!linearized) { + return state_linearized; + } else { + return state_current; + } + } + + inline const PoseVelBiasState& getStateLin() const { + return state_linearized; + } + + inline bool isLinearized() const { return linearized; } + inline const VecN& getDelta() const { return delta; } + inline int64_t getT_ns() const { return state_linearized.t_ns; } + + inline void backup() { + backup_delta = delta; + backup_state_linearized = state_linearized; + backup_state_current = state_current; + } + + inline void restore() { + delta = backup_delta; + state_linearized = backup_state_linearized; + state_current = backup_state_current; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + private: + bool linearized; + VecN delta; + PoseVelBiasState state_linearized, state_current; + + VecN backup_delta; + PoseVelBiasState backup_state_linearized, backup_state_current; + + // give access to private members for constructor of PoseStateWithLin + friend struct PoseStateWithLin; + + // give access to private members for cast() implementation + template + friend struct PoseVelBiasStateWithLin; + + friend class cereal::access; + + template + void serialize(Archive& ar) { + ar(state_linearized.T_w_i); + ar(state_linearized.vel_w_i); + ar(state_linearized.bias_gyro); + ar(state_linearized.bias_accel); + ar(state_current.T_w_i); + ar(state_current.vel_w_i); + ar(state_current.bias_gyro); + 
ar(state_current.bias_accel); + ar(delta); + ar(linearized); + ar(state_linearized.t_ns); + } +}; + +template +struct PoseStateWithLin { + using Scalar = Scalar_; + using VecN = typename PoseState::VecN; + using Vec3 = Eigen::Matrix; + using SE3 = Sophus::SE3; + + PoseStateWithLin() { + linearized = false; + delta.setZero(); + }; + + PoseStateWithLin(int64_t t_ns, const SE3& T_w_i, bool linearized = false) + : linearized(linearized), pose_linearized(t_ns, T_w_i) { + delta.setZero(); + T_w_i_current = T_w_i; + } + + explicit PoseStateWithLin(const PoseVelBiasStateWithLin& other) + : linearized(other.linearized), + delta(other.delta.template head<6>()), + pose_linearized(other.state_linearized.t_ns, + other.state_linearized.T_w_i) { + T_w_i_current = pose_linearized.T_w_i; + PoseState::incPose(delta, T_w_i_current); + } + + template + PoseStateWithLin cast() const { + PoseStateWithLin a; + a.linearized = linearized; + a.delta = delta.template cast(); + a.pose_linearized = pose_linearized.template cast(); + a.T_w_i_current = T_w_i_current.template cast(); + return a; + } + + void setLinTrue() { + linearized = true; + BASALT_ASSERT(delta.isApproxToConstant(0)); + T_w_i_current = pose_linearized.T_w_i; + } + + inline const SE3& getPose() const { + if (!linearized) { + return pose_linearized.T_w_i; + } else { + return T_w_i_current; + } + } + + inline const SE3& getPoseLin() const { return pose_linearized.T_w_i; } + + inline void applyInc(const VecN& inc) { + if (!linearized) { + PoseState::incPose(inc, pose_linearized.T_w_i); + } else { + delta += inc; + T_w_i_current = pose_linearized.T_w_i; + PoseState::incPose(delta, T_w_i_current); + } + } + + inline bool isLinearized() const { return linearized; } + inline const VecN& getDelta() const { return delta; } + inline int64_t getT_ns() const { return pose_linearized.t_ns; } + + inline void backup() { + backup_delta = delta; + backup_pose_linearized = pose_linearized; + backup_T_w_i_current = T_w_i_current; + } + + inline 
void restore() { + delta = backup_delta; + pose_linearized = backup_pose_linearized; + T_w_i_current = backup_T_w_i_current; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + private: + bool linearized; + VecN delta; + PoseState pose_linearized; + SE3 T_w_i_current; + + VecN backup_delta; + PoseState backup_pose_linearized; + SE3 backup_T_w_i_current; + + // give access to private members for cast() implementation + template + friend struct PoseStateWithLin; + + friend class cereal::access; + + template + void serialize(Archive& ar) { + ar(pose_linearized.T_w_i); + ar(T_w_i_current); + ar(delta); + ar(linearized); + ar(pose_linearized.t_ns); + } +}; + +struct AbsOrderMap { + std::map> abs_order_map; + size_t items = 0; + size_t total_size = 0; + + void print_order() { + for (const auto& kv : abs_order_map) { + std::cout << kv.first << " (" << kv.second.first << "," + << kv.second.second << ")" << std::endl; + } + std::cout << std::endl; + } +}; + +template +struct ImuLinData { + using Scalar = Scalar_; + + const Eigen::Matrix& g; + const Eigen::Matrix& gyro_bias_weight_sqrt; + const Eigen::Matrix& accel_bias_weight_sqrt; + + std::map*> imu_meas; +}; + +template +struct MargLinData { + using Scalar = Scalar_; + + bool is_sqrt = true; + + AbsOrderMap order; + Eigen::Matrix H; + Eigen::Matrix b; +}; + +struct MargData { + typedef std::shared_ptr Ptr; + + AbsOrderMap aom; + Eigen::MatrixXd abs_H; + Eigen::VectorXd abs_b; + Eigen::aligned_map> frame_states; + Eigen::aligned_map> frame_poses; + std::set kfs_all; + std::set kfs_to_marg; + bool use_imu; + + std::vector opt_flow_res; +}; + +struct RelPoseFactor { + int64_t t_i_ns, t_j_ns; + + Sophus::SE3d T_i_j; + Sophus::Matrix6d cov_inv; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +struct RollPitchFactor { + int64_t t_ns; + + Sophus::SO3d R_w_i_meas; + + Eigen::Matrix2d cov_inv; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; +} // namespace basalt + +namespace cereal { + +template +void serialize(Archive& ar, basalt::AbsOrderMap& a) { + 
ar(a.total_size); + ar(a.items); + ar(a.abs_order_map); +} + +template +void serialize(Archive& ar, basalt::MargData& m) { + ar(m.aom); + ar(m.abs_H); + ar(m.abs_b); + ar(m.frame_poses); + ar(m.frame_states); + ar(m.kfs_all); + ar(m.kfs_to_marg); + ar(m.use_imu); +} + +} // namespace cereal diff --git a/include/basalt/utils/keypoints.h b/include/basalt/utils/keypoints.h new file mode 100644 index 0000000..d9eed21 --- /dev/null +++ b/include/basalt/utils/keypoints.h @@ -0,0 +1,104 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include +#include + +#include +#include + +#include +#include + +#include +#include + +namespace basalt { + +typedef std::bitset<256> Descriptor; + +void detectKeypointsMapping(const basalt::Image& img_raw, + KeypointsData& kd, int num_features); + +void detectKeypoints( + const basalt::Image& img_raw, KeypointsData& kd, + int PATCH_SIZE = 32, int num_points_cell = 1, + const Eigen::aligned_vector& current_points = + Eigen::aligned_vector()); + +void computeAngles(const basalt::Image& img_raw, + KeypointsData& kd, bool rotate_features); + +void computeDescriptors(const basalt::Image& img_raw, + KeypointsData& kd); + +void matchDescriptors(const std::vector>& corner_descriptors_1, + const std::vector>& corner_descriptors_2, + std::vector>& matches, int threshold, + double dist_2_best); + +inline void computeEssential(const Sophus::SE3d& T_0_1, Eigen::Matrix4d& E) { + E.setZero(); + const Eigen::Vector3d t_0_1 = T_0_1.translation(); + const Eigen::Matrix3d R_0_1 = T_0_1.rotationMatrix(); + + E.topLeftCorner<3, 3>() = Sophus::SO3d::hat(t_0_1.normalized()) * R_0_1; +} + +inline void findInliersEssential(const KeypointsData& kd1, + const KeypointsData& kd2, + const Eigen::Matrix4d& E, + double epipolar_error_threshold, + MatchData& md) { + md.inliers.clear(); + + for (size_t j = 0; j < md.matches.size(); j++) { + const Eigen::Vector4d p0_3d = kd1.corners_3d[md.matches[j].first]; + const Eigen::Vector4d p1_3d = 
kd2.corners_3d[md.matches[j].second]; + + const double epipolar_error = std::abs(p0_3d.transpose() * E * p1_3d); + + if (epipolar_error < epipolar_error_threshold) { + md.inliers.push_back(md.matches[j]); + } + } +} + +void findInliersRansac(const KeypointsData& kd1, const KeypointsData& kd2, + const double ransac_thresh, const int ransac_min_inliers, + MatchData& md); + +} // namespace basalt diff --git a/include/basalt/utils/nfr.h b/include/basalt/utils/nfr.h new file mode 100644 index 0000000..7ea967b --- /dev/null +++ b/include/basalt/utils/nfr.h @@ -0,0 +1,120 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +#include + +namespace basalt { + +inline Sophus::Vector6d relPoseError( + const Sophus::SE3d& T_i_j, const Sophus::SE3d& T_w_i, + const Sophus::SE3d& T_w_j, Sophus::Matrix6d* d_res_d_T_w_i = nullptr, + Sophus::Matrix6d* d_res_d_T_w_j = nullptr) { + Sophus::SE3d T_j_i = T_w_j.inverse() * T_w_i; + Sophus::Vector6d res = Sophus::se3_logd(T_i_j * T_j_i); + + if (d_res_d_T_w_i || d_res_d_T_w_j) { + Sophus::Matrix6d J; + Sophus::rightJacobianInvSE3Decoupled(res, J); + + Eigen::Matrix3d R = T_w_i.so3().inverse().matrix(); + + Sophus::Matrix6d Adj; + Adj.setZero(); + Adj.topLeftCorner<3, 3>() = R; + Adj.bottomRightCorner<3, 3>() = R; + + if (d_res_d_T_w_i) { + *d_res_d_T_w_i = J * Adj; + } + + if (d_res_d_T_w_j) { + Adj.topRightCorner<3, 3>() = + Sophus::SO3d::hat(T_j_i.inverse().translation()) * R; + *d_res_d_T_w_j = -J * Adj; + } + } + + return res; +} + +inline Sophus::Vector3d absPositionError( + const Sophus::SE3d& T_w_i, const Eigen::Vector3d pos, + Eigen::Matrix* d_res_d_T_w_i = nullptr) { + if (d_res_d_T_w_i) { + d_res_d_T_w_i->topLeftCorner<3, 3>().setIdentity(); + d_res_d_T_w_i->topRightCorner<3, 3>().setZero(); + } + + return T_w_i.translation() - pos; +} + +inline double yawError(const Sophus::SE3d& T_w_i, + const Eigen::Vector3d yaw_dir_body, + Eigen::Matrix* d_res_d_T_w_i = nullptr) { + Eigen::Matrix3d curr_R_w_i = T_w_i.so3().matrix(); + Eigen::Vector3d tmp = curr_R_w_i * yaw_dir_body; + double res_yaw = tmp[1]; + 
+ if (d_res_d_T_w_i) { + d_res_d_T_w_i->setZero(); + (*d_res_d_T_w_i)[3] = -tmp[2]; + (*d_res_d_T_w_i)[5] = tmp[0]; + } + + return res_yaw; +} + +inline Sophus::Vector2d rollPitchError( + const Sophus::SE3d& T_w_i, const Sophus::SO3d& R_w_i_meas, + Eigen::Matrix* d_res_d_T_w_i = nullptr) { + // Assumes g direction is negative Z in world coordinate frame + + Eigen::Matrix3d R = (R_w_i_meas * T_w_i.so3().inverse()).matrix(); + Eigen::Vector3d res = R * (-Eigen::Vector3d::UnitZ()); + + if (d_res_d_T_w_i) { + d_res_d_T_w_i->setZero(); + (*d_res_d_T_w_i)(0, 3) = -R(0, 1); + (*d_res_d_T_w_i)(1, 3) = -R(1, 1); + (*d_res_d_T_w_i)(0, 4) = R(0, 0); + (*d_res_d_T_w_i)(1, 4) = R(1, 0); + } + + return res.head<2>(); +} +} // namespace basalt diff --git a/include/basalt/utils/sim_utils.h b/include/basalt/utils/sim_utils.h new file mode 100644 index 0000000..1e8e98a --- /dev/null +++ b/include/basalt/utils/sim_utils.h @@ -0,0 +1,53 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +namespace basalt { + +struct SimObservations { + Eigen::aligned_vector pos; + std::vector id; +}; + +} // namespace basalt + +namespace cereal { +template +void serialize(Archive& ar, basalt::SimObservations& c) { + ar(c.pos, c.id); +} +} // namespace cereal diff --git a/include/basalt/utils/system_utils.h b/include/basalt/utils/system_utils.h new file mode 100644 index 0000000..eb9bd5b --- /dev/null +++ b/include/basalt/utils/system_utils.h @@ -0,0 +1,60 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef BASALT_SYSTEM_UTILS_H +#define BASALT_SYSTEM_UTILS_H + +#include +#include + +namespace basalt { + +inline std::string ensure_trailing_slash(const std::string& path) { + if (!path.empty() && path[path.size() - 1] != '/') { + return path + "/"; + } else { + return path; + } +} + +struct MemoryInfo { + uint64_t resident_memory = 0; //!< in bytes + uint64_t resident_memory_peak = 0; //!< in bytes +}; + +bool get_memory_info(MemoryInfo& info); + +} // namespace basalt + +#endif // include guard diff --git a/include/basalt/utils/test_utils.h b/include/basalt/utils/test_utils.h new file mode 100644 index 0000000..10a2d5f --- /dev/null +++ b/include/basalt/utils/test_utils.h @@ -0,0 +1,77 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef BASALT_TEST_UTILS_H +#define BASALT_TEST_UTILS_H + +#include + +template +void test_jacobian_code(const std::string& name, + const Eigen::MatrixBase& Ja, F func, + const Eigen::MatrixBase& x0, + double eps = 1e-8, double max_norm = 1e-4) { + typedef typename Derived1::Scalar Scalar; + + Eigen::Matrix Jn = Ja; + Jn.setZero(); + + Eigen::Matrix inc = x0; + for (int i = 0; i < Jn.cols(); i++) { + inc.setZero(); + inc[i] += eps; + + Eigen::Matrix fpe = func(x0 + inc); + Eigen::Matrix fme = func(x0 - inc); + + Jn.col(i) = (fpe - fme) / (2 * eps); + } + + Scalar diff = (Ja - Jn).norm(); + + if (diff > max_norm || !Ja.allFinite()) { + std::cerr << name << std::endl; + std::cerr << "Numeric Jacobian is different from analytic. Norm difference " + << diff << std::endl; + std::cerr << "Ja\n" << Ja << std::endl; + std::cerr << "Jn\n" << Jn << std::endl; + } else { + // std::cout << name << std::endl; + // std::cout << "Success" << std::endl; + // std::cout << "Ja\n" << Ja << std::endl; + // std::cout << "Jn\n" << Jn << std::endl; + } +} + +#endif // BASALT_TEST_UTILS_H diff --git a/include/basalt/utils/time_utils.hpp b/include/basalt/utils/time_utils.hpp new file mode 100644 index 0000000..6e66e1d --- /dev/null +++ b/include/basalt/utils/time_utils.hpp @@ -0,0 +1,141 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2020-2021, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace basalt { + +template +class Timer { + public: + /// start timer + Timer() : start_(Clock::now()) {} + + /// return elapsed time in seconds + double elapsed() const { + return std::chrono::duration(Clock::now() - start_).count(); + } + + /// return elapsed time in seconds and reset timer + double reset() { + const auto now = Clock::now(); + const double elapsed = std::chrono::duration(now - start_).count(); + start_ = now; + return elapsed; + } + + private: + std::chrono::time_point start_; +}; + +template +struct assign_op { + void operator()(Scalar& lhs, const Scalar& rhs) { lhs = rhs; } +}; + +template +struct plus_assign_op { + void operator()(Scalar& lhs, const Scalar& rhs) { lhs += rhs; } +}; + +template , class Assign = assign_op<>> +class ScopedTimer { + public: + explicit ScopedTimer(double& dest) : dest_(dest) {} + ~ScopedTimer() { Assign()(dest_, timer_.elapsed()); } + + private: + double& dest_; + T timer_; +}; + 
+using ScopedTimerAdd = ScopedTimer, plus_assign_op<>>; + +template +void log_timing(double& time, F fun) { + Timer timer; + fun(); + time = timer.elapsed(); +} + +template +void log_timing_add(double& time, F fun) { + Timer timer; + fun(); + time += timer.elapsed(); +} + +class ExecutionStats { + public: + struct Meta { + inline Meta& format(const std::string& s) { + format_ = s; + return *this; + } + + // overwrite the meta data from another object + void set_meta(const Meta& other) { format_ = other.format_; } + + std::variant, std::vector> data_; + + std::string format_; + }; + + Meta& add(const std::string& name, double value); + Meta& add(const std::string& name, const Eigen::VectorXd& value); + Meta& add(const std::string& name, const Eigen::VectorXf& value); + + void merge_all(const ExecutionStats& other); + + void merge_sums(const ExecutionStats& other); + + void print() const; + + bool save_json(const std::string& path) const; + + private: + std::unordered_map stats_; + std::vector order_; +}; + +} // namespace basalt diff --git a/include/basalt/utils/tracks.h b/include/basalt/utils/tracks.h new file mode 100644 index 0000000..72b2106 --- /dev/null +++ b/include/basalt/utils/tracks.h @@ -0,0 +1,222 @@ +// Adapted from OpenMVG + +// Copyright (c) 2012, 2013 Pierre MOULON +// 2018 Nikolaus DEMMEL + +// This file was originally part of OpenMVG, an Open Multiple View Geometry C++ +// library. + +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// Implementation of [1] an efficient algorithm to compute track from pairwise +// correspondences. +// +// [1] Pierre Moulon and Pascal Monasse, +// "Unordered feature tracking made fast and easy" CVMP 2012. +// +// It tracks the position of features along the series of image from pairwise +// correspondences. 
+// +// From map<[imageI,ImageJ], [indexed matches array] > it builds tracks. +// +// Usage : +// //--------------------------------------- +// // Compute tracks from matches +// //--------------------------------------- +// TrackBuilder trackBuilder; +// FeatureTracks tracks; +// trackBuilder.Build(matches); // Build: Efficient fusion of correspondences +// trackBuilder.Filter(); // Filter: Remove tracks that have conflict +// trackBuilder.Export(tracks); // Export tree to usable data structure + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace basalt { + +/// TrackBuild class creates feature tracks from matches +struct TrackBuilder { + std::map map_node_to_index; + UnionFind uf_tree; + + /// Build tracks for a given series of pairWise matches + void Build(const Matches& map_pair_wise_matches) { + // 1. We need to know how much single set we will have. + // i.e each set is made of a tuple : (imageIndex, featureIndex) + std::set allFeatures; + // For each couple of images list the used features + for (const auto& iter : map_pair_wise_matches) { + const auto I = iter.first.first; + const auto J = iter.first.second; + const MatchData& matchData = iter.second; + + // Retrieve all shared features and add them to a set + for (const auto& match : matchData.inliers) { + allFeatures.emplace(I, match.first); + allFeatures.emplace(J, match.second); + } + } + + // 2. Build the 'flat' representation where a tuple (the node) + // is attached to a unique index. + TrackId cpt = 0; + for (const auto& feat : allFeatures) { + map_node_to_index.emplace(feat, cpt); + ++cpt; + } + // Clean some memory + allFeatures.clear(); + + // 3. Add the node and the pairwise correpondences in the UF tree. + uf_tree.InitSets(map_node_to_index.size()); + + // 4. 
Union of the matched features corresponding UF tree sets + for (const auto& iter : map_pair_wise_matches) { + const auto I = iter.first.first; + const auto J = iter.first.second; + const MatchData& matchData = iter.second; + for (const auto& match : matchData.inliers) { + const ImageFeaturePair pairI(I, match.first); + const ImageFeaturePair pairJ(J, match.second); + // Link feature correspondences to the corresponding containing sets. + uf_tree.Union(map_node_to_index[pairI], map_node_to_index[pairJ]); + } + } + } + + /// Remove bad tracks (too short or track with ids collision) + bool Filter(size_t minimumTrackLength = 2) { + // Remove bad tracks: + // - track that are too short, + // - track with id conflicts: + // i.e. tracks that have many times the same image index + + // From the UF tree, create tracks of the image indexes. + // If an image index appears twice the track must disappear + // If a track is too short it has to be removed. + std::map> tracks; + + std::set problematic_track_id; + // Build tracks from the UF tree, track problematic ids. 
+ for (const auto& iter : map_node_to_index) { + const TrackId track_id = uf_tree.Find(iter.second); + if (problematic_track_id.count(track_id) != 0) { + continue; // Track already marked + } + + const ImageFeaturePair& feat = iter.first; + + if (tracks[track_id].count(feat.first)) { + problematic_track_id.insert(track_id); + } else { + tracks[track_id].insert(feat.first); + } + } + + // - track that are too short, + for (const auto& val : tracks) { + if (val.second.size() < minimumTrackLength) { + problematic_track_id.insert(val.first); + } + } + + for (uint32_t& root_index : uf_tree.m_cc_parent) { + if (problematic_track_id.count(root_index) > 0) { + // reset selected root + uf_tree.m_cc_size[root_index] = 1; + root_index = UnionFind::InvalidIndex(); + } + } + return false; + } + + /// Return the number of connected set in the UnionFind structure (tree + /// forest) + size_t TrackCount() const { + std::set parent_id(uf_tree.m_cc_parent.begin(), + uf_tree.m_cc_parent.end()); + // Erase the "special marker" that depicted rejected tracks + parent_id.erase(UnionFind::InvalidIndex()); + return parent_id.size(); + } + + /// Export tracks as a map (each entry is a map of imageId and + /// featureIndex): + /// {TrackIndex => {imageIndex => featureIndex}} + void Export(FeatureTracks& tracks) { + tracks.clear(); + for (const auto& iter : map_node_to_index) { + const TrackId track_id = uf_tree.Find(iter.second); + const ImageFeaturePair& feat = iter.first; + // ensure never add rejected elements (track marked as invalid) + if (track_id != UnionFind::InvalidIndex()) { + tracks[track_id].emplace(feat); + } + } + } +}; + +/// Find common tracks between images. 
+bool GetTracksInImages(const std::set& image_ids, + const FeatureTracks& all_tracks, + std::vector& shared_track_ids) { + shared_track_ids.clear(); + + // Go along the tracks + for (const auto& kv_track : all_tracks) { + // Look if the track contains the provided view index & save the point ids + size_t observed_image_count = 0; + for (const auto& imageId : image_ids) { + if (kv_track.second.count(imageId) > 0) { + ++observed_image_count; + } else { + break; + } + } + + if (observed_image_count == image_ids.size()) { + shared_track_ids.push_back(kv_track.first); + } + } + return !shared_track_ids.empty(); +} + +/// Find all tracks in an image. +bool GetTracksInImage(const TimeCamId& image_id, + const FeatureTracks& all_tracks, + std::vector& track_ids) { + std::set image_set; + image_set.insert(image_id); + return GetTracksInImages(image_set, all_tracks, track_ids); +} + +/// Find shared tracks between map and image +bool GetSharedTracks(const TimeCamId& image_id, const FeatureTracks& all_tracks, + const Landmarks& landmarks, + std::vector& track_ids) { + track_ids.clear(); + for (const auto& kv : landmarks) { + const TrackId trackId = kv.first; + if (all_tracks.at(trackId).count(image_id) > 0) { + track_ids.push_back(trackId); + } + } + return !track_ids.empty(); +} + +} // namespace basalt diff --git a/include/basalt/utils/union_find.h b/include/basalt/utils/union_find.h new file mode 100644 index 0000000..4f4a4c1 --- /dev/null +++ b/include/basalt/utils/union_find.h @@ -0,0 +1,94 @@ +// Adapted from OpenMVG + +// Copyright (c) 2016 Pierre MOULON +// 2018 Nikolaus DEMMEL + +// This file was originally part of OpenMVG, an Open Multiple View Geometry C++ +// library. + +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#pragma once

// Include targets were stripped by markup-mangling; restored to the four
// standard headers this file uses.
#include <cstdint>
#include <limits>
#include <numeric>
#include <vector>

// Union-Find/Disjoint-Set data structure
//--
// A disjoint-set data structure also called a union–find data structure
// or merge–find set, is a data structure that keeps track of a set of elements
// partitioned into a number of disjoint (non-overlapping) subsets.
// It supports two operations:
// - Find: Determine which subset a particular element is in.
//   - It returns an item from this set that serves as its "representative";
// - Union: Join two subsets into a single subset.
// Sometime a Connected method is implemented:
// - Connected:
//   - By comparing the result of two Find operations, one can determine whether
//     two elements are in the same subset.
//--
struct UnionFind {
  using ValueType = uint32_t;

  // Special Value for invalid parent index
  static ValueType InvalidIndex() {
    return std::numeric_limits<ValueType>::max();
  }

  // Represent the DS/UF forest thanks to two array:
  // A parent 'pointer tree' where each node holds a reference to its parent
  // node
  std::vector<ValueType> m_cc_parent;
  // A rank array used for union by rank
  std::vector<ValueType> m_cc_rank;
  // A 'size array' to know the size of each connected component
  std::vector<ValueType> m_cc_size;

  // Init the UF structure with num_cc nodes
  void InitSets(const ValueType num_cc) {
    // all set size are 1 (independent nodes)
    m_cc_size.resize(num_cc, 1);
    // Parents id have their own CC id {0,n}
    m_cc_parent.resize(num_cc);
    std::iota(m_cc_parent.begin(), m_cc_parent.end(), 0);
    // Rank array (0)
    m_cc_rank.resize(num_cc, 0);
  }

  // Return the number of nodes that have been initialized in the UF tree
  std::size_t GetNumNodes() const { return m_cc_size.size(); }

  // Return the representative set id of I nth component
  ValueType Find(ValueType i) {
    // Recursively set all branch as children of root (Path compression)
    if (m_cc_parent[i] != i && m_cc_parent[i] != InvalidIndex())
      m_cc_parent[i] = Find(m_cc_parent[i]);
    return m_cc_parent[i];
  }

  // Replace sets containing I and J with their union
  void Union(ValueType i, ValueType j) {
    i = Find(i);
    j = Find(j);
    if (i == j) {  // Already in the same set. Nothing to do
      return;
    }

    // x and y are not already in same set. Merge them.
    // Perform an union by rank:
    // - always attach the smaller tree to the root of the larger tree
    if (m_cc_rank[i] < m_cc_rank[j]) {
      m_cc_parent[i] = j;
      m_cc_size[j] += m_cc_size[i];

    } else {
      m_cc_parent[j] = i;
      m_cc_size[i] += m_cc_size[j];
      // Equal ranks: the surviving root's rank grows by one.
      if (m_cc_rank[i] == m_cc_rank[j]) ++m_cc_rank[i];
    }
  }
};

// ---- next hunk of the original patch: include/basalt/utils/vio_config.h ----
// diff --git a/include/basalt/utils/vio_config.h b/include/basalt/utils/vio_config.h
// new file mode 100644  index 0000000..cebf67f
// (BSD 3-Clause header of vio_config.h — continued on the following lines)
//
// BSD 3-Clause License
//
// This file is part of the Basalt project.
// https://gitlab.com/VladyslavUsenko/basalt.git
//
// Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
//   list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED.
/* (BSD 3-Clause disclaimer, continued from the preceding patch lines)
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once

// Restored: the include target was stripped by markup-mangling.
#include <string>

namespace basalt {

/// Which linearization/marginalization backend the estimator uses.
enum class LinearizationType { ABS_QR, ABS_SC, REL_SC };

/// Runtime configuration for the optical-flow frontend, the VIO estimator and
/// the offline mapper. Defaults are assigned in the constructor (defined in
/// the .cpp); load()/save() (de)serialize the struct from/to a file.
struct VioConfig {
  VioConfig();
  void load(const std::string& filename);
  void save(const std::string& filename);

  // --- optical-flow frontend ---
  std::string optical_flow_type;
  int optical_flow_detection_grid_size;
  float optical_flow_max_recovered_dist2;
  int optical_flow_pattern;
  int optical_flow_max_iterations;
  int optical_flow_levels;
  float optical_flow_epipolar_error;
  int optical_flow_skip_frames;

  // --- VIO estimator ---
  LinearizationType vio_linearization_type;
  bool vio_sqrt_marg;

  int vio_max_states;
  int vio_max_kfs;
  int vio_min_frames_after_kf;
  float vio_new_kf_keypoints_thresh;
  bool vio_debug;
  bool vio_extended_logging;

  // double vio_outlier_threshold;
  // int vio_filter_iteration;
  int vio_max_iterations;

  double vio_obs_std_dev;
  double vio_obs_huber_thresh;
  double vio_min_triangulation_dist;

  bool vio_enforce_realtime;

  bool vio_use_lm;
  double vio_lm_lambda_initial;
  double vio_lm_lambda_min;
  double vio_lm_lambda_max;

  bool vio_scale_jacobian;

  double vio_init_pose_weight;
  double vio_init_ba_weight;
  double vio_init_bg_weight;

  bool vio_marg_lost_landmarks;
  double vio_kf_marg_feature_ratio;

  // --- offline mapper ---
  double mapper_obs_std_dev;
  double mapper_obs_huber_thresh;
  int mapper_detection_num_points;
  double mapper_num_frames_to_match;
  double mapper_frames_to_match_threshold;
  double mapper_min_matches;
  double mapper_ransac_threshold;
  double mapper_min_track_length;
  double mapper_max_hamming_distance;
  double mapper_second_best_test_ratio;
  int mapper_bow_num_bits;
  double mapper_min_triangulation_dist;
  bool mapper_no_factor_weights;
  bool mapper_use_factors;

  bool mapper_use_lm;
  double mapper_lm_lambda_min;
  double mapper_lm_lambda_max;
};

}  // namespace basalt

// ---- next hunk of the original patch: include/basalt/utils/vis_utils.h ----
// diff --git a/include/basalt/utils/vis_utils.h b/include/basalt/utils/vis_utils.h
// new file mode 100644  index 0000000..a43d7fd
// (BSD 3-Clause header of vis_utils.h — continued on the following lines)
//
// BSD 3-Clause License
//
// This file is part of the Basalt project.
// https://gitlab.com/VladyslavUsenko/basalt.git
//
// Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
//   list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +#include + +#include + +const uint8_t cam_color[3]{250, 0, 26}; +const uint8_t state_color[3]{250, 0, 26}; +const uint8_t pose_color[3]{0, 50, 255}; +const uint8_t gt_color[3]{0, 171, 47}; + +inline void render_camera(const Eigen::Matrix4d& T_w_c, float lineWidth, + const uint8_t* color, float sizeFactor) { + const float sz = sizeFactor; + const float width = 640, height = 480, fx = 500, fy = 500, cx = 320, cy = 240; + + const Eigen::aligned_vector lines = { + {0, 0, 0}, + {sz * (0 - cx) / fx, sz * (0 - cy) / fy, sz}, + {0, 0, 0}, + {sz * (0 - cx) / fx, sz * (height - 1 - cy) / fy, sz}, + {0, 0, 0}, + {sz * (width - 1 - cx) / fx, sz * (height - 1 - cy) / fy, sz}, + {0, 0, 0}, + {sz * (width - 1 - cx) / fx, sz * (0 - cy) / fy, sz}, + {sz * (width - 1 - cx) / fx, sz * (0 - cy) / fy, sz}, + {sz * (width - 1 - cx) / fx, sz * (height - 1 - cy) / fy, sz}, + {sz * (width - 1 - cx) / fx, sz * (height - 1 - cy) / fy, sz}, + {sz * (0 - cx) / fx, sz * (height - 1 - cy) / fy, sz}, + {sz * (0 - cx) / fx, sz * (height - 1 - cy) / fy, sz}, + {sz * (0 - cx) / fx, sz * (0 - cy) / fy, sz}, + {sz * (0 - cx) / fx, sz * (0 - cy) / fy, sz}, + {sz * (width - 1 - cx) / fx, sz * (0 - cy) / fy, sz}}; + + glPushMatrix(); + glMultMatrixd(T_w_c.data()); + glColor3ubv(color); + glLineWidth(lineWidth); + pangolin::glDrawLines(lines); + glPopMatrix(); +} + +inline void getcolor(float p, float np, float& r, float& g, float& b) { + float inc = 4.0 / np; + 
float x = p * inc; + r = 0.0f; + g = 0.0f; + b = 0.0f; + + if ((0 <= x && x <= 1) || (5 <= x && x <= 6)) + r = 1.0f; + else if (4 <= x && x <= 5) + r = x - 4; + else if (1 <= x && x <= 2) + r = 1.0f - (x - 1); + + if (1 <= x && x <= 3) + g = 1.0f; + else if (0 <= x && x <= 1) + g = x - 0; + else if (3 <= x && x <= 4) + g = 1.0f - (x - 3); + + if (3 <= x && x <= 5) + b = 1.0f; + else if (2 <= x && x <= 3) + b = x - 2; + else if (5 <= x && x <= 6) + b = 1.0f - (x - 5); +} diff --git a/include/basalt/vi_estimator/ba_base.h b/include/basalt/vi_estimator/ba_base.h new file mode 100644 index 0000000..61707a7 --- /dev/null +++ b/include/basalt/vi_estimator/ba_base.h @@ -0,0 +1,167 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +namespace basalt { + +template +class BundleAdjustmentBase { + public: + using Scalar = Scalar_; + using Vec2 = Eigen::Matrix; + using Vec3 = Eigen::Matrix; + using Vec4 = Eigen::Matrix; + using Vec6 = Eigen::Matrix; + using VecX = Eigen::Matrix; + + using Mat4 = Eigen::Matrix; + using Mat6 = Eigen::Matrix; + using MatX = Eigen::Matrix; + + using SE3 = Sophus::SE3; + + void computeError(Scalar& error, + std::map>>* + outliers = nullptr, + Scalar outlier_threshold = 0) const; + + void filterOutliers(Scalar outlier_threshold, int min_num_obs); + + void optimize_single_frame_pose( + PoseStateWithLin& state_t, + const std::vector>& connected_obs) const; + + template + void get_current_points( + Eigen::aligned_vector>& points, + std::vector& ids) const; + + void computeDelta(const AbsOrderMap& marg_order, VecX& delta) const; + + void linearizeMargPrior(const MargLinData& mld, + const AbsOrderMap& aom, MatX& abs_H, VecX& abs_b, + Scalar& marg_prior_error) const; + + void computeMargPriorError(const MargLinData& mld, + Scalar& marg_prior_error) const; + + Scalar computeMargPriorModelCostChange(const MargLinData& mld, + const VecX& marg_scaling, + const VecX& marg_pose_inc) const; + + // TODO: Old version for squared H and b. Remove when no longer needed. 
+ Scalar computeModelCostChange(const MatX& H, const VecX& b, + const VecX& inc) const; + + template + void computeProjections( + std::vector>>& data, + FrameId last_state_t_ns) const; + + /// Triangulates the point and returns homogenous representation. First 3 + /// components - unit-length direction vector. Last component inverse + /// distance. + template + static Eigen::Matrix triangulate( + const Eigen::MatrixBase& f0, + const Eigen::MatrixBase& f1, + const Sophus::SE3& T_0_1) { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived, 3); + + // suffix "2" to avoid name clash with class-wide typedefs + using Scalar_2 = typename Derived::Scalar; + using Vec4_2 = Eigen::Matrix; + + Eigen::Matrix P1, P2; + P1.setIdentity(); + P2 = T_0_1.inverse().matrix3x4(); + + Eigen::Matrix A(4, 4); + A.row(0) = f0[0] * P1.row(2) - f0[2] * P1.row(0); + A.row(1) = f0[1] * P1.row(2) - f0[2] * P1.row(1); + A.row(2) = f1[0] * P2.row(2) - f1[2] * P2.row(0); + A.row(3) = f1[1] * P2.row(2) - f1[2] * P2.row(1); + + Eigen::JacobiSVD> mySVD(A, + Eigen::ComputeFullV); + Vec4_2 worldPoint = mySVD.matrixV().col(3); + worldPoint /= worldPoint.template head<3>().norm(); + + // Enforce same direction of bearing vector and initial point + if (f0.dot(worldPoint.template head<3>()) < 0) worldPoint *= -1; + + return worldPoint; + } + + inline void backup() { + for (auto& kv : frame_states) kv.second.backup(); + for (auto& kv : frame_poses) kv.second.backup(); + lmdb.backup(); + } + + inline void restore() { + for (auto& kv : frame_states) kv.second.restore(); + for (auto& kv : frame_poses) kv.second.restore(); + lmdb.restore(); + } + + // protected: + PoseStateWithLin getPoseStateWithLin(int64_t t_ns) const { + auto it = frame_poses.find(t_ns); + if (it != frame_poses.end()) return it->second; + + auto it2 = frame_states.find(t_ns); + if (it2 == frame_states.end()) { + std::cerr << "Could not find pose " << t_ns << std::endl; + std::abort(); + } + + return PoseStateWithLin(it2->second); + } + + 
Eigen::aligned_map> frame_states; + Eigen::aligned_map> frame_poses; + + // Point management + LandmarkDatabase lmdb; + + Scalar obs_std_dev; + Scalar huber_thresh; + + basalt::Calibration calib; +}; +} // namespace basalt diff --git a/include/basalt/vi_estimator/landmark_database.h b/include/basalt/vi_estimator/landmark_database.h new file mode 100644 index 0000000..1cf0f80 --- /dev/null +++ b/include/basalt/vi_estimator/landmark_database.h @@ -0,0 +1,156 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include +#include + +namespace basalt { + +template +struct KeypointObservation { + using Scalar = Scalar_; + + int kpt_id; + Eigen::Matrix pos; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +// keypoint position defined relative to some frame +template +struct Keypoint { + using Scalar = Scalar_; + using Vec2 = Eigen::Matrix; + + using ObsMap = Eigen::aligned_map; + using MapIter = typename ObsMap::iterator; + + // 3D position parameters + Vec2 direction; + Scalar inv_dist; + + // Observations + TimeCamId host_kf_id; + ObsMap obs; + + inline void backup() { + backup_direction = direction; + backup_inv_dist = inv_dist; + } + + inline void restore() { + direction = backup_direction; + inv_dist = backup_inv_dist; + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + private: + Vec2 backup_direction; + Scalar backup_inv_dist; +}; + +template +class LandmarkDatabase { + public: + using Scalar = Scalar_; + + // Non-const + void addLandmark(KeypointId lm_id, const Keypoint& pos); + + void removeFrame(const FrameId& frame); + + void removeKeyframes(const std::set& kfs_to_marg, + const std::set& poses_to_marg, + const std::set& states_to_marg_all); + + void addObservation(const TimeCamId& tcid_target, + const KeypointObservation& o); + + Keypoint& getLandmark(KeypointId lm_id); + + // Const + const Keypoint& getLandmark(KeypointId lm_id) const; + + std::vector getHostKfs() const; + + std::vector*> getLandmarksForHost( + const TimeCamId& tcid) const; + + 
const std::unordered_map>>& + getObservations() const; + + const Eigen::aligned_unordered_map>& + getLandmarks() const; + + bool landmarkExists(int lm_id) const; + + size_t numLandmarks() const; + + int numObservations() const; + + int numObservations(KeypointId lm_id) const; + + void removeLandmark(KeypointId lm_id); + + void removeObservations(KeypointId lm_id, const std::set& obs); + + inline void backup() { + for (auto& kv : kpts) kv.second.backup(); + } + + inline void restore() { + for (auto& kv : kpts) kv.second.restore(); + } + + private: + using MapIter = + typename Eigen::aligned_unordered_map>::iterator; + MapIter removeLandmarkHelper(MapIter it); + typename Keypoint::MapIter removeLandmarkObservationHelper( + MapIter it, typename Keypoint::MapIter it2); + + Eigen::aligned_unordered_map> kpts; + + std::unordered_map>> + observations; + + static constexpr int min_num_obs = 2; +}; + +} // namespace basalt diff --git a/include/basalt/vi_estimator/marg_helper.h b/include/basalt/vi_estimator/marg_helper.h new file mode 100644 index 0000000..ffe2a7e --- /dev/null +++ b/include/basalt/vi_estimator/marg_helper.h @@ -0,0 +1,68 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#pragma once + +#include +#include + +namespace basalt { + +template +class MargHelper { + public: + using Scalar = Scalar_; + using MatX = Eigen::Matrix; + using VecX = Eigen::Matrix; + + // Modifies abs_H and abs_b as a side effect. + static void marginalizeHelperSqToSq(MatX& abs_H, VecX& abs_b, + const std::set& idx_to_keep, + const std::set& idx_to_marg, + MatX& marg_H, VecX& marg_b); + + // Modifies abs_H and abs_b as a side effect. + static void marginalizeHelperSqToSqrt(MatX& abs_H, VecX& abs_b, + const std::set& idx_to_keep, + const std::set& idx_to_marg, + MatX& marg_sqrt_H, VecX& marg_sqrt_b); + + // Modifies Q2Jp and Q2r as a side effect. 
+ static void marginalizeHelperSqrtToSqrt(MatX& Q2Jp, VecX& Q2r, + const std::set& idx_to_keep, + const std::set& idx_to_marg, + MatX& marg_sqrt_H, VecX& marg_sqrt_b); +}; +} // namespace basalt diff --git a/include/basalt/vi_estimator/nfr_mapper.h b/include/basalt/vi_estimator/nfr_mapper.h new file mode 100644 index 0000000..2a3ccd4 --- /dev/null +++ b/include/basalt/vi_estimator/nfr_mapper.h @@ -0,0 +1,209 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace basalt { + +template +class HashBow; + +class NfrMapper : public ScBundleAdjustmentBase { + public: + using Scalar = double; + + using Ptr = std::shared_ptr; + + template + struct MapperLinearizeAbsReduce + : public ScBundleAdjustmentBase::LinearizeAbsReduce { + using RollPitchFactorConstIter = + Eigen::aligned_vector::const_iterator; + using RelPoseFactorConstIter = + Eigen::aligned_vector::const_iterator; + using RelLinDataConstIter = + Eigen::aligned_vector::const_iterator; + + MapperLinearizeAbsReduce( + AbsOrderMap& aom, + const Eigen::aligned_map>* + frame_poses) + : ScBundleAdjustmentBase::LinearizeAbsReduce(aom), + frame_poses(frame_poses) { + this->accum.reset(aom.total_size); + roll_pitch_error = 0; + rel_error = 0; + } + + MapperLinearizeAbsReduce(const MapperLinearizeAbsReduce& other, tbb::split) + : ScBundleAdjustmentBase::LinearizeAbsReduce(other.aom), + frame_poses(other.frame_poses) { + this->accum.reset(this->aom.total_size); + roll_pitch_error = 0; + rel_error = 0; + } + + void join(MapperLinearizeAbsReduce& rhs) { + this->accum.join(rhs.accum); + roll_pitch_error += rhs.roll_pitch_error; + rel_error += rhs.rel_error; + } + + void operator()(const tbb::blocked_range& range) { + for (const RelLinData& rld : range) { + Eigen::MatrixXd rel_H; + Eigen::VectorXd rel_b; + 
linearizeRel(rld, rel_H, rel_b); + + linearizeAbs(rel_H, rel_b, rld, this->aom, this->accum); + } + } + + void operator()(const tbb::blocked_range& range) { + for (const RollPitchFactor& rpf : range) { + const Sophus::SE3d& pose = frame_poses->at(rpf.t_ns).getPose(); + + int idx = this->aom.abs_order_map.at(rpf.t_ns).first; + + Eigen::Matrix J; + Sophus::Vector2d res = basalt::rollPitchError(pose, rpf.R_w_i_meas, &J); + + this->accum.template addH( + idx, idx, J.transpose() * rpf.cov_inv * J); + this->accum.template addB(idx, + J.transpose() * rpf.cov_inv * res); + + roll_pitch_error += res.transpose() * rpf.cov_inv * res; + } + } + + void operator()(const tbb::blocked_range& range) { + for (const RelPoseFactor& rpf : range) { + const Sophus::SE3d& pose_i = frame_poses->at(rpf.t_i_ns).getPose(); + const Sophus::SE3d& pose_j = frame_poses->at(rpf.t_j_ns).getPose(); + + int idx_i = this->aom.abs_order_map.at(rpf.t_i_ns).first; + int idx_j = this->aom.abs_order_map.at(rpf.t_j_ns).first; + + Sophus::Matrix6d Ji, Jj; + Sophus::Vector6d res = + basalt::relPoseError(rpf.T_i_j, pose_i, pose_j, &Ji, &Jj); + + this->accum.template addH( + idx_i, idx_i, Ji.transpose() * rpf.cov_inv * Ji); + this->accum.template addH( + idx_i, idx_j, Ji.transpose() * rpf.cov_inv * Jj); + this->accum.template addH( + idx_j, idx_i, Jj.transpose() * rpf.cov_inv * Ji); + this->accum.template addH( + idx_j, idx_j, Jj.transpose() * rpf.cov_inv * Jj); + + this->accum.template addB( + idx_i, Ji.transpose() * rpf.cov_inv * res); + this->accum.template addB( + idx_j, Jj.transpose() * rpf.cov_inv * res); + + rel_error += res.transpose() * rpf.cov_inv * res; + } + } + + double roll_pitch_error; + double rel_error; + + const Eigen::aligned_map>* frame_poses; + }; + + NfrMapper(const basalt::Calibration& calib, const VioConfig& config); + + void addMargData(basalt::MargData::Ptr& data); + + void processMargData(basalt::MargData& m); + + bool extractNonlinearFactors(basalt::MargData& m); + + void 
optimize(int num_iterations = 10); + + Eigen::aligned_map>& getFramePoses(); + + void computeRelPose(double& rel_error); + + void computeRollPitch(double& roll_pitch_error); + + void detect_keypoints(); + + // Feature matching and inlier filtering for stereo pairs with known pose + void match_stereo(); + + void match_all(); + + void build_tracks(); + + void setup_opt(); + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + Eigen::aligned_vector roll_pitch_factors; + Eigen::aligned_vector rel_pose_factors; + + std::unordered_map img_data; + + Corners feature_corners; + + Matches feature_matches; + + FeatureTracks feature_tracks; + + std::shared_ptr> hash_bow_database; + + VioConfig config; + + double lambda, min_lambda, max_lambda, lambda_vee; +}; +} // namespace basalt diff --git a/include/basalt/vi_estimator/sc_ba_base.h b/include/basalt/vi_estimator/sc_ba_base.h new file mode 100644 index 0000000..5001361 --- /dev/null +++ b/include/basalt/vi_estimator/sc_ba_base.h @@ -0,0 +1,452 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +#include + +namespace basalt { + +template +class ScBundleAdjustmentBase : public BundleAdjustmentBase { + public: + using Scalar = Scalar_; + using Vec2 = Eigen::Matrix; + using Vec3 = Eigen::Matrix; + using Vec6 = Eigen::Matrix; + using VecX = Eigen::Matrix; + using Mat3 = Eigen::Matrix; + using Mat4 = Eigen::Matrix; + using Mat6 = Eigen::Matrix; + using Mat63 = Eigen::Matrix; + using MatX = Eigen::Matrix; + using SO3 = Sophus::SO3; + using SE3 = Sophus::SE3; + + struct RelLinDataBase { + std::vector> order; + + Eigen::aligned_vector d_rel_d_h; + Eigen::aligned_vector d_rel_d_t; + }; + + struct FrameRelLinData { + Mat6 Hpp; + Vec6 bp; + + std::vector lm_id; + Eigen::aligned_vector Hpl; + + FrameRelLinData() { + Hpp.setZero(); + bp.setZero(); + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + }; + + struct RelLinData : public RelLinDataBase { + RelLinData(size_t num_keypoints, size_t num_rel_poses) { + Hll.reserve(num_keypoints); + Hllinv.reserve(num_keypoints); + bl.reserve(num_keypoints); + lm_to_obs.reserve(num_keypoints); + + Hpppl.reserve(num_rel_poses); + order.reserve(num_rel_poses); + + d_rel_d_h.reserve(num_rel_poses); + d_rel_d_t.reserve(num_rel_poses); + + error = 0; + } + + 
void invert_keypoint_hessians() { + for (const auto& [kpt_idx, hll] : Hll) { + Mat3 Hll_inv; + Hll_inv.setIdentity(); + // Use ldlt b/c it has good speed (no numeric indefiniteness of this 3x3 + // matrix expected), and compared ot a direct inverse (which may be even + // faster), it can handle degenerate cases where Hll is not invertible. + hll.ldlt().solveInPlace(Hll_inv); + Hllinv[kpt_idx] = Hll_inv; + } + } + + using RelLinDataBase::d_rel_d_h; + using RelLinDataBase::d_rel_d_t; + using RelLinDataBase::order; + + Eigen::aligned_unordered_map Hll; + Eigen::aligned_unordered_map Hllinv; + Eigen::aligned_unordered_map bl; + Eigen::aligned_unordered_map>> + lm_to_obs; + + Eigen::aligned_vector Hpppl; + + Scalar error; + }; + + struct FrameAbsLinData { + Mat6 Hphph; + Vec6 bph; + + Mat6 Hptpt; + Vec6 bpt; + + Mat6 Hphpt; + + std::vector lm_id; + Eigen::aligned_vector Hphl; + Eigen::aligned_vector Hptl; + + FrameAbsLinData() { + Hphph.setZero(); + Hptpt.setZero(); + Hphpt.setZero(); + + bph.setZero(); + bpt.setZero(); + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + }; + + struct AbsLinData { + AbsLinData(size_t num_keypoints, size_t num_rel_poses) { + Hll.reserve(num_keypoints); + Hllinv.reserve(num_keypoints); + bl.reserve(num_keypoints); + lm_to_obs.reserve(num_keypoints); + + Hpppl.reserve(num_rel_poses); + order.reserve(num_rel_poses); + + error = 0; + } + + void invert_keypoint_hessians() { + for (const auto& [kpt_idx, hll] : Hll) { + Mat3 Hll_inv; + Hll_inv.setIdentity(); + // Use ldlt b/c it has good speed (no numeric indefiniteness of this 3x3 + // matrix expected), and compared ot a direct inverse (which may be even + // faster), it can handle degenerate cases where Hll is not invertible. 
+ hll.ldlt().solveInPlace(Hll_inv); + Hllinv[kpt_idx] = Hll_inv; + } + } + + std::vector> order; + + Eigen::aligned_unordered_map Hll; + Eigen::aligned_unordered_map Hllinv; + Eigen::aligned_unordered_map bl; + Eigen::aligned_unordered_map>> + lm_to_obs; + + Eigen::aligned_vector Hpppl; + + Scalar error; + }; + + using BundleAdjustmentBase::computeDelta; + + void linearizeHelper( + Eigen::aligned_vector& rld_vec, + const std::unordered_map< + TimeCamId, std::map>>& obs_to_lin, + Scalar& error) const { + linearizeHelperStatic(rld_vec, obs_to_lin, this, error); + } + + void linearizeAbsHelper( + Eigen::aligned_vector& ald_vec, + const std::unordered_map< + TimeCamId, std::map>>& obs_to_lin, + Scalar& error) const { + linearizeHelperAbsStatic(ald_vec, obs_to_lin, this, error); + } + + static void linearizeHelperStatic( + Eigen::aligned_vector& rld_vec, + const std::unordered_map< + TimeCamId, std::map>>& obs_to_lin, + const BundleAdjustmentBase* ba_base, Scalar& error); + + void linearizeHelperAbs( + Eigen::aligned_vector& ald_vec, + const std::unordered_map< + TimeCamId, std::map>>& obs_to_lin, + Scalar& error) const { + linearizeHelperAbsStatic(ald_vec, obs_to_lin, this, error); + } + + static void linearizeHelperAbsStatic( + Eigen::aligned_vector& ald_vec, + const std::unordered_map< + TimeCamId, std::map>>& obs_to_lin, + const BundleAdjustmentBase* ba_base, Scalar& error); + + static void linearizeRel(const RelLinData& rld, MatX& H, VecX& b); + + static void updatePoints(const AbsOrderMap& aom, const RelLinData& rld, + const VecX& inc, LandmarkDatabase& lmdb, + Scalar* l_diff = nullptr); + + static void updatePointsAbs(const AbsOrderMap& aom, const AbsLinData& ald, + const VecX& inc, LandmarkDatabase& lmdb, + Scalar* l_diff = nullptr); + + static Eigen::VectorXd checkNullspace( + const MatX& H, const VecX& b, const AbsOrderMap& marg_order, + const Eigen::aligned_map>& + frame_states, + const Eigen::aligned_map>& frame_poses, + bool verbose = true); + + static 
Eigen::VectorXd checkEigenvalues(const MatX& H, bool verbose = true); + + static void computeImuError( + const AbsOrderMap& aom, Scalar& imu_error, Scalar& bg_error, + Scalar& ba_error, + const Eigen::aligned_map>& + states, + const Eigen::aligned_map>& + imu_meas, + const Vec3& gyro_bias_weight, const Vec3& accel_bias_weight, + const Vec3& g); + + template + static void linearizeAbs(const MatX& rel_H, const VecX& rel_b, + const RelLinDataBase& rld, const AbsOrderMap& aom, + AccumT& accum) { + // int asize = aom.total_size; + + // BASALT_ASSERT(abs_H.cols() == asize); + // BASALT_ASSERT(abs_H.rows() == asize); + // BASALT_ASSERT(abs_b.rows() == asize); + + for (size_t i = 0; i < rld.order.size(); i++) { + const TimeCamId& tcid_h = rld.order[i].first; + const TimeCamId& tcid_ti = rld.order[i].second; + + int abs_h_idx = aom.abs_order_map.at(tcid_h.frame_id).first; + int abs_ti_idx = aom.abs_order_map.at(tcid_ti.frame_id).first; + + accum.template addB( + abs_h_idx, rld.d_rel_d_h[i].transpose() * + rel_b.template segment(i * POSE_SIZE)); + accum.template addB( + abs_ti_idx, rld.d_rel_d_t[i].transpose() * + rel_b.template segment(i * POSE_SIZE)); + + for (size_t j = 0; j < rld.order.size(); j++) { + BASALT_ASSERT(rld.order[i].first == rld.order[j].first); + + const TimeCamId& tcid_tj = rld.order[j].second; + + int abs_tj_idx = aom.abs_order_map.at(tcid_tj.frame_id).first; + + if (tcid_h.frame_id == tcid_ti.frame_id || + tcid_h.frame_id == tcid_tj.frame_id) + continue; + + accum.template addH( + abs_h_idx, abs_h_idx, + rld.d_rel_d_h[i].transpose() * + rel_H.template block(POSE_SIZE * i, + POSE_SIZE * j) * + rld.d_rel_d_h[j]); + + accum.template addH( + abs_ti_idx, abs_h_idx, + rld.d_rel_d_t[i].transpose() * + rel_H.template block(POSE_SIZE * i, + POSE_SIZE * j) * + rld.d_rel_d_h[j]); + + accum.template addH( + abs_h_idx, abs_tj_idx, + rld.d_rel_d_h[i].transpose() * + rel_H.template block(POSE_SIZE * i, + POSE_SIZE * j) * + rld.d_rel_d_t[j]); + + accum.template addH( + 
abs_ti_idx, abs_tj_idx, + rld.d_rel_d_t[i].transpose() * + rel_H.template block(POSE_SIZE * i, + POSE_SIZE * j) * + rld.d_rel_d_t[j]); + } + } + } + + template + static void linearizeAbs(const AbsLinData& ald, const AbsOrderMap& aom, + AccumT& accum) { + for (size_t i = 0; i < ald.order.size(); i++) { + const TimeCamId& tcid_h = ald.order[i].first; + const TimeCamId& tcid_ti = ald.order[i].second; + + int abs_h_idx = aom.abs_order_map.at(tcid_h.frame_id).first; + int abs_ti_idx = aom.abs_order_map.at(tcid_ti.frame_id).first; + + const FrameAbsLinData& fald = ald.Hpppl.at(i); + + // Pose H and b part + accum.template addH(abs_h_idx, abs_h_idx, + fald.Hphph); + accum.template addH(abs_ti_idx, abs_ti_idx, + fald.Hptpt); + accum.template addH(abs_h_idx, abs_ti_idx, + fald.Hphpt); + accum.template addH(abs_ti_idx, abs_h_idx, + fald.Hphpt.transpose()); + + accum.template addB(abs_h_idx, fald.bph); + accum.template addB(abs_ti_idx, fald.bpt); + + // Schur complement for landmark part + for (size_t j = 0; j < fald.lm_id.size(); j++) { + Eigen::Matrix H_phl_H_ll_inv, H_ptl_H_ll_inv; + int lm_id = fald.lm_id.at(j); + + H_phl_H_ll_inv = fald.Hphl[j] * ald.Hllinv.at(lm_id); + H_ptl_H_ll_inv = fald.Hptl[j] * ald.Hllinv.at(lm_id); + + accum.template addB(abs_h_idx, + -H_phl_H_ll_inv * ald.bl.at(lm_id)); + accum.template addB(abs_ti_idx, + -H_ptl_H_ll_inv * ald.bl.at(lm_id)); + + const auto& other_obs = ald.lm_to_obs.at(lm_id); + + for (size_t k = 0; k < other_obs.size(); k++) { + int other_frame_idx = other_obs[k].first; + int other_lm_idx = other_obs[k].second; + const FrameAbsLinData& fald_other = ald.Hpppl.at(other_frame_idx); + const TimeCamId& tcid_hk = ald.order.at(other_frame_idx).first; + const TimeCamId& tcid_tk = ald.order.at(other_frame_idx).second; + + // Assume same host frame + BASALT_ASSERT(tcid_hk.frame_id == tcid_h.frame_id && + tcid_hk.cam_id == tcid_h.cam_id); + + int abs_tk_idx = aom.abs_order_map.at(tcid_tk.frame_id).first; + + Eigen::Matrix H_l_ph_other = + 
fald_other.Hphl[other_lm_idx].transpose(); + + Eigen::Matrix H_l_pt_other = + fald_other.Hptl[other_lm_idx].transpose(); + + accum.template addH( + abs_h_idx, abs_h_idx, -H_phl_H_ll_inv * H_l_ph_other); + accum.template addH( + abs_ti_idx, abs_h_idx, -H_ptl_H_ll_inv * H_l_ph_other); + accum.template addH( + abs_h_idx, abs_tk_idx, -H_phl_H_ll_inv * H_l_pt_other); + accum.template addH( + abs_ti_idx, abs_tk_idx, -H_ptl_H_ll_inv * H_l_pt_other); + } + } + } + } + + template + struct LinearizeAbsReduce { + static_assert(std::is_same_v); + + using RelLinConstDataIter = + typename Eigen::aligned_vector::const_iterator; + + LinearizeAbsReduce(const AbsOrderMap& aom) : aom(aom) { + accum.reset(aom.total_size); + } + + LinearizeAbsReduce(const LinearizeAbsReduce& other, tbb::split) + : aom(other.aom) { + accum.reset(aom.total_size); + } + + void operator()(const tbb::blocked_range& range) { + for (const RelLinData& rld : range) { + MatX rel_H; + VecX rel_b; + linearizeRel(rld, rel_H, rel_b); + + linearizeAbs(rel_H, rel_b, rld, aom, accum); + } + } + + void join(LinearizeAbsReduce& rhs) { accum.join(rhs.accum); } + + const AbsOrderMap& aom; + AccumT accum; + }; + + template + struct LinearizeAbsReduce2 { + static_assert(std::is_same_v); + + using AbsLinDataConstIter = + typename Eigen::aligned_vector::const_iterator; + + LinearizeAbsReduce2(const AbsOrderMap& aom) : aom(aom) { + accum.reset(aom.total_size); + } + + LinearizeAbsReduce2(const LinearizeAbsReduce2& other, tbb::split) + : aom(other.aom) { + accum.reset(aom.total_size); + } + + void operator()(const tbb::blocked_range& range) { + for (const AbsLinData& ald : range) { + linearizeAbs(ald, aom, accum); + } + } + + void join(LinearizeAbsReduce2& rhs) { accum.join(rhs.accum); } + + const AbsOrderMap& aom; + AccumT accum; + }; +}; +} // namespace basalt diff --git a/include/basalt/vi_estimator/sqrt_ba_base.h b/include/basalt/vi_estimator/sqrt_ba_base.h new file mode 100644 index 0000000..aced339 --- /dev/null +++ 
b/include/basalt/vi_estimator/sqrt_ba_base.h @@ -0,0 +1,124 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#pragma once + +#include +#include + +namespace basalt { + +template +class SqrtBundleAdjustmentBase : public BundleAdjustmentBase { + public: + using Scalar = Scalar_; + using Vec3 = Eigen::Matrix; + using VecX = Eigen::Matrix; + using Mat3 = Eigen::Matrix; + using MatX = Eigen::Matrix; + using SO3 = Sophus::SO3; + + using RelLinDataBase = + typename ScBundleAdjustmentBase::RelLinDataBase; + using FrameRelLinData = + typename ScBundleAdjustmentBase::FrameRelLinData; + using RelLinData = typename ScBundleAdjustmentBase::RelLinData; + + using FrameAbsLinData = + typename ScBundleAdjustmentBase::FrameAbsLinData; + using AbsLinData = typename ScBundleAdjustmentBase::AbsLinData; + + using BundleAdjustmentBase::computeDelta; + + void linearizeHelper( + Eigen::aligned_vector& rld_vec, + const std::unordered_map< + TimeCamId, std::map>>& obs_to_lin, + Scalar& error) const { + ScBundleAdjustmentBase::linearizeHelperStatic(rld_vec, obs_to_lin, + this, error); + } + + void linearizeAbsHelper( + Eigen::aligned_vector& ald_vec, + const std::unordered_map< + TimeCamId, std::map>>& obs_to_lin, + Scalar& error) const { + ScBundleAdjustmentBase::linearizeHelperAbsStatic( + ald_vec, obs_to_lin, this, error); + } + + static void linearizeRel(const RelLinData& rld, MatX& H, VecX& b) { + ScBundleAdjustmentBase::linearizeRel(rld, H, b); + } + + static void updatePoints(const AbsOrderMap& aom, const RelLinData& rld, + const VecX& inc, LandmarkDatabase& lmdb, + Scalar* l_diff = nullptr) { + ScBundleAdjustmentBase::updatePoints(aom, rld, inc, lmdb, l_diff); + } + + static void updatePointsAbs(const AbsOrderMap& aom, const AbsLinData& ald, + const VecX& inc, LandmarkDatabase& lmdb, + Scalar* l_diff = nullptr) { + ScBundleAdjustmentBase::updatePointsAbs(aom, ald, inc, lmdb, + l_diff); + } + + static Eigen::VectorXd checkNullspace( + const MargLinData& mld, + const Eigen::aligned_map>& + frame_states, + const Eigen::aligned_map>& frame_poses, + bool verbose = true); + + static 
Eigen::VectorXd checkEigenvalues(const MargLinData& mld, + bool verbose = true); + + template + static void linearizeAbs(const MatX& rel_H, const VecX& rel_b, + const RelLinDataBase& rld, const AbsOrderMap& aom, + AccumT& accum) { + return ScBundleAdjustmentBase::template linearizeAbs( + rel_H, rel_b, rld, aom, accum); + } + + template + static void linearizeAbs(const AbsLinData& ald, const AbsOrderMap& aom, + AccumT& accum) { + return ScBundleAdjustmentBase::template linearizeAbs( + ald, aom, accum); + } +}; +} // namespace basalt diff --git a/include/basalt/vi_estimator/sqrt_keypoint_vio.h b/include/basalt/vi_estimator/sqrt_keypoint_vio.h new file mode 100644 index 0000000..69494db --- /dev/null +++ b/include/basalt/vi_estimator/sqrt_keypoint_vio.h @@ -0,0 +1,261 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +#include +#include + +#include +#include + +#include + +namespace basalt { + +template +class SqrtKeypointVioEstimator : public VioEstimatorBase, + public SqrtBundleAdjustmentBase { + public: + using Scalar = Scalar_; + + typedef std::shared_ptr Ptr; + + static const int N = 9; + using Vec2 = Eigen::Matrix; + using Vec3 = Eigen::Matrix; + using Vec4 = Eigen::Matrix; + using VecN = Eigen::Matrix; + using VecX = Eigen::Matrix; + using MatN3 = Eigen::Matrix; + using MatX = Eigen::Matrix; + using SE3 = Sophus::SE3; + + using typename SqrtBundleAdjustmentBase::RelLinData; + using typename SqrtBundleAdjustmentBase::AbsLinData; + + using BundleAdjustmentBase::computeError; + using BundleAdjustmentBase::get_current_points; + using BundleAdjustmentBase::computeDelta; + using BundleAdjustmentBase::computeProjections; + using BundleAdjustmentBase::triangulate; + using BundleAdjustmentBase::backup; + using BundleAdjustmentBase::restore; + using BundleAdjustmentBase::getPoseStateWithLin; + using BundleAdjustmentBase::computeModelCostChange; + + using SqrtBundleAdjustmentBase::linearizeHelper; + using SqrtBundleAdjustmentBase::linearizeAbsHelper; + using SqrtBundleAdjustmentBase::linearizeRel; + using SqrtBundleAdjustmentBase::linearizeAbs; + using SqrtBundleAdjustmentBase::updatePoints; + using SqrtBundleAdjustmentBase::updatePointsAbs; + using SqrtBundleAdjustmentBase::linearizeMargPrior; + using 
SqrtBundleAdjustmentBase::computeMargPriorError; + using SqrtBundleAdjustmentBase::computeMargPriorModelCostChange; + using SqrtBundleAdjustmentBase::checkNullspace; + using SqrtBundleAdjustmentBase::checkEigenvalues; + + SqrtKeypointVioEstimator(const Eigen::Vector3d& g, + const basalt::Calibration& calib, + const VioConfig& config); + + void initialize(int64_t t_ns, const Sophus::SE3d& T_w_i, + const Eigen::Vector3d& vel_w_i, const Eigen::Vector3d& bg, + const Eigen::Vector3d& ba) override; + + void initialize(const Eigen::Vector3d& bg, + const Eigen::Vector3d& ba) override; + + virtual ~SqrtKeypointVioEstimator() { maybe_join(); } + + inline void maybe_join() override { + if (processing_thread) { + processing_thread->join(); + processing_thread.reset(); + } + } + + void addIMUToQueue(const ImuData::Ptr& data) override; + void addVisionToQueue(const OpticalFlowResult::Ptr& data) override; + + typename ImuData::Ptr popFromImuDataQueue(); + + bool measure(const OpticalFlowResult::Ptr& opt_flow_meas, + const typename IntegratedImuMeasurement::Ptr& meas); + + // int64_t propagate(); + // void addNewState(int64_t data_t_ns); + + void optimize_and_marg(const std::map& num_points_connected, + const std::unordered_set& lost_landmaks); + + void marginalize(const std::map& num_points_connected, + const std::unordered_set& lost_landmaks); + void optimize(); + + void debug_finalize() override; + + void logMargNullspace(); + Eigen::VectorXd checkMargNullspace() const; + Eigen::VectorXd checkMargEigenvalues() const; + + int64_t get_t_ns() const { + return frame_states.at(last_state_t_ns).getState().t_ns; + } + const SE3& get_T_w_i() const { + return frame_states.at(last_state_t_ns).getState().T_w_i; + } + const Vec3& get_vel_w_i() const { + return frame_states.at(last_state_t_ns).getState().vel_w_i; + } + + const PoseVelBiasState& get_state() const { + return frame_states.at(last_state_t_ns).getState(); + } + PoseVelBiasState get_state(int64_t t_ns) const { + PoseVelBiasState 
state; + + auto it = frame_states.find(t_ns); + + if (it != frame_states.end()) { + return it->second.getState(); + } + + auto it2 = frame_poses.find(t_ns); + if (it2 != frame_poses.end()) { + state.T_w_i = it2->second.getPose(); + } + + return state; + } + + void setMaxStates(size_t val) override { max_states = val; } + void setMaxKfs(size_t val) override { max_kfs = val; } + + Eigen::aligned_vector getFrameStates() const { + Eigen::aligned_vector res; + + for (const auto& kv : frame_states) { + res.push_back(kv.second.getState().T_w_i); + } + + return res; + } + + Eigen::aligned_vector getFramePoses() const { + Eigen::aligned_vector res; + + for (const auto& kv : frame_poses) { + res.push_back(kv.second.getPose()); + } + + return res; + } + + Eigen::aligned_map getAllPosesMap() const { + Eigen::aligned_map res; + + for (const auto& kv : frame_poses) { + res[kv.first] = kv.second.getPose(); + } + + for (const auto& kv : frame_states) { + res[kv.first] = kv.second.getState().T_w_i; + } + + return res; + } + + Sophus::SE3d getT_w_i_init() override { + return T_w_i_init.template cast(); + } + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + private: + using BundleAdjustmentBase::frame_poses; + using BundleAdjustmentBase::frame_states; + using BundleAdjustmentBase::lmdb; + using BundleAdjustmentBase::obs_std_dev; + using BundleAdjustmentBase::huber_thresh; + using BundleAdjustmentBase::calib; + + private: + bool take_kf; + int frames_after_kf; + std::set kf_ids; + + int64_t last_state_t_ns; + Eigen::aligned_map> imu_meas; + + const Vec3 g; + + // Input + + Eigen::aligned_map prev_opt_flow_res; + + std::map num_points_kf; + + // Marginalization + MargLinData marg_data; + + // Used only for debug and log purporses. 
+ MargLinData nullspace_marg_data; + + Vec3 gyro_bias_sqrt_weight, accel_bias_sqrt_weight; + + size_t max_states; + size_t max_kfs; + + SE3 T_w_i_init; + + bool initialized; + bool opt_started; + + VioConfig config; + + constexpr static Scalar vee_factor = Scalar(2.0); + constexpr static Scalar initial_vee = Scalar(2.0); + Scalar lambda, min_lambda, max_lambda, lambda_vee; + + std::shared_ptr processing_thread; + + // timing and stats + ExecutionStats stats_all_; + ExecutionStats stats_sums_; +}; +} // namespace basalt diff --git a/include/basalt/vi_estimator/sqrt_keypoint_vo.h b/include/basalt/vi_estimator/sqrt_keypoint_vo.h new file mode 100644 index 0000000..b20124b --- /dev/null +++ b/include/basalt/vi_estimator/sqrt_keypoint_vo.h @@ -0,0 +1,251 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +#include +#include + +#include + +namespace basalt { + +template +class SqrtKeypointVoEstimator : public VioEstimatorBase, + public SqrtBundleAdjustmentBase { + public: + using Scalar = Scalar_; + + typedef std::shared_ptr Ptr; + + static const int N = 9; + using Vec2 = Eigen::Matrix; + using Vec3 = Eigen::Matrix; + using Vec4 = Eigen::Matrix; + using VecN = Eigen::Matrix; + using VecX = Eigen::Matrix; + using MatN3 = Eigen::Matrix; + using MatX = Eigen::Matrix; + using SE3 = Sophus::SE3; + + using typename SqrtBundleAdjustmentBase::RelLinData; + using typename SqrtBundleAdjustmentBase::AbsLinData; + + using BundleAdjustmentBase::computeError; + using BundleAdjustmentBase::get_current_points; + using BundleAdjustmentBase::computeDelta; + using BundleAdjustmentBase::computeProjections; + using BundleAdjustmentBase::triangulate; + using BundleAdjustmentBase::backup; + using BundleAdjustmentBase::restore; + using BundleAdjustmentBase::getPoseStateWithLin; + using BundleAdjustmentBase::computeModelCostChange; + + using SqrtBundleAdjustmentBase::linearizeHelper; + using SqrtBundleAdjustmentBase::linearizeAbsHelper; + using SqrtBundleAdjustmentBase::linearizeRel; + using SqrtBundleAdjustmentBase::linearizeAbs; + using SqrtBundleAdjustmentBase::updatePoints; + using SqrtBundleAdjustmentBase::updatePointsAbs; + using SqrtBundleAdjustmentBase::linearizeMargPrior; + using SqrtBundleAdjustmentBase::computeMargPriorError; + using 
SqrtBundleAdjustmentBase::computeMargPriorModelCostChange; + using SqrtBundleAdjustmentBase::checkNullspace; + using SqrtBundleAdjustmentBase::checkEigenvalues; + + SqrtKeypointVoEstimator(const basalt::Calibration& calib, + const VioConfig& config); + + void initialize(int64_t t_ns, const Sophus::SE3d& T_w_i, + const Eigen::Vector3d& vel_w_i, const Eigen::Vector3d& bg, + const Eigen::Vector3d& ba) override; + + void initialize(const Eigen::Vector3d& bg, + const Eigen::Vector3d& ba) override; + + virtual ~SqrtKeypointVoEstimator() { maybe_join(); } + + inline void maybe_join() override { + if (processing_thread) { + processing_thread->join(); + processing_thread.reset(); + } + } + + void addIMUToQueue(const ImuData::Ptr& data) override; + void addVisionToQueue(const OpticalFlowResult::Ptr& data) override; + + bool measure(const OpticalFlowResult::Ptr& opt_flow_meas, bool add_frame); + + // int64_t propagate(); + // void addNewState(int64_t data_t_ns); + + void optimize_and_marg(const std::map& num_points_connected, + const std::unordered_set& lost_landmaks); + + void marginalize(const std::map& num_points_connected, + const std::unordered_set& lost_landmaks); + void optimize(); + + void logMargNullspace(); + Eigen::VectorXd checkMargNullspace() const; + Eigen::VectorXd checkMargEigenvalues() const; + + int64_t get_t_ns() const { + return frame_states.at(last_state_t_ns).getState().t_ns; + } + const SE3& get_T_w_i() const { + return frame_states.at(last_state_t_ns).getState().T_w_i; + } + const Vec3& get_vel_w_i() const { + return frame_states.at(last_state_t_ns).getState().vel_w_i; + } + + const PoseVelBiasState& get_state() const { + return frame_states.at(last_state_t_ns).getState(); + } + PoseVelBiasState get_state(int64_t t_ns) const { + PoseVelBiasState state; + + auto it = frame_states.find(t_ns); + + if (it != frame_states.end()) { + return it->second.getState(); + } + + auto it2 = frame_poses.find(t_ns); + if (it2 != frame_poses.end()) { + state.T_w_i = 
it2->second.getPose(); + } + + return state; + } + + void setMaxStates(size_t val) override { max_states = val; } + void setMaxKfs(size_t val) override { max_kfs = val; } + + Eigen::aligned_vector getFrameStates() const { + Eigen::aligned_vector res; + + for (const auto& kv : frame_states) { + res.push_back(kv.second.getState().T_w_i); + } + + return res; + } + + Eigen::aligned_vector getFramePoses() const { + Eigen::aligned_vector res; + + for (const auto& kv : frame_poses) { + res.push_back(kv.second.getPose()); + } + + return res; + } + + Eigen::aligned_map getAllPosesMap() const { + Eigen::aligned_map res; + + for (const auto& kv : frame_poses) { + res[kv.first] = kv.second.getPose(); + } + + for (const auto& kv : frame_states) { + res[kv.first] = kv.second.getState().T_w_i; + } + + return res; + } + + Sophus::SE3d getT_w_i_init() override { + return T_w_i_init.template cast(); + } + + void debug_finalize() override; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + private: + using BundleAdjustmentBase::frame_poses; + using BundleAdjustmentBase::frame_states; + using BundleAdjustmentBase::lmdb; + using BundleAdjustmentBase::obs_std_dev; + using BundleAdjustmentBase::huber_thresh; + using BundleAdjustmentBase::calib; + + private: + bool take_kf; // true if next frame should become kf + int frames_after_kf; // number of frames since last kf + std::set kf_ids; // sliding window frame ids + + // timestamp of latest state in the sliding window + // TODO: check and document when this is equal to kf_ids.rbegin() and when + // not. It may be only relevant for VIO, not VO. + int64_t last_state_t_ns = -1; + + // Input + + Eigen::aligned_map prev_opt_flow_res; + + std::map num_points_kf; + + // Marginalization + MargLinData marg_data; + + // Used only for debug and log purporses. 
+ MargLinData nullspace_marg_data; + + size_t max_states; + size_t max_kfs; + + SE3 T_w_i_init; + + bool initialized; + + VioConfig config; + + constexpr static Scalar vee_factor = Scalar(2.0); + constexpr static Scalar initial_vee = Scalar(2.0); + Scalar lambda, min_lambda, max_lambda, lambda_vee; + + std::shared_ptr processing_thread; + + // timing and stats + ExecutionStats stats_all_; + ExecutionStats stats_sums_; +}; +} // namespace basalt diff --git a/include/basalt/vi_estimator/vio_estimator.h b/include/basalt/vi_estimator/vio_estimator.h new file mode 100644 index 0000000..9b755a0 --- /dev/null +++ b/include/basalt/vi_estimator/vio_estimator.h @@ -0,0 +1,136 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#pragma once + +#include + +#include +#include + +namespace basalt { + +struct VioVisualizationData { + typedef std::shared_ptr Ptr; + + int64_t t_ns; + + Eigen::aligned_vector states; + Eigen::aligned_vector frames; + + Eigen::aligned_vector points; + std::vector point_ids; + + OpticalFlowResult::Ptr opt_flow_res; + + std::vector> projections; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +class VioEstimatorBase { + public: + typedef std::shared_ptr Ptr; + + VioEstimatorBase() + : out_state_queue(nullptr), + out_marg_queue(nullptr), + out_vis_queue(nullptr) { + vision_data_queue.set_capacity(10); + imu_data_queue.set_capacity(300); + last_processed_t_ns = 0; + finished = false; + } + + std::atomic last_processed_t_ns; + std::atomic finished; + + tbb::concurrent_bounded_queue vision_data_queue; + tbb::concurrent_bounded_queue::Ptr> imu_data_queue; + + tbb::concurrent_bounded_queue::Ptr>* + out_state_queue = nullptr; + tbb::concurrent_bounded_queue* out_marg_queue = nullptr; + tbb::concurrent_bounded_queue* out_vis_queue = + nullptr; + + virtual void initialize(int64_t t_ns, const Sophus::SE3d& T_w_i, + const Eigen::Vector3d& vel_w_i, + const Eigen::Vector3d& bg, + const Eigen::Vector3d& ba) = 0; + + virtual void initialize(const Eigen::Vector3d& bg, + const Eigen::Vector3d& ba) = 0; + + virtual void maybe_join() = 0; + + virtual inline void drain_input_queues() { + // Input threads should abort when vio is finished, but might be stuck in + // full push 
to full queue. So this can help to drain queues after joining + // the processing thread. + while (!imu_data_queue.empty()) { + ImuData::Ptr d; + imu_data_queue.pop(d); + } + while (!vision_data_queue.empty()) { + OpticalFlowResult::Ptr d; + vision_data_queue.pop(d); + } + } + + virtual inline void debug_finalize() {} + + virtual Sophus::SE3d getT_w_i_init() = 0; + + // Legacy functions. Should not be used in the new code. + virtual void setMaxStates(size_t val) = 0; + virtual void setMaxKfs(size_t val) = 0; + + virtual void addIMUToQueue(const ImuData::Ptr& data) = 0; + virtual void addVisionToQueue(const OpticalFlowResult::Ptr& data) = 0; +}; + +class VioEstimatorFactory { + public: + static VioEstimatorBase::Ptr getVioEstimator(const VioConfig& config, + const Calibration& cam, + const Eigen::Vector3d& g, + bool use_imu, bool use_double); +}; + +double alignSVD(const std::vector& filter_t_ns, + const Eigen::aligned_vector& filter_t_w_i, + const std::vector& gt_t_ns, + Eigen::aligned_vector& gt_t_w_i); +} // namespace basalt diff --git a/python/basalt/__init__.py b/python/basalt/__init__.py new file mode 100644 index 0000000..4b936c8 --- /dev/null +++ b/python/basalt/__init__.py @@ -0,0 +1,9 @@ +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. +# diff --git a/python/basalt/experiments.py b/python/basalt/experiments.py new file mode 100644 index 0000000..c13a94e --- /dev/null +++ b/python/basalt/experiments.py @@ -0,0 +1,614 @@ +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. 
+# +import os +import re +import json +import hashlib +import pickle +import toml +import argparse + +from string import Template +from glob import glob +from collections.abc import Mapping +from munch import Munch +from munch import munchify +from copy import deepcopy +from collections import abc + +from .run import Run + +from .util import copy_subdict + +_CURRENT_CACHE_VERSION = '1.3' +"""cache version that can be incremented to invalidate all cache files in case the format changes""" + + +def version_less(vstr1, vstr2): + """Order for sorting versions in the format a.b.c""" + return vstr1.split(".") < vstr2.split(".") + + +def compute_caching_hash(d): + """Generate a hash from a dictionary to use as a cache file name + + This is intended to be used for experiments cache files + """ + string = json.dumps(d, sort_keys=True, ensure_ascii=True) + h = hashlib.sha1() + h.update(string.encode('utf8')) + return h.hexdigest() + + +class Experiment: + """Holds the logs for one experiment: a single odometry config run on a set of sequences + + For one experiment, each sequence may have at most one Run. + + Since for each run we have multiple log files and there may be many runs, we + cache the loaded configs / output / log files (after preprocessing) into a single + binary cache file (pickle). This significantly speeds up loading results when + we have many experiments defined in a single experiments config file. + """ + + def __init__(self, + log_dirs, + name, + display_name=None, + description=None, + caching_hash=None, + spec=None, + seq_name_mapping=None, + extend=None, + extend_override=False): + """Load an experiment and all it's runs from a set of directories + + There may be no duplicate runs of the same sequence. 
+ + :param log_dirs: list of directories to look for runs in + :param name: experiment name + :param display_name: optional experiment display name + :param description: optional experiment description + :param caching_hash: own caching hash; mostly used to combine this hash with the has of extending experiments + :param spec: the config spec for this experiment; mainly informational for informative error messages; the + functionally relevant information has already be extracted an preprocessed (other arguments) + :param seq_name_mapping: optional mapping of sequence names; may contain only part of the sequences + :param extend: optionally provide base experiment whose runs are copied (and possibly extended) + :param extend_override: if True, sequences in the extended experiment may be replaced, if they are also found in `log_dirs` + """ + + self.name = name + self.display_name = display_name + self.description = description + self.caching_hash = caching_hash + self.spec = spec + self.runs = dict() + + if extend is not None: + for k, v in extend.runs.items(): + self.runs[k] = deepcopy(v) + + seqs_ok_to_override = set(self.runs.keys()) if extend_override else set() + + for d in log_dirs: + run = Run(d, seq_name_mapping) + if run.seq_name in self.runs: + if run.seq_name in seqs_ok_to_override: + seqs_ok_to_override.remove(run.seq_name) # ok only once + else: + if extend is not None and run.seq_name in extend.runs and not extend_override: + raise RuntimeError( + str.format( + "{} appears both in the extended experiment {} and in the extending " + "experiment {} but extend_override is False:\n - {}\n - {}\n", run.seq_name, + extend.name, self.name, extend.runs[run.seq_name].dirpath, run.dirpath)) + else: + raise RuntimeError( + str.format( + "{} appears multiple times in experiment {}:\n - {}\n - {}\n" + "Do your experiment pattern(s) '{}' match too many directories? 
" + "Delete the additional runs or narrow the pattern.", run.seq_name, self.name, + self.runs[run.seq_name].dirpath, run.dirpath, "', '".join(self.spec["pattern"]))) + self.runs[run.seq_name] = run + + def sequences(self, filter_regex=None): + """return list of sequence names found for this experiment + + :param filter_regex: if provided, return only they sequences that match the regex + """ + if filter_regex is None: + return self.runs.keys() + else: + return [k for k in self.runs.keys() if re.search(filter_regex, k)] + + @staticmethod + def load_spec(spec, base_path, cache_dir, seq_name_mapping=None, extra_filter_regex=None, other_specs=[]): + """Load a single experiment from logs or cache + + The cache key is determined by the 'pattern', 'filter_regex' and 'extend' keys + in the spec. That means changing the name or display name for example doesn't + invalidate the cache. If the experiment is not found in cache, it is loaded from + the run directories and then saved in cache. + + :param spec: experiment spec from the config file + :param base_path: base folder to search for run dirs in + :param cache_dir: cache directory + :param seq_name_mapping: optional sequence name mapping + :param extra_filter_regex: additional filter to limit the loaded sequences on top of what is defined in the spec; if set, caching is disabled + :param other_specs: other experiment specs in case our spec has the 'extend' option defined + :return: loaded Experiment object + """ + + # disable cache if extra filtering + if extra_filter_regex is not None: + cache_dir = None + + # extending some other experiment: + extend = None + if "extend" in spec: + other_spec = next((s for s in other_specs if s.name == spec.extend), None) + if other_spec is None: + raise RuntimeError("Experiment {} extends unknown experiment {}.".format(spec.name, spec.extend)) + extend = Experiment.load_spec(other_spec, + base_path, + cache_dir, + seq_name_mapping=seq_name_mapping, + 
extra_filter_regex=extra_filter_regex, + other_specs=other_specs) + + caching_hash = None + if cache_dir: + caching_spec = copy_subdict(spec, ["pattern", "filter_regex"]) + if extend is not None: + caching_spec["extend"] = extend.caching_hash + caching_hash = compute_caching_hash(caching_spec) + + cache_filename = "experiment-cache-{}.pickle".format(caching_hash) + cache_path = os.path.join(cache_dir, cache_filename) + + if os.path.isfile(cache_path): + if not spec.overwrite_cache: + with open(cache_path, 'rb') as f: + cache = pickle.load(f) + if cache.version != _CURRENT_CACHE_VERSION: + print("> experiment: {} (cache {} has version {}; expected {})".format( + spec.name, cache_path, cache.version, _CURRENT_CACHE_VERSION)) + else: + print("> experiment: {} (from cache: {})".format(spec.name, cache_path)) + exp = cache.experiment + # overwrite names according to config + exp.name = spec.name + exp.display_name = spec.display_name + exp.description = spec.description + return exp + else: + print("> experiment: {} (overwrite cache: {})".format(spec.name, cache_path)) + else: + print("> experiment: {} (cache doesn't exist: {})".format(spec.name, cache_path)) + else: + print("> experiment: {}".format(spec.name)) + + log_dirs = Experiment.get_log_dirs(base_path, spec, filter_regex=extra_filter_regex) + + kwargs = copy_subdict(spec, ["name", "display_name", "description", "extend_override"]) + exp = Experiment(log_dirs, + caching_hash=caching_hash, + seq_name_mapping=seq_name_mapping, + extend=extend, + spec=deepcopy(spec), + **kwargs) + + if cache_dir: + cache = Munch(version=_CURRENT_CACHE_VERSION, experiment=exp, spec=caching_spec) + os.makedirs(cache_dir, exist_ok=True) + with open(cache_path, 'wb') as f: + pickle.dump(cache, f) + print("experiment {} -> saved cache {}".format(spec.name, cache_path)) + + return exp + + @staticmethod + def get_log_dirs(base_path, spec, filter_regex=None): + """Return list of run directories given an experiments spec + + :param 
base_path: base directory to search in + :param spec: experiment spec, e.g. from an experiments config file + :param filter_regex: optional additional regex; limits result to matching paths + :return: list of (filtered) paths (joined with base path) + """ + log_dirs = [d for p in spec.pattern for d in glob(os.path.join(base_path, p)) if Run.is_run_dir(d)] + if spec.filter_regex: + log_dirs = [d for d in log_dirs if re.search(spec.filter_regex, d)] + if filter_regex: + log_dirs = [d for d in log_dirs if re.search(filter_regex, d)] + return log_dirs + + @staticmethod + def load_all(specs, config_file, base_path, cache_dir, seq_name_mapping=None): + """Load a set experiments from log files or cache + + If there is more than one experiment with the same name, an error is raised. + + :param specs: list of experiments specs, e.g. from a experiments config file + :param config_file: experiments config file path (currently unused) + :param base_path: base directory relative to which all patterns in experiments are search for + :param cache_dir: folder to look for and/or save cached experiments + :param seq_name_mapping: optional mapping of sequence names + :return: a dict {name: experiment} + """ + + # Note: Seems saving everything to one cache file isn't much faster than the per-experiments cache... 
+ use_combined_cache = False + + # load all from cache + if use_combined_cache and cache_dir: + + overwrite_cache_any = any(e.overwrite_cache for e in specs) + caching_specs = munchify([{k: v for k, v in s.items() if k not in ["overwrite_cache"]} for s in specs]) + meta_info = Munch(version=_CURRENT_CACHE_VERSION, options=Munch(base_path=base_path), specs=caching_specs) + + config_filename = os.path.splitext(os.path.basename(config_file))[0] + cache_filename = "experiment-cache-{}.pickle".format(config_filename) + cache_path = os.path.join(cache_dir, cache_filename) + + if os.path.isfile(cache_path): + if not overwrite_cache_any: + with open(cache_path, 'rb') as f: + cached_meta_info = pickle.load(f) + if cached_meta_info == meta_info: + print("> loading from cache: {}".format(cache_path)) + exps = pickle.load(f) + return exps + + # load individually + exps = dict() + for spec in specs: + if spec.name in exps: + raise RuntimeError("experiment {} is duplicate".format(spec.name)) + exps[spec.name] = Experiment.load_spec(spec, + base_path, + cache_dir, + seq_name_mapping=seq_name_mapping, + other_specs=specs) + + # save all to cache + if use_combined_cache and cache_dir: + os.makedirs(cache_dir, exist_ok=True) + with open(cache_path, 'wb') as f: + pickle.dump(meta_info, f) + pickle.dump(exps, f) + print("> saved cache {}".format(cache_path)) + + return exps + + +def load_experiments_config(path, args=None): + """Load experiments config file, applying substitutions and setting defaults + + An experiments config file defines general options, locations of experimental runs, + and results sections that define tables and plots to render. + + Substitutions and templates can be used to more concisely describe repetitive + definitions (e.g. generate the same plot for ALL runs of an experiment). 
+ + :param log_dirs: optional command line arguments to override some values in the config + :type log_dirs: Union[dict, argparse.Namespace] + """ + + config = munchify(toml.load(path)) + + # default config: + config.setdefault("options", Munch()) + config.options.setdefault("base_path", "$config_dir") + config.options.setdefault("cache_dir", "cache") + config.options.setdefault("output_path", "results") + config.options.setdefault("filter_regex", None) + config.options.setdefault("overwrite_cache", False) + config.options.setdefault("show_values_failed_runs", True) + config.options.setdefault("screenread", False) + config.options.setdefault("import_experiments", []) + config.setdefault("seq_name_mapping", dict()) + config.setdefault("seq_displayname_mapping", dict()) + config.setdefault("substitutions", []) + config.setdefault("templates", []) + config.setdefault("experiments", []) + config.setdefault("results", []) + + # overrides from command line + if isinstance(args, argparse.Namespace): + args = vars(args) + + # values + for k in ["base_path", "cache_dir", "output_path", "filter_regex"]: + if k in args and args[k] is not None: + config.options[k] = args[k] + + # positive flags + for k in ["overwrite_cache"]: + if k in args and args[k]: + config.options[k] = True + + # negative flags + for k in ["dont_show_values_failed_runs"]: + if k in args and args[k]: + config.options[k] = False + + # collapse all substitutions into one dict + static_subs = dict() + for d in config.substitutions: + for k, v in d.items(): + if k in static_subs: + raise RuntimeError("substitution {} defined multiple times".format(k)) + static_subs[k] = v + + # create dictionary from list of templates (index by name) + template_definitions = dict() + for t in config.templates: + template_definitions[t._name] = t + + # substituion helper + var_pattern = re.compile(r"\$\{(\w+)\}") # match '${foo}' + + def substitute(obj, subs): + if isinstance(obj, Mapping): + # For mappings in general we 
simply recurse the 'substitute' call for the dict values. + # In case the '_template' key is present, we do template expansion. + if "_template" in obj: + # template expansion + + # single templates can be abbreviated by not putting them in a list + # --> put the in list now to make following code the same for either case + templates = obj._template if isinstance(obj._template, list) else [obj._template] + + # recurse 'substitute' on non-templated part + prototype = {k: substitute(v, subs) for k, v in obj.items() if not k.startswith("_")} + + # loop over all templates + result = [Munch()] + for tmpl in templates: + + # which arguments are defined? + args = [k for k in tmpl if not k.startswith("_")] + + # check template definition + tmpl_def = template_definitions[tmpl._name] + tmpl_args = tmpl_def._arguments if "_arguments" in tmpl_def else [] + if set(args) != set(tmpl_args): + raise RuntimeError("Template {} required arguments {}, but supplied {} during expansion".format( + tmpl._name, tmpl_def._arguments, args)) + + # apply template definition to all new objects + tmp = result + result = list() + for new_obj in tmp: + # create substitutions from template arguments (recursing 'substitute' call) + all_argument_combinations = [dict()] # start with single combination (usual case) + for arg in args: + if isinstance(tmpl[arg], Mapping) and "_argument" in tmpl[arg]: + if tmpl[arg]._argument == "product": + # given list of alternative argument values: create combination for each of them + tmp2 = all_argument_combinations + all_argument_combinations = list() + for d in tmp2: + for val in substitute(tmpl[arg]._value, subs): + d_new = deepcopy(d) + d_new[arg] = val + all_argument_combinations.append(d_new) + else: + raise RuntimeError("arugment type {} not for argument {} not implemented".format( + tmpl[arg]._argument, arg)) + else: + # simple argument: append to all combintations + for d in all_argument_combinations: + assert (arg not in d) + d[arg] = 
substitute(tmpl[arg], subs) + + # for each argument combination, create substitutions and apply template definition + for expanded_args in all_argument_combinations: + + subs_with_args = dict(subs) + subs_with_args.update(expanded_args) + + # merge template definition into result, while recursing substitute call with augmented substitutions + new_obj2 = deepcopy(new_obj) + for k, v in tmpl_def.items(): + if not k.startswith("_"): + # later templates can override keys from earlier ones + new_obj2[k] = substitute(deepcopy(v), subs_with_args) + + result.append(new_obj2) + + # do prototype keys last, since they may override template keys (we already recursed) + for new_obj in result: + for k, v in prototype.items(): + new_obj[k] = deepcopy(v) + + if len(result) == 1: + return new_obj + else: + return Munch(_return="splice", _value=result) + else: + # default case + for k, v in obj.items(): + obj[k] = substitute(v, subs) + return obj + elif isinstance(obj, list): + # Go over elements of list and recurse the 'substitute' call. + # In certain cases the returned value can indicate that we should splice in the resulting list instead of + # just inserting it. 
+ tmp = list() + for v in obj: + val = substitute(v, subs) + if isinstance(val, dict) and "_return" in val: + if val._return == "splice": + tmp.extend(val._value) + else: + raise RuntimeError("Unknown return type {}".format(val._return)) + else: + tmp.append(val) + return tmp + elif isinstance(obj, str): + if len(obj) > 2 and obj[0] == "<" and obj[-1] == ">": + # if string is '', the whole string is replaced by the substitution defined for FOO + var = obj[1:-1] + if var in subs: + return substitute(subs[var], subs) + else: + raise RuntimeError("Unknown substitution <{}>".format(var)) + else: + # otherwise, find occurances of ${FOO} in the string an replace by a string representation + # of the substitution defined for FOO + obj, n = var_pattern.subn(lambda m: str(subs[m.group(1)]), obj) + if n > 0: + # something change --> recurse + return substitute(obj, subs) + else: + # no substitution --> just return + return obj + else: + return obj + + # apply substitutions + config.experiments = substitute(config.experiments, static_subs) + config.results = substitute(config.results, static_subs) + + # set default values for experiments specs + for spec in config.experiments: + spec.setdefault("display_name", spec.name) + spec.setdefault("description", None) + spec.setdefault("filter_regex", config.options.filter_regex) + spec.setdefault("overwrite_cache", config.options.overwrite_cache) + spec.setdefault("pattern", []) + spec.pattern = [spec.pattern] if isinstance(spec.pattern, str) else spec.pattern # ensure list + assert isinstance(spec.pattern, abc.Sequence), "pattern {} in experiment {} is neither string nor list".format( + spec.pattern, spec.name) + + # results: backwards-compatibility -- move old sections into 'results' + if "results_tables" in config: + for spec in config.results_tables: + spec["class"] = "results_table" + config.results.append(spec) + del config["results_tables"] + + if "summarize_sequences_tables" in config: + for spec in 
config.summarize_sequences_tables: + spec["class"] = "summarize_sequences_table" + config.results.append(spec) + del config["summarize_sequences_tables"] + + if "plots" in config: + for spec in config.plots: + spec["class"] = "plot" + config.results.append(spec) + del config["plots"] + + if "overview_tables" in config: + for spec in config.overview_tables: + spec["class"] = "overview_table" + config.results.append(spec) + del config["overview_tables"] + + # results: default values + for spec in config.results: + spec.setdefault("class", "results_table") + + # set common default values + spec.setdefault("show", True) + spec.setdefault("clearpage", spec["class"] == "section") + spec.setdefault("filter_regex", None) + + if spec["class"] == "section": + spec.setdefault("name", "Section") + spec.setdefault("pagewidth", None) + elif spec["class"] == "results_table": + spec.setdefault("metrics_legend", True) + spec.setdefault("escape_latex_header", True) + spec.setdefault("rotate_header", True) + spec.setdefault("vertical_bars", True) + spec.setdefault("export_latex", None) + spec.setdefault("color_failed", "red") + spec.setdefault("multirow", True) + spec.setdefault("override_as_failed", []) + elif spec["class"] == "summarize_sequences_table": + spec.setdefault("header", "") + spec.setdefault("export_latex", None) + spec.setdefault("escape_latex_header", True) + spec.setdefault("rotate_header", True) + elif spec["class"] == "plot": + spec.setdefault("plot_ate", False) + spec.setdefault("figsize", None) + spec.setdefault("title", None) + spec.setdefault("reference_experiment", None) + spec.setdefault("width", None) + spec.setdefault("ylim", Munch()) + spec.ylim.setdefault("top", None) + spec.ylim.setdefault("bottom", None) + spec.setdefault("ylim_cost", Munch()) + spec.ylim_cost.setdefault("top", None) + spec.ylim_cost.setdefault("bottom", None) + spec.setdefault("ylim_ate", Munch()) + spec.ylim_ate.setdefault("top", None) + spec.ylim_ate.setdefault("bottom", None) + 
spec.setdefault("ylim_tolerance", Munch()) + spec.ylim_tolerance.setdefault("top", None) + spec.ylim_tolerance.setdefault("bottom", None) + spec.setdefault("xlim_time", Munch()) + spec.xlim_time.setdefault("right", None) + spec.xlim_time.setdefault("left", None) + spec.setdefault("xlim_time_fastest", Munch()) + spec.xlim_time_fastest.setdefault("right", None) + spec.xlim_time_fastest.setdefault("left", None) + spec.setdefault("xlim_it", Munch()) + spec.xlim_it.setdefault("right", None) + spec.xlim_it.setdefault("left", None) + spec.setdefault("xlimits", Munch()) + spec.xlimits.setdefault("right", None) + spec.xlimits.setdefault("left", None) + spec.setdefault("legend_loc", "best") + spec.setdefault("align_fraction", None) + spec.setdefault("layout", "horizontal") + spec.setdefault("extend_x", False) + if "problem_size_variants" not in spec and "memory_variants" in spec: + # legacy support for "memory_variants" + spec.problem_size_variants = spec.memory_variants + del spec["memory_variants"] + spec.setdefault("problem_size_variants", ["cam", "lm", "obs"]) + spec.setdefault("bal_cost_include", ["cost_time", "cost_it", "tr_radius", "inner_it", "memory"]) + spec.setdefault("tolerances", [0.01, 0.001, 0.00001]) + spec.setdefault("plot_tolerances", False) + spec.setdefault("best_fit_line", True) + spec.setdefault("reverse_zorder", False) + spec.setdefault("plot_cost_semilogy", True) + spec.setdefault("marker_size", 8) + spec.setdefault("ylabel", True) + spec.setdefault("suptitle", None) + spec.setdefault("rotate2d", 0) + spec.setdefault("trajectory_axes", "xy") + elif spec["class"] == "overview_table": + spec.setdefault("export_latex", None) + + # expand templates in path names + template_args = dict(config_dir=os.path.dirname(os.path.abspath(path))) + for key in ["base_path", "output_path", "cache_dir"]: + config.options[key] = Template(config.options[key]).substitute(**template_args) + if isinstance(config.options.import_experiments, str): + 
def generate_tables(args):
    """Load experiment logs according to the config file and render a PDF report.

    The report is assembled from the 'results' specs in the config: sections,
    results tables, sequence-summary tables and plots.

    :param args: parsed command-line arguments (see main()); uses at least
        config, change_directory, dont_clean_tex and open.
    """

    if args.change_directory:
        os.chdir(args.change_directory)

    config = load_experiments_config(args.config, args)

    # Load all experiment logs (possibly from cache).
    exps = Experiment.load_all(config.experiments,
                               config_file=args.config,
                               base_path=config.options.base_path,
                               cache_dir=config.options.cache_dir,
                               seq_name_mapping=config.seq_name_mapping)

    doc = Document(geometry_options={"tmargin": "1cm", "lmargin": "1cm"})

    export_basepath = "{}-export".format(config.options.output_path)

    # 'curr' is the container new elements are appended to: the document
    # itself until the first section spec, then the current section.
    curr = doc

    hide_all = False  # True while inside a section with show=False
    for spec in config.results:
        if spec.show:

            if spec["class"] == "section":
                if spec.clearpage:
                    curr.append(NewPage())
                if spec.pagewidth:
                    curr.append(Command("SetPageScreenWidth", Arguments(spec.pagewidth)))
                else:
                    curr.append(Command("RestorePageScreenWidth"))
                hide_all = False
                curr = Section(spec.name)
                doc.append(curr)
                continue

            if hide_all:
                continue

            if spec.clearpage:
                curr.append(NewPage())

            # BUGFIX: this dispatch used to be an 'elif' chained to the
            # clearpage check above, so a non-section spec with clearpage=True
            # skipped element creation entirely and then appended a stale (or
            # undefined) 'elem'. It must be an independent 'if'.
            if spec["class"] == "results_table":
                elem = ResultsTable(exps,
                                    spec,
                                    show_values_failed_runs=config.options.show_values_failed_runs,
                                    seq_displayname_mapping=config.seq_displayname_mapping,
                                    export_basepath=export_basepath)
            elif spec["class"] == "summarize_sequences_table":
                elem = SummarizeSequencesTable(exps,
                                               spec,
                                               show_values_failed_runs=config.options.show_values_failed_runs,
                                               seq_displayname_mapping=config.seq_displayname_mapping,
                                               export_basepath=export_basepath)
            elif spec["class"] == "plot":
                elem = Plot(exps,
                            spec,
                            seq_displayname_mapping=config.seq_displayname_mapping,
                            export_basepath=export_basepath)
            else:
                raise RuntimeError("Invalid results class {}".format(spec["class"]))

            curr.append(elem)
        else:
            # A hidden section hides everything until the next visible section.
            if spec["class"] == "section":
                hide_all = True
            continue

    # generate auxiliary tex files
    if config.options.screenread:
        output_dir = os.path.dirname(config.options.output_path)
        screenread_path = output_dir + "/screenread.sty"
        with open(screenread_path, "w") as f:
            f.write(screenread_sty)
        doc.packages.add(Package('screenread'))

    # create nofloatfigure environment
    doc.preamble.append(
        Command("newenvironment", Arguments("nofloatfigure", Command("captionsetup", Arguments(type="figure")), "")))
    doc.packages.add(Package('caption'))
    doc.packages.add(Package('mathtools'))

    # render latex
    doc.generate_pdf(config.options.output_path, clean_tex=not args.dont_clean_tex)

    # cleanup
    if config.options.screenread and not args.dont_clean_tex:
        os.remove(screenread_path)

    # open the generated pdf
    if args.open:
        os_open_file(config.options.output_path + ".pdf")
def main():
    """Command-line entry point: parse arguments and generate the report."""

    parser = argparse.ArgumentParser("Load basalt experiment logs and generate result tables and plots.")
    parser.add_argument("-C",
                        "--change-directory",
                        default=None,
                        help="Change directory to this folder before doing anything else")
    parser.add_argument("--config", default="experiments.toml", help="specs for experiments to load")
    parser.add_argument("--base-path", default=None, help="overwrite basepath for loading logs defined in the config")
    parser.add_argument("--output-path", default=None, help="output filepath")
    parser.add_argument("--dont-clean-tex", action="store_true", help="don't remove tex file after generation")
    # typo fix in help text: "give folder" -> "given folder"
    parser.add_argument("--cache-dir", default=None, help="load/save experiments cache from/to given folder")
    parser.add_argument("--overwrite-cache", action="store_true", help="reload all experiments independent of cache")
    parser.add_argument("--dont-show-values-failed-runs",
                        action="store_true",
                        help="don't attempt to show values for failed logs based on partial logs")
    parser.add_argument("--open", action="store_true", help="open after generation")

    args = parser.parse_args()

    generate_tables(args)
class MyContainer(Container):
    """pylatex container that renders its children directly, with no
    begin/end environment wrapper (see dumps())."""

    def __init__(self):
        super().__init__()
        # add packages that seem to not propagate properly from added elements
        self.packages.add(Package("xcolor"))
        self.packages.add(Package('graphicx'))

    def dumps(self):
        # Render only the contained elements.
        return self.dumps_content()


class ExperimentsContainer(MyContainer):
    """Container that additionally knows how to map sequence names to the
    names shown in tables and plots."""

    def __init__(self, seq_displayname_mapping):
        super().__init__()

        # dict: sequence name -> display name
        self.seq_displayname_mapping = seq_displayname_mapping

    def seq_displayname(self, seq):
        """Return the display name for a sequence, falling back to the
        sequence name itself if unmapped."""
        return self.seq_displayname_mapping.get(seq, seq)


class ExperimentsTable(ExperimentsContainer):
    """Common base for tables built from a set of experiments.

    Resolves the experiment specs and metrics from the table spec and
    precomputes the union of sequence names covered by the referenced
    experiments, plus the table dimensions (num_seqs / num_metrics / num_exps).
    """

    def __init__(self, exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath):
        """
        :param exps: mapping from experiment name to loaded experiment object
        :param spec: table spec from the config (experiments, metrics, filter_regex, ...)
        :param show_values_failed_runs: if True, show metric values from partial
            logs of failed runs instead of a failure marker
        :param seq_displayname_mapping: dict sequence name -> display name
        :param export_basepath: folder for exported standalone .tex files
        """
        super().__init__(seq_displayname_mapping)
        self.exps = exps
        self.spec = spec
        self.show_values_failed_runs = show_values_failed_runs
        self.export_basepath = export_basepath

        self.experiment_specs = [ExperimentSpec(s) for s in self.spec.experiments]
        self.metrics = metrics_from_config(self.spec.metrics)

        self.seq_names = self.sequence_names([s.name for s in self.experiment_specs])
        self.num_seqs = len(self.seq_names)
        self.num_metrics = len(self.metrics)
        self.num_exps = len(self.experiment_specs)

    def sequence_names(self, experiment_names):
        """Union of the given experiments' sequence names (filtered by the
        spec's filter_regex), sorted alphanumerically."""
        seq_names = set()
        for s in experiment_names:
            seq_names.update(self.exps[s].sequences(filter_regex=self.spec.filter_regex))

        return sorted(seq_names, key=alphanum)

    def is_failed(self, exp, seq):
        """True if the experiment has no run for the sequence, or the run
        reports itself as failed."""
        if seq not in exp.runs:
            return True
        return exp.runs[seq].is_failed()

    def render_failure(self, exp, seq):
        """Return the failure marker to display for (exp, seq), or None if the
        run should be rendered with its metric values instead."""
        if seq in self.spec.override_as_failed:
            return "x"

        if seq not in exp.runs:
            return '?'

        run = exp.runs[seq]

        # With show_values_failed_runs, only runs without any log at all are
        # treated as failed; otherwise every failed run is.
        treat_as_failed = (run.log is None) if self.show_values_failed_runs else run.is_failed()

        if treat_as_failed:
            return run.failure_str()
        else:
            return None

    def get_metrics(self, exp, seq, it):
        """Return the list of metric values for (exp, seq) at iteration 'it';
        NaN for every metric if the run is missing or treated as failed."""
        if seq not in exp.runs:
            return [math.nan for _ in self.metrics]
        run = exp.runs[seq]

        treat_as_failed = (run.log is None) if self.show_values_failed_runs else run.is_failed()

        if treat_as_failed:
            return [math.nan for _ in self.metrics]

        return [m.get_value(self.exps, exp, seq, it) for m in self.metrics]
        # try:
        #     return [m.get_value(self.exps, exp, seq, it) for m in self.metrics]
        # except AttributeError as e:
        #     if e.args[0].startswith("local_error"):
        #         if not has_imported_sophus():
        #             print("To use local-error, you need to install sophuspy and flush the cache.")
        #             sys.exit(1)
        #         if not exp.runs[seq].log.has_cam_pos:
        #             print("You cannot use local-error for experiment {}, which has no camera positions in the log.".
        #                   format(exp.name))
        #             sys.exit(1)
        #         raise
class ModulusList(list):
    """List variant with wrap-around indexing: any integer index is taken
    modulo the current length, so out-of-range indices select a valid element.
    Raises ZeroDivisionError on an empty list, like the original."""

    def __init__(self, *args, **kwargs):
        # Construction is identical to a plain list.
        super().__init__(*args, **kwargs)

    def __getitem__(self, key):
        # Reduce the index modulo the length before delegating to list.
        return super().__getitem__(key % len(self))
else: + plot_name = '{} {}'.format(spec.type, spec.name).replace("_", " ") + + #with self.create(Subsection(spec.name, numbering=False)) as p: + with self.create(NoFloatFigure()) as f: + f.add_image(os.path.abspath(saved_file), width=NoEscape(r'{}\textwidth'.format(self.width))) + f.add_caption(plot_name) + + # cleanup + plt.close('all') + + def plot_nullspace(self, exps, spec): + + logs = [exps[e].runs[spec.sequence].log for e in spec.experiments] + names = [exps[e].display_name for e in spec.experiments] + + num_plots = len(names) + + if num_plots == 4: + if True: + if spec.figsize is None: + spec.figsize = [10, 2.5] + fig, axs = plt.subplots(1, 4, figsize=spec.figsize, sharey=True) + else: + if spec.figsize is None: + spec.figsize = [10, 4.7] + fig, axs = plt.subplots(2, 2, figsize=spec.figsize, sharey=True) + axs = axs.flatten() + else: + if spec.figsize is None: + spec.figsize = [6, 2 * num_plots] + fig, axs = plt.subplots(num_plots, 1, figsize=spec.figsize, sharey=True) + + if num_plots == 1: + axs = [axs] + + for i, (log, name) in enumerate(zip(logs, names)): + + if log is None: + continue + + ax = axs[i] + + ns = log.sums.marg_ns[1:] # skip first prior, which just is all 0 + ns = np.abs(ns) # cost change may be negative, we are only interested in the norm + ns = np.maximum(ns, 1e-20) # clamp at very small value + + markerfacecolor = "white" + + markevery = 1000 + if spec.sequence == "kitti10": + markevery = 100 + + ax.semilogy( + ns[:, 0], + ":", + # label="x", + color="tab:blue") + ax.semilogy( + ns[:, 1], + ":", + # label="y", + color="tab:blue") + ax.semilogy( + ns[:, 2], + ":", + # label="z", + label="x, y, z", + color="tab:blue", + marker="o", + markerfacecolor=markerfacecolor, + markevery=(markevery // 2, markevery)) + + ax.semilogy( + ns[:, 3], + ":", + # label="roll", + color="tab:orange") + ax.semilogy( + ns[:, 4], + ":", + # label="pitch", + label="roll, pitch", + color="tab:orange", + marker="s", + markerfacecolor=markerfacecolor, + 
markevery=(markevery // 2, markevery)) + + ax.semilogy(ns[:, 5], + ":", + label="yaw", + color="tab:green", + marker="^", + markerfacecolor=markerfacecolor, + markevery=(0, markevery)) + + ax.semilogy(ns[:, 6], + ":", + label="random", + color="tab:red", + marker="D", + markerfacecolor=markerfacecolor, + markevery=(0, markevery)) + + # marker on top of lines; + + ax.semilogy(ns[:, 2], + color="None", + marker="o", + markerfacecolor=markerfacecolor, + markeredgecolor="tab:blue", + markevery=(markevery // 2, markevery)) + ax.semilogy(ns[:, 4], + color="None", + marker="s", + markerfacecolor=markerfacecolor, + markeredgecolor="tab:orange", + markevery=(markevery // 2, markevery)) + + #ax.set_yscale("symlog", linthresh=1e-12) + + ax.set_title(name) + + ax.set_yticks([1e-17, 1e-12, 1e-7, 1e-2, 1e3, 1e8]) + + if spec.sequence == "kitti10": + ax.set_xticks([i * 100 for i in range(4)]) + #ax.xaxis.set_minor_locator(matplotlib.ticker.FixedLocator([i * 100 + 50 for i in range(4)])) + + if i == 0: + ax.set_ylabel("$\\Delta E_m$", rotation=0) + ax.yaxis.set_label_coords(-0.05, 1.05) + + if i == num_plots - 1: + ax.legend(loc=spec.legend_loc) + + if spec.ylim.top is not None: + ax.set_ylim(top=spec.ylim.top) + if spec.ylim.bottom is not None: + ax.set_ylim(bottom=spec.ylim.bottom) + + if spec.suptitle: + fig.suptitle(spec.suptitle) + + def plot_eigenvalues(self, exps, spec): + + logs = [exps[e].runs[spec.sequence].log for e in spec.experiments] + names = [exps[e].display_name for e in spec.experiments] + + num_plots = 1 + + if spec.figsize is None: + spec.figsize = [5.2, 2 * num_plots] + + fig, axs = plt.subplots(num_plots, 1, figsize=spec.figsize) + + ax = axs + + for i, (log, name) in enumerate(zip(logs, names)): + if log is not None: + min_ev = [np.min(e) for e in log.sums.marg_ev[1:]] + #ax.plot(min_ev, ":", label=name, color=default_colors[i]) + ax.plot(min_ev, default_lines[i], label=name, color=default_colors[i]) + + ax.set_yscale("symlog", linthresh=1e-8) + 
ax.legend(loc=spec.legend_loc) + + #ax.set_title("smallest eigenvalue {} {}".format(name, spec.sequence)) + + if spec.sequence == "eurocMH01": + ax.set_xticks([i * 1000 for i in range(4)]) + ax.xaxis.set_minor_locator(matplotlib.ticker.FixedLocator([i * 1000 + 500 for i in range(4)])) + + if spec.sequence == "kitti10": + ax.set_xticks([i * 100 for i in range(4)]) + ax.xaxis.set_minor_locator(matplotlib.ticker.FixedLocator([i * 100 + 50 for i in range(4)])) + + ax.set_yticks([-1e4, -1e-4, 0.0, 1e-4, 1e4]) + ax.set_ylim(bottom=-1e8, top=1e8) + # ax.yaxis.tick_right() + ax.set_ylabel("$\\sigma_{min}$", rotation=0) + ax.yaxis.set_label_coords(0, 1.05) + + if spec.ylim.top is not None: + ax.set_ylim(top=spec.ylim.top) + if spec.ylim.bottom is not None: + ax.set_ylim(bottom=spec.ylim.bottom) + + if spec.suptitle: + fig.suptitle(spec.suptitle) + + def plot_trajectory(self, exps, spec): + + #self.width = 1.5 + + runs = [exps[e].runs[spec.sequence] for e in spec.experiments] + names = [exps[e].display_name for e in spec.experiments] + + linewidth_factor = 3 + + R = rotation2d(spec.rotate2d) + + traj_axes_idx = self._axes_spec_to_index(spec.trajectory_axes) + + if spec.figsize is None: + spec.figsize = [6.4, 4.8] + + fig, ax = plt.subplots(figsize=spec.figsize) + + ax.axis("equal") + ax.axis('off') + #ax.set_xlabel("x") + #ax.set_ylabel("y") + + gt_color = "tab:grey" + #gt_color = "black" + + # take gt-trajectory from first experiment: + if runs[0].traj_gt is not None: + gt = runs[0].traj_gt[:, traj_axes_idx].T + gt = np.matmul(R, gt) + ax.plot(gt[0, :], + gt[1, :], + '-', + zorder=1, + linewidth=1 * linewidth_factor, + color=gt_color, + label="ground truth") + + # https://matplotlib.org/stable/gallery/color/named_colors.html + linestyles = [":", ":", "--", "-"] + colors = [default_colors[1], default_colors[3]] + #colors = ["tab:blue", "tab:orange"] + linewidths = [2, 1] + + for i, (r, name) in enumerate(zip(runs, names)): + # plot in decreasing zorder + #zorder = len(runs) 
- i + 1 + + zorder = i + 2 + + if r.traj_est is not None: + pos = r.traj_est[:, traj_axes_idx].T + pos = np.matmul(R, pos) + ax.plot( + pos[0, :], + pos[1, :], + linestyles[i], + #default_lines[i], + zorder=zorder, + linewidth=linewidths[i] * linewidth_factor, + label=name, + color=colors[i]) + + #ax.set_xlim(np.min(x_gt), np.max(x_gt)) + #ax.set_ylim(np.min(y_gt), np.max(y_gt)) + + #lines = [gt] + #colors = ['black'] + #line_segments = LineCollection(lines, colors=colors, linestyle='solid') + + #ax.add_collection(line_segments) + + ax.legend(loc=spec.legend_loc) + + if spec.title is not None: + ax.set_title(spec.title.format(sequence=self.seq_displayname(spec.sequence))) + + @staticmethod + def _axes_spec_to_index(axes_spec): + index = [] + assert len(axes_spec) == 2, "Inalid axes_spec {}".format(axes_spec) + for c in axes_spec: + if c == "x": + index.append(0) + elif c == "y": + index.append(1) + elif c == "z": + index.append(2) + else: + assert False, "Inalid axes_spec {}".format(axes_spec) + return index + + # static: + filename_counters = defaultdict(int) + + def _save_plot(self, spec, basepath, extension=".pdf"): + + os.makedirs(basepath, exist_ok=True) + + if "sequence" in spec: + filename = '{}_{}_{}'.format(spec.type, spec.name, spec.sequence) + else: + filename = '{}_{}'.format(spec.type, spec.name) + + filename = filename.replace(" ", "_").replace("/", "_") + + Plot.filename_counters[filename] += 1 + counter = Plot.filename_counters[filename] + if counter > 1: + filename = "{}-{}".format(filename, counter) + + filepath = os.path.join(basepath, "{}.{}".format(filename, extension.strip('.'))) + + plt.savefig(filepath) + + return filepath diff --git a/python/basalt/latex/results_table.py b/python/basalt/latex/results_table.py new file mode 100644 index 0000000..5d0366c --- /dev/null +++ b/python/basalt/latex/results_table.py @@ -0,0 +1,163 @@ +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. 
class ResultsTable(ExperimentsTable):
    """LaTeX table with one row (or row-group) per sequence and one column per
    experiment; each cell holds the configured metrics, with the best value
    bold and the second-best italic."""

    def __init__(self, exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath):
        super().__init__(exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath)

        self.doit()

    def doit(self):
        """Build the table and append it (plus an optional metrics legend) to
        this container; optionally export it as a standalone .tex file."""

        # With several metrics per cell, either stack them with MultiRow or
        # join them inline with "~/~" separators (see below).
        is_multirow = self.num_metrics > 1 and self.spec.multirow

        def render_metric(value, best, second, decimals, format_string, highlight_top, relative_to):
            # Render a single metric value; non-numbers pass through unchanged.
            # A finite non-zero 'relative_to' switches to percent-of-reference
            # rendering. Best/second-best get bold/italic when highlighting.
            if isinstance(value, numbers.Number):
                if relative_to is None or relative_to == 0 or not np.isfinite(relative_to):
                    # absolute number
                    rendered = format_string.format(value, prec=decimals)
                else:
                    # percent
                    rendered = format_ratio_percent(value, relative_to, decimals=decimals)
                if highlight_top:
                    if value == best:
                        rendered = bold(rendered)
                    elif value == second:
                        rendered = italic(rendered)
                return rendered
            else:
                return value

        # Compact rows only for the on-screen report, not for LaTeX export.
        if self.spec.export_latex:
            row_height = None
        else:
            row_height = 0.65 if is_multirow and self.num_metrics >= 3 else 1

        column_spec = '|r' if self.spec.vertical_bars else 'r'
        t = Tabular('l' + column_spec * self.num_exps, row_height=row_height, pos=['t'])
        escape_header_fun = lambda text: text if self.spec.escape_latex_header else NoEscape(text)
        if self.spec.rotate_header:
            t.add_row([''] + [
                NoEscape(r"\rotatebox{90}{%s}" % escape_latex(escape_header_fun(s.display_name(self.exps[s.name]))))
                for s in self.experiment_specs
            ])
        else:
            t.add_row([''] + [escape_header_fun(s.display_name(self.exps[s.name])) for s in self.experiment_specs])
        t.add_hline()

        for seq in self.seq_names:
            fails = [self.is_failed(self.exps[s.name], seq) for s in self.experiment_specs]
            failure_strings = [self.render_failure(self.exps[s.name], seq) for s in self.experiment_specs]
            # values has shape (num_exps, num_metrics)
            values = np.array([self.get_metrics(self.exps[s.name], seq, s.it) for s in self.experiment_specs])

            # top_values[c] = (best, second-best) over experiments for metric c,
            # computed on rounded values so display-equal values tie.
            top_values = list(range(self.num_metrics))
            for c, m in enumerate(self.metrics):
                try:
                    values[:, c] = np.around(values[:, c], m.decimals)
                except IndexError:
                    pass
                non_excluded_values = np.array(values[:, c])
                for i in m.exclude_columns_highlight:
                    non_excluded_values[i] = math.nan
                top_values[c] = best_two_non_repeating(non_excluded_values, reverse=m.larger_is_better)

            if is_multirow:
                # One physical row per metric; first cell spans all of them.
                rows = [[MultiRow(self.num_metrics, data=self.seq_displayname(seq))]
                       ] + [list(['']) for _ in range(1, self.num_metrics)]
            else:
                rows = [[self.seq_displayname(seq)]]
            for c, (fail, failure_str, value_col) in enumerate(zip(fails, failure_strings, values)):
                if failure_str is not None:
                    # Failed run rendered as a (colored) failure marker.
                    if self.spec.color_failed:
                        failure_str = TextColor(self.spec.color_failed, failure_str)
                    if is_multirow:
                        rows[0].append(MultiRow(self.num_metrics, data=failure_str))
                        for r in range(1, self.num_metrics):
                            rows[r].append('')
                    else:
                        rows[0].append(failure_str)
                else:
                    tmp_data = [None] * self.num_metrics
                    for r, m in enumerate(self.metrics):
                        if m.failed_threshold and value_col[r] > m.failed_threshold:
                            # Value beyond the metric's threshold counts as failed.
                            obj = "x"
                            if self.spec.color_failed:
                                obj = TextColor(self.spec.color_failed, obj)
                        else:
                            # Optionally render relative to the same metric of
                            # another experiment column.
                            relative_to = None
                            if m.relative_to_column is not None and m.relative_to_column != c:
                                relative_to = values[m.relative_to_column, r]
                            obj = render_metric(value_col[r],
                                                top_values[r][0],
                                                top_values[r][1],
                                                m.effective_display_decimals(),
                                                m.format_string,
                                                m.highlight_top,
                                                relative_to=relative_to)
                            if fail and self.spec.color_failed:
                                obj = TextColor(self.spec.color_failed, obj)
                        tmp_data[r] = obj
                    if self.num_metrics == 1 or is_multirow:
                        for r, obj in enumerate(tmp_data):
                            rows[r].append(obj)
                    else:
                        # Inline all metrics into one cell, separated by "~/~".
                        entry = []
                        for v in tmp_data:
                            entry.append(v)
                            entry.append(NoEscape("~/~"))
                        entry.pop()
                        rows[0].append(dumps_list(entry))

            for row in rows:
                t.add_row(row)

            if is_multirow:
                t.add_hline()

        if self.spec.export_latex:
            os.makedirs(self.export_basepath, exist_ok=True)
            t.generate_tex(os.path.join(self.export_basepath, self.spec.export_latex))

        with self.create(Subsection(self.spec.name, numbering=False)) as p:

            if self.spec.metrics_legend:
                # Side table listing the metric names next to the results.
                legend = Tabular('|c|', row_height=row_height, pos=['t'])
                legend.add_hline()
                legend.add_row(["Metrics"])
                legend.add_hline()
                for m in self.metrics:
                    legend.add_row([m.display_name])
                    legend.add_hline()

                tab = Tabular("ll")
                tab.add_row([t, legend])
                content = tab
            else:
                content = t

            if True:
                content = FootnoteText(content)

            p.append(content)
class SummarizeSequencesTable(ExperimentsTable):
    """LaTeX table with one row per metric and one column per experiment,
    each cell holding the metric's (geometric or arithmetic) mean over all
    sequences; best value bold, second-best italic."""

    def __init__(self, exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath):
        super().__init__(exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath)

        self.doit()

    def doit(self):
        """Build the summary table, append it to this container and optionally
        export it as a standalone .tex file."""

        def render_metric(value, best, second, decimals, format_string):
            # Render one mean value; bold/italic for best/second-best.
            # Non-numbers pass through unchanged.
            if isinstance(value, numbers.Number):
                rendered = format_string.format(value, prec=decimals)
                if value == best:
                    rendered = bold(rendered)
                elif value == second:
                    rendered = italic(rendered)
                return rendered
            else:
                return value

        # values has shape (num_metrics, num_seqs, num_exps)
        values = np.empty((self.num_metrics, self.num_seqs, self.num_exps))

        for i, seq in enumerate(self.seq_names):
            for j, s in enumerate(self.experiment_specs):
                values[:, i, j] = np.array(self.get_metrics(self.exps[s.name], seq, s.it))

        # Per-metric mean over sequences; geometric if the metric says so.
        means = np.empty((self.num_metrics, self.num_exps))
        for i, m in enumerate(self.metrics):
            if m.geometric_mean:
                means[i, :] = scipy.stats.gmean(values[i, :, :], axis=0)
            else:
                means[i, :] = np.mean(values[i, :, :], axis=0)

        t = Tabular('l' + 'c' * self.num_exps)

        t.add_hline()
        escape_header_fun = lambda text: text if self.spec.escape_latex_header else NoEscape(text)
        if self.spec.rotate_header:
            t.add_row([self.spec.header] + [
                NoEscape(r"\rotatebox{90}{%s}" % escape_latex(escape_header_fun(s.display_name(self.exps[s.name]))))
                for s in self.experiment_specs
            ])
        else:
            t.add_row([self.spec.header] +
                      [escape_header_fun(s.display_name(self.exps[s.name])) for s in self.experiment_specs])
        t.add_hline()

        for i, m in enumerate(self.metrics):
            # Round for display so display-equal values tie for highlighting.
            row_values = np.around(means[i, :], m.decimals)
            top_values = best_two_non_repeating(row_values, reverse=m.larger_is_better)
            row = [m.display_name]
            for v in row_values:
                # TODO: use NoEscape only if certain flag is enabled?
                row.append(
                    NoEscape(
                        render_metric(v, top_values[0], top_values[1], m.effective_display_decimals(),
                                      m.format_string)))
            t.add_row(row)

        t.add_hline()

        if self.spec.export_latex:
            os.makedirs(self.export_basepath, exist_ok=True)
            t.generate_tex(os.path.join(self.export_basepath, self.spec.export_latex))

        with self.create(Subsection(self.spec.name, numbering=False)) as p:
            p.append(FootnoteText(t))
+ +\RequirePackage{everyshi} +\RequirePackage{geometry} + +%======================= + +\pagestyle{empty} + +\EveryShipout{% + \pdfpageheight=\pagetotal + \advance\pdfpageheight by 2in + \advance\pdfpageheight by \topmargin + \advance\pdfpageheight by \textheight % This and next allow for footnotes + \advance\pdfpageheight by -\pagegoal +} + +\AtEndDocument{\pagebreak} + +\def\pageBreakSection{\pagebreak\section} + +\newlength\screenwidth +\newlength{\savedscreenwidth} + +\newcommand\SetScreen[1]{% + % Argument #1 is the screen width. + % Set appropriate layout parameters, with only a little white space + % around the text. + \setlength\screenwidth{#1}% + \setlength\savedscreenwidth{#1}% + \setlength\textwidth{#1}% + \addtolength\textwidth{-2cm}% + \geometry{layoutwidth=\screenwidth, + paperwidth=\screenwidth, + textwidth=\textwidth, + layoutheight=575cm, + paperheight=575cm, + textheight=575cm, + top=1cm, + left=1cm, + right=1cm, + hcentering=true + }% +} + +\newcommand\SetPageScreenWidth[1]{% + \setlength\savedscreenwidth{\screenwidth}% + \setlength\screenwidth{#1}% + \pdfpagewidth\screenwidth% + \setlength\textwidth{\screenwidth}% + \addtolength\textwidth{-2cm}% +} + +\newcommand\RestorePageScreenWidth{% + \setlength\screenwidth{\savedscreenwidth}% + \pdfpagewidth\screenwidth% + \setlength\textwidth{\screenwidth}% + \addtolength\textwidth{-2cm}% +} + + +% Compute a reasonable default screen width, and set it +\setlength\screenwidth{\textwidth} +\addtolength\screenwidth{1cm} +\SetScreen{\screenwidth} + +\endinput + +""" diff --git a/python/basalt/latex/util.py b/python/basalt/latex/util.py new file mode 100644 index 0000000..08f2d82 --- /dev/null +++ b/python/basalt/latex/util.py @@ -0,0 +1,61 @@ +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. 
def best_two_non_repeating(array, reverse=False):
    """Return the two best distinct values in `array`.

    With reverse=False "best" means smallest; with reverse=True it means
    largest.  Repeated occurrences of the best value are ignored when picking
    the runner-up.  When fewer than two distinct values exist, the remaining
    slot(s) stay at +inf (or -inf for reverse=True).
    """
    if reverse:
        # Larger is better: track the maximum and the largest value below it.
        best = second = -math.inf
        for v in array:
            if v > best:
                best, second = v, best
            elif second < v < best:
                second = v
    else:
        # Smaller is better: track the minimum and the smallest value above it.
        best = second = math.inf
        for v in array:
            if v < best:
                best, second = v, best
            elif best < v < second:
                second = v

    return best, second
class ExecutionStats(Munch):
    """Attribute-style view (Munch) of one execution-stats log file.

    Loads a stats_*.ubjson / stats_*.json file written by the odometry
    executable and exposes its entries as attributes.  Array-valued entries
    are converted to numpy arrays; paired "<name>__index"/"<name>__values"
    entries are re-assembled into per-frame arrays under "<name>".
    """

    def __init__(self, path):
        # path: stats log file; decoder is chosen by the file extension.
        data = self._load(path)
        if data is None:
            Munch.__init__(self)
        else:
            Munch.__init__(self, data)

    def _load(self, path):
        # Binary UBJSON vs. plain JSON, selected by extension.
        # NOTE(review): this module does `from collections import Mapping`,
        # which was removed in Python 3.10; it should come from
        # collections.abc -- confirm target Python version.
        if path.endswith("ubjson"):
            with open(path, 'rb') as f:
                data = ubjson.load(f)
        else:
            with open(path, 'r') as f:
                data = json.load(f)

        if isinstance(data, Mapping):
            data = self._convert(data)

        return munchify(data)

    def _convert(self, data):
        """Convert raw dict entries to numpy arrays.

        "<k>__index"/"<k>__values" pairs encode a ragged list of arrays:
        __values is the concatenation of all pieces and __index holds their
        start offsets.  They are split back apart and stored under "<k>";
        when all pieces have equal length the list is stacked into one
        2-D array.
        """
        data_new = dict()

        for k, v in data.items():
            if k.endswith("__values"):
                continue  # skip; processed together with __index
            elif k.endswith("__index"):
                idx = v
                values = np.array(data[k.replace("__index", "__values")])
                # convert to list of arrays according to start indices
                res = np.split(values, idx[1:])
                if all(len(res[0]) == len(x) for x in res):
                    res = np.array(res)
                data_new[k.replace("__index", "")] = res
            else:
                data_new[k] = np.array(v)

        return data_new

    def _is_imu(self):
        # NOTE(review): 15 marginalization eigenvalues per entry appears to
        # indicate a visual-inertial (IMU) run -- confirm against the logger.
        return len(self.marg_ev[0]) == 15
class Log(Munch):
    """Bundle of the per-run stats logs, attribute-accessible via Munch.

    Holds up to three ExecutionStats members: `all`, `sums` and `vio`;
    any of them may be None when the corresponding file is missing.
    """

    @staticmethod
    def load(dir):
        """Load stats_all/stats_sums/stats_vio from `dir`.

        Returns None when none of the three log files exist.
        """

        log = Log(all=load_execution_stats(dir, "stats_all"),
                  sums=load_execution_stats(dir, "stats_sums"),
                  vio=load_execution_stats(dir, "stats_vio"))

        if all([v is None for v in log.values()]):
            return None
        else:
            return log

    def __init__(self, *args, **kwargs):
        Munch.__init__(self, *args, **kwargs)

    def duration(self):
        # Span between first and last frame, in seconds.
        # NOTE(review): frame_id values appear to be nanosecond timestamps
        # (hence the 1e-9 scaling) -- confirm against the C++ logger.
        return (self.sums.frame_id[-1] - self.sums.frame_id[0]) * 1e-9
class ExperimentSpec:
    """Reference to an experiment, optionally pinned to one optimization iteration.

    Parsed from strings of the form "NAME" or "NAME@itN".  `it` is -1 when no
    iteration suffix is present (meaning: use the final iteration).
    """

    def __init__(self, string):
        name, sep, it_str = string.partition("@it")
        if sep:
            self.name = name
            self.it = int(it_str)
        else:
            self.name = string
            self.it = -1

    def display_name(self, exp):
        """Human-readable label: the experiment's display name, plus the iteration if pinned."""
        suffix = "" if self.it == -1 else " @ it{}".format(self.it)
        return exp.display_name + suffix
display mode + if any(k in spec for k in ["relative_to_column", "relative_to_experiment", "relative_to_metric"]): + # maybe overwritten by explicit "decimals" / "format_string" below + self.decimals = 3 + self.display_decimals = 3 + self.format_string = "{:.3f}" + self.geometric_mean = True + + if "display_name" in spec: + self.display_name = spec.display_name + if "decimals" in spec: + self.decimals = spec.decimals + if "display_decimals" in spec: + self.display_decimals = spec.display_decimals + if "relative_to_column" in spec: + self.relative_to_column = spec.relative_to_column + if "relative_to_experiment" in spec: + self.relative_to_experiment = ExperimentSpec(spec.relative_to_experiment) + if "relative_to_metric" in spec: + self.relative_to_metric = spec.relative_to_metric + if "ratio" in spec: + self.ratio = spec.ratio + if "format_string" in spec: + self.format_string = spec.format_string + if "highlight_top" in spec: + self.highlight_top = spec.highlight_top + if "larger_is_better" in spec: + self.larger_is_better = spec.larger_is_better + if "exclude_columns_highlight" in spec: + self.exclude_columns_highlight = spec.exclude_columns_highlight + if "geometric_mean" in spec: + self.geometric_mean = spec.geometric_mean + if "failed_threshold" in spec: + self.failed_threshold = spec.failed_threshold + + def effective_display_decimals(self): + if self.display_decimals is not None: + return self.display_decimals + else: + return self.decimals + + def get_value(self, exps, e, s, it): + #try: + value = self.accessor(e.runs[s].log, it) + #except AttributeError as err: + # raise + + if self.relative_to_metric is not None: + relative_to_metric_accessor = self.relative_to_metric.accessor + else: + relative_to_metric_accessor = self.accessor + + if self.relative_to_experiment is not None: + relative_to_log = exps[self.relative_to_experiment.name].runs[s].log + relative_to_it = self.relative_to_experiment.it + else: + relative_to_log = e.runs[s].log + relative_to_it = 
def peak_memory_opt(l, it):
    """Peak resident memory (in MiB) at the end of optimization `it`.

    `l.all.num_it` appears to hold the number of inner iterations per
    optimization call, so its cumulative sum locates the last `l.all` entry
    belonging to optimization `it`.  With it == -1 the overall last entry
    is used.
    """
    if it == -1:
        idx = -1
    else:
        # Index of the last inner-iteration entry of optimization `it`.
        idx = int(l.all.num_it[:it + 1].sum()) - 1
    bytes_peak = l.all.resident_memory_peak[idx]
    # Convert bytes -> MiB.
    return bytes_peak / 1024**2
def metrics_from_config(spec):
    """Instantiate Metric objects from a config list.

    Each entry is either the name of a predefined metric in `metric_desc`,
    or a mapping with a `name` key plus overrides applied via
    Metric.set_config.  A `relative_to_metric` override is itself resolved
    recursively into a Metric.
    """

    def build(entry):
        # Plain string: a shallow copy of the predefined metric, unmodified.
        if isinstance(entry, str):
            return copy.copy(metric_desc[entry])
        # Mapping: start from the named metric, then apply the overrides.
        metric = copy.copy(metric_desc[entry.name])
        metric.set_config(entry)
        if metric.relative_to_metric is not None:
            metric.relative_to_metric = build(metric.relative_to_metric)
        return metric

    return [build(entry) for entry in spec]
ax.semilogy(log.marg_ns[1:, 6], ":", label="rand", color=default_colors[3]) + + #ax.semilogy(np.min(ns, axis=1), "-.", color=default_colors[0]) + #ax.semilogy(np.max(ns, axis=1), ":", color=default_colors[0]) + #ax.semilogy(not_ns, "-", label="foo", color=default_colors[0]) + ax.set_title("nullspace") + + ax.legend(loc="center right") + + ev_all = np.array([x[0:7] for x in log.marg_ev[3:]]) + ev = np.array([x[x > 1e-5][0:7] for x in log.marg_ev[3:]]) + #ev = np.array([x[0:7] for x in log.marg_ev[3:]]) + + ev_ns_min = ev[:, 0] + if log._is_imu(): + print("is vio") + ev_ns_max = ev[:, 3] + ev_not_ns = ev[:, 4] + else: + print("is vo") + ev_ns_max = ev[:, 5] + ev_not_ns = ev[:, 6] + + if True: + ax = axs[i] + i += 1 + + ax.semilogy(ev_ns_min, "-.", color=default_colors[0]) + ax.semilogy(ev_ns_max, ":", color=default_colors[0]) + ax.semilogy(ev_not_ns, "-", label="foo", color=default_colors[0]) + + ax.set_title("eigenvalues (filtered all ev < 1e-5)") + #ax.set_title("eigenvalues") + #ax.legend() + + if True: + ax = axs[i] + i += 1 + + ax.plot([sum(x < 1e-5) for x in ev_all], label="x < 1e-5", color=default_colors[0]) + + ax.set_title("zero ev count") + ax.legend() + + if False: + ax = axs[i] + i += 1 + + ax.plot([sum(x == 0) for x in ev_all], label="== 0", color=default_colors[0]) + + ax.set_title("zero ev count") + ax.legend() + + if True: + ax = axs[i] + i += 1 + + ax.plot([sum(x < 0) for x in ev_all], label="< 0", color=default_colors[0]) + + #ax.set_title("zero ev count") + ax.legend() + + if False: + ax = axs[i] + i += 1 + + ax.plot([sum((x > 0) & (x <= 1e-8)) for x in ev_all], label="0 < x <= 1e-8", color=default_colors[0]) + + #ax.set_title("zero ev count") + ax.legend() + + if False: + ax = axs[i] + i += 1 + + ax.plot([sum(x < -1e-8) for x in ev_all], label="< -1e-8", color=default_colors[0]) + + #ax.set_title("zero ev count") + ax.legend() + + if True: + ax = axs[i] + i += 1 + + #ax.plot([sum((1e-6 <= x) & (x <= 1e2)) for x in ev_all], label="1e-8 <= x <= 
def main():
    """Command-line entry point: plot nullspace/eigenvalue statistics from one stats log."""
    # The first positional argument of ArgumentParser is `prog` (the program
    # name shown in usage), not `description`; pass description= explicitly so
    # --help actually shows it.  The previous text ("Load multiple PBA logs...")
    # was also copy-pasted from a different tool: this script loads exactly one
    # log file.
    parser = argparse.ArgumentParser(
        description="Plot nullspace and eigenvalue statistics from a single execution-stats log.")
    parser.add_argument("path", help="log file path")
    # --no-gui suppresses the interactive window; the old help text ("show
    # plots") said the opposite of what the flag does.
    parser.add_argument("--no-gui", action="store_true", help="do not show plots in a GUI window")
    parser.add_argument("--save", help="save plots to specified file")

    args = parser.parse_args()

    plot(args)
+ + A single run is one invocation of odometry with a specific config on a specific sequence. + This is meant to be used on directories created with the 'generate-batch-configs' and 'run-all-in' scripts. + It's best-effort, loading as many of the files as are present. + """ + + def __init__(self, dirpath, seq_name_mapping): + self.dirpath = dirpath + + self.config = load_json_if_exists(os.path.join(dirpath, 'basalt_config.json')) + self.output = load_text_if_exists(os.path.join(dirpath, 'output.log')) + self.status = load_text_if_exists(os.path.join(dirpath, 'status.log')) + self.traj_est = load_trajectory_tum_if_exists(os.path.join(dirpath, 'trajectory.txt')) + self.traj_gt = load_trajectory_tum_if_exists(os.path.join(dirpath, 'groundtruth.txt')) + + self.log = Log.load(dirpath) + + self.seq_name = self._infer_sequence_name(self.config, dirpath, seq_name_mapping) + + print("loaded {} from '{}'".format(self.seq_name, dirpath)) + + def is_imu(self): + return self.config.batch_run.args["use-imu"] in [1, "1", True, "true"] + + def is_failed(self): + if self.log is None: + return True + else: + return "Completed" not in self.status + + def failure_str(self): + if not self.is_failed(): + return "" + if self.output: + if "Some of your processes may have been killed by the cgroup out-of-memory handler" in self.output: + return "OOM" + if "DUE TO TIME LIMIT" in self.output: + return "OOT" + return "x" + + @staticmethod + def _infer_sequence_name(config, dirpath, name_mapping): + """Tries to infer the sequence name from the config, or falls back to the parent folder name""" + seq_name = "" + try: + type = config.batch_run.args["dataset-type"] + path = config.batch_run.args["dataset-path"] + seq_name = os.path.basename(path) + if type == "euroc": + if seq_name.startswith("dataset-"): + # assume tumvi + seq_name = seq_name.replace("dataset-", "tumvi-").split("_")[0] + else: + # assume euroc + s = seq_name.split("_") + seq_name = "euroc{}{}".format(s[0], s[1]) + elif type == 
"kitti": + # assume kitti + seq_name = "kitti{}".format(seq_name) + except: + pass + + # Fallback to detecting the sequence name base on the last component of the parent folder. This is intended + # to work for run folders created with the 'generate-batch-configs' script, assuming the sequence is the + # last component in '_batch.combinations'. + if seq_name == "": + seq_name = os.path.basename(dirpath).split("_")[-1] + + # optionally remap the sequence name to something else as defined in the experiments config + if isinstance(name_mapping, Mapping) and seq_name in name_mapping: + seq_name = name_mapping[seq_name] + + return seq_name + + @staticmethod + def is_run_dir(dirpath): + """Returns True if the folder may be a run directory, based on the present files + + This is intended to be used for auto-detecting run directories in a file tree. + """ + + files = [ + 'status.log', + 'slurm-output.log', + 'output.log', + 'stats_all.ubjson', + 'stats_all.json', + 'stats_sums.ubjson', + 'stats_sums.json', + 'stats_vio.ubjson', + 'stats_vio.json', + ] + for f in files: + if os.path.isfile(os.path.join(dirpath, f)): + return True + return False diff --git a/python/basalt/util.py b/python/basalt/util.py new file mode 100644 index 0000000..d9631f3 --- /dev/null +++ b/python/basalt/util.py @@ -0,0 +1,72 @@ +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. 
# key for 'human' sorting
def alphanum(key):
    """Sort key that orders embedded numbers numerically ("a2" < "a10").

    Splits `key` into alternating text and number chunks; number chunks
    (optionally signed, optionally with a decimal part) are converted to
    float so they compare numerically instead of lexicographically.

    Fixes over the previous version: `str.isdigit()` is False for signed or
    decimal strings such as "-2" or "1.5", so those chunks were left as
    text, and the old pattern could match the empty string, littering the
    key with empty chunks.
    """

    def convert(text):
        # float() accepts signs and decimal points, unlike str.isdigit().
        try:
            return float(text)
        except ValueError:
            return text

    # Require at least one digit in the numeric group so it never matches "",
    # and drop the empty strings re.split produces at chunk boundaries.
    return [convert(c) for c in re.split(r'([-+]?[0-9]+(?:\.[0-9]+)?)', key) if c]
def callback(data):
    """ROS subscriber callback: append one mocap pose to the output CSV.

    Writes the message timestamp shifted into the local clock via
    `time_offset`, followed by translation (x, y, z) and rotation
    quaternion (w, x, y, z).
    """
    global out_file, time_offset
    if out_file is None:
        # Output file not opened (yet) -- drop the message.
        return

    if time_offset is None:
        # Fix the offset between the local clock and the mocap timestamps on
        # the first message.  Use an explicit None check: the previous
        # truthiness test (`if not time_offset:`) would re-compute the offset
        # on every message whenever the computed offset happened to be zero.
        time_offset = rospy.Time().now() - data.header.stamp

    out_file.write('{},{},{},{},{},{},{},{}\n'.format(
        data.header.stamp + time_offset,
        data.transform.translation.x,
        data.transform.translation.y,
        data.transform.translation.z,
        data.transform.rotation.w,
        data.transform.rotation.x,
        data.transform.rotation.y,
        data.transform.rotation.z
    ))
+# + +import sys +import math +import numpy as np +import os +from string import Template +import cv2 +import argparse + +parser = argparse.ArgumentParser(description='Convert KITTI calibration to basalt and save it int the dataset folder as basalt_calib.json.') +parser.add_argument('-d', '--dataset-path', required=True, help="Path to the dataset in KITTI format") +args = parser.parse_args() + +dataset_path = args.dataset_path + +print(dataset_path) + +kitti_calib_file = dataset_path + '/calib.txt' + + +calib_template = Template('''{ + "value0": { + "T_imu_cam": [ + { + "px": 0.0, + "py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + }, + { + "px": $px, + "py": 0.0, + "pz": 0.0, + "qx": 0.0, + "qy": 0.0, + "qz": 0.0, + "qw": 1.0 + } + ], + "intrinsics": [ + { + "camera_type": "pinhole", + "intrinsics": { + "fx": $fx0, + "fy": $fy0, + "cx": $cx0, + "cy": $cy0 + } + }, + { + "camera_type": "pinhole", + "intrinsics": { + "fx": $fx1, + "fy": $fy1, + "cx": $cx1, + "cy": $cy1 + } + } + ], + "resolution": [ + [ + $rx, + $ry + ], + [ + $rx, + $ry + ] + ], + "vignette": [], + "calib_accel_bias": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "calib_gyro_bias": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "imu_update_rate": 0.0, + "accel_noise_std": [0.0, 0.0, 0.0], + "gyro_noise_std": [0.0, 0.0, 0.0], + "accel_bias_std": [0.0, 0.0, 0.0], + "gyro_bias_std": [0.0, 0.0, 0.0], + "cam_time_offset_ns": 0 + } +} +''') + + +with open(kitti_calib_file, 'r') as stream: + lines = (' '.join([x.strip('\n ') for x in stream.readlines() if x.strip('\n ') ])).split(' ') + + if len(lines) != 52: + print('Issues loading calibration') + print(lines) + + P0 = np.array([float(x) for x in lines[1:13]]).reshape(3,4) + P1 = np.array([float(x) for x in lines[14:26]]).reshape(3,4) + print('P0\n', P0) + print('P1\n', P1) + + tx = -P1[0,3]/P1[0,0] + + img = cv2.imread(dataset_path + '/image_0/000000.png') + 
rx = img.shape[1] + ry = img.shape[0] + + values = {'fx0': P0[0,0], 'fy0': P0[1,1], 'cx0': P0[0,2], 'cy0': P0[1,2], 'fx1': P1[0,0], 'fy1': P1[1,1], 'cx1': P1[0,2], 'cy1': P1[1,2], 'px': tx, 'rx': rx, 'ry': ry} + + calib = calib_template.substitute(values) + print(calib) + + with open(dataset_path + '/basalt_calib.json', 'w') as stream2: + stream2.write(calib) \ No newline at end of file diff --git a/scripts/basalt_response_calib.py b/scripts/basalt_response_calib.py new file mode 100755 index 0000000..5f6d3dc --- /dev/null +++ b/scripts/basalt_response_calib.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. +# + +import sys +import math +import os +import cv2 +import argparse + +import numpy as np +from matplotlib import pyplot as plt + +parser = argparse.ArgumentParser(description='Response calibration.') +parser.add_argument('-d', '--dataset-path', required=True, help="Path to the dataset in Euroc format") +args = parser.parse_args() + + +dataset_path = args.dataset_path + +print(dataset_path) + +timestamps = np.loadtxt(dataset_path + '/mav0/cam0/data.csv', usecols=[0], delimiter=',', dtype=np.int64) +exposures = np.loadtxt(dataset_path + '/mav0/cam0/exposure.csv', usecols=[1], delimiter=',', dtype=np.int64).astype(np.float64) * 1e-6 +pixel_avgs = list() + +if timestamps.shape[0] != exposures.shape[0]: print("timestamps and exposures do not match") + +imgs = [] + +# check image data. 
def opt_irradiance():
    """One half of the alternating optimization: re-estimate the irradiance image.

    Holds the current inverse response `inv_resp` fixed and computes, per
    pixel, the exposure-weighted least-squares irradiance
        irr = sum_i(inv_resp[img_i] * t_i) / sum_i(t_i^2).
    Negative inverse-response values mark saturated intensities; those
    samples are excluded.  Pixels with no usable samples are set to -1.
    """
    # Per-image irradiance estimates scaled by exposure: x_i * t_i.
    corrected_imgs = inv_resp[imgs] * exposures[:, np.newaxis, np.newaxis]
    # Per-sample weights t_i^2 for the least-squares denominator.
    times = np.ones_like(corrected_imgs) * (exposures**2)[:, np.newaxis, np.newaxis]

    # Exclude saturated samples (flagged by negative inverse response).
    times[corrected_imgs < 0] = 0
    corrected_imgs[corrected_imgs < 0] = 0

    denom = np.sum(times, axis=0)
    idx = (denom != 0)
    irr = np.sum(corrected_imgs, axis=0)
    irr[idx] /= denom[idx]
    irr[denom == 0] = -1.0  # no valid observations for this pixel
    return irr
+ax1.set(xlabel='Image Intensity', ylabel='Irradiance Value') +ax1.set_title('Inverse Response Function') + + +ax2.imshow(irradiance) +ax2.set_title('Irradiance Image') +plt.show() + + diff --git a/scripts/basalt_verify_dataset.py b/scripts/basalt_verify_dataset.py new file mode 100755 index 0000000..0e13f29 --- /dev/null +++ b/scripts/basalt_verify_dataset.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. +# + +import sys +import math +import os +import argparse + +import numpy as np + + +parser = argparse.ArgumentParser(description='Check the dataset. Report if any images are missing.') +parser.add_argument('-d', '--dataset-path', required=True, help="Path to the dataset in Euroc format") +args = parser.parse_args() + + +dataset_path = args.dataset_path + +print(dataset_path) + +timestamps = {} +exposures = {} + +for sensor in ['cam0', 'cam1', 'imu0']: + data = np.loadtxt(dataset_path + '/mav0/' + sensor + '/data.csv', usecols=[0], delimiter=',', dtype=np.int64) + timestamps[sensor] = data + +# check if dataset is OK... +for key, value in timestamps.items(): + times = value * 1e-9 + min_t = times.min() + max_t = times.max() + interval = max_t - min_t + diff = times[1:] - times[:-1] + print('==========================================') + print('sensor', key) + print('min timestamp', min_t) + print('max timestamp', max_t) + print('interval', interval) + print('hz', times.shape[0] / interval) + print('min time between consecutive msgs', diff.min()) + print('max time between consecutive msgs', diff.max()) + for i, d in enumerate(diff): + # Note: 0.001 is just a hacky heuristic, since we have nothing faster than 1000Hz. Should maybe be topic-specific. 
# check image data: every camera timestamp must have an image file and a
# matching exposure entry.
img_extensions = ['.png', '.jpg', '.webp']
for key, value in timestamps.items():
    if not key.startswith('cam'):
        continue
    for v in value:
        # An image may be stored with any of the supported extensions.
        img_exists = False
        for e in img_extensions:
            if os.path.exists(dataset_path + '/mav0/' + key + '/data/' + str(v) + e):
                img_exists = True

        if not img_exists:
            print('No image data for ' + key + ' at timestamp ' + str(v))

    exposure_file = dataset_path + '/mav0/' + key + '/exposure.csv'
    if not os.path.exists(exposure_file):
        print('No exposure data for ' + key)
        continue

    exposure_data = np.loadtxt(exposure_file, delimiter=',', dtype=np.int64)
    for v in value:
        idx = np.searchsorted(exposure_data[:, 0], v)
        # np.searchsorted returns len(exposure_data) when v is larger than
        # every recorded timestamp; guard before indexing to avoid an
        # IndexError in that case.
        if idx >= exposure_data.shape[0] or exposure_data[idx, 0] != v:
            print('No exposure data for ' + key + ' at timestamp ' + str(v))
+# +# Example: +# ./generate-batch-config.py /path/to/folder +# +# It looks for the file named `basalt_batch_config.toml` inside the given folder. + +import os +import toml +import json +import argparse +from pprint import pprint +from copy import deepcopy +from collections import OrderedDict +import itertools +import shutil +import datetime +import sys + + +def isdict(o): + return isinstance(o, dict) or isinstance(o, OrderedDict) + + +def merge_config(a, b): + "merge b into a" + for k, v in b.items(): + if k in a: + if isdict(v) and isdict(a[k]): + #print("dict {}".format(k)) + merge_config(a[k], b[k]) + elif not isdict(v) and not isdict(a[k]): + a[k] = deepcopy(v) + #print("not dict {}".format(k)) + else: + raise RuntimeError("Incompatible types for key {}".format(k)) + else: + a[k] = deepcopy(v) + + +def save_config(template, configs, combination, path_prefix): + filename = os.path.join(path_prefix, "basalt_config_{}.json".format("_".join(combination))) + config = deepcopy(template) + #import ipdb; ipdb.set_trace() + for override in combination: + merge_config(config, configs[override]) + #import ipdb; ipdb.set_trace() + with open(filename, 'w') as f: + json.dump(config, f, indent=4) + print(filename) + + +def generate_configs(root_path, cmdline=[], overwrite_existing=False, revision_override=None): + + # load and parse batch config file + batch_config_path = os.path.join(root_path, "basalt_batch_config.toml") + template = toml.load(batch_config_path, OrderedDict) + cfg = template["_batch"] + del template["_batch"] + + # parse batch configuration + revision = str(cfg.get("revision", 0)) if revision_override is None else revision_override + configs = cfg["config"] + alternatives = cfg.get("alternatives", dict()) + combinations = cfg["combinations"] + + # prepare output directory + date_str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + outdir = root_path if revision is None else os.path.join(root_path, revision) + if overwrite_existing and 
os.path.exists(outdir): + print("WARNING: output directory exists, overwriting existing files: {}".format(outdir)) + else: + os.makedirs(outdir) + shutil.copy(batch_config_path, outdir) + with open(os.path.join(outdir, "timestamp"), 'w') as f: + f.write(date_str) + with open(os.path.join(outdir, "commandline"), 'w') as f: + f.write(cmdline) + + # expand single entry in combination array + def expand_one(x): + if x in alternatives: + return alternatives[x] + elif isinstance(x, list): + # allow "inline" alternative + return x + else: + return [x] + + def flatten(l): + for el in l: + if isinstance(el, list): + yield from flatten(el) + else: + yield el + + # generate all configurations + for name, description in combinations.items(): + if True or len(combinations) > 1: + path_prefix = os.path.join(outdir, name) + if not (overwrite_existing and os.path.exists(path_prefix)): + os.mkdir(path_prefix) + else: + path_prefix = outdir + expanded = [expand_one(x) for x in description] + for comb in itertools.product(*expanded): + # flatten list to allow each alternative to reference multiple configs + comb = list(flatten(comb)) + save_config(template, configs, comb, path_prefix) + + +def main(): + cmdline = str(sys.argv) + parser = argparse.ArgumentParser("Generate basalt configurations from a batch config file.") + parser.add_argument("path", help="path to look for config and templates") + parser.add_argument("--revision", help="override revision") + parser.add_argument("--force", "-f", action="store_true", help="overwrite existing files") + args = parser.parse_args() + generate_configs(args.path, cmdline, args.force, args.revision) + + +if __name__ == "__main__": + main() diff --git a/scripts/batch/generate-tables.py b/scripts/batch/generate-tables.py new file mode 100755 index 0000000..03ba10c --- /dev/null +++ b/scripts/batch/generate-tables.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. 
+# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. +# + +# Dependencies: +# pip3 install -U --user py_ubjson matplotlib numpy munch scipy pylatex toml + +# also: latexmk and latex +# +# Ubuntu: +# sudo apt install texlive-latex-extra latexmk +import os +import sys + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "python"))) + +import basalt.generate_tables + +basalt.generate_tables.main() diff --git a/scripts/batch/list-jobs.sh b/scripts/batch/list-jobs.sh new file mode 100755 index 0000000..8a9583c --- /dev/null +++ b/scripts/batch/list-jobs.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + +# +# Usage: +# list-jobs.sh DIRNAME [DIRNAME ...] [-s|--short] [-o|--only STATUS] +# +# Lists all batch jobs found in DIRNAME. If the optional argument +# STATUS is passed, only lists jobs with that status. Multiple +# statuses can be passed in a space-separated string. +# +# Possible status arguments: queued, running, completed, failed, unknown +# You can also use 'active' as a synonym for 'queued running unknown' + +# exit on error +set -o errexit -o pipefail + + +# we need GNU getopt... +GETOPT=getopt +if [[ "$OSTYPE" == "darwin"* ]]; then + if [ -f /usr/local/opt/gnu-getopt/bin/getopt ]; then + GETOPT="/usr/local/opt/gnu-getopt/bin/getopt" + fi +fi + +# option parsing, see: https://stackoverflow.com/a/29754866/1813258 +usage() { echo "Usage: `basename $0` DIRNAME [DIRNAME ...] [-s|--short] [-o|--only STATUS]" ; exit 1; } + +# -allow a command to fail with !’s side effect on errexit +# -use return value from ${PIPESTATUS[0]}, because ! hosed $? +! 
"$GETOPT" --test > /dev/null +if [[ ${PIPESTATUS[0]} -ne 4 ]]; then + echo 'I’m sorry, `getopt --test` failed in this environment.' + exit 1 +fi + +OPTIONS=hsjo: +LONGOPTS=help,short,jobids,only: + +# -regarding ! and PIPESTATUS see above +# -temporarily store output to be able to check for errors +# -activate quoting/enhanced mode (e.g. by writing out “--options”) +# -pass arguments only via -- "$@" to separate them correctly +! PARSED=$("$GETOPT" --options=$OPTIONS --longoptions=$LONGOPTS --name "`basename $0`" -- "$@") +if [[ ${PIPESTATUS[0]} -ne 0 ]]; then + # e.g. return value is 1 + # then getopt has complained about wrong arguments to stdout + usage +fi +# read getopt’s output this way to handle the quoting right: +eval set -- "$PARSED" + +SHORT=n +ONLY="" +JOBIDS=n +# now enjoy the options in order and nicely split until we see -- +while true; do + case "$1" in + -h|--help) usage ;; + -s|--short) SHORT=y; shift ;; + -j|--jobids) JOBIDS=y; shift ;; + -o|--only) ONLY="$2"; shift 2 ;; + --) shift; break ;; + *) echo "Programming error"; exit 3 ;; + esac +done + +# handle non-option arguments --> directories +if [[ $# -lt 1 ]]; then + echo "Error: Pass at least one folder" + usage +fi +DIRS=("$@") + +# status aliases: +ONLY="${ONLY/active/queued running unknown}" +ONLY="${ONLY/notcompleted/queued running failed unknown}" + +contains() { + [[ $1 =~ (^| )$2($| ) ]] && return 0 || return 1 +} + +display() { + if [ -z "$ONLY" ] || contains "$ONLY" $2; then + if [ $SHORT = y ]; then + echo "$1" + else + echo -n "$1 : $2" + if [ -n "$3" ]; then + echo -n " - $3" + fi + echo "" + fi + fi +} + +for d in "${DIRS[@]}"; do + for f in `find "$d" -name status.log | sort`; do + DIR=`dirname "$f"` + + # ignore backup folder from "rerun" scripts + if [[ `basename $DIR` = results-backup* ]]; then + continue + fi + + if ! 
grep Started "$f" > /dev/null; then + display "$DIR" unknown "not started" + continue + fi + + # job has started: + + if grep Completed "$f" > /dev/null ; then + display "$DIR" completed "" + continue + fi + + # job has started, but not completed (cleanly) + + # check signs of termination + if [ -f "$DIR"/output.log ] && grep "Command terminated by signal" "$DIR"/output.log > /dev/null; then + display "$DIR" failed killed "`grep -oP 'Command terminated by \Ksignal .+' "$DIR"/output.log`" + continue + fi + + # might be running or aborted + display "$DIR" unknown started + + done +done diff --git a/scripts/batch/plot.py b/scripts/batch/plot.py new file mode 100755 index 0000000..007661b --- /dev/null +++ b/scripts/batch/plot.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python3 +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. +# + +import os +import sys + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "python"))) + +import basalt.nullspace + +basalt.nullspace.main() diff --git a/scripts/batch/query-config.py b/scripts/batch/query-config.py new file mode 100755 index 0000000..747ee86 --- /dev/null +++ b/scripts/batch/query-config.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. 
+# + +# +# Example usage: +# $ ./query-config.py path/to/basalt_config.json value0.\"config.vio_debug\" +# 10G + +import json +import toml +import argparse +import sys + + +def parse_query(query): + query_list = [] + + quote_open_char = None + curr = "" + for c in query: + if quote_open_char: + if c == quote_open_char: + quote_open_char = None + else: + curr += c + elif c in ['"', "'"]: + quote_open_char = c + elif c == '.': + query_list.append(curr) + curr = "" + else: + curr += c + query_list.append(curr) + + return query_list + + +def query_config(path, query, default_value=None, format_env=False, format_cli=False): + query_list = parse_query(query) + with open(path) as f: + cfg = json.load(f) + try: + curr = cfg + for q in query_list: + curr = curr[q] + result = curr + except: + if default_value is None: + result = "" + else: + result = default_value + if isinstance(result, dict): + if format_env: + lines = [] + for k, v in result.items(): + # NOTE: assumes no special escaping is necessary + lines.append("{}='{}'".format(k, v)) + return "\n".join(lines) + elif format_cli: + args = ["--{} {}".format(k, v) for k, v in result.items()] + return " ".join(args) + else: + result = toml.dumps(result) + else: + result = "{}".format(result) + return result + + +def main(): + parser = argparse.ArgumentParser("Parse toml file and print content of query key.") + parser.add_argument("config_path", help="path to toml file") + parser.add_argument("query", help="query string") + parser.add_argument("default_value", help="value printed if query is not successful", nargs='?') + parser.add_argument( + "--format-env", + action="store_true", + help="Expect dictionary as query result and output like environment variables, i.e. VAR='VALUE' lines.") + parser.add_argument("--format-cli", + action="store_true", + help="Expect dictionary as query result and output like cli arguments, i.e. 
--VAR 'VALUE'.") + args = parser.parse_args() + + res = query_config(args.config_path, + args.query, + default_value=args.default_value, + format_env=args.format_env, + format_cli=args.format_cli) + print(res) + + +if __name__ == "__main__": + main() diff --git a/scripts/batch/rerun-failed-in.sh b/scripts/batch/rerun-failed-in.sh new file mode 100755 index 0000000..a81fc26 --- /dev/null +++ b/scripts/batch/rerun-failed-in.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + +# +# Usage: +# rerun-failed-in.sh FOLDER +# +# Reruns all failed experiments that are found in a given folder. + +set -e + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +"$SCRIPT_DIR"/list-jobs.sh "$1" -s -o failed | while read f; do + echo "$f" + "$SCRIPT_DIR"/rerun-one-in.sh "$f" || true +done diff --git a/scripts/batch/rerun-one-in.sh b/scripts/batch/rerun-one-in.sh new file mode 100755 index 0000000..eceb4fe --- /dev/null +++ b/scripts/batch/rerun-one-in.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. 
+## + +set -e +set -x + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +FOLDER="${1}" + +cd "$FOLDER" + +# backup previous files +DATE=`date +'%Y%m%d-%H%M%S'` +BACKUP_FOLDER=results-backup-$DATE +for f in *.jobid *.log stats*.*json; do + if [ -f $f ]; then + mkdir -p $BACKUP_FOLDER + mv $f $BACKUP_FOLDER/ + fi +done + +echo "Created" > status.log +echo "Restarted" >> status.log + +echo "Starting run in $PWD" +"$SCRIPT_DIR"/run-one.sh "$PWD" diff --git a/scripts/batch/run-all-in.sh b/scripts/batch/run-all-in.sh new file mode 100755 index 0000000..2ea5219 --- /dev/null +++ b/scripts/batch/run-all-in.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + + +# given folder with basalt_config_*.json, run optimization for each config in +# corresponding subfolder + +set -e +set -x + +# number of logical cores on linux and macos +NUM_CORES=`(which nproc > /dev/null && nproc) || sysctl -n hw.logicalcpu || echo 1` + +echo "Running on '`hostname`', nproc: $NUM_CORES" + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +# loop over all arguments, and in each folder find configs and run them +for FOLDER in "$@" +do + + pushd "$FOLDER" + + FILE_PATTERN='basalt_config_*.json' + FILE_REGEX='basalt_config_(.*)\.json' + + DATE=`date +'%Y%m%d-%H%M%S'` + mkdir -p $DATE + + declare -a RUN_DIRS=() + + for f in `find . 
-name "$FILE_PATTERN" -type f | sort`; do + if [[ `basename $f` =~ $FILE_REGEX ]]; then + RUN_DIR=${DATE}/`dirname $f`/${BASH_REMATCH[1]} + echo "Creating run with config $f in $RUN_DIR" + mkdir -p "$RUN_DIR" + cp $f "$RUN_DIR"/basalt_config.json + echo "Created" > "$RUN_DIR"/status.log + RUN_DIRS+=($RUN_DIR) + else + echo "Skipping $f" + fi + done + + for RUN_DIR in "${RUN_DIRS[@]}"; do + echo "Starting run in $RUN_DIR" + "$SCRIPT_DIR"/run-one.sh "$RUN_DIR" || true + done + + popd + +done diff --git a/scripts/batch/run-one.sh b/scripts/batch/run-one.sh new file mode 100755 index 0000000..50cc1b8 --- /dev/null +++ b/scripts/batch/run-one.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + +# +# This script runs on the slurm nodes to run rootba for one config. + +set -e +set -o pipefail +set -x + +error() { + local parent_lineno="$1" + local message="$2" + local code="${3:-1}" + if [[ -n "$message" ]] ; then + echo "Error on or near line ${parent_lineno}: ${message}; exiting with status ${code}" + else + echo "Error on or near line ${parent_lineno}; exiting with status ${code}" + fi + echo "Failed" >> status.log + exit "${code}" +} +trap 'error ${LINENO}' ERR + +# number of logical cores on linux and macos +NUM_CORES=`(which nproc > /dev/null && nproc) || sysctl -n hw.logicalcpu || echo 1` + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +BASALT_BIN_DIR="${BASALT_BIN_DIR:-$SCRIPT_DIR/../../build}" + +FOLDER="${1}" + +cd "$FOLDER" + +if ! which time 2> /dev/null; then + echo "Did not find 'time' executable. Not installed?" 
+ exit 1 +fi + +if [[ "$OSTYPE" == "darwin"* ]]; then + TIMECMD="`which time` -lp" +else + TIMECMD="`which time` -v" +fi + +echo "Started" >> status.log + +# set environment variables according to config +while read l; do + if [ -n "$l" ]; then + eval "export $l" + fi +done <<< `"$SCRIPT_DIR"/query-config.py basalt_config.json batch_run.env --format-env` + +# lookup executable to run +EXECUTABLE=`"$SCRIPT_DIR"/query-config.py basalt_config.json batch_run.executable basalt_vio` + +# lookup args +ARGS=`"$SCRIPT_DIR"/query-config.py basalt_config.json batch_run.args --format-cli` + +CMD="$BASALT_BIN_DIR/$EXECUTABLE" + +echo "Running on '`hostname`', nproc: $NUM_CORES, bin: $CMD" + +# run as many times as specified (for timing tests to make sure filecache is hot); default is once +rm -f output.log +NUM_RUNS=`"$SCRIPT_DIR"/query-config.py basalt_config.json batch_run.num_runs 1` +echo "Will run $NUM_RUNS times." +for i in $(seq $NUM_RUNS); do + echo ">>> Run $i" |& tee -a output.log + { $TIMECMD "$CMD" $ARGS; } |& tee -a output.log +done + +echo "Completed" >> status.log diff --git a/scripts/clang-format-all.sh b/scripts/clang-format-all.sh new file mode 100755 index 0000000..bda31dc --- /dev/null +++ b/scripts/clang-format-all.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + +# Format all source files in the project. +# Optionally take folder as argument; default is full inlude and src dirs. 
+ +set -e + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +FOLDER="${1:-$SCRIPT_DIR/../include $SCRIPT_DIR/../src $SCRIPT_DIR/../test/src}" + +CLANG_FORMAT_COMMANDS="clang-format-11 clang-format-10 clang-format-9 clang-format" + +# find the first available command: +for CMD in $CLANG_FORMAT_COMMANDS; do + if hash $CMD 2>/dev/null; then + CLANG_FORMAT_CMD=$CMD + break + fi +done + +if [ -z $CLANG_FORMAT_CMD ]; then + echo "clang-format not installed..." + exit 1 +fi + +# clang format check version +MAJOR_VERSION_NEEDED=8 + +MAJOR_VERSION_DETECTED=`$CLANG_FORMAT_CMD -version | sed -n -E 's/.*version ([0-9]+).*/\1/p'` +if [ -z $MAJOR_VERSION_DETECTED ]; then + echo "Failed to parse major version (`$CLANG_FORMAT_CMD -version`)" + exit 1 +fi + +echo "clang-format version $MAJOR_VERSION_DETECTED (`$CLANG_FORMAT_CMD -version`)" + +if [ $MAJOR_VERSION_DETECTED -lt $MAJOR_VERSION_NEEDED ]; then + echo "Looks like your clang format is too old; need at least version $MAJOR_VERSION_NEEDED" + exit 1 +fi + +find $FOLDER -iname "*.?pp" -or -iname "*.h" | xargs $CLANG_FORMAT_CMD -verbose -i diff --git a/scripts/compare_calib.py b/scripts/compare_calib.py new file mode 100755 index 0000000..5d76fbd --- /dev/null +++ b/scripts/compare_calib.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. 
+# + + +import argparse +import json +import numpy as np +from scipy.spatial.transform import Rotation + + +def print_abs_rel(info, v_0, v_1): + diff = np.abs(np.linalg.norm(v_0 - v_1)) + out = f'{info}:\t{diff:.5f}' + + if diff < 10e-7: + out += ' (0.0%)' + else: + out += f' ({diff / (np.abs(np.linalg.norm(v_0)) * 100.0):.7f}%)' + + print(out) + + +def main(calib_path_1, calib_path_2): + with open(calib_path_1, 'r') as c_1, open(calib_path_2, 'r') as c_2: + calib0 = json.load(c_1) + calib1 = json.load(c_2) + + for i, (t_imu_cam_0, t_imu_cam_1) in enumerate( + zip(calib0['value0']['T_imu_cam'], calib1['value0']['T_imu_cam'])): + print(f'\nCamera {i} transformation differences') + t_0 = np.array(list(t_imu_cam_0.values())[0:2]) + t_1 = np.array(list(t_imu_cam_1.values())[0:2]) + r_0 = Rotation(list(t_imu_cam_0.values())[3:7]) + r_1 = Rotation(list(t_imu_cam_1.values())[3:7]) + + print_abs_rel(f'Transformation', t_0, t_1) + print_abs_rel(f'Rotation', r_0.as_rotvec(), r_1.as_rotvec()) + + for i, (intrinsics0, intrinsics1) in enumerate( + zip(calib0['value0']['intrinsics'], calib1['value0']['intrinsics'])): + print(f'\nCamera {i} intrinsics differences') + + for ( + k_0, v_0), (_, v_1) in zip( + intrinsics0['intrinsics'].items(), intrinsics1['intrinsics'].items()): + print_abs_rel(f'Difference for {k_0}', v_0, v_1) + + print_abs_rel('\nAccel Bias Difference', + np.array(calib0['value0']['calib_accel_bias'][0:2]), + np.array(calib1['value0']['calib_accel_bias'][0:2])) + + print_abs_rel('Accel Scale Difference', + np.array(calib0['value0']['calib_accel_bias'][3:9]), + np.array(calib1['value0']['calib_accel_bias'][3:9])) + + print_abs_rel('Gyro Bias Difference', + np.array(calib0['value0']['calib_gyro_bias'][0:2]), + np.array(calib1['value0']['calib_gyro_bias'][0:2])) + + print_abs_rel('Gyro Scale Difference', + np.array(calib0['value0']['calib_gyro_bias'][3:12]), + np.array(calib1['value0']['calib_gyro_bias'][3:12])) + + print_abs_rel( + '\nAccel Noise Std Difference', + 
calib0['value0']['accel_noise_std'], + calib1['value0']['accel_noise_std']) + print_abs_rel( + 'Gyro Noise Std Difference', + calib0['value0']['gyro_noise_std'], + calib1['value0']['gyro_noise_std']) + print_abs_rel( + 'Accel Bias Std Difference', + calib0['value0']['accel_bias_std'], + calib1['value0']['accel_bias_std']) + print_abs_rel( + 'Gyro Bias Std Difference', + calib0['value0']['gyro_bias_std'], + calib1['value0']['gyro_bias_std']) + + print_abs_rel( + '\nCam Time Offset Difference', + calib0['value0']['cam_time_offset_ns'], + calib0['value0']['cam_time_offset_ns']) + + +def create_parser(): + parser = argparse.ArgumentParser() + parser.add_argument('calib_path_1') + parser.add_argument('calib_path_2') + + return parser + + +if __name__ == '__main__': + args = create_parser().parse_args() + + main(args.calib_path_1, args.calib_path_2) diff --git a/scripts/eval_full/gen_results.py b/scripts/eval_full/gen_results.py new file mode 100755 index 0000000..9816529 --- /dev/null +++ b/scripts/eval_full/gen_results.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. +# + +import os +import sys +import json + + +datasets = ['Seq.', 'MH_01_easy', 'MH_02_easy', 'MH_03_medium', 'MH_04_difficult', +'MH_05_difficult', 'V1_01_easy', 'V1_02_medium', +'V1_03_difficult', 'V2_01_easy', 'V2_02_medium'] + + +# Other results. + + +vio = { +'ate' : ['VIO RMS ATE [m]'], +'time' : ['VIO Time [s]'], +'num_frames' : ['VIO Num. Frames'] +} + +mapping = { +'ate' : ['MAP RMS ATE [m]'], +'time' : ['MAP Time [s]'], +'num_frames' : ['MAP Num. KFs'] +} + +pose_graph = { +'ate' : ['PG RMS ATE [m]'], +'time' : ['PG Time [s]'], +'num_frames' : ['PG Num. KFs'] +} + +pure_ba = { +'ate' : ['PG RMS ATE [m]'], +'time' : ['PG Time [s]'], +'num_frames' : ['PG Num. 
KFs'] +} + +out_dir = sys.argv[1] + +def load_data(x, prefix, key): + fname = out_dir + '/' + prefix + '_' + key + if os.path.isfile(fname): + with open(fname, 'r') as f: + j = json.load(f) + res = round(j['rms_ate'], 3) + x['ate'].append(float(res)) + x['time'].append(round(j['exec_time_ns']*1e-9, 3)) + x['num_frames'].append(j['num_frames']) + else: + x['ate'].append(float('Inf')) + x['time'].append(float('Inf')) + x['num_frames'].append(float('Inf')) + + +for key in datasets[1:]: + load_data(vio, 'vio', key) + load_data(mapping, 'mapper', key) + load_data(pose_graph, 'mapper_no_weights', key) + load_data(pure_ba, 'mapper_no_factors', key) + + +row_format ="{:>17}" + "{:>13}" * (len(datasets)-1) + +datasets_short = [x[:5] for x in datasets] + +print('\nVisual-Inertial Odometry') +print(row_format.format(*datasets_short)) + +print(row_format.format(*vio['ate'])) +#print(row_format.format(*vio['time'])) +print(row_format.format(*vio['num_frames'])) + +print('\nVisual-Inertial Mapping') +print(row_format.format(*datasets_short)) + +print(row_format.format(*mapping['ate'])) +#print(row_format.format(*mapping['time'])) +print(row_format.format(*mapping['num_frames'])) + + +print('\nPose-Graph optimization (Identity weights for all factors)') +print(row_format.format(*datasets_short)) + +print(row_format.format(*pose_graph['ate'])) +#print(row_format.format(*pose_graph['time'])) +print(row_format.format(*pose_graph['num_frames'])) + + +print('\nPure BA optimization (no factors from the recovery used)') +print(row_format.format(*datasets_short)) + +print(row_format.format(*pure_ba['ate'])) +#print(row_format.format(*pure_ba['time'])) +print(row_format.format(*pure_ba['num_frames'])) + + diff --git a/scripts/eval_full/gen_results_kitti.py b/scripts/eval_full/gen_results_kitti.py new file mode 100755 index 0000000..44bc882 --- /dev/null +++ b/scripts/eval_full/gen_results_kitti.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +# +# BSD 3-Clause License +# +# This file is part 
of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. +# + +import os +import sys +import json + + +datasets = ['Seq.', '00', '02', '03','04', '05', '06','07', '08', '09', '10'] +lengths = [100, 200, 300, 400, 500, 600, 700, 800] + +# Other results. + + +vo = { +'trans_error': {}, +'rot_error': {} +} + +for l in lengths: + vo['trans_error'][l] = ['Trans. error [%] ' + str(l) + 'm.'] + vo['rot_error'][l] = ['Rot. error [deg/m] ' + str(l) + 'm.'] + + +out_dir = sys.argv[1] + +mean_values = { +'mean_trans_error' : 0.0, +'mean_rot_error' : 0.0, +'total_num_meas' : 0.0 +} + +def load_data(x, prefix, key, mean_values): + fname = out_dir + '/' + prefix + '_' + key + '.txt' + if os.path.isfile(fname): + with open(fname, 'r') as f: + j = json.load(f) + res = j['results'] + for v in lengths: + num_meas = res[str(v)]['num_meas'] + trans_error = res[str(v)]['trans_error'] + rot_error = res[str(v)]['rot_error'] + x['trans_error'][int(v)].append(round(trans_error, 5)) + x['rot_error'][int(v)].append(round(rot_error, 5)) + if num_meas > 0: + mean_values['mean_trans_error'] += trans_error*num_meas + mean_values['mean_rot_error'] += rot_error*num_meas + mean_values['total_num_meas'] += num_meas + else: + for v in lengths: + x['trans_error'][int(v)].append(float('inf')) + x['rot_error'][int(v)].append(float('inf')) + +for key in datasets[1:]: + load_data(vo, 'rpe', key, mean_values) + + +row_format ="{:>24}" + "{:>10}" * (len(datasets)-1) + +datasets_short = [x[:5] for x in datasets] + +print('\nVisual Odometry (Stereo)') +print(row_format.format(*datasets_short)) + +for l in lengths: + print(row_format.format(*(vo['trans_error'][l]))) + +print() + +for l in lengths: + print(row_format.format(*(vo['rot_error'][l]))) + + +print('Mean translation error [%] ', mean_values['mean_trans_error']/mean_values['total_num_meas']) +print('Mean rotation error [deg/m] ', 
mean_values['mean_rot_error']/mean_values['total_num_meas']) \ No newline at end of file diff --git a/scripts/eval_full/gen_results_tumvi.py b/scripts/eval_full/gen_results_tumvi.py new file mode 100755 index 0000000..4b6b195 --- /dev/null +++ b/scripts/eval_full/gen_results_tumvi.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +# +# BSD 3-Clause License +# +# This file is part of the Basalt project. +# https://gitlab.com/VladyslavUsenko/basalt.git +# +# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +# All rights reserved. +# + +import os +import sys +import json + + +datasets = ['Seq.', 'dataset-corridor1_512_16', 'dataset-magistrale1_512_16', 'dataset-room1_512_16', 'dataset-slides1_512_16'] + +# Other results. + + +vio = { +'ate' : ['VIO RMS ATE [m]'], +'time' : ['VIO Time [s]'], +'num_frames' : ['VIO Num. Frames'] +} + +out_dir = sys.argv[1] + +def load_data(x, prefix, key): + fname = out_dir + '/' + prefix + '_' + key + if os.path.isfile(fname): + with open(fname, 'r') as f: + j = json.load(f) + res = round(j['rms_ate'], 3) + x['ate'].append(float(res)) + x['time'].append(round(j['exec_time_ns']*1e-9, 3)) + x['num_frames'].append(j['num_frames']) + else: + x['ate'].append(float('Inf')) + x['time'].append(float('Inf')) + x['num_frames'].append(float('Inf')) + + +for key in datasets[1:]: + load_data(vio, 'vio', key) + + +row_format ="{:>17}" + "{:>13}" * (len(datasets)-1) + +datasets_short = [x[8:].split('_')[0] for x in datasets] + +print('\nVisual-Inertial Odometry') +print(row_format.format(*datasets_short)) + +print(row_format.format(*vio['ate'])) +#print(row_format.format(*vio['time'])) +print(row_format.format(*vio['num_frames'])) + + + + diff --git a/scripts/eval_full/run_evaluations.sh b/scripts/eval_full/run_evaluations.sh new file mode 100755 index 0000000..0028d42 --- /dev/null +++ b/scripts/eval_full/run_evaluations.sh @@ -0,0 +1,44 @@ +#!/bin/bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. 
+## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + +set -e +set -x + +DATASET_PATH=/data/euroc + +DATASETS=(MH_01_easy MH_02_easy MH_03_medium MH_04_difficult MH_05_difficult V1_01_easy V1_02_medium V1_03_difficult V2_01_easy V2_02_medium) + + +folder_name=eval_results +mkdir $folder_name + + + +for d in ${DATASETS[$CI_NODE_INDEX-1]}; do + basalt_vio --dataset-path $DATASET_PATH/$d --cam-calib /usr/etc/basalt/euroc_eucm_calib.json \ + --dataset-type euroc --show-gui 0 --config-path /usr/etc/basalt/euroc_config.json \ + --result-path $folder_name/vio_$d --marg-data eval_tmp_marg_data --save-trajectory tum + + mv trajectory.txt $folder_name/traj_vio_$d.txt + + basalt_mapper --show-gui 0 --cam-calib /usr/etc/basalt/euroc_eucm_calib.json --config-path /usr/etc/basalt/euroc_config.json --marg-data eval_tmp_marg_data \ + --result-path $folder_name/mapper_$d + + basalt_mapper --show-gui 0 --cam-calib /usr/etc/basalt/euroc_eucm_calib.json --config-path /usr/etc/basalt/euroc_config_no_weights.json --marg-data eval_tmp_marg_data \ + --result-path $folder_name/mapper_no_weights_$d + + basalt_mapper --show-gui 0 --cam-calib /usr/etc/basalt/euroc_eucm_calib.json --config-path /usr/etc/basalt/euroc_config_no_factors.json --marg-data eval_tmp_marg_data \ + --result-path $folder_name/mapper_no_factors_$d + + rm -rf eval_tmp_marg_data +done + +#./gen_results.py $folder_name > euroc_results.txt diff --git a/scripts/eval_full/run_evaluations_kitti.sh b/scripts/eval_full/run_evaluations_kitti.sh new file mode 100755 index 0000000..670aaad --- /dev/null +++ b/scripts/eval_full/run_evaluations_kitti.sh @@ -0,0 +1,33 @@ +#!/bin/bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. 
+## + +set -e +set -x + +DATASET_PATH=/data/kitti_odom_grey/sequences + +DATASETS=(00 02 03 04 05 06 07 08 09 10) + + +folder_name=eval_results_kitti +mkdir $folder_name + +for d in ${DATASETS[$CI_NODE_INDEX-1]}; do +echo $d + basalt_vio --dataset-path $DATASET_PATH/$d --cam-calib $DATASET_PATH/$d/basalt_calib.json \ + --dataset-type kitti --show-gui 0 --config-path /usr/etc/basalt/kitti_config.json --result-path $folder_name/vo_$d --save-trajectory kitti --use-imu 0 + + mv trajectory_kitti.txt $folder_name/kitti_$d.txt + + basalt_kitti_eval --traj-path $folder_name/kitti_$d.txt --gt-path $DATASET_PATH/$d/poses.txt --result-path $folder_name/rpe_$d.txt + +done + diff --git a/scripts/eval_full/run_evaluations_tumvi.sh b/scripts/eval_full/run_evaluations_tumvi.sh new file mode 100755 index 0000000..ee2df0b --- /dev/null +++ b/scripts/eval_full/run_evaluations_tumvi.sh @@ -0,0 +1,39 @@ +#!/bin/bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. 
+## + +set -e +set -x + +DATASET_PATH=/data/tumvi/512_16/ + +DATASETS=( +dataset-corridor1_512_16 +dataset-magistrale1_512_16 +dataset-room1_512_16 +dataset-slides1_512_16 +) + + +folder_name=eval_results_tumvi +mkdir $folder_name + + + +for d in ${DATASETS[$CI_NODE_INDEX-1]}; do + basalt_vio --dataset-path $DATASET_PATH/$d --cam-calib /usr/etc/basalt/tumvi_512_eucm_calib.json \ + --dataset-type euroc --show-gui 0 --config-path /usr/etc/basalt/tumvi_512_config.json \ + --result-path $folder_name/vio_$d --save-trajectory tum + + mv trajectory.txt $folder_name/${d}_basalt_poses.txt + +done + +#./gen_results_tumvi.py $folder_name > euroc_tumvi.txt \ No newline at end of file diff --git a/scripts/install_deps.sh b/scripts/install_deps.sh new file mode 100755 index 0000000..0625bdd --- /dev/null +++ b/scripts/install_deps.sh @@ -0,0 +1,18 @@ +#!/bin/bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +if [[ "$OSTYPE" == "darwin"* ]]; then +${DIR}/install_mac_os_deps.sh +else +${DIR}/install_ubuntu_deps.sh +fi diff --git a/scripts/install_mac_os_deps.sh b/scripts/install_mac_os_deps.sh new file mode 100755 index 0000000..32284bd --- /dev/null +++ b/scripts/install_mac_os_deps.sh @@ -0,0 +1,26 @@ +#!/bin/sh +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. 
+## + +brew install \ + boost \ + opencv \ + cmake \ + pkgconfig \ + lz4 \ + clang-format \ + tbb \ + glew \ + eigen \ + ccache \ + lz4 \ + fmt + +brew install llvm diff --git a/scripts/install_ubuntu_deps.sh b/scripts/install_ubuntu_deps.sh new file mode 100755 index 0000000..9ddf190 --- /dev/null +++ b/scripts/install_ubuntu_deps.sh @@ -0,0 +1,13 @@ +#!/bin/sh +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + +sudo apt-get update +sudo apt-get install -y gcc g++ cmake git libtbb-dev libeigen3-dev libglew-dev ccache libjpeg-dev libpng-dev liblz4-dev libbz2-dev libboost-regex-dev libboost-filesystem-dev libboost-date-time-dev libboost-program-options-dev libgtest-dev libopencv-dev libfmt-dev diff --git a/scripts/templates/license-py-sh.tmpl b/scripts/templates/license-py-sh.tmpl new file mode 100644 index 0000000..812d2e9 --- /dev/null +++ b/scripts/templates/license-py-sh.tmpl @@ -0,0 +1,7 @@ +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) ${years}, ${owner}. +All rights reserved. diff --git a/scripts/update-license-headers.sh b/scripts/update-license-headers.sh new file mode 100755 index 0000000..d8e3c99 --- /dev/null +++ b/scripts/update-license-headers.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + +# Update license headers in source files. + +# Dependency: licenseheaders python package (install with pip) + +# TODO: Make it also update C++ files automatically. (Consider files with multiple headers, e.g. 
track.h and union_find.h) + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +DIRS=( + "$SCRIPT_DIR/../python/" + "$SCRIPT_DIR/../scripts" +) + +YEAR="2019-2021" +OWNER="Vladyslav Usenko and Nikolaus Demmel" +TEMPLATE="$SCRIPT_DIR/templates/license-py-sh.tmpl" + +for d in "${DIRS[@]}" +do + licenseheaders -d "$d" -y $YEAR -o "$OWNER" -t "$TEMPLATE" -vv +done + diff --git a/scripts/update_submodules.sh b/scripts/update_submodules.sh new file mode 100755 index 0000000..51132a4 --- /dev/null +++ b/scripts/update_submodules.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + +set -x + +git submodule sync --recursive +git submodule update --init --recursive + diff --git a/scripts/yapf-all.sh b/scripts/yapf-all.sh new file mode 100755 index 0000000..707a0dd --- /dev/null +++ b/scripts/yapf-all.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +## +## BSD 3-Clause License +## +## This file is part of the Basalt project. +## https://gitlab.com/VladyslavUsenko/basalt.git +## +## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel. +## All rights reserved. +## + + +# Format all python source files in the project. +# Optionally take folder as argument; default are `python` and `script` dirs. + +set -e + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +# default folders if not passed +if [ $# -lt 1 ]; then + set -- "$SCRIPT_DIR"/../python "$SCRIPT_DIR/batch" +fi + +echo "Formatting: $@" + +yapf -i -r "$@" diff --git a/src/calibrate.cpp b/src/calibrate.cpp new file mode 100644 index 0000000..1bc5ff1 --- /dev/null +++ b/src/calibrate.cpp @@ -0,0 +1,81 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. 
+https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include + +int main(int argc, char **argv) { + std::string dataset_path; + std::string dataset_type; + std::string aprilgrid_path; + std::string result_path; + std::vector cam_types; + std::string cache_dataset_name = "calib-cam"; + int skip_images = 1; + + CLI::App app{"Calibrate IMU"}; + + app.add_option("--dataset-path", dataset_path, "Path to dataset")->required(); + app.add_option("--result-path", result_path, "Path to result folder") + ->required(); + app.add_option("--dataset-type", dataset_type, "Dataset type (euroc, bag)") + ->required(); + + app.add_option("--aprilgrid", aprilgrid_path, + "Path to Aprilgrid config file)") + ->required(); + + app.add_option("--cache-name", cache_dataset_name, + "Name to save cached files"); + + app.add_option("--skip-images", skip_images, "Number of images to skip"); + app.add_option("--cam-types", cam_types, + "Type of cameras (eucm, ds, kb4, pinhole)") + ->required(); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError &e) { + return app.exit(e); + } + + basalt::CamCalib cv(dataset_path, dataset_type, aprilgrid_path, result_path, + cache_dataset_name, skip_images, cam_types); + + cv.renderingLoop(); + + return 0; +} diff --git a/src/calibrate_imu.cpp b/src/calibrate_imu.cpp new file mode 100644 index 0000000..cb0f59a --- /dev/null +++ b/src/calibrate_imu.cpp @@ -0,0 +1,95 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include + +#include + +int main(int argc, char **argv) { + std::string dataset_path; + std::string dataset_type; + std::string aprilgrid_path; + std::string result_path; + std::string cache_dataset_name = "calib-cam-imu"; + int skip_images = 1; + + double accel_noise_std = 0.016; + double gyro_noise_std = 0.000282; + double accel_bias_std = 0.001; + double gyro_bias_std = 0.0001; + + CLI::App app{"Calibrate IMU"}; + + app.add_option("--dataset-path", dataset_path, "Path to dataset")->required(); + app.add_option("--result-path", result_path, "Path to result folder") + ->required(); + app.add_option("--dataset-type", dataset_type, "Dataset type (euroc, bag)") + ->required(); + + app.add_option("--aprilgrid", aprilgrid_path, + "Path to Aprilgrid config file)") + ->required(); + + app.add_option("--gyro-noise-std", gyro_noise_std, "Gyroscope noise std"); + app.add_option("--accel-noise-std", accel_noise_std, + "Accelerometer noise std"); + + app.add_option("--gyro-bias-std", gyro_bias_std, + "Gyroscope bias random walk std"); + app.add_option("--accel-bias-std", accel_bias_std, + "Accelerometer bias random walk std"); + + app.add_option("--cache-name", cache_dataset_name, + "Name to save cached files"); + + app.add_option("--skip-images", skip_images, "Number of images to skip"); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError &e) { + return app.exit(e); + } + + basalt::CamImuCalib cv( + dataset_path, dataset_type, aprilgrid_path, result_path, + cache_dataset_name, skip_images, + {accel_noise_std, gyro_noise_std, accel_bias_std, gyro_bias_std}); + + cv.renderingLoop(); + + return 0; +} diff --git a/src/calibration/aprilgrid.cpp b/src/calibration/aprilgrid.cpp new file mode 100644 index 0000000..6b36d0b --- /dev/null +++ b/src/calibration/aprilgrid.cpp @@ -0,0 +1,144 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. 
+https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include + +#include + +namespace basalt { + +AprilGrid::AprilGrid(const std::string &config_path) { + std::ifstream is(config_path); + if (is.is_open()) { + cereal::JSONInputArchive ar(is); + ar(cereal::make_nvp("tagCols", tagCols)); + ar(cereal::make_nvp("tagRows", tagRows)); + ar(cereal::make_nvp("tagSize", tagSize)); + ar(cereal::make_nvp("tagSpacing", tagSpacing)); + } else { + std::cerr << "Could not open aprilgrid configuration: " << config_path + << std::endl; + std::abort(); + } + + double x_corner_offsets[4] = {0, tagSize, tagSize, 0}; + double y_corner_offsets[4] = {0, 0, tagSize, tagSize}; + + aprilgrid_corner_pos_3d.resize(tagCols * tagRows * 4); + + for (int y = 0; y < tagRows; y++) { + for (int x = 0; x < tagCols; x++) { + int tag_id = tagCols * y + x; + double x_offset = x * tagSize * (1 + tagSpacing); + double y_offset = y * tagSize * (1 + tagSpacing); + + for (int i = 0; i < 4; i++) { + int corner_id = (tag_id << 2) + i; + + Eigen::Vector4d &pos_3d = aprilgrid_corner_pos_3d[corner_id]; + + pos_3d[0] = x_offset + x_corner_offsets[i]; + pos_3d[1] = y_offset + y_corner_offsets[i]; + pos_3d[2] = 0; + pos_3d[3] = 1; + } + } + } + + int num_vign_points = 5; + int num_blocks = tagCols * tagRows * 2; + + aprilgrid_vignette_pos_3d.resize((num_blocks + tagCols + tagRows) * + num_vign_points); + + for (int k = 0; k < num_vign_points; k++) { + for (int i = 0; i < tagCols * tagRows; i++) { + // const Eigen::Vector3d p0 = aprilgrid_corner_pos_3d[4 * i + 0]; + const Eigen::Vector4d p1 = aprilgrid_corner_pos_3d[4 * i + 1]; + const Eigen::Vector4d p2 = aprilgrid_corner_pos_3d[4 * i + 2]; + const Eigen::Vector4d p3 = aprilgrid_corner_pos_3d[4 * i + 3]; + + double coeff = double(k + 1) / double(num_vign_points + 1); + + aprilgrid_vignette_pos_3d[k * num_blocks + 2 * i + 0] = + (p1 + coeff * (p2 - p1)); + aprilgrid_vignette_pos_3d[k * num_blocks + 2 * i + 1] = + (p2 + coeff * (p3 - p2)); + + aprilgrid_vignette_pos_3d[k * num_blocks + 2 * i + 0][0] 
+= + tagSize * tagSpacing / 2; + aprilgrid_vignette_pos_3d[k * num_blocks + 2 * i + 1][1] += + tagSize * tagSpacing / 2; + } + } + + size_t curr_idx = num_blocks * num_vign_points; + + for (int k = 0; k < num_vign_points; k++) { + for (int i = 0; i < tagCols; i++) { + const Eigen::Vector4d p0 = aprilgrid_corner_pos_3d[4 * i + 0]; + const Eigen::Vector4d p1 = aprilgrid_corner_pos_3d[4 * i + 1]; + + double coeff = double(k + 1) / double(num_vign_points + 1); + + aprilgrid_vignette_pos_3d[curr_idx + k * tagCols + i] = + (p0 + coeff * (p1 - p0)); + + aprilgrid_vignette_pos_3d[curr_idx + k * tagCols + i][1] -= + tagSize * tagSpacing / 2; + } + } + + curr_idx += tagCols * num_vign_points; + + for (int k = 0; k < num_vign_points; k++) { + for (int i = 0; i < tagRows; i++) { + const Eigen::Vector4d p0 = aprilgrid_corner_pos_3d[4 * i * tagCols + 0]; + const Eigen::Vector4d p3 = aprilgrid_corner_pos_3d[4 * i * tagCols + 3]; + + double coeff = double(k + 1) / double(num_vign_points + 1); + + aprilgrid_vignette_pos_3d[curr_idx + k * tagRows + i] = + (p0 + coeff * (p3 - p0)); + + aprilgrid_vignette_pos_3d[curr_idx + k * tagRows + i][0] -= + tagSize * tagSpacing / 2; + } + } +} + +} // namespace basalt diff --git a/src/calibration/calibraiton_helper.cpp b/src/calibration/calibraiton_helper.cpp new file mode 100644 index 0000000..323f26d --- /dev/null +++ b/src/calibration/calibraiton_helper.cpp @@ -0,0 +1,466 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include + +#include + +#include +#include + +#include +#include + +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#include +#include +#pragma GCC diagnostic pop + +#include + +namespace basalt { + +template +bool estimateTransformation( + const CamT &cam_calib, + const Eigen::aligned_vector &corners, + const std::vector &corner_ids, + const Eigen::aligned_vector &aprilgrid_corner_pos_3d, + Sophus::SE3d &T_target_camera, size_t &num_inliers) { + opengv::bearingVectors_t bearingVectors; + opengv::points_t points; + + for (size_t i = 0; i < corners.size(); i++) { + Eigen::Vector4d tmp; + if (!cam_calib.unproject(corners[i], tmp)) { + continue; + } + Eigen::Vector3d bearing = tmp.head<3>(); + Eigen::Vector3d point = aprilgrid_corner_pos_3d[corner_ids[i]].head<3>(); + bearing.normalize(); + + bearingVectors.push_back(bearing); + points.push_back(point); + } + + opengv::absolute_pose::CentralAbsoluteAdapter adapter(bearingVectors, points); + + opengv::sac::Ransac< + opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem> + ransac; + std::shared_ptr + absposeproblem_ptr( + new opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem( + adapter, opengv::sac_problems::absolute_pose:: + AbsolutePoseSacProblem::KNEIP)); + ransac.sac_model_ = absposeproblem_ptr; + ransac.threshold_ = 1.0 - cos(atan(sqrt(2.0) * 1 / cam_calib.getParam()[0])); + ransac.max_iterations_ = 50; + + ransac.computeModel(); + + T_target_camera = + Sophus::SE3d(ransac.model_coefficients_.topLeftCorner<3, 3>(), + ransac.model_coefficients_.topRightCorner<3, 1>()); + + num_inliers = ransac.inliers_.size(); + + return ransac.inliers_.size() > 8; +} + +void CalibHelper::detectCorners(const VioDatasetPtr &vio_data, + CalibCornerMap &calib_corners, + CalibCornerMap &calib_corners_rejected) { + calib_corners.clear(); + calib_corners_rejected.clear(); + + tbb::parallel_for( + tbb::blocked_range(0, vio_data->get_image_timestamps().size()), 
+ [&](const tbb::blocked_range &r) { + ApriltagDetector ad; + + for (size_t j = r.begin(); j != r.end(); ++j) { + int64_t timestamp_ns = vio_data->get_image_timestamps()[j]; + const std::vector &img_vec = + vio_data->get_image_data(timestamp_ns); + + for (size_t i = 0; i < img_vec.size(); i++) { + if (img_vec[i].img.get()) { + CalibCornerData ccd_good; + CalibCornerData ccd_bad; + ad.detectTags(*img_vec[i].img, ccd_good.corners, + ccd_good.corner_ids, ccd_good.radii, + ccd_bad.corners, ccd_bad.corner_ids, ccd_bad.radii); + + // std::cout << "image (" << timestamp_ns << "," + // << i + // << ") detected " << + // ccd_good.corners.size() + // << "corners (" << + // ccd_bad.corners.size() + // << " rejected)" << std::endl; + + TimeCamId tcid(timestamp_ns, i); + + calib_corners.emplace(tcid, ccd_good); + calib_corners_rejected.emplace(tcid, ccd_bad); + } + } + } + }); +} + +void CalibHelper::initCamPoses( + const Calibration::Ptr &calib, + const Eigen::aligned_vector &aprilgrid_corner_pos_3d, + CalibCornerMap &calib_corners, CalibInitPoseMap &calib_init_poses) { + calib_init_poses.clear(); + + std::vector corners; + corners.reserve(calib_corners.size()); + for (const auto &kv : calib_corners) { + corners.emplace_back(kv.first); + } + + tbb::parallel_for(tbb::blocked_range(0, corners.size()), + [&](const tbb::blocked_range &r) { + for (size_t j = r.begin(); j != r.end(); ++j) { + TimeCamId tcid = corners[j]; + const CalibCornerData &ccd = calib_corners.at(tcid); + + CalibInitPoseData cp; + + computeInitialPose(calib, tcid.cam_id, + aprilgrid_corner_pos_3d, ccd, cp); + + calib_init_poses.emplace(tcid, cp); + } + }); +} + +bool CalibHelper::initializeIntrinsics( + const Eigen::aligned_vector &corners, + const std::vector &corner_ids, const AprilGrid &aprilgrid, int cols, + int rows, Eigen::Vector4d &init_intr) { + // First, initialize the image center at the center of the image. 
+ + Eigen::aligned_map id_to_corner; + for (size_t i = 0; i < corner_ids.size(); i++) { + id_to_corner[corner_ids[i]] = corners[i]; + } + + const double _xi = 1.0; + const double _cu = cols / 2.0 - 0.5; + const double _cv = rows / 2.0 - 0.5; + + /// Initialize some temporaries needed. + double gamma0 = 0.0; + double minReprojErr = std::numeric_limits::max(); + + // Now we try to find a non-radial line to initialize the focal length + const size_t target_cols = aprilgrid.getTagCols(); + const size_t target_rows = aprilgrid.getTagRows(); + + bool success = false; + for (int tag_corner_offset = 0; tag_corner_offset < 2; tag_corner_offset++) + for (size_t r = 0; r < target_rows; ++r) { + // cv::Mat P(target.cols(); 4, CV_64F); + + Eigen::aligned_vector P; + + for (size_t c = 0; c < target_cols; ++c) { + int tag_offset = (r * target_cols + c) << 2; + + for (int i = 0; i < 2; i++) { + int corner_id = tag_offset + i + tag_corner_offset * 2; + + // std::cerr << corner_id << " "; + + if (id_to_corner.find(corner_id) != id_to_corner.end()) { + const Eigen::Vector2d imagePoint = id_to_corner[corner_id]; + + double u = imagePoint[0] - _cu; + double v = imagePoint[1] - _cv; + + P.emplace_back(u, v, 0.5, -0.5 * (square(u) + square(v))); + } + } + } + + // std::cerr << std::endl; + + const int MIN_CORNERS = 8; + // MIN_CORNERS is an arbitrary threshold for the number of corners + if (P.size() > MIN_CORNERS) { + // Resize P to fit with the count of valid points. 
+ + Eigen::Map P_mat((double *)P.data(), 4, P.size()); + + // std::cerr << "P_mat\n" << P_mat.transpose() << std::endl; + + Eigen::MatrixXd P_mat_t = P_mat.transpose(); + + Eigen::JacobiSVD svd( + P_mat_t, Eigen::ComputeThinU | Eigen::ComputeThinV); + + // std::cerr << "U\n" << svd.matrixU() << std::endl; + // std::cerr << "V\n" << svd.matrixV() << std::endl; + // std::cerr << "singularValues\n" << svd.singularValues() << + // std::endl; + + Eigen::Vector4d C = svd.matrixV().col(3); + // std::cerr << "C\n" << C.transpose() << std::endl; + // std::cerr << "P*res\n" << P_mat.transpose() * C << std::endl; + + double t = square(C(0)) + square(C(1)) + C(2) * C(3); + if (t < 0) { + continue; + } + + // check that line image is not radial + double d = sqrt(1.0 / t); + double nx = C(0) * d; + double ny = C(1) * d; + if (hypot(nx, ny) > 0.95) { + // std::cerr << "hypot(nx, ny) " << hypot(nx, ny) << std::endl; + continue; + } + + double nz = sqrt(1.0 - square(nx) - square(ny)); + double gamma = fabs(C(2) * d / nz); + + Eigen::Matrix calib; + calib << 0.5 * gamma, 0.5 * gamma, _cu, _cv, 0.5 * _xi; + // std::cerr << "gamma " << gamma << std::endl; + + UnifiedCamera cam_calib(calib); + + size_t num_inliers; + Sophus::SE3d T_target_camera; + if (!estimateTransformation(cam_calib, corners, corner_ids, + aprilgrid.aprilgrid_corner_pos_3d, + T_target_camera, num_inliers)) { + continue; + } + + double reprojErr = 0.0; + size_t numReprojected = computeReprojectionError( + cam_calib, corners, corner_ids, aprilgrid.aprilgrid_corner_pos_3d, + T_target_camera, reprojErr); + + // std::cerr << "numReprojected " << numReprojected << " reprojErr " + // << reprojErr / numReprojected << std::endl; + + if (numReprojected > MIN_CORNERS) { + double avgReprojErr = reprojErr / numReprojected; + + if (avgReprojErr < minReprojErr) { + minReprojErr = avgReprojErr; + gamma0 = gamma; + success = true; + } + } + + } // If this observation has enough valid corners + } // For each row in the image. 
+ + if (success) init_intr << 0.5 * gamma0, 0.5 * gamma0, _cu, _cv; + + return success; +} + +bool CalibHelper::initializeIntrinsicsPinhole( + const std::vector pinhole_corners, + const AprilGrid &aprilgrid, int cols, int rows, + Eigen::Vector4d &init_intr) { + // First, initialize the image center at the center of the image. + + const double _cu = cols / 2.0 - 0.5; + const double _cv = rows / 2.0 - 0.5; + + // Z. Zhang, A Flexible New Technique for Camera Calibration, PAMI 2000 + + size_t nImages = pinhole_corners.size(); + + // Eigen::MatrixXd A(2 * nImages, 2); + // Eigen::VectorXd b(2 * nImages); + + Eigen::MatrixXd A(nImages * 2, 2); + Eigen::VectorXd b(nImages * 2, 1); + + int i = 0; + + for (const CalibCornerData *ccd : pinhole_corners) { + const auto &corners = ccd->corners; + const auto &corner_ids = ccd->corner_ids; + + std::vector M(corners.size()), imagePoints(corners.size()); + for (size_t j = 0; j < corners.size(); ++j) { + M.at(j) = + cv::Point2f(aprilgrid.aprilgrid_corner_pos_3d[corner_ids[j]][0], + aprilgrid.aprilgrid_corner_pos_3d[corner_ids[j]][1]); + + // std::cout << "corner " + // << + // aprilgrid.aprilgrid_corner_pos_3d[corner_ids[j]].transpose() + // << std::endl; + + imagePoints.at(j) = cv::Point2f(corners[j][0], corners[j][1]); + } + + cv::Mat H = cv::findHomography(M, imagePoints); + + if (H.empty()) return false; + + // std::cout << H << std::endl; + + H.at(0, 0) -= H.at(2, 0) * _cu; + H.at(0, 1) -= H.at(2, 1) * _cu; + H.at(0, 2) -= H.at(2, 2) * _cu; + H.at(1, 0) -= H.at(2, 0) * _cv; + H.at(1, 1) -= H.at(2, 1) * _cv; + H.at(1, 2) -= H.at(2, 2) * _cv; + + double h[3], v[3], d1[3], d2[3]; + double n[4] = {0, 0, 0, 0}; + + for (int j = 0; j < 3; ++j) { + double t0 = H.at(j, 0); + double t1 = H.at(j, 1); + h[j] = t0; + v[j] = t1; + d1[j] = (t0 + t1) * 0.5; + d2[j] = (t0 - t1) * 0.5; + n[0] += t0 * t0; + n[1] += t1 * t1; + n[2] += d1[j] * d1[j]; + n[3] += d2[j] * d2[j]; + } + + for (int j = 0; j < 4; ++j) { + n[j] = 1.0 / sqrt(n[j]); + } + + 
for (int j = 0; j < 3; ++j) { + h[j] *= n[0]; + v[j] *= n[1]; + d1[j] *= n[2]; + d2[j] *= n[3]; + } + + A(i * 2, 0) = h[0] * v[0]; + A(i * 2, 1) = h[1] * v[1]; + A(i * 2 + 1, 0) = d1[0] * d2[0]; + A(i * 2 + 1, 1) = d1[1] * d2[1]; + b(i * 2, 0) = -h[2] * v[2]; + b(i * 2 + 1, 0) = -d1[2] * d2[2]; + + i++; + } + + Eigen::Vector2d f = (A.transpose() * A).ldlt().solve(A.transpose() * b); + + double fx = sqrt(fabs(1.0 / f(0))); + double fy = sqrt(fabs(1.0 / f(1))); + + init_intr << fx, fy, _cu, _cv; + + return true; +} + +void CalibHelper::computeInitialPose( + const Calibration::Ptr &calib, size_t cam_id, + const Eigen::aligned_vector &aprilgrid_corner_pos_3d, + const CalibCornerData &cd, CalibInitPoseData &cp) { + if (cd.corners.size() < 8) { + cp.num_inliers = 0; + return; + } + + bool success; + size_t num_inliers; + + std::visit( + [&](const auto &cam) { + Sophus::SE3d T_target_camera; + success = estimateTransformation(cam, cd.corners, cd.corner_ids, + aprilgrid_corner_pos_3d, cp.T_a_c, + num_inliers); + }, + calib->intrinsics[cam_id].variant); + + if (success) { + Eigen::Matrix4d T_c_a_init = cp.T_a_c.inverse().matrix(); + + std::vector proj_success; + calib->intrinsics[cam_id].project(aprilgrid_corner_pos_3d, T_c_a_init, + cp.reprojected_corners, proj_success); + + cp.num_inliers = num_inliers; + } else { + cp.num_inliers = 0; + } +} + +size_t CalibHelper::computeReprojectionError( + const UnifiedCamera &cam_calib, + const Eigen::aligned_vector &corners, + const std::vector &corner_ids, + const Eigen::aligned_vector &aprilgrid_corner_pos_3d, + const Sophus::SE3d &T_target_camera, double &error) { + size_t num_projected = 0; + error = 0; + + Eigen::Matrix4d T_camera_target = T_target_camera.inverse().matrix(); + + for (size_t i = 0; i < corners.size(); i++) { + Eigen::Vector4d p_cam = + T_camera_target * aprilgrid_corner_pos_3d[corner_ids[i]]; + Eigen::Vector2d res; + cam_calib.project(p_cam, res); + res -= corners[i]; + + num_projected++; + error += res.norm(); + 
} + + return num_projected; +} +} // namespace basalt diff --git a/src/calibration/cam_calib.cpp b/src/calibration/cam_calib.cpp new file mode 100644 index 0000000..b70f402 --- /dev/null +++ b/src/calibration/cam_calib.cpp @@ -0,0 +1,1099 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include + +#include + +#include + +#include + +#include + +namespace basalt { + +CamCalib::CamCalib(const std::string &dataset_path, + const std::string &dataset_type, + const std::string &aprilgrid_path, + const std::string &cache_path, + const std::string &cache_dataset_name, int skip_images, + const std::vector &cam_types, bool show_gui) + : dataset_path(dataset_path), + dataset_type(dataset_type), + april_grid(aprilgrid_path), + cache_path(ensure_trailing_slash(cache_path)), + cache_dataset_name(cache_dataset_name), + skip_images(skip_images), + cam_types(cam_types), + show_gui(show_gui), + show_frame("ui.show_frame", 0, 0, 1500), + show_corners("ui.show_corners", true, false, true), + show_corners_rejected("ui.show_corners_rejected", false, false, true), + show_init_reproj("ui.show_init_reproj", false, false, true), + show_opt("ui.show_opt", true, false, true), + show_vign("ui.show_vign", false, false, true), + show_ids("ui.show_ids", false, false, true), + huber_thresh("ui.huber_thresh", 4.0, 0.1, 10.0), + opt_intr("ui.opt_intr", true, false, true), + opt_until_convg("ui.opt_until_converge", false, false, true), + stop_thresh("ui.stop_thresh", 1e-8, 1e-10, 0.01, true) { + if (show_gui) initGui(); + + if (!fs::exists(cache_path)) { + fs::create_directory(cache_path); + } + + pangolin::ColourWheel cw; + for (int i = 0; i < 20; i++) { + cam_colors.emplace_back(cw.GetUniqueColour()); + } +} + +CamCalib::~CamCalib() { + if (processing_thread) { + processing_thread->join(); + } +} + +void CamCalib::initGui() { + pangolin::CreateWindowAndBind("Main", 1600, 1000); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + img_view_display = + &pangolin::CreateDisplay() + .SetBounds(0.5, 1.0, pangolin::Attach::Pix(UI_WIDTH), 1.0) + .SetLayout(pangolin::LayoutEqual); + + pangolin::View &vign_plot_display = + pangolin::CreateDisplay().SetBounds(0.0, 0.5, 0.72, 1.0); + + vign_plotter.reset(new 
pangolin::Plotter(&vign_data_log, 0.0, 1000.0, 0.0, + 1.0, 0.01f, 0.01f)); + vign_plot_display.AddDisplay(*vign_plotter); + + pangolin::View &polar_error_display = pangolin::CreateDisplay().SetBounds( + 0.0, 0.5, pangolin::Attach::Pix(UI_WIDTH), 0.43); + + polar_plotter.reset( + new pangolin::Plotter(nullptr, 0.0, 120.0, 0.0, 1.0, 0.01f, 0.01f)); + polar_error_display.AddDisplay(*polar_plotter); + + pangolin::View &azimuthal_plot_display = + pangolin::CreateDisplay().SetBounds(0.0, 0.5, 0.45, 0.7); + + azimuth_plotter.reset( + new pangolin::Plotter(nullptr, -180.0, 180.0, 0.0, 1.0, 0.01f, 0.01f)); + azimuthal_plot_display.AddDisplay(*azimuth_plotter); + + pangolin::Var> load_dataset( + "ui.load_dataset", std::bind(&CamCalib::loadDataset, this)); + + pangolin::Var> detect_corners( + "ui.detect_corners", std::bind(&CamCalib::detectCorners, this)); + + pangolin::Var> init_cam_intrinsics( + "ui.init_cam_intr", std::bind(&CamCalib::initCamIntrinsics, this)); + + pangolin::Var> init_cam_poses( + "ui.init_cam_poses", std::bind(&CamCalib::initCamPoses, this)); + + pangolin::Var> init_cam_extrinsics( + "ui.init_cam_extr", std::bind(&CamCalib::initCamExtrinsics, this)); + + pangolin::Var> init_opt( + "ui.init_opt", std::bind(&CamCalib::initOptimization, this)); + + pangolin::Var> optimize( + "ui.optimize", std::bind(&CamCalib::optimize, this)); + + pangolin::Var> save_calib( + "ui.save_calib", std::bind(&CamCalib::saveCalib, this)); + + pangolin::Var> compute_vign( + "ui.compute_vign", std::bind(&CamCalib::computeVign, this)); + + setNumCameras(1); +} + +void CamCalib::computeVign() { + Eigen::aligned_vector optical_centers; + for (size_t i = 0; i < calib_opt->calib->intrinsics.size(); i++) { + optical_centers.emplace_back( + calib_opt->calib->intrinsics[i].getParam().segment<2>(2)); + } + + std::map> + reprojected_vignette2; + for (size_t i = 0; i < vio_dataset->get_image_timestamps().size(); i++) { + int64_t timestamp_ns = vio_dataset->get_image_timestamps()[i]; + const 
std::vector img_vec = + vio_dataset->get_image_data(timestamp_ns); + + for (size_t j = 0; j < calib_opt->calib->intrinsics.size(); j++) { + TimeCamId tcid(timestamp_ns, j); + + auto it = reprojected_vignette.find(tcid); + + if (it != reprojected_vignette.end() && img_vec[j].img.get()) { + Eigen::aligned_vector rv; + rv.resize(it->second.corners_proj.size()); + + for (size_t k = 0; k < it->second.corners_proj.size(); k++) { + Eigen::Vector2d pos = it->second.corners_proj[k]; + + rv[k].head<2>() = pos; + + if (img_vec[j].img->InBounds(pos[0], pos[1], 1) && + it->second.corners_proj_success[k]) { + double val = img_vec[j].img->interp(pos); + val /= std::numeric_limits::max(); + + if (img_vec[j].exposure > 0) { + val *= 0.001 / img_vec[j].exposure; // bring to common exposure + } + + rv[k][2] = val; + } else { + rv[k][2] = -1; + } + } + + reprojected_vignette2.emplace(tcid, rv); + } + } + } + + VignetteEstimator ve(vio_dataset, optical_centers, + calib_opt->calib->resolution, reprojected_vignette2, + april_grid); + + ve.optimize(); + ve.compute_error(&reprojected_vignette_error); + + std::vector> vign_data; + ve.compute_data_log(vign_data); + vign_data_log.Clear(); + for (const auto &v : vign_data) vign_data_log.Log(v); + + { + vign_plotter->ClearSeries(); + vign_plotter->ClearMarkers(); + + for (size_t i = 0; i < calib_opt->calib->intrinsics.size(); i++) { + vign_plotter->AddSeries("$i", "$" + std::to_string(2 * i), + pangolin::DrawingModeLine, cam_colors[i], + "vignette camera " + std::to_string(i)); + } + + vign_plotter->ScaleViewSmooth(vign_data_log.Samples() / 1000.0f, 1.0f, 0.0f, + 0.5f); + } + + ve.save_vign_png(cache_path); + + calib_opt->setVignette(ve.get_vign_param()); + + std::cout << "Saved vignette png files to " << cache_path << std::endl; +} + +void CamCalib::setNumCameras(size_t n) { + while (img_view.size() < n && show_gui) { + std::shared_ptr iv(new pangolin::ImageView); + + size_t idx = img_view.size(); + img_view.push_back(iv); + + 
img_view_display->AddDisplay(*iv); + iv->extern_draw_function = std::bind(&CamCalib::drawImageOverlay, this, + std::placeholders::_1, idx); + } +} + +void CamCalib::renderingLoop() { + while (!pangolin::ShouldQuit()) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + if (vio_dataset.get()) { + if (show_frame.GuiChanged()) { + size_t frame_id = static_cast(show_frame); + int64_t timestamp = vio_dataset->get_image_timestamps()[frame_id]; + + const std::vector &img_vec = + vio_dataset->get_image_data(timestamp); + + for (size_t cam_id = 0; cam_id < vio_dataset->get_num_cams(); cam_id++) + if (img_vec[cam_id].img.get()) { + pangolin::GlPixFormat fmt; + fmt.glformat = GL_LUMINANCE; + fmt.gltype = GL_UNSIGNED_SHORT; + fmt.scalable_internal_format = GL_LUMINANCE16; + + img_view[cam_id]->SetImage( + img_vec[cam_id].img->ptr, img_vec[cam_id].img->w, + img_vec[cam_id].img->h, img_vec[cam_id].img->pitch, fmt); + } else { + img_view[cam_id]->Clear(); + } + } + } + + if (opt_until_convg) { + bool converged = optimizeWithParam(true); + if (converged) opt_until_convg = false; + } + + pangolin::FinishFrame(); + } +} + +void CamCalib::computeProjections() { + reprojected_corners.clear(); + reprojected_vignette.clear(); + + if (!calib_opt.get() || !vio_dataset.get()) return; + + constexpr int ANGLE_BIN_SIZE = 2; + std::vector> polar_sum( + calib_opt->calib->intrinsics.size()); + std::vector> polar_num( + calib_opt->calib->intrinsics.size()); + + std::vector> azimuth_sum( + calib_opt->calib->intrinsics.size()); + std::vector> azimuth_num( + calib_opt->calib->intrinsics.size()); + + for (size_t i = 0; i < calib_opt->calib->intrinsics.size(); i++) { + polar_sum[i].setZero(); + polar_num[i].setZero(); + azimuth_sum[i].setZero(); + azimuth_num[i].setZero(); + } + + for (size_t j = 0; j < vio_dataset->get_image_timestamps().size(); ++j) { + int64_t timestamp_ns = vio_dataset->get_image_timestamps()[j]; + + for (size_t i = 0; i < calib_opt->calib->intrinsics.size(); i++) { + 
TimeCamId tcid(timestamp_ns, i); + + ProjectedCornerData rc, rv; + Eigen::aligned_vector polar_azimuthal_angle; + + Sophus::SE3d T_c_w_ = + (calib_opt->getT_w_i(timestamp_ns) * calib_opt->calib->T_i_c[i]) + .inverse(); + + Eigen::Matrix4d T_c_w = T_c_w_.matrix(); + + calib_opt->calib->intrinsics[i].project( + april_grid.aprilgrid_corner_pos_3d, T_c_w, rc.corners_proj, + rc.corners_proj_success, polar_azimuthal_angle); + + calib_opt->calib->intrinsics[i].project( + april_grid.aprilgrid_vignette_pos_3d, T_c_w, rv.corners_proj, + rv.corners_proj_success); + + reprojected_corners.emplace(tcid, rc); + reprojected_vignette.emplace(tcid, rv); + + // Compute reprojection histogrames over polar and azimuth angle + auto it = calib_corners.find(tcid); + if (it != calib_corners.end()) { + for (size_t k = 0; k < it->second.corners.size(); k++) { + size_t id = it->second.corner_ids[k]; + + if (rc.corners_proj_success[id]) { + double error = (it->second.corners[k] - rc.corners_proj[id]).norm(); + + size_t polar_bin = + 180 * polar_azimuthal_angle[id][0] / (M_PI * ANGLE_BIN_SIZE); + + polar_sum[tcid.cam_id][polar_bin] += error; + polar_num[tcid.cam_id][polar_bin] += 1; + + size_t azimuth_bin = + 180 / ANGLE_BIN_SIZE + (180.0 * polar_azimuthal_angle[id][1]) / + (M_PI * ANGLE_BIN_SIZE); + + azimuth_sum[tcid.cam_id][azimuth_bin] += error; + azimuth_num[tcid.cam_id][azimuth_bin] += 1; + } + } + } + } + } + + while (polar_data_log.size() < calib_opt->calib->intrinsics.size()) { + polar_data_log.emplace_back(new pangolin::DataLog); + } + + while (azimuth_data_log.size() < calib_opt->calib->intrinsics.size()) { + azimuth_data_log.emplace_back(new pangolin::DataLog); + } + + constexpr int MIN_POINTS_HIST = 3; + polar_plotter->ClearSeries(); + azimuth_plotter->ClearSeries(); + + for (size_t c = 0; c < calib_opt->calib->intrinsics.size(); c++) { + polar_data_log[c]->Clear(); + azimuth_data_log[c]->Clear(); + + for (int i = 0; i < polar_sum[c].rows(); i++) { + if (polar_num[c][i] > 
MIN_POINTS_HIST) { + double x_coord = ANGLE_BIN_SIZE * i + ANGLE_BIN_SIZE / 2.0; + double mean_reproj = polar_sum[c][i] / polar_num[c][i]; + + polar_data_log[c]->Log(x_coord, mean_reproj); + } + } + + polar_plotter->AddSeries( + "$0", "$1", pangolin::DrawingModeLine, cam_colors[c], + "mean error(pix) vs polar angle(deg) for cam" + std::to_string(c), + polar_data_log[c].get()); + + for (int i = 0; i < azimuth_sum[c].rows(); i++) { + if (azimuth_num[c][i] > MIN_POINTS_HIST) { + double x_coord = ANGLE_BIN_SIZE * i + ANGLE_BIN_SIZE / 2.0 - 180.0; + double mean_reproj = azimuth_sum[c][i] / azimuth_num[c][i]; + + azimuth_data_log[c]->Log(x_coord, mean_reproj); + } + } + + azimuth_plotter->AddSeries( + "$0", "$1", pangolin::DrawingModeLine, cam_colors[c], + "mean error(pix) vs azimuth angle(deg) for cam" + std::to_string(c), + azimuth_data_log[c].get()); + } +} + +void CamCalib::detectCorners() { + if (processing_thread) { + processing_thread->join(); + processing_thread.reset(); + } + + processing_thread.reset(new std::thread([this]() { + std::cout << "Started detecting corners" << std::endl; + + CalibHelper::detectCorners(this->vio_dataset, this->calib_corners, + this->calib_corners_rejected); + + std::string path = + cache_path + cache_dataset_name + "_detected_corners.cereal"; + std::ofstream os(path, std::ios::binary); + cereal::BinaryOutputArchive archive(os); + + archive(this->calib_corners); + archive(this->calib_corners_rejected); + + std::cout << "Done detecting corners. Saved them here: " << path + << std::endl; + })); + + if (!show_gui) { + processing_thread->join(); + processing_thread.reset(); + } +} + +void CamCalib::initCamIntrinsics() { + if (calib_corners.empty()) { + std::cerr << "No corners detected. Press detect_corners to start corner " + "detection." 
+ << std::endl; + return; + } + + std::cout << "Started camera intrinsics initialization" << std::endl; + + if (!calib_opt) calib_opt.reset(new PosesOptimization); + + calib_opt->resetCalib(vio_dataset->get_num_cams(), cam_types); + + std::vector cam_initialized(vio_dataset->get_num_cams(), false); + + int inc = 1; + if (vio_dataset->get_image_timestamps().size() > 100) inc = 3; + + for (size_t j = 0; j < vio_dataset->get_num_cams(); j++) { + for (size_t i = 0; i < vio_dataset->get_image_timestamps().size(); + i += inc) { + const int64_t timestamp_ns = vio_dataset->get_image_timestamps()[i]; + const std::vector &img_vec = + vio_dataset->get_image_data(timestamp_ns); + + TimeCamId tcid(timestamp_ns, j); + + if (calib_corners.find(tcid) != calib_corners.end()) { + CalibCornerData cid = calib_corners.at(tcid); + + Eigen::Vector4d init_intr; + + bool success = CalibHelper::initializeIntrinsics( + cid.corners, cid.corner_ids, april_grid, img_vec[j].img->w, + img_vec[j].img->h, init_intr); + + if (success) { + cam_initialized[j] = true; + calib_opt->calib->intrinsics[j].setFromInit(init_intr); + break; + } + } + } + } + + // Try perfect pinhole initialization for cameras that are not initalized. 
+ for (size_t j = 0; j < vio_dataset->get_num_cams(); j++) { + if (!cam_initialized[j]) { + std::vector pinhole_corners; + int w = 0; + int h = 0; + + for (size_t i = 0; i < vio_dataset->get_image_timestamps().size(); + i += inc) { + const int64_t timestamp_ns = vio_dataset->get_image_timestamps()[i]; + const std::vector &img_vec = + vio_dataset->get_image_data(timestamp_ns); + + TimeCamId tcid(timestamp_ns, j); + + auto it = calib_corners.find(tcid); + if (it != calib_corners.end()) { + if (it->second.corners.size() > 8) { + pinhole_corners.emplace_back(&it->second); + } + } + + w = img_vec[j].img->w; + h = img_vec[j].img->h; + } + + BASALT_ASSERT(w > 0 && h > 0); + + Eigen::Vector4d init_intr; + + bool success = CalibHelper::initializeIntrinsicsPinhole( + pinhole_corners, april_grid, w, h, init_intr); + + if (success) { + cam_initialized[j] = true; + + std::cout << "Initialized camera " << j + << " with pinhole model. You should set pinhole model for " + "this camera!" + << std::endl; + calib_opt->calib->intrinsics[j].setFromInit(init_intr); + } + } + } + + std::cout << "Done camera intrinsics initialization:" << std::endl; + for (size_t j = 0; j < vio_dataset->get_num_cams(); j++) { + std::cout << "Cam " << j << ": " + << calib_opt->calib->intrinsics[j].getParam().transpose() + << std::endl; + } + + // set resolution + { + size_t img_idx = 1; + int64_t t_ns = vio_dataset->get_image_timestamps()[img_idx]; + auto img_data = vio_dataset->get_image_data(t_ns); + + // Find the frame with all valid images + while (img_idx < vio_dataset->get_image_timestamps().size()) { + bool img_data_valid = true; + for (size_t i = 0; i < vio_dataset->get_num_cams(); i++) { + if (!img_data[i].img.get()) img_data_valid = false; + } + + if (!img_data_valid) { + img_idx++; + int64_t t_ns_new = vio_dataset->get_image_timestamps()[img_idx]; + img_data = vio_dataset->get_image_data(t_ns_new); + } else { + break; + } + } + + Eigen::aligned_vector res; + + for (size_t i = 0; i < 
vio_dataset->get_num_cams(); i++) { + res.emplace_back(img_data[i].img->w, img_data[i].img->h); + } + + calib_opt->setResolution(res); + } +} + +void CamCalib::initCamPoses() { + if (calib_corners.empty()) { + std::cerr << "No corners detected. Press detect_corners to start corner " + "detection." + << std::endl; + return; + } + + if (!calib_opt.get() || !calib_opt->calibInitialized()) { + std::cerr << "No initial intrinsics. Press init_intrinsics initialize " + "intrinsics" + << std::endl; + return; + } + + if (processing_thread) { + processing_thread->join(); + processing_thread.reset(); + } + + std::cout << "Started initial camera pose computation " << std::endl; + + CalibHelper::initCamPoses(calib_opt->calib, + april_grid.aprilgrid_corner_pos_3d, + this->calib_corners, this->calib_init_poses); + + std::string path = cache_path + cache_dataset_name + "_init_poses.cereal"; + std::ofstream os(path, std::ios::binary); + cereal::BinaryOutputArchive archive(os); + + archive(this->calib_init_poses); + + std::cout << "Done initial camera pose computation. Saved them here: " << path + << std::endl; +} + +void CamCalib::initCamExtrinsics() { + if (calib_init_poses.empty()) { + std::cerr << "No initial camera poses. Press init_cam_poses initialize " + "camera poses " + << std::endl; + return; + } + + if (!calib_opt.get() || !calib_opt->calibInitialized()) { + std::cerr << "No initial intrinsics. Press init_intrinsics initialize " + "intrinsics" + << std::endl; + return; + } + + // Camera graph. Stores the edge from i to j with weight w and timestamp. i + // and j should be sorted; + std::map, std::pair> cam_graph; + + // Construct the graph. 
+ for (size_t i = 0; i < vio_dataset->get_image_timestamps().size(); i++) { + int64_t timestamp_ns = vio_dataset->get_image_timestamps()[i]; + + for (size_t cam_i = 0; cam_i < vio_dataset->get_num_cams(); cam_i++) { + TimeCamId tcid_i(timestamp_ns, cam_i); + + auto it = calib_init_poses.find(tcid_i); + if (it == calib_init_poses.end() || it->second.num_inliers < MIN_CORNERS) + continue; + + for (size_t cam_j = cam_i + 1; cam_j < vio_dataset->get_num_cams(); + cam_j++) { + TimeCamId tcid_j(timestamp_ns, cam_j); + + auto it2 = calib_init_poses.find(tcid_j); + if (it2 == calib_init_poses.end() || + it2->second.num_inliers < MIN_CORNERS) + continue; + + std::pair edge_id(cam_i, cam_j); + + int curr_weight = cam_graph[edge_id].first; + int new_weight = + std::min(it->second.num_inliers, it2->second.num_inliers); + + if (curr_weight < new_weight) { + cam_graph[edge_id] = std::make_pair(new_weight, timestamp_ns); + } + } + } + } + + std::vector cameras_initialized(vio_dataset->get_num_cams(), false); + cameras_initialized[0] = true; + size_t last_camera = 0; + calib_opt->calib->T_i_c[0] = Sophus::SE3d(); // Identity + + auto next_max_weight_edge = [&](size_t cam_id) { + int max_weight = -1; + std::pair res(-1, -1); + + for (size_t i = 0; i < vio_dataset->get_num_cams(); i++) { + if (cameras_initialized[i]) continue; + + std::pair edge_id; + + if (i < cam_id) { + edge_id = std::make_pair(i, cam_id); + } else if (i > cam_id) { + edge_id = std::make_pair(cam_id, i); + } + + auto it = cam_graph.find(edge_id); + if (it != cam_graph.end() && max_weight < it->second.first) { + max_weight = it->second.first; + res.first = i; + res.second = it->second.second; + } + } + + return res; + }; + + for (size_t i = 0; i < vio_dataset->get_num_cams() - 1; i++) { + std::pair res = next_max_weight_edge(last_camera); + + std::cout << "Initializing camera pair " << last_camera << " " << res.first + << std::endl; + + if (res.first >= 0) { + size_t new_camera = res.first; + + TimeCamId 
tcid_last(res.second, last_camera); + TimeCamId tcid_new(res.second, new_camera); + + calib_opt->calib->T_i_c[new_camera] = + calib_opt->calib->T_i_c[last_camera] * + calib_init_poses.at(tcid_last).T_a_c.inverse() * + calib_init_poses.at(tcid_new).T_a_c; + + last_camera = new_camera; + cameras_initialized[last_camera] = true; + } + } + + std::cout << "Done camera extrinsics initialization:" << std::endl; + for (size_t j = 0; j < vio_dataset->get_num_cams(); j++) { + std::cout << "T_c0_c" << j << ":\n" + << calib_opt->calib->T_i_c[j].matrix() << std::endl; + } +} // namespace basalt + +void CamCalib::initOptimization() { + if (!calib_opt) { + std::cerr << "Calibration is not initialized. Initialize calibration first!" + << std::endl; + return; + } + + if (calib_init_poses.empty()) { + std::cerr << "No initial camera poses. Press init_cam_poses initialize " + "camera poses " + << std::endl; + return; + } + + calib_opt->setAprilgridCorners3d(april_grid.aprilgrid_corner_pos_3d); + + std::unordered_set invalid_frames; + for (const auto &kv : calib_corners) { + if (kv.second.corner_ids.size() < MIN_CORNERS) + invalid_frames.insert(kv.first); + } + + for (size_t j = 0; j < vio_dataset->get_image_timestamps().size(); ++j) { + int64_t timestamp_ns = vio_dataset->get_image_timestamps()[j]; + + int max_inliers = -1; + int max_inliers_idx = -1; + + for (size_t cam_id = 0; cam_id < calib_opt->calib->T_i_c.size(); cam_id++) { + TimeCamId tcid(timestamp_ns, cam_id); + const auto cp_it = calib_init_poses.find(tcid); + if (cp_it != calib_init_poses.end()) { + if ((int)cp_it->second.num_inliers > max_inliers) { + max_inliers = cp_it->second.num_inliers; + max_inliers_idx = cam_id; + } + } + } + + if (max_inliers >= (int)MIN_CORNERS) { + TimeCamId tcid(timestamp_ns, max_inliers_idx); + const auto cp_it = calib_init_poses.find(tcid); + + // Initial pose + calib_opt->addPoseMeasurement( + timestamp_ns, cp_it->second.T_a_c * + calib_opt->calib->T_i_c[max_inliers_idx].inverse()); + } 
else { + // Set all frames invalid if we do not have initial pose + for (size_t cam_id = 0; cam_id < calib_opt->calib->T_i_c.size(); + cam_id++) { + invalid_frames.emplace(timestamp_ns, cam_id); + } + } + } + + for (const auto &kv : calib_corners) { + if (invalid_frames.count(kv.first) == 0) + calib_opt->addAprilgridMeasurement(kv.first.frame_id, kv.first.cam_id, + kv.second.corners, + kv.second.corner_ids); + } + + calib_opt->init(); + computeProjections(); + + std::cout << "Initialized optimization." << std::endl; +} // namespace basalt + +void CamCalib::loadDataset() { + basalt::DatasetIoInterfacePtr dataset_io = + basalt::DatasetIoFactory::getDatasetIo(dataset_type); + + dataset_io->read(dataset_path); + + vio_dataset = dataset_io->get_data(); + setNumCameras(vio_dataset->get_num_cams()); + + if (skip_images > 1) { + std::vector new_image_timestamps; + for (size_t i = 0; i < vio_dataset->get_image_timestamps().size(); i++) { + if (i % skip_images == 0) + new_image_timestamps.push_back(vio_dataset->get_image_timestamps()[i]); + } + + vio_dataset->get_image_timestamps() = new_image_timestamps; + } + + // load detected corners if they exist + { + std::string path = + cache_path + cache_dataset_name + "_detected_corners.cereal"; + + std::ifstream is(path, std::ios::binary); + + if (is.good()) { + cereal::BinaryInputArchive archive(is); + + calib_corners.clear(); + calib_corners_rejected.clear(); + archive(calib_corners); + archive(calib_corners_rejected); + + std::cout << "Loaded detected corners from: " << path << std::endl; + } else { + std::cout << "No pre-processed detected corners found" << std::endl; + } + } + + // load initial poses if they exist + { + std::string path = cache_path + cache_dataset_name + "_init_poses.cereal"; + + std::ifstream is(path, std::ios::binary); + + if (is.good()) { + cereal::BinaryInputArchive archive(is); + + calib_init_poses.clear(); + archive(calib_init_poses); + + std::cout << "Loaded initial poses from: " << path << std::endl; 
+ } else { + std::cout << "No pre-processed initial poses found" << std::endl; + } + } + + // load calibration if exist + { + if (!calib_opt) calib_opt.reset(new PosesOptimization); + + calib_opt->loadCalib(cache_path); + } + + reprojected_corners.clear(); + reprojected_vignette.clear(); + + if (show_gui) { + show_frame = 0; + + show_frame.Meta().range[1] = vio_dataset->get_image_timestamps().size() - 1; + show_frame.Meta().gui_changed = true; + } +} + +void CamCalib::optimize() { optimizeWithParam(true); } + +bool CamCalib::optimizeWithParam(bool print_info, + std::map *stats) { + if (calib_init_poses.empty()) { + std::cerr << "No initial camera poses. Press init_cam_poses initialize " + "camera poses " + << std::endl; + return true; + } + + if (!calib_opt.get() || !calib_opt->calibInitialized()) { + std::cerr << "No initial intrinsics. Press init_intrinsics initialize " + "intrinsics" + << std::endl; + return true; + } + + bool converged = true; + + if (calib_opt) { + // calib_opt->compute_projections(); + double error; + double reprojection_error; + int num_points; + + auto start = std::chrono::high_resolution_clock::now(); + + converged = calib_opt->optimize(opt_intr, huber_thresh, stop_thresh, error, + num_points, reprojection_error); + + auto finish = std::chrono::high_resolution_clock::now(); + + if (stats) { + stats->clear(); + + stats->emplace("energy_error", error); + stats->emplace("num_points", num_points); + stats->emplace("mean_energy_error", error / num_points); + stats->emplace("reprojection_error", reprojection_error); + stats->emplace("mean_reprojection_error", + reprojection_error / num_points); + } + + if (print_info) { + std::cout << "==================================" << std::endl; + + for (size_t i = 0; i < vio_dataset->get_num_cams(); i++) { + std::cout << "intrinsics " << i << ": " + << calib_opt->calib->intrinsics[i].getParam().transpose() + << std::endl; + std::cout << "T_i_c" << i << ":\n" + << calib_opt->calib->T_i_c[i].matrix() << 
std::endl; + } + + std::cout << "Current error: " << error << " num_points " << num_points + << " mean_error " << error / num_points + << " reprojection_error " << reprojection_error + << " mean reprojection " << reprojection_error / num_points + << " opt_time " + << std::chrono::duration_cast( + finish - start) + .count() + << "ms." << std::endl; + + if (converged) std::cout << "Optimization Converged !!" << std::endl; + + std::cout << "==================================" << std::endl; + } + + if (show_gui) { + computeProjections(); + } + } + + return converged; +} + +void CamCalib::saveCalib() { + if (calib_opt) { + calib_opt->saveCalib(cache_path); + + std::cout << "Saved calibration in " << cache_path << "calibration.json" + << std::endl; + } +} + +void CamCalib::drawImageOverlay(pangolin::View &v, size_t cam_id) { + UNUSED(v); + + size_t frame_id = show_frame; + + if (vio_dataset && frame_id < vio_dataset->get_image_timestamps().size()) { + int64_t timestamp_ns = vio_dataset->get_image_timestamps()[frame_id]; + TimeCamId tcid(timestamp_ns, cam_id); + + if (show_corners) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (calib_corners.find(tcid) != calib_corners.end()) { + const CalibCornerData &cr = calib_corners.at(tcid); + const CalibCornerData &cr_rej = calib_corners_rejected.at(tcid); + + for (size_t i = 0; i < cr.corners.size(); i++) { + // the radius is the threshold used for maximum displacement. The + // search region is slightly larger. 
+ const float radius = static_cast(cr.radii[i]); + const Eigen::Vector2f c = cr.corners[i].cast(); + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", cr.corner_ids[i]).Draw(c[0], c[1]); + } + + pangolin::GlFont::I() + .Text("Detected %d corners (%d rejected)", cr.corners.size(), + cr_rej.corners.size()) + .Draw(5, 50); + + if (show_corners_rejected) { + glColor3f(1.0, 0.5, 0.0); + + for (size_t i = 0; i < cr_rej.corners.size(); i++) { + // the radius is the threshold used for maximum displacement. The + // search region is slightly larger. + const float radius = static_cast(cr_rej.radii[i]); + const Eigen::Vector2f c = cr_rej.corners[i].cast(); + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I() + .Text("%d", cr_rej.corner_ids[i]) + .Draw(c[0], c[1]); + } + } + + } else { + glLineWidth(1.0); + + pangolin::GlFont::I().Text("Corners not processed").Draw(5, 50); + } + } + + if (show_init_reproj) { + glLineWidth(1.0); + glColor3f(1.0, 1.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (calib_init_poses.find(tcid) != calib_init_poses.end()) { + const CalibInitPoseData &cr = calib_init_poses.at(tcid); + + for (size_t i = 0; i < cr.reprojected_corners.size(); i++) { + Eigen::Vector2d c = cr.reprojected_corners[i]; + pangolin::glDrawCirclePerimeter(c[0], c[1], 3.0); + + if (show_ids) pangolin::GlFont::I().Text("%d", i).Draw(c[0], c[1]); + } + + pangolin::GlFont::I() + .Text("Initial pose with %d inliers", cr.num_inliers) + .Draw(5, 100); + + } else { + pangolin::GlFont::I().Text("Initial pose not processed").Draw(5, 100); + } + } + + if (show_opt) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 1.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (reprojected_corners.find(tcid) != reprojected_corners.end()) { + if (calib_corners.count(tcid) > 0 && + calib_corners.at(tcid).corner_ids.size() >= 
MIN_CORNERS) { + const auto &rc = reprojected_corners.at(tcid); + + for (size_t i = 0; i < rc.corners_proj.size(); i++) { + if (!rc.corners_proj_success[i]) continue; + + Eigen::Vector2d c = rc.corners_proj[i]; + pangolin::glDrawCirclePerimeter(c[0], c[1], 3.0); + + if (show_ids) pangolin::GlFont::I().Text("%d", i).Draw(c[0], c[1]); + } + } else { + pangolin::GlFont::I().Text("Too few corners detected.").Draw(5, 150); + } + } + } + + if (show_vign) { + glLineWidth(1.0); + glColor3f(1.0, 1.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (reprojected_vignette.find(tcid) != reprojected_vignette.end()) { + if (calib_corners.count(tcid) > 0 && + calib_corners.at(tcid).corner_ids.size() >= MIN_CORNERS) { + const auto &rc = reprojected_vignette.at(tcid); + + bool has_errors = false; + auto it = reprojected_vignette_error.find(tcid); + if (it != reprojected_vignette_error.end()) has_errors = true; + + for (size_t i = 0; i < rc.corners_proj.size(); i++) { + if (!rc.corners_proj_success[i]) continue; + + Eigen::Vector2d c = rc.corners_proj[i].head<2>(); + pangolin::glDrawCirclePerimeter(c[0], c[1], 3.0); + + if (show_ids) { + if (has_errors) { + pangolin::GlFont::I() + .Text("%d(%f)", i, it->second[i]) + .Draw(c[0], c[1]); + } else { + pangolin::GlFont::I().Text("%d", i).Draw(c[0], c[1]); + } + } + } + } else { + pangolin::GlFont::I().Text("Too few corners detected.").Draw(5, 200); + } + } + } + } +} + +bool CamCalib::hasCorners() const { return !calib_corners.empty(); } + +} // namespace basalt diff --git a/src/calibration/cam_imu_calib.cpp b/src/calibration/cam_imu_calib.cpp new file mode 100644 index 0000000..f1f5fb9 --- /dev/null +++ b/src/calibration/cam_imu_calib.cpp @@ -0,0 +1,1160 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include + +#include + +#include + +namespace basalt { + +CamImuCalib::CamImuCalib(const std::string &dataset_path, + const std::string &dataset_type, + const std::string &aprilgrid_path, + const std::string &cache_path, + const std::string &cache_dataset_name, int skip_images, + const std::vector &imu_noise, bool show_gui) + : dataset_path(dataset_path), + dataset_type(dataset_type), + april_grid(aprilgrid_path), + cache_path(ensure_trailing_slash(cache_path)), + cache_dataset_name(cache_dataset_name), + skip_images(skip_images), + show_gui(show_gui), + imu_noise(imu_noise), + show_frame("ui.show_frame", 0, 0, 1500), + show_corners("ui.show_corners", true, false, true), + show_corners_rejected("ui.show_corners_rejected", false, false, true), + show_init_reproj("ui.show_init_reproj", false, false, true), + show_opt("ui.show_opt", true, false, true), + show_ids("ui.show_ids", false, false, true), + show_accel("ui.show_accel", true, false, true), + show_gyro("ui.show_gyro", false, false, true), + show_pos("ui.show_pos", false, false, true), + show_rot_error("ui.show_rot_error", false, false, true), + show_mocap("ui.show_mocap", false, false, true), + show_mocap_rot_error("ui.show_mocap_rot_error", false, false, true), + show_mocap_rot_vel("ui.show_mocap_rot_vel", false, false, true), + show_spline("ui.show_spline", true, false, true), + show_data("ui.show_data", true, false, true), + opt_intr("ui.opt_intr", false, false, true), + opt_poses("ui.opt_poses", false, false, true), + opt_corners("ui.opt_corners", true, false, true), + opt_cam_time_offset("ui.opt_cam_time_offset", false, false, true), + opt_imu_scale("ui.opt_imu_scale", false, false, true), + opt_mocap("ui.opt_mocap", false, false, true), + huber_thresh("ui.huber_thresh", 4.0, 0.1, 10.0), + opt_until_convg("ui.opt_until_converge", false, false, true), + stop_thresh("ui.stop_thresh", 1e-8, 1e-10, 0.01, true) { + if (show_gui) initGui(); +} + +CamImuCalib::~CamImuCalib() { + if 
(processing_thread) { + processing_thread->join(); + } +} + +void CamImuCalib::initGui() { + pangolin::CreateWindowAndBind("Main", 1600, 1000); + + img_view_display = + &pangolin::CreateDisplay() + .SetBounds(0.5, 1.0, pangolin::Attach::Pix(UI_WIDTH), 1.0) + .SetLayout(pangolin::LayoutEqual); + + pangolin::View &plot_display = pangolin::CreateDisplay().SetBounds( + 0.0, 0.5, pangolin::Attach::Pix(UI_WIDTH), 1.0); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + plotter = new pangolin::Plotter(&imu_data_log, 0.0, 100.0, -10.0, 10.0, 0.01f, + 0.01f); + plot_display.AddDisplay(*plotter); + + pangolin::Var> load_dataset( + "ui.load_dataset", std::bind(&CamImuCalib::loadDataset, this)); + + pangolin::Var> detect_corners( + "ui.detect_corners", std::bind(&CamImuCalib::detectCorners, this)); + + pangolin::Var> init_cam_poses( + "ui.init_cam_poses", std::bind(&CamImuCalib::initCamPoses, this)); + + pangolin::Var> init_cam_imu( + "ui.init_cam_imu", std::bind(&CamImuCalib::initCamImuTransform, this)); + + pangolin::Var> init_opt( + "ui.init_opt", std::bind(&CamImuCalib::initOptimization, this)); + + pangolin::Var> optimize( + "ui.optimize", std::bind(&CamImuCalib::optimize, this)); + + pangolin::Var> init_mocap( + "ui.init_mocap", std::bind(&CamImuCalib::initMocap, this)); + + pangolin::Var> save_calib( + "ui.save_calib", std::bind(&CamImuCalib::saveCalib, this)); + + pangolin::Var> save_mocap_calib( + "ui.save_mocap_calib", std::bind(&CamImuCalib::saveMocapCalib, this)); + + setNumCameras(1); +} + +void CamImuCalib::setNumCameras(size_t n) { + while (img_view.size() < n && show_gui) { + std::shared_ptr iv(new pangolin::ImageView); + + size_t idx = img_view.size(); + img_view.push_back(iv); + + img_view_display->AddDisplay(*iv); + iv->extern_draw_function = std::bind(&CamImuCalib::drawImageOverlay, this, + std::placeholders::_1, idx); + } +} + +void CamImuCalib::renderingLoop() { + while (!pangolin::ShouldQuit()) { + 
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + if (vio_dataset.get()) { + if (show_frame.GuiChanged()) { + size_t frame_id = static_cast(show_frame); + int64_t timestamp = vio_dataset->get_image_timestamps()[frame_id]; + + const std::vector &img_vec = + vio_dataset->get_image_data(timestamp); + + for (size_t cam_id = 0; cam_id < vio_dataset->get_num_cams(); cam_id++) + if (img_vec[cam_id].img.get()) { + pangolin::GlPixFormat fmt; + fmt.glformat = GL_LUMINANCE; + fmt.gltype = GL_UNSIGNED_SHORT; + fmt.scalable_internal_format = GL_LUMINANCE16; + + img_view[cam_id]->SetImage( + img_vec[cam_id].img->ptr, img_vec[cam_id].img->w, + img_vec[cam_id].img->h, img_vec[cam_id].img->pitch, fmt); + } else { + img_view[cam_id]->Clear(); + } + drawPlots(); + } + + if (show_accel.GuiChanged() || show_gyro.GuiChanged() || + show_data.GuiChanged() || show_spline.GuiChanged() || + show_pos.GuiChanged() || show_rot_error.GuiChanged() || + show_mocap.GuiChanged() || show_mocap_rot_error.GuiChanged() || + show_mocap_rot_vel.GuiChanged()) { + drawPlots(); + } + } + + if (opt_until_convg) { + bool converged = optimizeWithParam(true); + if (converged) opt_until_convg = false; + } + + pangolin::FinishFrame(); + } +} + +void CamImuCalib::computeProjections() { + reprojected_corners.clear(); + + if (!calib_opt.get() || !vio_dataset.get()) return; + + for (size_t j = 0; j < vio_dataset->get_image_timestamps().size(); ++j) { + int64_t timestamp_ns = vio_dataset->get_image_timestamps()[j]; + + int64_t timestamp_corrected_ns = + timestamp_ns + calib_opt->getCamTimeOffsetNs(); + + if (timestamp_corrected_ns < calib_opt->getMinTimeNs() || + timestamp_corrected_ns >= calib_opt->getMaxTimeNs()) + continue; + + for (size_t i = 0; i < calib_opt->calib->intrinsics.size(); i++) { + TimeCamId tcid(timestamp_ns, i); + + ProjectedCornerData rc; + + Sophus::SE3d T_c_w_ = (calib_opt->getT_w_i(timestamp_corrected_ns) * + calib_opt->getCamT_i_c(i)) + .inverse(); + + Eigen::Matrix4d T_c_w = 
T_c_w_.matrix(); + + calib_opt->calib->intrinsics[i].project( + april_grid.aprilgrid_corner_pos_3d, T_c_w, rc.corners_proj, + rc.corners_proj_success); + + reprojected_corners.emplace(tcid, rc); + } + } +} + +void CamImuCalib::detectCorners() { + if (processing_thread) { + processing_thread->join(); + processing_thread.reset(); + } + + processing_thread.reset(new std::thread([this]() { + std::cout << "Started detecting corners" << std::endl; + + CalibHelper::detectCorners(this->vio_dataset, this->calib_corners, + this->calib_corners_rejected); + + std::string path = + cache_path + cache_dataset_name + "_detected_corners.cereal"; + std::ofstream os(path, std::ios::binary); + cereal::BinaryOutputArchive archive(os); + + archive(this->calib_corners); + archive(this->calib_corners_rejected); + + std::cout << "Done detecting corners. Saved them here: " << path + << std::endl; + })); + + if (!show_gui) { + processing_thread->join(); + processing_thread.reset(); + } +} + +void CamImuCalib::initCamPoses() { + if (calib_corners.empty()) { + std::cerr << "No corners detected. Press detect_corners to start corner " + "detection." + << std::endl; + return; + } + + if (!calib_opt.get() || !calib_opt->calibInitialized()) { + std::cerr << "No camera intrinsics. Calibrate camera using " + "basalt_calibrate first!" + << std::endl; + return; + } + + if (processing_thread) { + processing_thread->join(); + processing_thread.reset(); + } + { + std::cout << "Started initial camera pose computation " << std::endl; + + CalibHelper::initCamPoses(calib_opt->calib, + april_grid.aprilgrid_corner_pos_3d, + this->calib_corners, this->calib_init_poses); + + std::string path = cache_path + cache_dataset_name + "_init_poses.cereal"; + std::ofstream os(path, std::ios::binary); + cereal::BinaryOutputArchive archive(os); + + archive(this->calib_init_poses); + + std::cout << "Done initial camera pose computation. 
Saved them here: " + << path << std::endl; + }; +} + +void CamImuCalib::initCamImuTransform() { + if (!calib_opt.get() || !calib_opt->calibInitialized()) { + std::cerr << "No camera intrinsics. Calibrate camera using " + "basalt_calibrate first!" + << std::endl; + return; + } + + if (calib_init_poses.empty()) { + std::cerr << "Initialize camera poses first!" << std::endl; + return; + } + + std::vector timestamps_cam; + Eigen::aligned_vector rot_vel_cam; + Eigen::aligned_vector rot_vel_imu; + + Sophus::SO3d R_i_c0_init = calib_opt->getCamT_i_c(0).so3(); + + for (size_t i = 1; i < vio_dataset->get_image_timestamps().size(); i++) { + int64_t timestamp0_ns = vio_dataset->get_image_timestamps()[i - 1]; + int64_t timestamp1_ns = vio_dataset->get_image_timestamps()[i]; + + TimeCamId tcid0(timestamp0_ns, 0); + TimeCamId tcid1(timestamp1_ns, 0); + + if (calib_init_poses.find(tcid0) == calib_init_poses.end()) continue; + if (calib_init_poses.find(tcid1) == calib_init_poses.end()) continue; + + Sophus::SE3d T_a_c0 = calib_init_poses.at(tcid0).T_a_c; + Sophus::SE3d T_a_c1 = calib_init_poses.at(tcid1).T_a_c; + + double dt = (timestamp1_ns - timestamp0_ns) * 1e-9; + + Eigen::Vector3d rot_vel_c0 = + R_i_c0_init * (T_a_c0.so3().inverse() * T_a_c1.so3()).log() / dt; + + timestamps_cam.push_back(timestamp0_ns); + rot_vel_cam.push_back(rot_vel_c0); + } + + for (size_t j = 0; j < timestamps_cam.size(); j++) { + int idx = -1; + int64_t min_dist = std::numeric_limits::max(); + + for (size_t i = 1; i < vio_dataset->get_gyro_data().size(); i++) { + int64_t dist = + vio_dataset->get_gyro_data()[i].timestamp_ns - timestamps_cam[j]; + if (std::abs(dist) < min_dist) { + min_dist = std::abs(dist); + idx = i; + } + } + + rot_vel_imu.push_back(vio_dataset->get_gyro_data()[idx].data); + } + + BASALT_ASSERT_STREAM(rot_vel_cam.size() == rot_vel_imu.size(), + "rot_vel_cam.size() " << rot_vel_cam.size() + << " rot_vel_imu.size() " + << rot_vel_imu.size()); + + // R_i_c * rot_vel_cam = rot_vel_imu + 
// R_i_c * rot_vel_cam * rot_vel_cam.T = rot_vel_imu * rot_vel_cam.T + // R_i_c = rot_vel_imu * rot_vel_cam.T * (rot_vel_cam * rot_vel_cam.T)^-1; + + Eigen::Matrix rot_vel_cam_m(3, rot_vel_cam.size()), + rot_vel_imu_m(3, rot_vel_imu.size()); + + for (size_t i = 0; i < rot_vel_cam.size(); i++) { + rot_vel_cam_m.col(i) = rot_vel_cam[i]; + rot_vel_imu_m.col(i) = rot_vel_imu[i]; + } + + Eigen::Matrix3d R_i_c0 = + rot_vel_imu_m * rot_vel_cam_m.transpose() * + (rot_vel_cam_m * rot_vel_cam_m.transpose()).inverse(); + + // std::cout << "raw R_i_c0\n" << R_i_c0 << std::endl; + + Eigen::AngleAxisd aa(R_i_c0); // RotationMatrix to AxisAngle + R_i_c0 = aa.toRotationMatrix(); + + // std::cout << "R_i_c0\n" << R_i_c0 << std::endl; + + Sophus::SE3d T_i_c0(R_i_c0, Eigen::Vector3d::Zero()); + + std::cout << "T_i_c0\n" << T_i_c0.matrix() << std::endl; + + for (size_t i = 0; i < vio_dataset->get_num_cams(); i++) { + calib_opt->getCamT_i_c(i) = T_i_c0 * calib_opt->getCamT_i_c(i); + } + + std::cout << "Done Camera-IMU extrinsics initialization:" << std::endl; + for (size_t j = 0; j < vio_dataset->get_num_cams(); j++) { + std::cout << "T_i_c" << j << ":\n" + << calib_opt->getCamT_i_c(j).matrix() << std::endl; + } +} + +void CamImuCalib::initOptimization() { + if (!calib_opt.get() || !calib_opt->calibInitialized()) { + std::cerr << "No camera intrinsics. Calibrate camera using " + "basalt_calibrate first!" 
+ << std::endl; + return; + } + + calib_opt->setAprilgridCorners3d(april_grid.aprilgrid_corner_pos_3d); + + for (size_t i = 0; i < vio_dataset->get_accel_data().size(); i++) { + const basalt::AccelData &ad = vio_dataset->get_accel_data()[i]; + const basalt::GyroData &gd = vio_dataset->get_gyro_data()[i]; + + calib_opt->addAccelMeasurement(ad.timestamp_ns, ad.data); + calib_opt->addGyroMeasurement(gd.timestamp_ns, gd.data); + } + + std::set invalid_timestamps; + for (const auto &kv : calib_corners) { + if (kv.second.corner_ids.size() < MIN_CORNERS) + invalid_timestamps.insert(kv.first.frame_id); + } + + for (const auto &kv : calib_corners) { + if (invalid_timestamps.find(kv.first.frame_id) == invalid_timestamps.end()) + calib_opt->addAprilgridMeasurement(kv.first.frame_id, kv.first.cam_id, + kv.second.corners, + kv.second.corner_ids); + } + + for (size_t i = 0; i < vio_dataset->get_gt_timestamps().size(); i++) { + calib_opt->addMocapMeasurement(vio_dataset->get_gt_timestamps()[i], + vio_dataset->get_gt_pose_data()[i]); + } + + bool g_initialized = false; + Eigen::Vector3d g_a_init; + + for (size_t j = 0; j < vio_dataset->get_image_timestamps().size(); ++j) { + int64_t timestamp_ns = vio_dataset->get_image_timestamps()[j]; + + TimeCamId tcid(timestamp_ns, 0); + const auto cp_it = calib_init_poses.find(tcid); + + if (cp_it != calib_init_poses.end()) { + Sophus::SE3d T_a_i = + cp_it->second.T_a_c * calib_opt->getCamT_i_c(0).inverse(); + + calib_opt->addPoseMeasurement(timestamp_ns, T_a_i); + + if (!g_initialized) { + for (size_t i = 0; + i < vio_dataset->get_accel_data().size() && !g_initialized; i++) { + const basalt::AccelData &ad = vio_dataset->get_accel_data()[i]; + if (std::abs(ad.timestamp_ns - timestamp_ns) < 3000000) { + g_a_init = T_a_i.so3() * ad.data; + g_initialized = true; + std::cout << "g_a initialized with " << g_a_init.transpose() + << std::endl; + } + } + } + } + } + + const int num_samples = 100; + double dt = 0.0; + for (int i = 0; i < num_samples; 
i++) { + dt += 1e-9 * (vio_dataset->get_gyro_data()[i + 1].timestamp_ns - + vio_dataset->get_gyro_data()[i].timestamp_ns); + } + dt /= num_samples; + + std::cout << "IMU dt: " << dt << " freq: " << 1.0 / dt << std::endl; + + calib_opt->calib->imu_update_rate = 1.0 / dt; + + calib_opt->setG(g_a_init); + calib_opt->init(); + computeProjections(); + recomputeDataLog(); + + std::cout << "Initialized optimization." << std::endl; +} + +void CamImuCalib::initMocap() { + if (!calib_opt.get() || !calib_opt->calibInitialized()) { + std::cerr << "Initalize optimization first!" << std::endl; + return; + } + + if (vio_dataset->get_gt_timestamps().empty()) { + std::cerr << "The dataset contains no Mocap data!" << std::endl; + return; + } + + { + std::vector timestamps_cam; + Eigen::aligned_vector rot_vel_mocap; + Eigen::aligned_vector rot_vel_imu; + + Sophus::SO3d R_i_mark_init = calib_opt->mocap_calib->T_i_mark.so3(); + + for (size_t i = 1; i < vio_dataset->get_gt_timestamps().size(); i++) { + int64_t timestamp0_ns = vio_dataset->get_gt_timestamps()[i - 1]; + int64_t timestamp1_ns = vio_dataset->get_gt_timestamps()[i]; + + Sophus::SE3d T_a_mark0 = vio_dataset->get_gt_pose_data()[i - 1]; + Sophus::SE3d T_a_mark1 = vio_dataset->get_gt_pose_data()[i]; + + double dt = (timestamp1_ns - timestamp0_ns) * 1e-9; + + Eigen::Vector3d rot_vel_c0 = + R_i_mark_init * (T_a_mark0.so3().inverse() * T_a_mark1.so3()).log() / + dt; + + timestamps_cam.push_back(timestamp0_ns); + rot_vel_mocap.push_back(rot_vel_c0); + } + + for (size_t j = 0; j < timestamps_cam.size(); j++) { + int idx = -1; + int64_t min_dist = std::numeric_limits::max(); + + for (size_t i = 1; i < vio_dataset->get_gyro_data().size(); i++) { + int64_t dist = + vio_dataset->get_gyro_data()[i].timestamp_ns - timestamps_cam[j]; + if (std::abs(dist) < min_dist) { + min_dist = std::abs(dist); + idx = i; + } + } + + rot_vel_imu.push_back(vio_dataset->get_gyro_data()[idx].data); + } + + BASALT_ASSERT_STREAM(rot_vel_mocap.size() == 
rot_vel_imu.size(), + "rot_vel_cam.size() " << rot_vel_mocap.size() + << " rot_vel_imu.size() " + << rot_vel_imu.size()); + + // R_i_c * rot_vel_mocap = rot_vel_imu + // R_i_c * rot_vel_mocap * rot_vel_mocap.T = rot_vel_imu * rot_vel_mocap.T + // R_i_c = rot_vel_imu * rot_vel_mocap.T * (rot_vel_mocap * + // rot_vel_mocap.T)^-1; + + Eigen::Matrix rot_vel_mocap_m( + 3, rot_vel_mocap.size()), + rot_vel_imu_m(3, rot_vel_imu.size()); + + for (size_t i = 0; i < rot_vel_mocap.size(); i++) { + rot_vel_mocap_m.col(i) = rot_vel_mocap[i]; + rot_vel_imu_m.col(i) = rot_vel_imu[i]; + } + + Eigen::Matrix3d R_i_mark = + rot_vel_imu_m * rot_vel_mocap_m.transpose() * + (rot_vel_mocap_m * rot_vel_mocap_m.transpose()).inverse(); + + // std::cout << "raw R_i_c0\n" << R_i_c0 << std::endl; + + Eigen::AngleAxisd aa(R_i_mark); // RotationMatrix to AxisAngle + R_i_mark = aa.toRotationMatrix(); + + Sophus::SE3d T_i_mark_new(R_i_mark, Eigen::Vector3d::Zero()); + calib_opt->mocap_calib->T_i_mark = + T_i_mark_new * calib_opt->mocap_calib->T_i_mark; + + std::cout << "Initialized T_i_mark:\n" + << calib_opt->mocap_calib->T_i_mark.matrix() << std::endl; + } + + // Initialize T_w_moc; + Sophus::SE3d T_w_moc; + + // TODO: check for failure cases.. 
+ for (size_t i = vio_dataset->get_gt_timestamps().size() / 2; + i < vio_dataset->get_gt_timestamps().size(); i++) { + int64_t timestamp_ns = vio_dataset->get_gt_timestamps()[i]; + T_w_moc = calib_opt->getT_w_i(timestamp_ns) * + calib_opt->mocap_calib->T_i_mark * + vio_dataset->get_gt_pose_data()[i].inverse(); + + std::cout << "Initialized T_w_moc:\n" << T_w_moc.matrix() << std::endl; + break; + } + + calib_opt->setT_w_moc(T_w_moc); + calib_opt->mocap_initialized = true; + + recomputeDataLog(); +} + +void CamImuCalib::loadDataset() { + basalt::DatasetIoInterfacePtr dataset_io = + basalt::DatasetIoFactory::getDatasetIo(dataset_type); + + dataset_io->read(dataset_path); + + vio_dataset = dataset_io->get_data(); + setNumCameras(vio_dataset->get_num_cams()); + + if (skip_images > 1) { + std::vector new_image_timestamps; + for (size_t i = 0; i < vio_dataset->get_image_timestamps().size(); i++) { + if (i % skip_images == 0) + new_image_timestamps.push_back(vio_dataset->get_image_timestamps()[i]); + } + + vio_dataset->get_image_timestamps() = new_image_timestamps; + } + + // load detected corners if they exist + { + std::string path = + cache_path + cache_dataset_name + "_detected_corners.cereal"; + + std::ifstream is(path, std::ios::binary); + + if (is.good()) { + cereal::BinaryInputArchive archive(is); + + calib_corners.clear(); + calib_corners_rejected.clear(); + archive(calib_corners); + archive(calib_corners_rejected); + + std::cout << "Loaded detected corners from: " << path << std::endl; + } else { + std::cout << "No pre-processed detected corners found" << std::endl; + } + } + + // load initial poses if they exist + { + std::string path = cache_path + cache_dataset_name + "_init_poses.cereal"; + + std::ifstream is(path, std::ios::binary); + + if (is.good()) { + cereal::BinaryInputArchive archive(is); + + calib_init_poses.clear(); + archive(calib_init_poses); + + std::cout << "Loaded initial poses from: " << path << std::endl; + } else { + std::cout << "No 
pre-processed initial poses found" << std::endl; + } + } + + // load calibration if exist + { + if (!calib_opt) calib_opt.reset(new SplineOptimization<5, double>); + + calib_opt->loadCalib(cache_path); + + calib_opt->calib->accel_noise_std.setConstant(imu_noise[0]); + calib_opt->calib->gyro_noise_std.setConstant(imu_noise[1]); + calib_opt->calib->accel_bias_std.setConstant(imu_noise[2]); + calib_opt->calib->gyro_bias_std.setConstant(imu_noise[3]); + } + calib_opt->resetMocapCalib(); + + reprojected_corners.clear(); + + if (show_gui) { + show_frame = 0; + + show_frame.Meta().range[1] = vio_dataset->get_image_timestamps().size() - 1; + show_frame.Meta().gui_changed = true; + + plotter->ClearSeries(); + recomputeDataLog(); + drawPlots(); + } +} + +void CamImuCalib::optimize() { optimizeWithParam(true); } + +bool CamImuCalib::optimizeWithParam(bool print_info, + std::map *stats) { + if (!calib_opt.get() || !calib_opt->calibInitialized()) { + std::cerr << "Initalize optimization first!" << std::endl; + return true; + } + + bool converged = true; + + if (calib_opt) { + // calib_opt->compute_projections(); + double error; + double reprojection_error; + int num_points; + + auto start = std::chrono::high_resolution_clock::now(); + + converged = calib_opt->optimize(opt_intr, opt_poses, opt_corners, + opt_cam_time_offset, opt_imu_scale, + opt_mocap, huber_thresh, stop_thresh, error, + num_points, reprojection_error); + + auto finish = std::chrono::high_resolution_clock::now(); + + if (stats) { + stats->clear(); + + stats->emplace("energy_error", error); + stats->emplace("num_points", num_points); + stats->emplace("mean_energy_error", error / num_points); + stats->emplace("reprojection_error", reprojection_error); + stats->emplace("mean_reprojection_error", + reprojection_error / num_points); + } + + if (print_info) { + std::cout << "==================================" << std::endl; + + for (size_t i = 0; i < vio_dataset->get_num_cams(); i++) { + std::cout << "intrinsics " << 
i << ": " + << calib_opt->getIntrinsics(i).transpose() << std::endl; + std::cout << "T_i_c" << i << ":\n" + << calib_opt->getCamT_i_c(i).matrix() << std::endl; + } + + std::cout << "T_w_moc:\n" + << calib_opt->getT_w_moc().matrix() << std::endl; + + std::cout << "T_mark_i:\n" + << calib_opt->getT_mark_i().matrix() << std::endl; + + std::cout << "cam_time_offset_ns: " << calib_opt->getCamTimeOffsetNs() + << std::endl; + + std::cout << "mocap_time_offset_ns: " << calib_opt->getMocapTimeOffsetNs() + << std::endl; + { + Eigen::Vector3d accel_bias; + Eigen::Matrix3d accel_scale; + calib_opt->getAccelBias().getBiasAndScale(accel_bias, accel_scale); + + std::cout << "accel_bias: " << accel_bias.transpose() + << "\naccel_scale:\n" + << Eigen::Matrix3d::Identity() + accel_scale << std::endl; + + Eigen::Vector3d gyro_bias; + Eigen::Matrix3d gyro_scale; + calib_opt->getGyroBias().getBiasAndScale(gyro_bias, gyro_scale); + + std::cout << "gyro_bias: " << gyro_bias.transpose() << "\ngyro_scale:\n" + << Eigen::Matrix3d::Identity() + gyro_scale << std::endl; + } + + std::cout << " g " << calib_opt->getG().transpose() + << " norm: " << calib_opt->getG().norm() << " g_mocap: " + << (calib_opt->getT_w_moc().inverse().so3() * calib_opt->getG()) + .transpose() + << std::endl; + + std::cout << "Current error: " << error << " num_points " << num_points + << " mean_error " << error / num_points + << " reprojection_error " << reprojection_error + << " mean reprojection " << reprojection_error / num_points + << " opt_time " + << std::chrono::duration_cast( + finish - start) + .count() + << "ms." << std::endl; + + if (converged) std::cout << "Optimization Converged !!" 
<< std::endl; + + std::cout << "==================================" << std::endl; + } + + // calib_opt->compute_error(error, num_points); + // std::cerr << "after opt error: " << error << " num_points " << + // num_points << std::endl; + + if (show_gui) { + computeProjections(); + recomputeDataLog(); + drawPlots(); + } + } + + return converged; +} + +void CamImuCalib::saveCalib() { + if (calib_opt) { + calib_opt->saveCalib(cache_path); + + std::cout << "Saved calibration in " << cache_path << "calibration.json" + << std::endl; + } +} + +void CamImuCalib::saveMocapCalib() { + if (calib_opt) { + calib_opt->saveMocapCalib(cache_path, + vio_dataset->get_mocap_to_imu_offset_ns()); + + std::cout << "Saved Mocap calibration in " << cache_path + << "mocap_calibration.json" << std::endl; + } +} + +void CamImuCalib::drawImageOverlay(pangolin::View &v, size_t cam_id) { + UNUSED(v); + + size_t frame_id = show_frame; + + if (vio_dataset && frame_id < vio_dataset->get_image_timestamps().size()) { + int64_t timestamp_ns = vio_dataset->get_image_timestamps()[frame_id]; + TimeCamId tcid(timestamp_ns, cam_id); + + if (show_corners) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (calib_corners.find(tcid) != calib_corners.end()) { + const CalibCornerData &cr = calib_corners.at(tcid); + const CalibCornerData &cr_rej = calib_corners_rejected.at(tcid); + + for (size_t i = 0; i < cr.corners.size(); i++) { + // the radius is the threshold used for maximum displacement. The + // search region is slightly larger. 
+ const float radius = static_cast(cr.radii[i]); + const Eigen::Vector2f c = cr.corners[i].cast(); + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", cr.corner_ids[i]).Draw(c[0], c[1]); + } + + pangolin::GlFont::I() + .Text("Detected %d corners (%d rejected)", cr.corners.size(), + cr_rej.corners.size()) + .Draw(5, 50); + + if (show_corners_rejected) { + glColor3f(1.0, 0.5, 0.0); + + for (size_t i = 0; i < cr_rej.corners.size(); i++) { + // the radius is the threshold used for maximum displacement. The + // search region is slightly larger. + const float radius = static_cast(cr_rej.radii[i]); + const Eigen::Vector2f c = cr_rej.corners[i].cast(); + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I() + .Text("%d", cr_rej.corner_ids[i]) + .Draw(c[0], c[1]); + } + } + + } else { + glLineWidth(1.0); + + pangolin::GlFont::I().Text("Corners not processed").Draw(5, 50); + } + } + + if (show_init_reproj) { + glLineWidth(1.0); + glColor3f(1.0, 1.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (calib_init_poses.find(tcid) != calib_init_poses.end()) { + const CalibInitPoseData &cr = calib_init_poses.at(tcid); + + for (size_t i = 0; i < cr.reprojected_corners.size(); i++) { + Eigen::Vector2d c = cr.reprojected_corners[i]; + pangolin::glDrawCirclePerimeter(c[0], c[1], 3.0); + + if (show_ids) pangolin::GlFont::I().Text("%d", i).Draw(c[0], c[1]); + } + + pangolin::GlFont::I() + .Text("Initial pose with %d inliers", cr.num_inliers) + .Draw(5, 100); + + } else { + pangolin::GlFont::I().Text("Initial pose not processed").Draw(5, 100); + } + } + + if (show_opt) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 1.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (reprojected_corners.find(tcid) != reprojected_corners.end()) { + if (calib_corners.count(tcid) > 0 && + calib_corners.at(tcid).corner_ids.size() >= 
MIN_CORNERS) { + const auto &rc = reprojected_corners.at(tcid); + + for (size_t i = 0; i < rc.corners_proj.size(); i++) { + if (!rc.corners_proj_success[i]) continue; + + Eigen::Vector2d c = rc.corners_proj[i]; + pangolin::glDrawCirclePerimeter(c[0], c[1], 3.0); + + if (show_ids) pangolin::GlFont::I().Text("%d", i).Draw(c[0], c[1]); + } + } else { + pangolin::GlFont::I().Text("Too few corners detected.").Draw(5, 150); + } + } + } + } +} + +void CamImuCalib::recomputeDataLog() { + imu_data_log.Clear(); + pose_data_log.Clear(); + mocap_data_log.Clear(); + + if (!vio_dataset || vio_dataset->get_accel_data().empty()) return; + + double min_time = vio_dataset->get_accel_data()[0].timestamp_ns * 1e-9; + + for (size_t i = 0; i < vio_dataset->get_accel_data().size(); i++) { + const basalt::AccelData &ad = vio_dataset->get_accel_data()[i]; + const basalt::GyroData &gd = vio_dataset->get_gyro_data()[i]; + + Eigen::Vector3d a_sp(0, 0, 0), g_sp(0, 0, 0); + + if (calib_opt && calib_opt->calibInitialized() && + calib_opt->initialized()) { + Sophus::SE3d pose_sp = calib_opt->getT_w_i(ad.timestamp_ns); + Eigen::Vector3d a_sp_w = calib_opt->getTransAccelWorld(ad.timestamp_ns); + + a_sp = calib_opt->getAccelBias().invertCalibration( + pose_sp.so3().inverse() * (a_sp_w + calib_opt->getG())); + + g_sp = calib_opt->getGyroBias().invertCalibration( + calib_opt->getRotVelBody(ad.timestamp_ns)); + } + + std::vector vals; + double t = ad.timestamp_ns * 1e-9 - min_time; + vals.push_back(t); + + for (int k = 0; k < 3; k++) vals.push_back(ad.data[k]); + for (int k = 0; k < 3; k++) vals.push_back(a_sp[k]); + for (int k = 0; k < 3; k++) vals.push_back(gd.data[k]); + for (int k = 0; k < 3; k++) vals.push_back(g_sp[k]); + + imu_data_log.Log(vals); + } + + for (size_t i = 0; i < vio_dataset->get_image_timestamps().size(); i++) { + int64_t timestamp_ns = vio_dataset->get_image_timestamps()[i]; + + TimeCamId tcid(timestamp_ns, 0); + const auto &it = calib_init_poses.find(tcid); + + double t = 
timestamp_ns * 1e-9 - min_time; + + Sophus::SE3d pose_sp, pose_meas; + if (calib_opt && calib_opt->initialized()) + pose_sp = calib_opt->getT_w_i(timestamp_ns); + + if (it != calib_init_poses.end() && calib_opt && + calib_opt->calibInitialized()) + pose_meas = it->second.T_a_c * calib_opt->getCamT_i_c(0).inverse(); + + Eigen::Vector3d p_sp = pose_sp.translation(); + Eigen::Vector3d p_meas = pose_meas.translation(); + + double angle = + pose_sp.unit_quaternion().angularDistance(pose_meas.unit_quaternion()) * + 180 / M_PI; + + pose_data_log.Log(t, p_meas[0], p_meas[1], p_meas[2], p_sp[0], p_sp[1], + p_sp[2], angle); + } + + for (size_t i = 0; i < vio_dataset->get_gt_timestamps().size(); i++) { + int64_t timestamp_ns = vio_dataset->get_gt_timestamps()[i]; + + if (calib_opt && calib_opt->calibInitialized()) + timestamp_ns += calib_opt->getMocapTimeOffsetNs(); + + double t = timestamp_ns * 1e-9 - min_time; + + Sophus::SE3d pose_sp, pose_mocap; + if (calib_opt && calib_opt->calibInitialized()) { + if (timestamp_ns < calib_opt->getMinTimeNs() || + timestamp_ns > calib_opt->getMaxTimeNs()) + continue; + + pose_sp = calib_opt->getT_w_i(timestamp_ns); + pose_mocap = calib_opt->getT_w_moc() * + vio_dataset->get_gt_pose_data()[i] * + calib_opt->getT_mark_i(); + } + + Eigen::Vector3d p_sp = pose_sp.translation(); + Eigen::Vector3d p_mocap = pose_mocap.translation(); + + double angle = pose_sp.unit_quaternion().angularDistance( + pose_mocap.unit_quaternion()) * + 180 / M_PI; + + Eigen::Vector3d rot_vel(0, 0, 0); + if (i > 0) { + double dt = (vio_dataset->get_gt_timestamps()[i] - + vio_dataset->get_gt_timestamps()[i - 1]) * + 1e-9; + + rot_vel = (vio_dataset->get_gt_pose_data()[i - 1].so3().inverse() * + vio_dataset->get_gt_pose_data()[i].so3()) + .log() / + dt; + } + + std::vector valsd = {t, p_mocap[0], p_mocap[1], p_mocap[2], + p_sp[0], p_sp[1], p_sp[2], angle, + rot_vel[0], rot_vel[1], rot_vel[2]}; + + std::vector vals(valsd.begin(), valsd.end()); + + 
mocap_data_log.Log(vals); + } +} + +void CamImuCalib::drawPlots() { + plotter->ClearSeries(); + plotter->ClearMarkers(); + + if (show_accel) { + if (show_data) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "a x"); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "a y"); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "a z"); + } + + if (show_spline) { + plotter->AddSeries("$0", "$4", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "a x Spline"); + plotter->AddSeries("$0", "$5", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "a y Spline"); + plotter->AddSeries("$0", "$6", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "a z Spline"); + } + } + + if (show_gyro) { + if (show_data) { + plotter->AddSeries("$0", "$7", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "g x"); + plotter->AddSeries("$0", "$8", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "g y"); + plotter->AddSeries("$0", "$9", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "g z"); + } + + if (show_spline) { + plotter->AddSeries("$0", "$10", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "g x Spline"); + plotter->AddSeries("$0", "$11", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "g y Spline"); + plotter->AddSeries("$0", "$12", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "g z Spline"); + } + } + + if (show_pos) { + if (show_data) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "p x", &pose_data_log); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "p y", &pose_data_log); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "p z", &pose_data_log); + } + + if (show_spline) { + plotter->AddSeries("$0", "$4", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "p x Spline", 
&pose_data_log); + plotter->AddSeries("$0", "$5", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "p y Spline", + &pose_data_log); + plotter->AddSeries("$0", "$6", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "p z Spline", + &pose_data_log); + } + } + + if (show_rot_error) { + plotter->AddSeries("$0", "$7", pangolin::DrawingModeLine, + pangolin::Colour::White(), "rot error", &pose_data_log); + } + + if (show_mocap) { + if (show_data) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "p x", &mocap_data_log); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "p y", &mocap_data_log); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "p z", &mocap_data_log); + } + + if (show_spline) { + plotter->AddSeries("$0", "$4", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "p x Spline", + &mocap_data_log); + plotter->AddSeries("$0", "$5", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "p y Spline", + &mocap_data_log); + plotter->AddSeries("$0", "$6", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "p z Spline", + &mocap_data_log); + } + } + + if (show_mocap_rot_error) { + plotter->AddSeries("$0", "$7", pangolin::DrawingModeLine, + pangolin::Colour::White(), "rot error", &mocap_data_log); + } + + if (show_mocap_rot_vel) { + plotter->AddSeries("$0", "$8", pangolin::DrawingModeLine, + pangolin::Colour(1, 1, 0), "rot vel x", &mocap_data_log); + plotter->AddSeries("$0", "$9", pangolin::DrawingModeLine, + pangolin::Colour(1, 0, 1), "rot vel y", &mocap_data_log); + plotter->AddSeries("$0", "$10", pangolin::DrawingModeLine, + pangolin::Colour(0, 1, 1), "rot vel z", &mocap_data_log); + } + + size_t frame_id = show_frame; + double min_time = vio_dataset->get_accel_data().empty() + ? 
vio_dataset->get_image_timestamps()[0] * 1e-9 + : vio_dataset->get_accel_data()[0].timestamp_ns * 1e-9; + + int64_t timestamp = vio_dataset->get_image_timestamps()[frame_id]; + if (calib_opt && calib_opt->calibInitialized()) + timestamp += calib_opt->getCamTimeOffsetNs(); + + double t = timestamp * 1e-9 - min_time; + plotter->AddMarker(pangolin::Marker::Vertical, t, pangolin::Marker::Equal, + pangolin::Colour::White()); +} + +bool CamImuCalib::hasCorners() const { return !calib_corners.empty(); } + +} // namespace basalt diff --git a/src/calibration/vignette.cpp b/src/calibration/vignette.cpp new file mode 100644 index 0000000..08f821d --- /dev/null +++ b/src/calibration/vignette.cpp @@ -0,0 +1,312 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include + +namespace basalt { + +VignetteEstimator::VignetteEstimator( + const VioDatasetPtr &vio_dataset, + const Eigen::aligned_vector &optical_centers, + const Eigen::aligned_vector &resolutions, + const std::map> + &reprojected_vignette, + const AprilGrid &april_grid) + : vio_dataset(vio_dataset), + optical_centers(optical_centers), + resolutions(resolutions), + reprojected_vignette(reprojected_vignette), + april_grid(april_grid), + vign_param(vio_dataset->get_num_cams(), + RdSpline<1, SPLINE_N>(knot_spacing)) { + vign_size = 0; + + for (size_t i = 0; i < vio_dataset->get_num_cams(); i++) { + Eigen::Vector2d oc = optical_centers[i]; + + size_t new_size = oc.norm() * 1.1; + vign_size = std::max(vign_size, new_size); + } + + // std::cerr << vign_size << std::endl; + + for (size_t i = 0; i < vio_dataset->get_num_cams(); i++) { + while (vign_param[i].maxTimeNs() < + int64_t(vign_size) * int64_t(1e9 * 0.7)) { + vign_param[i].knotsPushBack(Eigen::Matrix(1)); + } + + while (vign_param[i].maxTimeNs() < int64_t(vign_size) * int64_t(1e9)) { + vign_param[i].knotsPushBack(Eigen::Matrix(0.01)); + } + } + + irradiance.resize(april_grid.aprilgrid_vignette_pos_3d.size()); + std::fill(irradiance.begin(), irradiance.end(), 1.0); +} + +void VignetteEstimator::compute_error( + std::map> *reprojected_vignette_error) { + // double error = 0; + // double mean_residual = 0; + double max_residual = 0; + int num_residuals = 0; + + TimeCamId tcid_max; + // 
int point_id = 0; + + if (reprojected_vignette_error) reprojected_vignette_error->clear(); + + for (const auto &kv : reprojected_vignette) { + const TimeCamId &tcid = kv.first; + const auto &points_2d_val = kv.second; + + Eigen::Vector2d oc = optical_centers[tcid.cam_id]; + + BASALT_ASSERT(points_2d_val.size() == + april_grid.aprilgrid_vignette_pos_3d.size()); + + std::vector ve(april_grid.aprilgrid_vignette_pos_3d.size()); + + for (size_t i = 0; i < points_2d_val.size(); i++) { + if (points_2d_val[i][2] >= 0) { + double val = points_2d_val[i][2]; + int64_t loc = + (points_2d_val[i].head<2>() - oc).norm() * 1e9; // in pixels * 1e9 + double e = + irradiance[i] * vign_param[tcid.cam_id].evaluate(loc)[0] - val; + ve[i] = e; + // error += e * e; + // mean_residual += std::abs(e); + max_residual = std::max(max_residual, std::abs(e)); + if (max_residual == std::abs(e)) { + tcid_max = tcid; + // point_id = i; + } + num_residuals++; + } + } + + if (reprojected_vignette_error) + reprojected_vignette_error->emplace(tcid, ve); + } + + // std::cerr << "error " << error << std::endl; + // std::cerr << "mean_residual " << mean_residual / num_residuals << + // std::endl; + // std::cerr << "max_residual " << max_residual << std::endl; + + // int frame_id = 0; + // for (size_t i = 0; i < vio_dataset->get_image_timestamps().size(); i++) { + // if (tcid_max.first == vio_dataset->get_image_timestamps()[i]) { + // frame_id = i; + // } + // } + + // std::cerr << "tcid_max " << frame_id << " " << tcid_max.second << " point + // id " + // << point_id << std::endl + // << std::endl; +} + +void VignetteEstimator::opt_irradience() { + std::vector new_irradiance(irradiance.size(), 0); + std::vector new_irradiance_count(irradiance.size(), 0); + + for (const auto &kv : reprojected_vignette) { + const TimeCamId &tcid = kv.first; + const auto &points_2d_val = kv.second; + + Eigen::Vector2d oc = optical_centers[tcid.cam_id]; + + BASALT_ASSERT(points_2d_val.size() == + 
april_grid.aprilgrid_vignette_pos_3d.size()); + + for (size_t i = 0; i < points_2d_val.size(); i++) { + if (points_2d_val[i][2] >= 0) { + double val = points_2d_val[i][2]; + int64_t loc = + (points_2d_val[i].head<2>() - oc).norm() * 1e9; // in pixels * 1e9 + + new_irradiance[i] += val / vign_param[tcid.cam_id].evaluate(loc)[0]; + new_irradiance_count[i] += 1; + } + } + } + + for (size_t i = 0; i < irradiance.size(); i++) { + if (new_irradiance_count[i] > 0) + irradiance[i] = new_irradiance[i] / new_irradiance_count[i]; + } +} + +void VignetteEstimator::opt_vign() { + size_t num_knots = vign_param[0].getKnots().size(); + + std::vector> new_vign_param( + vio_dataset->get_num_cams(), std::vector(num_knots, 0)); + std::vector> new_vign_param_count( + vio_dataset->get_num_cams(), std::vector(num_knots, 0)); + + for (const auto &kv : reprojected_vignette) { + const TimeCamId &tcid = kv.first; + const auto &points_2d_val = kv.second; + + // Sophus::SE3d T_w_cam = + // calib_opt->getT_w_i(tcid.first) * + // calib_opt->getCamT_i_c(tcid.second); + // Eigen::Vector3d opt_axis_w = T_w_cam.so3() * + // Eigen::Vector3d::UnitZ(); + // if (-opt_axis_w[2] < angle_threshold) continue; + + Eigen::Vector2d oc = optical_centers[tcid.cam_id]; + + BASALT_ASSERT(points_2d_val.size() == + april_grid.aprilgrid_vignette_pos_3d.size()); + + for (size_t i = 0; i < points_2d_val.size(); i++) { + if (points_2d_val[i][2] >= 0) { + double val = points_2d_val[i][2]; + int64_t loc = (points_2d_val[i].head<2>() - oc).norm() * 1e9; + + RdSpline<1, SPLINE_N>::JacobianStruct J; + vign_param[tcid.cam_id].evaluate(loc, &J); + + for (size_t k = 0; k < J.d_val_d_knot.size(); k++) { + new_vign_param[tcid.cam_id][J.start_idx + k] += + J.d_val_d_knot[k] * val / irradiance[i]; + new_vign_param_count[tcid.cam_id][J.start_idx + k] += + J.d_val_d_knot[k]; + } + } + } + } + + double max_val = 0; + for (size_t j = 0; j < vio_dataset->get_num_cams(); j++) + for (size_t i = 0; i < num_knots; i++) { + if 
(new_vign_param_count[j][i] > 0) { + // std::cerr << "update " << i << " " << j << std::endl; + double val = new_vign_param[j][i] / new_vign_param_count[j][i]; + max_val = std::max(max_val, val); + vign_param[j].getKnot(i)[0] = val; + } + } + + // normalize vignette + double max_val_inv = 1.0 / max_val; + for (size_t j = 0; j < vio_dataset->get_num_cams(); j++) + for (size_t i = 0; i < num_knots; i++) { + if (new_vign_param_count[j][i] > 0) { + vign_param[j].getKnot(i)[0] *= max_val_inv; + } + } +} + +void VignetteEstimator::optimize() { + compute_error(); + for (int i = 0; i < 10; i++) { + opt_irradience(); + compute_error(); + opt_vign(); + compute_error(); + } +} + +void VignetteEstimator::compute_data_log( + std::vector> &vign_data_log) { + std::vector> num_proj_points( + vio_dataset->get_num_cams(), std::vector(vign_size, 0)); + + for (const auto &kv : reprojected_vignette) { + const TimeCamId &tcid = kv.first; + const auto &points_2d = kv.second; + + Eigen::Vector2d oc = optical_centers[tcid.cam_id]; + + BASALT_ASSERT(points_2d.size() == + april_grid.aprilgrid_vignette_pos_3d.size()); + + for (size_t i = 0; i < points_2d.size(); i++) { + if (points_2d[i][2] >= 0) { + size_t loc = (points_2d[i].head<2>() - oc).norm(); + + if (loc < vign_size) num_proj_points[tcid.cam_id][loc] += 1.; + } + } + } + + vign_data_log.clear(); + for (size_t i = 0; i < vign_size; i++) { + std::vector log_data; + for (size_t j = 0; j < vio_dataset->get_num_cams(); j++) { + int64_t loc = i * 1e9; + log_data.push_back(vign_param[j].evaluate(loc)[0]); + log_data.push_back(num_proj_points[j][i]); + } + vign_data_log.push_back(log_data); + } +} + +void VignetteEstimator::save_vign_png(const std::string &path) { + for (size_t k = 0; k < vio_dataset->get_num_cams(); k++) { + ManagedImage vign_img(resolutions[k][0], resolutions[k][1]); + vign_img.Fill(0); + + Eigen::Vector2d oc = optical_centers[k]; + + for (size_t x = 0; x < vign_img.w; x++) { + for (size_t y = 0; y < vign_img.h; y++) { + 
int64_t loc = (Eigen::Vector2d(x, y) - oc).norm() * 1e9; + double val = vign_param[k].evaluate(loc)[0]; + if (val < 0.5) continue; + uint16_t val_int = + val >= 1.0 ? std::numeric_limits::max() + : uint16_t(val * std::numeric_limits::max()); + vign_img(x, y) = val_int; + } + } + + // pangolin::SaveImage(vign_img.UnsafeReinterpret(), + // pangolin::PixelFormatFromString("GRAY16LE"), + // path + "/vingette_" + std::to_string(k) + ".png"); + + cv::Mat img(vign_img.h, vign_img.w, CV_16U, vign_img.ptr); + cv::imwrite(path + "/vingette_" + std::to_string(k) + ".png", img); + } +} +} // namespace basalt diff --git a/src/device/rs_t265.cpp b/src/device/rs_t265.cpp new file mode 100644 index 0000000..462da01 --- /dev/null +++ b/src/device/rs_t265.cpp @@ -0,0 +1,371 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko, Michael Loipführer and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +std::string get_date(); + +namespace basalt { + +RsT265Device::RsT265Device(bool manual_exposure, int skip_frames, + int webp_quality, double exposure_value) + : manual_exposure(manual_exposure), + skip_frames(skip_frames), + webp_quality(webp_quality) { + rs2::log_to_console(RS2_LOG_SEVERITY_ERROR); + pipe = rs2::pipeline(context); + + config.enable_stream(RS2_STREAM_ACCEL, RS2_FORMAT_MOTION_XYZ32F); + config.enable_stream(RS2_STREAM_GYRO, RS2_FORMAT_MOTION_XYZ32F); + config.enable_stream(RS2_STREAM_FISHEYE, 1, RS2_FORMAT_Y8); + config.enable_stream(RS2_STREAM_FISHEYE, 2, RS2_FORMAT_Y8); + if (!manual_exposure) { + config.enable_stream(RS2_STREAM_POSE, RS2_FORMAT_6DOF); + } + + if (context.query_devices().size() == 0) { + std::cout << "Waiting for device to be connected" << std::endl; + rs2::device_hub hub(context); + hub.wait_for_device(); + } + + for (auto& s : context.query_devices()[0].query_sensors()) { + std::cout << "Sensor " << s.get_info(RS2_CAMERA_INFO_NAME) + << ". 
Supported options:" << std::endl; + + for (const auto& o : s.get_supported_options()) { + std::cout << "\t" << rs2_option_to_string(o) << std::endl; + } + } + + auto device = context.query_devices()[0]; + device.hardware_reset(); + + std::cout << "Device " << device.get_info(RS2_CAMERA_INFO_NAME) + << " connected" << std::endl; + sensor = device.query_sensors()[0]; + + if (manual_exposure) { + std::cout << "Enabling manual exposure control" << std::endl; + sensor.set_option(rs2_option::RS2_OPTION_ENABLE_AUTO_EXPOSURE, 0); + sensor.set_option(rs2_option::RS2_OPTION_EXPOSURE, exposure_value * 1000); + } +} + +void RsT265Device::start() { + auto callback = [&](const rs2::frame& frame) { + exportCalibration(); + + if (auto fp = frame.as()) { + auto motion = frame.as(); + + if (motion && motion.get_profile().stream_type() == RS2_STREAM_GYRO && + motion.get_profile().format() == RS2_FORMAT_MOTION_XYZ32F) { + RsIMUData d; + d.timestamp = motion.get_timestamp(); + d.data << motion.get_motion_data().x, motion.get_motion_data().y, + motion.get_motion_data().z; + + gyro_data_queue.emplace_back(d); + } else if (motion && + motion.get_profile().stream_type() == RS2_STREAM_ACCEL && + motion.get_profile().format() == RS2_FORMAT_MOTION_XYZ32F) { + RsIMUData d; + d.timestamp = motion.get_timestamp(); + d.data << motion.get_motion_data().x, motion.get_motion_data().y, + motion.get_motion_data().z; + + if (!prev_accel_data.get()) { + prev_accel_data.reset(new RsIMUData(d)); + } else { + BASALT_ASSERT(d.timestamp > prev_accel_data->timestamp); + + while (!gyro_data_queue.empty() && gyro_data_queue.front().timestamp < + prev_accel_data->timestamp) { + std::cout << "Skipping gyro data. 
Timestamp before the first accel " + "measurement."; + gyro_data_queue.pop_front(); + } + + while (!gyro_data_queue.empty() && + gyro_data_queue.front().timestamp < d.timestamp) { + RsIMUData gyro_data = gyro_data_queue.front(); + gyro_data_queue.pop_front(); + + double w0 = (d.timestamp - gyro_data.timestamp) / + (d.timestamp - prev_accel_data->timestamp); + + double w1 = (gyro_data.timestamp - prev_accel_data->timestamp) / + (d.timestamp - prev_accel_data->timestamp); + + Eigen::Vector3d accel_interpolated = + w0 * prev_accel_data->data + w1 * d.data; + + basalt::ImuData::Ptr data; + data.reset(new basalt::ImuData); + data->t_ns = gyro_data.timestamp * 1e6; + data->accel = accel_interpolated; + data->gyro = gyro_data.data; + + if (imu_data_queue) imu_data_queue->push(data); + } + + prev_accel_data.reset(new RsIMUData(d)); + } + } + } else if (auto fs = frame.as()) { + BASALT_ASSERT(fs.size() == NUM_CAMS); + + std::vector vfs; + for (int i = 0; i < NUM_CAMS; ++i) { + rs2::video_frame vf = fs[i].as(); + if (!vf) { + std::cout << "Weird Frame, skipping" << std::endl; + return; + } + vfs.push_back(vf); + } + + // Callback is called for every new image, so in every other call, the + // left frame is updated but the right frame is still from the previous + // timestamp. So we only process framesets where both images are valid and + // have the same timestamp. 
+ for (int i = 1; i < NUM_CAMS; ++i) { + if (vfs[0].get_timestamp() != vfs[i].get_timestamp()) { + return; + } + } + + // skip frames if configured + if (frame_counter++ % skip_frames != 0) { + return; + } + + OpticalFlowInput::Ptr data(new OpticalFlowInput); + data->img_data.resize(NUM_CAMS); + + // std::cout << "Reading frame " << frame_counter << std::endl; + + for (int i = 0; i < NUM_CAMS; i++) { + const auto& vf = vfs[i]; + + int64_t t_ns = vf.get_timestamp() * 1e6; + + // at this stage both image timestamps are expected to be equal + BASALT_ASSERT(i == 0 || t_ns == data->t_ns); + + data->t_ns = t_ns; + + data->img_data[i].exposure = + vf.get_frame_metadata(RS2_FRAME_METADATA_ACTUAL_EXPOSURE) * 1e-6; + + data->img_data[i].img.reset(new basalt::ManagedImage( + vf.get_width(), vf.get_height())); + + const uint8_t* data_in = (const uint8_t*)vf.get_data(); + uint16_t* data_out = data->img_data[i].img->ptr; + + size_t full_size = vf.get_width() * vf.get_height(); + for (size_t j = 0; j < full_size; j++) { + int val = data_in[j]; + val = val << 8; + data_out[j] = val; + } + + // std::cout << "Timestamp / exposure " << i << ": " << + // data->t_ns << " / " + // << int(data->img_data[i].exposure * 1e3) << "ms" << + // std::endl; + } + + last_img_data = data; + if (image_data_queue) image_data_queue->push(data); + + } else if (auto pf = frame.as()) { + auto data = pf.get_pose_data(); + + RsPoseData pdata; + pdata.t_ns = pf.get_timestamp() * 1e6; + + Eigen::Vector3d trans(data.translation.x, data.translation.y, + data.translation.z); + Eigen::Quaterniond quat(data.rotation.w, data.rotation.x, data.rotation.y, + data.rotation.z); + + pdata.data = Sophus::SE3d(quat, trans); + + if (pose_data_queue) pose_data_queue->push(pdata); + } + }; + + profile = pipe.start(config, callback); +} + +void RsT265Device::stop() { + if (image_data_queue) image_data_queue->push(nullptr); + if (imu_data_queue) imu_data_queue->push(nullptr); +} + +bool RsT265Device::setExposure(double 
exposure) { + if (!manual_exposure) return false; + + sensor.set_option(rs2_option::RS2_OPTION_EXPOSURE, exposure * 1000); + return true; +} + +void RsT265Device::setSkipFrames(int skip) { skip_frames = skip; } + +void RsT265Device::setWebpQuality(int quality) { webp_quality = quality; } + +std::shared_ptr> RsT265Device::exportCalibration() { + using Scalar = double; + + if (calib.get()) return calib; + + calib.reset(new basalt::Calibration); + calib->imu_update_rate = IMU_RATE; + + auto accel_stream = profile.get_stream(RS2_STREAM_ACCEL); + auto gyro_stream = profile.get_stream(RS2_STREAM_GYRO); + auto cam0_stream = profile.get_stream(RS2_STREAM_FISHEYE, 1); + auto cam1_stream = profile.get_stream(RS2_STREAM_FISHEYE, 2); + + // get gyro extrinsics + if (auto gyro = gyro_stream.as()) { + rs2_motion_device_intrinsic intrinsics = gyro.get_motion_intrinsics(); + + Eigen::Matrix gyro_bias_full; + gyro_bias_full << intrinsics.data[0][3], intrinsics.data[1][3], + intrinsics.data[2][3], intrinsics.data[0][0] - 1.0, + intrinsics.data[1][0], intrinsics.data[2][0], intrinsics.data[0][1], + intrinsics.data[1][1] - 1.0, intrinsics.data[2][1], + intrinsics.data[0][2], intrinsics.data[1][2], + intrinsics.data[2][2] - 1.0; + basalt::CalibGyroBias gyro_bias; + gyro_bias.getParam() = gyro_bias_full; + calib->calib_gyro_bias = gyro_bias; + + // std::cout << "Gyro Bias\n" << gyro_bias_full << std::endl; + + calib->gyro_noise_std = Eigen::Vector3d(intrinsics.noise_variances[0], + intrinsics.noise_variances[1], + intrinsics.noise_variances[2]) + .cwiseSqrt(); + + calib->gyro_bias_std = Eigen::Vector3d(intrinsics.bias_variances[0], + intrinsics.bias_variances[1], + intrinsics.bias_variances[2]) + .cwiseSqrt(); + + // std::cout << "Gyro noise var: " << intrinsics.noise_variances[0] + // << " bias var: " << intrinsics.bias_variances[0] << std::endl; + } else { + std::abort(); + } + + // get accel extrinsics + if (auto accel = accel_stream.as()) { + rs2_motion_device_intrinsic intrinsics = 
accel.get_motion_intrinsics(); + Eigen::Matrix accel_bias_full; + accel_bias_full << intrinsics.data[0][3], intrinsics.data[1][3], + intrinsics.data[2][3], intrinsics.data[0][0] - 1.0, + intrinsics.data[1][0], intrinsics.data[2][0], + intrinsics.data[1][1] - 1.0, intrinsics.data[2][1], + intrinsics.data[2][2] - 1.0; + basalt::CalibAccelBias accel_bias; + accel_bias.getParam() = accel_bias_full; + calib->calib_accel_bias = accel_bias; + + // std::cout << "Gyro Bias\n" << accel_bias_full << std::endl; + + calib->accel_noise_std = Eigen::Vector3d(intrinsics.noise_variances[0], + intrinsics.noise_variances[1], + intrinsics.noise_variances[2]) + .cwiseSqrt(); + + calib->accel_bias_std = Eigen::Vector3d(intrinsics.bias_variances[0], + intrinsics.bias_variances[1], + intrinsics.bias_variances[2]) + .cwiseSqrt(); + + // std::cout << "Accel noise var: " << intrinsics.noise_variances[0] + // << " bias var: " << intrinsics.bias_variances[0] << std::endl; + } else { + std::abort(); + } + + // get camera ex-/intrinsics + for (const auto& cam_stream : {cam0_stream, cam1_stream}) { + if (auto cam = cam_stream.as()) { + // extrinsics + rs2_extrinsics ex = cam.get_extrinsics_to(gyro_stream); + Eigen::Matrix3f rot = Eigen::Map(ex.rotation); + Eigen::Vector3f trans = Eigen::Map(ex.translation); + + Eigen::Quaterniond q(rot.cast()); + basalt::Calibration::SE3 T_i_c(q, trans.cast()); + + // std::cout << "T_i_c\n" << T_i_c.matrix() << std::endl; + + calib->T_i_c.push_back(T_i_c); + + // get resolution + Eigen::Vector2i resolution; + resolution << cam.width(), cam.height(); + calib->resolution.push_back(resolution); + + // intrinsics + rs2_intrinsics intrinsics = cam.get_intrinsics(); + basalt::KannalaBrandtCamera4::VecN params; + params << intrinsics.fx, intrinsics.fy, intrinsics.ppx, intrinsics.ppy, + intrinsics.coeffs[0], intrinsics.coeffs[1], intrinsics.coeffs[2], + intrinsics.coeffs[3]; + + // std::cout << "Camera intrinsics: " << params.transpose() << std::endl; + + 
basalt::GenericCamera camera; + basalt::KannalaBrandtCamera4 kannala_brandt(params); + camera.variant = kannala_brandt; + + calib->intrinsics.push_back(camera); + } else { + std::abort(); + } + } + + return calib; +} + +} // namespace basalt diff --git a/src/io/dataset_io.cpp b/src/io/dataset_io.cpp new file mode 100644 index 0000000..31a3cca --- /dev/null +++ b/src/io/dataset_io.cpp @@ -0,0 +1,62 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include +#include + +namespace basalt { + +DatasetIoInterfacePtr DatasetIoFactory::getDatasetIo( + const std::string &dataset_type, bool load_mocap_as_gt) { + if (dataset_type == "euroc") { + // return DatasetIoInterfacePtr(); + return DatasetIoInterfacePtr(new EurocIO(load_mocap_as_gt)); + } else if (dataset_type == "bag") { + return DatasetIoInterfacePtr(new RosbagIO); + } else if (dataset_type == "uzh") { + return DatasetIoInterfacePtr(new UzhIO); + } else if (dataset_type == "kitti") { + return DatasetIoInterfacePtr(new KittiIO); + } else { + std::cerr << "Dataset type " << dataset_type << " is not supported" + << std::endl; + std::abort(); + } +} + +} // namespace basalt diff --git a/src/io/marg_data_io.cpp b/src/io/marg_data_io.cpp new file mode 100644 index 0000000..6a7eef2 --- /dev/null +++ b/src/io/marg_data_io.cpp @@ -0,0 +1,226 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include +#include + +namespace basalt { + +MargDataSaver::MargDataSaver(const std::string& path) { + fs::remove_all(path); + fs::create_directory(path); + + save_image_queue.set_capacity(300); + + std::string img_path = path + "/images/"; + fs::create_directory(img_path); + + in_marg_queue.set_capacity(1000); + + auto save_func = [&, path]() { + basalt::MargData::Ptr data; + + std::unordered_set processed_opt_flow; + + while (true) { + in_marg_queue.pop(data); + + if (data.get()) { + int64_t kf_id = *data->kfs_to_marg.begin(); + + std::string p = path + "/" + std::to_string(kf_id) + ".cereal"; + std::ofstream os(p, std::ios::binary); + + { + cereal::BinaryOutputArchive archive(os); + archive(*data); + } + os.close(); + + for (const auto& d : data->opt_flow_res) { + if (processed_opt_flow.count(d->t_ns) == 0) { + save_image_queue.push(d); + processed_opt_flow.emplace(d->t_ns); + } + } + + } else { + save_image_queue.push(nullptr); + break; + } + } + + std::cout << "Finished MargDataSaver" << std::endl; + }; + + auto save_image_func = [&, img_path]() { + basalt::OpticalFlowResult::Ptr data; + + while (true) { + save_image_queue.pop(data); + + if (data.get()) { + std::string p = img_path + "/" + std::to_string(data->t_ns) + ".cereal"; + std::ofstream os(p, std::ios::binary); + + { + cereal::BinaryOutputArchive archive(os); + archive(data); + } + os.close(); + } else { + break; + } + } + + std::cout << "Finished image MargDataSaver" << std::endl; + }; + + saving_thread.reset(new std::thread(save_func)); + saving_img_thread.reset(new std::thread(save_image_func)); +} // namespace basalt + +MargDataLoader::MargDataLoader() : out_marg_queue(nullptr) {} + +void MargDataLoader::start(const std::string& path) { + if (!fs::exists(path)) + std::cerr << "No marg. 
data found in " << path << std::endl; + + auto func = [&, path]() { + std::string img_path = path + "/images/"; + + std::unordered_set saved_images; + + std::map opt_flow_res; + + for (const auto& entry : fs::directory_iterator(img_path)) { + OpticalFlowResult::Ptr data; + // std::cout << entry.path() << std::endl; + std::ifstream is(entry.path(), std::ios::binary); + { + cereal::BinaryInputArchive archive(is); + archive(data); + } + is.close(); + opt_flow_res[data->t_ns] = data; + } + + std::map filenames; + + for (auto& p : fs::directory_iterator(path)) { + std::string filename = p.path().filename(); + if (!std::isdigit(filename[0])) continue; + + size_t lastindex = filename.find_last_of("."); + std::string rawname = filename.substr(0, lastindex); + + int64_t t_ns = std::stol(rawname); + + filenames.emplace(t_ns, filename); + } + + for (const auto& kv : filenames) { + basalt::MargData::Ptr data(new basalt::MargData); + + std::string p = path + "/" + kv.second; + std::ifstream is(p, std::ios::binary); + + { + cereal::BinaryInputArchive archive(is); + archive(*data); + } + is.close(); + + for (const auto& d : data->kfs_all) { + data->opt_flow_res.emplace_back(opt_flow_res.at(d)); + } + + out_marg_queue->push(data); + } + + out_marg_queue->push(nullptr); + + std::cout << "Finished MargDataLoader" << std::endl; + }; + + processing_thread.reset(new std::thread(func)); +} +} // namespace basalt + +namespace cereal { + +template +void save(Archive& ar, const basalt::ManagedImage& m) { + ar(m.w); + ar(m.h); + ar(cereal::binary_data(m.ptr, sizeof(T) * m.w * m.h)); +} + +template +void load(Archive& ar, basalt::ManagedImage& m) { + size_t w; + size_t h; + ar(w); + ar(h); + m.Reinitialise(w, h); + ar(cereal::binary_data(m.ptr, sizeof(T) * m.w * m.h)); +} + +template +void serialize(Archive& ar, basalt::OpticalFlowResult& m) { + ar(m.t_ns); + ar(m.observations); + ar(m.input_images); +} + +template +void serialize(Archive& ar, basalt::OpticalFlowInput& m) { + ar(m.t_ns); + 
ar(m.img_data); +} + +template +void serialize(Archive& ar, basalt::ImageData& m) { + ar(m.exposure); + ar(m.img); +} + +template +static void serialize(Archive& ar, Eigen::AffineCompact2f& m) { + ar(m.matrix()); +} +} // namespace cereal diff --git a/src/kitti_eval.cpp b/src/kitti_eval.cpp new file mode 100644 index 0000000..49c700a --- /dev/null +++ b/src/kitti_eval.cpp @@ -0,0 +1,215 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include + +#include +#include + +namespace cereal { + +template +inline void save(Archive& ar, std::map const& map) { + for (const auto& i : map) ar(cereal::make_nvp(i.first, i.second)); +} + +template +inline void load(Archive& ar, std::map& map) { + map.clear(); + + auto hint = map.begin(); + while (true) { + const auto namePtr = ar.getNodeName(); + + if (!namePtr) break; + + std::string key = namePtr; + T value; + ar(value); + hint = map.emplace_hint(hint, std::move(key), std::move(value)); + } +} + +} // namespace cereal + +Eigen::aligned_vector load_poses(const std::string& path) { + Eigen::aligned_vector res; + + std::ifstream f(path); + std::string line; + while (std::getline(f, line)) { + if (line[0] == '#') continue; + + std::stringstream ss(line); + + Eigen::Matrix3d rot; + Eigen::Vector3d pos; + + ss >> rot(0, 0) >> rot(0, 1) >> rot(0, 2) >> pos[0] >> rot(1, 0) >> + rot(1, 1) >> rot(1, 2) >> pos[1] >> rot(2, 0) >> rot(2, 1) >> + rot(2, 2) >> pos[2]; + + res.emplace_back(Eigen::Quaterniond(rot), pos); + } + + return res; +} + +void eval_kitti(const std::vector& lengths, + const Eigen::aligned_vector& poses_gt, + const Eigen::aligned_vector& poses_result, + std::map>& res) { + auto lastFrameFromSegmentLength = [](std::vector& dist, + int first_frame, float len) { + for (int i = first_frame; i < (int)dist.size(); i++) + if (dist[i] > dist[first_frame] + len) return i; + return -1; + }; + + std::cout << "poses_gt.size() " << 
poses_gt.size() << std::endl; + std::cout << "poses_result.size() " << poses_result.size() << std::endl; + + // pre-compute distances (from ground truth as reference) + std::vector dist_gt; + dist_gt.emplace_back(0); + for (size_t i = 1; i < poses_gt.size(); i++) { + const auto& p1 = poses_gt[i - 1]; + const auto& p2 = poses_gt[i]; + + dist_gt.emplace_back(dist_gt.back() + + (p2.translation() - p1.translation()).norm()); + } + + const size_t step_size = 10; + + for (size_t i = 0; i < lengths.size(); i++) { + // current length + float len = lengths[i]; + + double t_error_sum = 0; + double r_error_sum = 0; + int num_meas = 0; + + for (size_t first_frame = 0; first_frame < poses_gt.size(); + first_frame += step_size) { + // for all segment lengths do + + // compute last frame + int32_t last_frame = + lastFrameFromSegmentLength(dist_gt, first_frame, len); + + // continue, if sequence not long enough + if (last_frame == -1) continue; + + // compute rotational and translational errors + Sophus::SE3d pose_delta_gt = + poses_gt[first_frame].inverse() * poses_gt[last_frame]; + Sophus::SE3d pose_delta_result = + poses_result[first_frame].inverse() * poses_result[last_frame]; + // Sophus::SE3d pose_error = pose_delta_result.inverse() * pose_delta_gt; + double r_err = pose_delta_result.unit_quaternion().angularDistance( + pose_delta_gt.unit_quaternion()) * + 180.0 / M_PI; + double t_err = + (pose_delta_result.translation() - pose_delta_gt.translation()) + .norm(); + + t_error_sum += t_err / len; + r_error_sum += r_err / len; + num_meas++; + } + + std::string len_str = std::to_string((int)len); + res[len_str]["trans_error"] = 100.0 * t_error_sum / num_meas; + res[len_str]["rot_error"] = r_error_sum / num_meas; + res[len_str]["num_meas"] = num_meas; + } +} + +int main(int argc, char** argv) { + std::vector lengths = {100, 200, 300, 400, 500, 600, 700, 800}; + std::string result_path; + std::string traj_path; + std::string gt_path; + + CLI::App app{"KITTI evaluation"}; + + 
app.add_option("--traj-path", traj_path, + "Path to the file with computed trajectory.") + ->required(); + app.add_option("--gt-path", gt_path, + "Path to the file with ground truth trajectory.") + ->required(); + app.add_option("--result-path", result_path, "Path to store the result file.") + ->required(); + + app.add_option("--eval-lengths", lengths, "Trajectory length to evaluate."); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError& e) { + return app.exit(e); + } + + const Eigen::aligned_vector poses_gt = load_poses(gt_path); + const Eigen::aligned_vector poses_result = + load_poses(traj_path); + + if (poses_gt.empty() || poses_gt.size() != poses_result.size()) { + std::cerr << "Wrong number of poses: poses_gt " << poses_gt.size() + << " poses_result " << poses_result.size() << std::endl; + std::abort(); + } + + std::map> res_map; + eval_kitti(lengths, poses_gt, poses_result, res_map); + + { + cereal::JSONOutputArchive ar(std::cout); + ar(cereal::make_nvp("results", res_map)); + std::cout << std::endl; + } + + if (!result_path.empty()) { + std::ofstream os(result_path); + { + cereal::JSONOutputArchive ar(os); + ar(cereal::make_nvp("results", res_map)); + } + os.close(); + } +} diff --git a/src/linearization/landmark_block.cpp b/src/linearization/landmark_block.cpp new file mode 100644 index 0000000..19eb35d --- /dev/null +++ b/src/linearization/landmark_block.cpp @@ -0,0 +1,63 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2021, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include + +namespace basalt { + +template +template +std::unique_ptr> +LandmarkBlock::createLandmarkBlock() { + return std::make_unique>(); +} + +// ////////////////////////////////////////////////////////////////// +// instatiate factory templates + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +// Scalar=double, POSE_SIZE=6 +template std::unique_ptr> +LandmarkBlock::createLandmarkBlock<6>(); +#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +// Scalar=float, POSE_SIZE=6 +template std::unique_ptr> +LandmarkBlock::createLandmarkBlock<6>(); +#endif + +} // namespace basalt diff --git a/src/linearization/linearization_abs_qr.cpp b/src/linearization/linearization_abs_qr.cpp new file mode 100644 index 0000000..e41bcea --- /dev/null +++ b/src/linearization/linearization_abs_qr.cpp @@ -0,0 +1,711 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2021, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include +#include +#include + +#include +#include +#include + +namespace basalt { + +template +LinearizationAbsQR::LinearizationAbsQR( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, const MargLinData* marg_lin_data, + const ImuLinData* imu_lin_data, + const std::set* used_frames, + const std::unordered_set* lost_landmarks, + int64_t last_state_to_marg) + : options_(options), + estimator(estimator), + lmdb_(estimator->lmdb), + frame_poses(estimator->frame_poses), + calib(estimator->calib), + aom(aom), + used_frames(used_frames), + marg_lin_data(marg_lin_data), + imu_lin_data(imu_lin_data), + pose_damping_diagonal(0), + pose_damping_diagonal_sqrt(0) { + UNUSED(last_state_to_marg); + + BASALT_ASSERT_STREAM( + options.lb_options.huber_parameter == estimator->huber_thresh, + "Huber threshold should be set to the same value"); + + BASALT_ASSERT_STREAM(options.lb_options.obs_std_dev == estimator->obs_std_dev, + "obs_std_dev should be set to the same value"); + + // Allocate memory for relative pose linearization + for (const auto& [tcid_h, target_map] : lmdb_.getObservations()) { + // if (used_frames && used_frames->count(tcid_h.frame_id) == 0) continue; + const size_t host_idx = host_to_idx_.size(); + host_to_idx_.try_emplace(tcid_h, host_idx); + host_to_landmark_block.try_emplace(tcid_h); + + // assumption: every host frame has at least target frame with + // observations + // NOTE: in case a host 
frame loses all of its landmarks due + // to outlier removal or marginalization of other frames, it becomes quite + // useless and is expected to be removed before optimization. + BASALT_ASSERT(!target_map.empty()); + + for (const auto& [tcid_t, obs] : target_map) { + // assumption: every target frame has at least one observation + BASALT_ASSERT(!obs.empty()); + + std::pair key(tcid_h, tcid_t); + relative_pose_lin.emplace(key, RelPoseLin()); + } + } + + // Populate lookup for relative poses grouped by host-frame + for (const auto& [tcid_h, target_map] : lmdb_.getObservations()) { + // if (used_frames && used_frames->count(tcid_h.frame_id) == 0) continue; + relative_pose_per_host.emplace_back(); + + for (const auto& [tcid_t, _] : target_map) { + std::pair key(tcid_h, tcid_t); + auto it = relative_pose_lin.find(key); + + BASALT_ASSERT(it != relative_pose_lin.end()); + + relative_pose_per_host.back().emplace_back(it); + } + } + + num_cameras = frame_poses.size(); + + landmark_ids.clear(); + for (const auto& [k, v] : lmdb_.getLandmarks()) { + if (used_frames || lost_landmarks) { + if (used_frames && used_frames->count(v.host_kf_id.frame_id)) { + landmark_ids.emplace_back(k); + } else if (lost_landmarks && lost_landmarks->count(k)) { + landmark_ids.emplace_back(k); + } + } else { + landmark_ids.emplace_back(k); + } + } + size_t num_landmakrs = landmark_ids.size(); + + // std::cout << "num_landmakrs " << num_landmakrs << std::endl; + + landmark_blocks.resize(num_landmakrs); + + { + auto body = [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + KeypointId lm_id = landmark_ids[r]; + auto& lb = landmark_blocks[r]; + auto& landmark = lmdb_.getLandmark(lm_id); + + lb = LandmarkBlock::template createLandmarkBlock(); + + lb->allocateLandmark(landmark, relative_pose_lin, calib, aom, + options.lb_options); + } + }; + + tbb::blocked_range range(0, num_landmakrs); + tbb::parallel_for(range, body); + } + + 
landmark_block_idx.reserve(num_landmakrs); + + num_rows_Q2r = 0; + for (size_t i = 0; i < num_landmakrs; i++) { + landmark_block_idx.emplace_back(num_rows_Q2r); + const auto& lb = landmark_blocks[i]; + num_rows_Q2r += lb->numQ2rows(); + + host_to_landmark_block.at(lb->getHostKf()).emplace_back(lb.get()); + } + + if (imu_lin_data) { + for (const auto& kv : imu_lin_data->imu_meas) { + imu_blocks.emplace_back( + new ImuBlock(kv.second, imu_lin_data, aom)); + } + } + + // std::cout << "num_rows_Q2r " << num_rows_Q2r << " num_poses " << + // num_cameras + // << std::endl; +} + +template +LinearizationAbsQR::~LinearizationAbsQR() = default; + +template +void LinearizationAbsQR::log_problem_stats( + ExecutionStats& stats) const { + UNUSED(stats); +} + +template +Scalar LinearizationAbsQR::linearizeProblem( + bool* numerically_valid) { + // reset damping and scaling (might be set from previous iteration) + pose_damping_diagonal = 0; + pose_damping_diagonal_sqrt = 0; + marg_scaling = VecX(); + + // Linearize relative poses + for (const auto& [tcid_h, target_map] : lmdb_.getObservations()) { + // if (used_frames && used_frames->count(tcid_h.frame_id) == 0) continue; + + for (const auto& [tcid_t, _] : target_map) { + std::pair key(tcid_h, tcid_t); + RelPoseLin& rpl = relative_pose_lin.at(key); + + if (tcid_h != tcid_t) { + const PoseStateWithLin& state_h = + estimator->getPoseStateWithLin(tcid_h.frame_id); + const PoseStateWithLin& state_t = + estimator->getPoseStateWithLin(tcid_t.frame_id); + + // compute relative pose & Jacobians at linearization point + Sophus::SE3 T_t_h_sophus = + computeRelPose(state_h.getPoseLin(), calib.T_i_c[tcid_h.cam_id], + state_t.getPoseLin(), calib.T_i_c[tcid_t.cam_id], + &rpl.d_rel_d_h, &rpl.d_rel_d_t); + + // if either state is already linearized, then the current state + // estimate is different from the linearization point, so recompute + // the value (not Jacobian) again based on the current state. 
+ if (state_h.isLinearized() || state_t.isLinearized()) { + T_t_h_sophus = + computeRelPose(state_h.getPose(), calib.T_i_c[tcid_h.cam_id], + state_t.getPose(), calib.T_i_c[tcid_t.cam_id]); + } + + rpl.T_t_h = T_t_h_sophus.matrix(); + } else { + rpl.T_t_h.setIdentity(); + rpl.d_rel_d_h.setZero(); + rpl.d_rel_d_t.setZero(); + } + } + } + + // Linearize landmarks + size_t num_landmarks = landmark_blocks.size(); + + auto body = [&](const tbb::blocked_range& range, + std::pair error_valid) { + for (size_t r = range.begin(); r != range.end(); ++r) { + error_valid.first += landmark_blocks[r]->linearizeLandmark(); + error_valid.second = + error_valid.second && !landmark_blocks[r]->isNumericalFailure(); + } + return error_valid; + }; + + std::pair initial_value = {0.0, true}; + auto join = [](auto p1, auto p2) { + p1.first += p2.first; + p1.second = p1.second && p2.second; + return p1; + }; + + tbb::blocked_range range(0, num_landmarks); + auto reduction_res = tbb::parallel_reduce(range, initial_value, body, join); + + if (numerically_valid) *numerically_valid = reduction_res.second; + + if (imu_lin_data) { + for (auto& imu_block : imu_blocks) { + reduction_res.first += imu_block->linearizeImu(estimator->frame_states); + } + } + + if (marg_lin_data) { + Scalar marg_prior_error; + estimator->computeMargPriorError(*marg_lin_data, marg_prior_error); + reduction_res.first += marg_prior_error; + } + + return reduction_res.first; +} + +template +void LinearizationAbsQR::performQR() { + auto body = [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + landmark_blocks[r]->performQR(); + } + }; + + tbb::blocked_range range(0, landmark_block_idx.size()); + tbb::parallel_for(range, body); +} + +template +void LinearizationAbsQR::setPoseDamping( + const Scalar lambda) { + BASALT_ASSERT(lambda >= 0); + + pose_damping_diagonal = lambda; + pose_damping_diagonal_sqrt = std::sqrt(lambda); +} + +template +Scalar LinearizationAbsQR::backSubstitute( 
+ const VecX& pose_inc) { + BASALT_ASSERT(pose_inc.size() == signed_cast(aom.total_size)); + + auto body = [&](const tbb::blocked_range& range, Scalar l_diff) { + for (size_t r = range.begin(); r != range.end(); ++r) { + landmark_blocks[r]->backSubstitute(pose_inc, l_diff); + } + return l_diff; + }; + + tbb::blocked_range range(0, landmark_block_idx.size()); + Scalar l_diff = + tbb::parallel_reduce(range, Scalar(0), body, std::plus()); + + if (imu_lin_data) { + for (auto& imu_block : imu_blocks) { + imu_block->backSubstitute(pose_inc, l_diff); + } + } + + if (marg_lin_data) { + size_t marg_size = marg_lin_data->H.cols(); + VecX pose_inc_marg = pose_inc.head(marg_size); + + l_diff += estimator->computeMargPriorModelCostChange( + *marg_lin_data, marg_scaling, pose_inc_marg); + } + + return l_diff; +} + +template +typename LinearizationAbsQR::VecX +LinearizationAbsQR::getJp_diag2() const { + // TODO: group relative by host frame + + struct Reductor { + Reductor(size_t num_rows, + const std::vector& landmark_blocks) + : num_rows_(num_rows), landmark_blocks_(landmark_blocks) { + res_.setZero(num_rows); + } + + void operator()(const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + const auto& lb = landmark_blocks_[r]; + lb->addJp_diag2(res_); + } + } + + Reductor(Reductor& a, tbb::split) + : num_rows_(a.num_rows_), landmark_blocks_(a.landmark_blocks_) { + res_.setZero(num_rows_); + }; + + inline void join(const Reductor& b) { res_ += b.res_; } + + size_t num_rows_; + const std::vector& landmark_blocks_; + VecX res_; + }; + + Reductor r(aom.total_size, landmark_blocks); + + tbb::blocked_range range(0, landmark_block_idx.size()); + tbb::parallel_reduce(range, r); + // r(range); + + if (imu_lin_data) { + for (auto& imu_block : imu_blocks) { + imu_block->addJp_diag2(r.res_); + } + } + + // TODO: We don't include pose damping here, b/c we use this to compute + // jacobian scale. 
Make sure this is clear in the the usage, possibly rename + // to reflect this, or add assert such that it fails when pose damping is + // set. + + // Note: ignore damping here + + // Add marginalization prior + // Asumes marginalization part is in the head. Check for this is located + // outside + if (marg_lin_data) { + size_t marg_size = marg_lin_data->H.cols(); + if (marg_scaling.rows() > 0) { + r.res_.head(marg_size) += (marg_lin_data->H * marg_scaling.asDiagonal()) + .colwise() + .squaredNorm(); + } else { + r.res_.head(marg_size) += marg_lin_data->H.colwise().squaredNorm(); + } + } + + return r.res_; +} + +template +void LinearizationAbsQR::scaleJl_cols() { + auto body = [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + landmark_blocks[r]->scaleJl_cols(); + } + }; + + tbb::blocked_range range(0, landmark_block_idx.size()); + tbb::parallel_for(range, body); +} + +template +void LinearizationAbsQR::scaleJp_cols( + const VecX& jacobian_scaling) { + // auto body = [&](const tbb::blocked_range& range) { + // for (size_t r = range.begin(); r != range.end(); ++r) { + // landmark_blocks[r]->scaleJp_cols(jacobian_scaling); + // } + // }; + + // tbb::blocked_range range(0, landmark_block_idx.size()); + // tbb::parallel_for(range, body); + + if (true) { + // In case of absolute poses, we scale Jp in the LMB. + + auto body = [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + landmark_blocks[r]->scaleJp_cols(jacobian_scaling); + } + }; + + tbb::blocked_range range(0, landmark_block_idx.size()); + tbb::parallel_for(range, body); + } else { + // In case LMB use relative poses we cannot directly scale the relative pose + // Jacobians. We have + // + // Jp * diag(S) = Jp_rel * J_rel_to_abs * diag(S) + // + // so instead we scale the rel-to-abs jacobians. 
+ // + // Note that since we do perform operations like J^T * J on the relative + // pose Jacobians, we should maybe consider additional scaling like + // + // (Jp_rel * diag(S_rel)) * (diag(S_rel)^-1 * J_rel_to_abs * diag(S)), + // + // but this might be only relevant if we do something more sensitive like + // also include camera intrinsics in the optimization. + + for (auto& [k, v] : relative_pose_lin) { + size_t h_idx = aom.abs_order_map.at(k.first.frame_id).first; + size_t t_idx = aom.abs_order_map.at(k.second.frame_id).first; + + v.d_rel_d_h *= + jacobian_scaling.template segment(h_idx).asDiagonal(); + + v.d_rel_d_t *= + jacobian_scaling.template segment(t_idx).asDiagonal(); + } + } + + if (imu_lin_data) { + for (auto& imu_block : imu_blocks) { + imu_block->scaleJp_cols(jacobian_scaling); + } + } + + // Add marginalization scaling + if (marg_lin_data) { + // We are only supposed to apply the scaling once. + BASALT_ASSERT(marg_scaling.size() == 0); + + size_t marg_size = marg_lin_data->H.cols(); + marg_scaling = jacobian_scaling.head(marg_size); + } +} + +template +void LinearizationAbsQR::setLandmarkDamping(Scalar lambda) { + auto body = [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + landmark_blocks[r]->setLandmarkDamping(lambda); + } + }; + + tbb::blocked_range range(0, landmark_block_idx.size()); + tbb::parallel_for(range, body); +} + +template +void LinearizationAbsQR::get_dense_Q2Jp_Q2r( + MatX& Q2Jp, VecX& Q2r) const { + size_t total_size = num_rows_Q2r; + size_t poses_size = aom.total_size; + + size_t lm_start_idx = 0; + + // Space for IMU data if present + size_t imu_start_idx = total_size; + if (imu_lin_data) { + total_size += imu_lin_data->imu_meas.size() * POSE_VEL_BIAS_SIZE; + } + + // Space for damping if present + size_t damping_start_idx = total_size; + if (hasPoseDamping()) { + total_size += poses_size; + } + + // Space for marg-prior if present + size_t marg_start_idx = total_size; + if 
(marg_lin_data) total_size += marg_lin_data->H.rows(); + + Q2Jp.setZero(total_size, poses_size); + Q2r.setZero(total_size); + + auto body = [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + const auto& lb = landmark_blocks[r]; + lb->get_dense_Q2Jp_Q2r(Q2Jp, Q2r, lm_start_idx + landmark_block_idx[r]); + } + }; + + // go over all host frames + tbb::blocked_range range(0, landmark_block_idx.size()); + tbb::parallel_for(range, body); + // body(range); + + if (imu_lin_data) { + size_t start_idx = imu_start_idx; + for (const auto& imu_block : imu_blocks) { + imu_block->add_dense_Q2Jp_Q2r(Q2Jp, Q2r, start_idx); + start_idx += POSE_VEL_BIAS_SIZE; + } + } + + // Add damping + get_dense_Q2Jp_Q2r_pose_damping(Q2Jp, damping_start_idx); + + // Add marginalization + get_dense_Q2Jp_Q2r_marg_prior(Q2Jp, Q2r, marg_start_idx); +} + +template +void LinearizationAbsQR::get_dense_H_b(MatX& H, + VecX& b) const { + struct Reductor { + Reductor(size_t opt_size, + const std::vector& landmark_blocks) + : opt_size_(opt_size), landmark_blocks_(landmark_blocks) { + H_.setZero(opt_size_, opt_size_); + b_.setZero(opt_size_); + } + + void operator()(const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + auto& lb = landmark_blocks_[r]; + lb->add_dense_H_b(H_, b_); + } + } + + Reductor(Reductor& a, tbb::split) + : opt_size_(a.opt_size_), landmark_blocks_(a.landmark_blocks_) { + H_.setZero(opt_size_, opt_size_); + b_.setZero(opt_size_); + }; + + inline void join(Reductor& b) { + H_ += b.H_; + b_ += b.b_; + } + + size_t opt_size_; + const std::vector& landmark_blocks_; + + MatX H_; + VecX b_; + }; + + size_t opt_size = aom.total_size; + + Reductor r(opt_size, landmark_blocks); + + // go over all host frames + tbb::blocked_range range(0, landmark_block_idx.size()); + tbb::parallel_reduce(range, r); + + // Add imu + add_dense_H_b_imu(r.H_, r.b_); + + // Add damping + add_dense_H_b_pose_damping(r.H_); + + // Add 
marginalization + add_dense_H_b_marg_prior(r.H_, r.b_); + + H = std::move(r.H_); + b = std::move(r.b_); +} + +template +void LinearizationAbsQR::get_dense_Q2Jp_Q2r_pose_damping( + MatX& Q2Jp, size_t start_idx) const { + size_t poses_size = num_cameras * POSE_SIZE; + if (hasPoseDamping()) { + Q2Jp.template block(start_idx, 0, poses_size, poses_size) + .diagonal() + .array() = pose_damping_diagonal_sqrt; + } +} + +template +void LinearizationAbsQR::get_dense_Q2Jp_Q2r_marg_prior( + MatX& Q2Jp, VecX& Q2r, size_t start_idx) const { + if (!marg_lin_data) return; + + BASALT_ASSERT(marg_lin_data->is_sqrt); + + size_t marg_rows = marg_lin_data->H.rows(); + size_t marg_cols = marg_lin_data->H.cols(); + + VecX delta; + estimator->computeDelta(marg_lin_data->order, delta); + + if (marg_scaling.rows() > 0) { + Q2Jp.template block(start_idx, 0, marg_rows, marg_cols) = + marg_lin_data->H * marg_scaling.asDiagonal(); + } else { + Q2Jp.template block(start_idx, 0, marg_rows, marg_cols) = marg_lin_data->H; + } + + Q2r.template segment(start_idx, marg_rows) = + marg_lin_data->H * delta + marg_lin_data->b; +} + +template +void LinearizationAbsQR::add_dense_H_b_pose_damping( + MatX& H) const { + if (hasPoseDamping()) { + H.diagonal().array() += pose_damping_diagonal; + } +} + +template +void LinearizationAbsQR::add_dense_H_b_marg_prior( + MatX& H, VecX& b) const { + if (!marg_lin_data) return; + + // Scaling not supported ATM + BASALT_ASSERT(marg_scaling.rows() == 0); + + Scalar marg_prior_error; + estimator->linearizeMargPrior(*marg_lin_data, aom, H, b, marg_prior_error); + + // size_t marg_size = marg_lin_data->H.cols(); + + // VecX delta; + // estimator->computeDelta(marg_lin_data->order, delta); + + // if (marg_scaling.rows() > 0) { + // H.topLeftCorner(marg_size, marg_size) += + // marg_scaling.asDiagonal() * marg_lin_data->H.transpose() * + // marg_lin_data->H * marg_scaling.asDiagonal(); + + // b.head(marg_size) += marg_scaling.asDiagonal() * + // marg_lin_data->H.transpose() * 
+ // (marg_lin_data->H * delta + marg_lin_data->b); + + // } else { + // H.topLeftCorner(marg_size, marg_size) += + // marg_lin_data->H.transpose() * marg_lin_data->H; + + // b.head(marg_size) += marg_lin_data->H.transpose() * + // (marg_lin_data->H * delta + marg_lin_data->b); + // } +} + +template +void LinearizationAbsQR::add_dense_H_b_imu( + DenseAccumulator& accum) const { + if (!imu_lin_data) return; + + for (const auto& imu_block : imu_blocks) { + imu_block->add_dense_H_b(accum); + } +} + +template +void LinearizationAbsQR::add_dense_H_b_imu(MatX& H, + VecX& b) const { + if (!imu_lin_data) return; + + // workaround: create an accumulator here, to avoid implementing the + // add_dense_H_b(H, b) overload in ImuBlock + DenseAccumulator accum; + accum.reset(b.size()); + + for (const auto& imu_block : imu_blocks) { + imu_block->add_dense_H_b(accum); + } + + H += accum.getH(); + b += accum.getB(); +} + +// ////////////////////////////////////////////////////////////////// +// instatiate templates + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +// Scalar=double, POSE_SIZE=6 +template class LinearizationAbsQR; +#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +// Scalar=float, POSE_SIZE=6 +template class LinearizationAbsQR; +#endif + +} // namespace basalt diff --git a/src/linearization/linearization_abs_sc.cpp b/src/linearization/linearization_abs_sc.cpp new file mode 100644 index 0000000..b08fa51 --- /dev/null +++ b/src/linearization/linearization_abs_sc.cpp @@ -0,0 +1,432 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2021, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include +#include +#include + +#include +#include +#include +#include + +namespace basalt { + +template +LinearizationAbsSC::LinearizationAbsSC( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, const MargLinData* marg_lin_data, + const ImuLinData* imu_lin_data, + const std::set* used_frames, + const std::unordered_set* lost_landmarks, + int64_t last_state_to_marg) + : options_(options), + estimator(estimator), + lmdb_(estimator->lmdb), + calib(estimator->calib), + aom(aom), + used_frames(used_frames), + marg_lin_data(marg_lin_data), + imu_lin_data(imu_lin_data), + lost_landmarks(lost_landmarks), + last_state_to_marg(last_state_to_marg), + pose_damping_diagonal(0), + pose_damping_diagonal_sqrt(0) { + BASALT_ASSERT_STREAM( + options.lb_options.huber_parameter == estimator->huber_thresh, + "Huber threshold should be set to the same value"); + + BASALT_ASSERT_STREAM(options.lb_options.obs_std_dev == estimator->obs_std_dev, + "obs_std_dev should be set to the same value"); + + if (imu_lin_data) { + for (const auto& kv : imu_lin_data->imu_meas) { + imu_blocks.emplace_back( + new ImuBlock(kv.second, imu_lin_data, aom)); + } + } + + // std::cout << "num_rows_Q2r " << num_rows_Q2r << " num_poses " << + // num_cameras + // << std::endl; +} + +template +LinearizationAbsSC::~LinearizationAbsSC() = default; + +template +void LinearizationAbsSC::log_problem_stats( + ExecutionStats& stats) const { + UNUSED(stats); +} + +template +Scalar LinearizationAbsSC::linearizeProblem( + bool* numerically_valid) { + // reset damping and scaling (might be set from previous iteration) + pose_damping_diagonal = 0; + pose_damping_diagonal_sqrt = 0; + marg_scaling = VecX(); + + std::unordered_map>> + obs_to_lin; + + if (used_frames) { + const auto& obs = lmdb_.getObservations(); + + // select all observations where the host frame is about to be + // marginalized + + if (lost_landmarks) { + for (auto it = obs.cbegin(); it != obs.cend(); ++it) 
{ + if (used_frames->count(it->first.frame_id) > 0) { + for (auto it2 = it->second.cbegin(); it2 != it->second.cend(); + ++it2) { + if (it2->first.frame_id <= last_state_to_marg) + obs_to_lin[it->first].emplace(*it2); + } + } else { + std::map> lost_obs_map; + for (auto it2 = it->second.cbegin(); it2 != it->second.cend(); + ++it2) { + for (const auto& lm_id : it2->second) { + if (lost_landmarks->count(lm_id)) { + if (it2->first.frame_id <= last_state_to_marg) + lost_obs_map[it2->first].emplace(lm_id); + } + } + } + if (!lost_obs_map.empty()) { + obs_to_lin[it->first] = lost_obs_map; + } + } + } + + } else { + for (auto it = obs.cbegin(); it != obs.cend(); ++it) { + if (used_frames->count(it->first.frame_id) > 0) { + for (auto it2 = it->second.cbegin(); it2 != it->second.cend(); + ++it2) { + if (it2->first.frame_id <= last_state_to_marg) + obs_to_lin[it->first].emplace(*it2); + } + } + } + } + } else { + obs_to_lin = lmdb_.getObservations(); + } + + Scalar error; + + ScBundleAdjustmentBase::linearizeHelperAbsStatic(ald_vec, obs_to_lin, + estimator, error); + + // TODO: Fix the computation of numarically valid points + if (numerically_valid) *numerically_valid = true; + + if (imu_lin_data) { + for (auto& imu_block : imu_blocks) { + error += imu_block->linearizeImu(estimator->frame_states); + } + } + + if (marg_lin_data) { + Scalar marg_prior_error; + estimator->computeMargPriorError(*marg_lin_data, marg_prior_error); + error += marg_prior_error; + } + + return error; +} + +template +void LinearizationAbsSC::performQR() {} + +template +void LinearizationAbsSC::setPoseDamping( + const Scalar lambda) { + BASALT_ASSERT(lambda >= 0); + + pose_damping_diagonal = lambda; + pose_damping_diagonal_sqrt = std::sqrt(lambda); +} + +template +Scalar LinearizationAbsSC::backSubstitute( + const VecX& pose_inc) { + BASALT_ASSERT(pose_inc.size() == signed_cast(aom.total_size)); + + // Update points + tbb::blocked_range keys_range(0, ald_vec.size()); + auto update_points_func = 
[&](const tbb::blocked_range& r, + Scalar l_diff) { + for (size_t i = r.begin(); i != r.end(); ++i) { + const auto& ald = ald_vec[i]; + ScBundleAdjustmentBase::updatePointsAbs(aom, ald, -pose_inc, + lmdb_, &l_diff); + } + + return l_diff; + }; + Scalar l_diff = tbb::parallel_reduce(keys_range, Scalar(0), + update_points_func, std::plus()); + + if (imu_lin_data) { + for (auto& imu_block : imu_blocks) { + imu_block->backSubstitute(pose_inc, l_diff); + } + } + + if (marg_lin_data) { + size_t marg_size = marg_lin_data->H.cols(); + VecX pose_inc_marg = pose_inc.head(marg_size); + + l_diff += estimator->computeMargPriorModelCostChange( + *marg_lin_data, marg_scaling, pose_inc_marg); + } + + return l_diff; +} + +template +typename LinearizationAbsSC::VecX +LinearizationAbsSC::getJp_diag2() const { + // TODO: group relative by host frame + + BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationAbsSC::scaleJl_cols() { + BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationAbsSC::scaleJp_cols( + const VecX& jacobian_scaling) { + UNUSED(jacobian_scaling); + BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationAbsSC::setLandmarkDamping(Scalar lambda) { + UNUSED(lambda); + BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationAbsSC::get_dense_Q2Jp_Q2r( + MatX& Q2Jp, VecX& Q2r) const { + MatX H; + VecX b; + get_dense_H_b(H, b); + + Eigen::LDLT> ldlt(H); + + VecX D_sqrt = ldlt.vectorD().array().max(0).sqrt().matrix(); + + // After LDLT, we have + // marg_H == P^T*L*D*L^T*P, + // so we compute the square root as + // marg_sqrt_H = sqrt(D)*L^T*P, + // such that + // marg_sqrt_H^T marg_sqrt_H == marg_H. 
+ Q2Jp.setIdentity(H.rows(), H.cols()); + Q2Jp = ldlt.transpositionsP() * Q2Jp; + Q2Jp = ldlt.matrixU() * Q2Jp; // U == L^T + Q2Jp = D_sqrt.asDiagonal() * Q2Jp; + + // For the right hand side, we want + // marg_b == marg_sqrt_H^T * marg_sqrt_b + // so we compute + // marg_sqrt_b = (marg_sqrt_H^T)^-1 * marg_b + // = (P^T*L*sqrt(D))^-1 * marg_b + // = sqrt(D)^-1 * L^-1 * P * marg_b + Q2r = ldlt.transpositionsP() * b; + ldlt.matrixL().solveInPlace(Q2r); + + // We already clamped negative values in D_sqrt to 0 above, but for values + // close to 0 we set b to 0. + for (int i = 0; i < Q2r.size(); ++i) { + if (D_sqrt(i) > std::sqrt(std::numeric_limits::min())) + Q2r(i) /= D_sqrt(i); + else + Q2r(i) = 0; + } +} + +template +void LinearizationAbsSC::get_dense_H_b(MatX& H, + VecX& b) const { + typename ScBundleAdjustmentBase::template LinearizeAbsReduce2< + DenseAccumulator> + lopt_abs(aom); + + tbb::blocked_range::const_iterator> + range(ald_vec.cbegin(), ald_vec.cend()); + tbb::parallel_reduce(range, lopt_abs); + // lopt_abs(range); + + // Add imu + add_dense_H_b_imu(lopt_abs.accum.getH(), lopt_abs.accum.getB()); + + // Add damping + add_dense_H_b_pose_damping(lopt_abs.accum.getH()); + + // Add marginalization + add_dense_H_b_marg_prior(lopt_abs.accum.getH(), lopt_abs.accum.getB()); + + H = std::move(lopt_abs.accum.getH()); + b = std::move(lopt_abs.accum.getB()); +} + +template +void LinearizationAbsSC::get_dense_Q2Jp_Q2r_pose_damping( + MatX& Q2Jp, size_t start_idx) const { + UNUSED(Q2Jp); + UNUSED(start_idx); + BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationAbsSC::get_dense_Q2Jp_Q2r_marg_prior( + MatX& Q2Jp, VecX& Q2r, size_t start_idx) const { + if (!marg_lin_data) return; + + size_t marg_rows = marg_lin_data->H.rows(); + size_t marg_cols = marg_lin_data->H.cols(); + + VecX delta; + estimator->computeDelta(marg_lin_data->order, delta); + + if (marg_scaling.rows() > 0) { + Q2Jp.template block(start_idx, 0, marg_rows, marg_cols) = + 
marg_lin_data->H * marg_scaling.asDiagonal(); + } else { + Q2Jp.template block(start_idx, 0, marg_rows, marg_cols) = marg_lin_data->H; + } + + Q2r.template segment(start_idx, marg_rows) = + marg_lin_data->H * delta + marg_lin_data->b; +} + +template +void LinearizationAbsSC::add_dense_H_b_pose_damping( + MatX& H) const { + if (hasPoseDamping()) { + H.diagonal().array() += pose_damping_diagonal; + } +} + +template +void LinearizationAbsSC::add_dense_H_b_marg_prior( + MatX& H, VecX& b) const { + if (!marg_lin_data) return; + + BASALT_ASSERT(marg_scaling.rows() == 0); + + Scalar marg_prior_error; + estimator->linearizeMargPrior(*marg_lin_data, aom, H, b, marg_prior_error); + + // size_t marg_size = marg_lin_data->H.cols(); + + // VecX delta; + // estimator->computeDelta(marg_lin_data->order, delta); + + // Scaling not supported ATM + + // if (marg_scaling.rows() > 0) { + // H.topLeftCorner(marg_size, marg_size) += + // marg_scaling.asDiagonal() * marg_lin_data->H.transpose() * + // marg_lin_data->H * marg_scaling.asDiagonal(); + + // b.head(marg_size) += marg_scaling.asDiagonal() * + // marg_lin_data->H.transpose() * + // (marg_lin_data->H * delta + marg_lin_data->b); + + // } else { + // H.topLeftCorner(marg_size, marg_size) += + // marg_lin_data->H.transpose() * marg_lin_data->H; + + // b.head(marg_size) += marg_lin_data->H.transpose() * + // (marg_lin_data->H * delta + marg_lin_data->b); + // } +} + +template +void LinearizationAbsSC::add_dense_H_b_imu( + DenseAccumulator& accum) const { + if (!imu_lin_data) return; + + for (const auto& imu_block : imu_blocks) { + imu_block->add_dense_H_b(accum); + } +} + +template +void LinearizationAbsSC::add_dense_H_b_imu(MatX& H, + VecX& b) const { + if (!imu_lin_data) return; + + // workaround: create an accumulator here, to avoid implementing the + // add_dense_H_b(H, b) overload in ImuBlock + DenseAccumulator accum; + accum.reset(b.size()); + + for (const auto& imu_block : imu_blocks) { + imu_block->add_dense_H_b(accum); + } 
+ + H += accum.getH(); + b += accum.getB(); +} + +// ////////////////////////////////////////////////////////////////// +// instatiate templates + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +// Scalar=double, POSE_SIZE=6 +template class LinearizationAbsSC; +#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +// Scalar=float, POSE_SIZE=6 +template class LinearizationAbsSC; +#endif + +} // namespace basalt diff --git a/src/linearization/linearization_base.cpp b/src/linearization/linearization_base.cpp new file mode 100644 index 0000000..a5bb836 --- /dev/null +++ b/src/linearization/linearization_base.cpp @@ -0,0 +1,119 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2021, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include + +#include + +namespace basalt { + +bool isLinearizationSqrt(const LinearizationType& type) { + switch (type) { + case LinearizationType::ABS_QR: + return true; + case LinearizationType::ABS_SC: + case LinearizationType::REL_SC: + return false; + default: + BASALT_ASSERT_STREAM(false, "Linearization type is not supported."); + return false; + } +} + +template +std::unique_ptr> +LinearizationBase::create( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, const MargLinData* marg_lin_data, + const ImuLinData* imu_lin_data, + const std::set* used_frames, + const std::unordered_set* lost_landmarks, + int64_t last_state_to_marg) { + // std::cout << "Creaing Linearization of type " + // << magic_enum::enum_name(options.linearization_type) << + // std::endl; + + switch (options.linearization_type) { + case LinearizationType::ABS_QR: + return std::make_unique>( + estimator, aom, options, marg_lin_data, imu_lin_data, used_frames, + lost_landmarks, last_state_to_marg); + + case LinearizationType::ABS_SC: + return std::make_unique>( + estimator, aom, options, marg_lin_data, imu_lin_data, used_frames, + lost_landmarks, last_state_to_marg); + + case LinearizationType::REL_SC: + return std::make_unique>( + estimator, aom, options, marg_lin_data, imu_lin_data, used_frames, + lost_landmarks, last_state_to_marg); + + default: + std::cerr << "Could not select a valid linearization." 
<< std::endl; + std::abort(); + } +} + +// ////////////////////////////////////////////////////////////////// +// instatiate factory templates + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +// Scalar=double, POSE_SIZE=6 +template std::unique_ptr> +LinearizationBase::create( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, const MargLinData* marg_lin_data, + const ImuLinData* imu_lin_data, + const std::set* used_frames, + const std::unordered_set* lost_landmarks, + int64_t last_state_to_marg); +#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +// Scalar=float, POSE_SIZE=6 +template std::unique_ptr> +LinearizationBase::create( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, const MargLinData* marg_lin_data, + const ImuLinData* imu_lin_data, const std::set* used_frames, + const std::unordered_set* lost_landmarks, + int64_t last_state_to_marg); +#endif + +} // namespace basalt diff --git a/src/linearization/linearization_rel_sc.cpp b/src/linearization/linearization_rel_sc.cpp new file mode 100644 index 0000000..80005d6 --- /dev/null +++ b/src/linearization/linearization_rel_sc.cpp @@ -0,0 +1,431 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2021, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include +#include +#include + +#include +#include +#include +#include + +namespace basalt { + +template +LinearizationRelSC::LinearizationRelSC( + BundleAdjustmentBase* estimator, const AbsOrderMap& aom, + const Options& options, const MargLinData* marg_lin_data, + const ImuLinData* imu_lin_data, + const std::set* used_frames, + const std::unordered_set* lost_landmarks, + int64_t last_state_to_marg) + : options_(options), + estimator(estimator), + lmdb_(estimator->lmdb), + calib(estimator->calib), + aom(aom), + used_frames(used_frames), + marg_lin_data(marg_lin_data), + imu_lin_data(imu_lin_data), + lost_landmarks(lost_landmarks), + last_state_to_marg(last_state_to_marg), + pose_damping_diagonal(0), + pose_damping_diagonal_sqrt(0) { + BASALT_ASSERT_STREAM( + options.lb_options.huber_parameter == estimator->huber_thresh, + "Huber threshold should be set to the same value"); + + BASALT_ASSERT_STREAM(options.lb_options.obs_std_dev == estimator->obs_std_dev, + "obs_std_dev should be set to the same 
value"); + + if (imu_lin_data) { + for (const auto& kv : imu_lin_data->imu_meas) { + imu_blocks.emplace_back( + new ImuBlock(kv.second, imu_lin_data, aom)); + } + } + + // std::cout << "num_rows_Q2r " << num_rows_Q2r << " num_poses " << + // num_cameras + // << std::endl; +} + +template +LinearizationRelSC::~LinearizationRelSC() = default; + +template +void LinearizationRelSC::log_problem_stats( + ExecutionStats& stats) const { + UNUSED(stats); +} + +template +Scalar LinearizationRelSC::linearizeProblem( + bool* numerically_valid) { + // reset damping and scaling (might be set from previous iteration) + pose_damping_diagonal = 0; + pose_damping_diagonal_sqrt = 0; + marg_scaling = VecX(); + + std::unordered_map>> + obs_to_lin; + + if (used_frames) { + const auto& obs = lmdb_.getObservations(); + + // select all observations where the host frame is about to be + // marginalized + + if (lost_landmarks) { + for (auto it = obs.cbegin(); it != obs.cend(); ++it) { + if (used_frames->count(it->first.frame_id) > 0) { + for (auto it2 = it->second.cbegin(); it2 != it->second.cend(); + ++it2) { + if (it2->first.frame_id <= last_state_to_marg) + obs_to_lin[it->first].emplace(*it2); + } + } else { + std::map> lost_obs_map; + for (auto it2 = it->second.cbegin(); it2 != it->second.cend(); + ++it2) { + for (const auto& lm_id : it2->second) { + if (lost_landmarks->count(lm_id)) { + if (it2->first.frame_id <= last_state_to_marg) + lost_obs_map[it2->first].emplace(lm_id); + } + } + } + if (!lost_obs_map.empty()) { + obs_to_lin[it->first] = lost_obs_map; + } + } + } + + } else { + for (auto it = obs.cbegin(); it != obs.cend(); ++it) { + if (used_frames->count(it->first.frame_id) > 0) { + for (auto it2 = it->second.cbegin(); it2 != it->second.cend(); + ++it2) { + // TODO: Check how ABS_QR works without last_state_to_marg + if (it2->first.frame_id <= last_state_to_marg) + obs_to_lin[it->first].emplace(*it2); + } + } + } + } + } else { + obs_to_lin = lmdb_.getObservations(); + } + + 
Scalar error; + + ScBundleAdjustmentBase::linearizeHelperStatic(rld_vec, obs_to_lin, + estimator, error); + + // TODO: Fix the computation of numerically valid points + if (numerically_valid) *numerically_valid = true; + + if (imu_lin_data) { + for (auto& imu_block : imu_blocks) { + error += imu_block->linearizeImu(estimator->frame_states); + } + } + + if (marg_lin_data) { + Scalar marg_prior_error; + estimator->computeMargPriorError(*marg_lin_data, marg_prior_error); + error += marg_prior_error; + } + + return error; +} + +template +void LinearizationRelSC::performQR() {} + +template +void LinearizationRelSC::setPoseDamping( + const Scalar lambda) { + BASALT_ASSERT(lambda >= 0); + + pose_damping_diagonal = lambda; + pose_damping_diagonal_sqrt = std::sqrt(lambda); +} + +template +Scalar LinearizationRelSC::backSubstitute( + const VecX& pose_inc) { + BASALT_ASSERT(pose_inc.size() == signed_cast(aom.total_size)); + + // Update points + tbb::blocked_range keys_range(0, rld_vec.size()); + auto update_points_func = [&](const tbb::blocked_range& r, + Scalar l_diff) { + for (size_t i = r.begin(); i != r.end(); ++i) { + const auto& rld = rld_vec[i]; + ScBundleAdjustmentBase::updatePoints(aom, rld, -pose_inc, lmdb_, + &l_diff); + } + + return l_diff; + }; + Scalar l_diff = tbb::parallel_reduce(keys_range, Scalar(0), + update_points_func, std::plus()); + + if (imu_lin_data) { + for (auto& imu_block : imu_blocks) { + imu_block->backSubstitute(pose_inc, l_diff); + } + } + + if (marg_lin_data) { + size_t marg_size = marg_lin_data->H.cols(); + VecX pose_inc_marg = pose_inc.head(marg_size); + + l_diff += estimator->computeMargPriorModelCostChange( + *marg_lin_data, marg_scaling, pose_inc_marg); + } + + return l_diff; +} + +template +typename LinearizationRelSC::VecX +LinearizationRelSC::getJp_diag2() const { + // TODO: group relative by host frame + + BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationRelSC::scaleJl_cols() { + 
BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationRelSC::scaleJp_cols( + const VecX& jacobian_scaling) { + UNUSED(jacobian_scaling); + BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationRelSC::setLandmarkDamping(Scalar lambda) { + UNUSED(lambda); + BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationRelSC::get_dense_Q2Jp_Q2r( + MatX& Q2Jp, VecX& Q2r) const { + MatX H; + VecX b; + get_dense_H_b(H, b); + + Eigen::LDLT> ldlt(H); + + VecX D_sqrt = ldlt.vectorD().array().max(0).sqrt().matrix(); + + // After LDLT, we have + // marg_H == P^T*L*D*L^T*P, + // so we compute the square root as + // marg_sqrt_H = sqrt(D)*L^T*P, + // such that + // marg_sqrt_H^T marg_sqrt_H == marg_H. + Q2Jp.setIdentity(H.rows(), H.cols()); + Q2Jp = ldlt.transpositionsP() * Q2Jp; + Q2Jp = ldlt.matrixU() * Q2Jp; // U == L^T + Q2Jp = D_sqrt.asDiagonal() * Q2Jp; + + // For the right hand side, we want + // marg_b == marg_sqrt_H^T * marg_sqrt_b + // so we compute + // marg_sqrt_b = (marg_sqrt_H^T)^-1 * marg_b + // = (P^T*L*sqrt(D))^-1 * marg_b + // = sqrt(D)^-1 * L^-1 * P * marg_b + Q2r = ldlt.transpositionsP() * b; + ldlt.matrixL().solveInPlace(Q2r); + + // We already clamped negative values in D_sqrt to 0 above, but for values + // close to 0 we set b to 0. 
+ for (int i = 0; i < Q2r.size(); ++i) { + if (D_sqrt(i) > std::sqrt(std::numeric_limits::min())) + Q2r(i) /= D_sqrt(i); + else + Q2r(i) = 0; + } +} + +template +void LinearizationRelSC::get_dense_H_b(MatX& H, + VecX& b) const { + typename ScBundleAdjustmentBase::template LinearizeAbsReduce< + DenseAccumulator> + lopt_abs(aom); + + tbb::blocked_range::const_iterator> + range(rld_vec.cbegin(), rld_vec.cend()); + tbb::parallel_reduce(range, lopt_abs); + + // Add imu + add_dense_H_b_imu(lopt_abs.accum.getH(), lopt_abs.accum.getB()); + + // Add damping + add_dense_H_b_pose_damping(lopt_abs.accum.getH()); + + // Add marginalization + add_dense_H_b_marg_prior(lopt_abs.accum.getH(), lopt_abs.accum.getB()); + + H = std::move(lopt_abs.accum.getH()); + b = std::move(lopt_abs.accum.getB()); +} + +template +void LinearizationRelSC::get_dense_Q2Jp_Q2r_pose_damping( + MatX& Q2Jp, size_t start_idx) const { + UNUSED(Q2Jp); + UNUSED(start_idx); + BASALT_ASSERT_STREAM(false, "Not implemented"); +} + +template +void LinearizationRelSC::get_dense_Q2Jp_Q2r_marg_prior( + MatX& Q2Jp, VecX& Q2r, size_t start_idx) const { + if (!marg_lin_data) return; + + size_t marg_rows = marg_lin_data->H.rows(); + size_t marg_cols = marg_lin_data->H.cols(); + + VecX delta; + estimator->computeDelta(marg_lin_data->order, delta); + + if (marg_scaling.rows() > 0) { + Q2Jp.template block(start_idx, 0, marg_rows, marg_cols) = + marg_lin_data->H * marg_scaling.asDiagonal(); + } else { + Q2Jp.template block(start_idx, 0, marg_rows, marg_cols) = marg_lin_data->H; + } + + Q2r.template segment(start_idx, marg_rows) = + marg_lin_data->H * delta + marg_lin_data->b; +} + +template +void LinearizationRelSC::add_dense_H_b_pose_damping( + MatX& H) const { + if (hasPoseDamping()) { + H.diagonal().array() += pose_damping_diagonal; + } +} + +template +void LinearizationRelSC::add_dense_H_b_marg_prior( + MatX& H, VecX& b) const { + if (!marg_lin_data) return; + + // Scaling not supported ATM + 
BASALT_ASSERT(marg_scaling.rows() == 0); + + Scalar marg_prior_error; + estimator->linearizeMargPrior(*marg_lin_data, aom, H, b, marg_prior_error); + + // size_t marg_size = marg_lin_data->H.cols(); + + // VecX delta; + // estimator->computeDelta(marg_lin_data->order, delta); + + // if (marg_scaling.rows() > 0) { + // H.topLeftCorner(marg_size, marg_size) += + // marg_scaling.asDiagonal() * marg_lin_data->H.transpose() * + // marg_lin_data->H * marg_scaling.asDiagonal(); + + // b.head(marg_size) += marg_scaling.asDiagonal() * + // marg_lin_data->H.transpose() * + // (marg_lin_data->H * delta + marg_lin_data->b); + + // } else { + // H.topLeftCorner(marg_size, marg_size) += + // marg_lin_data->H.transpose() * marg_lin_data->H; + + // b.head(marg_size) += marg_lin_data->H.transpose() * + // (marg_lin_data->H * delta + marg_lin_data->b); + // } +} + +template +void LinearizationRelSC::add_dense_H_b_imu( + DenseAccumulator& accum) const { + if (!imu_lin_data) return; + + for (const auto& imu_block : imu_blocks) { + imu_block->add_dense_H_b(accum); + } +} + +template +void LinearizationRelSC::add_dense_H_b_imu(MatX& H, + VecX& b) const { + if (!imu_lin_data) return; + + // workaround: create an accumulator here, to avoid implementing the + // add_dense_H_b(H, b) overload in ImuBlock + DenseAccumulator accum; + accum.reset(b.size()); + + for (const auto& imu_block : imu_blocks) { + imu_block->add_dense_H_b(accum); + } + + H += accum.getH(); + b += accum.getB(); +} + +// ////////////////////////////////////////////////////////////////// +// instatiate templates + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +// Scalar=double, POSE_SIZE=6 +template class LinearizationRelSC; +#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +// Scalar=float, POSE_SIZE=6 +template class LinearizationRelSC; +#endif + +} // namespace basalt diff --git a/src/mapper.cpp b/src/mapper.cpp new file mode 100644 index 0000000..d37f7f8 --- /dev/null +++ b/src/mapper.cpp @@ -0,0 +1,722 @@ +/** +BSD 3-Clause 
License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using basalt::POSE_SIZE; +using basalt::POSE_VEL_BIAS_SIZE; + +Eigen::Vector3d g(0, 0, -9.81); + +const Eigen::aligned_vector image_resolutions = {{752, 480}, + {752, 480}}; + +basalt::VioConfig vio_config; +basalt::NfrMapper::Ptr nrf_mapper; + +Eigen::aligned_vector gt_frame_t_w_i; +std::vector gt_frame_t_ns, image_t_ns; + +Eigen::aligned_vector mapper_points; +std::vector mapper_point_ids; + +std::map marg_data; + +Eigen::aligned_vector edges_vis; +Eigen::aligned_vector roll_pitch_vis; +Eigen::aligned_vector rel_edges_vis; + +void draw_image_overlay(pangolin::View& v, size_t cam_id); +void draw_scene(); +void load_data(const std::string& calib_path, + const std::string& marg_data_path); +void processMargData(basalt::MargData& m); +void extractNonlinearFactors(basalt::MargData& m); +void computeEdgeVis(); +void optimize(); +void randomInc(); +void randomYawInc(); +void compute_error(); +double alignButton(); +void detect(); +void match(); +void tracks(); +void optimize(); +void filter(); +void saveTrajectoryButton(); + +constexpr int UI_WIDTH = 200; + +basalt::Calibration calib; + +pangolin::Var show_frame1("ui.show_frame1", 0, 0, 1); +pangolin::Var show_cam1("ui.show_cam1", 0, 0, 0); +pangolin::Var show_frame2("ui.show_frame2", 0, 0, 1); +pangolin::Var show_cam2("ui.show_cam2", 0, 0, 0); +pangolin::Var lock_frames("ui.lock_frames", true, false, true); +pangolin::Var show_detected("ui.show_detected", true, false, true); +pangolin::Var show_matches("ui.show_matches", true, false, true); +pangolin::Var show_inliers("ui.show_inliers", true, false, true); +pangolin::Var show_ids("ui.show_ids", false, false, true); + +pangolin::Var num_opt_iter("ui.num_opt_iter", 10, 0, 20); + +pangolin::Var 
show_gt("ui.show_gt", true, false, true); +pangolin::Var show_edges("ui.show_edges", true, false, true); +pangolin::Var show_points("ui.show_points", true, false, true); + +using Button = pangolin::Var>; + +Button detect_btn("ui.detect", &detect); +Button match_btn("ui.match", &match); +Button tracks_btn("ui.tracks", &tracks); +Button optimize_btn("ui.optimize", &optimize); + +pangolin::Var outlier_threshold("ui.outlier_threshold", 3.0, 0.01, 10); + +Button filter_btn("ui.filter", &filter); +Button align_btn("ui.aling_se3", &alignButton); + +pangolin::Var euroc_fmt("ui.euroc_fmt", true, false, true); +pangolin::Var tum_rgbd_fmt("ui.tum_rgbd_fmt", false, false, true); +Button save_traj_btn("ui.save_traj", &saveTrajectoryButton); + +pangolin::OpenGlRenderState camera; + +std::string marg_data_path; + +int main(int argc, char** argv) { + bool show_gui = true; + std::string cam_calib_path; + std::string result_path; + std::string config_path; + + CLI::App app{"App description"}; + + app.add_option("--show-gui", show_gui, "Show GUI"); + app.add_option("--cam-calib", cam_calib_path, + "Ground-truth camera calibration used for simulation.") + ->required(); + + app.add_option("--marg-data", marg_data_path, "Path to cache folder.") + ->required(); + + app.add_option("--config-path", config_path, "Path to config file."); + + app.add_option("--result-path", result_path, "Path to config file."); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError& e) { + return app.exit(e); + } + + if (!config_path.empty()) { + vio_config.load(config_path); + } + + load_data(cam_calib_path, marg_data_path); + + for (auto& kv : marg_data) { + nrf_mapper->addMargData(kv.second); + } + + computeEdgeVis(); + + { + std::cout << "Loaded " << nrf_mapper->img_data.size() << " images." 
+ << std::endl; + + show_frame1.Meta().range[1] = nrf_mapper->img_data.size() - 1; + show_frame2.Meta().range[1] = nrf_mapper->img_data.size() - 1; + show_frame1.Meta().gui_changed = true; + show_frame2.Meta().gui_changed = true; + + show_cam1.Meta().range[1] = calib.intrinsics.size() - 1; + show_cam2.Meta().range[1] = calib.intrinsics.size() - 1; + if (calib.intrinsics.size() > 1) show_cam2 = 1; + + for (const auto& kv : nrf_mapper->img_data) { + image_t_ns.emplace_back(kv.first); + } + + std::sort(image_t_ns.begin(), image_t_ns.end()); + } + + if (show_gui) { + pangolin::CreateWindowAndBind("Main", 1800, 1000); + + glEnable(GL_DEPTH_TEST); + + pangolin::View& img_view_display = + pangolin::CreateDisplay() + .SetBounds(0.4, 1.0, pangolin::Attach::Pix(UI_WIDTH), 0.4) + .SetLayout(pangolin::LayoutEqual); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + std::vector> img_view; + while (img_view.size() < calib.intrinsics.size()) { + std::shared_ptr iv(new pangolin::ImageView); + + size_t idx = img_view.size(); + img_view.push_back(iv); + + img_view_display.AddDisplay(*iv); + iv->extern_draw_function = + std::bind(&draw_image_overlay, std::placeholders::_1, idx); + } + + camera = pangolin::OpenGlRenderState( + pangolin::ProjectionMatrix(640, 480, 400, 400, 320, 240, 0.001, 10000), + pangolin::ModelViewLookAt(-3.4, -3.7, -8.3, 2.1, 0.6, 0.2, + pangolin::AxisNegY)); + + // pangolin::OpenGlRenderState camera( + // pangolin::ProjectionMatrixOrthographic(-30, 30, -30, 30, -30, 30), + // pangolin::ModelViewLookAt(-3.4, -3.7, -8.3, 2.1, 0.6, 0.2, + // pangolin::AxisNegY)); + + pangolin::View& display3D = + pangolin::CreateDisplay() + .SetAspect(-640 / 480.0) + .SetBounds(0.0, 1.0, 0.4, 1.0) + .SetHandler(new pangolin::Handler3D(camera)); + + while (!pangolin::ShouldQuit()) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + if (lock_frames) { + // in case of locking frames, chaning one should change the other + if 
(show_frame1.GuiChanged()) { + show_frame2 = show_frame1; + show_frame2.Meta().gui_changed = true; + show_frame1.Meta().gui_changed = true; + } else if (show_frame2.GuiChanged()) { + show_frame1 = show_frame2; + show_frame1.Meta().gui_changed = true; + show_frame2.Meta().gui_changed = true; + } + } + + display3D.Activate(camera); + glClearColor(1.f, 1.f, 1.f, 1.0f); + + draw_scene(); + + if (show_frame1.GuiChanged() || show_cam1.GuiChanged()) { + size_t frame_id = static_cast(show_frame1); + int64_t timestamp = image_t_ns[frame_id]; + size_t cam_id = show_cam1; + + if (nrf_mapper->img_data.count(timestamp) > 0 && + nrf_mapper->img_data.at(timestamp).get()) { + const std::vector& img_vec = + nrf_mapper->img_data.at(timestamp)->img_data; + + pangolin::GlPixFormat fmt; + fmt.glformat = GL_LUMINANCE; + fmt.gltype = GL_UNSIGNED_SHORT; + fmt.scalable_internal_format = GL_LUMINANCE16; + + if (img_vec[cam_id].img.get()) { + img_view[0]->SetImage( + img_vec[cam_id].img->ptr, img_vec[cam_id].img->w, + img_vec[cam_id].img->h, img_vec[cam_id].img->pitch, fmt); + } else { + img_view[0]->Clear(); + } + } else { + img_view[0]->Clear(); + } + } + + if (euroc_fmt.GuiChanged()) { + tum_rgbd_fmt = !euroc_fmt; + } + + if (tum_rgbd_fmt.GuiChanged()) { + euroc_fmt = !tum_rgbd_fmt; + } + + if (show_frame2.GuiChanged() || show_cam2.GuiChanged()) { + size_t frame_id = static_cast(show_frame2); + int64_t timestamp = image_t_ns[frame_id]; + size_t cam_id = show_cam2; + + if (nrf_mapper->img_data.count(timestamp) > 0 && + nrf_mapper->img_data.at(timestamp).get()) { + const std::vector& img_vec = + nrf_mapper->img_data.at(timestamp)->img_data; + + pangolin::GlPixFormat fmt; + fmt.glformat = GL_LUMINANCE; + fmt.gltype = GL_UNSIGNED_SHORT; + fmt.scalable_internal_format = GL_LUMINANCE16; + + if (img_vec[cam_id].img.get()) { + img_view[1]->SetImage( + img_vec[cam_id].img->ptr, img_vec[cam_id].img->w, + img_vec[cam_id].img->h, img_vec[cam_id].img->pitch, fmt); + } else { + img_view[1]->Clear(); + 
} + } else { + img_view[1]->Clear(); + } + } + + pangolin::FinishFrame(); + + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } + } else { + auto time_start = std::chrono::high_resolution_clock::now(); + // optimize(); + detect(); + match(); + tracks(); + optimize(); + filter(); + optimize(); + + auto time_end = std::chrono::high_resolution_clock::now(); + + if (!result_path.empty()) { + double error = alignButton(); + + auto exec_time_ns = std::chrono::duration_cast( + time_end - time_start); + + std::ofstream os(result_path); + { + cereal::JSONOutputArchive ar(os); + ar(cereal::make_nvp("rms_ate", error)); + ar(cereal::make_nvp("num_frames", nrf_mapper->getFramePoses().size())); + ar(cereal::make_nvp("exec_time_ns", exec_time_ns.count())); + } + os.close(); + } + } + + return 0; +} + +void draw_image_overlay(pangolin::View& v, size_t view_id) { + UNUSED(v); + + size_t frame_id = (view_id == 0) ? show_frame1 : show_frame2; + size_t cam_id = (view_id == 0) ? show_cam1 : show_cam2; + + basalt::TimeCamId tcid(image_t_ns[frame_id], cam_id); + + float text_row = 20; + + if (show_detected) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 0.0); // red + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (nrf_mapper->feature_corners.find(tcid) != + nrf_mapper->feature_corners.end()) { + const basalt::KeypointsData& cr = nrf_mapper->feature_corners.at(tcid); + + for (size_t i = 0; i < cr.corners.size(); i++) { + Eigen::Vector2d c = cr.corners[i]; + double angle = cr.corner_angles[i]; + pangolin::glDrawCirclePerimeter(c[0], c[1], 3.0); + + Eigen::Vector2d r(3, 0); + Eigen::Rotation2Dd rot(angle); + r = rot * r; + + pangolin::glDrawLine(c, c + r); + } + + pangolin::GlFont::I() + .Text("Detected %d corners", cr.corners.size()) + .Draw(5, 20); + + } else { + glLineWidth(1.0); + + pangolin::GlFont::I().Text("Corners not processed").Draw(5, text_row); + } + text_row += 20; + } + + if (show_matches || show_inliers) { + glLineWidth(1.0); + 
glColor3f(0.0, 0.0, 1.0); // blue + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + size_t o_frame_id = (view_id == 0 ? show_frame2 : show_frame1); + size_t o_cam_id = (view_id == 0 ? show_cam2 : show_cam1); + + basalt::TimeCamId o_tcid(image_t_ns[o_frame_id], o_cam_id); + + int idx = -1; + + auto it = nrf_mapper->feature_matches.find(std::make_pair(tcid, o_tcid)); + + if (it != nrf_mapper->feature_matches.end()) { + idx = 0; + } else { + it = nrf_mapper->feature_matches.find(std::make_pair(o_tcid, tcid)); + if (it != nrf_mapper->feature_matches.end()) { + idx = 1; + } + } + + if (idx >= 0 && show_matches) { + if (nrf_mapper->feature_corners.find(tcid) != + nrf_mapper->feature_corners.end()) { + const basalt::KeypointsData& cr = nrf_mapper->feature_corners.at(tcid); + + for (size_t i = 0; i < it->second.matches.size(); i++) { + size_t c_idx = idx == 0 ? it->second.matches[i].first + : it->second.matches[i].second; + + Eigen::Vector2d c = cr.corners[c_idx]; + double angle = cr.corner_angles[c_idx]; + pangolin::glDrawCirclePerimeter(c[0], c[1], 3.0); + + Eigen::Vector2d r(3, 0); + Eigen::Rotation2Dd rot(angle); + r = rot * r; + + pangolin::glDrawLine(c, c + r); + + if (show_ids) { + pangolin::GlFont::I().Text("%d", i).Draw(c[0], c[1]); + } + } + + pangolin::GlFont::I() + .Text("Detected %d matches", it->second.matches.size()) + .Draw(5, text_row); + text_row += 20; + } + } + + glColor3f(0.0, 1.0, 0.0); // green + + if (idx >= 0 && show_inliers) { + if (nrf_mapper->feature_corners.find(tcid) != + nrf_mapper->feature_corners.end()) { + const basalt::KeypointsData& cr = nrf_mapper->feature_corners.at(tcid); + + for (size_t i = 0; i < it->second.inliers.size(); i++) { + size_t c_idx = idx == 0 ? 
it->second.inliers[i].first + : it->second.inliers[i].second; + + Eigen::Vector2d c = cr.corners[c_idx]; + double angle = cr.corner_angles[c_idx]; + pangolin::glDrawCirclePerimeter(c[0], c[1], 3.0); + + Eigen::Vector2d r(3, 0); + Eigen::Rotation2Dd rot(angle); + r = rot * r; + + pangolin::glDrawLine(c, c + r); + + if (show_ids) { + pangolin::GlFont::I().Text("%d", i).Draw(c[0], c[1]); + } + } + + pangolin::GlFont::I() + .Text("Detected %d inliers", it->second.inliers.size()) + .Draw(5, text_row); + text_row += 20; + } + } + } +} + +void draw_scene() { + glPointSize(3); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + glColor3ubv(pose_color); + if (show_points) pangolin::glDrawPoints(mapper_points); + + glColor3ubv(gt_color); + if (show_gt) pangolin::glDrawLineStrip(gt_frame_t_w_i); + + glColor3f(0.0, 1.0, 0.0); + if (show_edges) pangolin::glDrawLines(edges_vis); + + glLineWidth(2); + glColor3f(1.0, 0.0, 1.0); + if (show_edges) pangolin::glDrawLines(roll_pitch_vis); + glLineWidth(1); + + glColor3f(1.0, 0.0, 0.0); + if (show_edges) pangolin::glDrawLines(rel_edges_vis); + + for (const auto& kv : nrf_mapper->getFramePoses()) { + pangolin::glDrawAxis(kv.second.getPose().matrix(), 0.1); + } + + pangolin::glDrawAxis(Sophus::SE3d().matrix(), 1.0); +} + +void load_data(const std::string& calib_path, const std::string& cache_path) { + { + std::ifstream os(calib_path, std::ios::binary); + + if (os.is_open()) { + cereal::JSONInputArchive archive(os); + archive(calib); + std::cout << "Loaded camera with " << calib.intrinsics.size() + << " cameras" << std::endl; + + } else { + std::cerr << "could not load camera calibration " << calib_path + << std::endl; + std::abort(); + } + } + + { + // Load gt. 
+ { + std::string p = cache_path + "/gt.cereal"; + std::ifstream is(p, std::ios::binary); + + { + cereal::BinaryInputArchive archive(is); + archive(gt_frame_t_ns); + archive(gt_frame_t_w_i); + } + is.close(); + std::cout << "Loaded " << gt_frame_t_ns.size() << " timestamps and " + << gt_frame_t_w_i.size() << " poses" << std::endl; + } + } + + nrf_mapper.reset(new basalt::NfrMapper(calib, vio_config)); + + basalt::MargDataLoader mdl; + tbb::concurrent_bounded_queue marg_queue; + mdl.out_marg_queue = &marg_queue; + + mdl.start(cache_path); + + while (true) { + basalt::MargData::Ptr data; + marg_queue.pop(data); + + if (data.get()) { + int64_t t_ns = *data->kfs_to_marg.begin(); + marg_data[t_ns] = data; + + } else { + break; + } + } + + std::cout << "Loaded " << marg_data.size() << " marg data." << std::endl; +} + +void computeEdgeVis() { + edges_vis.clear(); + for (const auto& kv1 : nrf_mapper->lmdb.getObservations()) { + for (const auto& kv2 : kv1.second) { + Eigen::Vector3d p1 = nrf_mapper->getFramePoses() + .at(kv1.first.frame_id) + .getPose() + .translation(); + Eigen::Vector3d p2 = nrf_mapper->getFramePoses() + .at(kv2.first.frame_id) + .getPose() + .translation(); + + edges_vis.emplace_back(p1); + edges_vis.emplace_back(p2); + } + } + + roll_pitch_vis.clear(); + for (const auto& v : nrf_mapper->roll_pitch_factors) { + const Sophus::SE3d& T_w_i = + nrf_mapper->getFramePoses().at(v.t_ns).getPose(); + + Eigen::Vector3d p = T_w_i.translation(); + Eigen::Vector3d d = + v.R_w_i_meas * T_w_i.so3().inverse() * (-Eigen::Vector3d::UnitZ()); + + roll_pitch_vis.emplace_back(p); + roll_pitch_vis.emplace_back(p + 0.1 * d); + } + + rel_edges_vis.clear(); + for (const auto& v : nrf_mapper->rel_pose_factors) { + Eigen::Vector3d p1 = + nrf_mapper->getFramePoses().at(v.t_i_ns).getPose().translation(); + Eigen::Vector3d p2 = + nrf_mapper->getFramePoses().at(v.t_j_ns).getPose().translation(); + + rel_edges_vis.emplace_back(p1); + rel_edges_vis.emplace_back(p2); + } +} + +void 
optimize() { + nrf_mapper->optimize(num_opt_iter); + nrf_mapper->get_current_points(mapper_points, mapper_point_ids); + + computeEdgeVis(); +} + +double alignButton() { + Eigen::aligned_vector filter_t_w_i; + std::vector filter_t_ns; + + for (const auto& kv : nrf_mapper->getFramePoses()) { + filter_t_ns.emplace_back(kv.first); + filter_t_w_i.emplace_back(kv.second.getPose().translation()); + } + + return basalt::alignSVD(filter_t_ns, filter_t_w_i, gt_frame_t_ns, + gt_frame_t_w_i); +} + +void detect() { + nrf_mapper->feature_corners.clear(); + nrf_mapper->feature_matches.clear(); + nrf_mapper->detect_keypoints(); +} + +void match() { + nrf_mapper->feature_matches.clear(); + nrf_mapper->match_stereo(); + nrf_mapper->match_all(); +} + +void tracks() { + nrf_mapper->build_tracks(); + nrf_mapper->setup_opt(); + nrf_mapper->get_current_points(mapper_points, mapper_point_ids); + // nrf_mapper->get_current_points_with_color(mapper_points, + // mapper_points_color, + // mapper_point_ids); + computeEdgeVis(); +} + +void filter() { + nrf_mapper->filterOutliers(outlier_threshold, 4); + nrf_mapper->get_current_points(mapper_points, mapper_point_ids); +} + +void saveTrajectoryButton() { + if (tum_rgbd_fmt) { + std::ofstream os("keyframeTrajectory.txt"); + + os << "# timestamp tx ty tz qx qy qz qw" << std::endl; + + for (const auto& kv : nrf_mapper->getFramePoses()) { + const Sophus::SE3d pose = kv.second.getPose(); + os << std::scientific << std::setprecision(18) << kv.first * 1e-9 << " " + << pose.translation().x() << " " << pose.translation().y() << " " + << pose.translation().z() << " " << pose.unit_quaternion().x() << " " + << pose.unit_quaternion().y() << " " << pose.unit_quaternion().z() + << " " << pose.unit_quaternion().w() << std::endl; + } + + os.close(); + + std::cout << "Saved trajectory in TUM RGB-D Dataset format in " + "keyframeTrajectory.txt" + << std::endl; + } else { + std::ofstream os("keyframeTrajectory.csv"); + + os << "#timestamp [ns],p_RS_R_x [m],p_RS_R_y 
[m],p_RS_R_z [m],q_RS_w " + "[],q_RS_x [],q_RS_y [],q_RS_z []" + << std::endl; + + for (const auto& kv : nrf_mapper->getFramePoses()) { + const Sophus::SE3d pose = kv.second.getPose(); + os << std::scientific << std::setprecision(18) << kv.first << "," + << pose.translation().x() << "," << pose.translation().y() << "," + << pose.translation().z() << "," << pose.unit_quaternion().w() << "," + << pose.unit_quaternion().x() << "," << pose.unit_quaternion().y() + << "," << pose.unit_quaternion().z() << std::endl; + } + + os.close(); + + std::cout + << "Saved trajectory in Euroc Dataset format in keyframeTrajectory.csv" + << std::endl; + } +} diff --git a/src/mapper_sim.cpp b/src/mapper_sim.cpp new file mode 100644 index 0000000..85b8bc3 --- /dev/null +++ b/src/mapper_sim.cpp @@ -0,0 +1,605 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using basalt::POSE_SIZE; +using basalt::POSE_VEL_BIAS_SIZE; + +Eigen::Vector3d g(0, 0, -9.81); + +std::shared_ptr> gt_spline; + +Eigen::aligned_vector gt_frame_T_w_i; +Eigen::aligned_vector gt_frame_t_w_i, vio_t_w_i; +std::vector gt_frame_t_ns; + +Eigen::aligned_vector gt_accel, gt_gyro, gt_accel_bias, + gt_gyro_bias, noisy_accel, noisy_gyro, gt_vel; + +std::vector gt_imu_t_ns; + +Eigen::aligned_vector filter_points; +std::vector filter_point_ids; + +std::map marg_data; + +Eigen::aligned_vector roll_pitch_factors; +Eigen::aligned_vector rel_pose_factors; + +Eigen::aligned_vector edges_vis; +Eigen::aligned_vector roll_pitch_vis; +Eigen::aligned_vector rel_edges_vis; + +Eigen::aligned_vector mapper_points; +std::vector mapper_point_ids; + +basalt::NfrMapper::Ptr nrf_mapper; + +std::map gt_observations; +std::map noisy_observations; + +void draw_scene(); +void load_data(const std::string& calib_path, + const std::string& marg_data_path); +void processMargData(basalt::MargData& m); +void extractNonlinearFactors(basalt::MargData& m); +void computeEdgeVis(); +void optimize(); +void randomInc(); +void randomYawInc(); +double alignButton(); +void setup_points(); + +constexpr int 
UI_WIDTH = 200; +// constexpr int NUM_FRAMES = 500; + +basalt::Calibration calib; + +pangolin::Var show_edges("ui.show_edges", true, false, true); +pangolin::Var show_points("ui.show_points", true, false, true); + +using Button = pangolin::Var>; + +Button optimize_btn("ui.optimize", &optimize); +Button rand_inc_btn("ui.rand_inc", &randomInc); +Button rand_yaw_inc_btn("ui.rand_yaw", &randomYawInc); +Button setup_points_btn("ui.setup_points", &setup_points); +Button align_se3_btn("ui.align_se3", &alignButton); + +std::string marg_data_path; + +int main(int argc, char** argv) { + bool show_gui = true; + std::string cam_calib_path; + std::string result_path; + + CLI::App app{"App description"}; + + app.add_option("--show-gui", show_gui, "Show GUI"); + app.add_option("--cam-calib", cam_calib_path, + "Ground-truth camera calibration used for simulation.") + ->required(); + + app.add_option("--marg-data", marg_data_path, "Path to cache folder.") + ->required(); + + app.add_option("--result-path", result_path, + "Path to result file where the system will write RMSE ATE."); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError& e) { + return app.exit(e); + } + + load_data(cam_calib_path, marg_data_path); + + basalt::VioConfig config; + + nrf_mapper.reset(new basalt::NfrMapper(calib, config)); + + for (auto& kv : marg_data) { + nrf_mapper->addMargData(kv.second); + } + + computeEdgeVis(); + + std::cout << "roll_pitch_factors.size() " << roll_pitch_factors.size() + << std::endl; + std::cout << "rel_pose_factors.size() " << rel_pose_factors.size() + << std::endl; + + if (show_gui) { + pangolin::CreateWindowAndBind("Main", 1800, 1000); + + glEnable(GL_DEPTH_TEST); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + pangolin::OpenGlRenderState camera( + pangolin::ProjectionMatrix(640, 480, 400, 400, 320, 240, 0.001, 10000), + pangolin::ModelViewLookAt(-8.4, -8.7, -8.3, 2.1, 0.6, 0.2, + pangolin::AxisNegY)); + + 
pangolin::View& display3D = + pangolin::CreateDisplay() + .SetAspect(-640 / 480.0) + .SetBounds(0.0, 1.0, pangolin::Attach::Pix(UI_WIDTH), 1.0) + .SetHandler(new pangolin::Handler3D(camera)); + + while (!pangolin::ShouldQuit()) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + display3D.Activate(camera); + glClearColor(1.f, 1.f, 1.f, 1.0f); + + draw_scene(); + + pangolin::FinishFrame(); + + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } + } else { + setup_points(); + optimize(); + + if (!result_path.empty()) { + double error = alignButton(); + + std::ofstream os(result_path); + os << error << std::endl; + os.close(); + } + } + + return 0; +} + +void draw_scene() { + glPointSize(3); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + glColor3ubv(pose_color); + if (show_points) pangolin::glDrawPoints(mapper_points); + + glColor3ubv(gt_color); + pangolin::glDrawLineStrip(gt_frame_t_w_i); + + glColor3ubv(cam_color); + pangolin::glDrawLineStrip(vio_t_w_i); + + glColor3f(0.0, 1.0, 0.0); + if (show_edges) pangolin::glDrawLines(edges_vis); + + glColor3f(1.0, 0.0, 1.0); + if (show_edges) pangolin::glDrawLines(roll_pitch_vis); + + glColor3f(1.0, 0.0, 0.0); + if (show_edges) pangolin::glDrawLines(rel_edges_vis); + + for (const auto& kv : nrf_mapper->getFramePoses()) { + pangolin::glDrawAxis(kv.second.getPose().matrix(), 0.1); + } + + pangolin::glDrawAxis(Sophus::SE3d().matrix(), 1.0); +} + +void load_data(const std::string& calib_path, const std::string& cache_path) { + { + std::ifstream os(calib_path, std::ios::binary); + + if (os.is_open()) { + cereal::JSONInputArchive archive(os); + archive(calib); + std::cout << "Loaded camera with " << calib.intrinsics.size() + << " cameras" << std::endl; + + } else { + std::cerr << "could not load camera calibration " << calib_path + << std::endl; + std::abort(); + } + } + + { + std::string path = cache_path + "/gt_spline.cereal"; + std::cout << "path " << 
path << std::endl; + + std::ifstream is(path, std::ios::binary); + + if (is.is_open()) { + cereal::JSONInputArchive archive(is); + + int64_t t_ns; + Eigen::aligned_vector knots; + + archive(cereal::make_nvp("t_ns", t_ns)); + archive(cereal::make_nvp("knots", knots)); + + archive(cereal::make_nvp("gt_observations", gt_observations)); + archive(cereal::make_nvp("noisy_observations", noisy_observations)); + + std::cout << "path " << path << std::endl; + std::cout << "t_ns " << t_ns << std::endl; + std::cout << "knots " << knots.size() << std::endl; + + gt_spline.reset(new basalt::Se3Spline<5>(t_ns)); + + for (size_t i = 0; i < knots.size(); i++) { + gt_spline->knotsPushBack(knots[i]); + } + + is.close(); + } else { + std::cerr << "could not open " << path << std::endl; + std::abort(); + } + } + + { + int64_t dt_ns = int64_t(1e9) / 50; + + for (int64_t t_ns = 0; t_ns < gt_spline->maxTimeNs(); t_ns += dt_ns) { + gt_frame_t_w_i.emplace_back(gt_spline->pose(t_ns).translation()); + gt_frame_t_ns.emplace_back(t_ns); + } + } + + basalt::MargDataLoader mdl; + tbb::concurrent_bounded_queue marg_queue; + mdl.out_marg_queue = &marg_queue; + + mdl.start(cache_path); + + while (true) { + basalt::MargData::Ptr data; + marg_queue.pop(data); + + if (data.get()) { + int64_t t_ns = *data->kfs_to_marg.begin(); + marg_data[t_ns] = data; + } else { + break; + } + } + + std::cout << "Loaded " << marg_data.size() << " marg data." 
<< std::endl; +} + +void processMargData(basalt::MargData& m) { + BASALT_ASSERT(m.aom.total_size == size_t(m.abs_H.cols())); + + std::cout << "rank " << m.abs_H.fullPivLu().rank() << " size " + << m.abs_H.cols() << std::endl; + + basalt::AbsOrderMap aom_new; + std::set idx_to_keep; + std::set idx_to_marg; + + for (const auto& kv : m.aom.abs_order_map) { + if (kv.second.second == POSE_SIZE) { + for (size_t i = 0; i < POSE_SIZE; i++) + idx_to_keep.emplace(kv.second.first + i); + aom_new.abs_order_map.emplace(kv); + aom_new.total_size += POSE_SIZE; + } else if (kv.second.second == POSE_VEL_BIAS_SIZE) { + if (m.kfs_all.count(kv.first) > 0) { + for (size_t i = 0; i < POSE_SIZE; i++) + idx_to_keep.emplace(kv.second.first + i); + for (size_t i = POSE_SIZE; i < POSE_VEL_BIAS_SIZE; i++) + idx_to_marg.emplace(kv.second.first + i); + + aom_new.abs_order_map[kv.first] = + std::make_pair(aom_new.total_size, POSE_SIZE); + aom_new.total_size += POSE_SIZE; + + basalt::PoseStateWithLin p(m.frame_states.at(kv.first)); + m.frame_poses[kv.first] = p; + m.frame_states.erase(kv.first); + } else { + for (size_t i = 0; i < POSE_VEL_BIAS_SIZE; i++) + idx_to_marg.emplace(kv.second.first + i); + m.frame_states.erase(kv.first); + } + } else { + std::cerr << "Unknown size" << std::endl; + std::abort(); + } + + std::cout << kv.first << " " << kv.second.first << " " << kv.second.second + << std::endl; + } + + Eigen::MatrixXd marg_H_new; + Eigen::VectorXd marg_b_new; + basalt::MargHelper::marginalizeHelperSqToSq( + m.abs_H, m.abs_b, idx_to_keep, idx_to_marg, marg_H_new, marg_b_new); + + std::cout << "new rank " << marg_H_new.fullPivLu().rank() << " size " + << marg_H_new.cols() << std::endl; + + m.abs_H = marg_H_new; + m.abs_b = marg_b_new; + m.aom = aom_new; + + BASALT_ASSERT(m.aom.total_size == size_t(m.abs_H.cols())); +} + +void extractNonlinearFactors(basalt::MargData& m) { + size_t asize = m.aom.total_size; + std::cout << "asize " << asize << std::endl; + + Eigen::MatrixXd cov_old; + 
cov_old.setIdentity(asize, asize); + m.abs_H.ldlt().solveInPlace(cov_old); + + int64_t kf_id = *m.kfs_to_marg.cbegin(); + int kf_start_idx = m.aom.abs_order_map.at(kf_id).first; + + auto state_kf = m.frame_poses.at(kf_id); + + Sophus::SE3d T_w_i_kf = state_kf.getPose(); + + Eigen::Vector3d pos = T_w_i_kf.translation(); + Eigen::Vector3d yaw_dir_body = + T_w_i_kf.so3().inverse() * Eigen::Vector3d::UnitX(); + + Sophus::Matrix d_pos_d_T_w_i; + Sophus::Matrix d_yaw_d_T_w_i; + Sophus::Matrix d_rp_d_T_w_i; + + basalt::absPositionError(T_w_i_kf, pos, &d_pos_d_T_w_i); + basalt::yawError(T_w_i_kf, yaw_dir_body, &d_yaw_d_T_w_i); + basalt::rollPitchError(T_w_i_kf, T_w_i_kf.so3(), &d_rp_d_T_w_i); + + { + Eigen::MatrixXd J; + J.setZero(POSE_SIZE, asize); + J.block<3, POSE_SIZE>(0, kf_start_idx) = d_pos_d_T_w_i; + J.block<1, POSE_SIZE>(3, kf_start_idx) = d_yaw_d_T_w_i; + J.block<2, POSE_SIZE>(4, kf_start_idx) = d_rp_d_T_w_i; + + Sophus::Matrix6d cov_new = J * cov_old * J.transpose(); + + // std::cout << "cov_new\n" << cov_new << std::endl; + + basalt::RollPitchFactor rpf; + rpf.t_ns = kf_id; + rpf.R_w_i_meas = T_w_i_kf.so3(); + rpf.cov_inv = cov_new.block<2, 2>(4, 4).inverse(); + + roll_pitch_factors.emplace_back(rpf); + } + + for (int64_t other_id : m.kfs_all) { + if (m.frame_poses.count(other_id) == 0 || other_id == kf_id) { + continue; + } + + auto state_o = m.frame_poses.at(other_id); + + Sophus::SE3d T_w_i_o = state_o.getPose(); + Sophus::SE3d T_kf_o = T_w_i_kf.inverse() * T_w_i_o; + + int o_start_idx = m.aom.abs_order_map.at(other_id).first; + + Sophus::Matrix6d d_res_d_T_w_i, d_res_d_T_w_j; + basalt::relPoseError(T_kf_o, T_w_i_kf, T_w_i_o, &d_res_d_T_w_i, + &d_res_d_T_w_j); + + Eigen::MatrixXd J; + J.setZero(POSE_SIZE, asize); + J.block(0, kf_start_idx) = d_res_d_T_w_i; + J.block(0, o_start_idx) = d_res_d_T_w_j; + + Sophus::Matrix6d cov_new = J * cov_old * J.transpose(); + basalt::RelPoseFactor rpf; + rpf.t_i_ns = kf_id; + rpf.t_j_ns = other_id; + rpf.T_i_j = T_kf_o; + 
rpf.cov_inv.setIdentity(); + cov_new.ldlt().solveInPlace(rpf.cov_inv); + + // std::cout << "rpf.cov_inv\n" << rpf.cov_inv << std::endl; + + rel_pose_factors.emplace_back(rpf); + } +} + +void computeEdgeVis() { + edges_vis.clear(); + for (const auto& kv1 : nrf_mapper->lmdb.getObservations()) { + for (const auto& kv2 : kv1.second) { + Eigen::Vector3d p1 = nrf_mapper->getFramePoses() + .at(kv1.first.frame_id) + .getPose() + .translation(); + Eigen::Vector3d p2 = nrf_mapper->getFramePoses() + .at(kv2.first.frame_id) + .getPose() + .translation(); + + edges_vis.emplace_back(p1); + edges_vis.emplace_back(p2); + } + } + + roll_pitch_vis.clear(); + for (const auto& v : nrf_mapper->roll_pitch_factors) { + const Sophus::SE3d& T_w_i = + nrf_mapper->getFramePoses().at(v.t_ns).getPose(); + + Eigen::Vector3d p = T_w_i.translation(); + Eigen::Vector3d d = + v.R_w_i_meas * T_w_i.so3().inverse() * (-Eigen::Vector3d::UnitZ()); + + roll_pitch_vis.emplace_back(p); + roll_pitch_vis.emplace_back(p + 0.1 * d); + } + + rel_edges_vis.clear(); + for (const auto& v : nrf_mapper->rel_pose_factors) { + Eigen::Vector3d p1 = + nrf_mapper->getFramePoses().at(v.t_i_ns).getPose().translation(); + Eigen::Vector3d p2 = + nrf_mapper->getFramePoses().at(v.t_j_ns).getPose().translation(); + + rel_edges_vis.emplace_back(p1); + rel_edges_vis.emplace_back(p2); + } +} + +void optimize() { + nrf_mapper->optimize(); + nrf_mapper->get_current_points(mapper_points, mapper_point_ids); + // nrf_mapper->get_current_points_with_color(mapper_points, + // mapper_points_color, + // mapper_point_ids); + + computeEdgeVis(); +} + +void randomInc() { + Sophus::Vector6d rnd = Sophus::Vector6d::Random().array().abs(); + Sophus::SE3d random_inc = Sophus::se3_expd(rnd / 10); + + for (auto& kv : nrf_mapper->getFramePoses()) { + Sophus::SE3d pose = random_inc * kv.second.getPose(); + basalt::PoseStateWithLin p(kv.first, pose); + kv.second = p; + } + + computeEdgeVis(); +} + +void randomYawInc() { + Sophus::Vector6d rnd; + 
rnd.setZero(); + rnd[5] = std::abs(Eigen::Vector2d::Random()[0]); + + Sophus::SE3d random_inc = Sophus::se3_expd(rnd); + + std::cout << "random_inc\n" << random_inc.matrix() << std::endl; + + for (auto& kv : nrf_mapper->getFramePoses()) { + Sophus::SE3d pose = random_inc * kv.second.getPose(); + basalt::PoseStateWithLin p(kv.first, pose); + kv.second = p; + } + + computeEdgeVis(); +} + +double alignButton() { + Eigen::aligned_vector filter_t_w_i; + std::vector filter_t_ns; + + for (const auto& kv : nrf_mapper->getFramePoses()) { + filter_t_ns.emplace_back(kv.first); + filter_t_w_i.emplace_back(kv.second.getPose().translation()); + } + + return basalt::alignSVD(filter_t_ns, filter_t_w_i, gt_frame_t_ns, + gt_frame_t_w_i); +} + +void setup_points() { + for (auto& kv : nrf_mapper->getFramePoses()) { + for (size_t i = 0; i < calib.intrinsics.size(); i++) { + basalt::TimeCamId tcid(kv.first, i); + auto obs = noisy_observations.at(tcid); + + basalt::KeypointsData kd; + kd.corners = obs.pos; + + nrf_mapper->feature_corners[tcid] = kd; + + for (size_t j = 0; j < kd.corners.size(); j++) { + nrf_mapper->feature_tracks[obs.id[j]][tcid] = j; + } + } + } + + for (auto it = nrf_mapper->feature_tracks.cbegin(); + it != nrf_mapper->feature_tracks.cend();) { + if (it->second.size() < 5) { + it = nrf_mapper->feature_tracks.erase(it); + } else { + ++it; + } + } + + std::cerr << "nrf_mapper->feature_tracks.size() " + << nrf_mapper->feature_tracks.size() << std::endl; + + nrf_mapper->setup_opt(); + + nrf_mapper->get_current_points(mapper_points, mapper_point_ids); +} diff --git a/src/mapper_sim_naive.cpp b/src/mapper_sim_naive.cpp new file mode 100644 index 0000000..2ba362a --- /dev/null +++ b/src/mapper_sim_naive.cpp @@ -0,0 +1,766 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +#include + +#include + +// GUI functions +void draw_image_overlay(pangolin::View& v, size_t cam_id); +void draw_scene(); +void load_data(const std::string& calib_path); +void gen_data(); +void compute_projections(); +void setup_vio(); +void draw_plots(); +bool next_step(); +void alignButton(); + +static const int knot_time = 3; +// static const double obs_std_dev = 0.5; + +Eigen::Vector3d g(0, 0, -9.81); + +// std::random_device rd{}; +// std::mt19937 gen{rd()}; +std::mt19937 gen{1}; + +// Simulated data + +basalt::Se3Spline<5> gt_spline(int64_t(knot_time * 1e9)); + +Eigen::aligned_vector gt_points; +Eigen::aligned_vector gt_frame_T_w_i; +Eigen::aligned_vector gt_frame_t_w_i, vio_t_w_i; +std::vector gt_frame_t_ns, kf_t_ns; +Eigen::aligned_vector gt_accel, gt_gyro, gt_accel_bias, + gt_gyro_bias, noisy_accel, noisy_gyro, gt_vel; +std::vector gt_imu_t_ns; + +std::map gt_observations; +std::map noisy_observations; + +std::string marg_data_path; + +// VIO vars +basalt::Calibration calib; +basalt::VioEstimatorBase::Ptr vio; + +// Visualization vars +std::unordered_map vis_map; +tbb::concurrent_bounded_queue out_vis_queue; +tbb::concurrent_bounded_queue::Ptr> + out_state_queue; + +std::vector images; + +// Pangolin vars +constexpr int UI_WIDTH = 200; +pangolin::DataLog imu_data_log, vio_data_log, error_data_log; +pangolin::Plotter* plotter; + +pangolin::Var show_frame("ui.show_frame", 0, 0, 1000); + +pangolin::Var show_obs("ui.show_obs", true, false, true); +pangolin::Var show_obs_noisy("ui.show_obs_noisy", true, false, true); +pangolin::Var show_obs_vio("ui.show_obs_vio", true, false, true); + +pangolin::Var show_ids("ui.show_ids", false, false, true); + +pangolin::Var show_accel("ui.show_accel", false, false, true); +pangolin::Var show_gyro("ui.show_gyro", false, 
false, true); +pangolin::Var show_gt_vel("ui.show_gt_vel", false, false, true); +pangolin::Var show_gt_pos("ui.show_gt_pos", true, false, true); +pangolin::Var show_gt_bg("ui.show_gt_bg", false, false, true); +pangolin::Var show_gt_ba("ui.show_gt_ba", false, false, true); + +pangolin::Var show_est_vel("ui.show_est_vel", false, false, true); +pangolin::Var show_est_pos("ui.show_est_pos", true, false, true); +pangolin::Var show_est_bg("ui.show_est_bg", false, false, true); +pangolin::Var show_est_ba("ui.show_est_ba", false, false, true); + +using Button = pangolin::Var>; + +Button next_step_btn("ui.next_step", &next_step); + +pangolin::Var continue_btn("ui.continue", true, false, true); + +Button align_step_btn("ui.align_se3", &alignButton); + +int main(int argc, char** argv) { + srand(1); + + bool show_gui = true; + std::string cam_calib_path; + std::string result_path; + + CLI::App app{"App description"}; + + app.add_option("--show-gui", show_gui, "Show GUI"); + app.add_option("--cam-calib", cam_calib_path, + "Ground-truth camera calibration used for simulation.") + ->required(); + + app.add_option("--marg-data", marg_data_path, + "Folder to store marginalization data.") + ->required(); + + app.add_option("--result-path", result_path, + "Path to result file where the system will write RMSE ATE."); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError& e) { + return app.exit(e); + } + + load_data(cam_calib_path); + + gen_data(); + + setup_vio(); + + vio->out_vis_queue = &out_vis_queue; + vio->out_state_queue = &out_state_queue; + + std::thread t0([&]() { + for (size_t i = 0; i < gt_imu_t_ns.size(); i++) { + basalt::ImuData::Ptr data(new basalt::ImuData); + data->t_ns = gt_imu_t_ns[i]; + + data->accel = noisy_accel[i]; + data->gyro = noisy_gyro[i]; + + vio->addIMUToQueue(data); + } + + vio->addIMUToQueue(nullptr); + + std::cout << "Finished t0" << std::endl; + }); + + std::thread t1([&]() { + for (const auto& t_ns : kf_t_ns) { + 
basalt::OpticalFlowResult::Ptr data(new basalt::OpticalFlowResult); + data->t_ns = t_ns; + + for (size_t j = 0; j < calib.T_i_c.size(); j++) { + data->observations.emplace_back(); + basalt::TimeCamId tcid(data->t_ns, j); + const basalt::SimObservations& obs = noisy_observations.at(tcid); + for (size_t k = 0; k < obs.pos.size(); k++) { + Eigen::AffineCompact2f t; + t.setIdentity(); + t.translation() = obs.pos[k].cast(); + data->observations.back()[obs.id[k]] = t; + } + } + + vio->addVisionToQueue(data); + } + + vio->addVisionToQueue(nullptr); + + std::cout << "Finished t1" << std::endl; + }); + + std::thread t2([&]() { + basalt::VioVisualizationData::Ptr data; + + while (true) { + out_vis_queue.pop(data); + + if (data.get()) { + vis_map[data->t_ns] = data; + } else { + break; + } + } + + std::cout << "Finished t2" << std::endl; + }); + + std::thread t3([&]() { + basalt::PoseVelBiasState::Ptr data; + + while (true) { + out_state_queue.pop(data); + + if (!data.get()) break; + + int64_t t_ns = data->t_ns; + + // std::cerr << "t_ns " << t_ns << std::endl; + Sophus::SE3d T_w_i = data->T_w_i; + Eigen::Vector3d vel_w_i = data->vel_w_i; + Eigen::Vector3d bg = data->bias_gyro; + Eigen::Vector3d ba = data->bias_accel; + + vio_t_w_i.emplace_back(T_w_i.translation()); + + { + std::vector vals; + vals.push_back(t_ns * 1e-9); + + for (int i = 0; i < 3; i++) vals.push_back(vel_w_i[i]); + for (int i = 0; i < 3; i++) vals.push_back(T_w_i.translation()[i]); + for (int i = 0; i < 3; i++) vals.push_back(bg[i]); + for (int i = 0; i < 3; i++) vals.push_back(ba[i]); + + vio_data_log.Log(vals); + } + } + + std::cout << "Finished t3" << std::endl; + }); + + if (show_gui) { + pangolin::CreateWindowAndBind("Main", 1800, 1000); + + glEnable(GL_DEPTH_TEST); + + pangolin::View& img_view_display = + pangolin::CreateDisplay() + .SetBounds(0.4, 1.0, pangolin::Attach::Pix(UI_WIDTH), 0.5) + .SetLayout(pangolin::LayoutEqual); + + pangolin::View& plot_display = pangolin::CreateDisplay().SetBounds( + 
0.0, 0.4, pangolin::Attach::Pix(UI_WIDTH), 1.0); + + plotter = new pangolin::Plotter(&imu_data_log, 0.0, kf_t_ns.back() * 1e-9, + -10.0, 10.0, 0.01f, 0.01f); + plot_display.AddDisplay(*plotter); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + std::vector> img_view; + while (img_view.size() < calib.intrinsics.size()) { + std::shared_ptr iv(new pangolin::ImageView); + + size_t idx = img_view.size(); + img_view.push_back(iv); + + img_view_display.AddDisplay(*iv); + iv->extern_draw_function = + std::bind(&draw_image_overlay, std::placeholders::_1, idx); + } + + pangolin::OpenGlRenderState camera( + pangolin::ProjectionMatrix(640, 480, 400, 400, 320, 240, 0.001, 10000), + pangolin::ModelViewLookAt(15, 3, 15, 0, 0, 0, pangolin::AxisZ)); + + pangolin::View& display3D = + pangolin::CreateDisplay() + .SetAspect(-640 / 480.0) + .SetBounds(0.4, 1.0, 0.5, 1.0) + .SetHandler(new pangolin::Handler3D(camera)); + + while (!pangolin::ShouldQuit()) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + display3D.Activate(camera); + glClearColor(0.95f, 0.95f, 0.95f, 1.0f); + + draw_scene(); + + img_view_display.Activate(); + + if (show_frame.GuiChanged()) { + for (size_t i = 0; i < calib.intrinsics.size(); i++) { + // img_view[i]->SetImage(images[i]); + } + draw_plots(); + } + + if (show_accel.GuiChanged() || show_gyro.GuiChanged() || + show_gt_vel.GuiChanged() || show_gt_pos.GuiChanged() || + show_gt_ba.GuiChanged() || show_gt_bg.GuiChanged() || + show_est_vel.GuiChanged() || show_est_pos.GuiChanged() || + show_est_ba.GuiChanged() || show_est_bg.GuiChanged()) { + draw_plots(); + } + + pangolin::FinishFrame(); + + if (continue_btn) { + if (!next_step()) continue_btn = false; + } else { + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } + } + } + + t0.join(); + t1.join(); + t2.join(); + t3.join(); + // t4.join(); + + if (!result_path.empty()) { + Eigen::aligned_vector vio_t_w_i; + + auto it = 
vis_map.find(kf_t_ns.back()); + + if (it != vis_map.end()) { + for (const auto& t : it->second->states) + vio_t_w_i.emplace_back(t.translation()); + + } else { + std::cerr << "Could not find results!!" << std::endl; + } + + BASALT_ASSERT(kf_t_ns.size() == vio_t_w_i.size()); + + double error = + basalt::alignSVD(kf_t_ns, vio_t_w_i, gt_frame_t_ns, gt_frame_t_w_i); + + std::ofstream os(result_path); + os << error << std::endl; + os.close(); + } + + return 0; +} + +void draw_image_overlay(pangolin::View& v, size_t cam_id) { + UNUSED(v); + + size_t frame_id = show_frame; + basalt::TimeCamId tcid(kf_t_ns[frame_id], cam_id); + + if (show_obs) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (gt_observations.find(tcid) != gt_observations.end()) { + const basalt::SimObservations& cr = gt_observations.at(tcid); + + for (size_t i = 0; i < cr.pos.size(); i++) { + const float radius = 2; + const Eigen::Vector2f c = cr.pos[i].cast(); + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", cr.id[i]).Draw(c[0], c[1]); + } + + pangolin::GlFont::I().Text("%d gt points", cr.pos.size()).Draw(5, 20); + } + } + + if (show_obs_noisy) { + glLineWidth(1.0); + glColor3f(1.0, 1.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (noisy_observations.find(tcid) != noisy_observations.end()) { + const basalt::SimObservations& cr = noisy_observations.at(tcid); + + for (size_t i = 0; i < cr.pos.size(); i++) { + const float radius = 2; + const Eigen::Vector2f c = cr.pos[i].cast(); + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", cr.id[i]).Draw(c[0], c[1]); + } + + pangolin::GlFont::I().Text("%d noisy points", cr.pos.size()).Draw(5, 40); + } + } + + if (show_obs_vio) { + glLineWidth(1.0); + glColor3f(0.0, 0.0, 1.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, 
GL_ONE_MINUS_SRC_ALPHA); + + auto it = vis_map.find(gt_frame_t_ns[frame_id]); + + if (it != vis_map.end() && cam_id < it->second->projections.size()) { + const auto& points = it->second->projections[cam_id]; + + if (points.size() > 0) { + double min_id = points[0][2], max_id = points[0][2]; + for (size_t i = 0; i < points.size(); i++) { + min_id = std::min(min_id, points[i][2]); + max_id = std::max(max_id, points[i][2]); + } + + for (size_t i = 0; i < points.size(); i++) { + const float radius = 2; + const Eigen::Vector4d c = points[i]; + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", int(c[3])).Draw(c[0], c[1]); + } + } + + glColor3f(0.0, 0.0, 1.0); + pangolin::GlFont::I().Text("%d vio points", points.size()).Draw(5, 60); + } + } +} + +void draw_scene() { + glPointSize(3); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + glColor3ubv(gt_color); + pangolin::glDrawPoints(gt_points); + pangolin::glDrawLineStrip(gt_frame_t_w_i); + + glColor3ubv(cam_color); + pangolin::glDrawLineStrip(vio_t_w_i); + + size_t frame_id = show_frame; + + auto it = vis_map.find(kf_t_ns[frame_id]); + + if (it != vis_map.end()) { + for (const auto& p : it->second->states) + for (size_t i = 0; i < calib.T_i_c.size(); i++) + render_camera((p * calib.T_i_c[i]).matrix(), 2.0f, cam_color, 0.1f); + + for (const auto& p : it->second->frames) + for (size_t i = 0; i < calib.T_i_c.size(); i++) + render_camera((p * calib.T_i_c[i]).matrix(), 2.0f, pose_color, 0.1f); + + glColor3ubv(pose_color); + pangolin::glDrawPoints(it->second->points); + } + + // pangolin::glDrawAxis(gt_frame_T_w_i[frame_id].matrix(), 0.1); + + pangolin::glDrawAxis(Sophus::SE3d().matrix(), 1.0); +} + +void load_data(const std::string& calib_path) { + std::ifstream os(calib_path, std::ios::binary); + + if (os.is_open()) { + cereal::JSONInputArchive archive(os); + archive(calib); + std::cout << "Loaded camera with " << 
calib.intrinsics.size() << " cameras" + << std::endl; + + } else { + std::cerr << "could not load camera calibration " << calib_path + << std::endl; + std::abort(); + } +} + +void gen_data() { + // Save spline data + { + std::string path = marg_data_path + "/gt_spline.cereal"; + + std::cout << "Loading gt_spline " << path << std::endl; + + std::ifstream is(path, std::ios::binary); + { + cereal::JSONInputArchive archive(is); + + int64_t t_ns; + Eigen::aligned_vector knots; + + archive(cereal::make_nvp("t_ns", t_ns)); + archive(cereal::make_nvp("knots", knots)); + + gt_spline = basalt::Se3Spline<5>(t_ns); + + for (size_t i = 0; i < knots.size(); i++) { + gt_spline.knotsPushBack(knots[i]); + } + + archive(cereal::make_nvp("noisy_accel", noisy_accel)); + archive(cereal::make_nvp("noisy_gyro", noisy_gyro)); + archive(cereal::make_nvp("noisy_accel", gt_accel)); + archive(cereal::make_nvp("gt_gyro", gt_gyro)); + archive(cereal::make_nvp("gt_accel_bias", gt_accel_bias)); + archive(cereal::make_nvp("gt_gyro_bias", gt_gyro_bias)); + + archive(cereal::make_nvp("gt_points", gt_points)); + + archive(cereal::make_nvp("gt_observations", gt_observations)); + archive(cereal::make_nvp("noisy_observations", noisy_observations)); + + archive(cereal::make_nvp("gt_points", gt_points)); + + archive(cereal::make_nvp("gt_frame_t_ns", gt_frame_t_ns)); + archive(cereal::make_nvp("gt_imu_t_ns", gt_imu_t_ns)); + } + + gt_frame_t_w_i.clear(); + for (int64_t t_ns : gt_frame_t_ns) { + gt_frame_t_w_i.emplace_back(gt_spline.pose(t_ns).translation()); + } + + is.close(); + } + + basalt::MargDataLoader mdl; + tbb::concurrent_bounded_queue marg_queue; + mdl.out_marg_queue = &marg_queue; + + mdl.start(marg_data_path); + + Eigen::aligned_map tmp_poses; + + while (true) { + basalt::MargData::Ptr data; + marg_queue.pop(data); + + if (data.get()) { + for (const auto& kv : data->frame_poses) { + tmp_poses[kv.first] = kv.second.getPose(); + } + + for (const auto& kv : data->frame_states) { + if 
(data->kfs_all.count(kv.first) > 0) { + tmp_poses[kv.first] = kv.second.getState().T_w_i; + } + } + + } else { + break; + } + } + + for (const auto& kv : tmp_poses) { + kf_t_ns.emplace_back(kv.first); + } + + show_frame.Meta().range[1] = kf_t_ns.size() - 1; +} + +void draw_plots() { + plotter->ClearSeries(); + plotter->ClearMarkers(); + + if (show_accel) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "accel measurements x"); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "accel measurements y"); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "accel measurements z"); + } + + if (show_gyro) { + plotter->AddSeries("$0", "$4", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "gyro measurements x"); + plotter->AddSeries("$0", "$5", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "gyro measurements y"); + plotter->AddSeries("$0", "$6", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "gyro measurements z"); + } + + if (show_gt_vel) { + plotter->AddSeries("$0", "$7", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "ground-truth velocity x"); + plotter->AddSeries("$0", "$8", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "ground-truth velocity y"); + plotter->AddSeries("$0", "$9", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "ground-truth velocity z"); + } + + if (show_gt_pos) { + plotter->AddSeries("$0", "$10", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "ground-truth position x"); + plotter->AddSeries("$0", "$11", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "ground-truth position y"); + plotter->AddSeries("$0", "$12", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "ground-truth position z"); + } + + if (show_gt_bg) { + plotter->AddSeries("$0", "$13", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "ground-truth gyro bias x"); + 
plotter->AddSeries("$0", "$14", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "ground-truth gyro bias y"); + plotter->AddSeries("$0", "$15", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "ground-truth gyro bias z"); + } + + if (show_gt_ba) { + plotter->AddSeries("$0", "$16", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "ground-truth accel bias x"); + plotter->AddSeries("$0", "$17", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "ground-truth accel bias y"); + plotter->AddSeries("$0", "$18", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "ground-truth accel bias z"); + } + + if (show_est_vel) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "estimated velocity x", + &vio_data_log); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "estimated velocity y", + &vio_data_log); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "estimated velocity z", + &vio_data_log); + } + + if (show_est_pos) { + plotter->AddSeries("$0", "$4", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "estimated position x", + &vio_data_log); + plotter->AddSeries("$0", "$5", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "estimated position y", + &vio_data_log); + plotter->AddSeries("$0", "$6", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "estimated position z", + &vio_data_log); + } + + if (show_est_bg) { + plotter->AddSeries("$0", "$7", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "estimated gyro bias x", + &vio_data_log); + plotter->AddSeries("$0", "$8", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "estimated gyro bias y", + &vio_data_log); + plotter->AddSeries("$0", "$9", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "estimated gyro bias z", + &vio_data_log); + } + + if (show_est_ba) { + plotter->AddSeries("$0", "$10", pangolin::DrawingModeLine, + pangolin::Colour::Red(), 
"estimated accel bias x", + &vio_data_log); + plotter->AddSeries("$0", "$11", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "estimated accel bias y", + &vio_data_log); + plotter->AddSeries("$0", "$12", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "estimated accel bias z", + &vio_data_log); + } + + double t = kf_t_ns[show_frame] * 1e-9; + plotter->AddMarker(pangolin::Marker::Vertical, t, pangolin::Marker::Equal, + pangolin::Colour::White()); +} + +void setup_vio() { + int64_t t_init_ns = kf_t_ns.front(); + Sophus::SE3d T_w_i_init = gt_spline.pose(t_init_ns); + Eigen::Vector3d vel_w_i_init = gt_spline.transVelWorld(t_init_ns); + + std::cout << "Setting up filter: t_ns " << t_init_ns << std::endl; + std::cout << "T_w_i\n" << T_w_i_init.matrix() << std::endl; + std::cout << "vel_w_i " << vel_w_i_init.transpose() << std::endl; + + basalt::VioConfig config; + config.vio_debug = true; + + vio = basalt::VioEstimatorFactory::getVioEstimator(config, calib, g, true, + true); + vio->initialize(t_init_ns, T_w_i_init, vel_w_i_init, gt_gyro_bias.front(), + gt_accel_bias.front()); + + vio->setMaxStates(10000); + vio->setMaxKfs(10000); + + // int iteration = 0; + vio_data_log.Clear(); + error_data_log.Clear(); + vio_t_w_i.clear(); +} + +bool next_step() { + if (show_frame < int(kf_t_ns.size()) - 1) { + show_frame = show_frame + 1; + show_frame.Meta().gui_changed = true; + return true; + } else { + return false; + } +} + +void alignButton() { + Eigen::aligned_vector vio_t_w_i; + + auto it = vis_map.find(kf_t_ns.back()); + + if (it != vis_map.end()) { + for (const auto& t : it->second->states) + vio_t_w_i.emplace_back(t.translation()); + + } else { + std::cerr << "Could not find results!!" 
<< std::endl; + } + + BASALT_ASSERT(kf_t_ns.size() == vio_t_w_i.size()); + + basalt::alignSVD(kf_t_ns, vio_t_w_i, gt_frame_t_ns, gt_frame_t_w_i); +} diff --git a/src/opt_flow.cpp b/src/opt_flow.cpp new file mode 100644 index 0000000..d95924d --- /dev/null +++ b/src/opt_flow.cpp @@ -0,0 +1,356 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include + +#include + +#include + +constexpr int UI_WIDTH = 200; + +void draw_image_overlay(pangolin::View& v, size_t cam_id); +void load_data(const std::string& calib_path); +bool next_step(); +bool prev_step(); + +pangolin::Var show_frame("ui.show_frame", 0, 0, 1500); +pangolin::Var show_obs("ui.show_obs", true, false, true); +pangolin::Var show_ids("ui.show_ids", false, false, true); + +using Button = pangolin::Var>; +Button next_step_btn("ui.next_step", &next_step); +Button prev_step_btn("ui.prev_step", &prev_step); +pangolin::Var continue_btn("ui.continue", true, false, true); + +// Opt flow variables +basalt::VioDatasetPtr vio_dataset; + +basalt::VioConfig vio_config; +basalt::OpticalFlowBase::Ptr opt_flow_ptr; + +tbb::concurrent_unordered_map> + observations; +tbb::concurrent_bounded_queue + observations_queue; + +basalt::Calibration calib; + +std::unordered_map keypoint_stats; + +void feed_images() { + std::cout << "Started input_data thread " << std::endl; + + for (size_t i = 0; i < vio_dataset->get_image_timestamps().size(); i++) { + basalt::OpticalFlowInput::Ptr data(new basalt::OpticalFlowInput); + + data->t_ns = vio_dataset->get_image_timestamps()[i]; + data->img_data = vio_dataset->get_image_data(data->t_ns); + + opt_flow_ptr->input_queue.push(data); + } + + // Indicate the end of the sequence + basalt::OpticalFlowInput::Ptr data; + opt_flow_ptr->input_queue.push(data); + + std::cout << "Finished input_data thread " << std::endl; +} + +void read_result() { + std::cout << "Started read_result thread " << std::endl; + + basalt::OpticalFlowResult::Ptr res; + + while (true) { + observations_queue.pop(res); + if (!res.get()) break; + + res->input_images.reset(); + + observations.emplace(res->t_ns, res); + + for (size_t i = 0; i < res->observations.size(); i++) + for (const auto& kv : 
res->observations.at(i)) { + if (keypoint_stats.count(kv.first) == 0) { + keypoint_stats[kv.first] = 1; + } else { + keypoint_stats[kv.first]++; + } + } + } + + std::cout << "Finished read_result thread " << std::endl; + + double sum = 0; + + for (const auto& kv : keypoint_stats) { + sum += kv.second; + } + + std::cout << "Mean track length: " << sum / keypoint_stats.size() + << " num_points: " << keypoint_stats.size() << std::endl; +} + +int main(int argc, char** argv) { + bool show_gui = true; + std::string cam_calib_path; + std::string dataset_path; + std::string dataset_type; + std::string config_path; + + CLI::App app{"App description"}; + + app.add_option("--show-gui", show_gui, "Show GUI"); + app.add_option("--cam-calib", cam_calib_path, + "Ground-truth camera calibration used for simulation.") + ->required(); + + app.add_option("--dataset-path", dataset_path, "Path to dataset.") + ->required(); + + app.add_option("--dataset-type", dataset_type, "Type of dataset.") + ->required(); + + app.add_option("--config-path", config_path, "Path to config file."); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError& e) { + return app.exit(e); + } + + if (!config_path.empty()) { + vio_config.load(config_path); + } + + load_data(cam_calib_path); + + { + basalt::DatasetIoInterfacePtr dataset_io = + basalt::DatasetIoFactory::getDatasetIo(dataset_type); + + dataset_io->read(dataset_path); + + vio_dataset = dataset_io->get_data(); + vio_dataset->get_image_timestamps().erase( + vio_dataset->get_image_timestamps().begin()); + + show_frame.Meta().range[1] = vio_dataset->get_image_timestamps().size() - 1; + show_frame.Meta().gui_changed = true; + + opt_flow_ptr = + basalt::OpticalFlowFactory::getOpticalFlow(vio_config, calib); + if (show_gui) opt_flow_ptr->output_queue = &observations_queue; + observations_queue.set_capacity(100); + + keypoint_stats.reserve(50000); + } + + std::thread t1(&feed_images); + + if (show_gui) { + std::thread t2(&read_result); + + 
pangolin::CreateWindowAndBind("Main", 1800, 1000); + + glEnable(GL_DEPTH_TEST); + + pangolin::View& img_view_display = + pangolin::CreateDisplay() + .SetBounds(0, 1.0, pangolin::Attach::Pix(UI_WIDTH), 1.0) + .SetLayout(pangolin::LayoutEqual); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + std::vector> img_view; + while (img_view.size() < calib.intrinsics.size()) { + std::shared_ptr iv(new pangolin::ImageView); + + size_t idx = img_view.size(); + img_view.push_back(iv); + + img_view_display.AddDisplay(*iv); + iv->extern_draw_function = + std::bind(&draw_image_overlay, std::placeholders::_1, idx); + } + + while (!pangolin::ShouldQuit()) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + glClearColor(0.95f, 0.95f, 0.95f, 1.0f); + + img_view_display.Activate(); + + if (show_frame.GuiChanged()) { + size_t frame_id = static_cast(show_frame); + int64_t timestamp = vio_dataset->get_image_timestamps()[frame_id]; + + const std::vector& img_vec = + vio_dataset->get_image_data(timestamp); + + for (size_t cam_id = 0; cam_id < calib.intrinsics.size(); cam_id++) { + if (img_vec[cam_id].img.get()) { + auto img = img_vec[cam_id].img; + + pangolin::GlPixFormat fmt; + fmt.glformat = GL_LUMINANCE; + fmt.gltype = GL_UNSIGNED_SHORT; + fmt.scalable_internal_format = GL_LUMINANCE16; + + img_view[cam_id]->SetImage(img->ptr, img->w, img->h, img->pitch, + fmt); + } else { + img_view[cam_id]->Clear(); + } + } + } + + pangolin::FinishFrame(); + + if (continue_btn) { + if (!next_step()) { + continue_btn = false; + } + } else { + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } + } + + t2.join(); + } + + t1.join(); + + return 0; +} + +void draw_image_overlay(pangolin::View& v, size_t cam_id) { + UNUSED(v); + + size_t frame_id = static_cast(show_frame); + int64_t t_ns = vio_dataset->get_image_timestamps()[frame_id]; + + if (show_obs) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + 
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (observations.count(t_ns) > 0) { + const Eigen::aligned_map& + kp_map = observations.at(t_ns)->observations[cam_id]; + + for (const auto& kv : kp_map) { + Eigen::MatrixXf transformed_patch = + kv.second.linear() * opt_flow_ptr->patch_coord; + transformed_patch.colwise() += kv.second.translation(); + + for (int i = 0; i < transformed_patch.cols(); i++) { + const Eigen::Vector2f c = transformed_patch.col(i); + pangolin::glDrawCirclePerimeter(c[0], c[1], 0.5f); + } + + const Eigen::Vector2f c = kv.second.translation(); + + if (show_ids) + pangolin::GlFont::I().Text("%d", kv.first).Draw(5 + c[0], 5 + c[1]); + } + + pangolin::GlFont::I() + .Text("Tracked %d keypoints", kp_map.size()) + .Draw(5, 20); + } + } +} + +void load_data(const std::string& calib_path) { + std::ifstream os(calib_path, std::ios::binary); + + if (os.is_open()) { + cereal::JSONInputArchive archive(os); + archive(calib); + std::cout << "Loaded camera with " << calib.intrinsics.size() << " cameras" + << std::endl; + + } else { + std::cerr << "could not load camera calibration " << calib_path + << std::endl; + std::abort(); + } +} + +bool next_step() { + if (show_frame < int(vio_dataset->get_image_timestamps().size()) - 1) { + show_frame = show_frame + 1; + show_frame.Meta().gui_changed = true; + return true; + } else { + return false; + } +} + +bool prev_step() { + if (show_frame > 0) { + show_frame = show_frame - 1; + show_frame.Meta().gui_changed = true; + return true; + } else { + return false; + } +} diff --git a/src/optical_flow/optical_flow.cpp b/src/optical_flow/optical_flow.cpp new file mode 100644 index 0000000..e8546fd --- /dev/null +++ b/src/optical_flow/optical_flow.cpp @@ -0,0 +1,131 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include +#include +#include + +namespace basalt { + +OpticalFlowBase::Ptr OpticalFlowFactory::getOpticalFlow( + const VioConfig& config, const Calibration& cam) { + OpticalFlowBase::Ptr res; + + if (config.optical_flow_type == "patch") { + switch (config.optical_flow_pattern) { + case 24: + res.reset(new PatchOpticalFlow(config, cam)); + break; + + case 52: + res.reset(new PatchOpticalFlow(config, cam)); + break; + + case 51: + res.reset(new PatchOpticalFlow(config, cam)); + break; + + case 50: + res.reset(new PatchOpticalFlow(config, cam)); + break; + + default: + std::cerr << "config.optical_flow_pattern " + << config.optical_flow_pattern << " is not supported." + << std::endl; + std::abort(); + } + } + + if (config.optical_flow_type == "frame_to_frame") { + switch (config.optical_flow_pattern) { + case 24: + res.reset(new FrameToFrameOpticalFlow(config, cam)); + break; + + case 52: + res.reset(new FrameToFrameOpticalFlow(config, cam)); + break; + + case 51: + res.reset(new FrameToFrameOpticalFlow(config, cam)); + break; + + case 50: + res.reset(new FrameToFrameOpticalFlow(config, cam)); + break; + + default: + std::cerr << "config.optical_flow_pattern " + << config.optical_flow_pattern << " is not supported." + << std::endl; + std::abort(); + } + } + + if (config.optical_flow_type == "multiscale_frame_to_frame") { + switch (config.optical_flow_pattern) { + case 24: + res.reset(new MultiscaleFrameToFrameOpticalFlow( + config, cam)); + break; + + case 52: + res.reset(new MultiscaleFrameToFrameOpticalFlow( + config, cam)); + break; + + case 51: + res.reset(new MultiscaleFrameToFrameOpticalFlow( + config, cam)); + break; + + case 50: + res.reset(new MultiscaleFrameToFrameOpticalFlow( + config, cam)); + break; + + default: + std::cerr << "config.optical_flow_pattern " + << config.optical_flow_pattern << " is not supported." 
+ << std::endl; + std::abort(); + } + } + return res; +} +} // namespace basalt diff --git a/src/rs_t265_record.cpp b/src/rs_t265_record.cpp new file mode 100644 index 0000000..7889f28 --- /dev/null +++ b/src/rs_t265_record.cpp @@ -0,0 +1,503 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko, Michael Loipführer and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include + +constexpr int UI_WIDTH = 200; + +basalt::RsT265Device::Ptr t265_device; + +std::shared_ptr imu_log; + +pangolin::Var webp_quality("ui.webp_quality", 90, 0, 101); +pangolin::Var skip_frames("ui.skip_frames", 1, 1, 10); +pangolin::Var exposure("ui.exposure", 5.0, 1, 20); + +tbb::concurrent_bounded_queue image_data_queue, + image_data_queue2; +tbb::concurrent_bounded_queue::Ptr> imu_data_queue; +tbb::concurrent_bounded_queue pose_data_queue; + +std::atomic stop_workers; +std::atomic recording; + +std::string dataset_dir; + +static constexpr int NUM_CAMS = basalt::RsT265Device::NUM_CAMS; +static constexpr int NUM_WORKERS = 8; + +std::ofstream cam_data[NUM_CAMS], exposure_data[NUM_CAMS], imu0_data, pose_data; + +std::vector worker_threads; +std::thread imu_worker_thread, pose_worker_thread, exposure_save_thread, + stop_recording_thread; + +#if CV_MAJOR_VERSION >= 3 +std::string file_extension = ".webp"; +#else +std::string file_extension = ".jpg"; +#endif + +// manual exposure mode, if not enabled will also record pose data +bool manual_exposure; + +void exposure_save_worker() { + basalt::OpticalFlowInput::Ptr img; + while (!stop_workers) { + if (image_data_queue.try_pop(img)) { + for (size_t cam_id = 0; cam_id < NUM_CAMS; ++cam_id) { + cam_data[cam_id] << img->t_ns << "," << img->t_ns << file_extension + << std::endl; + + exposure_data[cam_id] << img->t_ns << "," + << int64_t(img->img_data[cam_id].exposure * 1e9) + << std::endl; + } + + image_data_queue2.push(img); + } else { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } +} + +void image_save_worker() { + basalt::OpticalFlowInput::Ptr img; + + while (!stop_workers) { + if (image_data_queue2.try_pop(img)) { + for (size_t cam_id = 0; cam_id < NUM_CAMS; ++cam_id) { + 
basalt::ManagedImage::Ptr image_raw = + img->img_data[cam_id].img; + + if (!image_raw.get()) continue; + + cv::Mat image(image_raw->h, image_raw->w, CV_8U); + + uint8_t *dst = image.ptr(); + const uint16_t *src = image_raw->ptr; + + for (size_t i = 0; i < image_raw->size(); i++) { + dst[i] = (src[i] >> 8); + } + +#if CV_MAJOR_VERSION >= 3 + std::string filename = dataset_dir + "mav0/cam" + + std::to_string(cam_id) + "/data/" + + std::to_string(img->t_ns) + ".webp"; + + std::vector compression_params = {cv::IMWRITE_WEBP_QUALITY, + webp_quality}; + cv::imwrite(filename, image, compression_params); +#else + std::string filename = dataset_dir + "mav0/cam" + + std::to_string(cam_id) + "/data/" + + std::to_string(img->t_ns) + ".jpg"; + + std::vector compression_params = {cv::IMWRITE_JPEG_QUALITY, + webp_quality}; + cv::imwrite(filename, image, compression_params); +#endif + } + + } else { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } +} + +void imu_save_worker() { + basalt::ImuData::Ptr data; + + while (!stop_workers) { + if (imu_data_queue.try_pop(data)) { + if (imu_log.get()) + imu_log->Log(data->accel[0], data->accel[1], data->accel[2]); + + if (recording) { + imu0_data << data->t_ns << "," << data->gyro[0] << "," << data->gyro[1] + << "," << data->gyro[2] << "," << data->accel[0] << "," + << data->accel[1] << "," << data->accel[2] << "\n"; + } + + } else { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } +} + +void pose_save_worker() { + basalt::RsPoseData data; + + while (!stop_workers) { + if (pose_data_queue.try_pop(data)) { + if (recording) { + pose_data << data.t_ns << "," << data.data.translation().x() << "," + << data.data.translation().y() << "," + << data.data.translation().z() << "," + << data.data.unit_quaternion().w() << "," + << data.data.unit_quaternion().x() << "," + << data.data.unit_quaternion().y() << "," + << data.data.unit_quaternion().z() << std::endl; + } + + } else { + 
std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } +} + +void save_calibration(const basalt::RsT265Device::Ptr &device) { + auto calib = device->exportCalibration(); + + if (calib) { + std::ofstream os(dataset_dir + "/calibration.json"); + cereal::JSONOutputArchive archive(os); + + archive(*calib); + } +} + +inline std::string get_date() { + constexpr int MAX_DATE = 64; + time_t now; + char the_date[MAX_DATE]; + + the_date[0] = '\0'; + + now = time(nullptr); + + if (now != -1) { + strftime(the_date, MAX_DATE, "%Y_%m_%d_%H_%M_%S", gmtime(&now)); + } + + return std::string(the_date); +} + +void startRecording(const std::string &dir_path) { + if (!recording) { + if (stop_recording_thread.joinable()) stop_recording_thread.join(); + + dataset_dir = dir_path + "dataset_" + get_date() + "/"; + + basalt::fs::create_directory(dataset_dir); + basalt::fs::create_directory(dataset_dir + "mav0/"); + basalt::fs::create_directory(dataset_dir + "mav0/cam0/"); + basalt::fs::create_directory(dataset_dir + "mav0/cam0/data/"); + basalt::fs::create_directory(dataset_dir + "mav0/cam1/"); + basalt::fs::create_directory(dataset_dir + "mav0/cam1/data/"); + basalt::fs::create_directory(dataset_dir + "mav0/imu0/"); + + cam_data[0].open(dataset_dir + "mav0/cam0/data.csv"); + cam_data[1].open(dataset_dir + "mav0/cam1/data.csv"); + exposure_data[0].open(dataset_dir + "mav0/cam0/exposure.csv"); + exposure_data[1].open(dataset_dir + "mav0/cam1/exposure.csv"); + imu0_data.open(dataset_dir + "mav0/imu0/data.csv"); + + if (!manual_exposure) { + basalt::fs::create_directory(dataset_dir + "mav0/realsense0/"); + pose_data.open(dataset_dir + "mav0/realsense0/data.csv"); + pose_data << "#timestamp [ns], p_RS_R_x [m], p_RS_R_y [m], p_RS_R_z [m], " + "q_RS_w [], q_RS_x [], q_RS_y [], q_RS_z []\n"; + } + + cam_data[0] << "#timestamp [ns], filename\n"; + cam_data[1] << "#timestamp [ns], filename\n"; + exposure_data[0] << "#timestamp [ns], exposure time[ns]\n"; + exposure_data[1] << 
"#timestamp [ns], exposure time[ns]\n"; + imu0_data << "#timestamp [ns],w_RS_S_x [rad s^-1],w_RS_S_y [rad " + "s^-1],w_RS_S_z [rad s^-1],a_RS_S_x [m s^-2],a_RS_S_y " + "[m s^-2],a_RS_S_z [m s^-2]\n"; + + save_calibration(t265_device); + t265_device->image_data_queue = &image_data_queue; + t265_device->imu_data_queue = &imu_data_queue; + t265_device->pose_data_queue = &pose_data_queue; + + std::cout << "Started recording dataset in " << dataset_dir << std::endl; + + recording = true; + } else { + std::cout << "Already recording" << std::endl; + } +} + +void stopRecording() { + if (recording) { + auto stop_recording_func = [&]() { + t265_device->imu_data_queue = nullptr; + t265_device->pose_data_queue = nullptr; + t265_device->image_data_queue = nullptr; + + while (!image_data_queue.empty() || !image_data_queue2.empty() || + !imu_data_queue.empty() || !pose_data_queue.empty()) { + std::cout << "Waiting until the data from the queues is written to the " + "hard drive." + << std::endl; + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + } + + recording = false; + cam_data[0].close(); + cam_data[1].close(); + exposure_data[0].close(); + exposure_data[1].close(); + imu0_data.close(); + pose_data.close(); + + std::cout << "Stopped recording dataset in " << dataset_dir << std::endl; + }; + + stop_recording_thread = std::thread(stop_recording_func); + } +} + +void toggleRecording(const std::string &dir_path) { + if (recording) { + stopRecording(); + } else { + startRecording(dir_path); + } +} + +int main(int argc, char *argv[]) { + CLI::App app{"Record RealSense T265 Data"}; + + std::string dataset_path; + + app.add_option("--dataset-path", dataset_path, "Path to dataset"); + app.add_flag("--manual-exposure", manual_exposure, + "If set will enable manual exposure."); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError &e) { + return app.exit(e); + } + + if (!dataset_path.empty() && dataset_path[dataset_path.length() - 1] != '/') { + 
dataset_path += '/'; + } + + bool show_gui = true; + + stop_workers = false; + if (worker_threads.empty()) { + for (int i = 0; i < NUM_WORKERS; i++) { + worker_threads.emplace_back(image_save_worker); + } + } + + exposure_save_thread = std::thread(exposure_save_worker); + imu_worker_thread = std::thread(imu_save_worker); + pose_worker_thread = std::thread(pose_save_worker); + + image_data_queue.set_capacity(1000); + image_data_queue2.set_capacity(1000); + imu_data_queue.set_capacity(10000); + pose_data_queue.set_capacity(10000); + + // realsense + t265_device.reset(new basalt::RsT265Device(manual_exposure, skip_frames, + webp_quality, exposure)); + + t265_device->start(); + imu_log.reset(new pangolin::DataLog); + + if (show_gui) { + pangolin::CreateWindowAndBind("Record RealSense T265", 1200, 800); + + pangolin::Var> record_btn( + "ui.record", [&] { return toggleRecording(dataset_path); }); + pangolin::Var> export_calibration( + "ui.export_calib", [&] { return save_calibration(t265_device); }); + + std::atomic record_t_ns; + record_t_ns = 0; + + glEnable(GL_DEPTH_TEST); + + pangolin::View &img_view_display = + pangolin::CreateDisplay() + .SetBounds(0.4, 1.0, pangolin::Attach::Pix(UI_WIDTH), 1.0) + .SetLayout(pangolin::LayoutEqual); + + pangolin::View &plot_display = pangolin::CreateDisplay().SetBounds( + 0.0, 0.4, pangolin::Attach::Pix(UI_WIDTH), 1.0); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + std::vector> img_view; + while (img_view.size() < basalt::RsT265Device::NUM_CAMS) { + int idx = img_view.size(); + std::shared_ptr iv(new pangolin::ImageView); + + iv->extern_draw_function = [&, idx](pangolin::View &v) { + UNUSED(v); + + glLineWidth(1.0); + glColor3f(1.0, 0.0, 0.0); // red + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (t265_device->last_img_data.get()) + pangolin::GlFont::I() + .Text("Exposure: %.3f ms.", + t265_device->last_img_data->img_data[idx].exposure * 1000.0) + 
.Draw(30, 30); + + if (idx == 0) { + pangolin::GlFont::I() + .Text("Queue: %d.", image_data_queue2.size()) + .Draw(30, 60); + } + + if (idx == 0 && recording) { + pangolin::GlFont::I().Text("Recording").Draw(30, 90); + } + }; + + iv->OnSelectionCallback = + [&](pangolin::ImageView::OnSelectionEventData o) { + UNUSED(o); + + int64_t curr_t_ns = std::chrono::high_resolution_clock::now() + .time_since_epoch() + .count(); + if (std::abs(record_t_ns - curr_t_ns) > int64_t(2e9)) { + toggleRecording(dataset_path); + record_t_ns = curr_t_ns; + } + }; + + img_view.push_back(iv); + img_view_display.AddDisplay(*iv); + } + + imu_log->Clear(); + + std::vector labels; + labels.push_back(std::string("accel x")); + labels.push_back(std::string("accel y")); + labels.push_back(std::string("accel z")); + imu_log->SetLabels(labels); + + pangolin::Plotter plotter(imu_log.get(), 0.0f, 2000.0f, -15.0f, 15.0f, 0.1f, + 0.1f); + plotter.SetBounds(0.0, 1.0, 0.0, 1.0); + plotter.Track("$i"); + + plot_display.AddDisplay(plotter); + + plotter.ClearSeries(); + plotter.AddSeries("$i", "$0", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "accel x"); + plotter.AddSeries("$i", "$1", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "accel y"); + plotter.AddSeries("$i", "$2", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "accel z"); + + while (!pangolin::ShouldQuit()) { + { + pangolin::GlPixFormat fmt; + fmt.glformat = GL_LUMINANCE; + fmt.gltype = GL_UNSIGNED_SHORT; + fmt.scalable_internal_format = GL_LUMINANCE16; + + if (t265_device->last_img_data.get()) + for (size_t cam_id = 0; cam_id < basalt::RsT265Device::NUM_CAMS; + cam_id++) { + if (t265_device->last_img_data->img_data[cam_id].img.get()) + img_view[cam_id]->SetImage( + t265_device->last_img_data->img_data[cam_id].img->ptr, + t265_device->last_img_data->img_data[cam_id].img->w, + t265_device->last_img_data->img_data[cam_id].img->h, + t265_device->last_img_data->img_data[cam_id].img->pitch, fmt); + } + } + + if 
(manual_exposure && exposure.GuiChanged()) { + t265_device->setExposure(exposure); + } + + if (webp_quality.GuiChanged()) { + t265_device->setWebpQuality(webp_quality); + } + + if (skip_frames.GuiChanged()) { + t265_device->setSkipFrames(skip_frames); + } + + pangolin::FinishFrame(); + + std::this_thread::sleep_for(std::chrono::milliseconds(15)); + } + } + + if (recording) stopRecording(); + stop_workers = true; + + for (auto &t : worker_threads) t.join(); + imu_worker_thread.join(); + pose_worker_thread.join(); + + return EXIT_SUCCESS; +} diff --git a/src/rs_t265_vio.cpp b/src/rs_t265_vio.cpp new file mode 100644 index 0000000..e4a6c0b --- /dev/null +++ b/src/rs_t265_vio.cpp @@ -0,0 +1,503 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko, Michael Loipführer and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include + +// GUI functions +void draw_image_overlay(pangolin::View& v, size_t cam_id); +void draw_scene(); +void load_data(const std::string& calib_path); +void draw_plots(); + +// Pangolin variables +constexpr int UI_WIDTH = 200; + +basalt::RsT265Device::Ptr t265_device; + +using Button = pangolin::Var>; + +pangolin::DataLog imu_data_log, vio_data_log, error_data_log; +pangolin::Plotter* plotter; + +pangolin::Var show_obs("ui.show_obs", true, false, true); +pangolin::Var show_ids("ui.show_ids", false, false, true); + +pangolin::Var show_est_pos("ui.show_est_pos", true, false, true); +pangolin::Var show_est_vel("ui.show_est_vel", false, false, true); +pangolin::Var show_est_bg("ui.show_est_bg", false, false, true); +pangolin::Var show_est_ba("ui.show_est_ba", false, false, true); + +pangolin::Var show_gt("ui.show_gt", true, false, true); + +pangolin::Var follow("ui.follow", true, false, true); + +// Visualization variables +basalt::VioVisualizationData::Ptr curr_vis_data; + +tbb::concurrent_bounded_queue out_vis_queue; +tbb::concurrent_bounded_queue::Ptr> + out_state_queue; + +std::vector vio_t_ns; +Eigen::aligned_vector vio_t_w_i; + +std::string marg_data_path; + +std::mutex m; +bool step_by_step = false; 
+int64_t curr_t_ns = -1; + +// VIO variables +basalt::Calibration calib; + +basalt::VioConfig vio_config; +basalt::OpticalFlowBase::Ptr opt_flow_ptr; +basalt::VioEstimatorBase::Ptr vio; + +int main(int argc, char** argv) { + bool terminate = false; + bool show_gui = true; + bool print_queue = false; + std::string cam_calib_path; + std::string config_path; + int num_threads = 0; + bool use_double = false; + + CLI::App app{"RealSense T265 Live Vio"}; + + app.add_option("--show-gui", show_gui, "Show GUI"); + app.add_option("--cam-calib", cam_calib_path, + "Ground-truth camera calibration used for simulation."); + + app.add_option("--marg-data", marg_data_path, + "Path to folder where marginalization data will be stored."); + + app.add_option("--print-queue", print_queue, "Print queue."); + app.add_option("--config-path", config_path, "Path to config file."); + app.add_option("--num-threads", num_threads, "Number of threads."); + app.add_option("--step-by-step", step_by_step, "Path to config file."); + app.add_option("--use-double", use_double, "Use double not float."); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError& e) { + return app.exit(e); + } + + // global thread limit is in effect until global_control object is destroyed + std::unique_ptr tbb_global_control; + if (num_threads > 0) { + tbb_global_control = std::make_unique( + tbb::global_control::max_allowed_parallelism, num_threads); + } + + if (!config_path.empty()) { + vio_config.load(config_path); + } else { + vio_config.optical_flow_skip_frames = 2; + } + + // realsense + t265_device.reset( + new basalt::RsT265Device(false, 1, 90, 10.0)); // TODO: add options? 
+ + // startup device and load calibration + t265_device->start(); + if (cam_calib_path.empty()) { + calib = *t265_device->exportCalibration(); + } else { + load_data(cam_calib_path); + } + + opt_flow_ptr = basalt::OpticalFlowFactory::getOpticalFlow(vio_config, calib); + t265_device->image_data_queue = &opt_flow_ptr->input_queue; + + vio = basalt::VioEstimatorFactory::getVioEstimator( + vio_config, calib, basalt::constants::g, true, use_double); + vio->initialize(Eigen::Vector3d::Zero(), Eigen::Vector3d::Zero()); + t265_device->imu_data_queue = &vio->imu_data_queue; + + opt_flow_ptr->output_queue = &vio->vision_data_queue; + if (show_gui) vio->out_vis_queue = &out_vis_queue; + vio->out_state_queue = &out_state_queue; + + basalt::MargDataSaver::Ptr marg_data_saver; + + if (!marg_data_path.empty()) { + marg_data_saver.reset(new basalt::MargDataSaver(marg_data_path)); + vio->out_marg_queue = &marg_data_saver->in_marg_queue; + } + + vio_data_log.Clear(); + + std::shared_ptr t3; + + if (show_gui) + t3.reset(new std::thread([&]() { + while (true) { + out_vis_queue.pop(curr_vis_data); + + if (!curr_vis_data.get()) break; + } + + std::cout << "Finished t3" << std::endl; + })); + + std::thread t4([&]() { + basalt::PoseVelBiasState::Ptr data; + + while (true) { + out_state_queue.pop(data); + + if (!data.get()) break; + + int64_t t_ns = data->t_ns; + + if (curr_t_ns < 0) curr_t_ns = t_ns; + + // std::cerr << "t_ns " << t_ns << std::endl; + Sophus::SE3d T_w_i = data->T_w_i; + Eigen::Vector3d vel_w_i = data->vel_w_i; + Eigen::Vector3d bg = data->bias_gyro; + Eigen::Vector3d ba = data->bias_accel; + + vio_t_ns.emplace_back(data->t_ns); + vio_t_w_i.emplace_back(T_w_i.translation()); + + if (show_gui) { + std::vector vals; + vals.push_back((t_ns - curr_t_ns) * 1e-9); + + for (int i = 0; i < 3; i++) vals.push_back(vel_w_i[i]); + for (int i = 0; i < 3; i++) vals.push_back(T_w_i.translation()[i]); + for (int i = 0; i < 3; i++) vals.push_back(bg[i]); + for (int i = 0; i < 3; i++) 
vals.push_back(ba[i]); + + vio_data_log.Log(vals); + } + } + + std::cout << "Finished t4" << std::endl; + }); + + std::shared_ptr t5; + + if (print_queue) { + t5.reset(new std::thread([&]() { + while (!terminate) { + std::cout << "opt_flow_ptr->input_queue " + << opt_flow_ptr->input_queue.size() + << " opt_flow_ptr->output_queue " + << opt_flow_ptr->output_queue->size() << " out_state_queue " + << out_state_queue.size() << std::endl; + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + })); + } + + if (show_gui) { + pangolin::CreateWindowAndBind("RS T265 Vio", 1800, 1000); + + glEnable(GL_DEPTH_TEST); + + pangolin::View& img_view_display = + pangolin::CreateDisplay() + .SetBounds(0.4, 1.0, pangolin::Attach::Pix(UI_WIDTH), 0.4) + .SetLayout(pangolin::LayoutEqual); + + pangolin::View& plot_display = pangolin::CreateDisplay().SetBounds( + 0.0, 0.4, pangolin::Attach::Pix(UI_WIDTH), 1.0); + + plotter = + new pangolin::Plotter(&imu_data_log, 0.0, 100, -3.0, 3.0, 0.01f, 0.01f); + plot_display.AddDisplay(*plotter); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + std::vector> img_view; + while (img_view.size() < calib.intrinsics.size()) { + std::shared_ptr iv(new pangolin::ImageView); + + size_t idx = img_view.size(); + img_view.push_back(iv); + + img_view_display.AddDisplay(*iv); + iv->extern_draw_function = + std::bind(&draw_image_overlay, std::placeholders::_1, idx); + } + + Eigen::Vector3d cam_p(0.5, -2, -2); + cam_p = vio->getT_w_i_init().so3() * calib.T_i_c[0].so3() * cam_p; + cam_p[2] = 1; + + pangolin::OpenGlRenderState camera( + pangolin::ProjectionMatrix(640, 480, 400, 400, 320, 240, 0.001, 10000), + pangolin::ModelViewLookAt(cam_p[0], cam_p[1], cam_p[2], 0, 0, 0, + pangolin::AxisZ)); + + pangolin::View& display3D = + pangolin::CreateDisplay() + .SetAspect(-640 / 480.0) + .SetBounds(0.4, 1.0, 0.4, 1.0) + .SetHandler(new pangolin::Handler3D(camera)); + + while (!pangolin::ShouldQuit()) { + 
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + if (follow) { + if (curr_vis_data.get()) { + auto T_w_i = curr_vis_data->states.back(); + T_w_i.so3() = Sophus::SO3d(); + + camera.Follow(T_w_i.matrix()); + } + } + + display3D.Activate(camera); + glClearColor(1.0f, 1.0f, 1.0f, 1.0f); + + draw_scene(); + + img_view_display.Activate(); + + { + pangolin::GlPixFormat fmt; + fmt.glformat = GL_LUMINANCE; + fmt.gltype = GL_UNSIGNED_SHORT; + fmt.scalable_internal_format = GL_LUMINANCE16; + + if (curr_vis_data.get() && curr_vis_data->opt_flow_res.get() && + curr_vis_data->opt_flow_res->input_images.get()) { + auto& img_data = curr_vis_data->opt_flow_res->input_images->img_data; + + for (size_t cam_id = 0; cam_id < basalt::RsT265Device::NUM_CAMS; + cam_id++) { + if (img_data[cam_id].img.get()) + img_view[cam_id]->SetImage( + img_data[cam_id].img->ptr, img_data[cam_id].img->w, + img_data[cam_id].img->h, img_data[cam_id].img->pitch, fmt); + } + } + + draw_plots(); + } + + if (show_est_vel.GuiChanged() || show_est_pos.GuiChanged() || + show_est_ba.GuiChanged() || show_est_bg.GuiChanged()) { + draw_plots(); + } + + pangolin::FinishFrame(); + } + } + + t265_device->stop(); + terminate = true; + + if (t3.get()) t3->join(); + t4.join(); + if (t5.get()) t5->join(); + + return 0; +} + +void draw_image_overlay(pangolin::View& v, size_t cam_id) { + UNUSED(v); + + if (show_obs) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (curr_vis_data.get() && cam_id < curr_vis_data->projections.size()) { + const auto& points = curr_vis_data->projections[cam_id]; + + if (!points.empty()) { + double min_id = points[0][2], max_id = points[0][2]; + + for (const auto& points2 : curr_vis_data->projections) + for (const auto& p : points2) { + min_id = std::min(min_id, p[2]); + max_id = std::max(max_id, p[2]); + } + + for (const auto& c : points) { + const float radius = 6.5; + + float r, g, b; + getcolor(c[2] - 
min_id, max_id - min_id, b, g, r); + glColor3f(r, g, b); + + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", int(c[3])).Draw(c[0], c[1]); + } + } + + glColor3f(1.0, 0.0, 0.0); + pangolin::GlFont::I() + .Text("Tracked %d points", points.size()) + .Draw(5, 20); + } + } +} + +void draw_scene() { + glPointSize(3); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + glColor3ubv(cam_color); + Eigen::aligned_vector sub_gt(vio_t_w_i.begin(), + vio_t_w_i.end()); + pangolin::glDrawLineStrip(sub_gt); + + if (curr_vis_data.get()) { + for (const auto& p : curr_vis_data->states) + for (const auto& t_i_c : calib.T_i_c) + render_camera((p * t_i_c).matrix(), 2.0f, state_color, 0.1f); + + for (const auto& p : curr_vis_data->frames) + for (const auto& t_i_c : calib.T_i_c) + render_camera((p * t_i_c).matrix(), 2.0f, pose_color, 0.1f); + + for (const auto& t_i_c : calib.T_i_c) + render_camera((curr_vis_data->states.back() * t_i_c).matrix(), 2.0f, + cam_color, 0.1f); + + glColor3ubv(pose_color); + pangolin::glDrawPoints(curr_vis_data->points); + } + + pangolin::glDrawAxis(Sophus::SE3d().matrix(), 1.0); +} + +void load_data(const std::string& calib_path) { + std::ifstream os(calib_path, std::ios::binary); + + if (os.is_open()) { + cereal::JSONInputArchive archive(os); + archive(calib); + std::cout << "Loaded camera with " << calib.intrinsics.size() << " cameras" + << std::endl; + + } else { + std::cerr << "could not load camera calibration " << calib_path + << std::endl; + std::abort(); + } +} + +void draw_plots() { + plotter->ClearSeries(); + plotter->ClearMarkers(); + + if (show_est_pos) { + plotter->AddSeries("$0", "$4", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "position x", &vio_data_log); + plotter->AddSeries("$0", "$5", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "position y", &vio_data_log); + plotter->AddSeries("$0", "$6", 
pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "position z", &vio_data_log); + } + + if (show_est_vel) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "velocity x", &vio_data_log); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "velocity y", &vio_data_log); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "velocity z", &vio_data_log); + } + + if (show_est_bg) { + plotter->AddSeries("$0", "$7", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "gyro bias x", &vio_data_log); + plotter->AddSeries("$0", "$8", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "gyro bias y", &vio_data_log); + plotter->AddSeries("$0", "$9", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "gyro bias z", &vio_data_log); + } + + if (show_est_ba) { + plotter->AddSeries("$0", "$10", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "accel bias x", &vio_data_log); + plotter->AddSeries("$0", "$11", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "accel bias y", + &vio_data_log); + plotter->AddSeries("$0", "$12", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "accel bias z", &vio_data_log); + } + + if (t265_device->last_img_data.get()) { + double t = t265_device->last_img_data->t_ns * 1e-9; + plotter->AddMarker(pangolin::Marker::Vertical, t, pangolin::Marker::Equal, + pangolin::Colour::White()); + } +} diff --git a/src/time_alignment.cpp b/src/time_alignment.cpp new file mode 100644 index 0000000..447dc10 --- /dev/null +++ b/src/time_alignment.cpp @@ -0,0 +1,522 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko, Michael Loipführer and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +basalt::Calibration calib; +basalt::MocapCalibration mocap_calib; + +// Linear time version +double compute_error( + int64_t offset, const std::vector &gyro_timestamps, + const Eigen::aligned_vector &gyro_data, + const std::vector &mocap_rot_vel_timestamps, + const Eigen::aligned_vector &mocap_rot_vel_data) { + double error = 0; + int num_points = 0; + + size_t j = 0; + + for (size_t i = 0; i < mocap_rot_vel_timestamps.size(); i++) { + int64_t corrected_time = mocap_rot_vel_timestamps[i] + offset; + + while (gyro_timestamps[j] < corrected_time) j++; + if (j >= gyro_timestamps.size()) break; + + int64_t dist_j = gyro_timestamps[j] - corrected_time; + int64_t dist_j_m1 = corrected_time - gyro_timestamps[j - 1]; + + BASALT_ASSERT(dist_j >= 0); + BASALT_ASSERT(dist_j_m1 >= 0); + + int idx = dist_j < dist_j_m1 ? j : j - 1; + + if (std::min(dist_j, dist_j_m1) > 1e9 / 120) continue; + + error += (gyro_data[idx] - mocap_rot_vel_data[i]).norm(); + num_points++; + } + return error / num_points; +} + +int main(int argc, char **argv) { + std::string dataset_path; + std::string calibration_path; + std::string mocap_calibration_path; + std::string dataset_type; + std::string output_path; + std::string output_error_path; + std::string output_gyro_path; + std::string output_mocap_path; + + double max_offset_s = 10.0; + + bool show_gui = true; + + CLI::App app{"Calibrate time offset"}; + + app.add_option("-d,--dataset-path", dataset_path, "Path to dataset") + ->required(); + app.add_option("--calibration", calibration_path, "Path to calibration file"); + app.add_option("--mocap-calibration", mocap_calibration_path, + "Path to mocap calibration file"); + app.add_option("--dataset-type", dataset_type, "Dataset type .") + ->required(); + + app.add_option("--output", output_path, + "Path to output file with time-offset result"); + app.add_option("--output-error", 
output_error_path, + "Path to output file with error time-series for plotting"); + app.add_option( + "--output-gyro", output_gyro_path, + "Path to output file with gyro rotational velocities for plotting"); + app.add_option( + "--output-mocap", output_mocap_path, + "Path to output file with mocap rotational velocities for plotting"); + + app.add_option("--max-offset", max_offset_s, + "Maximum offset for a grid search in seconds."); + + app.add_flag("--show-gui", show_gui, "Show GUI for debugging"); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError &e) { + return app.exit(e); + } + + if (!dataset_path.empty() && dataset_path[dataset_path.length() - 1] != '/') { + dataset_path += '/'; + } + + basalt::VioDatasetPtr vio_dataset; + + const bool use_calib = + !(calibration_path.empty() || mocap_calibration_path.empty()); + + if (use_calib) { + std::ifstream is(calibration_path); + + if (is.good()) { + cereal::JSONInputArchive archive(is); + archive(calib); + std::cout << "Loaded calibration from: " << calibration_path << std::endl; + } else { + std::cerr << "No calibration found" << std::endl; + std::abort(); + } + + std::ifstream mocap_is(mocap_calibration_path); + + if (mocap_is.good()) { + cereal::JSONInputArchive archive(mocap_is); + archive(mocap_calib); + std::cout << "Loaded mocap calibration from: " << mocap_calibration_path + << std::endl; + } else { + std::cerr << "No mocap calibration found" << std::endl; + std::abort(); + } + } + + basalt::DatasetIoInterfacePtr dataset_io = + basalt::DatasetIoFactory::getDatasetIo(dataset_type, true); + + dataset_io->read(dataset_path); + vio_dataset = dataset_io->get_data(); + + std::vector gyro_timestamps; + Eigen::aligned_vector gyro_data; + + std::vector mocap_rot_vel_timestamps; + Eigen::aligned_vector mocap_rot_vel_data; + + // Apply calibration to gyro + { + int saturation_count = 0; + for (size_t i = 0; i < vio_dataset->get_gyro_data().size(); i++) { + if 
(vio_dataset->get_gyro_data()[i].data.array().abs().maxCoeff() > + 499.0 * M_PI / 180) { + ++saturation_count; + continue; + } + gyro_timestamps.push_back(vio_dataset->get_gyro_data()[i].timestamp_ns); + + Eigen::Vector3d measurement = vio_dataset->get_gyro_data()[i].data; + if (use_calib) { + gyro_data.push_back(calib.calib_gyro_bias.getCalibrated(measurement)); + } else { + gyro_data.push_back(measurement); + } + } + std::cout << "saturated gyro measurement count: " << saturation_count + << std::endl; + } + + // compute rotational velocity from mocap data + { + Sophus::SE3d T_mark_i; + if (use_calib) T_mark_i = mocap_calib.T_i_mark.inverse(); + + int saturation_count = 0; + for (size_t i = 1; i < vio_dataset->get_gt_timestamps().size() - 1; i++) { + Sophus::SE3d p0, p1; + + // compute central differences, to have no timestamp bias + p0 = vio_dataset->get_gt_pose_data()[i - 1] * T_mark_i; + p1 = vio_dataset->get_gt_pose_data()[i + 1] * T_mark_i; + + double dt = (vio_dataset->get_gt_timestamps()[i + 1] - + vio_dataset->get_gt_timestamps()[i - 1]) * + 1e-9; + + // only compute difference, if measurements are really 2 consecutive + // measurements apart (assuming 120 Hz data) + if (dt > 2.5 / 120) continue; + + Eigen::Vector3d rot_vel = (p0.so3().inverse() * p1.so3()).log() / dt; + + // Filter outliers + if (rot_vel.array().abs().maxCoeff() > 500 * M_PI / 180) { + ++saturation_count; + continue; + } + + mocap_rot_vel_timestamps.push_back(vio_dataset->get_gt_timestamps()[i]); + mocap_rot_vel_data.push_back(rot_vel); + } + std::cout << "outlier mocap rotation velocity count: " << saturation_count + << std::endl; + } + + std::cout << "gyro_data.size() " << gyro_data.size() << std::endl; + std::cout << "mocap_rot_vel_data.size() " << mocap_rot_vel_data.size() + << std::endl; + + std::vector offsets_vec; + std::vector errors_vec; + + int best_offset_ns = 0; + double best_error = std::numeric_limits::max(); + int best_error_idx = -1; + + int64_t max_offset_ns = 
max_offset_s * 1e9; + int64_t offset_inc_ns = 100000; + + for (int64_t offset_ns = -max_offset_ns; offset_ns <= max_offset_ns; + offset_ns += offset_inc_ns) { + double error = compute_error(offset_ns, gyro_timestamps, gyro_data, + mocap_rot_vel_timestamps, mocap_rot_vel_data); + + offsets_vec.push_back(offset_ns * 1e-6); + errors_vec.push_back(error); + + if (error < best_error) { + best_error = error; + best_offset_ns = offset_ns; + best_error_idx = errors_vec.size() - 1; + } + } + + std::cout << "Best error: " << best_error << std::endl; + std::cout << "Best error idx : " << best_error_idx << std::endl; + std::cout << "Best offset: " << best_offset_ns << std::endl; + + pangolin::DataLog error_log; + + int best_offset_refined_ns = best_offset_ns; + + // Subpixel accuracy + Eigen::Vector3d coeff(0, 0, 0); + { + const static int SAMPLE_INTERVAL = 10; + + if (best_error_idx - SAMPLE_INTERVAL >= 0 && + best_error_idx + SAMPLE_INTERVAL < int(errors_vec.size())) { + Eigen::MatrixXd pol(2 * SAMPLE_INTERVAL + 1, 3); + Eigen::VectorXd err(2 * SAMPLE_INTERVAL + 1); + + for (int i = 0; i < 2 * SAMPLE_INTERVAL + 1; i++) { + int idx = i - SAMPLE_INTERVAL; + pol(i, 0) = idx * idx; + pol(i, 1) = idx; + pol(i, 2) = 1; + + err(i) = errors_vec[best_error_idx + idx]; + } + + coeff = + pol.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(err); + + double a = coeff[0]; + double b = coeff[1]; + + if (a > 1e-9) { + best_offset_refined_ns -= offset_inc_ns * b / (2 * a); + } + } + + for (size_t i = 0; i < errors_vec.size(); i++) { + const double idx = + static_cast(static_cast(i) - best_error_idx); + + const Eigen::Vector3d pol(idx * idx, idx, 1); + + error_log.Log(offsets_vec[i], errors_vec[i], pol.transpose() * coeff); + } + } + + std::cout << "Best error refined: " + << compute_error(best_offset_refined_ns, gyro_timestamps, gyro_data, + mocap_rot_vel_timestamps, mocap_rot_vel_data) + << std::endl; + std::cout << "Best offset refined: " << best_offset_refined_ns << std::endl; 
+ + std::cout << "Total mocap offset: " + << vio_dataset->get_mocap_to_imu_offset_ns() + + best_offset_refined_ns + << std::endl; + + if (output_path != "") { + std::ofstream os(output_path); + cereal::JSONOutputArchive archive(os); + archive(cereal::make_nvp("mocap_to_imu_initial_offset_ns", + vio_dataset->get_mocap_to_imu_offset_ns())); + archive(cereal::make_nvp("mocap_to_imu_additional_offset_refined_ns", + best_offset_refined_ns)); + archive(cereal::make_nvp( + "mocap_to_imu_total_offset_ns", + vio_dataset->get_mocap_to_imu_offset_ns() + best_offset_refined_ns)); + } + + if (output_error_path != "") { + std::cout << "Writing error time series to '" << output_error_path << "'" + << std::endl; + + std::ofstream os(output_error_path); + os << "#TIME_MS,ERROR,ERROR_FITTED" << std::endl; + os << "# best_offset_ms: " << best_offset_ns * 1e-6 + << ", best_offset_refined_ms: " << best_offset_refined_ns * 1e-6 + << std::endl; + + for (size_t i = 0; i < errors_vec.size(); ++i) { + const double idx = + static_cast(static_cast(i) - best_error_idx); + const Eigen::Vector3d pol(idx * idx, idx, 1); + const double fitted = pol.transpose() * coeff; + os << offsets_vec[i] << "," << errors_vec[i] << "," << fitted + << std::endl; + } + } + + const int64_t min_time = vio_dataset->get_gyro_data().front().timestamp_ns; + const int64_t max_time = vio_dataset->get_gyro_data().back().timestamp_ns; + + if (output_gyro_path != "") { + std::cout << "Writing gyro values to '" << output_gyro_path << "'" + << std::endl; + + std::ofstream os(output_gyro_path); + os << "#TIME_M, GX, GY, GZ" << std::endl; + + for (size_t i = 0; i < gyro_timestamps.size(); ++i) { + os << (gyro_timestamps[i] - min_time) * 1e-9 << " " + << gyro_data[i].transpose() << std::endl; + } + } + + if (output_mocap_path != "") { + std::cout << "Writing mocap rotational velocity values to '" + << output_mocap_path << "'" << std::endl; + + std::ofstream os(output_mocap_path); + os << "#TIME_M, GX, GY, GZ" << std::endl; + + 
for (size_t i = 0; i < gyro_timestamps.size(); ++i) { + os << (mocap_rot_vel_timestamps[i] + best_offset_ns - min_time) * 1e-9 + << " " << mocap_rot_vel_data[i].transpose() << std::endl; + } + } + + if (show_gui) { + static constexpr int UI_WIDTH = 280; + + pangolin::CreateWindowAndBind("Main", 1280, 800); + + pangolin::Plotter *plotter; + + pangolin::DataLog data_log, mocap_log; + + pangolin::View &plot_display = pangolin::CreateDisplay().SetBounds( + 0.0, 1.0, pangolin::Attach::Pix(UI_WIDTH), 1.0); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + plotter = new pangolin::Plotter(&data_log, 0, (max_time - min_time) * 1e-9, + -10.0, 10.0, 0.01, 0.01); + + plot_display.AddDisplay(*plotter); + pangolin::Var show_gyro("ui.show_gyro", true, false, true); + pangolin::Var show_mocap_rot_vel("ui.show_mocap_rot_vel", true, false, + true); + + pangolin::Var show_error("ui.show_error", false, false, true); + + std::string save_button_name = "ui.save_aligned_dataset"; + // Disable save_aligned_dataset button if GT data already exists + if (basalt::fs::exists( + basalt::fs::path(dataset_path + "mav0/gt/data.csv"))) { + save_button_name += "(disabled)"; + } + + pangolin::Var> save_aligned_dataset( + save_button_name, [&]() { + if (basalt::fs::exists( + basalt::fs::path(dataset_path + "mav0/gt/data.csv"))) { + std::cout << "Aligned ground-truth data already exists, skipping. " + "If you want to run the calibration again delete " + << dataset_path << "mav0/gt/ folder." 
<< std::endl; + return; + } + std::cout << "Saving aligned dataset in " + << dataset_path + "mav0/gt/data.csv" << std::endl; + // output corrected mocap data + Sophus::SE3d T_mark_i; + if (use_calib) T_mark_i = mocap_calib.T_i_mark.inverse(); + basalt::fs::create_directory(dataset_path + "mav0/gt/"); + std::ofstream gt_out_stream; + gt_out_stream.open(dataset_path + "mav0/gt/data.csv"); + gt_out_stream + << "#timestamp [ns], p_RS_R_x [m], p_RS_R_y [m], p_RS_R_z [m], " + "q_RS_w [], q_RS_x [], q_RS_y [], q_RS_z []\n"; + + for (size_t i = 0; i < vio_dataset->get_gt_timestamps().size(); i++) { + gt_out_stream << vio_dataset->get_gt_timestamps()[i] + + best_offset_refined_ns + << ","; + Sophus::SE3d pose_corrected = + vio_dataset->get_gt_pose_data()[i] * T_mark_i; + gt_out_stream << pose_corrected.translation().x() << "," + << pose_corrected.translation().y() << "," + << pose_corrected.translation().z() << "," + << pose_corrected.unit_quaternion().w() << "," + << pose_corrected.unit_quaternion().x() << "," + << pose_corrected.unit_quaternion().y() << "," + << pose_corrected.unit_quaternion().z() << std::endl; + } + gt_out_stream.close(); + }); + + auto recompute_logs = [&]() { + data_log.Clear(); + mocap_log.Clear(); + + for (size_t i = 0; i < gyro_timestamps.size(); i++) { + data_log.Log((gyro_timestamps[i] - min_time) * 1e-9, gyro_data[i][0], + gyro_data[i][1], gyro_data[i][2]); + } + + for (size_t i = 0; i < mocap_rot_vel_timestamps.size(); i++) { + mocap_log.Log( + (mocap_rot_vel_timestamps[i] + best_offset_ns - min_time) * 1e-9, + mocap_rot_vel_data[i][0], mocap_rot_vel_data[i][1], + mocap_rot_vel_data[i][2]); + } + }; + + auto drawPlots = [&]() { + plotter->ClearSeries(); + plotter->ClearMarkers(); + + if (show_gyro) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "g x"); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "g y"); + plotter->AddSeries("$0", "$3", 
pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "g z"); + } + + if (show_mocap_rot_vel) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeLine, + pangolin::Colour(1, 1, 0), "pv x", &mocap_log); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeLine, + pangolin::Colour(1, 0, 1), "pv y", &mocap_log); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeLine, + pangolin::Colour(0, 1, 1), "pv z", &mocap_log); + } + + if (show_error) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeLine, + pangolin::Colour(1, 1, 1), "error", &error_log); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeLine, + pangolin::Colour(0.3, 1, 0.8), "fitted error", + &error_log); + plotter->AddMarker(pangolin::Marker::Vertical, + best_offset_refined_ns * 1e-6, + pangolin::Marker::Equal, pangolin::Colour(1, 0, 0)); + } + }; + + recompute_logs(); + drawPlots(); + + while (!pangolin::ShouldQuit()) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + if (show_gyro.GuiChanged() || show_mocap_rot_vel.GuiChanged() || + show_error.GuiChanged()) { + drawPlots(); + } + + pangolin::FinishFrame(); + } + } + + return 0; +} diff --git a/src/utils/keypoints.cpp b/src/utils/keypoints.cpp new file mode 100644 index 0000000..2a2d474 --- /dev/null +++ b/src/utils/keypoints.cpp @@ -0,0 +1,428 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include + +#include +#include + +#include +#include +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#include +#pragma GCC diagnostic pop + +namespace basalt { + +// const int PATCH_SIZE = 31; +const int HALF_PATCH_SIZE = 15; +const int EDGE_THRESHOLD = 19; + +const static char pattern_31_x_a[256] = { + 8, 4, -11, 7, 2, 1, -2, -13, -13, 10, -13, -11, 7, -4, -13, + -9, 12, -3, -6, 11, 4, 5, 3, -8, -2, -13, -7, -4, -10, 5, + 5, 1, 9, 4, 2, -4, -8, 4, 0, -13, -3, -6, 8, 0, 7, + -13, 10, -6, 10, -13, -13, 3, 5, -1, 3, 2, -13, -13, -13, -7, + 6, -9, -2, -12, 3, -7, -3, 2, -11, -1, 5, -4, -9, -12, 10, + 7, -7, -4, 7, -7, -13, -3, 7, -13, 1, 2, -4, -1, 7, 1, + 9, -1, -13, 7, 12, 6, 5, 2, 3, 2, 9, -8, -11, 1, 6, + 2, 6, 3, 7, -11, -10, -5, -10, 8, 4, -10, 4, -2, -5, 7, + -9, -5, 8, -9, 1, 7, -2, 11, -12, 3, 5, 0, -9, 0, -1, + 5, 3, -13, -5, -4, 6, -7, -13, 1, 4, -2, 2, -2, 4, -6, + -3, 7, 4, -13, 7, 7, -7, -8, -13, 2, 10, -6, 8, 2, -11, + -12, -11, 5, -2, -1, 
-13, -10, -3, 2, -9, -4, -4, -6, 6, -13, + 11, 7, -1, -4, -7, -13, -7, -8, -5, -13, 1, 1, 9, 5, -1, + -9, -1, -13, 8, 2, 7, -10, -10, 4, 3, -4, 5, 4, -9, 0, + -12, 3, -10, 8, -8, 2, 10, 6, -7, -3, -1, -3, -8, 4, 2, + 6, 3, 11, -3, 4, 2, -10, -13, -13, 6, 0, -13, -9, -13, 5, + 2, -1, 9, 11, 3, -1, 3, -13, 5, 8, 7, -10, 7, 9, 7, + -1}; + +const static char pattern_31_y_a[256] = { + -3, 2, 9, -12, -13, -7, -10, -13, -3, 4, -8, 7, 7, -5, 2, + 0, -6, 6, -13, -13, 7, -3, -7, -7, 11, 12, 3, 2, -12, -12, + -6, 0, 11, 7, -1, -12, -5, 11, -8, -2, -2, 9, 12, 9, -5, + -6, 7, -3, -9, 8, 0, 3, 7, 7, -10, -4, 0, -7, 3, 12, + -10, -1, -5, 5, -10, -7, -2, 9, -13, 6, -3, -13, -6, -10, 2, + 12, -13, 9, -1, 6, 11, 7, -8, -7, -3, -6, 3, -13, 1, -1, + 1, -9, -13, 7, -5, 3, -13, -12, 8, 6, -12, 4, 12, 12, -9, + 3, 3, -3, 8, -5, 11, -8, 5, -1, -6, 12, -2, 0, -8, -6, + -13, -13, -8, -11, -8, -4, 1, -6, -9, 7, 5, -4, 12, 7, 2, + 11, 5, -4, 9, -7, 5, 6, 6, -10, 1, -2, -12, -13, 1, -10, + -13, 5, -2, 9, 1, -8, -4, 11, 6, 4, -5, -5, -3, -12, -2, + -13, 0, -3, -13, -8, -11, -2, 9, -3, -13, 6, 12, -11, -3, 11, + 11, -5, 12, -8, 1, -12, -2, 5, -1, 7, 5, 0, 12, -8, 11, + -3, -10, 1, -11, -13, -13, -10, -8, -6, 12, 2, -13, -13, 9, 3, + 1, 2, -10, -13, -12, 2, 6, 8, 10, -9, -13, -7, -2, 2, -5, + -9, -1, -1, 0, -11, -4, -6, 7, 12, 0, -1, 3, 8, -6, -9, + 7, -6, 5, -3, 0, 4, -6, 0, 8, 9, -4, 4, 3, -7, 0, + -6}; + +const static char pattern_31_x_b[256] = { + 9, 7, -8, 12, 2, 1, -2, -11, -12, 11, -8, -9, 12, -3, -12, -7, + 12, -2, -4, 12, 5, 10, 6, -6, -1, -8, -5, -3, -6, 6, 7, 4, + 11, 4, 4, -2, -7, 9, 1, -8, -2, -4, 10, 1, 11, -11, 12, -6, + 12, -8, -8, 7, 10, 1, 5, 3, -13, -12, -11, -4, 12, -7, 0, -7, + 8, -4, -1, 5, -5, 0, 5, -4, -9, -8, 12, 12, -6, -3, 12, -5, + -12, -2, 12, -11, 12, 3, -2, 1, 8, 3, 12, -1, -10, 10, 12, 7, + 6, 2, 4, 12, 10, -7, -4, 2, 7, 3, 11, 8, 9, -6, -5, -3, + -9, 12, 6, -8, 6, -2, -5, 10, -8, -5, 9, -9, 1, 9, -1, 12, + -6, 7, 10, 2, -5, 2, 1, 7, 6, -8, -3, -3, 8, -6, -5, 3, + 
8, 2, 12, 0, 9, -3, -1, 12, 5, -9, 8, 7, -7, -7, -12, 3, + 12, -6, 9, 2, -10, -7, -10, 11, -1, 0, -12, -10, -2, 3, -4, -3, + -2, -4, 6, -5, 12, 12, 0, -3, -6, -8, -6, -6, -4, -8, 5, 10, + 10, 10, 1, -6, 1, -8, 10, 3, 12, -5, -8, 8, 8, -3, 10, 5, + -4, 3, -6, 4, -10, 12, -6, 3, 11, 8, -6, -3, -1, -3, -8, 12, + 3, 11, 7, 12, -3, 4, 2, -8, -11, -11, 11, 1, -9, -6, -8, 8, + 3, -1, 11, 12, 3, 0, 4, -10, 12, 9, 8, -10, 12, 10, 12, 0}; + +const static char pattern_31_y_b[256] = { + 5, -12, 2, -13, 12, 6, -4, -8, -9, 9, -9, 12, 6, 0, -3, + 5, -1, 12, -8, -8, 1, -3, 12, -2, -10, 10, -3, 7, 11, -7, + -1, -5, -13, 12, 4, 7, -10, 12, -13, 2, 3, -9, 7, 3, -10, + 0, 1, 12, -4, -12, -4, 8, -7, -12, 6, -10, 5, 12, 8, 7, + 8, -6, 12, 5, -13, 5, -7, -11, -13, -1, 2, 12, 6, -4, -3, + 12, 5, 4, 2, 1, 5, -6, -7, -12, 12, 0, -13, 9, -6, 12, + 6, 3, 5, 12, 9, 11, 10, 3, -6, -13, 3, 9, -6, -8, -4, + -2, 0, -8, 3, -4, 10, 12, 0, -6, -11, 7, 7, 12, 2, 12, + -8, -2, -13, 0, -2, 1, -4, -11, 4, 12, 8, 8, -13, 12, 7, + -9, -8, 9, -3, -12, 0, 12, -2, 10, -4, -13, 12, -6, 3, -5, + 1, -11, -7, -5, 6, 6, 1, -8, -8, 9, 3, 7, -8, 8, 3, + -9, -5, 8, 12, 9, -5, 11, -13, 2, 0, -10, -7, 9, 11, 5, + 6, -2, 7, -2, 7, -13, -8, -9, 5, 10, -13, -13, -1, -9, -13, + 2, 12, -10, -6, -6, -9, -7, -13, 5, -13, -3, -12, -1, 3, -9, + 1, -8, 9, 12, -5, 7, -8, -12, 5, 9, 5, 4, 3, 12, 11, + -13, 12, 4, 6, 12, 1, 1, 1, -13, -13, 4, -2, -3, -2, 10, + -9, -1, -2, -8, 5, 10, 5, 5, 11, -6, -12, 9, 4, -2, -2, + -11}; + +void detectKeypointsMapping(const basalt::Image& img_raw, + KeypointsData& kd, int num_features) { + cv::Mat image(img_raw.h, img_raw.w, CV_8U); + + uint8_t* dst = image.ptr(); + const uint16_t* src = img_raw.ptr; + + for (size_t i = 0; i < img_raw.size(); i++) { + dst[i] = (src[i] >> 8); + } + + std::vector points; + goodFeaturesToTrack(image, points, num_features, 0.01, 8); + + kd.corners.clear(); + kd.corner_angles.clear(); + kd.corner_descriptors.clear(); + + for (size_t i = 0; i < points.size(); i++) { + 
if (img_raw.InBounds(points[i].x, points[i].y, EDGE_THRESHOLD)) { + kd.corners.emplace_back(points[i].x, points[i].y); + } + } +} + +void detectKeypoints( + const basalt::Image& img_raw, KeypointsData& kd, + int PATCH_SIZE, int num_points_cell, + const Eigen::aligned_vector& current_points) { + kd.corners.clear(); + kd.corner_angles.clear(); + kd.corner_descriptors.clear(); + + const size_t x_start = (img_raw.w % PATCH_SIZE) / 2; + const size_t x_stop = x_start + PATCH_SIZE * (img_raw.w / PATCH_SIZE - 1); + + const size_t y_start = (img_raw.h % PATCH_SIZE) / 2; + const size_t y_stop = y_start + PATCH_SIZE * (img_raw.h / PATCH_SIZE - 1); + + // std::cerr << "x_start " << x_start << " x_stop " << x_stop << std::endl; + // std::cerr << "y_start " << y_start << " y_stop " << y_stop << std::endl; + + Eigen::MatrixXi cells; + cells.setZero(img_raw.h / PATCH_SIZE + 1, img_raw.w / PATCH_SIZE + 1); + + for (const Eigen::Vector2d& p : current_points) { + if (p[0] >= x_start && p[1] >= y_start && p[0] < x_stop + PATCH_SIZE && + p[1] < y_stop + PATCH_SIZE) { + int x = (p[0] - x_start) / PATCH_SIZE; + int y = (p[1] - y_start) / PATCH_SIZE; + + cells(y, x) += 1; + } + } + + for (size_t x = x_start; x <= x_stop; x += PATCH_SIZE) { + for (size_t y = y_start; y <= y_stop; y += PATCH_SIZE) { + if (cells((y - y_start) / PATCH_SIZE, (x - x_start) / PATCH_SIZE) > 0) + continue; + + const basalt::Image sub_img_raw = + img_raw.SubImage(x, y, PATCH_SIZE, PATCH_SIZE); + + cv::Mat subImg(PATCH_SIZE, PATCH_SIZE, CV_8U); + + for (int y = 0; y < PATCH_SIZE; y++) { + uchar* sub_ptr = subImg.ptr(y); + for (int x = 0; x < PATCH_SIZE; x++) { + sub_ptr[x] = (sub_img_raw(x, y) >> 8); + } + } + + int points_added = 0; + int threshold = 40; + + while (points_added < num_points_cell && threshold >= 5) { + std::vector points; + cv::FAST(subImg, points, threshold); + + std::sort(points.begin(), points.end(), + [](const cv::KeyPoint& a, const cv::KeyPoint& b) -> bool { + return a.response > b.response; + 
}); + + // std::cout << "Detected " << points.size() << " points. + // Threshold " + // << threshold << std::endl; + + for (size_t i = 0; i < points.size() && points_added < num_points_cell; + i++) + if (img_raw.InBounds(x + points[i].pt.x, y + points[i].pt.y, + EDGE_THRESHOLD)) { + kd.corners.emplace_back(x + points[i].pt.x, y + points[i].pt.y); + points_added++; + } + + threshold /= 2; + } + } + } + + // std::cout << "Total points: " << kd.corners.size() << std::endl; + + // cv::TermCriteria criteria = + // cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001); + // cv::Size winSize = cv::Size(5, 5); + // cv::Size zeroZone = cv::Size(-1, -1); + // cv::cornerSubPix(image, points, winSize, zeroZone, criteria); + + // for (size_t i = 0; i < points.size(); i++) { + // if (img_raw.InBounds(points[i].pt.x, points[i].pt.y, EDGE_THRESHOLD)) { + // kd.corners.emplace_back(points[i].pt.x, points[i].pt.y); + // } + // } +} + +void computeAngles(const basalt::Image& img_raw, + KeypointsData& kd, bool rotate_features) { + kd.corner_angles.resize(kd.corners.size()); + + for (size_t i = 0; i < kd.corners.size(); i++) { + const Eigen::Vector2d& p = kd.corners[i]; + + const int cx = p[0]; + const int cy = p[1]; + + double angle = 0; + + if (rotate_features) { + double m01 = 0, m10 = 0; + for (int x = -HALF_PATCH_SIZE; x <= HALF_PATCH_SIZE; x++) { + for (int y = -HALF_PATCH_SIZE; y <= HALF_PATCH_SIZE; y++) { + if (x * x + y * y <= HALF_PATCH_SIZE * HALF_PATCH_SIZE) { + double val = img_raw(cx + x, cy + y); + m01 += y * val; + m10 += x * val; + } + } + } + + angle = atan2(m01, m10); + } + + kd.corner_angles[i] = angle; + } +} + +void computeDescriptors(const basalt::Image& img_raw, + KeypointsData& kd) { + kd.corner_descriptors.resize(kd.corners.size()); + + for (size_t i = 0; i < kd.corners.size(); i++) { + std::bitset<256> descriptor; + + const Eigen::Vector2d& p = kd.corners[i]; + double angle = kd.corner_angles[i]; + + int cx = p[0]; + int cy = p[1]; + + 
Eigen::Rotation2Dd rot(angle); + Eigen::Matrix2d mat_rot = rot.matrix(); + + for (int i = 0; i < 256; i++) { + Eigen::Vector2d va(pattern_31_x_a[i], pattern_31_y_a[i]), + vb(pattern_31_x_b[i], pattern_31_y_b[i]); + + Eigen::Vector2i vva = (mat_rot * va).array().round().cast(); + Eigen::Vector2i vvb = (mat_rot * vb).array().round().cast(); + + descriptor[i] = + img_raw(cx + vva[0], cy + vva[1]) < img_raw(cx + vvb[0], cy + vvb[1]); + } + + kd.corner_descriptors[i] = descriptor; + } +} + +void matchFastHelper(const std::vector>& corner_descriptors_1, + const std::vector>& corner_descriptors_2, + std::unordered_map& matches, int threshold, + double test_dist) { + matches.clear(); + + for (size_t i = 0; i < corner_descriptors_1.size(); i++) { + int best_idx = -1, best_dist = 500; + int best2_dist = 500; + + for (size_t j = 0; j < corner_descriptors_2.size(); j++) { + int dist = (corner_descriptors_1[i] ^ corner_descriptors_2[j]).count(); + + if (dist <= best_dist) { + best2_dist = best_dist; + + best_dist = dist; + best_idx = j; + } else if (dist < best2_dist) { + best2_dist = dist; + } + } + + if (best_dist < threshold && best_dist * test_dist <= best2_dist) { + matches.emplace(i, best_idx); + } + } +} + +void matchDescriptors(const std::vector>& corner_descriptors_1, + const std::vector>& corner_descriptors_2, + std::vector>& matches, int threshold, + double dist_2_best) { + matches.clear(); + + std::unordered_map matches_1_2, matches_2_1; + matchFastHelper(corner_descriptors_1, corner_descriptors_2, matches_1_2, + threshold, dist_2_best); + matchFastHelper(corner_descriptors_2, corner_descriptors_1, matches_2_1, + threshold, dist_2_best); + + for (const auto& kv : matches_1_2) { + if (matches_2_1[kv.second] == kv.first) { + matches.emplace_back(kv.first, kv.second); + } + } +} + +void findInliersRansac(const KeypointsData& kd1, const KeypointsData& kd2, + const double ransac_thresh, const int ransac_min_inliers, + MatchData& md) { + md.inliers.clear(); + + 
opengv::bearingVectors_t bearingVectors1, bearingVectors2; + + for (size_t i = 0; i < md.matches.size(); i++) { + bearingVectors1.push_back(kd1.corners_3d[md.matches[i].first].head<3>()); + bearingVectors2.push_back(kd2.corners_3d[md.matches[i].second].head<3>()); + } + + // create the central relative adapter + opengv::relative_pose::CentralRelativeAdapter adapter(bearingVectors1, + bearingVectors2); + // create a RANSAC object + opengv::sac::Ransac< + opengv::sac_problems::relative_pose::CentralRelativePoseSacProblem> + ransac; + // create a CentralRelativePoseSacProblem + // (set algorithm to STEWENIUS, NISTER, SEVENPT, or EIGHTPT) + std::shared_ptr< + opengv::sac_problems::relative_pose::CentralRelativePoseSacProblem> + relposeproblem_ptr( + new opengv::sac_problems::relative_pose:: + CentralRelativePoseSacProblem( + adapter, opengv::sac_problems::relative_pose:: + CentralRelativePoseSacProblem::STEWENIUS)); + // run ransac + ransac.sac_model_ = relposeproblem_ptr; + ransac.threshold_ = ransac_thresh; + ransac.max_iterations_ = 100; + ransac.computeModel(); + + // do non-linear refinement and add more inliers + const size_t num_inliers_ransac = ransac.inliers_.size(); + + adapter.sett12(ransac.model_coefficients_.topRightCorner<3, 1>()); + adapter.setR12(ransac.model_coefficients_.topLeftCorner<3, 3>()); + + const opengv::transformation_t nonlinear_transformation = + opengv::relative_pose::optimize_nonlinear(adapter, ransac.inliers_); + + ransac.sac_model_->selectWithinDistance(nonlinear_transformation, + ransac.threshold_, ransac.inliers_); + + // Sanity check if the number of inliers decreased, but only warn if it is + // by 3 or more, since some small fluctuation is expected. + if (ransac.inliers_.size() + 2 < num_inliers_ransac) { + std::cout << "Warning: non-linear refinement reduced the relative pose " + "ransac inlier count from " + << num_inliers_ransac << " to " << ransac.inliers_.size() << "." 
+ << std::endl; + } + + // get the result (normalize translation) + md.T_i_j = Sophus::SE3d( + nonlinear_transformation.topLeftCorner<3, 3>(), + nonlinear_transformation.topRightCorner<3, 1>().normalized()); + + if ((long)ransac.inliers_.size() >= ransac_min_inliers) { + for (size_t i = 0; i < ransac.inliers_.size(); i++) + md.inliers.emplace_back(md.matches[ransac.inliers_[i]]); + } +} + +} // namespace basalt diff --git a/src/utils/system_utils.cpp b/src/utils/system_utils.cpp new file mode 100644 index 0000000..f686ff4 --- /dev/null +++ b/src/utils/system_utils.cpp @@ -0,0 +1,96 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2021, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include + +#if __APPLE__ +#include +#include +#elif __linux__ +#include + +#include +#endif + +namespace basalt { + +bool get_memory_info(MemoryInfo& info) { +#if __APPLE__ + mach_task_basic_info_data_t t_info; + mach_msg_type_number_t t_info_count = MACH_TASK_BASIC_INFO_COUNT; + + if (KERN_SUCCESS != task_info(mach_task_self(), MACH_TASK_BASIC_INFO, + (task_info_t)&t_info, &t_info_count)) { + return false; + } + info.resident_memory = t_info.resident_size; + info.resident_memory_peak = t_info.resident_size_max; + + /* + struct rusage resource_usage; + getrusage(RUSAGE_SELF, &resource_usage); + info.resident_memory_peak = resource_usage.ru_maxrss; + */ + + return true; +#elif __linux__ + + // get current memory first + std::size_t program_size = 0; + std::size_t resident_size = 0; + + std::ifstream fs("/proc/self/statm"); + if (fs.fail()) { + return false; + } + fs >> program_size; + fs >> resident_size; + + info.resident_memory = resident_size * sysconf(_SC_PAGESIZE); + + // get peak memory after that + struct rusage resource_usage; + getrusage(RUSAGE_SELF, &resource_usage); + info.resident_memory_peak = resource_usage.ru_maxrss * 1024; + + return true; +#else + return false; +#endif +} + +} // namespace basalt diff --git a/src/utils/time_utils.cpp b/src/utils/time_utils.cpp new file mode 100644 index 0000000..cb98ae6 --- /dev/null +++ b/src/utils/time_utils.cpp @@ -0,0 +1,212 @@ +#include +#include + +#include +#include + +#include 
+#include +#include + +namespace basalt { + +using namespace fmt::literals; + +// compute the median of an eigen vector +// Note: Changes the order of elements in the vector! +// Note: For even sized vectors we don't return the mean of the middle two, but +// simply the second one as is. +template +Scalar median_non_const(Eigen::Matrix& vec) { + static_assert(Rows != 0); + if constexpr (Rows < 0) { + BASALT_ASSERT(vec.size() >= 1); + } + int n = vec.size() / 2; + std::nth_element(vec.begin(), vec.begin() + n, vec.end()); + return vec(n); +} + +template +Scalar variance(const Eigen::Matrix& vec) { + static_assert(N != 0); + const Eigen::Matrix centered = vec.array() - vec.mean(); + return centered.squaredNorm() / Scalar(vec.size()); +} + +ExecutionStats::Meta& ExecutionStats::add(const std::string& name, + double value) { + auto [it, new_item] = stats_.try_emplace(name); + if (new_item) { + order_.push_back(name); + it->second.data_ = std::vector(); + } + std::get>(it->second.data_).push_back(value); + return it->second; +} + +ExecutionStats::Meta& ExecutionStats::add(const std::string& name, + const Eigen::VectorXd& value) { + auto [it, new_item] = stats_.try_emplace(name); + if (new_item) { + order_.push_back(name); + it->second.data_ = std::vector(); + } + std::get>(it->second.data_).push_back(value); + return it->second; +} + +ExecutionStats::Meta& ExecutionStats::add(const std::string& name, + const Eigen::VectorXf& value) { + Eigen::VectorXd x = value.cast(); + return add(name, x); +} + +void ExecutionStats::merge_all(const ExecutionStats& other) { + for (const auto& name : other.order_) { + const auto& meta = other.stats_.at(name); + std::visit( + [&](auto& data) { + for (auto v : data) { + add(name, v); + } + }, + meta.data_); + stats_.at(name).set_meta(meta); + } +} + +namespace { // helper +// //////////////////////////////////////////////////////////////////////////// +// overloads for generic lambdas +// See also: 
https://stackoverflow.com/q/55087826/1813258 +template +struct overload : Ts... { + using Ts::operator()...; +}; +template +overload(Ts...) -> overload; +} // namespace + +void ExecutionStats::merge_sums(const ExecutionStats& other) { + for (const auto& name : other.order_) { + const auto& meta = other.stats_.at(name); + std::visit(overload{[&](const std::vector& data) { + Eigen::Map map(data.data(), + data.size()); + add(name, map.sum()); + }, + [&](const std::vector& data) { + UNUSED(data); + // TODO: for now no-op + }}, + meta.data_); + stats_.at(name).set_meta(meta); + } +} + +void ExecutionStats::print() const { + for (const auto& name : order_) { + const auto& meta = stats_.at(name); + + std::visit( + overload{ + [&](const std::vector& data) { + Eigen::Map map(data.data(), data.size()); + + // create a copy for median computation + Eigen::VectorXd vec = map; + + if (meta.format_ == "ms") { + // convert seconds to milliseconds + vec *= 1000; + } + + int count = vec.size(); + // double sum = vec.sum(); + double mean = vec.mean(); + double stddev = std::sqrt(variance(vec)); + double max = vec.maxCoeff(); + double min = vec.minCoeff(); + + // double median = median_non_const(vec); + + if (meta.format_ == "count") { + std::cout << "{:20} ({:>4}):{: 8.1f}+-{:.1f} [{}, {}]\n"_format( + name, count, mean, stddev, min, max); + } else if (meta.format_ != "none") { + std::cout + << "{:20} ({:>4}):{: 8.2f}+-{:.2f} [{:.2f}, {:.2f}]\n"_format( + name, count, mean, stddev, min, max); + } + }, + [&](const std::vector& data) { + int count = data.size(); + std::cout << "{:20} ({:>4})\n"_format(name, count); + }}, + meta.data_); + } +} + +bool ExecutionStats::save_json(const std::string& path) const { + using json = nlohmann::json; + json result; + + for (const auto& name : order_) { + const auto& meta = stats_.at(name); + + std::visit( + overload{[&](const std::vector& data) { result[name] = data; }, + [&](const std::vector& data) { + std::vector indices; + std::vector values; 
+ for (const auto& v : data) { + indices.push_back(int(values.size())); + values.insert(values.end(), v.begin(), v.end()); + } + std::string name_values = std::string(name) + "__values"; + std::string name_indices = std::string(name) + "__index"; + result[name_indices] = indices; + result[name_values] = values; + }}, + meta.data_); + } + + constexpr bool save_as_json = false; + constexpr bool save_as_ubjson = true; + + // save json + if (save_as_json) { + std::ofstream ofs(path); + + if (!ofs.is_open()) { + std::cerr << "Could not save ExecutionStats to {}.\n"_format(path); + return false; + } + + ofs << std::setw(4) << result; //!< pretty printing + // ofs << result; //!< no pretty printing + + std::cout << "Saved ExecutionStats to {}.\n"_format(path); + } + + // save ubjson + if (save_as_ubjson) { + std::string ubjson_path = + path.substr(0, path.find_last_of('.')) + ".ubjson"; + std::ofstream ofs(ubjson_path, std::ios_base::binary); + + if (!ofs.is_open()) { + std::cerr << "Could not save ExecutionStats to {}.\n"_format(ubjson_path); + return false; + } + + json::to_ubjson(result, ofs); + + std::cout << "Saved ExecutionStats to {}.\n"_format(ubjson_path); + } + + return true; +} + +} // namespace basalt diff --git a/src/utils/vio_config.cpp b/src/utils/vio_config.cpp new file mode 100644 index 0000000..0e554ed --- /dev/null +++ b/src/utils/vio_config.cpp @@ -0,0 +1,222 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include + +#include + +#include +#include +#include + +namespace basalt { + +VioConfig::VioConfig() { + // optical_flow_type = "patch"; + optical_flow_type = "frame_to_frame"; + optical_flow_detection_grid_size = 50; + optical_flow_max_recovered_dist2 = 0.09f; + optical_flow_pattern = 51; + optical_flow_max_iterations = 5; + optical_flow_levels = 3; + optical_flow_epipolar_error = 0.005; + optical_flow_skip_frames = 1; + + vio_linearization_type = LinearizationType::ABS_QR; + vio_sqrt_marg = true; + + vio_max_states = 3; + vio_max_kfs = 7; + vio_min_frames_after_kf = 5; + vio_new_kf_keypoints_thresh = 0.7; + + vio_debug = false; + vio_extended_logging = false; + vio_obs_std_dev = 0.5; + vio_obs_huber_thresh = 1.0; + vio_min_triangulation_dist = 0.05; + // vio_outlier_threshold = 3.0; + // vio_filter_iteration = 4; + vio_max_iterations = 7; + + vio_enforce_realtime = false; + + vio_use_lm = false; + vio_lm_lambda_initial = 1e-4; + vio_lm_lambda_min = 1e-6; + vio_lm_lambda_max = 1e2; + + vio_scale_jacobian = true; + + vio_init_pose_weight = 1e8; + vio_init_ba_weight = 1e1; + vio_init_bg_weight = 1e2; + + vio_marg_lost_landmarks = true; + + vio_kf_marg_feature_ratio = 0.1; + + mapper_obs_std_dev = 0.25; + mapper_obs_huber_thresh = 1.5; + mapper_detection_num_points = 800; + mapper_num_frames_to_match = 30; + mapper_frames_to_match_threshold = 0.04; + mapper_min_matches = 20; + mapper_ransac_threshold = 5e-5; + mapper_min_track_length = 5; + mapper_max_hamming_distance = 70; + mapper_second_best_test_ratio = 1.2; + mapper_bow_num_bits = 16; + mapper_min_triangulation_dist = 0.07; + mapper_no_factor_weights = false; + mapper_use_factors = true; + + mapper_use_lm = false; + mapper_lm_lambda_min = 1e-32; + mapper_lm_lambda_max = 1e2; +} + +void VioConfig::save(const std::string& filename) { + std::ofstream os(filename); + + { + cereal::JSONOutputArchive archive(os); + archive(*this); + } + os.close(); +} + +void VioConfig::load(const std::string& 
filename) { + std::ifstream is(filename); + + { + cereal::JSONInputArchive archive(is); + archive(*this); + } + is.close(); +} +} // namespace basalt + +namespace cereal { + +template +std::string save_minimal(const Archive& ar, + const basalt::LinearizationType& linearization_type) { + UNUSED(ar); + auto name = magic_enum::enum_name(linearization_type); + return std::string(name); +} + +template +void load_minimal(const Archive& ar, + basalt::LinearizationType& linearization_type, + const std::string& name) { + UNUSED(ar); + + auto lin_enum = magic_enum::enum_cast(name); + + if (lin_enum.has_value()) { + linearization_type = lin_enum.value(); + } else { + std::cerr << "Could not find the LinearizationType for " << name + << std::endl; + std::abort(); + } +} + +template +void serialize(Archive& ar, basalt::VioConfig& config) { + ar(CEREAL_NVP(config.optical_flow_type)); + ar(CEREAL_NVP(config.optical_flow_detection_grid_size)); + ar(CEREAL_NVP(config.optical_flow_max_recovered_dist2)); + ar(CEREAL_NVP(config.optical_flow_pattern)); + ar(CEREAL_NVP(config.optical_flow_max_iterations)); + ar(CEREAL_NVP(config.optical_flow_epipolar_error)); + ar(CEREAL_NVP(config.optical_flow_levels)); + ar(CEREAL_NVP(config.optical_flow_skip_frames)); + + ar(CEREAL_NVP(config.vio_linearization_type)); + ar(CEREAL_NVP(config.vio_sqrt_marg)); + ar(CEREAL_NVP(config.vio_max_states)); + ar(CEREAL_NVP(config.vio_max_kfs)); + ar(CEREAL_NVP(config.vio_min_frames_after_kf)); + ar(CEREAL_NVP(config.vio_new_kf_keypoints_thresh)); + ar(CEREAL_NVP(config.vio_debug)); + ar(CEREAL_NVP(config.vio_extended_logging)); + ar(CEREAL_NVP(config.vio_max_iterations)); + // ar(CEREAL_NVP(config.vio_outlier_threshold)); + // ar(CEREAL_NVP(config.vio_filter_iteration)); + + ar(CEREAL_NVP(config.vio_obs_std_dev)); + ar(CEREAL_NVP(config.vio_obs_huber_thresh)); + ar(CEREAL_NVP(config.vio_min_triangulation_dist)); + + ar(CEREAL_NVP(config.vio_enforce_realtime)); + + ar(CEREAL_NVP(config.vio_use_lm)); + 
ar(CEREAL_NVP(config.vio_lm_lambda_initial)); + ar(CEREAL_NVP(config.vio_lm_lambda_min)); + ar(CEREAL_NVP(config.vio_lm_lambda_max)); + + ar(CEREAL_NVP(config.vio_scale_jacobian)); + + ar(CEREAL_NVP(config.vio_init_pose_weight)); + ar(CEREAL_NVP(config.vio_init_ba_weight)); + ar(CEREAL_NVP(config.vio_init_bg_weight)); + + ar(CEREAL_NVP(config.vio_marg_lost_landmarks)); + ar(CEREAL_NVP(config.vio_kf_marg_feature_ratio)); + + ar(CEREAL_NVP(config.mapper_obs_std_dev)); + ar(CEREAL_NVP(config.mapper_obs_huber_thresh)); + ar(CEREAL_NVP(config.mapper_detection_num_points)); + ar(CEREAL_NVP(config.mapper_num_frames_to_match)); + ar(CEREAL_NVP(config.mapper_frames_to_match_threshold)); + ar(CEREAL_NVP(config.mapper_min_matches)); + ar(CEREAL_NVP(config.mapper_ransac_threshold)); + ar(CEREAL_NVP(config.mapper_min_track_length)); + ar(CEREAL_NVP(config.mapper_max_hamming_distance)); + ar(CEREAL_NVP(config.mapper_second_best_test_ratio)); + ar(CEREAL_NVP(config.mapper_bow_num_bits)); + ar(CEREAL_NVP(config.mapper_min_triangulation_dist)); + ar(CEREAL_NVP(config.mapper_no_factor_weights)); + ar(CEREAL_NVP(config.mapper_use_factors)); + + ar(CEREAL_NVP(config.mapper_use_lm)); + ar(CEREAL_NVP(config.mapper_lm_lambda_min)); + ar(CEREAL_NVP(config.mapper_lm_lambda_max)); +} +} // namespace cereal diff --git a/src/vi_estimator/ba_base.cpp b/src/vi_estimator/ba_base.cpp new file mode 100644 index 0000000..187a5d0 --- /dev/null +++ b/src/vi_estimator/ba_base.cpp @@ -0,0 +1,604 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include +#include +#include + +#include + +namespace basalt { + +template +void BundleAdjustmentBase::optimize_single_frame_pose( + PoseStateWithLin& state_t, + const std::vector>& connected_obs) const { + const int num_iter = 2; + + struct AbsLinData { + Mat4 T_t_h; + Mat6 d_rel_d_t; + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW; + }; + + for (int iter = 0; iter < num_iter; iter++) { + Scalar error = 0; + Mat6 Ht; + Vec6 bt; + + Ht.setZero(); + bt.setZero(); + + std::unordered_map, AbsLinData> + abs_lin_data; + + for (size_t cam_id = 0; cam_id < connected_obs.size(); cam_id++) { + TimeCamId tcid_t(state_t.getT_ns(), cam_id); + for (const auto& lm_id : connected_obs[cam_id]) { + const Keypoint& kpt_pos = lmdb.getLandmark(lm_id); + std::pair map_key(kpt_pos.host_kf_id, tcid_t); + + if (abs_lin_data.count(map_key) == 0) { + const PoseStateWithLin& state_h = + frame_poses.at(kpt_pos.host_kf_id.frame_id); + + BASALT_ASSERT(kpt_pos.host_kf_id.frame_id != state_t.getT_ns()); + + AbsLinData& ald = abs_lin_data[map_key]; + + SE3 T_t_h_sophus = computeRelPose( + state_h.getPose(), calib.T_i_c[kpt_pos.host_kf_id.cam_id], + state_t.getPose(), calib.T_i_c[cam_id], nullptr, &ald.d_rel_d_t); + ald.T_t_h = T_t_h_sophus.matrix(); + } + } + } + + for (size_t cam_id = 0; cam_id < connected_obs.size(); cam_id++) { + std::visit( + [&](const auto& cam) { + for (const auto& lm_id : connected_obs[cam_id]) { + TimeCamId tcid_t(state_t.getT_ns(), cam_id); + + const Keypoint& kpt_pos = lmdb.getLandmark(lm_id); + const Vec2& kpt_obs = kpt_pos.obs.at(tcid_t); + const AbsLinData& ald = + abs_lin_data.at(std::make_pair(kpt_pos.host_kf_id, tcid_t)); + + Vec2 res; + Eigen::Matrix d_res_d_xi; + bool valid = linearizePoint(kpt_obs, kpt_pos, ald.T_t_h, cam, res, + &d_res_d_xi); + + if (valid) { + Scalar e = res.norm(); + Scalar huber_weight = + e < huber_thresh ? 
Scalar(1.0) : huber_thresh / e; + Scalar obs_weight = huber_weight / (obs_std_dev * obs_std_dev); + + error += Scalar(0.5) * (2 - huber_weight) * obs_weight * + res.transpose() * res; + + d_res_d_xi *= ald.d_rel_d_t; + + Ht.noalias() += d_res_d_xi.transpose() * d_res_d_xi; + bt.noalias() += d_res_d_xi.transpose() * res; + } + } + }, + calib.intrinsics[cam_id].variant); + } + + // Add small damping for GN + constexpr Scalar lambda = 1e-6; + Vec6 diag = Ht.diagonal(); + diag *= lambda; + Ht.diagonal().array() += diag.array().max(lambda); + + // std::cout << "pose opt error " << error << std::endl; + Vec6 inc = -Ht.ldlt().solve(bt); + state_t.applyInc(inc); + } + // std::cout << "=============================" << std::endl; +} + +template +void BundleAdjustmentBase::computeError( + Scalar& error, + std::map>>* outliers, + Scalar outlier_threshold) const { + std::vector host_frames; + for (const auto& [tcid, _] : lmdb.getObservations()) { + host_frames.push_back(tcid); + } + + tbb::concurrent_unordered_map>> + outliers_concurrent; + + auto body = [&](const tbb::blocked_range& range, Scalar local_error) { + for (size_t r = range.begin(); r != range.end(); ++r) { + const TimeCamId& tcid_h = host_frames[r]; + + for (const auto& obs_kv : lmdb.getObservations().at(tcid_h)) { + const TimeCamId& tcid_t = obs_kv.first; + + Mat4 T_t_h; + + if (tcid_h != tcid_t) { + PoseStateWithLin state_h = getPoseStateWithLin(tcid_h.frame_id); + PoseStateWithLin state_t = getPoseStateWithLin(tcid_t.frame_id); + + Sophus::SE3 T_t_h_sophus = + computeRelPose(state_h.getPose(), calib.T_i_c[tcid_h.cam_id], + state_t.getPose(), calib.T_i_c[tcid_t.cam_id]); + + T_t_h = T_t_h_sophus.matrix(); + } else { + T_t_h.setIdentity(); + } + + std::visit( + [&](const auto& cam) { + for (KeypointId kpt_id : obs_kv.second) { + const Keypoint& kpt_pos = lmdb.getLandmark(kpt_id); + const Vec2& kpt_obs = kpt_pos.obs.at(tcid_t); + + Vec2 res; + + bool valid = linearizePoint(kpt_obs, kpt_pos, T_t_h, cam, res); + + 
if (valid) { + Scalar e = res.norm(); + + if (outliers && e > outlier_threshold) { + outliers_concurrent[kpt_id].emplace_back( + tcid_t, tcid_h != tcid_t ? e : -2); + } + + Scalar huber_weight = + e < huber_thresh ? Scalar(1.0) : huber_thresh / e; + Scalar obs_weight = + huber_weight / (obs_std_dev * obs_std_dev); + + local_error += Scalar(0.5) * (2 - huber_weight) * obs_weight * + res.transpose() * res; + } else { + if (outliers) { + outliers_concurrent[kpt_id].emplace_back( + tcid_t, tcid_h != tcid_t ? -1 : -2); + } + } + } + }, + calib.intrinsics[tcid_t.cam_id].variant); + } + } + + return local_error; + }; + + tbb::blocked_range range(0, host_frames.size()); + Scalar init = 0; + auto join = std::plus(); + error = tbb::parallel_reduce(range, init, body, join); + + if (outliers) { + outliers->clear(); + for (auto& [k, v] : outliers_concurrent) { + outliers->emplace(k, std::move(v)); + } + } +} + +template +template +void BundleAdjustmentBase::get_current_points( + Eigen::aligned_vector>& points, + std::vector& ids) const { + points.clear(); + ids.clear(); + + for (const auto& tcid_host : lmdb.getHostKfs()) { + Sophus::SE3 T_w_i; + + int64_t id = tcid_host.frame_id; + if (frame_states.count(id) > 0) { + PoseVelBiasStateWithLin state = frame_states.at(id); + T_w_i = state.getState().T_w_i; + } else if (frame_poses.count(id) > 0) { + PoseStateWithLin state = frame_poses.at(id); + + T_w_i = state.getPose(); + } else { + std::cout << "Unknown frame id: " << id << std::endl; + std::abort(); + } + + const Sophus::SE3& T_i_c = calib.T_i_c[tcid_host.cam_id]; + Mat4 T_w_c = (T_w_i * T_i_c).matrix(); + + for (const Keypoint* kpt_pos : + lmdb.getLandmarksForHost(tcid_host)) { + Vec4 pt_cam = StereographicParam::unproject(kpt_pos->direction); + pt_cam[3] = kpt_pos->inv_dist; + + Vec4 pt_w = T_w_c * pt_cam; + + points.emplace_back( + (pt_w.template head<3>() / pt_w[3]).template cast()); + ids.emplace_back(1); + } + } +} + +template +void 
BundleAdjustmentBase::filterOutliers(Scalar outlier_threshold, + int min_num_obs) { + Scalar error; + std::map>> outliers; + computeError(error, &outliers, outlier_threshold); + + // std::cout << "============================================" << + // std::endl; std::cout << "Num landmarks: " << lmdb.numLandmarks() << " + // with outliners + // " + // << outliers.size() << std::endl; + + for (const auto& kv : outliers) { + int num_obs = lmdb.numObservations(kv.first); + int num_outliers = kv.second.size(); + + bool remove = false; + + if (num_obs - num_outliers < min_num_obs) remove = true; + + // std::cout << "\tlm_id: " << kv.first << " num_obs: " << num_obs + // << " outliers: " << num_outliers << " ["; + + for (const auto& kv2 : kv.second) { + if (kv2.second == -2) remove = true; + + // std::cout << kv2.second << ", "; + } + + // std::cout << "] " << std::endl; + + if (remove) { + lmdb.removeLandmark(kv.first); + } else { + std::set outliers; + for (const auto& kv2 : kv.second) outliers.emplace(kv2.first); + lmdb.removeObservations(kv.first, outliers); + } + } + + // std::cout << "============================================" << + // std::endl; +} + +template +void BundleAdjustmentBase::computeDelta(const AbsOrderMap& marg_order, + VecX& delta) const { + size_t marg_size = marg_order.total_size; + delta.setZero(marg_size); + for (const auto& kv : marg_order.abs_order_map) { + if (kv.second.second == POSE_SIZE) { + BASALT_ASSERT(frame_poses.at(kv.first).isLinearized()); + delta.template segment(kv.second.first) = + frame_poses.at(kv.first).getDelta(); + } else if (kv.second.second == POSE_VEL_BIAS_SIZE) { + BASALT_ASSERT(frame_states.at(kv.first).isLinearized()); + delta.template segment(kv.second.first) = + frame_states.at(kv.first).getDelta(); + } else { + BASALT_ASSERT(false); + } + } +} + +template +Scalar_ BundleAdjustmentBase::computeModelCostChange( + const MatX& H, const VecX& b, const VecX& inc) const { + // Linearized model cost + // + // L(x) = 0.5 || 
J*x + r ||^2 + // = 0.5 x^T J^T J x + x^T J r + 0.5 r^T r + // = 0.5 x^T H x + x^T b + 0.5 r^T r, + // + // given in normal equation form as + // + // H = J^T J, + // b = J^T r. + // + // The expected decrease in cost for the computed increment is + // + // l_diff = L(0) - L(inc) + // = - 0.5 inc^T H inc - inc^T b + // = - inc^T (0.5 H inc + b) + + Scalar l_diff = -inc.dot(Scalar(0.5) * (H * inc) + b); + + return l_diff; +} + +template +template +void BundleAdjustmentBase::computeProjections( + std::vector>>& data, + FrameId last_state_t_ns) const { + for (const auto& kv : lmdb.getObservations()) { + const TimeCamId& tcid_h = kv.first; + + for (const auto& obs_kv : kv.second) { + const TimeCamId& tcid_t = obs_kv.first; + + if (tcid_t.frame_id != last_state_t_ns) continue; + + Mat4 T_t_h; + if (tcid_h != tcid_t) { + PoseStateWithLin state_h = getPoseStateWithLin(tcid_h.frame_id); + PoseStateWithLin state_t = getPoseStateWithLin(tcid_t.frame_id); + + Sophus::SE3 T_t_h_sophus = + computeRelPose(state_h.getPose(), calib.T_i_c[tcid_h.cam_id], + state_t.getPose(), calib.T_i_c[tcid_t.cam_id]); + + T_t_h = T_t_h_sophus.matrix(); + } else { + T_t_h.setIdentity(); + } + + std::visit( + [&](const auto& cam) { + for (KeypointId kpt_id : obs_kv.second) { + const Keypoint& kpt_pos = lmdb.getLandmark(kpt_id); + + Vec2 res; + Vec4 proj; + + using CamT = std::decay_t; + linearizePoint(Vec2::Zero(), kpt_pos, T_t_h, cam, + res, nullptr, nullptr, &proj); + + proj[3] = kpt_id; + data[tcid_t.cam_id].emplace_back(proj.template cast()); + } + }, + calib.intrinsics[tcid_t.cam_id].variant); + } + } +} + +template +void BundleAdjustmentBase::linearizeMargPrior( + const MargLinData& mld, const AbsOrderMap& aom, MatX& abs_H, + VecX& abs_b, Scalar& marg_prior_error) const { + // Prior is ordered to be in the top left corner of Hessian + + BASALT_ASSERT(size_t(mld.H.cols()) == mld.order.total_size); + + // Check if the order of variables is the same, and it's indeed top-left + // corner + for 
(const auto& kv : mld.order.abs_order_map) { + UNUSED(aom); + UNUSED(kv); + BASALT_ASSERT(aom.abs_order_map.at(kv.first) == kv.second); + BASALT_ASSERT(kv.second.first < int(mld.order.total_size)); + } + + // Quadratic prior and "delta" of the current state to the original + // linearization point give cost function + // + // P(x) = 0.5 || J*(delta+x) + r ||^2, + // + // alternatively stored in quadratic form as + // + // Hmarg = J^T J, + // bmarg = J^T r. + // + // This is now to be linearized at x=0, so we get linearization + // + // P(x) = 0.5 || J*x + (J*delta + r) ||^2, + // + // with Jacobian J and residual J*delta + r. The normal equations are + // + // H*x + b = 0, + // H = J^T J = Hmarg, + // b = J^T (J*delta + r) = Hmarg*delta + bmarg. + // + // The current cost is + // + // P(0) = 0.5 || J*delta + r ||^2 + // = 0.5 delta^T J^T J delta + delta^T J^T r + 0.5 r^T r. + // = 0.5 delta^T Hmarg delta + delta^T bmarg + 0.5 r^T r. + // + // Note: Since the r^T r term does not change with delta, we drop it from the + // error computation. The main need for the error value is for comparing + // the cost before and after an update to delta in the optimization loop. This + // also means the computed error can be negative. 
+ + const size_t marg_size = mld.order.total_size; + + VecX delta; + computeDelta(mld.order, delta); + + if (mld.is_sqrt) { + abs_H.topLeftCorner(marg_size, marg_size) += mld.H.transpose() * mld.H; + + abs_b.head(marg_size) += mld.H.transpose() * (mld.b + mld.H * delta); + + marg_prior_error = delta.transpose() * mld.H.transpose() * + (Scalar(0.5) * mld.H * delta + mld.b); + } else { + abs_H.topLeftCorner(marg_size, marg_size) += mld.H; + + abs_b.head(marg_size) += mld.H * delta + mld.b; + + marg_prior_error = + delta.transpose() * (Scalar(0.5) * mld.H * delta + mld.b); + } +} + +template +void BundleAdjustmentBase::computeMargPriorError( + const MargLinData& mld, Scalar& marg_prior_error) const { + BASALT_ASSERT(size_t(mld.H.cols()) == mld.order.total_size); + + // The current cost is (see above in linearizeMargPrior()) + // + // P(0) = 0.5 || J*delta + r ||^2 + // = 0.5 delta^T J^T J delta + delta^T J^T r + 0.5 r^T r + // = 0.5 delta^T Hmarg delta + delta^T bmarg + 0.5 r^T r. + // + // Note: Since the r^T r term does not change with delta, we drop it from the + // error computation. The main need for the error value is for comparing + // the cost before and after an update to delta in the optimization loop. This + // also means the computed error can be negative. + + VecX delta; + computeDelta(mld.order, delta); + + if (mld.is_sqrt) { + marg_prior_error = delta.transpose() * mld.H.transpose() * + (Scalar(0.5) * mld.H * delta + mld.b); + } else { + marg_prior_error = + delta.transpose() * (Scalar(0.5) * mld.H * delta + mld.b); + } +} + +template +Scalar_ BundleAdjustmentBase::computeMargPriorModelCostChange( + const MargLinData& mld, const VecX& marg_scaling, + const VecX& marg_pose_inc) const { + // Quadratic prior and "delta" of the current state to the original + // linearization point give cost function + // + // P(x) = 0.5 || J*(delta+x) + r ||^2, + // + // alternatively stored in quadratic form as + // + // Hmarg = J^T J, + // bmarg = J^T r. 
+ // + // We want to compute the model cost change. The model function is + // + // L(inc) = P(inc) = 0.5 || J*inc + (J*delta + r) ||^2 + // + // By setting rlin = J*delta + r we get + // + // L(inc) = 0.5 || J*inc + rlin ||^2 + // = P(0) + inc^T J^T rlin + 0.5 inc^T J^T J inc + // + // and thus the expected decrease in cost for the computed increment is + // + // l_diff = L(0) - L(inc) + // = - inc^T J^T rlin - 0.5 inc^T J^T J inc + // = - inc^T J^T (rlin + 0.5 J inc) + // = - (J inc)^T (rlin + 0.5 (J inc)) + // = - (J inc)^T (J*delta + r + 0.5 (J inc)). + // + // Alternatively, for squared prior storage, we get + // + // l_diff = - inc^T (Hmarg delta + bmarg + 0.5 Hmarg inc) + // = - inc^T (Hmarg (delta + 0.5 inc) + bmarg) + // + // For Jacobian scaling we need to take extra care. Note that we store the + // scale separately and don't actually update marg_sqrt_H and marg_sqrt_b + // in place with the scale. So in the computation above, we need to scale + // marg_sqrt_H whenever it is multiplied with inc, but NOT when it is + // multiplied with delta, since delta is also WITHOUT scaling. + + VecX delta; + computeDelta(mld.order, delta); + + VecX J_inc = marg_pose_inc; + if (marg_scaling.rows() > 0) J_inc = marg_scaling.asDiagonal() * J_inc; + + Scalar l_diff; + + if (mld.is_sqrt) { + // No scaling here. This is b part not Jacobian + const VecX b_Jdelta = mld.H * delta + mld.b; + + J_inc = mld.H * J_inc; + l_diff = -J_inc.transpose() * (b_Jdelta + Scalar(0.5) * J_inc); + } else { + l_diff = -J_inc.dot(mld.H * (delta + Scalar(0.5) * J_inc) + mld.b); + } + + return l_diff; +} + +// ////////////////////////////////////////////////////////////////// +// instatiate templates + +// Note: double specialization is unconditional, b/c NfrMapper depends on it. 
+//#ifdef BASALT_INSTANTIATIONS_DOUBLE +template class BundleAdjustmentBase; + +template void BundleAdjustmentBase::get_current_points( + Eigen::aligned_vector>& points, + std::vector& ids) const; + +template void BundleAdjustmentBase::computeProjections( + std::vector>>& data, + FrameId last_state_t_ns) const; +//#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +template class BundleAdjustmentBase; + +// template void BundleAdjustmentBase::get_current_points( +// Eigen::aligned_vector>& points, +// std::vector& ids) const; + +template void BundleAdjustmentBase::get_current_points( + Eigen::aligned_vector>& points, + std::vector& ids) const; + +// template void BundleAdjustmentBase::computeProjections( +// std::vector>>& data, +// FrameId last_state_t_ns) const; + +template void BundleAdjustmentBase::computeProjections( + std::vector>>& data, + FrameId last_state_t_ns) const; +#endif + +} // namespace basalt diff --git a/src/vi_estimator/landmark_database.cpp b/src/vi_estimator/landmark_database.cpp new file mode 100644 index 0000000..cfa8386 --- /dev/null +++ b/src/vi_estimator/landmark_database.cpp @@ -0,0 +1,247 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include + +namespace basalt { + +template +void LandmarkDatabase::addLandmark(KeypointId lm_id, + const Keypoint &pos) { + auto &kpt = kpts[lm_id]; + kpt.direction = pos.direction; + kpt.inv_dist = pos.inv_dist; + kpt.host_kf_id = pos.host_kf_id; +} + +template +void LandmarkDatabase::removeFrame(const FrameId &frame) { + for (auto it = kpts.begin(); it != kpts.end();) { + for (auto it2 = it->second.obs.begin(); it2 != it->second.obs.end();) { + if (it2->first.frame_id == frame) + it2 = removeLandmarkObservationHelper(it, it2); + else + it2++; + } + + if (it->second.obs.size() < min_num_obs) { + it = removeLandmarkHelper(it); + } else { + ++it; + } + } +} + +template +void LandmarkDatabase::removeKeyframes( + const std::set &kfs_to_marg, + const std::set &poses_to_marg, + const std::set &states_to_marg_all) { + for (auto it = kpts.begin(); it != kpts.end();) { + if (kfs_to_marg.count(it->second.host_kf_id.frame_id) > 0) { + it = removeLandmarkHelper(it); + } else { + for (auto it2 = 
it->second.obs.begin(); it2 != it->second.obs.end();) { + FrameId fid = it2->first.frame_id; + if (poses_to_marg.count(fid) > 0 || states_to_marg_all.count(fid) > 0 || + kfs_to_marg.count(fid) > 0) + it2 = removeLandmarkObservationHelper(it, it2); + else + it2++; + } + + if (it->second.obs.size() < min_num_obs) { + it = removeLandmarkHelper(it); + } else { + ++it; + } + } + } +} + +template +std::vector LandmarkDatabase::getHostKfs() const { + std::vector res; + + for (const auto &kv : observations) res.emplace_back(kv.first); + + return res; +} + +template +std::vector *> +LandmarkDatabase::getLandmarksForHost(const TimeCamId &tcid) const { + std::vector *> res; + + for (const auto &[k, obs] : observations.at(tcid)) + for (const auto &v : obs) res.emplace_back(&kpts.at(v)); + + return res; +} + +template +void LandmarkDatabase::addObservation( + const TimeCamId &tcid_target, const KeypointObservation &o) { + auto it = kpts.find(o.kpt_id); + BASALT_ASSERT(it != kpts.end()); + + it->second.obs[tcid_target] = o.pos; + + observations[it->second.host_kf_id][tcid_target].insert(it->first); +} + +template +Keypoint &LandmarkDatabase::getLandmark(KeypointId lm_id) { + return kpts.at(lm_id); +} + +template +const Keypoint &LandmarkDatabase::getLandmark( + KeypointId lm_id) const { + return kpts.at(lm_id); +} + +template +const std::unordered_map>> + &LandmarkDatabase::getObservations() const { + return observations; +} + +template +const Eigen::aligned_unordered_map> + &LandmarkDatabase::getLandmarks() const { + return kpts; +} + +template +bool LandmarkDatabase::landmarkExists(int lm_id) const { + return kpts.count(lm_id) > 0; +} + +template +size_t LandmarkDatabase::numLandmarks() const { + return kpts.size(); +} + +template +int LandmarkDatabase::numObservations() const { + int num_observations = 0; + + for (const auto &[_, val_map] : observations) { + for (const auto &[_, val] : val_map) { + num_observations += val.size(); + } + } + + return num_observations; +} + 
+template +int LandmarkDatabase::numObservations(KeypointId lm_id) const { + return kpts.at(lm_id).obs.size(); +} + +template +typename LandmarkDatabase::MapIter +LandmarkDatabase::removeLandmarkHelper( + LandmarkDatabase::MapIter it) { + auto host_it = observations.find(it->second.host_kf_id); + + for (const auto &[k, v] : it->second.obs) { + auto target_it = host_it->second.find(k); + target_it->second.erase(it->first); + + if (target_it->second.empty()) host_it->second.erase(target_it); + } + + if (host_it->second.empty()) observations.erase(host_it); + + return kpts.erase(it); +} + +template +typename Keypoint::MapIter +LandmarkDatabase::removeLandmarkObservationHelper( + LandmarkDatabase::MapIter it, + typename Keypoint::MapIter it2) { + auto host_it = observations.find(it->second.host_kf_id); + auto target_it = host_it->second.find(it2->first); + target_it->second.erase(it->first); + + if (target_it->second.empty()) host_it->second.erase(target_it); + if (host_it->second.empty()) observations.erase(host_it); + + return it->second.obs.erase(it2); +} + +template +void LandmarkDatabase::removeLandmark(KeypointId lm_id) { + auto it = kpts.find(lm_id); + if (it != kpts.end()) removeLandmarkHelper(it); +} + +template +void LandmarkDatabase::removeObservations( + KeypointId lm_id, const std::set &obs) { + auto it = kpts.find(lm_id); + BASALT_ASSERT(it != kpts.end()); + + for (auto it2 = it->second.obs.begin(); it2 != it->second.obs.end();) { + if (obs.count(it2->first) > 0) { + it2 = removeLandmarkObservationHelper(it, it2); + } else + it2++; + } + + if (it->second.obs.size() < min_num_obs) { + removeLandmarkHelper(it); + } +} + +// ////////////////////////////////////////////////////////////////// +// instatiate templates + +// Note: double specialization is unconditional, b/c NfrMapper depends on it. 
+//#ifdef BASALT_INSTANTIATIONS_DOUBLE +template class LandmarkDatabase; +//#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +template class LandmarkDatabase; +#endif + +} // namespace basalt diff --git a/src/vi_estimator/marg_helper.cpp b/src/vi_estimator/marg_helper.cpp new file mode 100644 index 0000000..25e40a7 --- /dev/null +++ b/src/vi_estimator/marg_helper.cpp @@ -0,0 +1,361 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include + +namespace basalt { + +template +void MargHelper::marginalizeHelperSqToSq( + MatX& abs_H, VecX& abs_b, const std::set& idx_to_keep, + const std::set& idx_to_marg, MatX& marg_H, VecX& marg_b) { + int keep_size = idx_to_keep.size(); + int marg_size = idx_to_marg.size(); + + BASALT_ASSERT(keep_size + marg_size == abs_H.cols()); + + // Fill permutation matrix + Eigen::Matrix indices(idx_to_keep.size() + + idx_to_marg.size()); + + { + auto it = idx_to_keep.begin(); + for (size_t i = 0; i < idx_to_keep.size(); i++) { + indices[i] = *it; + it++; + } + } + + { + auto it = idx_to_marg.begin(); + for (size_t i = 0; i < idx_to_marg.size(); i++) { + indices[idx_to_keep.size() + i] = *it; + it++; + } + } + + const Eigen::PermutationWrapper> p( + indices); + + const Eigen::PermutationMatrix pt = p.transpose(); + + abs_b.applyOnTheLeft(pt); + abs_H.applyOnTheLeft(pt); + abs_H.applyOnTheRight(p); + + // Use of fullPivLu compared to alternatives was determined experimentally. It + // is more stable than ldlt when the matrix is numerically close ot + // indefinite, but it is faster than the even more stable + // fullPivHouseholderQr. 
+ + // auto H_mm_decomposition = + // abs_H.bottomRightCorner(marg_size, marg_size).ldlt(); + + // auto H_mm_decomposition = + // abs_H.bottomRightCorner(marg_size, marg_size).fullPivLu(); + // MatX H_mm_inv = H_mm_decomposition.inverse(); + + // auto H_mm_decomposition = + // abs_H.bottomRightCorner(marg_size, marg_size).colPivHouseholderQr(); + + // DO NOT USE!!! + // Pseudoinverse with SVD. Uses iterative method and results in severe low + // accuracy. Use the version below (COD) for pseudoinverse + // auto H_mm_decomposition = + // abs_H.bottomRightCorner(marg_size, marg_size) + // .jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV); + + // Eigen version of pseudoinverse + auto H_mm_decomposition = abs_H.bottomRightCorner(marg_size, marg_size) + .completeOrthogonalDecomposition(); + MatX H_mm_inv = H_mm_decomposition.pseudoInverse(); + + // Should be more numerially stable version of: + abs_H.topRightCorner(keep_size, marg_size) *= H_mm_inv; + // abs_H.topRightCorner(keep_size, marg_size) = + // H_mm_decomposition + // .solve(abs_H.topRightCorner(keep_size, marg_size).transpose()) + // .transpose(); + + marg_H = abs_H.topLeftCorner(keep_size, keep_size); + marg_b = abs_b.head(keep_size); + + marg_H -= abs_H.topRightCorner(keep_size, marg_size) * + abs_H.bottomLeftCorner(marg_size, keep_size); + marg_b -= abs_H.topRightCorner(keep_size, marg_size) * abs_b.tail(marg_size); + + abs_H.resize(0, 0); + abs_b.resize(0); +} + +template +void MargHelper::marginalizeHelperSqToSqrt( + MatX& abs_H, VecX& abs_b, const std::set& idx_to_keep, + const std::set& idx_to_marg, MatX& marg_sqrt_H, VecX& marg_sqrt_b) { + // TODO: We might lose the strong initial pose prior if there are no obs + // during marginalization (e.g. during first marginalization). Unclear when + // this can happen. Keeping keyframes w/o any points in the optimization + // window might not make sense. --> Double check if and when this currently + // occurs. If no, add asserts. 
If yes, somehow deal with it differently. + + int keep_size = idx_to_keep.size(); + int marg_size = idx_to_marg.size(); + + BASALT_ASSERT(keep_size + marg_size == abs_H.cols()); + + // Fill permutation matrix + Eigen::Matrix indices(idx_to_keep.size() + + idx_to_marg.size()); + + { + auto it = idx_to_keep.begin(); + for (size_t i = 0; i < idx_to_keep.size(); i++) { + indices[i] = *it; + it++; + } + } + + { + auto it = idx_to_marg.begin(); + for (size_t i = 0; i < idx_to_marg.size(); i++) { + indices[idx_to_keep.size() + i] = *it; + it++; + } + } + + const Eigen::PermutationWrapper> p( + indices); + + const Eigen::PermutationMatrix pt = p.transpose(); + + abs_b.applyOnTheLeft(pt); + abs_H.applyOnTheLeft(pt); + abs_H.applyOnTheRight(p); + + // Use of fullPivLu compared to alternatives was determined experimentally. It + // is more stable than ldlt when the matrix is numerically close ot + // indefinite, but it is faster than the even more stable + // fullPivHouseholderQr. + + // auto H_mm_decomposition = + // abs_H.bottomRightCorner(marg_size, marg_size).ldlt(); + + // auto H_mm_decomposition = + // abs_H.bottomRightCorner(marg_size, marg_size).fullPivLu(); + // MatX H_mm_inv = H_mm_decomposition.inverse(); + + // auto H_mm_decomposition = + // abs_H.bottomRightCorner(marg_size, marg_size).colPivHouseholderQr(); + + // DO NOT USE!!! + // Pseudoinverse with SVD. Uses iterative method and results in severe low + // accuracy. 
Use the version below (COD) for pseudoinverse + // auto H_mm_decomposition = + // abs_H.bottomRightCorner(marg_size, marg_size) + // .jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV); + + // Eigen version of pseudoinverse + auto H_mm_decomposition = abs_H.bottomRightCorner(marg_size, marg_size) + .completeOrthogonalDecomposition(); + MatX H_mm_inv = H_mm_decomposition.pseudoInverse(); + + // Should be more numerially stable version of: + abs_H.topRightCorner(keep_size, marg_size) *= H_mm_inv; + // abs_H.topRightCorner(keep_size, marg_size).noalias() = + // H_mm_decomposition.solve(abs_H.bottomLeftCorner(marg_size, keep_size)) + // .transpose(); + + MatX marg_H; + VecX marg_b; + + marg_H = abs_H.topLeftCorner(keep_size, keep_size); + marg_b = abs_b.head(keep_size); + + marg_H -= abs_H.topRightCorner(keep_size, marg_size) * + abs_H.bottomLeftCorner(marg_size, keep_size); + marg_b -= abs_H.topRightCorner(keep_size, marg_size) * abs_b.tail(marg_size); + + Eigen::LDLT> ldlt(marg_H); + + VecX D_sqrt = ldlt.vectorD().array().max(0).sqrt().matrix(); + + // After LDLT, we have + // marg_H == P^T*L*D*L^T*P, + // so we compute the square root as + // marg_sqrt_H = sqrt(D)*L^T*P, + // such that + // marg_sqrt_H^T marg_sqrt_H == marg_H. + marg_sqrt_H.setIdentity(keep_size, keep_size); + marg_sqrt_H = ldlt.transpositionsP() * marg_sqrt_H; + marg_sqrt_H = ldlt.matrixU() * marg_sqrt_H; // U == L^T + marg_sqrt_H = D_sqrt.asDiagonal() * marg_sqrt_H; + + // For the right hand side, we want + // marg_b == marg_sqrt_H^T * marg_sqrt_b + // so we compute + // marg_sqrt_b = (marg_sqrt_H^T)^-1 * marg_b + // = (P^T*L*sqrt(D))^-1 * marg_b + // = sqrt(D)^-1 * L^-1 * P * marg_b + marg_sqrt_b = ldlt.transpositionsP() * marg_b; + ldlt.matrixL().solveInPlace(marg_sqrt_b); + + // We already clamped negative values in D_sqrt to 0 above, but for values + // close to 0 we set b to 0. 
+ for (int i = 0; i < marg_sqrt_b.size(); ++i) { + if (D_sqrt(i) > std::sqrt(std::numeric_limits::min())) + marg_sqrt_b(i) /= D_sqrt(i); + else + marg_sqrt_b(i) = 0; + } + + // std::cout << "marg_sqrt_H diff: " + // << (marg_sqrt_H.transpose() * marg_sqrt_H - marg_H).norm() << " + // " + // << marg_H.norm() << std::endl; + + // std::cout << "marg_sqrt_b diff: " + // << (marg_sqrt_H.transpose() * marg_sqrt_b - marg_b).norm() + // << std::endl; + + abs_H.resize(0, 0); + abs_b.resize(0); +} + +template +void MargHelper::marginalizeHelperSqrtToSqrt( + MatX& Q2Jp, VecX& Q2r, const std::set& idx_to_keep, + const std::set& idx_to_marg, MatX& marg_sqrt_H, VecX& marg_sqrt_b) { + Eigen::Index keep_size = idx_to_keep.size(); + Eigen::Index marg_size = idx_to_marg.size(); + + BASALT_ASSERT(keep_size + marg_size == Q2Jp.cols()); + BASALT_ASSERT(Q2Jp.rows() == Q2r.rows()); + + // Fill permutation matrix + Eigen::Matrix indices(idx_to_keep.size() + + idx_to_marg.size()); + + { + auto it = idx_to_marg.begin(); + for (size_t i = 0; i < idx_to_marg.size(); i++) { + indices[i] = *it; + it++; + } + } + + { + auto it = idx_to_keep.begin(); + for (size_t i = 0; i < idx_to_keep.size(); i++) { + indices[idx_to_marg.size() + i] = *it; + it++; + } + } + + // TODO: check if using TranspositionMatrix is faster + const Eigen::PermutationWrapper> p( + indices); + + Q2Jp.applyOnTheRight(p); + + Eigen::Index marg_rank = 0; + Eigen::Index total_rank = 0; + + { + const Scalar rank_threshold = + std::sqrt(std::numeric_limits::epsilon()); + + const Eigen::Index rows = Q2Jp.rows(); + const Eigen::Index cols = Q2Jp.cols(); + + VecX tempVector; + tempVector.resize(cols + 1); + Scalar* tempData = tempVector.data(); + + for (Eigen::Index k = 0; k < cols && total_rank < rows; ++k) { + Eigen::Index remainingRows = rows - total_rank; + Eigen::Index remainingCols = cols - k - 1; + + Scalar beta; + Scalar hCoeff; + Q2Jp.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeff, beta); + + if (std::abs(beta) > 
rank_threshold) { + Q2Jp.coeffRef(total_rank, k) = beta; + + Q2Jp.bottomRightCorner(remainingRows, remainingCols) + .applyHouseholderOnTheLeft(Q2Jp.col(k).tail(remainingRows - 1), + hCoeff, tempData + k + 1); + Q2r.tail(remainingRows) + .applyHouseholderOnTheLeft(Q2Jp.col(k).tail(remainingRows - 1), + hCoeff, tempData + cols); + total_rank++; + } else { + Q2Jp.coeffRef(total_rank, k) = 0; + } + + // Overwrite householder vectors with 0 + Q2Jp.col(k).tail(remainingRows - 1).setZero(); + + // Save the rank of marginalize-out part + if (k == marg_size - 1) { + marg_rank = total_rank; + } + } + } + + Eigen::Index keep_valid_rows = + std::max(total_rank - marg_rank, Eigen::Index(1)); + + marg_sqrt_H = Q2Jp.block(marg_rank, marg_size, keep_valid_rows, keep_size); + marg_sqrt_b = Q2r.segment(marg_rank, keep_valid_rows); + + Q2Jp.resize(0, 0); + Q2r.resize(0); +} + +// ////////////////////////////////////////////////////////////////// +// instatiate templates + +// Note: double specialization is unconditional, b/c NfrMapper depends on it. +//#ifdef BASALT_INSTANTIATIONS_DOUBLE +template class MargHelper; +//#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +template class MargHelper; +#endif + +} // namespace basalt diff --git a/src/vi_estimator/nfr_mapper.cpp b/src/vi_estimator/nfr_mapper.cpp new file mode 100644 index 0000000..7d5fe4f --- /dev/null +++ b/src/vi_estimator/nfr_mapper.cpp @@ -0,0 +1,758 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include +#include +#include + +#include + +namespace basalt { + +NfrMapper::NfrMapper(const Calibration& calib, const VioConfig& config) + : config(config), + lambda(config.mapper_lm_lambda_min), + min_lambda(config.mapper_lm_lambda_min), + max_lambda(config.mapper_lm_lambda_max), + lambda_vee(2) { + this->calib = calib; + this->obs_std_dev = config.mapper_obs_std_dev; + this->huber_thresh = config.mapper_obs_huber_thresh; + + hash_bow_database.reset(new HashBow<256>(config.mapper_bow_num_bits)); +} + +void NfrMapper::addMargData(MargData::Ptr& data) { + processMargData(*data); + bool valid = extractNonlinearFactors(*data); + + if (valid) { + for (const auto& kv : data->frame_poses) { + PoseStateWithLin p(kv.second.getT_ns(), kv.second.getPose()); + + frame_poses[kv.first] = p; + } + + for (const auto& kv : data->frame_states) { + if (data->kfs_all.count(kv.first) > 0) { + auto state = kv.second; + PoseStateWithLin p(state.getState().t_ns, + state.getState().T_w_i); + frame_poses[kv.first] = p; + } + } + } +} + +void NfrMapper::processMargData(MargData& m) { + BASALT_ASSERT(m.aom.total_size == size_t(m.abs_H.cols())); + + // std::cout << "rank " << m.abs_H.fullPivLu().rank() << " size " + // << m.abs_H.cols() << std::endl; + + AbsOrderMap aom_new; + std::set idx_to_keep; + std::set idx_to_marg; + + for (const auto& kv : m.aom.abs_order_map) { + if (kv.second.second == POSE_SIZE) { + for (size_t i = 0; i < POSE_SIZE; i++) + idx_to_keep.emplace(kv.second.first + i); + aom_new.abs_order_map.emplace(kv); + aom_new.total_size += POSE_SIZE; + } else if (kv.second.second == POSE_VEL_BIAS_SIZE) { + if (m.kfs_all.count(kv.first) > 0) { + for (size_t i = 0; i < POSE_SIZE; i++) + idx_to_keep.emplace(kv.second.first + i); + for (size_t i = POSE_SIZE; i < POSE_VEL_BIAS_SIZE; i++) + idx_to_marg.emplace(kv.second.first + i); + + aom_new.abs_order_map[kv.first] = + std::make_pair(aom_new.total_size, POSE_SIZE); + aom_new.total_size += 
POSE_SIZE; + + PoseStateWithLin p(m.frame_states.at(kv.first)); + m.frame_poses[kv.first] = p; + m.frame_states.erase(kv.first); + } else { + for (size_t i = 0; i < POSE_VEL_BIAS_SIZE; i++) + idx_to_marg.emplace(kv.second.first + i); + m.frame_states.erase(kv.first); + } + } else { + std::cerr << "Unknown size" << std::endl; + std::abort(); + } + + // std::cout << kv.first << " " << kv.second.first << " " << + // kv.second.second + // << std::endl; + } + + if (!idx_to_marg.empty()) { + Eigen::MatrixXd marg_H_new; + Eigen::VectorXd marg_b_new; + MargHelper::marginalizeHelperSqToSq( + m.abs_H, m.abs_b, idx_to_keep, idx_to_marg, marg_H_new, marg_b_new); + + // std::cout << "new rank " << marg_H_new.fullPivLu().rank() << " size " + // << marg_H_new.cols() << std::endl; + + m.abs_H = marg_H_new; + m.abs_b = marg_b_new; + m.aom = aom_new; + } + + BASALT_ASSERT(m.aom.total_size == size_t(m.abs_H.cols())); + + // save image data + { + for (const auto& v : m.opt_flow_res) { + img_data[v->t_ns] = v->input_images; + } + } +} + +bool NfrMapper::extractNonlinearFactors(MargData& m) { + size_t asize = m.aom.total_size; + // std::cout << "asize " << asize << std::endl; + + Eigen::FullPivHouseholderQR qr(m.abs_H); + if (qr.rank() != m.abs_H.cols()) return false; + + Eigen::MatrixXd cov_old = qr.solve(Eigen::MatrixXd::Identity(asize, asize)); + + int64_t kf_id = *m.kfs_to_marg.cbegin(); + int kf_start_idx = m.aom.abs_order_map.at(kf_id).first; + + auto state_kf = m.frame_poses.at(kf_id); + + Sophus::SE3d T_w_i_kf = state_kf.getPose(); + + Eigen::Vector3d pos = T_w_i_kf.translation(); + Eigen::Vector3d yaw_dir_body = + T_w_i_kf.so3().inverse() * Eigen::Vector3d::UnitX(); + + Sophus::Matrix d_pos_d_T_w_i; + Sophus::Matrix d_yaw_d_T_w_i; + Sophus::Matrix d_rp_d_T_w_i; + + absPositionError(T_w_i_kf, pos, &d_pos_d_T_w_i); + yawError(T_w_i_kf, yaw_dir_body, &d_yaw_d_T_w_i); + rollPitchError(T_w_i_kf, T_w_i_kf.so3(), &d_rp_d_T_w_i); + + { + Eigen::MatrixXd J; + J.setZero(POSE_SIZE, 
asize); + J.block<3, POSE_SIZE>(0, kf_start_idx) = d_pos_d_T_w_i; + J.block<1, POSE_SIZE>(3, kf_start_idx) = d_yaw_d_T_w_i; + J.block<2, POSE_SIZE>(4, kf_start_idx) = d_rp_d_T_w_i; + + Sophus::Matrix6d cov_new = J * cov_old * J.transpose(); + + // std::cout << "cov_new\n" << cov_new << std::endl; + + RollPitchFactor rpf; + rpf.t_ns = kf_id; + rpf.R_w_i_meas = T_w_i_kf.so3(); + + if (!config.mapper_no_factor_weights) { + rpf.cov_inv = cov_new.block<2, 2>(4, 4).inverse(); + } else { + rpf.cov_inv.setIdentity(); + } + + if (m.use_imu) { + roll_pitch_factors.emplace_back(rpf); + } + } + + for (int64_t other_id : m.kfs_all) { + if (m.frame_poses.count(other_id) == 0 || other_id == kf_id) { + continue; + } + + auto state_o = m.frame_poses.at(other_id); + + Sophus::SE3d T_w_i_o = state_o.getPose(); + Sophus::SE3d T_kf_o = T_w_i_kf.inverse() * T_w_i_o; + + int o_start_idx = m.aom.abs_order_map.at(other_id).first; + + Sophus::Matrix6d d_res_d_T_w_i, d_res_d_T_w_j; + relPoseError(T_kf_o, T_w_i_kf, T_w_i_o, &d_res_d_T_w_i, &d_res_d_T_w_j); + + Eigen::MatrixXd J; + J.setZero(POSE_SIZE, asize); + J.block(0, kf_start_idx) = d_res_d_T_w_i; + J.block(0, o_start_idx) = d_res_d_T_w_j; + + Sophus::Matrix6d cov_new = J * cov_old * J.transpose(); + RelPoseFactor rpf; + rpf.t_i_ns = kf_id; + rpf.t_j_ns = other_id; + rpf.T_i_j = T_kf_o; + rpf.cov_inv.setIdentity(); + + if (!config.mapper_no_factor_weights) { + cov_new.ldlt().solveInPlace(rpf.cov_inv); + } + + // std::cout << "rpf.cov_inv\n" << rpf.cov_inv << std::endl; + + rel_pose_factors.emplace_back(rpf); + } + + return true; +} + +void NfrMapper::optimize(int num_iterations) { + AbsOrderMap aom; + + for (const auto& kv : frame_poses) { + aom.abs_order_map[kv.first] = std::make_pair(aom.total_size, POSE_SIZE); + aom.total_size += POSE_SIZE; + } + + for (int iter = 0; iter < num_iterations; iter++) { + auto t1 = std::chrono::high_resolution_clock::now(); + + double rld_error; + Eigen::aligned_vector rld_vec; + linearizeHelper(rld_vec, 
lmdb.getObservations(), rld_error); + + // SparseHashAccumulator accum; + // accum.reset(aom.total_size); + + // for (auto& rld : rld_vec) { + // rld.invert_keypoint_hessians(); + + // Eigen::MatrixXd rel_H; + // Eigen::VectorXd rel_b; + // linearizeRel(rld, rel_H, rel_b); + + // linearizeAbs(rel_H, rel_b, rld, aom, accum); + // } + + MapperLinearizeAbsReduce> lopt(aom, + &frame_poses); + tbb::blocked_range::const_iterator> range( + rld_vec.begin(), rld_vec.end()); + tbb::blocked_range::const_iterator> + range1(roll_pitch_factors.begin(), roll_pitch_factors.end()); + tbb::blocked_range::const_iterator> + range2(rel_pose_factors.begin(), rel_pose_factors.end()); + + tbb::parallel_reduce(range, lopt); + + if (config.mapper_use_factors) { + tbb::parallel_reduce(range1, lopt); + tbb::parallel_reduce(range2, lopt); + } + + double error_total = rld_error + lopt.rel_error + lopt.roll_pitch_error; + + std::cout << "[LINEARIZE] iter " << iter + << " before_update_error: vision: " << rld_error + << " rel_error: " << lopt.rel_error + << " roll_pitch_error: " << lopt.roll_pitch_error + << " total: " << error_total << std::endl; + + lopt.accum.iterative_solver = true; + lopt.accum.print_info = true; + + lopt.accum.setup_solver(); + const Eigen::VectorXd Hdiag = lopt.accum.Hdiagonal(); + + bool converged = false; + + if (config.mapper_use_lm) { // Use Levenberg–Marquardt + bool step = false; + int max_iter = 10; + + while (!step && max_iter > 0 && !converged) { + Eigen::VectorXd Hdiag_lambda = Hdiag * lambda; + for (int i = 0; i < Hdiag_lambda.size(); i++) + Hdiag_lambda[i] = std::max(Hdiag_lambda[i], min_lambda); + + const Eigen::VectorXd inc = lopt.accum.solve(&Hdiag_lambda); + double max_inc = inc.array().abs().maxCoeff(); + if (max_inc < 1e-5) converged = true; + + backup(); + + // apply increment to poses + for (auto& kv : frame_poses) { + int idx = aom.abs_order_map.at(kv.first).first; + BASALT_ASSERT(!kv.second.isLinearized()); + kv.second.applyInc(-inc.segment(idx)); + } 
+ + // Update points + tbb::blocked_range keys_range(0, rld_vec.size()); + auto update_points_func = [&](const tbb::blocked_range& r) { + for (size_t i = r.begin(); i != r.end(); ++i) { + const auto& rld = rld_vec[i]; + updatePoints(aom, rld, inc, lmdb); + } + }; + tbb::parallel_for(keys_range, update_points_func); + + double after_update_vision_error = 0; + double after_rel_error = 0; + double after_roll_pitch_error = 0; + + computeError(after_update_vision_error); + if (config.mapper_use_factors) { + computeRelPose(after_rel_error); + computeRollPitch(after_roll_pitch_error); + } + + double after_error_total = after_update_vision_error + after_rel_error + + after_roll_pitch_error; + + double f_diff = (error_total - after_error_total); + + if (f_diff < 0) { + std::cout << "\t[REJECTED] lambda:" << lambda << " f_diff: " << f_diff + << " max_inc: " << max_inc + << " vision_error: " << after_update_vision_error + << " rel_error: " << after_rel_error + << " roll_pitch_error: " << after_roll_pitch_error + << " total: " << after_error_total << std::endl; + lambda = std::min(max_lambda, lambda_vee * lambda); + lambda_vee *= 2; + + restore(); + } else { + std::cout << "\t[ACCEPTED] lambda:" << lambda << " f_diff: " << f_diff + << " max_inc: " << max_inc + << " vision_error: " << after_update_vision_error + << " rel_error: " << after_rel_error + << " roll_pitch_error: " << after_roll_pitch_error + << " total: " << after_error_total << std::endl; + + lambda = std::max(min_lambda, lambda / 3); + lambda_vee = 2; + + step = true; + } + + max_iter--; + + if (after_error_total > error_total) { + std::cout << "increased error after update!!!" 
<< std::endl; + } + } + } else { // Use Gauss-Newton + Eigen::VectorXd Hdiag_lambda = Hdiag * min_lambda; + for (int i = 0; i < Hdiag_lambda.size(); i++) + Hdiag_lambda[i] = std::max(Hdiag_lambda[i], min_lambda); + + const Eigen::VectorXd inc = lopt.accum.solve(&Hdiag_lambda); + double max_inc = inc.array().abs().maxCoeff(); + if (max_inc < 1e-5) converged = true; + + // apply increment to poses + for (auto& kv : frame_poses) { + int idx = aom.abs_order_map.at(kv.first).first; + BASALT_ASSERT(!kv.second.isLinearized()); + kv.second.applyInc(-inc.segment(idx)); + } + + // Update points + tbb::blocked_range keys_range(0, rld_vec.size()); + auto update_points_func = [&](const tbb::blocked_range& r) { + for (size_t i = r.begin(); i != r.end(); ++i) { + const auto& rld = rld_vec[i]; + updatePoints(aom, rld, inc, lmdb); + } + }; + tbb::parallel_for(keys_range, update_points_func); + } + + auto t2 = std::chrono::high_resolution_clock::now(); + auto elapsed = + std::chrono::duration_cast(t2 - t1); + + std::cout << "iter " << iter << " time : " << elapsed.count() + << "(us), num_states " << frame_states.size() << " num_poses " + << frame_poses.size() << std::endl; + + if (converged) break; + + // std::cerr << "LT\n" << LT << std::endl; + // std::cerr << "z_p\n" << z_p.transpose() << std::endl; + // std::cerr << "inc\n" << inc.transpose() << std::endl; + } +} + +Eigen::aligned_map>& +NfrMapper::getFramePoses() { + return frame_poses; +} + +void NfrMapper::computeRelPose(double& rel_error) { + rel_error = 0; + + for (const RelPoseFactor& rpf : rel_pose_factors) { + const Sophus::SE3d& pose_i = frame_poses.at(rpf.t_i_ns).getPose(); + const Sophus::SE3d& pose_j = frame_poses.at(rpf.t_j_ns).getPose(); + + Sophus::Vector6d res = relPoseError(rpf.T_i_j, pose_i, pose_j); + + rel_error += res.transpose() * rpf.cov_inv * res; + } +} + +void NfrMapper::computeRollPitch(double& roll_pitch_error) { + roll_pitch_error = 0; + + for (const RollPitchFactor& rpf : roll_pitch_factors) { + 
const Sophus::SE3d& pose = frame_poses.at(rpf.t_ns).getPose(); + + Sophus::Vector2d res = rollPitchError(pose, rpf.R_w_i_meas); + + roll_pitch_error += res.transpose() * rpf.cov_inv * res; + } +} + +void NfrMapper::detect_keypoints() { + std::vector keys; + for (const auto& kv : img_data) { + if (frame_poses.count(kv.first) > 0) { + keys.emplace_back(kv.first); + } + } + + auto t1 = std::chrono::high_resolution_clock::now(); + + tbb::parallel_for( + tbb::blocked_range(0, keys.size()), + [&](const tbb::blocked_range& r) { + for (size_t j = r.begin(); j != r.end(); ++j) { + auto kv = img_data.find(keys[j]); + if (kv->second.get()) { + for (size_t i = 0; i < kv->second->img_data.size(); i++) { + TimeCamId tcid(kv->first, i); + KeypointsData& kd = feature_corners[tcid]; + + if (!kv->second->img_data[i].img.get()) continue; + + const Image img = + kv->second->img_data[i].img->Reinterpret(); + + detectKeypointsMapping(img, kd, + config.mapper_detection_num_points); + computeAngles(img, kd, true); + computeDescriptors(img, kd); + + std::vector success; + calib.intrinsics[tcid.cam_id].unproject(kd.corners, kd.corners_3d, + success); + + hash_bow_database->compute_bow(kd.corner_descriptors, kd.hashes, + kd.bow_vector); + + hash_bow_database->add_to_database(tcid, kd.bow_vector); + + // std::cout << "bow " << kd.bow_vector.size() << " desc " + // << kd.corner_descriptors.size() << std::endl; + } + } + } + }); + + auto t2 = std::chrono::high_resolution_clock::now(); + + auto elapsed1 = + std::chrono::duration_cast(t2 - t1); + + std::cout << "Processed " << feature_corners.size() << " frames." + << std::endl; + + std::cout << "Detection time: " << elapsed1.count() * 1e-6 << "s." 
+ << std::endl; +} + +void NfrMapper::match_stereo() { + // Pose of camera 1 (right) w.r.t camera 0 (left) + const Sophus::SE3d T_0_1 = calib.T_i_c[0].inverse() * calib.T_i_c[1]; + + // Essential matrix + Eigen::Matrix4d E; + computeEssential(T_0_1, E); + + std::cout << "Matching " << img_data.size() << " stereo pairs..." + << std::endl; + + int num_matches = 0; + int num_inliers = 0; + + for (const auto& kv : img_data) { + const TimeCamId tcid1(kv.first, 0), tcid2(kv.first, 1); + + MatchData md; + md.T_i_j = T_0_1; + + const KeypointsData& kd1 = feature_corners[tcid1]; + const KeypointsData& kd2 = feature_corners[tcid2]; + + matchDescriptors(kd1.corner_descriptors, kd2.corner_descriptors, md.matches, + config.mapper_max_hamming_distance, + config.mapper_second_best_test_ratio); + + num_matches += md.matches.size(); + + findInliersEssential(kd1, kd2, E, 1e-3, md); + + if (md.inliers.size() > 16) { + num_inliers += md.inliers.size(); + feature_matches[std::make_pair(tcid1, tcid2)] = md; + } + } + + std::cout << "Matched " << img_data.size() << " stereo pairs with " + << num_inliers << " inlier matches (" << num_matches << " total)." 
+ << std::endl; +} + +void NfrMapper::match_all() { + std::vector keys; + std::unordered_map id_to_key_idx; + + for (const auto& kv : feature_corners) { + id_to_key_idx[kv.first] = keys.size(); + keys.push_back(kv.first); + } + + auto t1 = std::chrono::high_resolution_clock::now(); + + struct match_pair { + size_t i; + size_t j; + double score; + }; + + tbb::concurrent_vector ids_to_match; + + tbb::blocked_range keys_range(0, keys.size()); + auto compute_pairs = [&](const tbb::blocked_range& r) { + for (size_t i = r.begin(); i != r.end(); ++i) { + const TimeCamId& tcid = keys[i]; + const KeypointsData& kd = feature_corners.at(tcid); + + std::vector> results; + + hash_bow_database->querry_database(kd.bow_vector, + config.mapper_num_frames_to_match, + results, &tcid.frame_id); + + // std::cout << "Closest frames for " << tcid << ": "; + for (const auto& otcid_score : results) { + // std::cout << otcid_score.first << "(" << otcid_score.second << ") + // "; + if (otcid_score.first.frame_id != tcid.frame_id && + otcid_score.second > config.mapper_frames_to_match_threshold) { + match_pair m; + m.i = i; + m.j = id_to_key_idx.at(otcid_score.first); + m.score = otcid_score.second; + + ids_to_match.emplace_back(m); + } + } + // std::cout << std::endl; + } + }; + + tbb::parallel_for(keys_range, compute_pairs); + // compute_pairs(keys_range); + + auto t2 = std::chrono::high_resolution_clock::now(); + + std::cout << "Matching " << ids_to_match.size() << " image pairs..." 
+ << std::endl; + + std::atomic total_matched = 0; + + tbb::blocked_range range(0, ids_to_match.size()); + auto match_func = [&](const tbb::blocked_range& r) { + int matched = 0; + + for (size_t j = r.begin(); j != r.end(); ++j) { + const TimeCamId& id1 = keys[ids_to_match[j].i]; + const TimeCamId& id2 = keys[ids_to_match[j].j]; + + const KeypointsData& f1 = feature_corners[id1]; + const KeypointsData& f2 = feature_corners[id2]; + + MatchData md; + + matchDescriptors(f1.corner_descriptors, f2.corner_descriptors, md.matches, + 70, 1.2); + + if (int(md.matches.size()) > config.mapper_min_matches) { + matched++; + + findInliersRansac(f1, f2, config.mapper_ransac_threshold, + config.mapper_min_matches, md); + } + + if (!md.inliers.empty()) feature_matches[std::make_pair(id1, id2)] = md; + } + total_matched += matched; + }; + + tbb::parallel_for(range, match_func); + // match_func(range); + + auto t3 = std::chrono::high_resolution_clock::now(); + + auto elapsed1 = + std::chrono::duration_cast(t2 - t1); + auto elapsed2 = + std::chrono::duration_cast(t3 - t2); + + // + int num_matches = 0; + int num_inliers = 0; + + for (const auto& kv : feature_matches) { + num_matches += kv.second.matches.size(); + num_inliers += kv.second.inliers.size(); + } + + std::cout << "Matched " << ids_to_match.size() << " image pairs with " + << num_inliers << " inlier matches (" << num_matches << " total)." + << std::endl; + + std::cout << "DB query " << elapsed1.count() * 1e-6 << "s. matching " + << elapsed2.count() * 1e-6 + << "s. Geometric verification attemts: " << total_matched << "." 
+ << std::endl; +} + +void NfrMapper::build_tracks() { + TrackBuilder trackBuilder; + // Build: Efficient fusion of correspondences + trackBuilder.Build(feature_matches); + // Filter: Remove tracks that have conflict + trackBuilder.Filter(config.mapper_min_track_length); + // Export tree to usable data structure + trackBuilder.Export(feature_tracks); + + // info + size_t inlier_match_count = 0; + for (const auto& it : feature_matches) { + inlier_match_count += it.second.inliers.size(); + } + + size_t total_track_obs_count = 0; + for (const auto& it : feature_tracks) { + total_track_obs_count += it.second.size(); + } + + std::cout << "Built " << feature_tracks.size() << " feature tracks from " + << inlier_match_count << " matches. Average track length is " + << total_track_obs_count / (double)feature_tracks.size() << "." + << std::endl; +} + +void NfrMapper::setup_opt() { + const double min_triang_distance2 = config.mapper_min_triangulation_dist * + config.mapper_min_triangulation_dist; + + for (const auto& kv : feature_tracks) { + if (kv.second.size() < 2) continue; + + // Take first observation as host + auto it = kv.second.begin(); + TimeCamId tcid_h = it->first; + + FeatureId feat_id_h = it->second; + Eigen::Vector2d pos_2d_h = feature_corners.at(tcid_h).corners[feat_id_h]; + Eigen::Vector4d pos_3d_h; + calib.intrinsics[tcid_h.cam_id].unproject(pos_2d_h, pos_3d_h); + + it++; + + for (; it != kv.second.end(); it++) { + TimeCamId tcid_o = it->first; + + FeatureId feat_id_o = it->second; + Eigen::Vector2d pos_2d_o = feature_corners.at(tcid_o).corners[feat_id_o]; + Eigen::Vector4d pos_3d_o; + calib.intrinsics[tcid_o.cam_id].unproject(pos_2d_o, pos_3d_o); + + Sophus::SE3d T_w_h = frame_poses.at(tcid_h.frame_id).getPose() * + calib.T_i_c[tcid_h.cam_id]; + Sophus::SE3d T_w_o = frame_poses.at(tcid_o.frame_id).getPose() * + calib.T_i_c[tcid_o.cam_id]; + + Sophus::SE3d T_h_o = T_w_h.inverse() * T_w_o; + + if (T_h_o.translation().squaredNorm() < min_triang_distance2) 
continue; + + Eigen::Vector4d pos_3d = + triangulate(pos_3d_h.head<3>(), pos_3d_o.head<3>(), T_h_o); + + if (!pos_3d.array().isFinite().all() || pos_3d[3] <= 0 || pos_3d[3] > 2.0) + continue; + + Keypoint pos; + pos.host_kf_id = tcid_h; + pos.direction = StereographicParam::project(pos_3d); + pos.inv_dist = pos_3d[3]; + + lmdb.addLandmark(kv.first, pos); + + for (const auto& obs_kv : kv.second) { + KeypointObservation ko; + ko.kpt_id = kv.first; + ko.pos = feature_corners.at(obs_kv.first).corners[obs_kv.second]; + + lmdb.addObservation(obs_kv.first, ko); + // obs[tcid_h][obs_kv.first].emplace_back(ko); + } + break; + } + } +} + +} // namespace basalt diff --git a/src/vi_estimator/sc_ba_base.cpp b/src/vi_estimator/sc_ba_base.cpp new file mode 100644 index 0000000..cba100f --- /dev/null +++ b/src/vi_estimator/sc_ba_base.cpp @@ -0,0 +1,808 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include + +#include +#include + +namespace basalt { + +template +void ScBundleAdjustmentBase::updatePoints( + const AbsOrderMap& aom, const RelLinData& rld, const VecX& inc, + LandmarkDatabase& lmdb, Scalar* l_diff) { + // We want to compute the model cost change. The model fuction is + // + // L(inc) = 0.5 r^T r + inc^T b + 0.5 inc^T H inc, + // + // and thus the expected decrease in cost for the computed increment is + // + // l_diff = L(0) - L(inc) + // = - inc^T b - 0.5 inc^T H inc. + // + // Here we have + // + // | incp | | bp | | Hpp Hpl | + // inc = | | b = | | H = | | + // | incl |, | bl |, | Hlp Hll |, + // + // and thus + // + // l_diff = - incp^T bp - incl^T bl - + // 0.5 incp^T Hpp incp - incp^T Hpl incl - 0.5 incl^T Hll incl + // = - sum_{p} (incp^T (bp + 0.5 Hpp incp)) - + // sum_{l} (incl^T (bl + 0.5 Hll incl + + // sum_{obs} (Hpl^T incp))), + // + // where sum_{p} sums over all (relative) poses, sum_{l} over all landmarks, + // and sum_{obs} over all observations of each landmark. + // + // Note: l_diff acts like an accumulator; we just add w/o initializing to 0. 
+ + VecX rel_inc; + rel_inc.setZero(rld.order.size() * POSE_SIZE); + for (size_t i = 0; i < rld.order.size(); i++) { + const TimeCamId& tcid_h = rld.order[i].first; + const TimeCamId& tcid_t = rld.order[i].second; + + if (tcid_h.frame_id != tcid_t.frame_id) { + int abs_h_idx = aom.abs_order_map.at(tcid_h.frame_id).first; + int abs_t_idx = aom.abs_order_map.at(tcid_t.frame_id).first; + + Eigen::Matrix inc_p = + rld.d_rel_d_h[i] * inc.template segment(abs_h_idx) + + rld.d_rel_d_t[i] * inc.template segment(abs_t_idx); + + rel_inc.template segment(i * POSE_SIZE) = inc_p; + + if (l_diff) { + // Note: inc_p is still negated b/c we solve H(-x) = b + const FrameRelLinData& frld = rld.Hpppl[i]; + *l_diff -= -inc_p.dot((frld.bp - Scalar(0.5) * (frld.Hpp * inc_p))); + } + } + } + + for (const auto& kv : rld.lm_to_obs) { + int lm_idx = kv.first; + const auto& lm_obs = kv.second; + + Vec3 H_l_p_x; + H_l_p_x.setZero(); + + for (size_t k = 0; k < lm_obs.size(); k++) { + int rel_idx = lm_obs[k].first; + const FrameRelLinData& frld = rld.Hpppl.at(rel_idx); + const Eigen::Matrix& H_p_l_other = + frld.Hpl.at(lm_obs[k].second); + + // Note: pose increment is still negated b/c we solve "H (-inc) = b" + H_l_p_x += H_p_l_other.transpose() * + rel_inc.template segment(rel_idx * POSE_SIZE); + } + + // final negation of inc_l b/c we solve "H (-inc) = b" + Vec3 inc_l = -(rld.Hllinv.at(lm_idx) * (rld.bl.at(lm_idx) - H_l_p_x)); + + Keypoint& kpt = lmdb.getLandmark(lm_idx); + kpt.direction += inc_l.template head<2>(); + kpt.inv_dist += inc_l[2]; + + kpt.inv_dist = std::max(Scalar(0), kpt.inv_dist); + + if (l_diff) { + // Note: rel_inc and thus H_l_p_x are still negated b/c we solve H(-x) = b + *l_diff -= inc_l.transpose() * + (rld.bl.at(lm_idx) + + Scalar(0.5) * (rld.Hll.at(lm_idx) * inc_l) - H_l_p_x); + } + } +} + +template +void ScBundleAdjustmentBase::updatePointsAbs( + const AbsOrderMap& aom, const AbsLinData& ald, const VecX& inc, + LandmarkDatabase& lmdb, Scalar* l_diff) { + // We want to 
compute the model cost change. The model fuction is + // + // L(inc) = 0.5 r^T r + inc^T b + 0.5 inc^T H inc, + // + // and thus the expected decrease in cost for the computed increment is + // + // l_diff = L(0) - L(inc) + // = - inc^T b - 0.5 inc^T H inc. + // + // Here we have + // + // | incp | | bp | | Hpp Hpl | + // inc = | | b = | | H = | | + // | incl |, | bl |, | Hlp Hll |, + // + // and thus + // + // l_diff = - incp^T bp - incl^T bl - + // 0.5 incp^T Hpp incp - incp^T Hpl incl - 0.5 incl^T Hll incl + // = - sum_{p} (incp^T (bp + 0.5 Hpp incp)) - + // sum_{l} (incl^T (bl + 0.5 Hll incl + + // sum_{obs} (Hpl^T incp))), + // + // where sum_{p} sums over all (relative) poses, sum_{l} over all landmarks, + // and sum_{obs} over all observations of each landmark. + // + // Note: l_diff acts like an accumulator; we just add w/o initializing to 0. + + if (l_diff) { + for (size_t i = 0; i < ald.order.size(); i++) { + const TimeCamId& tcid_h = ald.order[i].first; + const TimeCamId& tcid_t = ald.order[i].second; + + int abs_h_idx = aom.abs_order_map.at(tcid_h.frame_id).first; + int abs_t_idx = aom.abs_order_map.at(tcid_t.frame_id).first; + + auto inc_h = inc.template segment(abs_h_idx); + auto inc_t = inc.template segment(abs_t_idx); + + // Note: inc_p is still negated b/c we solve H(-x) = b + const FrameAbsLinData& fald = ald.Hpppl[i]; + *l_diff -= -inc_h.dot((fald.bph - Scalar(0.5) * (fald.Hphph * inc_h)) - + fald.Hphpt * inc_t) - + inc_t.dot((fald.bpt - Scalar(0.5) * (fald.Hptpt * inc_t))); + } + } + + for (const auto& kv : ald.lm_to_obs) { + int lm_idx = kv.first; + const auto& lm_obs = kv.second; + + Vec3 H_l_p_x; + H_l_p_x.setZero(); + + for (size_t k = 0; k < lm_obs.size(); k++) { + int rel_idx = lm_obs[k].first; + const TimeCamId& tcid_h = ald.order.at(rel_idx).first; + const TimeCamId& tcid_t = ald.order.at(rel_idx).second; + + int abs_h_idx = aom.abs_order_map.at(tcid_h.frame_id).first; + int abs_t_idx = aom.abs_order_map.at(tcid_t.frame_id).first; + + 
auto inc_h = inc.template segment(abs_h_idx); + auto inc_t = inc.template segment(abs_t_idx); + + const FrameAbsLinData& fald = ald.Hpppl.at(rel_idx); + const Eigen::Matrix& H_ph_l_other = + fald.Hphl.at(lm_obs[k].second); + const Eigen::Matrix& H_pt_l_other = + fald.Hptl.at(lm_obs[k].second); + + // Note: pose increment is still negated b/c we solve "H (-inc) = b" + H_l_p_x += H_ph_l_other.transpose() * inc_h; + H_l_p_x += H_pt_l_other.transpose() * inc_t; + } + + // final negation of inc_l b/c we solve "H (-inc) = b" + Vec3 inc_l = -(ald.Hllinv.at(lm_idx) * (ald.bl.at(lm_idx) - H_l_p_x)); + + Keypoint& kpt = lmdb.getLandmark(lm_idx); + kpt.direction += inc_l.template head<2>(); + kpt.inv_dist += inc_l[2]; + + kpt.inv_dist = std::max(Scalar(0), kpt.inv_dist); + + if (l_diff) { + // Note: rel_inc and thus H_l_p_x are still negated b/c we solve H(-x) = b + *l_diff -= inc_l.transpose() * + (ald.bl.at(lm_idx) + + Scalar(0.5) * (ald.Hll.at(lm_idx) * inc_l) - H_l_p_x); + } + } +} + +template +void ScBundleAdjustmentBase::linearizeHelperStatic( + Eigen::aligned_vector& rld_vec, + const std::unordered_map< + TimeCamId, std::map>>& obs_to_lin, + const BundleAdjustmentBase* ba_base, Scalar& error) { + error = 0; + + rld_vec.clear(); + + std::vector obs_tcid_vec; + for (const auto& kv : obs_to_lin) { + obs_tcid_vec.emplace_back(kv.first); + rld_vec.emplace_back(ba_base->lmdb.numLandmarks(), kv.second.size()); + } + + tbb::parallel_for( + tbb::blocked_range(0, obs_tcid_vec.size()), + [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + auto kv = obs_to_lin.find(obs_tcid_vec[r]); + + RelLinData& rld = rld_vec[r]; + + rld.error = Scalar(0); + + const TimeCamId& tcid_h = kv->first; + + for (const auto& obs_kv : kv->second) { + const TimeCamId& tcid_t = obs_kv.first; + rld.order.emplace_back(std::make_pair(tcid_h, tcid_t)); + + Mat4 T_t_h; + + if (tcid_h != tcid_t) { + // target and host are not the same + PoseStateWithLin state_h = + 
ba_base->getPoseStateWithLin(tcid_h.frame_id); + PoseStateWithLin state_t = + ba_base->getPoseStateWithLin(tcid_t.frame_id); + + Mat6 d_rel_d_h, d_rel_d_t; + + SE3 T_t_h_sophus = computeRelPose( + state_h.getPoseLin(), ba_base->calib.T_i_c[tcid_h.cam_id], + state_t.getPoseLin(), ba_base->calib.T_i_c[tcid_t.cam_id], + &d_rel_d_h, &d_rel_d_t); + + rld.d_rel_d_h.emplace_back(d_rel_d_h); + rld.d_rel_d_t.emplace_back(d_rel_d_t); + + if (state_h.isLinearized() || state_t.isLinearized()) { + T_t_h_sophus = computeRelPose( + state_h.getPose(), ba_base->calib.T_i_c[tcid_h.cam_id], + state_t.getPose(), ba_base->calib.T_i_c[tcid_t.cam_id]); + } + + T_t_h = T_t_h_sophus.matrix(); + } else { + T_t_h.setIdentity(); + rld.d_rel_d_h.emplace_back(Mat6::Zero()); + rld.d_rel_d_t.emplace_back(Mat6::Zero()); + } + + FrameRelLinData frld; + + std::visit( + [&](const auto& cam) { + for (KeypointId kpt_id : obs_kv.second) { + const Keypoint& kpt_pos = + ba_base->lmdb.getLandmark(kpt_id); + const Vec2& kpt_obs = kpt_pos.obs.at(tcid_t); + + Vec2 res; + Eigen::Matrix d_res_d_xi; + Eigen::Matrix d_res_d_p; + + bool valid = linearizePoint(kpt_obs, kpt_pos, T_t_h, cam, + res, &d_res_d_xi, &d_res_d_p); + + if (valid) { + Scalar e = res.norm(); + Scalar huber_weight = e < ba_base->huber_thresh + ? 
Scalar(1.0) + : ba_base->huber_thresh / e; + Scalar obs_weight = huber_weight / (ba_base->obs_std_dev * + ba_base->obs_std_dev); + + rld.error += Scalar(0.5) * (2 - huber_weight) * + obs_weight * res.transpose() * res; + + if (rld.Hll.count(kpt_id) == 0) { + rld.Hll[kpt_id].setZero(); + rld.bl[kpt_id].setZero(); + } + + rld.Hll[kpt_id] += + obs_weight * d_res_d_p.transpose() * d_res_d_p; + rld.bl[kpt_id] += + obs_weight * d_res_d_p.transpose() * res; + + frld.Hpp += + obs_weight * d_res_d_xi.transpose() * d_res_d_xi; + frld.bp += obs_weight * d_res_d_xi.transpose() * res; + frld.Hpl.emplace_back(obs_weight * + d_res_d_xi.transpose() * d_res_d_p); + frld.lm_id.emplace_back(kpt_id); + + rld.lm_to_obs[kpt_id].emplace_back(rld.Hpppl.size(), + frld.lm_id.size() - 1); + } + } + }, + ba_base->calib.intrinsics[tcid_t.cam_id].variant); + + rld.Hpppl.emplace_back(frld); + } + + rld.invert_keypoint_hessians(); + } + }); + + for (const auto& rld : rld_vec) error += rld.error; +} + +template +void ScBundleAdjustmentBase::linearizeHelperAbsStatic( + Eigen::aligned_vector& ald_vec, + const std::unordered_map< + TimeCamId, std::map>>& obs_to_lin, + const BundleAdjustmentBase* ba_base, Scalar& error) { + error = 0; + + ald_vec.clear(); + + std::vector obs_tcid_vec; + for (const auto& kv : obs_to_lin) { + obs_tcid_vec.emplace_back(kv.first); + ald_vec.emplace_back(ba_base->lmdb.numLandmarks(), kv.second.size()); + } + + tbb::parallel_for( + tbb::blocked_range(0, obs_tcid_vec.size()), + [&](const tbb::blocked_range& range) { + for (size_t r = range.begin(); r != range.end(); ++r) { + auto kv = obs_to_lin.find(obs_tcid_vec[r]); + + AbsLinData& ald = ald_vec[r]; + + ald.error = Scalar(0); + + const TimeCamId& tcid_h = kv->first; + + for (const auto& obs_kv : kv->second) { + const TimeCamId& tcid_t = obs_kv.first; + ald.order.emplace_back(std::make_pair(tcid_h, tcid_t)); + + Mat4 T_t_h; + Mat6 d_rel_d_h, d_rel_d_t; + + if (tcid_h != tcid_t) { + // target and host are not the same + 
PoseStateWithLin state_h = + ba_base->getPoseStateWithLin(tcid_h.frame_id); + PoseStateWithLin state_t = + ba_base->getPoseStateWithLin(tcid_t.frame_id); + + SE3 T_t_h_sophus = computeRelPose( + state_h.getPoseLin(), ba_base->calib.T_i_c[tcid_h.cam_id], + state_t.getPoseLin(), ba_base->calib.T_i_c[tcid_t.cam_id], + &d_rel_d_h, &d_rel_d_t); + + if (state_h.isLinearized() || state_t.isLinearized()) { + T_t_h_sophus = computeRelPose( + state_h.getPose(), ba_base->calib.T_i_c[tcid_h.cam_id], + state_t.getPose(), ba_base->calib.T_i_c[tcid_t.cam_id]); + } + + T_t_h = T_t_h_sophus.matrix(); + } else { + T_t_h.setIdentity(); + d_rel_d_h.setZero(); + d_rel_d_t.setZero(); + } + + FrameAbsLinData fald; + + std::visit( + [&](const auto& cam) { + for (KeypointId kpt_id : obs_kv.second) { + const Keypoint& kpt_pos = + ba_base->lmdb.getLandmark(kpt_id); + const Vec2& kpt_obs = kpt_pos.obs.at(tcid_t); + + Vec2 res; + Eigen::Matrix d_res_d_xi_h, + d_res_d_xi_t; + Eigen::Matrix d_res_d_p; + + bool valid; + { + Eigen::Matrix d_res_d_xi; + + valid = linearizePoint(kpt_obs, kpt_pos, T_t_h, cam, res, + &d_res_d_xi, &d_res_d_p); + + d_res_d_xi_h = d_res_d_xi * d_rel_d_h; + d_res_d_xi_t = d_res_d_xi * d_rel_d_t; + } + + if (valid) { + Scalar e = res.norm(); + Scalar huber_weight = e < ba_base->huber_thresh + ? 
Scalar(1.0) + : ba_base->huber_thresh / e; + Scalar obs_weight = huber_weight / (ba_base->obs_std_dev * + ba_base->obs_std_dev); + + ald.error += Scalar(0.5) * (2 - huber_weight) * + obs_weight * res.transpose() * res; + + if (ald.Hll.count(kpt_id) == 0) { + ald.Hll[kpt_id].setZero(); + ald.bl[kpt_id].setZero(); + } + + ald.Hll[kpt_id] += + obs_weight * d_res_d_p.transpose() * d_res_d_p; + ald.bl[kpt_id] += + obs_weight * d_res_d_p.transpose() * res; + + fald.Hphph += + obs_weight * d_res_d_xi_h.transpose() * d_res_d_xi_h; + fald.Hptpt += + obs_weight * d_res_d_xi_t.transpose() * d_res_d_xi_t; + fald.Hphpt += + obs_weight * d_res_d_xi_h.transpose() * d_res_d_xi_t; + + fald.bph += obs_weight * d_res_d_xi_h.transpose() * res; + fald.bpt += obs_weight * d_res_d_xi_t.transpose() * res; + + fald.Hphl.emplace_back( + obs_weight * d_res_d_xi_h.transpose() * d_res_d_p); + fald.Hptl.emplace_back( + obs_weight * d_res_d_xi_t.transpose() * d_res_d_p); + + fald.lm_id.emplace_back(kpt_id); + + ald.lm_to_obs[kpt_id].emplace_back(ald.Hpppl.size(), + fald.lm_id.size() - 1); + } + } + }, + ba_base->calib.intrinsics[tcid_t.cam_id].variant); + + ald.Hpppl.emplace_back(fald); + } + + ald.invert_keypoint_hessians(); + } + }); + + for (const auto& rld : ald_vec) error += rld.error; +} + +template +void ScBundleAdjustmentBase::linearizeRel(const RelLinData& rld, + MatX& H, VecX& b) { + // std::cout << "linearizeRel: KF " << frame_states.size() << " obs " + // << obs.size() << std::endl; + + // Do schur complement + size_t msize = rld.order.size(); + H.setZero(POSE_SIZE * msize, POSE_SIZE * msize); + b.setZero(POSE_SIZE * msize); + + for (size_t i = 0; i < rld.order.size(); i++) { + const FrameRelLinData& frld = rld.Hpppl.at(i); + + H.template block(POSE_SIZE * i, POSE_SIZE * i) += + frld.Hpp; + b.template segment(POSE_SIZE * i) += frld.bp; + + for (size_t j = 0; j < frld.lm_id.size(); j++) { + Eigen::Matrix H_pl_H_ll_inv; + int lm_id = frld.lm_id[j]; + + H_pl_H_ll_inv = frld.Hpl[j] * 
rld.Hllinv.at(lm_id); + b.template segment(POSE_SIZE * i) -= + H_pl_H_ll_inv * rld.bl.at(lm_id); + + const auto& other_obs = rld.lm_to_obs.at(lm_id); + for (size_t k = 0; k < other_obs.size(); k++) { + const FrameRelLinData& frld_other = rld.Hpppl.at(other_obs[k].first); + int other_i = other_obs[k].first; + + Eigen::Matrix H_l_p_other = + frld_other.Hpl[other_obs[k].second].transpose(); + + H.template block( + POSE_SIZE * i, POSE_SIZE * other_i) -= H_pl_H_ll_inv * H_l_p_other; + } + } + } +} + +template +Eigen::VectorXd ScBundleAdjustmentBase::checkNullspace( + const MatX& H, const VecX& b, const AbsOrderMap& order, + const Eigen::aligned_map>& + frame_states, + const Eigen::aligned_map>& frame_poses, + bool verbose) { + using Vec3d = Eigen::Vector3d; + using VecXd = Eigen::VectorXd; + using Mat3d = Eigen::Matrix3d; + using MatXd = Eigen::MatrixXd; + using SO3d = Sophus::SO3d; + + BASALT_ASSERT(size_t(H.cols()) == order.total_size); + size_t marg_size = order.total_size; + + // Idea: We construct increments that we know should lie in the null-space of + // the prior from marginalized observations (except for the initial pose prior + // we set at initialization) --> shift global translations (x,y,z separately), + // or global rotations (r,p,y separately); for VIO only yaw rotation shift is + // in nullspace. Compared to a random increment, we expect them to stay small + // (at initial level of the initial pose prior). If they increase over time, + // we accumulate spurious information on unobservable degrees of freedom. + // + // Poses are cam-to-world and we use left-increment in optimization, so + // perturbations are in world frame. Hence translational increments are + // independent. For rotational increments we also need to rotate translations + // and velocities, both of which are expressed in world frame. For + // translations, we move the center of rotation to the camera center centroid + // for better numerics. 
+ + VecXd inc_x, inc_y, inc_z, inc_roll, inc_pitch, inc_yaw; + inc_x.setZero(marg_size); + inc_y.setZero(marg_size); + inc_z.setZero(marg_size); + inc_roll.setZero(marg_size); + inc_pitch.setZero(marg_size); + inc_yaw.setZero(marg_size); + + int num_trans = 0; + Vec3d mean_trans; + mean_trans.setZero(); + + // Compute mean translation + for (const auto& kv : order.abs_order_map) { + Vec3d trans; + if (kv.second.second == POSE_SIZE) { + mean_trans += frame_poses.at(kv.first) + .getPoseLin() + .translation() + .template cast(); + num_trans++; + } else if (kv.second.second == POSE_VEL_BIAS_SIZE) { + mean_trans += frame_states.at(kv.first) + .getStateLin() + .T_w_i.translation() + .template cast(); + num_trans++; + } else { + std::cerr << "Unknown size of the state: " << kv.second.second + << std::endl; + std::abort(); + } + } + mean_trans /= num_trans; + + double eps = 0.01; + + // Compute nullspace increments + for (const auto& kv : order.abs_order_map) { + inc_x(kv.second.first + 0) = eps; + inc_y(kv.second.first + 1) = eps; + inc_z(kv.second.first + 2) = eps; + inc_roll(kv.second.first + 3) = eps; + inc_pitch(kv.second.first + 4) = eps; + inc_yaw(kv.second.first + 5) = eps; + + Vec3d trans; + if (kv.second.second == POSE_SIZE) { + trans = frame_poses.at(kv.first) + .getPoseLin() + .translation() + .template cast(); + } else if (kv.second.second == POSE_VEL_BIAS_SIZE) { + trans = frame_states.at(kv.first) + .getStateLin() + .T_w_i.translation() + .template cast(); + } else { + BASALT_ASSERT(false); + } + + // combine rotation with global translation to make it rotation around + // translation centroid, not around origin (better numerics). Note that + // velocities are not affected by global translation. + trans -= mean_trans; + + // Jacobian of translation w.r.t. 
the rotation increment (one column each + // for the 3 different increments) + Mat3d J = -SO3d::hat(trans); + J *= eps; + + inc_roll.template segment<3>(kv.second.first) = J.col(0); + inc_pitch.template segment<3>(kv.second.first) = J.col(1); + inc_yaw.template segment<3>(kv.second.first) = J.col(2); + + if (kv.second.second == POSE_VEL_BIAS_SIZE) { + Vec3d vel = frame_states.at(kv.first) + .getStateLin() + .vel_w_i.template cast(); + + // Jacobian of velocity w.r.t. the rotation increment (one column each + // for the 3 different increments) + Mat3d J_vel = -SO3d::hat(vel); + J_vel *= eps; + + inc_roll.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(0); + inc_pitch.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(1); + inc_yaw.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(2); + } + } + + inc_x.normalize(); + inc_y.normalize(); + inc_z.normalize(); + inc_roll.normalize(); + inc_pitch.normalize(); + inc_yaw.normalize(); + + // std::cout << "inc_x " << inc_x.transpose() << std::endl; + // std::cout << "inc_y " << inc_y.transpose() << std::endl; + // std::cout << "inc_z " << inc_z.transpose() << std::endl; + // std::cout << "inc_yaw " << inc_yaw.transpose() << std::endl; + + VecXd inc_random; + inc_random.setRandom(marg_size); + inc_random.normalize(); + + MatXd H_d = H.template cast(); + VecXd b_d = b.template cast(); + + VecXd xHx(7); + VecXd xb(7); + + xHx[0] = inc_x.transpose() * H_d * inc_x; + xHx[1] = inc_y.transpose() * H_d * inc_y; + xHx[2] = inc_z.transpose() * H_d * inc_z; + xHx[3] = inc_roll.transpose() * H_d * inc_roll; + xHx[4] = inc_pitch.transpose() * H_d * inc_pitch; + xHx[5] = inc_yaw.transpose() * H_d * inc_yaw; + xHx[6] = inc_random.transpose() * H_d * inc_random; + + // b == J^T r, so the increments should also lie in left-nullspace of b + xb[0] = inc_x.transpose() * b_d; + xb[1] = inc_y.transpose() * b_d; + xb[2] = inc_z.transpose() * b_d; + xb[3] = inc_roll.transpose() * b_d; + xb[4] = inc_pitch.transpose() 
* b_d; + xb[5] = inc_yaw.transpose() * b_d; + xb[6] = inc_random.transpose() * b_d; + + if (verbose) { + std::cout << "nullspace x_trans: " << xHx[0] << " + " << xb[0] << std::endl; + std::cout << "nullspace y_trans: " << xHx[1] << " + " << xb[1] << std::endl; + std::cout << "nullspace z_trans: " << xHx[2] << " + " << xb[2] << std::endl; + std::cout << "nullspace roll : " << xHx[3] << " + " << xb[3] << std::endl; + std::cout << "nullspace pitch : " << xHx[4] << " + " << xb[4] << std::endl; + std::cout << "nullspace yaw : " << xHx[5] << " + " << xb[5] << std::endl; + std::cout << "nullspace random : " << xHx[6] << " + " << xb[6] << std::endl; + } + + return xHx + xb; +} + +template +Eigen::VectorXd ScBundleAdjustmentBase::checkEigenvalues( + const MatX& H, bool verbose) { + // For EV, we use SelfAdjointEigenSolver to avoid getting (numerically) + // complex eigenvalues. + + // We do this computation in double precision to avoid any inaccuracies that + // come from the squaring. + + Eigen::SelfAdjointEigenSolver eigensolver( + H.template cast()); + if (eigensolver.info() != Eigen::Success) { + BASALT_LOG_FATAL("eigen solver failed"); + } + + if (verbose) { + std::cout << "EV:\n" << eigensolver.eigenvalues() << std::endl; + } + + return eigensolver.eigenvalues(); +} + +template +void ScBundleAdjustmentBase::computeImuError( + const AbsOrderMap& aom, Scalar& imu_error, Scalar& bg_error, + Scalar& ba_error, + const Eigen::aligned_map>& states, + const Eigen::aligned_map>& + imu_meas, + const Vec3& gyro_bias_weight, const Vec3& accel_bias_weight, + const Vec3& g) { + imu_error = 0; + bg_error = 0; + ba_error = 0; + for (const auto& kv : imu_meas) { + if (kv.second.get_dt_ns() != 0) { + int64_t start_t = kv.second.get_start_t_ns(); + int64_t end_t = kv.second.get_start_t_ns() + kv.second.get_dt_ns(); + + if (aom.abs_order_map.count(start_t) == 0 || + aom.abs_order_map.count(end_t) == 0) + continue; + + PoseVelBiasStateWithLin start_state = states.at(start_t); + 
PoseVelBiasStateWithLin end_state = states.at(end_t); + + const typename PoseVelState::VecN res = kv.second.residual( + start_state.getState(), g, end_state.getState(), + start_state.getState().bias_gyro, start_state.getState().bias_accel); + + // std::cout << "res: (" << start_t << "," << end_t << ") " + // << res.transpose() << std::endl; + + // std::cerr << "cov_inv:\n" << kv.second.get_cov_inv() << + // std::endl; + + imu_error += + Scalar(0.5) * res.transpose() * kv.second.get_cov_inv() * res; + + Scalar dt = kv.second.get_dt_ns() * Scalar(1e-9); + { + Vec3 gyro_bias_weight_dt = gyro_bias_weight / dt; + Vec3 res_bg = + start_state.getState().bias_gyro - end_state.getState().bias_gyro; + + bg_error += Scalar(0.5) * res_bg.transpose() * + gyro_bias_weight_dt.asDiagonal() * res_bg; + } + + { + Vec3 accel_bias_weight_dt = accel_bias_weight / dt; + Vec3 res_ba = + start_state.getState().bias_accel - end_state.getState().bias_accel; + + ba_error += Scalar(0.5) * res_ba.transpose() * + accel_bias_weight_dt.asDiagonal() * res_ba; + } + } + } +} + +// ////////////////////////////////////////////////////////////////// +// instatiate templates + +// Note: double specialization is unconditional, b/c NfrMapper depends on it. +//#ifdef BASALT_INSTANTIATIONS_DOUBLE +template class ScBundleAdjustmentBase; +//#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +template class ScBundleAdjustmentBase; +#endif + +} // namespace basalt diff --git a/src/vi_estimator/sqrt_ba_base.cpp b/src/vi_estimator/sqrt_ba_base.cpp new file mode 100644 index 0000000..b30a15f --- /dev/null +++ b/src/vi_estimator/sqrt_ba_base.cpp @@ -0,0 +1,270 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include + +namespace basalt { + +template +Eigen::VectorXd SqrtBundleAdjustmentBase::checkNullspace( + const MargLinData& mld, + const Eigen::aligned_map>& + frame_states, + const Eigen::aligned_map>& frame_poses, + bool verbose) { + using Vec3d = Eigen::Vector3d; + using VecXd = Eigen::VectorXd; + using Mat3d = Eigen::Matrix3d; + using MatXd = Eigen::MatrixXd; + using SO3d = Sophus::SO3d; + + BASALT_ASSERT_STREAM(size_t(mld.H.cols()) == mld.order.total_size, + mld.H.cols() << " " << mld.order.total_size); + size_t marg_size = mld.order.total_size; + + // Idea: We construct increments that we know should lie in the null-space of + // the prior from marginalized observations (except for the initial pose prior + // we set at initialization) --> shift global translations (x,y,z separately), + // or global rotations (r,p,y separately); for VIO only yaw rotation shift is + // in nullspace. Compared to a random increment, we expect them to stay small + // (at initial level of the initial pose prior). If they increase over time, + // we accumulate spurious information on unobservable degrees of freedom. + // + // Poses are cam-to-world and we use left-increment in optimization, so + // perturbations are in world frame. Hence translational increments are + // independent. For rotational increments we also need to rotate translations + // and velocities, both of which are expressed in world frame. For + // translations, we move the center of rotation to the camera center centroid + // for better numerics. 
+ + VecXd inc_x, inc_y, inc_z, inc_roll, inc_pitch, inc_yaw; + inc_x.setZero(marg_size); + inc_y.setZero(marg_size); + inc_z.setZero(marg_size); + inc_roll.setZero(marg_size); + inc_pitch.setZero(marg_size); + inc_yaw.setZero(marg_size); + + int num_trans = 0; + Vec3d mean_trans; + mean_trans.setZero(); + + // Compute mean translation + for (const auto& kv : mld.order.abs_order_map) { + Vec3d trans; + if (kv.second.second == POSE_SIZE) { + mean_trans += frame_poses.at(kv.first) + .getPoseLin() + .translation() + .template cast(); + num_trans++; + } else if (kv.second.second == POSE_VEL_BIAS_SIZE) { + mean_trans += frame_states.at(kv.first) + .getStateLin() + .T_w_i.translation() + .template cast(); + num_trans++; + } else { + std::cerr << "Unknown size of the state: " << kv.second.second + << std::endl; + std::abort(); + } + } + mean_trans /= num_trans; + + double eps = 0.01; + + // Compute nullspace increments + for (const auto& kv : mld.order.abs_order_map) { + inc_x(kv.second.first + 0) = eps; + inc_y(kv.second.first + 1) = eps; + inc_z(kv.second.first + 2) = eps; + inc_roll(kv.second.first + 3) = eps; + inc_pitch(kv.second.first + 4) = eps; + inc_yaw(kv.second.first + 5) = eps; + + Vec3d trans; + if (kv.second.second == POSE_SIZE) { + trans = frame_poses.at(kv.first) + .getPoseLin() + .translation() + .template cast(); + } else if (kv.second.second == POSE_VEL_BIAS_SIZE) { + trans = frame_states.at(kv.first) + .getStateLin() + .T_w_i.translation() + .template cast(); + } else { + BASALT_ASSERT(false); + } + + // combine rotation with global translation to make it rotation around + // translation centroid, not around origin (better numerics). Note that + // velocities are not affected by global translation. + trans -= mean_trans; + + // Jacobian of translation w.r.t. 
the rotation increment (one column each + // for the 3 different increments) + Mat3d J = -SO3d::hat(trans); + J *= eps; + + inc_roll.template segment<3>(kv.second.first) = J.col(0); + inc_pitch.template segment<3>(kv.second.first) = J.col(1); + inc_yaw.template segment<3>(kv.second.first) = J.col(2); + + if (kv.second.second == POSE_VEL_BIAS_SIZE) { + Vec3d vel = frame_states.at(kv.first) + .getStateLin() + .vel_w_i.template cast(); + + // Jacobian of velocity w.r.t. the rotation increment (one column each + // for the 3 different increments) + Mat3d J_vel = -SO3d::hat(vel); + J_vel *= eps; + + inc_roll.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(0); + inc_pitch.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(1); + inc_yaw.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(2); + } + } + + inc_x.normalize(); + inc_y.normalize(); + inc_z.normalize(); + inc_roll.normalize(); + inc_pitch.normalize(); + inc_yaw.normalize(); + + // std::cout << "inc_x " << inc_x.transpose() << std::endl; + // std::cout << "inc_y " << inc_y.transpose() << std::endl; + // std::cout << "inc_z " << inc_z.transpose() << std::endl; + // std::cout << "inc_yaw " << inc_yaw.transpose() << std::endl; + + VecXd inc_random; + inc_random.setRandom(marg_size); + inc_random.normalize(); + + MatXd H_d; + VecXd b_d; + + if (mld.is_sqrt) { + MatXd H_sqrt_d = mld.H.template cast(); + VecXd b_sqrt_d = mld.b.template cast(); + + H_d = H_sqrt_d.transpose() * H_sqrt_d; + b_d = H_sqrt_d.transpose() * b_sqrt_d; + + } else { + H_d = mld.H.template cast(); + b_d = mld.b.template cast(); + } + + VecXd xHx(7); + VecXd xb(7); + + xHx[0] = inc_x.transpose() * H_d * inc_x; + xHx[1] = inc_y.transpose() * H_d * inc_y; + xHx[2] = inc_z.transpose() * H_d * inc_z; + xHx[3] = inc_roll.transpose() * H_d * inc_roll; + xHx[4] = inc_pitch.transpose() * H_d * inc_pitch; + xHx[5] = inc_yaw.transpose() * H_d * inc_yaw; + xHx[6] = inc_random.transpose() * H_d * inc_random; + + // b == J^T r, 
so the increments should also lie in left-nullspace of b + xb[0] = inc_x.transpose() * b_d; + xb[1] = inc_y.transpose() * b_d; + xb[2] = inc_z.transpose() * b_d; + xb[3] = inc_roll.transpose() * b_d; + xb[4] = inc_pitch.transpose() * b_d; + xb[5] = inc_yaw.transpose() * b_d; + xb[6] = inc_random.transpose() * b_d; + + if (verbose) { + std::cout << "nullspace x_trans: " << xHx[0] << " + " << xb[0] << std::endl; + std::cout << "nullspace y_trans: " << xHx[1] << " + " << xb[1] << std::endl; + std::cout << "nullspace z_trans: " << xHx[2] << " + " << xb[2] << std::endl; + std::cout << "nullspace roll : " << xHx[3] << " + " << xb[3] << std::endl; + std::cout << "nullspace pitch : " << xHx[4] << " + " << xb[4] << std::endl; + std::cout << "nullspace yaw : " << xHx[5] << " + " << xb[5] << std::endl; + std::cout << "nullspace random : " << xHx[6] << " + " << xb[6] << std::endl; + } + + return xHx + xb; +} + +template +Eigen::VectorXd SqrtBundleAdjustmentBase::checkEigenvalues( + const MargLinData& mld, bool verbose) { + // Check EV of J^T J explicitly instead of doing SVD on J to easily notice if + // we have negative EVs (numerically). We do this computation in double + // precision to avoid any inaccuracies that come from the squaring. + + // For EV, we use SelfAdjointEigenSolver to avoid getting (numerically) + // complex eigenvalues. 
+ Eigen::MatrixXd H; + + if (mld.is_sqrt) { + Eigen::MatrixXd sqrt_H_double = mld.H.template cast(); + H = sqrt_H_double.transpose() * sqrt_H_double; + } else { + H = mld.H.template cast(); + } + + Eigen::SelfAdjointEigenSolver eigensolver(H); + if (eigensolver.info() != Eigen::Success) { + BASALT_LOG_FATAL("eigen solver failed"); + } + + if (verbose) { + std::cout << "EV:\n" << eigensolver.eigenvalues() << std::endl; + } + + return eigensolver.eigenvalues(); +} + +// ////////////////////////////////////////////////////////////////// +// instatiate templates + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +template class SqrtBundleAdjustmentBase; +#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +template class SqrtBundleAdjustmentBase; +#endif + +} // namespace basalt diff --git a/src/vi_estimator/sqrt_keypoint_vio.cpp b/src/vi_estimator/sqrt_keypoint_vio.cpp new file mode 100644 index 0000000..55a7618 --- /dev/null +++ b/src/vi_estimator/sqrt_keypoint_vio.cpp @@ -0,0 +1,1458 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include + +#include + +namespace basalt { + +using namespace fmt::literals; + +template +SqrtKeypointVioEstimator::SqrtKeypointVioEstimator( + const Eigen::Vector3d& g_, const basalt::Calibration& calib_, + const VioConfig& config_) + : take_kf(true), + frames_after_kf(0), + g(g_.cast()), + initialized(false), + config(config_), + lambda(config_.vio_lm_lambda_initial), + min_lambda(config_.vio_lm_lambda_min), + max_lambda(config_.vio_lm_lambda_max), + lambda_vee(2) { + obs_std_dev = Scalar(config.vio_obs_std_dev); + huber_thresh = Scalar(config.vio_obs_huber_thresh); + calib = calib_.cast(); + + // Setup marginalization + marg_data.is_sqrt = config.vio_sqrt_marg; + marg_data.H.setZero(POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE); + marg_data.b.setZero(POSE_VEL_BIAS_SIZE); + + // Version without prior + nullspace_marg_data.is_sqrt = marg_data.is_sqrt; + nullspace_marg_data.H.setZero(POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE); + nullspace_marg_data.b.setZero(POSE_VEL_BIAS_SIZE); + + if (marg_data.is_sqrt) { + // prior on position + marg_data.H.diagonal().template head<3>().setConstant( + 
std::sqrt(Scalar(config.vio_init_pose_weight))); + // prior on yaw + marg_data.H(5, 5) = std::sqrt(Scalar(config.vio_init_pose_weight)); + + // small prior to avoid jumps in bias + marg_data.H.diagonal().template segment<3>(9).array() = + std::sqrt(Scalar(config.vio_init_ba_weight)); + marg_data.H.diagonal().template segment<3>(12).array() = + std::sqrt(Scalar(config.vio_init_bg_weight)); + } else { + // prior on position + marg_data.H.diagonal().template head<3>().setConstant( + Scalar(config.vio_init_pose_weight)); + // prior on yaw + marg_data.H(5, 5) = Scalar(config.vio_init_pose_weight); + + // small prior to avoid jumps in bias + marg_data.H.diagonal().template segment<3>(9).array() = + Scalar(config.vio_init_ba_weight); + marg_data.H.diagonal().template segment<3>(12).array() = + Scalar(config.vio_init_bg_weight); + } + + std::cout << "marg_H (sqrt:" << marg_data.is_sqrt << ")\n" + << marg_data.H << std::endl; + + gyro_bias_sqrt_weight = calib.gyro_bias_std.array().inverse(); + accel_bias_sqrt_weight = calib.accel_bias_std.array().inverse(); + + max_states = config.vio_max_states; + max_kfs = config.vio_max_kfs; + + opt_started = false; + + vision_data_queue.set_capacity(10); + imu_data_queue.set_capacity(300); +} + +template +void SqrtKeypointVioEstimator::initialize( + int64_t t_ns, const Sophus::SE3d& T_w_i, const Eigen::Vector3d& vel_w_i, + const Eigen::Vector3d& bg, const Eigen::Vector3d& ba) { + initialized = true; + T_w_i_init = T_w_i.cast(); + + last_state_t_ns = t_ns; + imu_meas[t_ns] = IntegratedImuMeasurement(t_ns, bg.cast(), + ba.cast()); + frame_states[t_ns] = PoseVelBiasStateWithLin( + t_ns, T_w_i_init, vel_w_i.cast(), bg.cast(), + ba.cast(), true); + + marg_data.order.abs_order_map[t_ns] = std::make_pair(0, POSE_VEL_BIAS_SIZE); + marg_data.order.total_size = POSE_VEL_BIAS_SIZE; + marg_data.order.items = 1; + + nullspace_marg_data.order = marg_data.order; + + initialize(bg, ba); +} + +template +void SqrtKeypointVioEstimator::initialize(const 
Eigen::Vector3d& bg_, + const Eigen::Vector3d& ba_) { + auto proc_func = [&, bg = bg_.cast(), ba = ba_.cast()] { + OpticalFlowResult::Ptr prev_frame, curr_frame; + typename IntegratedImuMeasurement::Ptr meas; + + const Vec3 accel_cov = + calib.dicrete_time_accel_noise_std().array().square(); + const Vec3 gyro_cov = calib.dicrete_time_gyro_noise_std().array().square(); + + typename ImuData::Ptr data = popFromImuDataQueue(); + BASALT_ASSERT_MSG(data, "first IMU measurment is nullptr"); + + data->accel = calib.calib_accel_bias.getCalibrated(data->accel); + data->gyro = calib.calib_gyro_bias.getCalibrated(data->gyro); + + while (true) { + vision_data_queue.pop(curr_frame); + + if (config.vio_enforce_realtime) { + // drop current frame if another frame is already in the queue. + while (!vision_data_queue.empty()) vision_data_queue.pop(curr_frame); + } + + if (!curr_frame.get()) { + break; + } + + // Correct camera time offset + // curr_frame->t_ns += calib.cam_time_offset_ns; + + if (!initialized) { + while (data->t_ns < curr_frame->t_ns) { + data = popFromImuDataQueue(); + if (!data) break; + data->accel = calib.calib_accel_bias.getCalibrated(data->accel); + data->gyro = calib.calib_gyro_bias.getCalibrated(data->gyro); + // std::cout << "Skipping IMU data.." 
<< std::endl; + } + + Vec3 vel_w_i_init; + vel_w_i_init.setZero(); + + T_w_i_init.setQuaternion(Eigen::Quaternion::FromTwoVectors( + data->accel, Vec3::UnitZ())); + + last_state_t_ns = curr_frame->t_ns; + imu_meas[last_state_t_ns] = + IntegratedImuMeasurement(last_state_t_ns, bg, ba); + frame_states[last_state_t_ns] = PoseVelBiasStateWithLin( + last_state_t_ns, T_w_i_init, vel_w_i_init, bg, ba, true); + + marg_data.order.abs_order_map[last_state_t_ns] = + std::make_pair(0, POSE_VEL_BIAS_SIZE); + marg_data.order.total_size = POSE_VEL_BIAS_SIZE; + marg_data.order.items = 1; + + std::cout << "Setting up filter: t_ns " << last_state_t_ns << std::endl; + std::cout << "T_w_i\n" << T_w_i_init.matrix() << std::endl; + std::cout << "vel_w_i " << vel_w_i_init.transpose() << std::endl; + + if (config.vio_debug || config.vio_extended_logging) { + logMargNullspace(); + } + + initialized = true; + } + + if (prev_frame) { + // preintegrate measurements + + auto last_state = frame_states.at(last_state_t_ns); + + meas.reset(new IntegratedImuMeasurement( + prev_frame->t_ns, last_state.getState().bias_gyro, + last_state.getState().bias_accel)); + + BASALT_ASSERT_MSG(prev_frame->t_ns < curr_frame->t_ns, + "duplicate frame timestamps?! 
zero time delta leads " + "to invalid IMU integration."); + + while (data->t_ns <= prev_frame->t_ns) { + data = popFromImuDataQueue(); + if (!data) break; + data->accel = calib.calib_accel_bias.getCalibrated(data->accel); + data->gyro = calib.calib_gyro_bias.getCalibrated(data->gyro); + } + + while (data->t_ns <= curr_frame->t_ns) { + meas->integrate(*data, accel_cov, gyro_cov); + data = popFromImuDataQueue(); + if (!data) break; + data->accel = calib.calib_accel_bias.getCalibrated(data->accel); + data->gyro = calib.calib_gyro_bias.getCalibrated(data->gyro); + } + + if (meas->get_start_t_ns() + meas->get_dt_ns() < curr_frame->t_ns) { + if (!data.get()) break; + int64_t tmp = data->t_ns; + data->t_ns = curr_frame->t_ns; + meas->integrate(*data, accel_cov, gyro_cov); + data->t_ns = tmp; + } + } + + measure(curr_frame, meas); + prev_frame = curr_frame; + } + + if (out_vis_queue) out_vis_queue->push(nullptr); + if (out_marg_queue) out_marg_queue->push(nullptr); + if (out_state_queue) out_state_queue->push(nullptr); + + finished = true; + + std::cout << "Finished VIOFilter " << std::endl; + }; + + processing_thread.reset(new std::thread(proc_func)); +} + +template +void SqrtKeypointVioEstimator::addIMUToQueue( + const ImuData::Ptr& data) { + imu_data_queue.emplace(data); +} + +template +void SqrtKeypointVioEstimator::addVisionToQueue( + const OpticalFlowResult::Ptr& data) { + vision_data_queue.push(data); +} + +template +typename ImuData::Ptr +SqrtKeypointVioEstimator::popFromImuDataQueue() { + ImuData::Ptr data; + imu_data_queue.pop(data); + + if constexpr (std::is_same_v) { + return data; + } else { + typename ImuData::Ptr data2; + if (data) { + data2.reset(new ImuData); + *data2 = data->cast(); + } + return data2; + } +} + +template +bool SqrtKeypointVioEstimator::measure( + const OpticalFlowResult::Ptr& opt_flow_meas, + const typename IntegratedImuMeasurement::Ptr& meas) { + stats_sums_.add("frame_id", opt_flow_meas->t_ns).format("none"); + Timer t_total; + + if 
(meas.get()) { + BASALT_ASSERT(frame_states[last_state_t_ns].getState().t_ns == + meas->get_start_t_ns()); + BASALT_ASSERT(opt_flow_meas->t_ns == + meas->get_dt_ns() + meas->get_start_t_ns()); + BASALT_ASSERT(meas->get_dt_ns() > 0); + + PoseVelBiasState next_state = + frame_states.at(last_state_t_ns).getState(); + + meas->predictState(frame_states.at(last_state_t_ns).getState(), g, + next_state); + + last_state_t_ns = opt_flow_meas->t_ns; + next_state.t_ns = opt_flow_meas->t_ns; + + frame_states[last_state_t_ns] = PoseVelBiasStateWithLin(next_state); + + imu_meas[meas->get_start_t_ns()] = *meas; + } + + // save results + prev_opt_flow_res[opt_flow_meas->t_ns] = opt_flow_meas; + + // Make new residual for existing keypoints + int connected0 = 0; + std::map num_points_connected; + std::unordered_set unconnected_obs0; + for (size_t i = 0; i < opt_flow_meas->observations.size(); i++) { + TimeCamId tcid_target(opt_flow_meas->t_ns, i); + + for (const auto& kv_obs : opt_flow_meas->observations[i]) { + int kpt_id = kv_obs.first; + + if (lmdb.landmarkExists(kpt_id)) { + const TimeCamId& tcid_host = lmdb.getLandmark(kpt_id).host_kf_id; + + KeypointObservation kobs; + kobs.kpt_id = kpt_id; + kobs.pos = kv_obs.second.translation().cast(); + + lmdb.addObservation(tcid_target, kobs); + // obs[tcid_host][tcid_target].push_back(kobs); + + if (num_points_connected.count(tcid_host.frame_id) == 0) { + num_points_connected[tcid_host.frame_id] = 0; + } + num_points_connected[tcid_host.frame_id]++; + + if (i == 0) connected0++; + } else { + if (i == 0) { + unconnected_obs0.emplace(kpt_id); + } + } + } + } + + if (Scalar(connected0) / (connected0 + unconnected_obs0.size()) < + Scalar(config.vio_new_kf_keypoints_thresh) && + frames_after_kf > config.vio_min_frames_after_kf) + take_kf = true; + + if (config.vio_debug) { + std::cout << "connected0 " << connected0 << " unconnected0 " + << unconnected_obs0.size() << std::endl; + } + + if (take_kf) { + // Triangulate new points from one of the 
observations (with sufficient + // baseline) and make keyframe for camera 0 + take_kf = false; + frames_after_kf = 0; + kf_ids.emplace(last_state_t_ns); + + TimeCamId tcidl(opt_flow_meas->t_ns, 0); + + int num_points_added = 0; + for (int lm_id : unconnected_obs0) { + // Find all observations + std::map> kp_obs; + + for (const auto& kv : prev_opt_flow_res) { + for (size_t k = 0; k < kv.second->observations.size(); k++) { + auto it = kv.second->observations[k].find(lm_id); + if (it != kv.second->observations[k].end()) { + TimeCamId tcido(kv.first, k); + + KeypointObservation kobs; + kobs.kpt_id = lm_id; + kobs.pos = it->second.translation().template cast(); + + // obs[tcidl][tcido].push_back(kobs); + kp_obs[tcido] = kobs; + } + } + } + + // triangulate + bool valid_kp = false; + const Scalar min_triang_distance2 = + Scalar(config.vio_min_triangulation_dist * + config.vio_min_triangulation_dist); + for (const auto& kv_obs : kp_obs) { + if (valid_kp) break; + TimeCamId tcido = kv_obs.first; + + const Vec2 p0 = opt_flow_meas->observations.at(0) + .at(lm_id) + .translation() + .cast(); + const Vec2 p1 = prev_opt_flow_res[tcido.frame_id] + ->observations[tcido.cam_id] + .at(lm_id) + .translation() + .template cast(); + + Vec4 p0_3d, p1_3d; + bool valid1 = calib.intrinsics[0].unproject(p0, p0_3d); + bool valid2 = calib.intrinsics[tcido.cam_id].unproject(p1, p1_3d); + if (!valid1 || !valid2) continue; + + SE3 T_i0_i1 = getPoseStateWithLin(tcidl.frame_id).getPose().inverse() * + getPoseStateWithLin(tcido.frame_id).getPose(); + SE3 T_0_1 = + calib.T_i_c[0].inverse() * T_i0_i1 * calib.T_i_c[tcido.cam_id]; + + if (T_0_1.translation().squaredNorm() < min_triang_distance2) continue; + + Vec4 p0_triangulated = triangulate(p0_3d.template head<3>(), + p1_3d.template head<3>(), T_0_1); + + if (p0_triangulated.array().isFinite().all() && + p0_triangulated[3] > 0 && p0_triangulated[3] < 3.0) { + Keypoint kpt_pos; + kpt_pos.host_kf_id = tcidl; + kpt_pos.direction = + 
StereographicParam::project(p0_triangulated); + kpt_pos.inv_dist = p0_triangulated[3]; + lmdb.addLandmark(lm_id, kpt_pos); + + num_points_added++; + valid_kp = true; + } + } + + if (valid_kp) { + for (const auto& kv_obs : kp_obs) { + lmdb.addObservation(kv_obs.first, kv_obs.second); + } + } + } + + num_points_kf[opt_flow_meas->t_ns] = num_points_added; + } else { + frames_after_kf++; + } + + std::unordered_set lost_landmaks; + if (config.vio_marg_lost_landmarks) { + for (const auto& kv : lmdb.getLandmarks()) { + bool connected = false; + for (size_t i = 0; i < opt_flow_meas->observations.size(); i++) { + if (opt_flow_meas->observations[i].count(kv.first) > 0) + connected = true; + } + if (!connected) { + lost_landmaks.emplace(kv.first); + } + } + } + + optimize_and_marg(num_points_connected, lost_landmaks); + + if (out_state_queue) { + PoseVelBiasStateWithLin p = frame_states.at(last_state_t_ns); + + typename PoseVelBiasState::Ptr data( + new PoseVelBiasState(p.getState().template cast())); + + out_state_queue->push(data); + } + + if (out_vis_queue) { + VioVisualizationData::Ptr data(new VioVisualizationData); + + data->t_ns = last_state_t_ns; + + for (const auto& kv : frame_states) { + data->states.emplace_back( + kv.second.getState().T_w_i.template cast()); + } + + for (const auto& kv : frame_poses) { + data->frames.emplace_back(kv.second.getPose().template cast()); + } + + get_current_points(data->points, data->point_ids); + + data->projections.resize(opt_flow_meas->observations.size()); + computeProjections(data->projections, last_state_t_ns); + + data->opt_flow_res = prev_opt_flow_res[last_state_t_ns]; + + out_vis_queue->push(data); + } + + last_processed_t_ns = last_state_t_ns; + + stats_sums_.add("measure", t_total.elapsed()).format("ms"); + + return true; +} + +template +void SqrtKeypointVioEstimator::logMargNullspace() { + nullspace_marg_data.order = marg_data.order; + if (config.vio_debug) { + std::cout << "======== Marg nullspace ==========" << 
std::endl; + stats_sums_.add("marg_ns", checkMargNullspace()); + std::cout << "=================================" << std::endl; + } else { + stats_sums_.add("marg_ns", checkMargNullspace()); + } + stats_sums_.add("marg_ev", checkMargEigenvalues()); +} + +template +Eigen::VectorXd SqrtKeypointVioEstimator::checkMargNullspace() const { + return checkNullspace(nullspace_marg_data, frame_states, frame_poses, + config.vio_debug); +} + +template +Eigen::VectorXd SqrtKeypointVioEstimator::checkMargEigenvalues() + const { + return checkEigenvalues(nullspace_marg_data, false); +} + +template +void SqrtKeypointVioEstimator::marginalize( + const std::map& num_points_connected, + const std::unordered_set& lost_landmaks) { + if (!opt_started) return; + + Timer t_total; + + if (frame_poses.size() > max_kfs || frame_states.size() >= max_states) { + // Marginalize + + const int states_to_remove = frame_states.size() - max_states + 1; + + auto it = frame_states.cbegin(); + for (int i = 0; i < states_to_remove; i++) it++; + int64_t last_state_to_marg = it->first; + + AbsOrderMap aom; + + // remove all frame_poses that are not kfs + std::set poses_to_marg; + for (const auto& kv : frame_poses) { + aom.abs_order_map[kv.first] = std::make_pair(aom.total_size, POSE_SIZE); + + if (kf_ids.count(kv.first) == 0) poses_to_marg.emplace(kv.first); + + // Check that we have the same order as marginalization + BASALT_ASSERT(marg_data.order.abs_order_map.at(kv.first) == + aom.abs_order_map.at(kv.first)); + + aom.total_size += POSE_SIZE; + aom.items++; + } + + std::set states_to_marg_vel_bias; + std::set states_to_marg_all; + for (const auto& kv : frame_states) { + if (kv.first > last_state_to_marg) break; + + if (kv.first != last_state_to_marg) { + if (kf_ids.count(kv.first) > 0) { + states_to_marg_vel_bias.emplace(kv.first); + } else { + states_to_marg_all.emplace(kv.first); + } + } + + aom.abs_order_map[kv.first] = + std::make_pair(aom.total_size, POSE_VEL_BIAS_SIZE); + + // Check that we have 
the same order as marginalization + if (aom.items < marg_data.order.abs_order_map.size()) + BASALT_ASSERT(marg_data.order.abs_order_map.at(kv.first) == + aom.abs_order_map.at(kv.first)); + + aom.total_size += POSE_VEL_BIAS_SIZE; + aom.items++; + } + + auto kf_ids_all = kf_ids; + std::set kfs_to_marg; + while (kf_ids.size() > max_kfs && !states_to_marg_vel_bias.empty()) { + int64_t id_to_marg = -1; + + // starting from the oldest kf (and skipping the newest 2 kfs), try to + // find a kf that has less than a small percentage of it's landmarks + // tracked by the current frame + if (kf_ids.size() > 2) { + // Note: size > 2 check is to ensure prev(kf_ids.end(), 2) is valid + auto end_minus_2 = std::prev(kf_ids.end(), 2); + + for (auto it = kf_ids.begin(); it != end_minus_2; ++it) { + if (num_points_connected.count(*it) == 0 || + (num_points_connected.at(*it) / + static_cast(num_points_kf.at(*it)) < + config.vio_kf_marg_feature_ratio)) { + id_to_marg = *it; + break; + } + } + } + + // Note: This score function is taken from DSO, but it seems to mostly + // marginalize the oldest keyframe. This may be due to the fact that + // we don't have as long-lived landmarks, which may change if we ever + // implement "rediscovering" of lost feature tracks by projecting + // untracked landmarks into the localized frame. 
+ if (kf_ids.size() > 2 && id_to_marg < 0) { + // Note: size > 2 check is to ensure prev(kf_ids.end(), 2) is valid + auto end_minus_2 = std::prev(kf_ids.end(), 2); + + int64_t last_kf = *kf_ids.crbegin(); + Scalar min_score = std::numeric_limits::max(); + int64_t min_score_id = -1; + + for (auto it1 = kf_ids.begin(); it1 != end_minus_2; ++it1) { + // small distance to other keyframes --> higher score + Scalar denom = 0; + for (auto it2 = kf_ids.begin(); it2 != end_minus_2; ++it2) { + denom += 1 / ((frame_poses.at(*it1).getPose().translation() - + frame_poses.at(*it2).getPose().translation()) + .norm() + + Scalar(1e-5)); + } + + // small distance to latest kf --> lower score + Scalar score = + std::sqrt( + (frame_poses.at(*it1).getPose().translation() - + frame_states.at(last_kf).getState().T_w_i.translation()) + .norm()) * + denom; + + if (score < min_score) { + min_score_id = *it1; + min_score = score; + } + } + + id_to_marg = min_score_id; + } + + // if no frame was selected, the logic above is faulty + BASALT_ASSERT(id_to_marg >= 0); + + kfs_to_marg.emplace(id_to_marg); + poses_to_marg.emplace(id_to_marg); + + kf_ids.erase(id_to_marg); + } + + // std::cout << "marg order" << std::endl; + // aom.print_order(); + + // std::cout << "marg prior order" << std::endl; + // marg_order.print_order(); + + if (config.vio_debug) { + std::cout << "states_to_remove " << states_to_remove << std::endl; + std::cout << "poses_to_marg.size() " << poses_to_marg.size() << std::endl; + std::cout << "states_to_marg.size() " << states_to_marg_all.size() + << std::endl; + std::cout << "state_to_marg_vel_bias.size() " + << states_to_marg_vel_bias.size() << std::endl; + std::cout << "kfs_to_marg.size() " << kfs_to_marg.size() << std::endl; + } + + Timer t_actual_marg; + + size_t asize = aom.total_size; + + bool is_lin_sqrt = isLinearizationSqrt(config.vio_linearization_type); + + MatX Q2Jp_or_H; + VecX Q2r_or_b; + + { + Timer t_linearize; + + typename LinearizationBase::Options 
lqr_options; + lqr_options.lb_options.huber_parameter = huber_thresh; + lqr_options.lb_options.obs_std_dev = obs_std_dev; + lqr_options.linearization_type = config.vio_linearization_type; + + ImuLinData ild = { + g, gyro_bias_sqrt_weight, accel_bias_sqrt_weight, {}}; + + for (const auto& kv : imu_meas) { + int64_t start_t = kv.second.get_start_t_ns(); + int64_t end_t = kv.second.get_start_t_ns() + kv.second.get_dt_ns(); + + if (aom.abs_order_map.count(start_t) == 0 || + aom.abs_order_map.count(end_t) == 0) + continue; + + ild.imu_meas[kv.first] = &kv.second; + } + + auto lqr = LinearizationBase::create( + this, aom, lqr_options, &marg_data, &ild, &kfs_to_marg, + &lost_landmaks, last_state_to_marg); + + lqr->linearizeProblem(); + lqr->performQR(); + + if (is_lin_sqrt && marg_data.is_sqrt) { + lqr->get_dense_Q2Jp_Q2r(Q2Jp_or_H, Q2r_or_b); + } else { + lqr->get_dense_H_b(Q2Jp_or_H, Q2r_or_b); + } + + stats_sums_.add("marg_linearize", t_linearize.elapsed()).format("ms"); + } + + // KeypointVioEstimator::linearizeAbsIMU( + // aom, accum.getH(), accum.getB(), imu_error, bg_error, ba_error, + // frame_states, imu_meas, gyro_bias_weight, accel_bias_weight, g); + // linearizeMargPrior(marg_order, marg_sqrt_H, marg_sqrt_b, aom, + // accum.getH(), + // accum.getB(), marg_prior_error); + + // Save marginalization prior + if (out_marg_queue && !kfs_to_marg.empty()) { + // int64_t kf_id = *kfs_to_marg.begin(); + + { + MargData::Ptr m(new MargData); + m->aom = aom; + + if (is_lin_sqrt && marg_data.is_sqrt) { + m->abs_H = + (Q2Jp_or_H.transpose() * Q2Jp_or_H).template cast(); + m->abs_b = (Q2Jp_or_H.transpose() * Q2r_or_b).template cast(); + } else { + m->abs_H = Q2Jp_or_H.template cast(); + + m->abs_b = Q2r_or_b.template cast(); + } + + assign_cast_map_values(m->frame_poses, frame_poses); + assign_cast_map_values(m->frame_states, frame_states); + m->kfs_all = kf_ids_all; + m->kfs_to_marg = kfs_to_marg; + m->use_imu = true; + + for (int64_t t : m->kfs_all) { + 
m->opt_flow_res.emplace_back(prev_opt_flow_res.at(t)); + } + + out_marg_queue->push(m); + } + } + + std::set idx_to_keep, idx_to_marg; + for (const auto& kv : aom.abs_order_map) { + if (kv.second.second == POSE_SIZE) { + int start_idx = kv.second.first; + if (poses_to_marg.count(kv.first) == 0) { + for (size_t i = 0; i < POSE_SIZE; i++) + idx_to_keep.emplace(start_idx + i); + } else { + for (size_t i = 0; i < POSE_SIZE; i++) + idx_to_marg.emplace(start_idx + i); + } + } else { + BASALT_ASSERT(kv.second.second == POSE_VEL_BIAS_SIZE); + // state + int start_idx = kv.second.first; + if (states_to_marg_all.count(kv.first) > 0) { + for (size_t i = 0; i < POSE_VEL_BIAS_SIZE; i++) + idx_to_marg.emplace(start_idx + i); + } else if (states_to_marg_vel_bias.count(kv.first) > 0) { + for (size_t i = 0; i < POSE_SIZE; i++) + idx_to_keep.emplace(start_idx + i); + for (size_t i = POSE_SIZE; i < POSE_VEL_BIAS_SIZE; i++) + idx_to_marg.emplace(start_idx + i); + } else { + BASALT_ASSERT(kv.first == last_state_to_marg); + for (size_t i = 0; i < POSE_VEL_BIAS_SIZE; i++) + idx_to_keep.emplace(start_idx + i); + } + } + } + + if (config.vio_debug) { + std::cout << "keeping " << idx_to_keep.size() << " marg " + << idx_to_marg.size() << " total " << asize << std::endl; + std::cout << "last_state_to_marg " << last_state_to_marg + << " frame_poses " << frame_poses.size() << " frame_states " + << frame_states.size() << std::endl; + } + + if (config.vio_debug || config.vio_extended_logging) { + MatX Q2Jp_or_H_nullspace; + VecX Q2r_or_b_nullspace; + + typename LinearizationBase::Options lqr_options; + lqr_options.lb_options.huber_parameter = huber_thresh; + lqr_options.lb_options.obs_std_dev = obs_std_dev; + lqr_options.linearization_type = config.vio_linearization_type; + + nullspace_marg_data.order = marg_data.order; + + ImuLinData ild = { + g, gyro_bias_sqrt_weight, accel_bias_sqrt_weight, {}}; + + for (const auto& kv : imu_meas) { + int64_t start_t = kv.second.get_start_t_ns(); + int64_t 
end_t = kv.second.get_start_t_ns() + kv.second.get_dt_ns(); + + if (aom.abs_order_map.count(start_t) == 0 || + aom.abs_order_map.count(end_t) == 0) + continue; + + ild.imu_meas[kv.first] = &kv.second; + } + + auto lqr = LinearizationBase::create( + this, aom, lqr_options, &nullspace_marg_data, &ild, &kfs_to_marg, + &lost_landmaks, last_state_to_marg); + + lqr->linearizeProblem(); + lqr->performQR(); + + if (is_lin_sqrt && marg_data.is_sqrt) { + lqr->get_dense_Q2Jp_Q2r(Q2Jp_or_H_nullspace, Q2r_or_b_nullspace); + } else { + lqr->get_dense_H_b(Q2Jp_or_H_nullspace, Q2r_or_b_nullspace); + } + + MatX nullspace_sqrt_H_new; + VecX nullspace_sqrt_b_new; + + if (is_lin_sqrt && marg_data.is_sqrt) { + MargHelper::marginalizeHelperSqrtToSqrt( + Q2Jp_or_H_nullspace, Q2r_or_b_nullspace, idx_to_keep, idx_to_marg, + nullspace_sqrt_H_new, nullspace_sqrt_b_new); + } else if (marg_data.is_sqrt) { + MargHelper::marginalizeHelperSqToSqrt( + Q2Jp_or_H_nullspace, Q2r_or_b_nullspace, idx_to_keep, idx_to_marg, + nullspace_sqrt_H_new, nullspace_sqrt_b_new); + } else { + MargHelper::marginalizeHelperSqToSq( + Q2Jp_or_H_nullspace, Q2r_or_b_nullspace, idx_to_keep, idx_to_marg, + nullspace_sqrt_H_new, nullspace_sqrt_b_new); + } + + nullspace_marg_data.H = nullspace_sqrt_H_new; + nullspace_marg_data.b = nullspace_sqrt_b_new; + } + + MatX marg_H_new; + VecX marg_b_new; + + { + Timer t; + if (is_lin_sqrt && marg_data.is_sqrt) { + MargHelper::marginalizeHelperSqrtToSqrt( + Q2Jp_or_H, Q2r_or_b, idx_to_keep, idx_to_marg, marg_H_new, + marg_b_new); + } else if (marg_data.is_sqrt) { + MargHelper::marginalizeHelperSqToSqrt(Q2Jp_or_H, Q2r_or_b, + idx_to_keep, idx_to_marg, + marg_H_new, marg_b_new); + } else { + MargHelper::marginalizeHelperSqToSq(Q2Jp_or_H, Q2r_or_b, + idx_to_keep, idx_to_marg, + marg_H_new, marg_b_new); + } + + stats_sums_.add("marg_helper", t.elapsed()).format("ms"); + } + + { + BASALT_ASSERT(frame_states.at(last_state_to_marg).isLinearized() == + false); + 
frame_states.at(last_state_to_marg).setLinTrue(); + } + + for (const int64_t id : states_to_marg_all) { + frame_states.erase(id); + imu_meas.erase(id); + prev_opt_flow_res.erase(id); + } + + for (const int64_t id : states_to_marg_vel_bias) { + const PoseVelBiasStateWithLin& state = frame_states.at(id); + PoseStateWithLin pose(state); + + frame_poses[id] = pose; + frame_states.erase(id); + imu_meas.erase(id); + } + + for (const int64_t id : poses_to_marg) { + frame_poses.erase(id); + prev_opt_flow_res.erase(id); + } + + lmdb.removeKeyframes(kfs_to_marg, poses_to_marg, states_to_marg_all); + + if (config.vio_marg_lost_landmarks) { + for (const auto& lm_id : lost_landmaks) lmdb.removeLandmark(lm_id); + } + + AbsOrderMap marg_order_new; + + for (const auto& kv : frame_poses) { + marg_order_new.abs_order_map[kv.first] = + std::make_pair(marg_order_new.total_size, POSE_SIZE); + + marg_order_new.total_size += POSE_SIZE; + marg_order_new.items++; + } + + { + marg_order_new.abs_order_map[last_state_to_marg] = + std::make_pair(marg_order_new.total_size, POSE_VEL_BIAS_SIZE); + marg_order_new.total_size += POSE_VEL_BIAS_SIZE; + marg_order_new.items++; + } + + marg_data.H = marg_H_new; + marg_data.b = marg_b_new; + marg_data.order = marg_order_new; + + BASALT_ASSERT(size_t(marg_data.H.cols()) == marg_data.order.total_size); + + // Quadratic prior and "delta" of the current state to the original + // linearization point give cost function + // + // P(x) = 0.5 || J*(delta+x) + r ||^2. + // + // For marginalization this has been linearized at x=0 to give + // linearization + // + // P(x) = 0.5 || J*x + (J*delta + r) ||^2, + // + // with Jacobian J and residual J*delta + r. + // + // After marginalization, we recover the original form of the + // prior. We are left with linearization (in sqrt form) + // + // Pnew(x) = 0.5 || Jnew*x + res ||^2. 
+ // + // To recover the original form with delta-independent r, we set + // + // Pnew(x) = 0.5 || Jnew*(delta+x) + (res - Jnew*delta) ||^2, + // + // and thus rnew = (res - Jnew*delta). + + VecX delta; + computeDelta(marg_data.order, delta); + marg_data.b -= marg_data.H * delta; + + if (config.vio_debug || config.vio_extended_logging) { + VecX delta; + computeDelta(marg_data.order, delta); + nullspace_marg_data.b -= nullspace_marg_data.H * delta; + } + + stats_sums_.add("marg", t_actual_marg.elapsed()).format("ms"); + + if (config.vio_debug) { + std::cout << "marginalizaon done!!" << std::endl; + } + + if (config.vio_debug || config.vio_extended_logging) { + Timer t; + logMargNullspace(); + stats_sums_.add("marg_log", t.elapsed()).format("ms"); + } + + // std::cout << "new marg prior order" << std::endl; + // marg_order.print_order(); + } + + stats_sums_.add("marginalize", t_total.elapsed()).format("ms"); +} + +template +void SqrtKeypointVioEstimator::optimize() { + if (config.vio_debug) { + std::cout << "=================================" << std::endl; + } + + if (opt_started || frame_states.size() > 4) { + opt_started = true; + + // harcoded configs + // bool scale_Jp = config.vio_scale_jacobian && is_qr_solver(); + // bool scale_Jl = config.vio_scale_jacobian && is_qr_solver(); + + // timing + ExecutionStats stats; + Timer timer_total; + Timer timer_iteration; + + // construct order of states in linear system --> sort by ascending + // timestamp + AbsOrderMap aom; + + for (const auto& kv : frame_poses) { + aom.abs_order_map[kv.first] = std::make_pair(aom.total_size, POSE_SIZE); + + // Check that we have the same order as marginalization + BASALT_ASSERT(marg_data.order.abs_order_map.at(kv.first) == + aom.abs_order_map.at(kv.first)); + + aom.total_size += POSE_SIZE; + aom.items++; + } + + for (const auto& kv : frame_states) { + aom.abs_order_map[kv.first] = + std::make_pair(aom.total_size, POSE_VEL_BIAS_SIZE); + + // Check that we have the same order as 
marginalization + if (aom.items < marg_data.order.abs_order_map.size()) + BASALT_ASSERT(marg_data.order.abs_order_map.at(kv.first) == + aom.abs_order_map.at(kv.first)); + + aom.total_size += POSE_VEL_BIAS_SIZE; + aom.items++; + } + + // TODO: Check why we get better accuracy with old SC loop. Possible + // culprits: + // - different initial lambda (based on previous iteration) + // - no landmark damping + // - outlier removal after 4 iterations? + lambda = Scalar(config.vio_lm_lambda_initial); + + // record stats + stats.add("num_cams", this->frame_poses.size()).format("count"); + stats.add("num_lms", this->lmdb.numLandmarks()).format("count"); + stats.add("num_obs", this->lmdb.numObservations()).format("count"); + + // setup landmark blocks + typename LinearizationBase::Options lqr_options; + lqr_options.lb_options.huber_parameter = huber_thresh; + lqr_options.lb_options.obs_std_dev = obs_std_dev; + lqr_options.linearization_type = config.vio_linearization_type; + + std::unique_ptr> lqr; + + ImuLinData ild = { + g, gyro_bias_sqrt_weight, accel_bias_sqrt_weight, {}}; + for (const auto& kv : imu_meas) { + ild.imu_meas[kv.first] = &kv.second; + } + + { + Timer t; + lqr = LinearizationBase::create(this, aom, lqr_options, + &marg_data, &ild); + stats.add("allocateLMB", t.reset()).format("ms"); + lqr->log_problem_stats(stats); + } + + bool terminated = false; + bool converged = false; + std::string message; + + int it = 0; + int it_rejected = 0; + for (; it <= config.vio_max_iterations && !terminated;) { + if (it > 0) { + timer_iteration.reset(); + } + + Scalar error_total = 0; + VecX Jp_column_norm2; + + { + // TODO: execution could be done staged + + Timer t; + + // linearize residuals + bool numerically_valid; + error_total = lqr->linearizeProblem(&numerically_valid); + BASALT_ASSERT_STREAM( + numerically_valid, + "did not expect numerical failure during linearization"); + stats.add("linearizeProblem", t.reset()).format("ms"); + + // // compute pose jacobian norm 
squared for Jacobian scaling + // if (scale_Jp) { + // Jp_column_norm2 = lqr->getJp_diag2(); + // stats.add("getJp_diag2", t.reset()).format("ms"); + // } + + // // scale landmark jacobians + // if (scale_Jl) { + // lqr->scaleJl_cols(); + // stats.add("scaleJl_cols", t.reset()).format("ms"); + // } + + // marginalize points in place + lqr->performQR(); + stats.add("performQR", t.reset()).format("ms"); + } + + if (config.vio_debug) { + // TODO: num_points debug output missing + std::cout << "[LINEARIZE] Error: " << error_total << " num points " + << std::endl; + std::cout << "Iteration " << it << " " << error_total << std::endl; + } + + // compute pose jacobian scaling + // VecX jacobian_scaling; + // if (scale_Jp) { + // // TODO: what about jacobian scaling for SC solver? + + // // ceres uses 1.0 / (1.0 + sqrt(SquaredColumnNorm)) + // // we use 1.0 / (eps + sqrt(SquaredColumnNorm)) + // jacobian_scaling = (lqr_options.lb_options.jacobi_scaling_eps + + // Jp_column_norm2.array().sqrt()) + // .inverse(); + // } + // if (config.vio_debug) { + // std::cout << "\t[INFO] Stage 1" << std::endl; + //} + + // inner loop for backtracking in LM (still count as main iteration + // though) + for (int j = 0; it <= config.vio_max_iterations && !terminated; j++) { + if (j > 0) { + timer_iteration.reset(); + if (config.vio_debug) { + std::cout << "Iteration " << it << ", backtracking" << std::endl; + } + } + + { + // Timer t; + + // TODO: execution could be done staged + + // // set (updated) damping for poses + // if (config.vio_lm_pose_damping_variant == 0) { + // lqr->setPoseDamping(lambda); + // stats.add("setPoseDamping", t.reset()).format("ms"); + // } + + // // scale landmark Jacobians only on the first inner + // iteration. 
if (scale_Jp && j == 0) { + // lqr->scaleJp_cols(jacobian_scaling); + // stats.add("scaleJp_cols", t.reset()).format("ms"); + // } + + // // set (updated) damping for landmarks + // if (config.vio_lm_landmark_damping_variant == 0) { + // lqr->setLandmarkDamping(lambda); + // stats.add("setLandmarkDamping", t.reset()).format("ms"); + // } + } + + // if (config.vio_debug) { + // std::cout << "\t[INFO] Stage 2 " << std::endl; + // } + + VecX inc; + { + Timer t; + + // get dense reduced camera system + MatX H; + VecX b; + + lqr->get_dense_H_b(H, b); + + stats.add("get_dense_H_b", t.reset()).format("ms"); + + int iter = 0; + bool inc_valid = false; + constexpr int max_num_iter = 3; + + while (iter < max_num_iter && !inc_valid) { + VecX Hdiag_lambda = (H.diagonal() * lambda).cwiseMax(min_lambda); + MatX H_copy = H; + H_copy.diagonal() += Hdiag_lambda; + + Eigen::LDLT> ldlt(H_copy); + inc = ldlt.solve(b); + stats.add("solve", t.reset()).format("ms"); + + if (!inc.array().isFinite().all()) { + lambda = lambda_vee * lambda; + lambda_vee *= vee_factor; + } else { + inc_valid = true; + } + iter++; + } + + if (!inc_valid) { + std::cerr << "Still invalid inc after " << max_num_iter + << " iterations." 
<< std::endl; + } + } + + // backup state (then apply increment and check cost decrease) + backup(); + + // backsubstitute (with scaled pose increment) + Scalar l_diff = 0; + { + // negate pose increment before point update + inc = -inc; + + Timer t; + l_diff = lqr->backSubstitute(inc); + stats.add("backSubstitute", t.reset()).format("ms"); + } + + // undo jacobian scaling before applying increment to poses + // if (scale_Jp) { + // inc.array() *= jacobian_scaling.array(); + // } + + // apply increment to poses + for (auto& [frame_id, state] : frame_poses) { + int idx = aom.abs_order_map.at(frame_id).first; + state.applyInc(inc.template segment(idx)); + } + + for (auto& [frame_id, state] : frame_states) { + int idx = aom.abs_order_map.at(frame_id).first; + state.applyInc(inc.template segment(idx)); + } + + // compute stepsize + Scalar step_norminf = inc.array().abs().maxCoeff(); + + // compute error update applying increment + Scalar after_update_marg_prior_error = 0; + Scalar after_update_vision_and_inertial_error = 0; + + { + Timer t; + computeError(after_update_vision_and_inertial_error); + computeMargPriorError(marg_data, after_update_marg_prior_error); + + Scalar after_update_imu_error = 0, after_bg_error = 0, + after_ba_error = 0; + ScBundleAdjustmentBase::computeImuError( + aom, after_update_imu_error, after_bg_error, after_ba_error, + frame_states, imu_meas, gyro_bias_sqrt_weight.array().square(), + accel_bias_sqrt_weight.array().square(), g); + + after_update_vision_and_inertial_error += + after_update_imu_error + after_bg_error + after_ba_error; + + stats.add("computerError2", t.reset()).format("ms"); + } + + Scalar after_error_total = after_update_vision_and_inertial_error + + after_update_marg_prior_error; + + // check cost decrease compared to quadratic model cost + Scalar f_diff; + bool step_is_valid = false; + bool step_is_successful = false; + Scalar relative_decrease = 0; + { + // compute actual cost decrease + f_diff = error_total - 
after_error_total; + + relative_decrease = f_diff / l_diff; + + if (config.vio_debug) { + std::cout << "\t[EVAL] error: {:.4e}, f_diff {:.4e} l_diff {:.4e} " + "step_quality {:.2e} step_size {:.2e}\n"_format( + after_error_total, f_diff, l_diff, + relative_decrease, step_norminf); + } + + // TODO: consider to remove assert. For now we want to test if we + // even run into the l_diff <= 0 case ever in practice + // BASALT_ASSERT_STREAM(l_diff > 0, "l_diff " << l_diff); + + // l_diff <= 0 is a theoretical possibility if the model cost change + // is tiny and becomes numerically negative (non-positive). It might + // not occur since our linear systems are not that big (compared to + // large scale BA for example) and we also abort optimization quite + // early and usually don't have large damping (== tiny step size). + step_is_valid = l_diff > 0; + step_is_successful = step_is_valid && relative_decrease > 0; + } + + double iteration_time = timer_iteration.elapsed(); + double cumulative_time = timer_total.elapsed(); + + stats.add("iteration", iteration_time).format("ms"); + { + basalt::MemoryInfo mi; + if (get_memory_info(mi)) { + stats.add("resident_memory", mi.resident_memory); + stats.add("resident_memory_peak", mi.resident_memory_peak); + } + } + + if (step_is_successful) { + BASALT_ASSERT(step_is_valid); + + if (config.vio_debug) { + // std::cout << "\t[ACCEPTED] lambda:" << lambda + // << " Error: " << after_error_total << + // std::endl; + + std::cout << "\t[ACCEPTED] error: {:.4e}, lambda: {:.1e}, it_time: " + "{:.3f}s, total_time: {:.3f}s\n" + ""_format(after_error_total, lambda, iteration_time, + cumulative_time); + } + + lambda *= std::max( + Scalar(1.0) / 3, + 1 - std::pow(2 * relative_decrease - 1, 3)); + lambda = std::max(min_lambda, lambda); + + lambda_vee = initial_vee; + + it++; + + // check function and parameter tolerance + if ((f_diff > 0 && f_diff < Scalar(1e-6)) || + step_norminf < Scalar(1e-4)) { + converged = true; + terminated = true; + } + + 
// stop inner lm loop + break; + } else { + std::string reason = step_is_valid ? "REJECTED" : "INVALID"; + + if (config.vio_debug) { + // std::cout << "\t[REJECTED] lambda:" << lambda + // << " Error: " << after_error_total << + // std::endl; + + std::cout << "\t[{}] error: {}, lambda: {:.1e}, it_time:" + "{:.3f}s, total_time: {:.3f}s\n" + ""_format(reason, after_error_total, lambda, + iteration_time, cumulative_time); + } + + lambda = lambda_vee * lambda; + lambda_vee *= vee_factor; + + // lambda = std::max(min_lambda, lambda); + // lambda = std::min(max_lambda, lambda); + + restore(); + it++; + it_rejected++; + + if (lambda > max_lambda) { + terminated = true; + message = + "Solver did not converge and reached maximum damping lambda"; + } + } + } + } + + stats.add("optimize", timer_total.elapsed()).format("ms"); + stats.add("num_it", it).format("count"); + stats.add("num_it_rejected", it_rejected).format("count"); + + // TODO: call filterOutliers at least once (also for CG version) + + stats_all_.merge_all(stats); + stats_sums_.merge_sums(stats); + + if (config.vio_debug) { + if (!converged) { + if (terminated) { + std::cout << "Solver terminated early after {} iterations: {}"_format( + it, message); + } else { + std::cout + << "Solver did not converge after maximum number of {} iterations"_format( + it); + } + } + + stats.print(); + + std::cout << "=================================" << std::endl; + } + } +} + +template +void SqrtKeypointVioEstimator::optimize_and_marg( + const std::map& num_points_connected, + const std::unordered_set& lost_landmaks) { + optimize(); + marginalize(num_points_connected, lost_landmaks); +} + +template +void SqrtKeypointVioEstimator::debug_finalize() { + std::cout << "=== stats all ===\n"; + stats_all_.print(); + std::cout << "=== stats sums ===\n"; + stats_sums_.print(); + + // save files + stats_all_.save_json("stats_all.json"); + stats_sums_.save_json("stats_sums.json"); +} + +// 
////////////////////////////////////////////////////////////////// +// instatiate templates + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +template class SqrtKeypointVioEstimator; +#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +template class SqrtKeypointVioEstimator; +#endif + +} // namespace basalt diff --git a/src/vi_estimator/sqrt_keypoint_vo.cpp b/src/vi_estimator/sqrt_keypoint_vo.cpp new file mode 100644 index 0000000..968df71 --- /dev/null +++ b/src/vi_estimator/sqrt_keypoint_vo.cpp @@ -0,0 +1,1341 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include + +#include + +namespace basalt { + +using namespace fmt::literals; + +template +SqrtKeypointVoEstimator::SqrtKeypointVoEstimator( + const basalt::Calibration& calib_, const VioConfig& config_) + : take_kf(true), + frames_after_kf(0), + initialized(false), + config(config_), + lambda(config_.vio_lm_lambda_initial), + min_lambda(config_.vio_lm_lambda_min), + max_lambda(config_.vio_lm_lambda_max), + lambda_vee(2) { + obs_std_dev = Scalar(config.vio_obs_std_dev); + huber_thresh = Scalar(config.vio_obs_huber_thresh); + calib = calib_.cast(); + + // Setup marginalization + marg_data.is_sqrt = config.vio_sqrt_marg; + marg_data.H.setZero(POSE_SIZE, POSE_SIZE); + marg_data.b.setZero(POSE_SIZE); + + // Version without prior + nullspace_marg_data.is_sqrt = marg_data.is_sqrt; + nullspace_marg_data.H.setZero(POSE_SIZE, POSE_SIZE); + nullspace_marg_data.b.setZero(POSE_SIZE); + + // prior on pose + if (marg_data.is_sqrt) { + marg_data.H.diagonal().setConstant( + std::sqrt(Scalar(config.vio_init_pose_weight))); + } else { + marg_data.H.diagonal().setConstant(Scalar(config.vio_init_pose_weight)); + } + + std::cout << "marg_H (sqrt:" << marg_data.is_sqrt << ")\n" + << marg_data.H << std::endl; + + max_states = config.vio_max_states; + max_kfs = config.vio_max_kfs; + + vision_data_queue.set_capacity(10); + imu_data_queue.set_capacity(300); 
+} + +template +void SqrtKeypointVoEstimator::initialize( + int64_t t_ns, const Sophus::SE3d& T_w_i, const Eigen::Vector3d& vel_w_i, + const Eigen::Vector3d& bg, const Eigen::Vector3d& ba) { + UNUSED(vel_w_i); + + initialized = true; + T_w_i_init = T_w_i.cast(); + + last_state_t_ns = t_ns; + frame_poses[t_ns] = PoseStateWithLin(t_ns, T_w_i_init, true); + + marg_data.order.abs_order_map[t_ns] = std::make_pair(0, POSE_SIZE); + marg_data.order.total_size = POSE_SIZE; + marg_data.order.items = 1; + + initialize(bg, ba); +} + +template +void SqrtKeypointVoEstimator::initialize(const Eigen::Vector3d& bg, + const Eigen::Vector3d& ba) { + UNUSED(bg); + UNUSED(ba); + + auto proc_func = [&] { + OpticalFlowResult::Ptr prev_frame, curr_frame; + bool add_pose = false; + + while (true) { + // get next optical flow result (blocking if queue empty) + vision_data_queue.pop(curr_frame); + + if (config.vio_enforce_realtime) { + // drop current frame if another frame is already in the queue. + while (!vision_data_queue.empty()) vision_data_queue.pop(curr_frame); + } + + if (!curr_frame.get()) { + break; + } + + // Correct camera time offset (relevant for VIO) + // curr_frame->t_ns += calib.cam_time_offset_ns; + + // this is VO not VIO --> just drain IMU queue and ignore + while (!imu_data_queue.empty()) { + ImuData::Ptr d; + imu_data_queue.pop(d); + } + + if (!initialized) { + last_state_t_ns = curr_frame->t_ns; + + frame_poses[last_state_t_ns] = + PoseStateWithLin(last_state_t_ns, T_w_i_init, true); + + marg_data.order.abs_order_map[last_state_t_ns] = + std::make_pair(0, POSE_SIZE); + marg_data.order.total_size = POSE_SIZE; + marg_data.order.items = 1; + + nullspace_marg_data.order = marg_data.order; + + std::cout << "Setting up filter: t_ns " << last_state_t_ns << std::endl; + std::cout << "T_w_i\n" << T_w_i_init.matrix() << std::endl; + + if (config.vio_debug || config.vio_extended_logging) { + logMargNullspace(); + } + + initialized = true; + } + + if (prev_frame) { + add_pose = 
true; + } + + measure(curr_frame, add_pose); + prev_frame = curr_frame; + } + + if (out_vis_queue) out_vis_queue->push(nullptr); + if (out_marg_queue) out_marg_queue->push(nullptr); + if (out_state_queue) out_state_queue->push(nullptr); + + finished = true; + + std::cout << "Finished VIOFilter " << std::endl; + }; + + processing_thread.reset(new std::thread(proc_func)); +} + +template +void SqrtKeypointVoEstimator::addVisionToQueue( + const OpticalFlowResult::Ptr& data) { + vision_data_queue.push(data); +} + +template +void SqrtKeypointVoEstimator::addIMUToQueue( + const ImuData::Ptr& data) { + UNUSED(data); +} + +template +bool SqrtKeypointVoEstimator::measure( + const OpticalFlowResult::Ptr& opt_flow_meas, const bool add_pose) { + stats_sums_.add("frame_id", opt_flow_meas->t_ns).format("none"); + Timer t_total; + + // std::cout << "=== measure frame " << opt_flow_meas->t_ns << "\n"; + // std::cout.flush(); + + // TODO: For VO there is probably no point to non kfs as state in the sliding + // window... Just do pose-only optimization and never enter them into the + // sliding window. + // TODO: If we do pose-only optimization first for all frames (also KFs), this + // may also speed up the joint optimization. + // TODO: Moreover, initial pose-only localization may allow us to project + // untracked landmarks and "rediscover them" by optical flow or feature / + // patch matching. + + if (add_pose) { + // The state for the first frame is added by the initialization code, but + // otherwise we insert a new pose state here. So add_pose is only false + // right after initialization. 
+ + const PoseStateWithLin& curr_state = + frame_poses.at(last_state_t_ns); + + last_state_t_ns = opt_flow_meas->t_ns; + + PoseStateWithLin next_state(opt_flow_meas->t_ns, curr_state.getPose()); + frame_poses[last_state_t_ns] = next_state; + } + + // invariants: opt_flow_meas->t_ns is last pose state and equal to + // last_state_t_ns + BASALT_ASSERT(opt_flow_meas->t_ns == last_state_t_ns); + BASALT_ASSERT(!frame_poses.empty()); + BASALT_ASSERT(last_state_t_ns == frame_poses.rbegin()->first); + + // save results + prev_opt_flow_res[opt_flow_meas->t_ns] = opt_flow_meas; + + // For feature tracks that exist as landmarks, add the new frames as + // additional observations. For every host frame, compute how many of it's + // landmarks are tracked by the current frame: this will help later during + // marginalization to remove the host frames with a low number of landmarks + // connected with the latest frame. For new tracks, remember their ids for + // possible later landmark creation. + int connected0 = 0; // num tracked landmarks cam0 + std::map num_points_connected; // num tracked landmarks by host + std::unordered_set unconnected_obs0; // new tracks cam0 + std::vector> connected_obs0( + opt_flow_meas->observations.size()); + + for (size_t i = 0; i < opt_flow_meas->observations.size(); i++) { + TimeCamId tcid_target(opt_flow_meas->t_ns, i); + + for (const auto& kv_obs : opt_flow_meas->observations[i]) { + int kpt_id = kv_obs.first; + + if (lmdb.landmarkExists(kpt_id)) { + const TimeCamId& tcid_host = lmdb.getLandmark(kpt_id).host_kf_id; + + KeypointObservation kobs; + kobs.kpt_id = kpt_id; + kobs.pos = kv_obs.second.translation().cast(); + + lmdb.addObservation(tcid_target, kobs); + // obs[tcid_host][tcid_target].push_back(kobs); + + num_points_connected[tcid_host.frame_id]++; + connected_obs0[i].emplace_back(kpt_id); + + if (i == 0) connected0++; + } else { + if (i == 0) { + unconnected_obs0.emplace(kpt_id); + } + } + } + } + + if (Scalar(connected0) / (connected0 + 
unconnected_obs0.size()) < + Scalar(config.vio_new_kf_keypoints_thresh) && + frames_after_kf > config.vio_min_frames_after_kf) + take_kf = true; + + if (config.vio_debug) { + std::cout << "connected0 " << connected0 << " unconnected0 " + << unconnected_obs0.size() << std::endl; + } + + BundleAdjustmentBase::optimize_single_frame_pose( + frame_poses[last_state_t_ns], connected_obs0); + + if (take_kf) { + // For keyframes, we don't only add pose state and observations to existing + // landmarks (done above for all frames), but also triangulate new + // landmarks. + + // Triangulate new points from one of the observations (with sufficient + // baseline) and make keyframe for camera 0 + take_kf = false; + frames_after_kf = 0; + kf_ids.emplace(last_state_t_ns); + + TimeCamId tcidl(opt_flow_meas->t_ns, 0); + + int num_points_added = 0; + for (int lm_id : unconnected_obs0) { + // Find all observations + std::map> kp_obs; + + for (const auto& kv : prev_opt_flow_res) { + for (size_t k = 0; k < kv.second->observations.size(); k++) { + auto it = kv.second->observations[k].find(lm_id); + if (it != kv.second->observations[k].end()) { + TimeCamId tcido(kv.first, k); + + KeypointObservation kobs; + kobs.kpt_id = lm_id; + kobs.pos = it->second.translation().template cast(); + + // obs[tcidl][tcido].push_back(kobs); + kp_obs[tcido] = kobs; + } + } + } + + // triangulate + bool valid_kp = false; + const Scalar min_triang_distance2 = + Scalar(config.vio_min_triangulation_dist * + config.vio_min_triangulation_dist); + for (const auto& kv_obs : kp_obs) { + if (valid_kp) break; + TimeCamId tcido = kv_obs.first; + + const Vec2 p0 = opt_flow_meas->observations.at(0) + .at(lm_id) + .translation() + .cast(); + const Vec2 p1 = prev_opt_flow_res[tcido.frame_id] + ->observations[tcido.cam_id] + .at(lm_id) + .translation() + .template cast(); + + Vec4 p0_3d, p1_3d; + bool valid1 = calib.intrinsics[0].unproject(p0, p0_3d); + bool valid2 = calib.intrinsics[tcido.cam_id].unproject(p1, p1_3d); + if 
(!valid1 || !valid2) continue; + + SE3 T_i0_i1 = getPoseStateWithLin(tcidl.frame_id).getPose().inverse() * + getPoseStateWithLin(tcido.frame_id).getPose(); + SE3 T_0_1 = + calib.T_i_c[0].inverse() * T_i0_i1 * calib.T_i_c[tcido.cam_id]; + + if (T_0_1.translation().squaredNorm() < min_triang_distance2) continue; + + Vec4 p0_triangulated = triangulate(p0_3d.template head<3>(), + p1_3d.template head<3>(), T_0_1); + + if (p0_triangulated.array().isFinite().all() && + p0_triangulated[3] > 0 && p0_triangulated[3] < Scalar(3.0)) { + Keypoint kpt_pos; + kpt_pos.host_kf_id = tcidl; + kpt_pos.direction = + StereographicParam::project(p0_triangulated); + kpt_pos.inv_dist = p0_triangulated[3]; + lmdb.addLandmark(lm_id, kpt_pos); + + num_points_added++; + valid_kp = true; + } + } + + if (valid_kp) { + for (const auto& kv_obs : kp_obs) { + lmdb.addObservation(kv_obs.first, kv_obs.second); + } + + // TODO: non-linear refinement of landmark position from all + // observations; may speed up later joint optimization + } + } + + num_points_kf[opt_flow_meas->t_ns] = num_points_added; + } else { + frames_after_kf++; + } + + std::unordered_set lost_landmaks; + if (config.vio_marg_lost_landmarks) { + for (const auto& kv : lmdb.getLandmarks()) { + bool connected = false; + for (size_t i = 0; i < opt_flow_meas->observations.size(); i++) { + if (opt_flow_meas->observations[i].count(kv.first) > 0) + connected = true; + } + if (!connected) { + lost_landmaks.emplace(kv.first); + } + } + } + + optimize_and_marg(num_points_connected, lost_landmaks); + + if (out_state_queue) { + const PoseStateWithLin& p = frame_poses.at(last_state_t_ns); + + typename PoseVelBiasState::Ptr data(new PoseVelBiasState( + p.getT_ns(), p.getPose().template cast(), + Eigen::Vector3d::Zero(), Eigen::Vector3d::Zero(), + Eigen::Vector3d::Zero())); + + out_state_queue->push(data); + } + + if (out_vis_queue) { + VioVisualizationData::Ptr data(new VioVisualizationData); + + data->t_ns = last_state_t_ns; + + 
BASALT_ASSERT(frame_states.empty()); + + for (const auto& kv : frame_poses) { + data->frames.emplace_back(kv.second.getPose().template cast()); + } + + get_current_points(data->points, data->point_ids); + + data->projections.resize(opt_flow_meas->observations.size()); + computeProjections(data->projections, last_state_t_ns); + + data->opt_flow_res = prev_opt_flow_res[last_state_t_ns]; + + out_vis_queue->push(data); + } + + last_processed_t_ns = last_state_t_ns; + + stats_sums_.add("measure", t_total.elapsed()).format("ms"); + + return true; +} + +template +void SqrtKeypointVoEstimator::logMargNullspace() { + nullspace_marg_data.order = marg_data.order; + if (config.vio_debug) { + std::cout << "======== Marg nullspace ==========" << std::endl; + stats_sums_.add("marg_ns", checkMargNullspace()); + std::cout << "=================================" << std::endl; + } else { + stats_sums_.add("marg_ns", checkMargNullspace()); + } + stats_sums_.add("marg_ev", checkMargEigenvalues()); +} + +template +Eigen::VectorXd SqrtKeypointVoEstimator::checkMargNullspace() const { + return checkNullspace(nullspace_marg_data, frame_states, frame_poses, + config.vio_debug); +} + +template +Eigen::VectorXd SqrtKeypointVoEstimator::checkMargEigenvalues() const { + return checkEigenvalues(nullspace_marg_data, false); +} + +template +void SqrtKeypointVoEstimator::marginalize( + const std::map& num_points_connected, + const std::unordered_set& lost_landmaks) { + BASALT_ASSERT(frame_states.empty()); + + Timer t_total; + + if (true) { + // Marginalize + + // remove all frame_poses that are not kfs and not the current frame + std::set non_kf_poses; + for (const auto& kv : frame_poses) { + if (kf_ids.count(kv.first) == 0 && kv.first != last_state_t_ns) { + non_kf_poses.emplace(kv.first); + } + } + + for (int64_t id : non_kf_poses) { + frame_poses.erase(id); + lmdb.removeFrame(id); + prev_opt_flow_res.erase(id); + } + + auto kf_ids_all = kf_ids; + std::set kfs_to_marg; + while (kf_ids.size() > 
max_kfs) { + int64_t id_to_marg = -1; + + // starting from the oldest kf (and skipping the newest 2 kfs), try to + // find a kf that has less than a small percentage of it's landmarks + // tracked by the current frame + if (kf_ids.size() > 2) { + // Note: size > 2 check is to ensure prev(kf_ids.end(), 2) is valid + auto end_minus_2 = std::prev(kf_ids.end(), 2); + + for (auto it = kf_ids.begin(); it != end_minus_2; ++it) { + if (num_points_connected.count(*it) == 0 || + (num_points_connected.at(*it) / Scalar(num_points_kf.at(*it)) < + Scalar(config.vio_kf_marg_feature_ratio))) { + id_to_marg = *it; + break; + } + } + } + + // Note: This score function is taken from DSO, but it seems to mostly + // marginalize the oldest keyframe. This may be due to the fact that + // we don't have as long-lived landmarks, which may change if we ever + // implement "rediscovering" of lost feature tracks by projecting + // untracked landmarks into the localized frame. + if (kf_ids.size() > 2 && id_to_marg < 0) { + // Note: size > 2 check is to ensure prev(kf_ids.end(), 2) is valid + auto end_minus_2 = std::prev(kf_ids.end(), 2); + + int64_t last_kf = *kf_ids.crbegin(); + Scalar min_score = std::numeric_limits::max(); + int64_t min_score_id = -1; + + for (auto it1 = kf_ids.begin(); it1 != end_minus_2; ++it1) { + // small distance to other keyframes --> higher score + Scalar denom = 0; + for (auto it2 = kf_ids.begin(); it2 != end_minus_2; ++it2) { + denom += 1 / ((frame_poses.at(*it1).getPose().translation() - + frame_poses.at(*it2).getPose().translation()) + .norm() + + Scalar(1e-5)); + } + + // small distance to latest kf --> lower score + Scalar score = + std::sqrt((frame_poses.at(*it1).getPose().translation() - + frame_poses.at(last_kf).getPose().translation()) + .norm()) * + denom; + + if (score < min_score) { + min_score_id = *it1; + min_score = score; + } + } + + id_to_marg = min_score_id; + } + + // if no frame was selected, the logic above is faulty + BASALT_ASSERT(id_to_marg 
>= 0); + + kfs_to_marg.emplace(id_to_marg); + + // Note: this looks like a leftover from VIO that is not doing anything in + // VO -> we could check / compare / remove + non_kf_poses.emplace(id_to_marg); + + kf_ids.erase(id_to_marg); + } + + // Create AbsOrderMap entries that are in the marg prior or connected to the + // keyframes that we marginalize + // Create AbsOrderMap entries that are in the marg prior or connected to the + // keyframes that we marginalize + AbsOrderMap aom; + { + const auto& obs = lmdb.getObservations(); + + aom.abs_order_map = marg_data.order.abs_order_map; + aom.total_size = marg_data.order.total_size; + aom.items = marg_data.order.items; + + for (const auto& kv : frame_poses) { + if (aom.abs_order_map.count(kv.first) == 0) { + bool add_pose = false; + + for (const auto& [host, target_map] : obs) { + // if one of the host frames that we marg out + if (kfs_to_marg.count(host.frame_id) > 0) { + for (const auto& [target, obs_map] : target_map) { + // has observations in the frame also add it to marg prior + if (target.frame_id == kv.first) { + add_pose = true; + break; + } + } + } + // Break if we already found one observation. 
+ if (add_pose) break; + } + + if (add_pose) { + aom.abs_order_map[kv.first] = + std::make_pair(aom.total_size, POSE_SIZE); + + aom.total_size += POSE_SIZE; + aom.items++; + } + } + } + + // If marg lost landmakrs add corresponding frames to linearization + if (config.vio_marg_lost_landmarks) { + for (const auto& lm_id : lost_landmaks) { + const auto& lm = lmdb.getLandmark(lm_id); + if (aom.abs_order_map.count(lm.host_kf_id.frame_id) == 0) { + aom.abs_order_map[lm.host_kf_id.frame_id] = + std::make_pair(aom.total_size, POSE_SIZE); + + aom.total_size += POSE_SIZE; + aom.items++; + } + + for (const auto& [target, o] : lm.obs) { + if (aom.abs_order_map.count(target.frame_id) == 0) { + aom.abs_order_map[target.frame_id] = + std::make_pair(aom.total_size, POSE_SIZE); + + aom.total_size += POSE_SIZE; + aom.items++; + } + } + } + } + } + + // std::cout << "marg order" << std::endl; + // aom.print_order(); + + // std::cout << "marg prior order" << std::endl; + // marg_order.print_order(); + + if (config.vio_debug) { + std::cout << "non_kf_poses.size() " << non_kf_poses.size() << std::endl; + for (const auto& v : non_kf_poses) std::cout << v << ' '; + std::cout << std::endl; + + std::cout << "kfs_to_marg.size() " << kfs_to_marg.size() << std::endl; + for (const auto& v : kfs_to_marg) std::cout << v << ' '; + std::cout << std::endl; + + std::cout << "last_state_t_ns " << last_state_t_ns << std::endl; + + std::cout << "frame_poses.size() " << frame_poses.size() << std::endl; + for (const auto& v : frame_poses) std::cout << v.first << ' '; + std::cout << std::endl; + } + + // Remove unconnected frames + if (!kfs_to_marg.empty()) { + for (auto it = kfs_to_marg.cbegin(); it != kfs_to_marg.cend();) { + if (aom.abs_order_map.count(*it) == 0) { + frame_poses.erase(*it); + prev_opt_flow_res.erase(*it); + lmdb.removeKeyframes({*it}, {}, {}); + it = kfs_to_marg.erase(it); + } else { + it++; + } + } + } + + if (!kfs_to_marg.empty()) { + Timer t_actual_marg; + + // Marginalize only if 
last state is a keyframe + BASALT_ASSERT(kf_ids_all.count(last_state_t_ns) > 0); + + size_t asize = aom.total_size; + // double marg_prior_error; + + // DenseAccumulator accum; + // accum.reset(asize); + + bool is_lin_sqrt = isLinearizationSqrt(config.vio_linearization_type); + + MatX Q2Jp_or_H; + VecX Q2r_or_b; + + { + // Linearize points + Timer t_linearize; + + typename LinearizationBase::Options lqr_options; + lqr_options.lb_options.huber_parameter = huber_thresh; + lqr_options.lb_options.obs_std_dev = obs_std_dev; + lqr_options.linearization_type = config.vio_linearization_type; + + auto lqr = LinearizationBase::create( + this, aom, lqr_options, &marg_data, nullptr, &kfs_to_marg, + &lost_landmaks); + + lqr->linearizeProblem(); + lqr->performQR(); + + if (is_lin_sqrt && marg_data.is_sqrt) { + lqr->get_dense_Q2Jp_Q2r(Q2Jp_or_H, Q2r_or_b); + } else { + lqr->get_dense_H_b(Q2Jp_or_H, Q2r_or_b); + } + + stats_sums_.add("marg_linearize", t_linearize.elapsed()).format("ms"); + } + + // Save marginalization prior + if (out_marg_queue && !kfs_to_marg.empty()) { + // int64_t kf_id = *kfs_to_marg.begin(); + + { + MargData::Ptr m(new MargData); + m->aom = aom; + + if (is_lin_sqrt && marg_data.is_sqrt) { + m->abs_H = + (Q2Jp_or_H.transpose() * Q2Jp_or_H).template cast(); + m->abs_b = + (Q2Jp_or_H.transpose() * Q2r_or_b).template cast(); + } else { + m->abs_H = Q2Jp_or_H.template cast(); + + m->abs_b = Q2r_or_b.template cast(); + } + + assign_cast_map_values(m->frame_poses, frame_poses); + assign_cast_map_values(m->frame_states, frame_states); + m->kfs_all = kf_ids_all; + m->kfs_to_marg = kfs_to_marg; + m->use_imu = false; + + for (int64_t t : m->kfs_all) { + m->opt_flow_res.emplace_back(prev_opt_flow_res.at(t)); + } + + out_marg_queue->push(m); + } + } + + std::set idx_to_keep, idx_to_marg; + for (const auto& kv : aom.abs_order_map) { + if (kv.second.second == POSE_SIZE) { + int start_idx = kv.second.first; + if (kfs_to_marg.count(kv.first) == 0) { + for (size_t i = 0; i < 
POSE_SIZE; i++) + idx_to_keep.emplace(start_idx + i); + } else { + for (size_t i = 0; i < POSE_SIZE; i++) + idx_to_marg.emplace(start_idx + i); + } + } else { + BASALT_ASSERT(false); + } + } + + if (config.vio_debug) { + std::cout << "keeping " << idx_to_keep.size() << " marg " + << idx_to_marg.size() << " total " << asize << std::endl; + std::cout << " frame_poses " << frame_poses.size() << " frame_states " + << frame_states.size() << std::endl; + } + + if (config.vio_debug || config.vio_extended_logging) { + MatX Q2Jp_or_H_nullspace; + VecX Q2r_or_b_nullspace; + + typename LinearizationBase::Options lqr_options; + lqr_options.lb_options.huber_parameter = huber_thresh; + lqr_options.lb_options.obs_std_dev = obs_std_dev; + lqr_options.linearization_type = config.vio_linearization_type; + + nullspace_marg_data.order = marg_data.order; + + auto lqr = LinearizationBase::create( + this, aom, lqr_options, &nullspace_marg_data, nullptr, &kfs_to_marg, + &lost_landmaks); + + lqr->linearizeProblem(); + lqr->performQR(); + + if (is_lin_sqrt && marg_data.is_sqrt) { + lqr->get_dense_Q2Jp_Q2r(Q2Jp_or_H_nullspace, Q2r_or_b_nullspace); + } else { + lqr->get_dense_H_b(Q2Jp_or_H_nullspace, Q2r_or_b_nullspace); + } + + MatX nullspace_sqrt_H_new; + VecX nullspace_sqrt_b_new; + + if (is_lin_sqrt && marg_data.is_sqrt) { + MargHelper::marginalizeHelperSqrtToSqrt( + Q2Jp_or_H_nullspace, Q2r_or_b_nullspace, idx_to_keep, idx_to_marg, + nullspace_sqrt_H_new, nullspace_sqrt_b_new); + } else if (marg_data.is_sqrt) { + MargHelper::marginalizeHelperSqToSqrt( + Q2Jp_or_H_nullspace, Q2r_or_b_nullspace, idx_to_keep, idx_to_marg, + nullspace_sqrt_H_new, nullspace_sqrt_b_new); + } else { + MargHelper::marginalizeHelperSqToSq( + Q2Jp_or_H_nullspace, Q2r_or_b_nullspace, idx_to_keep, idx_to_marg, + nullspace_sqrt_H_new, nullspace_sqrt_b_new); + } + + nullspace_marg_data.H = nullspace_sqrt_H_new; + nullspace_marg_data.b = nullspace_sqrt_b_new; + } + + MatX marg_sqrt_H_new; + VecX marg_sqrt_b_new; + + { 
+ Timer t; + if (is_lin_sqrt && marg_data.is_sqrt) { + MargHelper::marginalizeHelperSqrtToSqrt( + Q2Jp_or_H, Q2r_or_b, idx_to_keep, idx_to_marg, marg_sqrt_H_new, + marg_sqrt_b_new); + } else if (marg_data.is_sqrt) { + MargHelper::marginalizeHelperSqToSqrt( + Q2Jp_or_H, Q2r_or_b, idx_to_keep, idx_to_marg, marg_sqrt_H_new, + marg_sqrt_b_new); + } else { + MargHelper::marginalizeHelperSqToSq( + Q2Jp_or_H, Q2r_or_b, idx_to_keep, idx_to_marg, marg_sqrt_H_new, + marg_sqrt_b_new); + } + stats_sums_.add("marg_helper", t.elapsed()).format("ms"); + } + + for (auto& kv : frame_poses) { + if (aom.abs_order_map.count(kv.first) > 0) { + if (!kv.second.isLinearized()) kv.second.setLinTrue(); + } + } + + for (const int64_t id : kfs_to_marg) { + frame_poses.erase(id); + prev_opt_flow_res.erase(id); + } + + lmdb.removeKeyframes(kfs_to_marg, kfs_to_marg, kfs_to_marg); + + if (config.vio_marg_lost_landmarks) { + for (const auto& lm_id : lost_landmaks) lmdb.removeLandmark(lm_id); + } + + AbsOrderMap marg_order_new; + + for (const auto& kv : frame_poses) { + if (aom.abs_order_map.count(kv.first) > 0) { + marg_order_new.abs_order_map[kv.first] = + std::make_pair(marg_order_new.total_size, POSE_SIZE); + + marg_order_new.total_size += POSE_SIZE; + marg_order_new.items++; + } + } + + marg_data.H = marg_sqrt_H_new; + marg_data.b = marg_sqrt_b_new; + marg_data.order = marg_order_new; + + BASALT_ASSERT(size_t(marg_data.H.cols()) == marg_data.order.total_size); + + // Quadratic prior and "delta" of the current state to the original + // linearization point give cost function + // + // P(x) = 0.5 || J*(delta+x) + r ||^2. + // + // For marginalization this has been linearized at x=0 to give + // linearization + // + // P(x) = 0.5 || J*x + (J*delta + r) ||^2, + // + // with Jacobian J and residual J*delta + r. + // + // After marginalization, we recover the original form of the + // prior. We are left with linearization (in sqrt form) + // + // Pnew(x) = 0.5 || Jnew*x + res ||^2. 
+ // + // To recover the original form with delta-independent r, we set + // + // Pnew(x) = 0.5 || Jnew*(delta+x) + (res - Jnew*delta) ||^2, + // + // and thus rnew = (res - Jnew*delta). + + VecX delta; + computeDelta(marg_data.order, delta); + marg_data.b -= marg_data.H * delta; + + if (config.vio_debug || config.vio_extended_logging) { + VecX delta; + computeDelta(marg_data.order, delta); + nullspace_marg_data.b -= nullspace_marg_data.H * delta; + } + + stats_sums_.add("marg_total", t_actual_marg.elapsed()).format("ms"); + + if (config.vio_debug) { + std::cout << "marginalizaon done!!" << std::endl; + } + + if (config.vio_debug || config.vio_extended_logging) { + Timer t; + logMargNullspace(); + stats_sums_.add("marg_log", t.elapsed()).format("ms"); + } + } + + // std::cout << "new marg prior order" << std::endl; + // marg_order.print_order(); + } + + stats_sums_.add("marginalize", t_total.elapsed()).format("ms"); +} + +template +void SqrtKeypointVoEstimator::optimize() { + if (config.vio_debug) { + std::cout << "=================================" << std::endl; + } + + // harcoded configs + // bool scale_Jp = config.vio_scale_jacobian && is_qr_solver(); + // bool scale_Jl = config.vio_scale_jacobian && is_qr_solver(); + + // timing + ExecutionStats stats; + Timer timer_total; + Timer timer_iteration; + + // construct order of states in linear system --> sort by ascending + // timestamp + AbsOrderMap aom; + aom.abs_order_map = marg_data.order.abs_order_map; + aom.total_size = marg_data.order.total_size; + aom.items = marg_data.order.items; + + for (const auto& kv : frame_poses) { + if (aom.abs_order_map.count(kv.first) == 0) { + aom.abs_order_map[kv.first] = std::make_pair(aom.total_size, POSE_SIZE); + aom.total_size += POSE_SIZE; + aom.items++; + } + } + + // This is VO not VIO, so expect no IMU states + BASALT_ASSERT(frame_states.empty()); + + // TODO: Check why we get better accuracy with old SC loop. 
Possible culprits: + // - different initial lambda (based on previous iteration) + // - no landmark damping + // - outlier removal after 4 iterations? + lambda = Scalar(config.vio_lm_lambda_initial); + + // record stats + stats.add("num_cams", frame_poses.size()).format("count"); + stats.add("num_lms", lmdb.numLandmarks()).format("count"); + stats.add("num_obs", lmdb.numObservations()).format("count"); + + // setup landmark blocks + typename LinearizationBase::Options lqr_options; + lqr_options.lb_options.huber_parameter = huber_thresh; + lqr_options.lb_options.obs_std_dev = obs_std_dev; + lqr_options.linearization_type = config.vio_linearization_type; + std::unique_ptr> lqr; + + { + Timer t; + lqr = LinearizationBase::create(this, aom, lqr_options, + &marg_data); + stats.add("allocateLMB", t.reset()).format("ms"); + lqr->log_problem_stats(stats); + } + + bool terminated = false; + bool converged = false; + std::string message; + + int it = 0; + int it_rejected = 0; + for (; it <= config.vio_max_iterations && !terminated;) { + if (it > 0) { + timer_iteration.reset(); + } + + Scalar error_total = 0; + VecX Jp_column_norm2; + + // TODO: execution could be done staged + + Timer t; + + // linearize residuals + bool numerically_valid; + error_total = lqr->linearizeProblem(&numerically_valid); + BASALT_ASSERT_STREAM( + numerically_valid, + "did not expect numerical failure during linearization"); + stats.add("linearizeProblem", t.reset()).format("ms"); + + // // compute pose jacobian norm squared for Jacobian scaling + // if (scale_Jp) { + // Jp_column_norm2 = lqr->getJp_diag2(); + // stats.add("getJp_diag2", t.reset()).format("ms"); + // } + + // // scale landmark jacobians + // if (scale_Jl) { + // lqr->scaleJl_cols(); + // stats.add("scaleJl_cols", t.reset()).format("ms"); + // } + + // marginalize points in place + lqr->performQR(); + stats.add("performQR", t.reset()).format("ms"); + + if (config.vio_debug) { + // TODO: num_points debug output missing + std::cout << 
"[LINEARIZE] Error: " << error_total << " num points " + << std::endl; + std::cout << "Iteration " << it << " " << error_total << std::endl; + } + + // compute pose jacobian scaling + // VecX jacobian_scaling; + // if (scale_Jp) { + // // TODO: what about jacobian scaling for SC solver? + + // // ceres uses 1.0 / (1.0 + sqrt(SquaredColumnNorm)) + // // we use 1.0 / (eps + sqrt(SquaredColumnNorm)) + // jacobian_scaling = (lqr_options.lb_options.jacobi_scaling_eps + + // Jp_column_norm2.array().sqrt()) + // .inverse(); + // } + // if (config.vio_debug) { + // std::cout << "\t[INFO] Stage 1" << std::endl; + //} + + // inner loop for backtracking in LM (still count as main iteration though) + for (int j = 0; it <= config.vio_max_iterations && !terminated; j++) { + if (j > 0) { + timer_iteration.reset(); + if (config.vio_debug) { + std::cout << "Iteration " << it << ", backtracking" << std::endl; + } + } + + { + // Timer t; + + // TODO: execution could be done staged + + // set (updated) damping for poses + // if (config.vio_lm_pose_damping_variant == 0) { + // lqr->setPoseDamping(lambda); + // stats.add("setPoseDamping", t.reset()).format("ms"); + // } + + // // scale landmark Jacobians only on the first inner iteration. 
+ // if (scale_Jp && j == 0) { + // lqr->scaleJp_cols(jacobian_scaling); + // stats.add("scaleJp_cols", t.reset()).format("ms"); + // } + + // // set (updated) damping for landmarks + // if (config.vio_lm_landmark_damping_variant == 0) { + // lqr->setLandmarkDamping(lambda); + // stats.add("setLandmarkDamping", t.reset()).format("ms"); + // } + } + + // if (config.vio_debug) { + // std::cout << "\t[INFO] Stage 2 " << std::endl; + // } + + VecX inc; + { + Timer t; + + // get dense reduced camera system + MatX H; + VecX b; + + lqr->get_dense_H_b(H, b); + + stats.add("get_dense_H_b", t.reset()).format("ms"); + + int iter = 0; + bool inc_valid = false; + constexpr int max_num_iter = 3; + + while (iter < max_num_iter && !inc_valid) { + VecX Hdiag_lambda = (H.diagonal() * lambda).cwiseMax(min_lambda); + MatX H_copy = H; + H_copy.diagonal() += Hdiag_lambda; + + Eigen::LDLT> ldlt(H_copy); + inc = ldlt.solve(b); + stats.add("solve", t.reset()).format("ms"); + + if (!inc.array().isFinite().all()) { + lambda = lambda_vee * lambda; + lambda_vee *= vee_factor; + } else { + inc_valid = true; + } + iter++; + } + + if (!inc_valid) { + std::cerr << "Still invalid inc after " << max_num_iter + << " iterations." 
<< std::endl; + } + } + + // backup state (then apply increment and check cost decrease) + backup(); + + // TODO: directly invert pose increment when solving; change SC + // `updatePoints` to recieve unnegated increment + + // backsubstitute (with scaled pose increment) + Scalar l_diff = 0; + { + // negate pose increment before point update + inc = -inc; + + Timer t; + l_diff = lqr->backSubstitute(inc); + stats.add("backSubstitute", t.reset()).format("ms"); + } + + // undo jacobian scaling before applying increment to poses + // if (scale_Jp) { + // inc.array() *= jacobian_scaling.array(); + // } + + // apply increment to poses + for (auto& [frame_id, state] : frame_poses) { + int idx = aom.abs_order_map.at(frame_id).first; + state.applyInc(inc.template segment(idx)); + } + + // compute stepsize + Scalar step_norminf = inc.array().abs().maxCoeff(); + + // this is VO not VIO + BASALT_ASSERT(frame_states.empty()); + + // compute error update applying increment + Scalar after_update_marg_prior_error = 0; + Scalar after_update_vision_error = 0; + + { + Timer t; + computeError(after_update_vision_error); + computeMargPriorError(marg_data, after_update_marg_prior_error); + stats.add("computerError2", t.reset()).format("ms"); + } + + Scalar after_error_total = + after_update_vision_error + after_update_marg_prior_error; + + // check cost decrease compared to quadratic model cost + Scalar f_diff; + bool step_is_valid = false; + bool step_is_successful = false; + Scalar relative_decrease = 0; + { + // compute actual cost decrease + f_diff = error_total - after_error_total; + + relative_decrease = f_diff / l_diff; + + if (config.vio_debug) { + std::cout << "\t[EVAL] error: {:.4e}, f_diff {:.4e} l_diff {:.4e} " + "step_quality {:.2e} step_size {:.2e}\n"_format( + after_error_total, f_diff, l_diff, relative_decrease, + step_norminf); + } + + // TODO: consider to remove assert. 
For now we want to test if we even + // run into the l_diff <= 0 case ever in practice + // BASALT_ASSERT_STREAM(l_diff > 0, "l_diff " << l_diff); + + // l_diff <= 0 is a theoretical possibility if the model cost change is + // tiny and becomes numerically negative (non-positive). It might not + // occur since our linear systems are not that big (compared to large + // scale BA for example) and we also abort optimization quite early and + // usually don't have large damping (== tiny step size). + step_is_valid = l_diff > 0; + step_is_successful = step_is_valid && relative_decrease > 0; + } + + double iteration_time = timer_iteration.elapsed(); + double cumulative_time = timer_total.elapsed(); + + stats.add("iteration", iteration_time).format("ms"); + { + basalt::MemoryInfo mi; + if (get_memory_info(mi)) { + stats.add("resident_memory", mi.resident_memory); + stats.add("resident_memory_peak", mi.resident_memory_peak); + } + } + + if (step_is_successful) { + BASALT_ASSERT(step_is_valid); + + if (config.vio_debug) { + // std::cout << "\t[ACCEPTED] lambda:" << lambda + // << " Error: " << after_error_total << std::endl; + + std::cout << "\t[ACCEPTED] error: {:.4e}, lambda: {:.1e}, it_time: " + "{:.3f}s, total_time: {:.3f}s\n" + ""_format(after_error_total, lambda, iteration_time, + cumulative_time); + } + + lambda *= std::max( + Scalar(1.0) / 3, + 1 - std::pow(2 * relative_decrease - 1, 3)); + lambda = std::max(min_lambda, lambda); + + lambda_vee = initial_vee; + + it++; + + // check function and parameter tolerance + if ((f_diff > 0 && f_diff < Scalar(1e-6)) || + step_norminf < Scalar(1e-4)) { + converged = true; + terminated = true; + } + + // stop inner lm loop + break; + } else { + std::string reason = step_is_valid ? 
"REJECTED" : "INVALID"; + + if (config.vio_debug) { + // std::cout << "\t[REJECTED] lambda:" << lambda + // << " Error: " << after_error_total << std::endl; + + std::cout << "\t[{}] error: {}, lambda: {:.1e}, it_time:" + "{:.3f}s, total_time: {:.3f}s\n" + ""_format(reason, after_error_total, lambda, + iteration_time, cumulative_time); + } + + lambda = lambda_vee * lambda; + lambda_vee *= vee_factor; + + // lambda = std::max(min_lambda, lambda); + // lambda = std::min(max_lambda, lambda); + + restore(); + it++; + it_rejected++; + + if (lambda > max_lambda) { + terminated = true; + message = + "Solver did not converge and reached maximum damping lambda"; + } + } + } + } + + stats.add("optimize", timer_total.elapsed()).format("ms"); + stats.add("num_it", it).format("count"); + stats.add("num_it_rejected", it_rejected).format("count"); + + // TODO: call filterOutliers at least once (also for CG version) + + stats_all_.merge_all(stats); + stats_sums_.merge_sums(stats); + + if (config.vio_debug) { + if (!converged) { + if (terminated) { + std::cout << "Solver terminated early after {} iterations: {}"_format( + it, message); + } else { + std::cout + << "Solver did not converge after maximum number of {} iterations"_format( + it); + } + } + + stats.print(); + + std::cout << "=================================" << std::endl; + } +} + +template +void SqrtKeypointVoEstimator::optimize_and_marg( + const std::map& num_points_connected, + const std::unordered_set& lost_landmaks) { + optimize(); + marginalize(num_points_connected, lost_landmaks); +} + +template +void SqrtKeypointVoEstimator::debug_finalize() { + std::cout << "=== stats all ===\n"; + stats_all_.print(); + std::cout << "=== stats sums ===\n"; + stats_sums_.print(); + + // save files + stats_all_.save_json("stats_all.json"); + stats_sums_.save_json("stats_sums.json"); +} + +// ////////////////////////////////////////////////////////////////// +// instatiate templates + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +template 
class SqrtKeypointVoEstimator; +#endif + +#ifdef BASALT_INSTANTIATIONS_FLOAT +template class SqrtKeypointVoEstimator; +#endif +} // namespace basalt diff --git a/src/vi_estimator/vio_estimator.cpp b/src/vi_estimator/vio_estimator.cpp new file mode 100644 index 0000000..96cabca --- /dev/null +++ b/src/vi_estimator/vio_estimator.cpp @@ -0,0 +1,176 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include +#include + +namespace basalt { + +namespace { + +template +VioEstimatorBase::Ptr factory_helper(const VioConfig& config, + const Calibration& cam, + const Eigen::Vector3d& g, bool use_imu) { + VioEstimatorBase::Ptr res; + + if (use_imu) { + res.reset(new SqrtKeypointVioEstimator(g, cam, config)); + + } else { + res.reset(new SqrtKeypointVoEstimator(cam, config)); + } + + return res; +} + +} // namespace + +VioEstimatorBase::Ptr VioEstimatorFactory::getVioEstimator( + const VioConfig& config, const Calibration& cam, + const Eigen::Vector3d& g, bool use_imu, bool use_double) { + if (use_double) { +#ifdef BASALT_INSTANTIATIONS_DOUBLE + return factory_helper(config, cam, g, use_imu); +#else + BASALT_LOG_FATAL("Compiled without double support."); +#endif + } else { +#ifdef BASALT_INSTANTIATIONS_FLOAT + return factory_helper(config, cam, g, use_imu); +#else + BASALT_LOG_FATAL("Compiled without float support."); +#endif + } +} + +double alignSVD(const std::vector& filter_t_ns, + const Eigen::aligned_vector& filter_t_w_i, + const std::vector& gt_t_ns, + Eigen::aligned_vector& gt_t_w_i) { + Eigen::aligned_vector est_associations; + Eigen::aligned_vector gt_associations; + + for (size_t i = 0; i < filter_t_w_i.size(); i++) { + int64_t t_ns = filter_t_ns[i]; + + size_t j; + for (j = 0; j < gt_t_ns.size(); j++) { + if (gt_t_ns.at(j) > t_ns) break; + } + j--; + + if (j >= gt_t_ns.size() - 1) { + continue; + } + + double dt_ns = t_ns - 
gt_t_ns.at(j); + double int_t_ns = gt_t_ns.at(j + 1) - gt_t_ns.at(j); + + BASALT_ASSERT_STREAM(dt_ns >= 0, "dt_ns " << dt_ns); + BASALT_ASSERT_STREAM(int_t_ns > 0, "int_t_ns " << int_t_ns); + + // Skip if the interval between gt larger than 100ms + if (int_t_ns > 1.1e8) continue; + + double ratio = dt_ns / int_t_ns; + + BASALT_ASSERT(ratio >= 0); + BASALT_ASSERT(ratio < 1); + + Eigen::Vector3d gt = (1 - ratio) * gt_t_w_i[j] + ratio * gt_t_w_i[j + 1]; + + gt_associations.emplace_back(gt); + est_associations.emplace_back(filter_t_w_i[i]); + } + + int num_kfs = est_associations.size(); + + Eigen::Matrix gt, est; + gt.setZero(3, num_kfs); + est.setZero(3, num_kfs); + + for (size_t i = 0; i < est_associations.size(); i++) { + gt.col(i) = gt_associations[i]; + est.col(i) = est_associations[i]; + } + + Eigen::Vector3d mean_gt = gt.rowwise().mean(); + Eigen::Vector3d mean_est = est.rowwise().mean(); + + gt.colwise() -= mean_gt; + est.colwise() -= mean_est; + + Eigen::Matrix3d cov = gt * est.transpose(); + + Eigen::JacobiSVD svd( + cov, Eigen::ComputeFullU | Eigen::ComputeFullV); + + Eigen::Matrix3d S; + S.setIdentity(); + + if (svd.matrixU().determinant() * svd.matrixV().determinant() < 0) + S(2, 2) = -1; + + Eigen::Matrix3d rot_gt_est = svd.matrixU() * S * svd.matrixV().transpose(); + Eigen::Vector3d trans = mean_gt - rot_gt_est * mean_est; + + Sophus::SE3d T_gt_est(rot_gt_est, trans); + Sophus::SE3d T_est_gt = T_gt_est.inverse(); + + for (size_t i = 0; i < gt_t_w_i.size(); i++) { + gt_t_w_i[i] = T_est_gt * gt_t_w_i[i]; + } + + double error = 0; + for (size_t i = 0; i < est_associations.size(); i++) { + est_associations[i] = T_gt_est * est_associations[i]; + Eigen::Vector3d res = est_associations[i] - gt_associations[i]; + + error += res.transpose() * res; + } + + error /= est_associations.size(); + error = std::sqrt(error); + + std::cout << "T_align\n" << T_gt_est.matrix() << std::endl; + std::cout << "error " << error << std::endl; + std::cout << "number of associations 
" << num_kfs << std::endl; + + return error; +} +} // namespace basalt diff --git a/src/vio.cpp b/src/vio.cpp new file mode 100644 index 0000000..0acaf2a --- /dev/null +++ b/src/vio.cpp @@ -0,0 +1,1482 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/calib3d/calib3d.hpp" +#include "opencv2/highgui/highgui.hpp" + +using namespace fmt::literals; + +// GUI functions +void draw_image_overlay(pangolin::View& v, size_t cam_id); +void draw_scene(pangolin::View& view); +void draw_scene_no_camera(pangolin::View& view); +void load_data(const std::string& calib_path); +bool next_step(); +bool prev_step(); +void draw_plots(); +void alignButton(); +void alignDeviceButton(); +void saveTrajectoryButton(); + +// Pangolin variables +constexpr int UI_WIDTH = 200; + +using Button = pangolin::Var>; + +pangolin::DataLog imu_data_log, vio_data_log, error_data_log; +pangolin::Plotter* plotter; + +pangolin::Var show_frame("ui.show_frame", 0, 0, 1500); + +// Added by Ivan Podmogilnyi 17.02.2022 +pangolin::Var changeAngleSign("ui.ChangeAngleSign", false, true); +pangolin::Var drawCubeBool("ui.DrawCube", false, true); +pangolin::Var drawLineCubeBool("ui.DrawCubeLine", true, true); +pangolin::Var zeroOut("ui.ZeroOut", false, true); +pangolin::Var drawTexture("ui.drawTexture", true, false); +pangolin::Var IfDrawOpenCVCube("ui.DrawOpenCVCube", false, true); + +pangolin::Var xSkew("ui.x", 0, -500, 500); +pangolin::Var ySkew("ui.y", 0, -50, 50); +pangolin::Var zSkew("ui.z", 0, -500, 500); +pangolin::Var cubeSize("ui.cubeSize", 0.05, 0.01, 2); +//End of append by Ivan + +pangolin::Var show_flow("ui.show_flow", false, false, true); +pangolin::Var show_obs("ui.show_obs", true, false, true); +pangolin::Var show_ids("ui.show_ids", false, false, true); + +pangolin::Var show_est_pos("ui.show_est_pos", true, false, true); +pangolin::Var show_est_vel("ui.show_est_vel", false, false, true); 
+pangolin::Var show_est_bg("ui.show_est_bg", false, false, true); +pangolin::Var show_est_ba("ui.show_est_ba", false, false, true); + +pangolin::Var show_gt("ui.show_gt", true, false, true); + +Button next_step_btn("ui.next_step", &next_step); +Button prev_step_btn("ui.prev_step", &prev_step); + +pangolin::Var continue_btn("ui.continue", false, false, true); +pangolin::Var continue_fast("ui.continue_fast", true, false, true); + +Button align_se3_btn("ui.align_se3", &alignButton); + +pangolin::Var euroc_fmt("ui.euroc_fmt", true, false, true); +pangolin::Var tum_rgbd_fmt("ui.tum_rgbd_fmt", false, false, true); +pangolin::Var kitti_fmt("ui.kitti_fmt", false, false, true); +pangolin::Var save_groundtruth("ui.save_groundtruth", false, false, true); +Button save_traj_btn("ui.save_traj", &saveTrajectoryButton); + +pangolin::Var follow("ui.follow", false, false, true); + +// pangolin::Var record("ui.record", false, false, true); + +pangolin::OpenGlRenderState camera; +pangolin::OpenGlRenderState ar_3d_camera; +pangolin::OpenGlMatrixSpec P; + +cv::Mat K = cv::Mat::eye(3, 3, cv::DataType::type); +cv::Mat image_to_texture; +std::vector distCoeffs(5); + + // Visualization variables +std::unordered_map vis_map; + +tbb::concurrent_bounded_queue out_vis_queue; +tbb::concurrent_bounded_queue::Ptr> + out_state_queue; + +std::vector vio_t_ns; +Eigen::aligned_vector vio_t_w_i; +Eigen::aligned_vector vio_T_w_i; + +// Appended by Ivan Podmogilnyi +std::vector img_cv_data; +std::string axes_seq; +std::string axes_signs; +// End Appended by Ivan Podmogilnyi + +std::vector gt_t_ns; +Eigen::aligned_vector gt_t_w_i; + +std::string marg_data_path; +size_t last_frame_processed = 0; + +tbb::concurrent_unordered_map> timestamp_to_id; + +std::mutex m; +std::condition_variable conditionVariable; +bool step_by_step = false; +size_t max_frames = 0; + +std::atomic terminate = false; + +// VIO variables +basalt::Calibration calib; + +basalt::VioDatasetPtr vio_dataset; +basalt::VioConfig vio_config; 
+basalt::OpticalFlowBase::Ptr opt_flow_ptr; +basalt::VioEstimatorBase::Ptr vio; + +void LoadCameraPose(const Eigen::Matrix &Tcw) +//void LoadCameraPose(const Sophus::Transformation &Tcw) +{ + pangolin::OpenGlMatrix M; + + M.m[0] = Tcw(0,0); + M.m[1] = Tcw(1,0); + M.m[2] = Tcw(2,0); + M.m[3] = 0.0; + + M.m[4] = Tcw(0,1); + M.m[5] = Tcw(1,1); + M.m[6] = Tcw(2,1); + M.m[7] = 0.0; + + M.m[8] = Tcw(0,2); + M.m[9] = Tcw(1,2); + M.m[10] = Tcw(2,2); + M.m[11] = 0.0; + + M.m[12] = Tcw(0,3); + M.m[13] = Tcw(1,3); + M.m[14] = Tcw(2,3); + M.m[15] = 1.0; + + M.Load(); +} + +void euler2Rot(Eigen::Matrix& Rot, double const rx, double const ry, double const rz){ + Eigen::Matrix Rx = Eigen::Matrix::Zero(); + Eigen::Matrix Ry = Eigen::Matrix::Zero(); + Eigen::Matrix Rz = Eigen::Matrix::Zero(); + + Rx(0, 0) = 1; + Rx(1, 1) = cos(rx); + Rx(2, 1) = sin(rx); + Rx(1, 2) = -sin(rx); + Rx(2, 2) = cos(rx); + + Ry(0, 0) = cos(ry); + Ry(0, 2) = sin(ry); + Ry(1, 1) = 1; + Ry(2, 0) = -sin(ry); + Ry(2, 2) = cos(ry); + + Rz(0, 0) = cos(rz); + Rz(0, 1) = -sin(rz); + Rz(1, 0) = sin(rz); + Rz(1, 1) = cos(rz); + Rz(2, 2) = 1; + + Rot = Rx * Ry * Rz; + +} + +Eigen::Vector3d rot2rvec(Eigen::Matrix& Mat, bool changeAngleSign_ = false){ + // Continue the convertion here. + // Convert from matrix to axis-angle representation. 
+ double angle = acos( (Mat.trace() - 1) / 2 ); + Eigen::Vector3d vec = {Mat(2, 1) - Mat(1, 2), Mat(0, 2) - Mat(2, 0), Mat(1, 0) - Mat(0, 1)}; + // The unit vector + Eigen::Vector3d axis = {( 1.0 / 2.0 * sin(angle) ) * vec(0), ( 1.0 / 2.0 * sin(angle) ) * vec(1), ( 1.0 / 2.0 * sin(angle) ) * vec(2)}; + Eigen::Vector3d rvec; + if (changeAngleSign_){ + rvec = -angle * axis; + } + else{ + rvec = angle * axis; + } + return rvec; +} + +void drawLinesCube(float x, float y, float z){ +// float size = 1.0; + pangolin::OpenGlMatrix M = pangolin::OpenGlMatrix::Translate(-x,-y,-z); + x = 0; y = 0; z = 0; + glPushMatrix(); + M.Multiply(); + glColor3f(0, 1, 0); + glLineWidth(5); + // glColor3ui(133, 247, 208); + glBegin(GL_LINES); + + // Bottom + // glColor3ui(133, 247, 208); + glVertex3f(x, y, z); + glVertex3f(x, y+1, z); + + // glColor3ui(253, 59, 86); + glVertex3f(x, y, z); + glVertex3f(x+1, y, z); + + // glColor3ui(147, 175, 215); + glVertex3f(x, y+1, z); + glVertex3f(x+1, y+1, z); + + // glColor3ui(80, 209, 168); + glVertex3f(x+1, y, z); + glVertex3f(x+1, y+1, z); + + // Top + // glColor3ui(154, 13, 88); + glVertex3f(x, y, z+1); + glVertex3f(x, y+1, z+1); + + // glColor3ui(253, 59, 86); + glVertex3f(x, y, z+1); + glVertex3f(x+1, y, z+1); + + // glColor3ui(5, 26, 72); + glVertex3f(x, y+1, z+1); + glVertex3f(x+1, y+1, z+1); + + // glColor3ui(72, 182, 8); + glVertex3f(x+1, y, z+1); + glVertex3f(x+1, y+1, z+1); + + // Sides + // glColor3ui(28, 122, 71); + glVertex3f(x, y, z); + glVertex3f(x, y, z+1); + + // glColor3ui(244, 207, 185); + glVertex3f(x, y+1, z); + glVertex3f(x, y+1, z+1); + + // glColor3ui(88, 153, 225); + glVertex3f(x+1, y, z); + glVertex3f(x+1, y, z+1); + + // glColor3ui(184, 151, 253); + glVertex3f(x+1, y+1, z); + glVertex3f(x+1, y+1, z+1); + + glEnd(); + glPopMatrix(); +} + +// Feed functions +void feed_images() { + std::cout << "Started input_data thread " << std::endl; + + for (int iterator=0; iterator<2;iterator++){ + for (size_t i = 0; i < 
vio_dataset->get_image_timestamps().size(); i++) { + if (vio->finished || terminate || (max_frames > 0 && i >= max_frames)) { + // stop loop early if we set a limit on number of frames to process + break; + } + + if (step_by_step) { + std::unique_lock lk(m); + conditionVariable.wait(lk); + } + + basalt::OpticalFlowInput::Ptr data(new basalt::OpticalFlowInput); + + data->t_ns = vio_dataset->get_image_timestamps()[i]; + data->img_data = vio_dataset->get_image_data(data->t_ns); + data->img_cv_data = vio_dataset->get_image_data_cv_mat(data->t_ns); + + timestamp_to_id[data->t_ns] = i; + + opt_flow_ptr->input_queue.push(data); + } + } + // Indicate the end of the sequence + opt_flow_ptr->input_queue.push(nullptr); + + std::cout << "Finished input_data thread " << std::endl; +} + +void feed_imu() { + for (int iterator=0;iterator<2;iterator++){ + for (size_t i = 0; i < vio_dataset->get_gyro_data().size(); i++) { + if (vio->finished || terminate) { + break; + } + + basalt::ImuData::Ptr data(new basalt::ImuData); + data->t_ns = vio_dataset->get_gyro_data()[i].timestamp_ns; + + data->accel = vio_dataset->get_accel_data()[i].data; + data->gyro = vio_dataset->get_gyro_data()[i].data; + + vio->imu_data_queue.push(data); + } + } + vio->imu_data_queue.push(nullptr); +} + +void DrawImageBasaltTexture(pangolin::GlTexture &imageTexture_, basalt::ImageData &im) +{ + imageTexture_.Upload(im.img->ptr,GL_RGB,GL_UNSIGNED_BYTE); + imageTexture_.RenderToViewportFlipY(); +} + +void DrawImageTexture(pangolin::GlTexture &imageTexture_, cv::Mat &im) +{ + imageTexture_.Upload(im.data, GL_LUMINANCE, GL_UNSIGNED_BYTE); + imageTexture_.RenderToViewportFlipY(); +// imageTexture_.RenderToViewport(); +} + +void DrawCube(const float &size,const float x, const float y, const float z) +{ + pangolin::OpenGlMatrix M = pangolin::OpenGlMatrix::Translate(-x,-size-y,-z); + glPushMatrix(); + M.Multiply(); + pangolin::glDrawColouredCube(-size,size); + glPopMatrix(); +} + +void DrawOpenCVCube(cv::Mat& image, 
const Sophus::SE3d& Trans, const cv::Mat& K_, const std::vector& distCoeffs_){ + cv::Point3d point1 = {xSkew, ySkew, zSkew}; + cv::Point3d point2 = {xSkew, ySkew+1, zSkew}; + cv::Point3d point3 = {xSkew+1, ySkew, zSkew}; + cv::Point3d point4 = {xSkew+1, ySkew+1, zSkew}; + cv::Point3d point5 = {xSkew, ySkew, zSkew+1}; + cv::Point3d point6 = {xSkew, ySkew+1, zSkew+1}; + cv::Point3d point7 = {xSkew+1, ySkew, zSkew+1}; + cv::Point3d point8 = {xSkew+1, ySkew+1, zSkew+1}; + + std::vector objectPoints = {point1, point2, point3, point4, point5, point6, point7, point8}; + std::vector imagePoints; + + Eigen::Matrix rotation = Trans.so3().matrix(); + Eigen::Vector3d rvec = rot2rvec(rotation); + std::vector rVec(rvec.data(), rvec.data() + rvec.rows() * rvec.cols()); + Eigen::Vector3d tvec = Trans.translation(); + std::vector tVec(tvec.data(), tvec.data() + tvec.rows() * tvec.cols()); + + // Projecting points + cv::projectPoints(objectPoints, rVec, tVec, K_, distCoeffs_, imagePoints); + cv::Vec3b color1 = {133, 247, 208}; + + cv::line(image, imagePoints[0], imagePoints[1], color1, 2); + cv::line(image, imagePoints[0], imagePoints[2], color1, 2); + cv::line(image, imagePoints[1], imagePoints[3], color1, 2); + cv::line(image, imagePoints[2], imagePoints[3], color1, 2); + cv::line(image, imagePoints[4], imagePoints[5], color1, 2); + cv::line(image, imagePoints[4], imagePoints[6], color1, 2); + cv::line(image, imagePoints[5], imagePoints[7], color1, 2); + cv::line(image, imagePoints[6], imagePoints[7], color1, 2); + cv::line(image, imagePoints[0], imagePoints[4], color1, 2); + cv::line(image, imagePoints[1], imagePoints[5], color1, 2); + cv::line(image, imagePoints[2], imagePoints[6], color1, 2); + cv::line(image, imagePoints[3], imagePoints[7], color1, 2); +} + +int main(int argc, char** argv) { + bool show_gui = true; + bool print_queue = false; + std::string cam_calib_path; + std::string dataset_path; + std::string dataset_type; + std::string config_path; + std::string 
result_path; + std::string trajectory_fmt; + bool trajectory_groundtruth; + int num_threads = 0; + double angles_divider; + bool use_imu = true; + bool use_double = false; + + CLI::App app{"App description"}; + + app.add_option("--show-gui", show_gui, "Show GUI"); + app.add_option("--cam-calib", cam_calib_path, + "Ground-truth camera calibration used for simulation.") + ->required(); + + app.add_option("--dataset-path", dataset_path, "Path to dataset.") + ->required(); + + app.add_option("--dataset-type", dataset_type, "Dataset type .") + ->required(); + + app.add_option("--marg-data", marg_data_path, + "Path to folder where marginalization data will be stored."); + + app.add_option("--print-queue", print_queue, "Print queue."); + app.add_option("--config-path", config_path, "Path to config file."); + app.add_option("--result-path", result_path, + "Path to result file where the system will write RMSE ATE."); + app.add_option("--num-threads", num_threads, "Number of threads."); + app.add_option("--step-by-step", step_by_step, "Path to config file."); + app.add_option("--save-trajectory", trajectory_fmt, + "Save trajectory. 
Supported formats "); + app.add_option("--save-groundtruth", trajectory_groundtruth, + "In addition to trajectory, save also ground turth"); + app.add_option("--use-imu", use_imu, "Use IMU."); + app.add_option("--use-double", use_double, "Use double not float."); + app.add_option( + "--max-frames", max_frames, + "Limit number of frames to process from dataset (0 means unlimited)"); + app.add_option("--axes-seq", axes_seq, "The desired sequence of axes"); + app.add_option("--axes-signs", axes_signs, "The desired sequence of the signs for the axes"); + app.add_option("--divide-angles", angles_divider, "The number by which we need to divide the angles"); + + // Appended by Ivan Podmogilnyi + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError& e) { + return app.exit(e); + } + + // global thread limit is in effect until global_control object is destroyed + std::unique_ptr tbb_global_control; + if (num_threads > 0) { + tbb_global_control = std::make_unique( + tbb::global_control::max_allowed_parallelism, num_threads); + } + + if (!config_path.empty()) { + vio_config.load(config_path); + + if (vio_config.vio_enforce_realtime) { + vio_config.vio_enforce_realtime = false; + std::cout + << "The option vio_config.vio_enforce_realtime was enabled, " + "but it should only be used with the live executables (supply " + "images at a constant framerate). This executable runs on the " + "datasets and processes images as fast as it can, so the option " + "will be disabled. 
" + << std::endl; + } + } + + load_data(cam_calib_path); + + { + basalt::DatasetIoInterfacePtr dataset_io = + basalt::DatasetIoFactory::getDatasetIo(dataset_type); + + dataset_io->read(dataset_path); + + vio_dataset = dataset_io->get_data(); + + show_frame.Meta().range[1] = vio_dataset->get_image_timestamps().size() - 1; + show_frame.Meta().gui_changed = true; + + opt_flow_ptr = + basalt::OpticalFlowFactory::getOpticalFlow(vio_config, calib); + + for (size_t i = 0; i < vio_dataset->get_gt_pose_data().size(); i++) { + gt_t_ns.push_back(vio_dataset->get_gt_timestamps()[i]); + gt_t_w_i.push_back(vio_dataset->get_gt_pose_data()[i].translation()); + } + } + + const int64_t start_t_ns = vio_dataset->get_image_timestamps().front(); + { + vio = basalt::VioEstimatorFactory::getVioEstimator( + vio_config, calib, basalt::constants::g, use_imu, use_double); + vio->initialize(Eigen::Vector3d::Zero(), Eigen::Vector3d::Zero()); + + opt_flow_ptr->output_queue = &vio->vision_data_queue; + if (show_gui) vio->out_vis_queue = &out_vis_queue; + vio->out_state_queue = &out_state_queue; + } + + basalt::MargDataSaver::Ptr marg_data_saver; + + if (!marg_data_path.empty()) { + marg_data_saver.reset(new basalt::MargDataSaver(marg_data_path)); + vio->out_marg_queue = &marg_data_saver->in_marg_queue; + + // Save gt. 
+ { + std::string p = marg_data_path + "/gt.cereal"; + std::ofstream os(p, std::ios::binary); + + { + cereal::BinaryOutputArchive archive(os); + archive(gt_t_ns); + archive(gt_t_w_i); + } + os.close(); + } + } + + vio_data_log.Clear(); + + std::thread t1(&feed_images); + std::thread t2(&feed_imu); + + std::shared_ptr t3; + + if (show_gui) + t3.reset(new std::thread([&]() { + basalt::VioVisualizationData::Ptr data; + + while (true) { + out_vis_queue.pop(data); + + if (data.get()) { + vis_map[data->t_ns] = data; + } else { + break; + } + } + + std::cout << "Finished t3" << std::endl; + })); + + std::thread t4([&]() { + basalt::PoseVelBiasState::Ptr data; + + while (true) { + out_state_queue.pop(data); + + if (!data.get()) break; + + int64_t t_ns = data->t_ns; + + // std::cerr << "t_ns " << t_ns << std::endl; + Sophus::SE3d T_w_i = data->T_w_i; + Eigen::Vector3d vel_w_i = data->vel_w_i; + Eigen::Vector3d bg = data->bias_gyro; + Eigen::Vector3d ba = data->bias_accel; + + vio_t_ns.emplace_back(data->t_ns); + vio_t_w_i.emplace_back(T_w_i.translation()); + vio_T_w_i.emplace_back(T_w_i); + + if (show_gui) { + std::vector vals; + vals.push_back((t_ns - start_t_ns) * 1e-9); + + for (int i = 0; i < 3; i++) vals.push_back(vel_w_i[i]); + for (int i = 0; i < 3; i++) vals.push_back(T_w_i.translation()[i]); + for (int i = 0; i < 3; i++) vals.push_back(bg[i]); + for (int i = 0; i < 3; i++) vals.push_back(ba[i]); + + vio_data_log.Log(vals); + } + } + + std::cout << "Finished t4" << std::endl; + }); + + std::shared_ptr t5; + + auto print_queue_fn = [&]() { + std::cout << "opt_flow_ptr->input_queue " + << opt_flow_ptr->input_queue.size() + << " opt_flow_ptr->output_queue " + << opt_flow_ptr->output_queue->size() << " out_state_queue " + << out_state_queue.size() << " imu_data_queue " + << vio->imu_data_queue.size() << std::endl; + }; + + if (print_queue) { + t5.reset(new std::thread([&]() { + while (!terminate) { + print_queue_fn(); + 
std::this_thread::sleep_for(std::chrono::seconds(1)); + } + })); + } + + auto time_start = std::chrono::high_resolution_clock::now(); + + // record if we close the GUI before VIO is finished. + bool aborted = false; + + if (show_gui) { + pangolin::CreateWindowAndBind("Main", 1800, 1000); + + glEnable(GL_DEPTH_TEST); + + pangolin::View& main_display = pangolin::CreateDisplay().SetBounds( + 0.0, 1.0, pangolin::Attach::Pix(UI_WIDTH), 1.0); + + pangolin::View& img_view_display = pangolin::CreateDisplay() + .SetBounds(0.4, 1.0, 0.0, 0.4) + .SetLayout(pangolin::LayoutEqual); + + pangolin::View& plot_display = pangolin::CreateDisplay().SetBounds( + 0.0, 0.4, pangolin::Attach::Pix(UI_WIDTH), 1.0); + + plotter = new pangolin::Plotter(&imu_data_log, 0.0, 100, -10.0, 10.0, 0.01f, + 0.01f); + plot_display.AddDisplay(*plotter); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + std::vector> img_view; + + while (img_view.size() < calib.intrinsics.size()) { + std::shared_ptr iv(new pangolin::ImageView); + + size_t idx = img_view.size(); + img_view.push_back(iv); + + img_view_display.AddDisplay(*iv); + iv->extern_draw_function = + std::bind(&draw_image_overlay, std::placeholders::_1, idx); + } + + + Eigen::Vector3d cam_p(-0.5, -3, -5); + cam_p = vio->getT_w_i_init().so3() * calib.T_i_c[0].so3() * cam_p; + + camera = pangolin::OpenGlRenderState( + pangolin::ProjectionMatrix(640, 480, 400, 400, 320, 240, 0.001, 10000), + pangolin::ModelViewLookAt(cam_p[0], cam_p[1], cam_p[2], 0, 0, 0, + pangolin::AxisZ)); + + pangolin::View& display3D = + pangolin::CreateDisplay() + .SetAspect(-640 / 480.0) + .SetBounds(0.4, 1.0, 0.4, 1.0) + .SetHandler(new pangolin::Handler3D(camera)); + + // Appended by Ivan + auto intrinsics_vector = calib.intrinsics[0].getParam(); + auto fx = intrinsics_vector[0]; + auto fy = intrinsics_vector[1]; + auto cx = intrinsics_vector[2]; + auto cy = intrinsics_vector[3]; + + // std::cout << "Hessian calib" << std::endl; + 
K.at(0,0) = 718.856; + // std::cout << "fx: " << K.at(0, 0); + K.at(1,1) = 718.856; + // std::cout << "fy: " << K.at(1, 1); + K.at(0,2) = 607.1928; + // std::cout << "cx: " << K.at(0, 2); + K.at(1,2) = 185.2157; + // std::cout << "cy: " << K.at (1, 2); + + distCoeffs[0] = 0.003482389402; + distCoeffs[1] = 0.000715034845; + distCoeffs[2] = -0.002053236141; + distCoeffs[3] = 0.000202936736; + distCoeffs[4] = 0; + + ar_3d_camera = pangolin::OpenGlRenderState( + pangolin::ProjectionMatrix(1241.0, 376.0, fx, fy, cx, cy, 0.001, 10000), + pangolin::ModelViewLookAt(0.0, 0.0, 0.0, 0, 0, 1, + pangolin::AxisNegY)); + + P = pangolin::ProjectionMatrix(1241, 376, fx, fy, cx, cy, 0.001, 10000); +// pangolin::GlTexture imageTexture(1241, +// 376, GL_LUMINANCE, false, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE); + // Appended + pangolin::View& ar_view = + pangolin::CreateDisplay() + .SetAspect(-1241.0 / 376.0) + .SetBounds(0.0, 1.0, 0.0, 1.0) + .SetHandler(new pangolin::Handler3D(ar_3d_camera)); + + // End Appended + + display3D.extern_draw_function = draw_scene; + + // Appended. + ar_view.extern_draw_function = draw_scene_no_camera; + + img_view_display.AddDisplay(ar_view); + // End Appended. + + main_display.AddDisplay(img_view_display); + main_display.AddDisplay(display3D); + + std::cout << "fx: " << fx << " fy: " << fy << " cx: " << cx << " cy: " << cy << std::endl; + // end append. 
+ + while (!pangolin::ShouldQuit()) { +// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + if (follow) { + size_t frame_id = show_frame; + int64_t t_ns = vio_dataset->get_image_timestamps()[frame_id]; + auto it = vis_map.find(t_ns); + + if (it != vis_map.end()) { + Sophus::SE3d T_w_i; + if (!it->second->states.empty()) { + T_w_i = it->second->states.back(); + } else if (!it->second->frames.empty()) { + T_w_i = it->second->frames.back(); + } + T_w_i.so3() = Sophus::SO3d(); + + camera.Follow(T_w_i.matrix()); + } + } + + // Appended + size_t frame_id_ar = static_cast(show_frame); + int64_t timestamp_ar = vio_dataset->get_image_timestamps()[frame_id_ar]; + + std::vector img_vec_ar = + vio_dataset->get_image_data(timestamp_ar); + + img_cv_data = vio_dataset->get_image_data_cv_mat(timestamp_ar); +// std::cout << "img_cv_data size: " << img_cv_data.size() << std::endl; +// for (uint i=0;isecond->states.empty()) { + T_w_i = it_ar->second->states.back(); + } else if (!it_ar->second->frames.empty()) { + T_w_i = it_ar->second->frames.back(); + } + + // For some reason, there are 4 elements, and two first of them are the empty images. + if (drawTexture) { + DrawImageTexture(imageTexture, img_cv_data[2]); + } + glClear(GL_DEPTH_BUFFER_BIT); +// glClear(GL_DEPTH_BUFFER_BIT); +// glClear(GL_DEPTH_BUFFER_BIT); + + // Ok, so here by the moment we are watching from the camera position and orientation. + // Drawing the cube from here might result in drawing the cube in the camera coordinate system. + // Although! We want to draw the cube from the world coordinate system, therefore + // I need to translate to the zero coordinates the world, draw the cube, and then + // Go back to the position and the orientation of the camera. + + // Load camera projection +// glMatrixMode(GL_PROJECTION); +// P.Load(); + +// if (it_ar != vis_map.end()) { +// Eigen::Matrix Rot; +// // Continue here. 
+// std::string desired_axes_sequence = axes_seq; +// std::vector axes = {0.0, 0.0, 0.0}; +// for (int i=0;i<3;i++){ +// if (desired_axes_sequence[i] == 'X'){ +// axes[i] = T_w_i.angleX(); +// } +// else if (desired_axes_sequence[i] == 'Y'){ +// axes[i] = T_w_i.angleY(); +// } +// else if (desired_axes_sequence[i] == 'Z'){ +// axes[i] = T_w_i.angleZ(); +// } +// +// if (axes_signs[i] == '+'){ +// axes[i] = axes[i]; +// } +// else if (axes_signs[i] == '-'){ +// axes[i] = -axes[i]; +// } +// } +// std::cout << "Rotation is: " << axes[0] << " " << axes[1] << " " << axes[2] << std::endl; +// euler2Rot(Rot, axes[0] / angles_divider, axes[1] / angles_divider, axes[2] / angles_divider); +// T_w_i.setRotationMatrix(Rot); + + // Load camera pose +// Eigen::Matrix Transform_matrix = T_w_i.matrix(); +// std::cout << "Current T_w_i: " << Transform_matrix << std::endl; +// glMatrixMode(GL_MODELVIEW); +// LoadCameraPose(Transform_matrix); +// } + // End appended. + + if (show_frame.GuiChanged()) { + for (size_t cam_id = 0; cam_id < calib.intrinsics.size(); cam_id++) { + size_t frame_id = static_cast(show_frame); + int64_t timestamp = vio_dataset->get_image_timestamps()[frame_id]; + + std::vector img_vec = + vio_dataset->get_image_data(timestamp); + + pangolin::GlPixFormat fmt; + fmt.glformat = GL_LUMINANCE; + fmt.gltype = GL_UNSIGNED_SHORT; + fmt.scalable_internal_format = GL_LUMINANCE16; + + if (img_vec[cam_id].img.get()) + img_view[cam_id]->SetImage( + img_vec[cam_id].img->ptr, img_vec[cam_id].img->w, + img_vec[cam_id].img->h, img_vec[cam_id].img->pitch, fmt); + } + + draw_plots(); + } + + if (show_est_vel.GuiChanged() || show_est_pos.GuiChanged() || + show_est_ba.GuiChanged() || show_est_bg.GuiChanged()) { + draw_plots(); + } + + if (euroc_fmt.GuiChanged()) { + euroc_fmt = true; + tum_rgbd_fmt = false; + kitti_fmt = false; + } + + if (tum_rgbd_fmt.GuiChanged()) { + tum_rgbd_fmt = true; + euroc_fmt = false; + kitti_fmt = false; + } + + if (kitti_fmt.GuiChanged()) { + kitti_fmt 
= true; + euroc_fmt = false; + tum_rgbd_fmt = false; + } + + // if (record) { + // main_display.RecordOnRender( + // "ffmpeg:[fps=50,bps=80000000,unique_filename]///tmp/" + // "vio_screencap.avi"); + // record = false; + // } + + pangolin::FinishFrame(); + + if (continue_btn) { + if (!next_step()) + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } else { + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } + + if (continue_fast) { + int64_t t_ns = vio->last_processed_t_ns; + if (timestamp_to_id.count(t_ns)) { + show_frame = timestamp_to_id[t_ns]; + show_frame.Meta().gui_changed = true; + } + + if (vio->finished) { + continue_fast = false; + } + } + } + + // If GUI closed but VIO not yet finished --> abort input queues, which in + // turn aborts processing + if (!vio->finished) { + std::cout << "GUI closed but odometry still running --> aborting.\n"; + print_queue_fn(); // print queue size at time of aborting + terminate = true; + aborted = true; + } + } + + // wait first for vio to complete processing + vio->maybe_join(); + + // input threads will abort when vio is finished, but might be stuck in full + // push to full queue, so drain queue now + vio->drain_input_queues(); + + // join input threads + t1.join(); + t2.join(); + + // std::cout << "Data input finished, terminate auxiliary threads."; + terminate = true; + + // join other threads + if (t3) t3->join(); + t4.join(); + if (t5) t5->join(); + + // after joining all threads, print final queue sizes. 
+ if (print_queue) { + std::cout << "Final queue sizes:" << std::endl; + print_queue_fn(); + } + + auto time_end = std::chrono::high_resolution_clock::now(); + const double duration_total = + std::chrono::duration(time_end - time_start).count(); + + // TODO: remove this unconditional call (here for debugging); + const double ate_rmse = + basalt::alignSVD(vio_t_ns, vio_t_w_i, gt_t_ns, gt_t_w_i); + vio->debug_finalize(); + std::cout << "Total runtime: {:.3f}s\n"_format(duration_total); + + { + basalt::ExecutionStats stats; + stats.add("exec_time_s", duration_total); + stats.add("ate_rmse", ate_rmse); + stats.add("ate_num_kfs", vio_t_w_i.size()); + stats.add("num_frames", vio_dataset->get_image_timestamps().size()); + + { + basalt::MemoryInfo mi; + if (get_memory_info(mi)) { + stats.add("resident_memory_peak", mi.resident_memory_peak); + } + } + + stats.save_json("stats_vio.json"); + } + + if (!aborted && !trajectory_fmt.empty()) { + std::cout << "Saving trajectory..." << std::endl; + + if (trajectory_fmt == "kitti") { + kitti_fmt = true; + euroc_fmt = false; + tum_rgbd_fmt = false; + } + if (trajectory_fmt == "euroc") { + euroc_fmt = true; + kitti_fmt = false; + tum_rgbd_fmt = false; + } + if (trajectory_fmt == "tum") { + tum_rgbd_fmt = true; + euroc_fmt = false; + kitti_fmt = false; + } + + save_groundtruth = trajectory_groundtruth; + + saveTrajectoryButton(); + } + + if (!aborted && !result_path.empty()) { + double error = basalt::alignSVD(vio_t_ns, vio_t_w_i, gt_t_ns, gt_t_w_i); + + auto exec_time_ns = std::chrono::duration_cast( + time_end - time_start); + + std::ofstream os(result_path); + { + cereal::JSONOutputArchive ar(os); + ar(cereal::make_nvp("rms_ate", error)); + ar(cereal::make_nvp("num_frames", + vio_dataset->get_image_timestamps().size())); + ar(cereal::make_nvp("exec_time_ns", exec_time_ns.count())); + } + os.close(); + } + + return 0; +} + +void draw_image_overlay(pangolin::View& v, size_t cam_id) { + UNUSED(v); + + size_t frame_id = show_frame; + 
auto it = vis_map.find(vio_dataset->get_image_timestamps()[frame_id]); + + if (show_obs) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (it != vis_map.end() && cam_id < it->second->projections.size()) { + const auto& points = it->second->projections[cam_id]; + + if (points.size() > 0) { + double min_id = points[0][2], max_id = points[0][2]; + + for (const auto& points2 : it->second->projections) + for (const auto& p : points2) { + min_id = std::min(min_id, p[2]); + max_id = std::max(max_id, p[2]); + } + + for (const auto& c : points) { + const float radius = 6.5; + + float r, g, b; + getcolor(c[2] - min_id, max_id - min_id, b, g, r); + glColor3f(r, g, b); + + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", int(c[3])).Draw(c[0], c[1]); + } + } + + glColor3f(1.0, 0.0, 0.0); + pangolin::GlFont::I() + .Text("Tracked %d points", points.size()) + .Draw(5, 20); + } + } + + if (show_flow) { + glLineWidth(1.0); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (it != vis_map.end()) { + const Eigen::aligned_map& + kp_map = it->second->opt_flow_res->observations[cam_id]; + + for (const auto& kv : kp_map) { + Eigen::MatrixXf transformed_patch = + kv.second.linear() * opt_flow_ptr->patch_coord; + transformed_patch.colwise() += kv.second.translation(); + + for (int i = 0; i < transformed_patch.cols(); i++) { + const Eigen::Vector2f c = transformed_patch.col(i); + pangolin::glDrawCirclePerimeter(c[0], c[1], 0.5f); + } + + const Eigen::Vector2f c = kv.second.translation(); + + if (show_ids) + pangolin::GlFont::I().Text("%d", kv.first).Draw(5 + c[0], 5 + c[1]); + } + + pangolin::GlFont::I() + .Text("%d opt_flow patches", kp_map.size()) + .Draw(5, 20); + } + } +} + +void draw_scene(pangolin::View& view) { + UNUSED(view); + view.Activate(camera); + + if (drawCubeBool){ + 
DrawCube(cubeSize, xSkew, ySkew, zSkew); + } + else if(drawLineCubeBool){ + drawLinesCube(xSkew, ySkew, zSkew); + } +// try{ +// view.Activate(T); +// throw (0); +// } +// catch (int status) { +// if (status == 0){ +// std::cout << "No passed camera" << std::endl; +// view.Activate(); +// } +// }; + + glClearColor(1.0f, 1.0f, 1.0f, 1.0f); + + glPointSize(3); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + glColor3ubv(cam_color); + if (!vio_t_w_i.empty()) { + size_t end = std::min(vio_t_w_i.size(), size_t(show_frame + 1)); + Eigen::aligned_vector sub_gt(vio_t_w_i.begin(), + vio_t_w_i.begin() + end); + pangolin::glDrawLineStrip(sub_gt); + } + + glColor3ubv(gt_color); + if (show_gt) pangolin::glDrawLineStrip(gt_t_w_i); + + size_t frame_id = show_frame; + int64_t t_ns = vio_dataset->get_image_timestamps()[frame_id]; + auto it = vis_map.find(t_ns); + + if (it != vis_map.end()) { + // T_i_c - transformation from camera coordinate point to the imu coordinate point. 
+ for (size_t i = 0; i < calib.T_i_c.size(); i++) + if (!it->second->states.empty()) { + render_camera((it->second->states.back() * calib.T_i_c[i]).matrix(), + 2.0f, cam_color, 0.1f); + } else if (!it->second->frames.empty()) { + render_camera((it->second->frames.back() * calib.T_i_c[i]).matrix(), + 2.0f, cam_color, 0.1f); + } + + for (const auto& p : it->second->states) + for (size_t i = 0; i < calib.T_i_c.size(); i++) + render_camera((p * calib.T_i_c[i]).matrix(), 2.0f, state_color, 0.1f); + + for (const auto& p : it->second->frames) + for (size_t i = 0; i < calib.T_i_c.size(); i++) + render_camera((p * calib.T_i_c[i]).matrix(), 2.0f, pose_color, 0.1f); + + glColor3ubv(pose_color); + pangolin::glDrawPoints(it->second->points); + } + + pangolin::glDrawAxis(Sophus::SE3d().matrix(), 1.0); +} + +void draw_scene_no_camera(pangolin::View& view) { + UNUSED(view); + view.Activate(ar_3d_camera); + auto T_w_i = vio_T_w_i[show_frame]; + auto T_i_c = calib.T_i_c[0]; + + glMatrixMode(GL_PROJECTION); + P.Load(); + + glMatrixMode(GL_MODELVIEW); +//// std::cout << "Extracted T_w_i matrix: " << T_w_i.matrix() << std::endl; + // If you want to translate OR to Load the constructed matrix (doesn't matter) for + // the camera, you need to pass it with the NEGATIVE sign (but probably only the translation part.). 
+ + std::string desired_axes_sequence = axes_seq; + std::vector axesTwi = {0.0, 0.0, 0.0}; + std::vector axesTic = {0.0, 0.0, 0.0}; + for (int i=0;i<3;i++){ + if (desired_axes_sequence[i] == 'X'){ + axesTwi[i] = T_w_i.angleX(); + axesTic[i] = T_i_c.angleX(); + } + else if (desired_axes_sequence[i] == 'Y'){ + axesTwi[i] = T_w_i.angleY(); + axesTic[i] = T_i_c.angleY(); + } + else if (desired_axes_sequence[i] == 'Z'){ + axesTwi[i] = T_w_i.angleZ(); + axesTic[i] = T_i_c.angleZ(); + } + + if (axes_signs[i] == '+'){ + axesTwi[i] = axesTwi[i]; + axesTic[i] = axesTic[i]; + } + else if (axes_signs[i] == '-'){ + axesTwi[i] = -axesTwi[i]; + axesTic[i] = -axesTic[i]; + } + } + + if (zeroOut){ + xSkew = 0; + ySkew = 0; + zSkew = 0; + } + if (drawCubeBool){ + DrawCube(cubeSize, xSkew, ySkew, zSkew); + } + else if(drawLineCubeBool){ + drawLinesCube(xSkew, ySkew, zSkew); + } + +// std::cout << "Twi with rot before: " << T_w_i.matrix() << std::endl; + Eigen::Matrix Rot; + euler2Rot(Rot, axesTwi[0], axesTwi[1], axesTwi[2]); + T_w_i.setRotationMatrix(Rot); + euler2Rot(Rot, axesTic[0], axesTic[1], axesTic[2]); + T_i_c.setRotationMatrix(Rot); +// std::cout << "Twi with rot after: " << T_w_i.matrix() << std::endl; +// std::cout << "Translation before: " << T_w_i.translation() << std::endl; +// std::cout << std::endl << "Translation after: " << T_w_i.translation() << std::endl; + +// LoadCameraPose((T_w_i * T_i_c).matrix()); + ar_3d_camera.Follow((T_w_i * T_i_c).matrix()); + + glClearColor(1.0f, 1.0f, 1.0f, 1.0f); + + glPointSize(3); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + glColor3ubv(cam_color); + + if (!vio_t_w_i.empty()) { + size_t end = std::min(vio_t_w_i.size(), size_t(show_frame + 1)); + Eigen::aligned_vector sub_gt(vio_t_w_i.begin(), + vio_t_w_i.begin() + end); + pangolin::glDrawLineStrip(sub_gt); + } + + glColor3ubv(gt_color); + if (show_gt) pangolin::glDrawLineStrip(gt_t_w_i); + + size_t frame_id = show_frame; + 
int64_t t_ns = vio_dataset->get_image_timestamps()[frame_id]; + auto it = vis_map.find(t_ns); + + if (it != vis_map.end()) { + // T_i_c - transformation from camera coordinate point to the imu coordinate point. + for (size_t i = 0; i < calib.T_i_c.size(); i++) + if (!it->second->states.empty()) { + render_camera((it->second->states.back() * calib.T_i_c[i]).matrix(), + 2.0f, cam_color, 0.1f); + } else if (!it->second->frames.empty()) { + render_camera((it->second->frames.back() * calib.T_i_c[i]).matrix(), + 2.0f, cam_color, 0.1f); + } + + for (const auto& p : it->second->states) + for (size_t i = 0; i < calib.T_i_c.size(); i++) + render_camera((p * calib.T_i_c[i]).matrix(), 2.0f, state_color, 0.1f); + + for (const auto& p : it->second->frames) + for (size_t i = 0; i < calib.T_i_c.size(); i++) + render_camera((p * calib.T_i_c[i]).matrix(), 2.0f, pose_color, 0.1f); + + glColor3ubv(pose_color); + pangolin::glDrawPoints(it->second->points); + } + + pangolin::glDrawAxis(Sophus::SE3d().matrix(), 1.0); +} + +void load_data(const std::string& calib_path) { + std::ifstream os(calib_path, std::ios::binary); + + if (os.is_open()) { + cereal::JSONInputArchive archive(os); + archive(calib); + std::cout << "Loaded camera with " << calib.intrinsics.size() << " cameras" + << std::endl; + + } else { + std::cerr << "could not load camera calibration " << calib_path + << std::endl; + std::abort(); + } +} + +bool next_step() { + if (show_frame < int(vio_dataset->get_image_timestamps().size()) - 1) { + show_frame = show_frame + 1; + show_frame.Meta().gui_changed = true; + conditionVariable.notify_one(); + return true; + } else { + return false; + } +} + +bool prev_step() { + if (show_frame > 1) { + show_frame = show_frame - 1; + show_frame.Meta().gui_changed = true; + return true; + } else { + return false; + } +} + +void draw_plots() { + plotter->ClearSeries(); + plotter->ClearMarkers(); + + if (show_est_pos) { + plotter->AddSeries("$0", "$4", pangolin::DrawingModeLine, + 
pangolin::Colour::Red(), "position x", &vio_data_log); + plotter->AddSeries("$0", "$5", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "position y", &vio_data_log); + plotter->AddSeries("$0", "$6", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "position z", &vio_data_log); + } + + if (show_est_vel) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "velocity x", &vio_data_log); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "velocity y", &vio_data_log); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "velocity z", &vio_data_log); + } + + if (show_est_bg) { + plotter->AddSeries("$0", "$7", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "gyro bias x", &vio_data_log); + plotter->AddSeries("$0", "$8", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "gyro bias y", &vio_data_log); + plotter->AddSeries("$0", "$9", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "gyro bias z", &vio_data_log); + } + + if (show_est_ba) { + plotter->AddSeries("$0", "$10", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "accel bias x", &vio_data_log); + plotter->AddSeries("$0", "$11", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "accel bias y", + &vio_data_log); + plotter->AddSeries("$0", "$12", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "accel bias z", &vio_data_log); + } + + double t = vio_dataset->get_image_timestamps()[show_frame] * 1e-9; + plotter->AddMarker(pangolin::Marker::Vertical, t, pangolin::Marker::Equal, + pangolin::Colour::White()); +} + +void alignButton() { basalt::alignSVD(vio_t_ns, vio_t_w_i, gt_t_ns, gt_t_w_i); } + +void saveTrajectoryButton() { + if (tum_rgbd_fmt) { + { + std::ofstream os("trajectory.txt"); + + os << "# timestamp tx ty tz qx qy qz qw" << std::endl; + + for (size_t i = 0; i < vio_t_ns.size(); i++) { + const Sophus::SE3d& pose = vio_T_w_i[i]; + os << std::scientific << 
std::setprecision(18) << vio_t_ns[i] * 1e-9 + << " " << pose.translation().x() << " " << pose.translation().y() + << " " << pose.translation().z() << " " << pose.unit_quaternion().x() + << " " << pose.unit_quaternion().y() << " " + << pose.unit_quaternion().z() << " " << pose.unit_quaternion().w() + << std::endl; + } + + os.close(); + } + + if (save_groundtruth) { + std::ofstream os("groundtruth.txt"); + + os << "# timestamp tx ty tz qx qy qz qw" << std::endl; + + for (size_t i = 0; i < gt_t_ns.size(); i++) { + const Eigen::Vector3d& pos = gt_t_w_i[i]; + os << std::scientific << std::setprecision(18) << gt_t_ns[i] * 1e-9 + << " " << pos.x() << " " << pos.y() << " " << pos.z() << " " + << "0 0 0 1" << std::endl; + } + + os.close(); + } + + std::cout + << "Saved trajectory in TUM RGB-D Dataset format in trajectory.txt" + << std::endl; + } else if (euroc_fmt) { + std::ofstream os("trajectory.csv"); + + os << "#timestamp [ns],p_RS_R_x [m],p_RS_R_y [m],p_RS_R_z [m],q_RS_w " + "[],q_RS_x [],q_RS_y [],q_RS_z []" + << std::endl; + + for (size_t i = 0; i < vio_t_ns.size(); i++) { + const Sophus::SE3d& pose = vio_T_w_i[i]; + os << std::scientific << std::setprecision(18) << vio_t_ns[i] << "," + << pose.translation().x() << "," << pose.translation().y() << "," + << pose.translation().z() << "," << pose.unit_quaternion().w() << "," + << pose.unit_quaternion().x() << "," << pose.unit_quaternion().y() + << "," << pose.unit_quaternion().z() << std::endl; + } + + std::cout << "Saved trajectory in Euroc Dataset format in trajectory.csv" + << std::endl; + } else { + std::ofstream os("trajectory_kitti.txt"); + + for (size_t i = 0; i < vio_t_ns.size(); i++) { + Eigen::Matrix mat = vio_T_w_i[i].matrix3x4(); + os << std::scientific << std::setprecision(12) << mat.row(0) << " " + << mat.row(1) << " " << mat.row(2) << " " << std::endl; + } + + os.close(); + + std::cout + << "Saved trajectory in KITTI Dataset format in trajectory_kitti.txt" + << std::endl; + } +} + diff --git 
a/src/vio_sim.cpp b/src/vio_sim.cpp new file mode 100644 index 0000000..f5dd80a --- /dev/null +++ b/src/vio_sim.cpp @@ -0,0 +1,911 @@ +/** +BSD 3-Clause License + +This file is part of the Basalt project. +https://gitlab.com/VladyslavUsenko/basalt.git + +Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include + +// GUI functions +void draw_image_overlay(pangolin::View& v, size_t cam_id); +void draw_scene(); +void load_data(const std::string& calib_path); +void gen_data(); +void compute_projections(); +void setup_vio(const std::string& config_path); +void draw_plots(); +bool next_step(); +void alignButton(); + +// Parameters for simulations +int NUM_POINTS = 1000; +double POINT_DIST = 10.0; +constexpr int NUM_FRAMES = 1000; +constexpr int CAM_FREQ = 20; +constexpr int IMU_FREQ = 200; + +static const int knot_time = 3; +static const double obs_std_dev = 0.5; + +Eigen::Vector3d g(0, 0, -9.81); + +// std::random_device rd{}; +// std::mt19937 gen{rd()}; +std::mt19937 gen{1}; +std::normal_distribution<> obs_noise_dist{0, obs_std_dev}; + +// Simulated data + +basalt::Se3Spline<5> gt_spline(int64_t(knot_time * 1e9)); + +Eigen::aligned_vector gt_points; +Eigen::aligned_vector gt_frame_T_w_i; +Eigen::aligned_vector gt_frame_t_w_i, vio_t_w_i; +std::vector gt_frame_t_ns; +Eigen::aligned_vector gt_accel, gt_gyro, gt_accel_bias, + gt_gyro_bias, noisy_accel, noisy_gyro, gt_vel; +std::vector gt_imu_t_ns; + +std::map gt_observations; +std::map noisy_observations; + +std::string marg_data_path; + +// VIO vars +basalt::Calibration calib; +basalt::VioEstimatorBase::Ptr vio; + +// Visualization vars +std::unordered_map vis_map; +tbb::concurrent_bounded_queue out_vis_queue; +tbb::concurrent_bounded_queue::Ptr> + out_state_queue; + +std::vector images; + +// Pangolin vars +constexpr int UI_WIDTH = 200; +pangolin::DataLog imu_data_log, vio_data_log, error_data_log; +pangolin::Plotter* plotter; + +pangolin::Var show_frame("ui.show_frame", 0, 0, NUM_FRAMES - 1); + +pangolin::Var show_obs("ui.show_obs", true, false, true); +pangolin::Var 
show_obs_noisy("ui.show_obs_noisy", true, false, true); +pangolin::Var show_obs_vio("ui.show_obs_vio", true, false, true); + +pangolin::Var show_ids("ui.show_ids", false, false, true); + +pangolin::Var show_accel("ui.show_accel", false, false, true); +pangolin::Var show_gyro("ui.show_gyro", false, false, true); +pangolin::Var show_gt_vel("ui.show_gt_vel", false, false, true); +pangolin::Var show_gt_pos("ui.show_gt_pos", true, false, true); +pangolin::Var show_gt_bg("ui.show_gt_bg", false, false, true); +pangolin::Var show_gt_ba("ui.show_gt_ba", false, false, true); + +pangolin::Var show_est_vel("ui.show_est_vel", false, false, true); +pangolin::Var show_est_pos("ui.show_est_pos", true, false, true); +pangolin::Var show_est_bg("ui.show_est_bg", false, false, true); +pangolin::Var show_est_ba("ui.show_est_ba", false, false, true); + +using Button = pangolin::Var>; + +Button next_step_btn("ui.next_step", &next_step); + +pangolin::Var continue_btn("ui.continue", true, false, true); + +Button align_step_btn("ui.align_se3", &alignButton); + +bool use_imu = true; +bool use_double = true; + +int main(int argc, char** argv) { + srand(1); + + bool show_gui = true; + std::string cam_calib_path; + std::string result_path; + std::string config_path; + + CLI::App app{"App description"}; + + app.add_option("--show-gui", show_gui, "Show GUI"); + app.add_option("--cam-calib", cam_calib_path, + "Ground-truth camera calibration used for simulation.") + ->required(); + + app.add_option("--marg-data", marg_data_path, + "Folder to store marginalization data.") + ->required(); + + app.add_option("--result-path", result_path, + "Path to result file where the system will write RMSE ATE."); + + app.add_option("--config-path", config_path, "Path to config file."); + + app.add_option("--num-points", NUM_POINTS, "Number of points in simulation."); + app.add_option("--use-imu", use_imu, "Use IMU."); + + try { + app.parse(argc, argv); + } catch (const CLI::ParseError& e) { + return app.exit(e); 
+ } + + basalt::MargDataSaver::Ptr mds; + if (!marg_data_path.empty()) { + mds.reset(new basalt::MargDataSaver(marg_data_path)); + } + + load_data(cam_calib_path); + + gen_data(); + + setup_vio(config_path); + + vio->out_vis_queue = &out_vis_queue; + vio->out_state_queue = &out_state_queue; + + if (mds.get()) { + vio->out_marg_queue = &mds->in_marg_queue; + } + + std::thread t0([&]() { + for (size_t i = 0; i < gt_imu_t_ns.size(); i++) { + basalt::ImuData::Ptr data(new basalt::ImuData); + data->t_ns = gt_imu_t_ns[i]; + + data->accel = noisy_accel[i]; + data->gyro = noisy_gyro[i]; + + vio->imu_data_queue.push(data); + } + + vio->imu_data_queue.push(nullptr); + + std::cout << "Finished t0" << std::endl; + }); + + std::thread t1([&]() { + for (size_t i = 0; i < gt_frame_t_ns.size(); i++) { + basalt::OpticalFlowResult::Ptr data(new basalt::OpticalFlowResult); + data->t_ns = gt_frame_t_ns[i]; + + for (size_t j = 0; j < calib.T_i_c.size(); j++) { + data->observations.emplace_back(); + basalt::TimeCamId tcid(data->t_ns, j); + const basalt::SimObservations& obs = noisy_observations.at(tcid); + for (size_t k = 0; k < obs.pos.size(); k++) { + Eigen::AffineCompact2f t; + t.setIdentity(); + t.translation() = obs.pos[k].cast(); + data->observations.back()[obs.id[k]] = t; + } + } + + vio->vision_data_queue.push(data); + } + + vio->vision_data_queue.push(nullptr); + + std::cout << "Finished t1" << std::endl; + }); + + std::thread t2([&]() { + basalt::VioVisualizationData::Ptr data; + + while (true) { + out_vis_queue.pop(data); + + if (data.get()) { + vis_map[data->t_ns] = data; + } else { + break; + } + } + + std::cout << "Finished t2" << std::endl; + }); + + std::thread t3([&]() { + basalt::PoseVelBiasState::Ptr data; + + while (true) { + out_state_queue.pop(data); + + if (!data.get()) break; + + int64_t t_ns = data->t_ns; + + // std::cerr << "t_ns " << t_ns << std::endl; + Sophus::SE3d T_w_i = data->T_w_i; + Eigen::Vector3d vel_w_i = data->vel_w_i; + Eigen::Vector3d bg = 
data->bias_gyro; + Eigen::Vector3d ba = data->bias_accel; + + vio_t_w_i.emplace_back(T_w_i.translation()); + + { + std::vector vals; + vals.push_back(t_ns * 1e-9); + + for (int i = 0; i < 3; i++) vals.push_back(vel_w_i[i]); + for (int i = 0; i < 3; i++) vals.push_back(T_w_i.translation()[i]); + for (int i = 0; i < 3; i++) vals.push_back(bg[i]); + for (int i = 0; i < 3; i++) vals.push_back(ba[i]); + + vio_data_log.Log(vals); + } + } + + std::cout << "Finished t3" << std::endl; + }); + + // std::thread t4([&]() { + + // basalt::MargData::Ptr data; + + // while (true) { + // out_marg_queue.pop(data); + + // if (data.get()) { + // int64_t kf_id = *data->kfs_to_marg.begin(); + + // std::string path = cache_path + "/" + std::to_string(kf_id) + + // ".cereal"; + // std::ofstream os(path, std::ios::binary); + + // { + // cereal::BinaryOutputArchive archive(os); + // archive(*data); + // } + // os.close(); + + // } else { + // break; + // } + // } + + // std::cout << "Finished t4" << std::endl; + + // }); + + if (show_gui) { + pangolin::CreateWindowAndBind("Main", 1800, 1000); + + glEnable(GL_DEPTH_TEST); + + pangolin::View& img_view_display = + pangolin::CreateDisplay() + .SetBounds(0.4, 1.0, pangolin::Attach::Pix(UI_WIDTH), 0.5) + .SetLayout(pangolin::LayoutEqual); + + pangolin::View& plot_display = pangolin::CreateDisplay().SetBounds( + 0.0, 0.4, pangolin::Attach::Pix(UI_WIDTH), 1.0); + + plotter = new pangolin::Plotter(&imu_data_log, 0.0, + gt_frame_t_ns[NUM_FRAMES - 1] * 1e-9, -10.0, + 10.0, 0.01f, 0.01f); + plot_display.AddDisplay(*plotter); + + pangolin::CreatePanel("ui").SetBounds(0.0, 1.0, 0.0, + pangolin::Attach::Pix(UI_WIDTH)); + + std::vector> img_view; + while (img_view.size() < calib.intrinsics.size()) { + std::shared_ptr iv(new pangolin::ImageView); + + size_t idx = img_view.size(); + img_view.push_back(iv); + + img_view_display.AddDisplay(*iv); + iv->extern_draw_function = + std::bind(&draw_image_overlay, std::placeholders::_1, idx); + } + + 
pangolin::OpenGlRenderState camera( + pangolin::ProjectionMatrix(640, 480, 400, 400, 320, 240, 0.001, 10000), + pangolin::ModelViewLookAt(15, 3, 15, 0, 0, 0, pangolin::AxisZ)); + + pangolin::View& display3D = + pangolin::CreateDisplay() + .SetAspect(-640 / 480.0) + .SetBounds(0.4, 1.0, 0.5, 1.0) + .SetHandler(new pangolin::Handler3D(camera)); + + while (!pangolin::ShouldQuit()) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + display3D.Activate(camera); + glClearColor(0.95f, 0.95f, 0.95f, 1.0f); + + draw_scene(); + + img_view_display.Activate(); + + if (show_frame.GuiChanged()) { + for (size_t i = 0; i < calib.intrinsics.size(); i++) { + img_view[i]->SetImage(images[i]); + } + draw_plots(); + } + + if (show_accel.GuiChanged() || show_gyro.GuiChanged() || + show_gt_vel.GuiChanged() || show_gt_pos.GuiChanged() || + show_gt_ba.GuiChanged() || show_gt_bg.GuiChanged() || + show_est_vel.GuiChanged() || show_est_pos.GuiChanged() || + show_est_ba.GuiChanged() || show_est_bg.GuiChanged()) { + draw_plots(); + } + + pangolin::FinishFrame(); + + if (continue_btn) { + if (!next_step()) continue_btn = false; + } else { + std::this_thread::sleep_for(std::chrono::milliseconds(50)); + } + } + } + + t0.join(); + t1.join(); + t2.join(); + t3.join(); + // t4.join(); + + if (!result_path.empty()) { + double error = basalt::alignSVD(gt_frame_t_ns, vio_t_w_i, gt_frame_t_ns, + gt_frame_t_w_i); + + std::ofstream os(result_path); + os << error << std::endl; + os.close(); + } + + return 0; +} + +void draw_image_overlay(pangolin::View& v, size_t cam_id) { + UNUSED(v); + + size_t frame_id = show_frame; + basalt::TimeCamId tcid(gt_frame_t_ns[frame_id], cam_id); + + if (show_obs) { + glLineWidth(1.0); + glColor3ubv(gt_color); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (gt_observations.find(tcid) != gt_observations.end()) { + const basalt::SimObservations& cr = gt_observations.at(tcid); + + for (size_t i = 0; i < cr.pos.size(); i++) { + const 
float radius = 2; + const Eigen::Vector2f c = cr.pos[i].cast(); + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", cr.id[i]).Draw(c[0], c[1]); + } + + pangolin::GlFont::I().Text("%d gt points", cr.pos.size()).Draw(5, 20); + } + } + + if (show_obs_noisy) { + glLineWidth(1.0); + glColor3f(1.0, 1.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + if (noisy_observations.find(tcid) != noisy_observations.end()) { + const basalt::SimObservations& cr = noisy_observations.at(tcid); + + for (size_t i = 0; i < cr.pos.size(); i++) { + const float radius = 2; + const Eigen::Vector2f c = cr.pos[i].cast(); + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", cr.id[i]).Draw(c[0], c[1]); + } + + pangolin::GlFont::I().Text("%d noisy points", cr.pos.size()).Draw(5, 40); + } + } + + if (show_obs_vio) { + glLineWidth(1.0); + glColor3f(0.0, 0.0, 1.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + auto it = vis_map.find(gt_frame_t_ns[frame_id]); + + if (it != vis_map.end() && cam_id < it->second->projections.size()) { + const auto& points = it->second->projections[cam_id]; + + if (points.size() > 0) { + double min_id = points[0][2], max_id = points[0][2]; + for (size_t i = 0; i < points.size(); i++) { + min_id = std::min(min_id, points[i][2]); + max_id = std::max(max_id, points[i][2]); + } + + for (size_t i = 0; i < points.size(); i++) { + const float radius = 2; + const Eigen::Vector4d c = points[i]; + pangolin::glDrawCirclePerimeter(c[0], c[1], radius); + + if (show_ids) + pangolin::GlFont::I().Text("%d", int(c[3])).Draw(c[0], c[1]); + } + } + + glColor3f(0.0, 0.0, 1.0); + pangolin::GlFont::I().Text("%d vio points", points.size()).Draw(5, 60); + } + } +} + +void draw_scene() { + glPointSize(3); + glColor3f(1.0, 0.0, 0.0); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + 
glColor3ubv(gt_color); + pangolin::glDrawPoints(gt_points); + pangolin::glDrawLineStrip(gt_frame_t_w_i); + + glColor3ubv(cam_color); + pangolin::glDrawLineStrip(vio_t_w_i); + + size_t frame_id = show_frame; + + auto it = vis_map.find(gt_frame_t_ns[frame_id]); + + if (it != vis_map.end()) { + for (const auto& p : it->second->states) + for (size_t i = 0; i < calib.T_i_c.size(); i++) + render_camera((p * calib.T_i_c[i]).matrix(), 2.0f, cam_color, 0.1f); + + for (const auto& p : it->second->frames) + for (size_t i = 0; i < calib.T_i_c.size(); i++) + render_camera((p * calib.T_i_c[i]).matrix(), 2.0f, pose_color, 0.1f); + + glColor3ubv(pose_color); + pangolin::glDrawPoints(it->second->points); + } + + pangolin::glDrawAxis(gt_frame_T_w_i[frame_id].matrix(), 0.1); + + pangolin::glDrawAxis(Sophus::SE3d().matrix(), 1.0); +} + +void load_data(const std::string& calib_path) { + std::ifstream os(calib_path, std::ios::binary); + + if (os.is_open()) { + cereal::JSONInputArchive archive(os); + archive(calib); + std::cout << "Loaded camera with " << calib.intrinsics.size() << " cameras" + << std::endl; + + } else { + std::cerr << "could not load camera calibration " << calib_path + << std::endl; + std::abort(); + } +} + +void compute_projections() { + for (size_t i = 0; i < gt_frame_T_w_i.size(); i++) { + for (size_t j = 0; j < calib.T_i_c.size(); j++) { + basalt::TimeCamId tcid(gt_frame_t_ns[i], j); + basalt::SimObservations obs, obs_noisy; + + for (size_t k = 0; k < gt_points.size(); k++) { + Eigen::Vector4d p_cam; + p_cam.head<3>() = + (gt_frame_T_w_i[i] * calib.T_i_c[j]).inverse() * gt_points[k]; + + std::visit( + [&](const auto& cam) { + if (p_cam[2] > 0.1) { + Eigen::Vector2d p_2d; + cam.project(p_cam, p_2d); + + const int border = 5; + if (p_2d[0] >= border && p_2d[1] >= border && + p_2d[0] < calib.resolution[j][0] - border - 1 && + p_2d[1] < calib.resolution[j][1] - border - 1) { + obs.pos.emplace_back(p_2d); + obs.id.emplace_back(k); + + p_2d[0] += obs_noise_dist(gen); + 
p_2d[1] += obs_noise_dist(gen); + + obs_noisy.pos.emplace_back(p_2d); + obs_noisy.id.emplace_back(k); + } + } + }, + calib.intrinsics[j].variant); + } + + gt_observations[tcid] = obs; + noisy_observations[tcid] = obs_noisy; + } + } +} + +void gen_data() { + std::normal_distribution<> gyro_noise_dist{ + 0, calib.dicrete_time_gyro_noise_std()[0]}; + std::normal_distribution<> accel_noise_dist{ + 0, calib.dicrete_time_accel_noise_std()[0]}; + + std::normal_distribution<> gyro_bias_dist{0, calib.gyro_bias_std[0]}; + std::normal_distribution<> accel_bias_dist{0, calib.accel_bias_std[0]}; + + for (size_t i = 0; i < calib.intrinsics.size(); i++) { + images.emplace_back(); + images.back() = + pangolin::TypedImage(calib.resolution[i][0], calib.resolution[i][1], + pangolin::PixelFormatFromString("GRAY8")); + + images.back().Fill(200); + } + show_frame.Meta().gui_changed = true; + + double seconds = NUM_FRAMES / CAM_FREQ; + int num_knots = seconds / knot_time + 5; + // for (int i = 0; i < 2; i++) gt_spline.knots_push_back(Sophus::SE3d()); + gt_spline.genRandomTrajectory(num_knots); + + int64_t t_ns = 0; + int64_t dt_ns = int64_t(1e9) / CAM_FREQ; + + for (int i = 0; i < NUM_FRAMES; i++) { + gt_frame_T_w_i.emplace_back(gt_spline.pose(t_ns)); + gt_frame_t_w_i.emplace_back(gt_frame_T_w_i.back().translation()); + gt_frame_t_ns.emplace_back(t_ns); + + t_ns += dt_ns; + } + + dt_ns = int64_t(1e9) / IMU_FREQ; + + int64_t offset = + dt_ns / 2; // Offset to make IMU in the center of the interval + t_ns = offset; + + imu_data_log.Clear(); + + gt_accel_bias.clear(); + gt_gyro_bias.clear(); + + gt_accel_bias.emplace_back(Eigen::Vector3d::Random() / 10); + gt_gyro_bias.emplace_back(Eigen::Vector3d::Random() / 100); + + // gt_accel_bias.emplace_back(Eigen::Vector3d::Zero()); + // gt_gyro_bias.emplace_back(Eigen::Vector3d::Zero()); + + while (t_ns < gt_frame_t_ns.back()) { + Sophus::SE3d pose = gt_spline.pose(t_ns); + Eigen::Vector3d accel_body = + pose.so3().inverse() * 
(gt_spline.transAccelWorld(t_ns) - g); + Eigen::Vector3d rot_vel_body = gt_spline.rotVelBody(t_ns); + + gt_accel.emplace_back(accel_body); + gt_gyro.emplace_back(rot_vel_body); + gt_vel.emplace_back(gt_spline.transVelWorld(t_ns)); + + accel_body[0] += accel_noise_dist(gen); + accel_body[1] += accel_noise_dist(gen); + accel_body[2] += accel_noise_dist(gen); + + accel_body += gt_accel_bias.back(); + + rot_vel_body[0] += gyro_noise_dist(gen); + rot_vel_body[1] += gyro_noise_dist(gen); + rot_vel_body[2] += gyro_noise_dist(gen); + + rot_vel_body += gt_gyro_bias.back(); + + noisy_accel.emplace_back(accel_body); + noisy_gyro.emplace_back(rot_vel_body); + + gt_imu_t_ns.emplace_back(t_ns + offset); + + std::vector vals; + vals.push_back(t_ns * 1e-9); + + for (int i = 0; i < 3; i++) vals.push_back(gt_accel.back()[i]); + for (int i = 0; i < 3; i++) vals.push_back(gt_gyro.back()[i]); + for (int i = 0; i < 3; i++) vals.push_back(gt_vel.back()[i]); + for (int i = 0; i < 3; i++) vals.push_back(pose.translation()[i]); + for (int i = 0; i < 3; i++) vals.push_back(gt_gyro_bias.back()[i]); + for (int i = 0; i < 3; i++) vals.push_back(gt_accel_bias.back()[i]); + + imu_data_log.Log(vals); + + double dt_sqrt = std::sqrt(dt_ns * 1e-9); + Eigen::Vector3d gt_accel_bias_next = gt_accel_bias.back(); + gt_accel_bias_next[0] += accel_bias_dist(gen) * dt_sqrt; + gt_accel_bias_next[1] += accel_bias_dist(gen) * dt_sqrt; + gt_accel_bias_next[2] += accel_bias_dist(gen) * dt_sqrt; + gt_accel_bias.emplace_back(gt_accel_bias_next); + + Eigen::Vector3d gt_gyro_bias_next = gt_gyro_bias.back(); + gt_gyro_bias_next[0] += gyro_bias_dist(gen) * dt_sqrt; + gt_gyro_bias_next[1] += gyro_bias_dist(gen) * dt_sqrt; + gt_gyro_bias_next[2] += gyro_bias_dist(gen) * dt_sqrt; + gt_gyro_bias.emplace_back(gt_gyro_bias_next); + + t_ns += dt_ns; + } + + show_accel.Meta().gui_changed = true; + + for (int i = 0; i < NUM_POINTS; i++) { + Eigen::Vector3d point; + + point = Eigen::Vector3d::Random().normalized() * POINT_DIST; 
+ + gt_points.push_back(point); + } + + compute_projections(); + + // Save spline data + { + std::string path = marg_data_path + "/gt_spline.cereal"; + + std::cout << "Saving gt_spline " << path << std::endl; + + std::ofstream os(path, std::ios::binary); + { + cereal::JSONOutputArchive archive(os); + + int64_t t_ns = gt_spline.getDtNs(); + + Eigen::aligned_vector knots; + for (size_t i = 0; i < gt_spline.numKnots(); i++) { + knots.push_back(gt_spline.getKnot(i)); + } + + archive(cereal::make_nvp("t_ns", t_ns)); + archive(cereal::make_nvp("knots", knots)); + + archive(cereal::make_nvp("noisy_accel", noisy_accel)); + archive(cereal::make_nvp("noisy_gyro", noisy_gyro)); + archive(cereal::make_nvp("noisy_accel", gt_accel)); + archive(cereal::make_nvp("gt_gyro", gt_gyro)); + archive(cereal::make_nvp("gt_points", gt_points)); + archive(cereal::make_nvp("gt_accel_bias", gt_accel_bias)); + archive(cereal::make_nvp("gt_gyro_bias", gt_gyro_bias)); + + archive(cereal::make_nvp("gt_observations", gt_observations)); + archive(cereal::make_nvp("noisy_observations", noisy_observations)); + + archive(cereal::make_nvp("gt_points", gt_points)); + + archive(cereal::make_nvp("gt_frame_t_ns", gt_frame_t_ns)); + archive(cereal::make_nvp("gt_imu_t_ns", gt_imu_t_ns)); + } + + os.close(); + } +} + +void draw_plots() { + plotter->ClearSeries(); + plotter->ClearMarkers(); + + if (show_accel) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "accel measurements x"); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "accel measurements y"); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "accel measurements z"); + } + + if (show_gyro) { + plotter->AddSeries("$0", "$4", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "gyro measurements x"); + plotter->AddSeries("$0", "$5", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "gyro measurements y"); + 
plotter->AddSeries("$0", "$6", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "gyro measurements z"); + } + + if (show_gt_vel) { + plotter->AddSeries("$0", "$7", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "ground-truth velocity x"); + plotter->AddSeries("$0", "$8", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "ground-truth velocity y"); + plotter->AddSeries("$0", "$9", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "ground-truth velocity z"); + } + + if (show_gt_pos) { + plotter->AddSeries("$0", "$10", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "ground-truth position x"); + plotter->AddSeries("$0", "$11", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "ground-truth position y"); + plotter->AddSeries("$0", "$12", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "ground-truth position z"); + } + + if (show_gt_bg) { + plotter->AddSeries("$0", "$13", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "ground-truth gyro bias x"); + plotter->AddSeries("$0", "$14", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "ground-truth gyro bias y"); + plotter->AddSeries("$0", "$15", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "ground-truth gyro bias z"); + } + + if (show_gt_ba) { + plotter->AddSeries("$0", "$16", pangolin::DrawingModeDashed, + pangolin::Colour::Red(), "ground-truth accel bias x"); + plotter->AddSeries("$0", "$17", pangolin::DrawingModeDashed, + pangolin::Colour::Green(), "ground-truth accel bias y"); + plotter->AddSeries("$0", "$18", pangolin::DrawingModeDashed, + pangolin::Colour::Blue(), "ground-truth accel bias z"); + } + + if (show_est_vel) { + plotter->AddSeries("$0", "$1", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "estimated velocity x", + &vio_data_log); + plotter->AddSeries("$0", "$2", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "estimated velocity y", + &vio_data_log); + plotter->AddSeries("$0", "$3", pangolin::DrawingModeLine, 
+ pangolin::Colour::Blue(), "estimated velocity z", + &vio_data_log); + } + + if (show_est_pos) { + plotter->AddSeries("$0", "$4", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "estimated position x", + &vio_data_log); + plotter->AddSeries("$0", "$5", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "estimated position y", + &vio_data_log); + plotter->AddSeries("$0", "$6", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "estimated position z", + &vio_data_log); + } + + if (show_est_bg) { + plotter->AddSeries("$0", "$7", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "estimated gyro bias x", + &vio_data_log); + plotter->AddSeries("$0", "$8", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "estimated gyro bias y", + &vio_data_log); + plotter->AddSeries("$0", "$9", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "estimated gyro bias z", + &vio_data_log); + } + + if (show_est_ba) { + plotter->AddSeries("$0", "$10", pangolin::DrawingModeLine, + pangolin::Colour::Red(), "estimated accel bias x", + &vio_data_log); + plotter->AddSeries("$0", "$11", pangolin::DrawingModeLine, + pangolin::Colour::Green(), "estimated accel bias y", + &vio_data_log); + plotter->AddSeries("$0", "$12", pangolin::DrawingModeLine, + pangolin::Colour::Blue(), "estimated accel bias z", + &vio_data_log); + } + + double t = gt_frame_t_ns[show_frame] * 1e-9; + plotter->AddMarker(pangolin::Marker::Vertical, t, pangolin::Marker::Equal, + pangolin::Colour::White()); +} + +void setup_vio(const std::string& config_path) { + int64_t t_init_ns = gt_frame_t_ns[0]; + Sophus::SE3d T_w_i_init = gt_frame_T_w_i[0]; + Eigen::Vector3d vel_w_i_init = gt_spline.transVelWorld(t_init_ns); + + std::cout << "Setting up filter: t_ns " << t_init_ns << std::endl; + std::cout << "T_w_i\n" << T_w_i_init.matrix() << std::endl; + std::cout << "vel_w_i " << vel_w_i_init.transpose() << std::endl; + + basalt::VioConfig config; + if (!config_path.empty()) { + config.load(config_path); + } + 
+ vio = basalt::VioEstimatorFactory::getVioEstimator( + config, calib, basalt::constants::g, use_imu, use_double); + vio->initialize(t_init_ns, T_w_i_init, vel_w_i_init, gt_gyro_bias.front(), + gt_accel_bias.front()); + + // int iteration = 0; + vio_data_log.Clear(); + error_data_log.Clear(); + vio_t_w_i.clear(); +} + +bool next_step() { + if (show_frame < NUM_FRAMES - 1) { + show_frame = show_frame + 1; + show_frame.Meta().gui_changed = true; + return true; + } else { + return false; + } +} + +void alignButton() { + basalt::alignSVD(gt_frame_t_ns, vio_t_w_i, gt_frame_t_ns, gt_frame_t_w_i); +} diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt new file mode 100644 index 0000000..7583fd7 --- /dev/null +++ b/test/CMakeLists.txt @@ -0,0 +1,39 @@ +cmake_minimum_required(VERSION 3.10...3.18) + +# Note: add_subdirectory(googletest ...) is called in basalt-headers + +include_directories(../thirdparty/basalt-headers/test/include) + + +add_executable(test_spline_opt src/test_spline_opt.cpp) +target_link_libraries(test_spline_opt gtest gtest_main basalt) + +add_executable(test_vio src/test_vio.cpp) +target_link_libraries(test_vio gtest gtest_main basalt) + +add_executable(test_nfr src/test_nfr.cpp) +target_link_libraries(test_nfr gtest gtest_main basalt) + +add_executable(test_qr src/test_qr.cpp) +target_link_libraries(test_qr gtest gtest_main basalt) + +add_executable(test_linearization src/test_linearization.cpp) +target_link_libraries(test_linearization gtest gtest_main basalt) + +add_executable(test_patch src/test_patch.cpp) +target_link_libraries(test_patch gtest gtest_main basalt) + +enable_testing() + +include(GoogleTest) + +#gtest_discover_tests(test_spline_opt DISCOVERY_TIMEOUT 60) +#gtest_discover_tests(test_vio DISCOVERY_TIMEOUT 60) +#gtest_discover_tests(test_nfr DISCOVERY_TIMEOUT 60) + +gtest_add_tests(TARGET test_spline_opt AUTO) +gtest_add_tests(TARGET test_vio AUTO) +gtest_add_tests(TARGET test_nfr AUTO) +gtest_add_tests(TARGET test_qr AUTO) 
+gtest_add_tests(TARGET test_linearization AUTO) +gtest_add_tests(TARGET test_patch AUTO) diff --git a/test/ivan-tests/euler2RotTest.cpp b/test/ivan-tests/euler2RotTest.cpp new file mode 100644 index 0000000..a8fb170 --- /dev/null +++ b/test/ivan-tests/euler2RotTest.cpp @@ -0,0 +1,71 @@ +// +// Created by ivan on 28.03.2022. +// + +#include +#include +#include + +void euler2Rot(Eigen::Matrix& Rot, double const rx, double const ry, double const rz){ + Eigen::Matrix Rx = Eigen::Matrix::Zero(); + Eigen::Matrix Ry = Eigen::Matrix::Zero(); + Eigen::Matrix Rz = Eigen::Matrix::Zero(); + + Rx(0, 0) = 1; + Rx(1, 1) = cos(rx); + Rx(2, 1) = sin(rx); + Rx(1, 2) = -sin(rx); + Rx(2, 2) = cos(rx); + + std::cout << Rx << std::endl; + std::cout << " " << std::endl; + + Ry(0, 0) = cos(ry); + Ry(0, 2) = sin(ry); + Ry(1, 1) = 1; + Ry(2, 0) = -sin(ry); + Ry(2, 2) = cos(ry); + + std::cout << Ry << std::endl; + std::cout << " " << std::endl; + + Rz(0, 0) = cos(rz); + Rz(0, 1) = -sin(rz); + Rz(1, 0) = sin(rz); + Rz(1, 1) = cos(rz); + Rz(2, 2) = 1; + + std::cout << Rz << std::endl; + std::cout << " " << std::endl; + + auto Rxy = Rx * Ry; + Rot = Rxy * Rz; + +} + +int main(int argc, char** argv){ + double rx; + double ry; + double rz; + rx = 1.1; + ry = 1.2; + rz = 1.1; + Eigen::Matrix Rot; + + euler2Rot(Rot, rx, ry, rz); + std::cout << Rot << std::endl; + + std::cout << "" << std::endl; + std::cout << "The eigen matrix multiplication" << std::endl; + Eigen::Matrix M1; + Eigen::Matrix M2; + M1 << 1, 2, 3, + 4, 5, 6, + 7, 8, 9; + + M2 << 1, 2, 3, + 4, 5, 6, + 7, 8, 9; + + std::cout << M1 * M2 << std::endl; +} \ No newline at end of file diff --git a/test/src/test_linearization.cpp b/test/src/test_linearization.cpp new file mode 100644 index 0000000..7b18deb --- /dev/null +++ b/test/src/test_linearization.cpp @@ -0,0 +1,418 @@ + + +#include + +#include + +#include "gtest/gtest.h" +#include "test_utils.h" + +template +void get_vo_estimator(int num_frames, + basalt::BundleAdjustmentBase& 
estimator, + basalt::AbsOrderMap& aom) { + static constexpr int POSE_SIZE = 6; + + estimator.huber_thresh = 0.5; + estimator.obs_std_dev = 2.0; + + estimator.calib.T_i_c.emplace_back( + Sophus::se3_expd(Sophus::Vector6d::Random() / 100)); + estimator.calib.T_i_c.emplace_back( + Sophus::se3_expd(Sophus::Vector6d::Random() / 100)); + + basalt::GenericCamera cam; + cam.variant = basalt::KannalaBrandtCamera4::getTestProjections()[0]; + + estimator.calib.intrinsics.emplace_back(cam); + estimator.calib.intrinsics.emplace_back(cam); + + Eigen::MatrixXd points_3d; + points_3d.setRandom(3, num_frames * 10); + points_3d.row(2).array() += 5.0; + + aom.total_size = 0; + + for (int i = 0; i < num_frames; i++) { + Sophus::SE3d T_w_i; + T_w_i.so3() = Sophus::SO3d::exp(Eigen::Vector3d::Random() / 100); + T_w_i.translation()[0] = i * 0.1; + + aom.abs_order_map[i] = std::make_pair(i * POSE_SIZE, POSE_SIZE); + aom.total_size += POSE_SIZE; + + estimator.frame_poses[i] = basalt::PoseStateWithLin(i, T_w_i, false); + + for (int j = 0; j < 10; j++) { + const int kp_idx = 10 * i + j; + Eigen::Vector3d p3d = points_3d.col(kp_idx); + basalt::Keypoint kpt; + + Sophus::SE3d T_c_w = estimator.calib.T_i_c[0].inverse() * T_w_i.inverse(); + Eigen::Vector3d p3d_cam = T_c_w * p3d; + + kpt.direction = + basalt::StereographicParam::project(p3d_cam.homogeneous()); + kpt.inv_dist = 1.0 / p3d_cam.norm(); + kpt.host_kf_id = basalt::TimeCamId(i, 0); + + estimator.lmdb.addLandmark(kp_idx, kpt); + + for (const auto& [frame_id, frame_pose] : estimator.frame_poses) { + for (int c = 0; c < 2; c++) { + basalt::TimeCamId tcid(frame_id, c); + Sophus::SE3d T_c_w = estimator.calib.T_i_c[c].inverse() * + frame_pose.getPose().inverse(); + + Eigen::Vector3d p3d_cam = T_c_w * p3d; + Eigen::Vector2d p2d_cam; + cam.project(p3d_cam, p2d_cam); + + p2d_cam += Eigen::Vector2d::Random() / 100; + + basalt::KeypointObservation ko; + ko.kpt_id = kp_idx; + ko.pos = p2d_cam; + + estimator.lmdb.addObservation(tcid, ko); + } + } + } 
+ } +} + +template +void get_vo_estimator_with_marg(int num_frames, + basalt::BundleAdjustmentBase& estimator, + basalt::AbsOrderMap& aom, + basalt::MargLinData& mld) { + static constexpr int POSE_SIZE = 6; + + get_vo_estimator(num_frames, estimator, aom); + + mld.H.setIdentity(2 * POSE_SIZE, 2 * POSE_SIZE); + mld.H *= 1e6; + + mld.b.setRandom(2 * POSE_SIZE); + mld.b *= 10; + + mld.order.abs_order_map[0] = std::make_pair(0, POSE_SIZE); + mld.order.abs_order_map[1] = std::make_pair(POSE_SIZE, POSE_SIZE); + mld.order.total_size = 2 * POSE_SIZE; + + estimator.frame_poses[0].setLinTrue(); + estimator.frame_poses[1].setLinTrue(); + + estimator.frame_poses[0].applyInc(Sophus::Vector6d::Random() / 100); + estimator.frame_poses[1].applyInc(Sophus::Vector6d::Random() / 100); +} + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +TEST(LinearizationTestSuite, VoNoMargLinearizationTest) { + using Scalar = double; + static constexpr int POSE_SIZE = 6; + static constexpr int NUM_FRAMES = 6; + + basalt::BundleAdjustmentBase estimator; + basalt::AbsOrderMap aom; + + get_vo_estimator(NUM_FRAMES, estimator, aom); + + typename basalt::LinearizationBase::Options options; + options.lb_options.huber_parameter = estimator.huber_thresh; + options.lb_options.obs_std_dev = estimator.obs_std_dev; + + Eigen::MatrixXd H_abs_qr, H_abs_sc, H_rel_sc; + Eigen::VectorXd b_abs_qr, b_abs_sc, b_rel_sc; + + options.linearization_type = basalt::LinearizationType::ABS_QR; + std::unique_ptr> l_abs_qr; + + l_abs_qr = basalt::LinearizationBase::create(&estimator, + aom, options); + Scalar error_abs_qr = l_abs_qr->linearizeProblem(); + l_abs_qr->performQR(); + + l_abs_qr->get_dense_H_b(H_abs_qr, b_abs_qr); + + options.linearization_type = basalt::LinearizationType::ABS_SC; + std::unique_ptr> l_abs_sc; + l_abs_sc = basalt::LinearizationBase::create(&estimator, + aom, options); + + Scalar error_abs_sc = l_abs_sc->linearizeProblem(); + l_abs_sc->performQR(); + + l_abs_sc->get_dense_H_b(H_abs_sc, b_abs_sc); + + 
options.linearization_type = basalt::LinearizationType::REL_SC; + std::unique_ptr> l_rel_sc; + l_rel_sc = basalt::LinearizationBase::create(&estimator, + aom, options); + + Scalar error_rel_sc = l_rel_sc->linearizeProblem(); + l_rel_sc->performQR(); + + l_rel_sc->get_dense_H_b(H_rel_sc, b_rel_sc); + + Scalar error_diff = std::abs(error_abs_qr - error_abs_sc); + Scalar H_diff = (H_abs_qr - H_abs_sc).norm(); + Scalar b_diff = (b_abs_qr - b_abs_sc).norm(); + + Scalar error_diff2 = std::abs(error_abs_qr - error_rel_sc); + Scalar H_diff2 = (H_abs_qr - H_rel_sc).norm(); + Scalar b_diff2 = (b_abs_qr - b_rel_sc).norm(); + + EXPECT_LE(error_diff, 1e-8); + EXPECT_LE(H_diff, 1e-8); + EXPECT_LE(b_diff, 1e-8); + + EXPECT_LE(error_diff2, 1e-8); + EXPECT_LE(H_diff2, 1e-8); + EXPECT_LE(b_diff2, 1e-8); +} +#endif + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +TEST(LinearizationTestSuite, VoMargLinearizationTest) { + using Scalar = double; + static constexpr int POSE_SIZE = 6; + static constexpr int NUM_FRAMES = 6; + + basalt::BundleAdjustmentBase estimator; + basalt::MargLinData mld; + basalt::AbsOrderMap aom; + + get_vo_estimator_with_marg(NUM_FRAMES, estimator, aom, mld); + + typename basalt::LinearizationBase::Options options; + options.lb_options.huber_parameter = estimator.huber_thresh; + options.lb_options.obs_std_dev = estimator.obs_std_dev; + + Eigen::MatrixXd H_abs_qr, H_abs_sc, H_rel_sc; + Eigen::VectorXd b_abs_qr, b_abs_sc, b_rel_sc; + + options.linearization_type = basalt::LinearizationType::ABS_QR; + std::unique_ptr> l_abs_qr; + + l_abs_qr = basalt::LinearizationBase::create( + &estimator, aom, options, &mld); + Scalar error_abs_qr = l_abs_qr->linearizeProblem(); + l_abs_qr->performQR(); + + l_abs_qr->get_dense_H_b(H_abs_qr, b_abs_qr); + + options.linearization_type = basalt::LinearizationType::ABS_SC; + std::unique_ptr> l_abs_sc; + l_abs_sc = basalt::LinearizationBase::create( + &estimator, aom, options, &mld); + + Scalar error_abs_sc = l_abs_sc->linearizeProblem(); + 
l_abs_sc->performQR(); + + l_abs_sc->get_dense_H_b(H_abs_sc, b_abs_sc); + + options.linearization_type = basalt::LinearizationType::REL_SC; + std::unique_ptr> l_rel_sc; + l_rel_sc = basalt::LinearizationBase::create( + &estimator, aom, options, &mld); + + Scalar error_rel_sc = l_rel_sc->linearizeProblem(); + l_rel_sc->performQR(); + + l_rel_sc->get_dense_H_b(H_rel_sc, b_rel_sc); + + Scalar error_diff = std::abs(error_abs_qr - error_abs_sc); + Scalar H_diff = (H_abs_qr - H_abs_sc).norm(); + Scalar b_diff = (b_abs_qr - b_abs_sc).norm(); + + Scalar error_diff2 = std::abs(error_abs_qr - error_rel_sc); + Scalar H_diff2 = (H_abs_qr - H_rel_sc).norm(); + Scalar b_diff2 = (b_abs_qr - b_rel_sc).norm(); + + EXPECT_LE(error_diff, 1e-8); + EXPECT_LE(H_diff, 1e-8); + EXPECT_LE(b_diff, 1e-8); + + EXPECT_LE(error_diff2, 1e-8); + EXPECT_LE(H_diff2, 1e-8); + EXPECT_LE(b_diff2, 1e-8); +} +#endif + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +TEST(LinearizationTestSuite, VoMargBacksubstituteTest) { + using Scalar = double; + static constexpr int POSE_SIZE = 6; + static constexpr int NUM_FRAMES = 6; + + basalt::BundleAdjustmentBase estimator; + basalt::MargLinData mld; + basalt::AbsOrderMap aom; + + get_vo_estimator_with_marg(NUM_FRAMES, estimator, aom, mld); + + basalt::BundleAdjustmentBase estimator2 = estimator, + estimator3 = estimator; + + typename basalt::LinearizationBase::Options options; + options.lb_options.huber_parameter = estimator.huber_thresh; + options.lb_options.obs_std_dev = estimator.obs_std_dev; + + Eigen::MatrixXd H_abs_qr, H_abs_sc, H_rel_sc; + Eigen::VectorXd b_abs_qr, b_abs_sc, b_rel_sc; + + options.linearization_type = basalt::LinearizationType::ABS_QR; + std::unique_ptr> l_abs_qr; + + l_abs_qr = basalt::LinearizationBase::create( + &estimator, aom, options, &mld); + Scalar error_abs_qr = l_abs_qr->linearizeProblem(); + l_abs_qr->performQR(); + l_abs_qr->get_dense_H_b(H_abs_qr, b_abs_qr); + + options.linearization_type = basalt::LinearizationType::ABS_SC; + 
std::unique_ptr> l_abs_sc; + l_abs_sc = basalt::LinearizationBase::create( + &estimator2, aom, options, &mld); + + Scalar error_abs_sc = l_abs_sc->linearizeProblem(); + l_abs_sc->performQR(); + l_abs_sc->get_dense_H_b(H_abs_sc, b_abs_sc); + + options.linearization_type = basalt::LinearizationType::REL_SC; + std::unique_ptr> l_rel_sc; + l_rel_sc = basalt::LinearizationBase::create( + &estimator3, aom, options, &mld); + + Scalar error_rel_sc = l_rel_sc->linearizeProblem(); + l_rel_sc->performQR(); + l_rel_sc->get_dense_H_b(H_rel_sc, b_rel_sc); + + Scalar error_diff = std::abs(error_abs_qr - error_abs_sc); + Scalar H_diff = (H_abs_qr - H_abs_sc).norm(); + Scalar b_diff = (b_abs_qr - b_abs_sc).norm(); + + Scalar error_diff2 = std::abs(error_abs_qr - error_rel_sc); + Scalar H_diff2 = (H_abs_qr - H_rel_sc).norm(); + Scalar b_diff2 = (b_abs_qr - b_rel_sc).norm(); + + EXPECT_LE(error_diff, 1e-8); + EXPECT_LE(H_diff, 1e-8); + EXPECT_LE(b_diff, 1e-8); + + EXPECT_LE(error_diff2, 1e-8); + EXPECT_LE(H_diff2, 1e-8); + EXPECT_LE(b_diff2, 1e-8); + + const Eigen::VectorXd inc = -H_rel_sc.ldlt().solve(b_rel_sc); + + Scalar l_diff1 = l_abs_qr->backSubstitute(inc); + Scalar l_diff2 = l_abs_sc->backSubstitute(inc); + Scalar l_diff3 = l_rel_sc->backSubstitute(inc); + + EXPECT_LE(abs(l_diff1 - l_diff2), 1e-8); + EXPECT_LE(abs(l_diff2 - l_diff3), 1e-8); + + Scalar error1, error2, error3; + + estimator.computeError(error1); + estimator2.computeError(error2); + estimator3.computeError(error3); + + EXPECT_LE(abs(error1 - error2), 1e-8); + EXPECT_LE(abs(error1 - error3), 1e-8); +} +#endif + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +TEST(LinearizationTestSuite, VoMargSqrtLinearizationTest) { + using Scalar = double; + static constexpr int POSE_SIZE = 6; + static constexpr int NUM_FRAMES = 6; + + basalt::BundleAdjustmentBase estimator; + basalt::MargLinData mld; + basalt::AbsOrderMap aom; + + get_vo_estimator_with_marg(NUM_FRAMES, estimator, aom, mld); + + typename 
basalt::LinearizationBase::Options options; + options.lb_options.huber_parameter = estimator.huber_thresh; + options.lb_options.obs_std_dev = estimator.obs_std_dev; + + Eigen::MatrixXd H_abs_qr, H_abs_sc, H_rel_sc, Q2TJp_abs_qr, Q2TJp_rel_sc, + Q2TJp_abs_sc; + Eigen::VectorXd b_abs_qr, b_abs_sc, b_rel_sc, Q2Tr_abs_qr, Q2Tr_rel_sc, + Q2Tr_abs_sc; + + Scalar error_abs_qr, error_abs_sc, error_rel_sc; + + { + options.linearization_type = basalt::LinearizationType::ABS_QR; + std::unique_ptr> l_abs_qr; + + l_abs_qr = basalt::LinearizationBase::create( + &estimator, aom, options, &mld); + error_abs_qr = l_abs_qr->linearizeProblem(); + l_abs_qr->performQR(); + l_abs_qr->get_dense_H_b(H_abs_qr, b_abs_qr); + l_abs_qr->get_dense_Q2Jp_Q2r(Q2TJp_abs_qr, Q2Tr_abs_qr); + } + { + options.linearization_type = basalt::LinearizationType::ABS_SC; + std::unique_ptr> l_abs_sc; + l_abs_sc = basalt::LinearizationBase::create( + &estimator, aom, options, &mld); + + error_abs_sc = l_abs_sc->linearizeProblem(); + l_abs_sc->performQR(); + l_abs_sc->get_dense_H_b(H_abs_sc, b_abs_sc); + l_abs_sc->get_dense_Q2Jp_Q2r(Q2TJp_abs_sc, Q2Tr_abs_sc); + } + + { + options.linearization_type = basalt::LinearizationType::REL_SC; + std::unique_ptr> l_rel_sc; + l_rel_sc = basalt::LinearizationBase::create( + &estimator, aom, options, &mld); + + error_rel_sc = l_rel_sc->linearizeProblem(); + l_rel_sc->performQR(); + l_rel_sc->get_dense_H_b(H_rel_sc, b_rel_sc); + l_rel_sc->get_dense_Q2Jp_Q2r(Q2TJp_rel_sc, Q2Tr_rel_sc); + } + + Scalar error_diff = std::abs(error_abs_qr - error_abs_sc); + Scalar H_diff = (H_abs_qr - H_abs_sc).norm(); + Scalar b_diff = (b_abs_qr - b_abs_sc).norm(); + + Scalar error_diff2 = std::abs(error_abs_qr - error_rel_sc); + Scalar H_diff2 = (H_abs_qr - H_rel_sc).norm(); + Scalar b_diff2 = (b_abs_qr - b_rel_sc).norm(); + + EXPECT_LE(error_diff, 1e-8); + EXPECT_LE(H_diff, 1e-8); + EXPECT_LE(b_diff, 1e-8); + + EXPECT_LE(error_diff2, 1e-8); + EXPECT_LE(H_diff2, 1e-8); + EXPECT_LE(b_diff2, 
1e-8); + + // Should hold for full rank problems + Scalar H_diff3 = (Q2TJp_abs_qr.transpose() * Q2TJp_abs_qr - H_abs_qr).norm(); + Scalar b_diff3 = (Q2TJp_abs_qr.transpose() * Q2Tr_abs_qr - b_abs_qr).norm(); + EXPECT_LE(H_diff3, 1e-3); + EXPECT_LE(b_diff3, 1e-5); + + Scalar H_diff4 = (Q2TJp_abs_sc.transpose() * Q2TJp_abs_sc - H_abs_sc).norm(); + Scalar b_diff4 = (Q2TJp_abs_sc.transpose() * Q2Tr_abs_sc - b_abs_sc).norm(); + EXPECT_LE(H_diff4, 1e-3); + EXPECT_LE(b_diff4, 1e-5); + + Scalar H_diff5 = (Q2TJp_rel_sc.transpose() * Q2TJp_rel_sc - H_rel_sc).norm(); + Scalar b_diff5 = (Q2TJp_rel_sc.transpose() * Q2Tr_rel_sc - b_rel_sc).norm(); + EXPECT_LE(H_diff5, 1e-3); + EXPECT_LE(b_diff5, 1e-5); +} +#endif diff --git a/test/src/test_nfr.cpp b/test/src/test_nfr.cpp new file mode 100644 index 0000000..5055b90 --- /dev/null +++ b/test/src/test_nfr.cpp @@ -0,0 +1,137 @@ + + +#include +#include + +#include + +#include "gtest/gtest.h" +#include "test_utils.h" + +static const double accel_std_dev = 0.23; +static const double gyro_std_dev = 0.0027; + +// Smaller noise for testing +// static const double accel_std_dev = 0.00023; +// static const double gyro_std_dev = 0.0000027; + +std::random_device rd{}; +std::mt19937 gen{rd()}; + +std::normal_distribution<> gyro_noise_dist{0, gyro_std_dev}; +std::normal_distribution<> accel_noise_dist{0, accel_std_dev}; + +TEST(PreIntegrationTestSuite, RelPoseTest) { + Sophus::SE3d T_w_i = Sophus::se3_expd(Sophus::Vector6d::Random()); + Sophus::SE3d T_w_j = Sophus::se3_expd(Sophus::Vector6d::Random()); + + Sophus::SE3d T_i_j = Sophus::se3_expd(Sophus::Vector6d::Random() / 100) * + T_w_i.inverse() * T_w_j; + + Sophus::Matrix6d d_res_d_T_w_i, d_res_d_T_w_j; + basalt::relPoseError(T_i_j, T_w_i, T_w_j, &d_res_d_T_w_i, &d_res_d_T_w_j); + + { + Sophus::Vector6d x0; + x0.setZero(); + test_jacobian( + "d_res_d_T_w_i", d_res_d_T_w_i, + [&](const Sophus::Vector6d& x) { + auto T_w_i_new = T_w_i; + basalt::PoseState::incPose(x, T_w_i_new); + + return 
basalt::relPoseError(T_i_j, T_w_i_new, T_w_j); + }, + x0); + } + + { + Sophus::Vector6d x0; + x0.setZero(); + test_jacobian( + "d_res_d_T_w_j", d_res_d_T_w_j, + [&](const Sophus::Vector6d& x) { + auto T_w_j_new = T_w_j; + basalt::PoseState::incPose(x, T_w_j_new); + + return basalt::relPoseError(T_i_j, T_w_i, T_w_j_new); + }, + x0); + } +} + +TEST(PreIntegrationTestSuite, AbsPositionTest) { + Sophus::SE3d T_w_i = Sophus::se3_expd(Sophus::Vector6d::Random()); + + Eigen::Vector3d pos = T_w_i.translation() + Eigen::Vector3d::Random() / 10; + + Sophus::Matrix d_res_d_T_w_i; + basalt::absPositionError(T_w_i, pos, &d_res_d_T_w_i); + + { + Sophus::Vector6d x0; + x0.setZero(); + test_jacobian( + "d_res_d_T_w_i", d_res_d_T_w_i, + [&](const Sophus::Vector6d& x) { + auto T_w_i_new = T_w_i; + basalt::PoseState::incPose(x, T_w_i_new); + + return basalt::absPositionError(T_w_i_new, pos); + }, + x0); + } +} + +TEST(PreIntegrationTestSuite, YawTest) { + Sophus::SE3d T_w_i = Sophus::se3_expd(Sophus::Vector6d::Random()); + + Eigen::Vector3d yaw_dir_body = + T_w_i.so3().inverse() * Eigen::Vector3d::UnitX(); + + T_w_i = Sophus::se3_expd(Sophus::Vector6d::Random() / 100) * T_w_i; + + Sophus::Matrix d_res_d_T_w_i; + basalt::yawError(T_w_i, yaw_dir_body, &d_res_d_T_w_i); + + { + Sophus::Vector6d x0; + x0.setZero(); + test_jacobian( + "d_res_d_T_w_i", d_res_d_T_w_i, + [&](const Sophus::Vector6d& x) { + auto T_w_i_new = T_w_i; + basalt::PoseState::incPose(x, T_w_i_new); + + double res = basalt::yawError(T_w_i_new, yaw_dir_body); + + return Eigen::Matrix(res); + }, + x0); + } +} + +TEST(PreIntegrationTestSuite, RollPitchTest) { + Sophus::SE3d T_w_i = Sophus::se3_expd(Sophus::Vector6d::Random()); + + Sophus::SO3d R_w_i = T_w_i.so3(); + + T_w_i = Sophus::se3_expd(Sophus::Vector6d::Random() / 100) * T_w_i; + + Sophus::Matrix d_res_d_T_w_i; + basalt::rollPitchError(T_w_i, R_w_i, &d_res_d_T_w_i); + + { + Sophus::Vector6d x0; + x0.setZero(); + test_jacobian( + "d_res_d_T_w_i", d_res_d_T_w_i, + 
[&](const Sophus::Vector6d& x) { + auto T_w_i_new = T_w_i; + basalt::PoseState::incPose(x, T_w_i_new); + + return basalt::rollPitchError(T_w_i_new, R_w_i); + }, + x0); + } +} diff --git a/test/src/test_patch.cpp b/test/src/test_patch.cpp new file mode 100644 index 0000000..6cedea2 --- /dev/null +++ b/test/src/test_patch.cpp @@ -0,0 +1,89 @@ + + +#include +#include + +#include + +#include "gtest/gtest.h" +#include "test_utils.h" + +struct SmoothFunction { + template + Scalar interp(const Eigen::Matrix& p) const { + return sin(p[0] / 100.0 + p[1] / 20.0); + } + + template + Eigen::Matrix interpGrad( + const Eigen::Matrix& p) const { + return Eigen::Matrix(sin(p[0] / 100.0 + p[1] / 20.0), + cos(p[0] / 100.0 + p[1] / 20.0) / 100.0, + cos(p[0] / 100.0 + p[1] / 20.0) / 20.0); + } + + template + BASALT_HOST_DEVICE inline bool InBounds( + const Eigen::MatrixBase& p, + const typename Derived::Scalar border) const { + return true; + } +}; + +TEST(Patch, ImageInterpolateGrad) { + Eigen::Vector2i offset(231, 123); + + SmoothFunction img; + + Eigen::Vector2d pd = offset.cast() + Eigen::Vector2d(0.4, 0.34345); + + Eigen::Vector3d val_grad = img.interpGrad(pd); + Eigen::Matrix J_x = val_grad.tail<2>(); + + test_jacobian( + "d_res_d_x", J_x, + [&](const Eigen::Vector2d& x) { + return Eigen::Matrix(img.interp(pd + x)); + }, + Eigen::Vector2d::Zero(), 1); +} + +TEST(Patch, PatchSe2Jac) { + Eigen::Vector2i offset(231, 123); + + SmoothFunction img_view; + + Eigen::Vector2d pd = offset.cast() + Eigen::Vector2d(0.4, 0.34345); + + using PatternT = basalt::Pattern52; + using PatchT = basalt::OpticalFlowPatch; + + double mean, mean2; + + PatchT::VectorP data, data2; + PatchT::MatrixP3 J_se2; + + basalt::OpticalFlowPatch>::setDataJacSe2( + img_view, pd, mean, data, J_se2); + + basalt::OpticalFlowPatch>::setData( + img_view, pd, mean2, data2); + + EXPECT_NEAR(mean, mean2, 1e-8); + EXPECT_TRUE(data.isApprox(data2)); + + test_jacobian( + "d_res_d_se2", J_se2, + [&](const Eigen::Vector3d& x) { 
+ Sophus::SE2d transform = Sophus::SE2d::exp(x); + + double mean3; + PatchT::VectorP data3; + + basalt::OpticalFlowPatch>::setData( + img_view, pd, mean3, data3, &transform); + + return data3; + }, + Eigen::Vector3d::Zero()); +} diff --git a/test/src/test_qr.cpp b/test/src/test_qr.cpp new file mode 100644 index 0000000..4df65cd --- /dev/null +++ b/test/src/test_qr.cpp @@ -0,0 +1,127 @@ + +#include +#include + +#include + +#include "gtest/gtest.h" + +TEST(QRTestSuite, QRvsLLT) { + Eigen::MatrixXd J; + J.setRandom(10, 6); + + Eigen::HouseholderQR qr(J); + Eigen::MatrixXd R = qr.matrixQR().triangularView(); + Eigen::MatrixXd LT = (J.transpose() * J).llt().matrixU(); + + std::cout << "J\n" << J << "\ncol_norms: " << J.colwise().norm() << std::endl; + std::cout << "R\n" << R << "\ncol_norms: " << R.colwise().norm() << std::endl; + std::cout << "LT\n" + << LT << "\ncol_norms: " << LT.colwise().norm() << std::endl; +} + +TEST(QRTestSuite, QRvsLLTRankDef) { + Eigen::MatrixXd J; + J.setRandom(10, 6); + + J.col(2) = J.col(4); + + Eigen::HouseholderQR qr(J); + Eigen::MatrixXd R = qr.matrixQR().triangularView(); + Eigen::MatrixXd LT = (J.transpose() * J).llt().matrixU(); + + std::cout << "J\n" << J << "\ncol_norms: " << J.colwise().norm() << std::endl; + std::cout << "R\n" << R << "\ncol_norms: " << R.colwise().norm() << std::endl; + std::cout << "LT\n" + << LT << "\ncol_norms: " << LT.colwise().norm() << std::endl; +} + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +TEST(QRTestSuite, RankDefLeastSquares) { + Eigen::MatrixXd J; + Eigen::VectorXd r; + J.setRandom(10, 6); + r.setRandom(10); + + J.col(1) = J.col(4); + + auto decomp = J.completeOrthogonalDecomposition(); + + Eigen::VectorXd original_solution = decomp.solve(r); + + std::cout << "full solution: " << original_solution.transpose() << std::endl; + std::cout << "Rank " << decomp.rank() << std::endl; + + std::cout << "sol OR:\t" << original_solution.tail<4>().transpose() + << std::endl; + + std::set idx_to_marg = {0, 1}; + 
std::set idx_to_keep = {2, 3, 4, 5}; + + Eigen::VectorXd sol_sc, sol_sqrt_sc, sol_sqrt_sc2, sol_qr; + + // sqrt SC version + { + Eigen::MatrixXd H = J.transpose() * J; + Eigen::VectorXd b = J.transpose() * r; + + Eigen::MatrixXd marg_sqrt_H; + Eigen::VectorXd marg_sqrt_b; + + basalt::MargHelper::marginalizeHelperSqToSqrt( + H, b, idx_to_keep, idx_to_marg, marg_sqrt_H, marg_sqrt_b); + + auto dec = marg_sqrt_H.completeOrthogonalDecomposition(); + + sol_sqrt_sc = dec.solve(marg_sqrt_b); + std::cout << "sol SQ:\t" << sol_sqrt_sc.transpose() << std::endl; + std::cout << "rank " << dec.rank() << std::endl; + + auto dec2 = (marg_sqrt_H.transpose() * marg_sqrt_H) + .completeOrthogonalDecomposition(); + + sol_sqrt_sc2 = dec2.solve(marg_sqrt_H.transpose() * marg_sqrt_b); + std::cout << "sol SH:\t" << sol_sqrt_sc2.transpose() << std::endl; + std::cout << "rank " << dec2.rank() << std::endl; + } + + // SC version + { + Eigen::MatrixXd H = J.transpose() * J; + Eigen::VectorXd b = J.transpose() * r; + + Eigen::MatrixXd marg_H; + Eigen::VectorXd marg_b; + + basalt::MargHelper::marginalizeHelperSqToSq( + H, b, idx_to_keep, idx_to_marg, marg_H, marg_b); + + auto dec = marg_H.completeOrthogonalDecomposition(); + + sol_sc = dec.solve(marg_b); + std::cout << "sol SC:\t" << sol_sc.transpose() << std::endl; + std::cout << "rank " << dec.rank() << std::endl; + } + + // QR version + { + Eigen::MatrixXd J1 = J; + Eigen::VectorXd r1 = r; + + Eigen::MatrixXd marg_sqrt_H; + Eigen::VectorXd marg_sqrt_b; + + basalt::MargHelper::marginalizeHelperSqrtToSqrt( + J1, r1, idx_to_keep, idx_to_marg, marg_sqrt_H, marg_sqrt_b); + + auto dec = marg_sqrt_H.completeOrthogonalDecomposition(); + + sol_qr = dec.solve(marg_sqrt_b); + std::cout << "sol QR:\t" << sol_qr.transpose() << std::endl; + std::cout << "rank " << dec.rank() << std::endl; + } + + EXPECT_TRUE(sol_qr.isApprox(sol_sc)); + EXPECT_TRUE(sol_qr.isApprox(sol_sqrt_sc2)); +} +#endif diff --git a/test/src/test_spline_opt.cpp 
b/test/src/test_spline_opt.cpp new file mode 100644 index 0000000..d0f97e1 --- /dev/null +++ b/test/src/test_spline_opt.cpp @@ -0,0 +1,107 @@ + +#include + +#include + +#include "gtest/gtest.h" +#include "test_utils.h" + +TEST(SplineOpt, SplineOptTest) { + int num_knots = 15; + + basalt::CalibAccelBias accel_bias_full; + accel_bias_full.setRandom(); + + basalt::CalibGyroBias gyro_bias_full; + gyro_bias_full.setRandom(); + + Eigen::Vector3d g(0, 0, -9.81); + + basalt::Se3Spline<5> gt_spline(int64_t(2e9)); + gt_spline.genRandomTrajectory(num_knots); + + basalt::SplineOptimization<5, double> spline_opt(int64_t(2e9)); + + int64_t pose_dt_ns = 1e8; + for (int64_t t_ns = pose_dt_ns / 2; t_ns < gt_spline.maxTimeNs(); + t_ns += pose_dt_ns) { + Sophus::SE3d pose_gt = gt_spline.pose(t_ns); + + spline_opt.addPoseMeasurement(t_ns, pose_gt); + } + + int64_t dt_ns = 1e7; + for (int64_t t_ns = 0; t_ns < gt_spline.maxTimeNs(); t_ns += dt_ns) { + Sophus::SE3d pose = gt_spline.pose(t_ns); + Eigen::Vector3d accel_body = + pose.so3().inverse() * (gt_spline.transAccelWorld(t_ns) + g); + + // accel_body + accel_bias = (I + scale) * meas + + spline_opt.addAccelMeasurement( + t_ns, accel_bias_full.invertCalibration(accel_body)); + } + + for (int64_t t_ns = 0; t_ns < gt_spline.maxTimeNs(); t_ns += dt_ns) { + Eigen::Vector3d rot_vel_body = gt_spline.rotVelBody(t_ns); + + spline_opt.addGyroMeasurement( + t_ns, gyro_bias_full.invertCalibration(rot_vel_body)); + } + + spline_opt.resetCalib(0, {}); + + spline_opt.initSpline(gt_spline); + spline_opt.setG(g + Eigen::Vector3d::Random() / 10); + spline_opt.init(); + + double error; + double reprojection_error; + int num_inliers; + for (int i = 0; i < 10; i++) + spline_opt.optimize(false, true, false, false, true, false, 0.002, 1e-10, + error, num_inliers, reprojection_error); + + ASSERT_TRUE( + spline_opt.getAccelBias().getParam().isApprox(accel_bias_full.getParam())) + << "spline_opt.getCalib().accel_bias " + << 
spline_opt.getGyroBias().getParam().transpose() << " and accel_bias " + << accel_bias_full.getParam().transpose() << " are not the same"; + + ASSERT_TRUE( + spline_opt.getGyroBias().getParam().isApprox(gyro_bias_full.getParam())) + << "spline_opt.getCalib().gyro_bias " + << spline_opt.getGyroBias().getParam().transpose() << " and gyro_bias " + << gyro_bias_full.getParam().transpose() << " are not the same"; + + ASSERT_TRUE(spline_opt.getG().isApprox(g)) + << "spline_opt.getG() " << spline_opt.getG().transpose() << " and g " + << g.transpose() << " are not the same"; + + for (int64_t t_ns = 0; t_ns < gt_spline.maxTimeNs(); t_ns += 1e7) { + Sophus::SE3d pose_gt = gt_spline.pose(t_ns); + Sophus::SE3d pose = spline_opt.getSpline().pose(t_ns); + + Eigen::Vector3d pos_gt = pose_gt.translation(); + Eigen::Vector3d pos = pose.translation(); + + Eigen::Quaterniond quat_gt = pose_gt.unit_quaternion(); + Eigen::Quaterniond quat = pose.unit_quaternion(); + + Eigen::Vector3d accel_gt = gt_spline.transAccelWorld(t_ns); + Eigen::Vector3d accel = spline_opt.getSpline().transAccelWorld(t_ns); + + Eigen::Vector3d gyro_gt = gt_spline.rotVelBody(t_ns); + Eigen::Vector3d gyro = spline_opt.getSpline().rotVelBody(t_ns); + + ASSERT_TRUE(pos_gt.isApprox(pos)) << "pos_gt and pos are not the same"; + + ASSERT_TRUE(quat_gt.angularDistance(quat) < 1e-2) + << "quat_gt and quat are not the same"; + + ASSERT_TRUE(accel_gt.isApprox(accel)) + << "accel_gt and accel are not the same"; + + ASSERT_TRUE(gyro_gt.isApprox(gyro)) << "gyro_gt and gyro are not the same"; + } +} diff --git a/test/src/test_vio.cpp b/test/src/test_vio.cpp new file mode 100644 index 0000000..34d8ec9 --- /dev/null +++ b/test/src/test_vio.cpp @@ -0,0 +1,430 @@ + + +#include +#include +#include +#include +#include + +#include + +#include "gtest/gtest.h" +#include "test_utils.h" + +static const double accel_std_dev = 0.23; +static const double gyro_std_dev = 0.0027; + +// Smaller noise for testing +// static const double 
accel_std_dev = 0.00023; +// static const double gyro_std_dev = 0.0000027; + +std::random_device rd{}; +std::mt19937 gen{rd()}; + +std::normal_distribution<> gyro_noise_dist{0, gyro_std_dev}; +std::normal_distribution<> accel_noise_dist{0, accel_std_dev}; + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +TEST(VioTestSuite, ImuNullspace2Test) { + int num_knots = 15; + + Eigen::Vector3d bg, ba; + bg = Eigen::Vector3d::Random() / 100; + ba = Eigen::Vector3d::Random() / 10; + + basalt::IntegratedImuMeasurement imu_meas(0, bg, ba); + + basalt::Se3Spline<5> gt_spline(int64_t(10e9)); + gt_spline.genRandomTrajectory(num_knots); + + basalt::PoseVelBiasState state0, state1, state1_gt; + + state0.t_ns = 0; + state0.T_w_i = gt_spline.pose(int64_t(0)); + state0.vel_w_i = gt_spline.transVelWorld(int64_t(0)); + state0.bias_gyro = bg; + state0.bias_accel = ba; + + Eigen::Vector3d accel_cov, gyro_cov; + accel_cov.setConstant(accel_std_dev * accel_std_dev); + gyro_cov.setConstant(gyro_std_dev * gyro_std_dev); + + int64_t dt_ns = 1e7; + for (int64_t t_ns = dt_ns / 2; + t_ns < int64_t(1e8); // gt_spline.maxTimeNs() - int64_t(1e9); + t_ns += dt_ns) { + Sophus::SE3d pose = gt_spline.pose(t_ns); + Eigen::Vector3d accel_body = + pose.so3().inverse() * + (gt_spline.transAccelWorld(t_ns) - basalt::constants::g); + Eigen::Vector3d rot_vel_body = gt_spline.rotVelBody(t_ns); + + basalt::ImuData data; + data.accel = accel_body + ba; + data.gyro = rot_vel_body + bg; + + data.accel[0] += accel_noise_dist(gen); + data.accel[1] += accel_noise_dist(gen); + data.accel[2] += accel_noise_dist(gen); + + data.gyro[0] += gyro_noise_dist(gen); + data.gyro[1] += gyro_noise_dist(gen); + data.gyro[2] += gyro_noise_dist(gen); + + data.t_ns = t_ns + dt_ns / 2; // measurement in the middle of the interval; + + imu_meas.integrate(data, accel_cov, gyro_cov); + } + + state1.t_ns = imu_meas.get_dt_ns(); + state1.T_w_i = gt_spline.pose(imu_meas.get_dt_ns()) * + Sophus::se3_expd(Sophus::Vector6d::Random() / 10); + 
state1.vel_w_i = gt_spline.transVelWorld(imu_meas.get_dt_ns()) + + Sophus::Vector3d::Random() / 10; + state1.bias_gyro = bg; + state1.bias_accel = ba; + + Eigen::Vector3d gyro_weight_sqrt; + gyro_weight_sqrt.setConstant(1e3); + + Eigen::Vector3d accel_weight_sqrt; + accel_weight_sqrt.setConstant(1e3); + + Eigen::aligned_map> + imu_meas_vec; + Eigen::aligned_map> + frame_states; + Eigen::aligned_map> frame_poses; + + imu_meas_vec[state0.t_ns] = imu_meas; + frame_states[state0.t_ns] = basalt::PoseVelBiasStateWithLin(state0); + frame_states[state1.t_ns] = basalt::PoseVelBiasStateWithLin(state1); + + int asize = 30; + + basalt::AbsOrderMap aom; + aom.total_size = 30; + aom.items = 2; + aom.abs_order_map[state0.t_ns] = std::make_pair(0, 15); + aom.abs_order_map[state1.t_ns] = std::make_pair(15, 15); + + basalt::ImuLinData ild = {basalt::constants::g, + gyro_weight_sqrt, + accel_weight_sqrt, + {std::make_pair(state0.t_ns, &imu_meas)}}; + + basalt::DenseAccumulator accum; + accum.reset(aom.total_size); + + basalt::ImuBlock ib(&imu_meas, &ild, aom); + double e0 = ib.linearizeImu(frame_states); + ib.add_dense_H_b(accum); + + // Check quadratic approximation + for (int i = 0; i < 10; i++) { + Eigen::VectorXd rand_inc; + rand_inc.setRandom(asize); + rand_inc.normalize(); + rand_inc /= 10000; + + auto frame_states_copy = frame_states; + frame_states_copy[state0.t_ns].applyInc(rand_inc.segment<15>(0)); + frame_states_copy[state1.t_ns].applyInc(rand_inc.segment<15>(15)); + + double imu_error_u, bg_error_u, ba_error_u; + basalt::ScBundleAdjustmentBase::computeImuError( + aom, imu_error_u, bg_error_u, ba_error_u, frame_states_copy, + imu_meas_vec, gyro_weight_sqrt.array().square(), + accel_weight_sqrt.array().square(), basalt::constants::g); + + double e1 = imu_error_u + bg_error_u + ba_error_u - e0; + + double e2 = 0.5 * rand_inc.transpose() * accum.getH() * rand_inc; + e2 += rand_inc.transpose() * accum.getB(); + + EXPECT_LE(std::abs(e1 - e2), 2e-2) << "e1 " << e1 << " e2 " << 
e2; + } + + std::cout << "=========================================" << std::endl; + Eigen::VectorXd null_res = + basalt::ScBundleAdjustmentBase::checkNullspace( + accum.getH(), accum.getB(), aom, frame_states, frame_poses); + std::cout << "=========================================" << std::endl; + + EXPECT_LE(std::abs(null_res[0]), 1e-8); + EXPECT_LE(std::abs(null_res[1]), 1e-8); + EXPECT_LE(std::abs(null_res[2]), 1e-8); + EXPECT_LE(std::abs(null_res[5]), 1e-6); +} +#endif + +#ifdef BASALT_INSTANTIATIONS_DOUBLE +TEST(VioTestSuite, ImuNullspace3Test) { + int num_knots = 15; + + Eigen::Vector3d bg, ba; + bg = Eigen::Vector3d::Random() / 100; + ba = Eigen::Vector3d::Random() / 10; + + basalt::IntegratedImuMeasurement imu_meas1(0, bg, ba); + + basalt::Se3Spline<5> gt_spline(int64_t(10e9)); + gt_spline.genRandomTrajectory(num_knots); + + basalt::PoseVelBiasState state0, state1, state2; + + state0.t_ns = 0; + state0.T_w_i = gt_spline.pose(int64_t(0)); + state0.vel_w_i = gt_spline.transVelWorld(int64_t(0)); + state0.bias_gyro = bg; + state0.bias_accel = ba; + + Eigen::Vector3d accel_cov, gyro_cov; + accel_cov.setConstant(accel_std_dev * accel_std_dev); + gyro_cov.setConstant(gyro_std_dev * gyro_std_dev); + + int64_t dt_ns = 1e7; + for (int64_t t_ns = dt_ns / 2; + t_ns < int64_t(1e9); // gt_spline.maxTimeNs() - int64_t(1e9); + t_ns += dt_ns) { + Sophus::SE3d pose = gt_spline.pose(t_ns); + Eigen::Vector3d accel_body = + pose.so3().inverse() * + (gt_spline.transAccelWorld(t_ns) - basalt::constants::g); + Eigen::Vector3d rot_vel_body = gt_spline.rotVelBody(t_ns); + + basalt::ImuData data; + data.accel = accel_body + ba; + data.gyro = rot_vel_body + bg; + + data.accel[0] += accel_noise_dist(gen); + data.accel[1] += accel_noise_dist(gen); + data.accel[2] += accel_noise_dist(gen); + + data.gyro[0] += gyro_noise_dist(gen); + data.gyro[1] += gyro_noise_dist(gen); + data.gyro[2] += gyro_noise_dist(gen); + + data.t_ns = t_ns + dt_ns / 2; // measurement in the middle of the 
interval; + + imu_meas1.integrate(data, accel_cov, gyro_cov); + } + + basalt::IntegratedImuMeasurement imu_meas2(imu_meas1.get_dt_ns(), bg, + ba); + for (int64_t t_ns = imu_meas1.get_dt_ns() + dt_ns / 2; + t_ns < int64_t(2e9); // gt_spline.maxTimeNs() - int64_t(1e9); + t_ns += dt_ns) { + Sophus::SE3d pose = gt_spline.pose(t_ns); + Eigen::Vector3d accel_body = + pose.so3().inverse() * + (gt_spline.transAccelWorld(t_ns) - basalt::constants::g); + Eigen::Vector3d rot_vel_body = gt_spline.rotVelBody(t_ns); + + basalt::ImuData data; + data.accel = accel_body + ba; + data.gyro = rot_vel_body + bg; + + data.accel[0] += accel_noise_dist(gen); + data.accel[1] += accel_noise_dist(gen); + data.accel[2] += accel_noise_dist(gen); + + data.gyro[0] += gyro_noise_dist(gen); + data.gyro[1] += gyro_noise_dist(gen); + data.gyro[2] += gyro_noise_dist(gen); + + data.t_ns = t_ns + dt_ns / 2; // measurement in the middle of the interval; + + imu_meas2.integrate(data, accel_cov, gyro_cov); + } + + state1.t_ns = imu_meas1.get_dt_ns(); + state1.T_w_i = gt_spline.pose(state1.t_ns) * + Sophus::se3_expd(Sophus::Vector6d::Random() / 10); + state1.vel_w_i = + gt_spline.transVelWorld(state1.t_ns) + Sophus::Vector3d::Random() / 10; + state1.bias_gyro = bg; + state1.bias_accel = ba; + + state2.t_ns = imu_meas1.get_dt_ns() + imu_meas2.get_dt_ns(); + state2.T_w_i = gt_spline.pose(state2.t_ns) * + Sophus::se3_expd(Sophus::Vector6d::Random() / 10); + state2.vel_w_i = + gt_spline.transVelWorld(state2.t_ns) + Sophus::Vector3d::Random() / 10; + state2.bias_gyro = bg; + state2.bias_accel = ba; + + Eigen::Vector3d gyro_weight_sqrt; + gyro_weight_sqrt.setConstant(1e3); + + Eigen::Vector3d accel_weight_sqrt; + accel_weight_sqrt.setConstant(1e3); + + Eigen::aligned_map> + imu_meas_vec; + Eigen::aligned_map> + frame_states; + Eigen::aligned_map> frame_poses; + + imu_meas_vec[imu_meas1.get_start_t_ns()] = imu_meas1; + imu_meas_vec[imu_meas2.get_start_t_ns()] = imu_meas2; + frame_states[state0.t_ns] = 
basalt::PoseVelBiasStateWithLin(state0); + frame_states[state1.t_ns] = basalt::PoseVelBiasStateWithLin(state1); + frame_states[state2.t_ns] = basalt::PoseVelBiasStateWithLin(state2); + + int asize = 45; + + basalt::AbsOrderMap aom; + aom.total_size = asize; + aom.items = 2; + aom.abs_order_map[state0.t_ns] = std::make_pair(0, 15); + aom.abs_order_map[state1.t_ns] = std::make_pair(15, 15); + aom.abs_order_map[state2.t_ns] = std::make_pair(30, 15); + + basalt::ImuLinData ild = {basalt::constants::g, + gyro_weight_sqrt, + accel_weight_sqrt, + { + std::make_pair(state0.t_ns, &imu_meas1), + std::make_pair(state1.t_ns, &imu_meas2), + }}; + + basalt::DenseAccumulator accum; + accum.reset(aom.total_size); + + basalt::ImuBlock ib1(&imu_meas1, &ild, aom), + ib2(&imu_meas2, &ild, aom); + ib1.linearizeImu(frame_states); + ib2.linearizeImu(frame_states); + ib1.add_dense_H_b(accum); + ib2.add_dense_H_b(accum); + + std::cout << "=========================================" << std::endl; + Eigen::VectorXd null_res = + basalt::ScBundleAdjustmentBase::checkNullspace( + accum.getH(), accum.getB(), aom, frame_states, frame_poses); + std::cout << "=========================================" << std::endl; + + EXPECT_LE(std::abs(null_res[0]), 1e-8); + EXPECT_LE(std::abs(null_res[1]), 1e-8); + EXPECT_LE(std::abs(null_res[2]), 1e-8); + EXPECT_LE(std::abs(null_res[5]), 1e-6); +} +#endif + +TEST(VioTestSuite, RelPoseTest) { + Sophus::SE3d T_w_i_h = Sophus::se3_expd(Sophus::Vector6d::Random()); + Sophus::SE3d T_w_i_t = Sophus::se3_expd(Sophus::Vector6d::Random()); + + Sophus::SE3d T_i_c_h = Sophus::se3_expd(Sophus::Vector6d::Random() / 10); + Sophus::SE3d T_i_c_t = Sophus::se3_expd(Sophus::Vector6d::Random() / 10); + + Sophus::Matrix6d d_rel_d_h, d_rel_d_t; + + Sophus::SE3d T_t_h_sophus = basalt::computeRelPose( + T_w_i_h, T_i_c_h, T_w_i_t, T_i_c_t, &d_rel_d_h, &d_rel_d_t); + + { + Sophus::Vector6d x0; + x0.setZero(); + test_jacobian( + "d_rel_d_h", d_rel_d_h, + [&](const Sophus::Vector6d& x) { 
+ Sophus::SE3d T_w_h_new = T_w_i_h; + basalt::PoseState::incPose(x, T_w_h_new); + + Sophus::SE3d T_t_h_sophus_new = + basalt::computeRelPose(T_w_h_new, T_i_c_h, T_w_i_t, T_i_c_t); + + return Sophus::se3_logd(T_t_h_sophus_new * T_t_h_sophus.inverse()); + }, + x0); + } + + { + Sophus::Vector6d x0; + x0.setZero(); + test_jacobian( + "d_rel_d_t", d_rel_d_t, + [&](const Sophus::Vector6d& x) { + Sophus::SE3d T_w_t_new = T_w_i_t; + basalt::PoseState::incPose(x, T_w_t_new); + + Sophus::SE3d T_t_h_sophus_new = + basalt::computeRelPose(T_w_i_h, T_i_c_h, T_w_t_new, T_i_c_t); + return Sophus::se3_logd(T_t_h_sophus_new * T_t_h_sophus.inverse()); + }, + x0); + } +} + +TEST(VioTestSuite, LinearizePointsTest) { + basalt::ExtendedUnifiedCamera cam = + basalt::ExtendedUnifiedCamera::getTestProjections()[0]; + + basalt::Keypoint kpt_pos; + + Eigen::Vector4d point3d; + cam.unproject(Eigen::Vector2d::Random() * 50, point3d); + kpt_pos.direction = basalt::StereographicParam::project(point3d); + kpt_pos.inv_dist = 0.1231231; + + Sophus::SE3d T_w_h = Sophus::se3_expd(Sophus::Vector6d::Random() / 100); + Sophus::SE3d T_w_t = Sophus::se3_expd(Sophus::Vector6d::Random() / 100); + T_w_t.translation()[0] += 0.1; + + Sophus::SE3d T_t_h_sophus = T_w_t.inverse() * T_w_h; + Eigen::Matrix4d T_t_h = T_t_h_sophus.matrix(); + + Eigen::Vector4d p_trans; + p_trans = basalt::StereographicParam::unproject(kpt_pos.direction); + p_trans(3) = kpt_pos.inv_dist; + + p_trans = T_t_h * p_trans; + + basalt::KeypointObservation kpt_obs; + cam.project(p_trans, kpt_obs.pos); + + Eigen::Vector2d res; + Eigen::Matrix d_res_d_xi; + Eigen::Matrix d_res_d_p; + + basalt::linearizePoint(kpt_obs.pos, kpt_pos, T_t_h, cam, res, &d_res_d_xi, + &d_res_d_p); + + { + Sophus::Vector6d x0; + x0.setZero(); + test_jacobian( + "d_res_d_xi", d_res_d_xi, + [&](const Sophus::Vector6d& x) { + Eigen::Matrix4d T_t_h_new = + (Sophus::se3_expd(x) * T_t_h_sophus).matrix(); + + Eigen::Vector2d res; + basalt::linearizePoint(kpt_obs.pos, kpt_pos, 
T_t_h_new, cam, res); + + return res; + }, + x0); + } + + { + Eigen::Vector3d x0; + x0.setZero(); + test_jacobian( + "d_res_d_p", d_res_d_p, + [&](const Eigen::Vector3d& x) { + basalt::Keypoint kpt_pos_new = kpt_pos; + + kpt_pos_new.direction += x.head<2>(); + kpt_pos_new.inv_dist += x[2]; + + Eigen::Vector2d res; + basalt::linearizePoint(kpt_obs.pos, kpt_pos_new, T_t_h, cam, res); + + return res; + }, + x0); + } +} diff --git a/thirdparty/CLI11/.all-contributorsrc b/thirdparty/CLI11/.all-contributorsrc new file mode 100644 index 0000000..f699ad2 --- /dev/null +++ b/thirdparty/CLI11/.all-contributorsrc @@ -0,0 +1,447 @@ +{ + "projectName": "CLI11", + "projectOwner": "CLIUtils", + "repoType": "github", + "repoHost": "https://github.com", + "files": [ + "README.md" + ], + "imageSize": 100, + "commit": true, + "commitConvention": "atom", + "contributors": [ + { + "login": "henryiii", + "name": "Henry Schreiner", + "avatar_url": "https://avatars1.githubusercontent.com/u/4616906?v=4", + "profile": "http://iscinumpy.gitlab.io", + "contributions": [ + "bug", + "doc", + "code" + ] + }, + { + "login": "phlptp", + "name": "Philip Top", + "avatar_url": "https://avatars0.githubusercontent.com/u/20667153?v=4", + "profile": "https://github.com/phlptp", + "contributions": [ + "bug", + "doc", + "code" + ] + }, + { + "login": "cbachhuber", + "name": "Christoph Bachhuber", + "avatar_url": "https://avatars0.githubusercontent.com/u/27212661?v=4", + "profile": "https://www.linkedin.com/in/cbachhuber/", + "contributions": [ + "example", + "code" + ] + }, + { + "login": "lambdafu", + "name": "Marcus Brinkmann", + "avatar_url": "https://avatars1.githubusercontent.com/u/1138455?v=4", + "profile": "https://lambdafu.net/", + "contributions": [ + "bug", + "code" + ] + }, + { + "login": "SkyToGround", + "name": "Jonas Nilsson", + "avatar_url": "https://avatars1.githubusercontent.com/u/58835?v=4", + "profile": "https://github.com/SkyToGround", + "contributions": [ + "bug", + "code" + ] + }, + { 
+ "login": "dvj", + "name": "Doug Johnston", + "avatar_url": "https://avatars2.githubusercontent.com/u/77217?v=4", + "profile": "https://github.com/dvj", + "contributions": [ + "bug", + "code" + ] + }, + { + "login": "lczech", + "name": "Lucas Czech", + "avatar_url": "https://avatars0.githubusercontent.com/u/4741887?v=4", + "profile": "http://lucas-czech.de", + "contributions": [ + "bug", + "code" + ] + }, + { + "login": "rafiw", + "name": "Rafi Wiener", + "avatar_url": "https://avatars3.githubusercontent.com/u/3034707?v=4", + "profile": "https://github.com/rafiw", + "contributions": [ + "bug", + "code" + ] + }, + { + "login": "mensinda", + "name": "Daniel Mensinger", + "avatar_url": "https://avatars3.githubusercontent.com/u/3407462?v=4", + "profile": "https://github.com/mensinda", + "contributions": [ + "platform" + ] + }, + { + "login": "jbriales", + "name": "Jesus Briales", + "avatar_url": "https://avatars1.githubusercontent.com/u/6850478?v=4", + "profile": "https://github.com/jbriales", + "contributions": [ + "code", + "bug" + ] + }, + { + "login": "seanfisk", + "name": "Sean Fisk", + "avatar_url": "https://avatars0.githubusercontent.com/u/410322?v=4", + "profile": "https://seanfisk.com/", + "contributions": [ + "bug", + "code" + ] + }, + { + "login": "fpeng1985", + "name": "fpeng1985", + "avatar_url": "https://avatars1.githubusercontent.com/u/87981?v=4", + "profile": "https://github.com/fpeng1985", + "contributions": [ + "code" + ] + }, + { + "login": "almikhayl", + "name": "almikhayl", + "avatar_url": "https://avatars2.githubusercontent.com/u/6747040?v=4", + "profile": "https://github.com/almikhayl", + "contributions": [ + "code", + "platform" + ] + }, + { + "login": "andrew-hardin", + "name": "Andrew Hardin", + "avatar_url": "https://avatars0.githubusercontent.com/u/16496326?v=4", + "profile": "https://github.com/andrew-hardin", + "contributions": [ + "code" + ] + }, + { + "login": "SX91", + "name": "Anton", + "avatar_url": 
"https://avatars2.githubusercontent.com/u/754754?v=4", + "profile": "https://github.com/SX91", + "contributions": [ + "code" + ] + }, + { + "login": "helmesjo", + "name": "Fred Helmesjö", + "avatar_url": "https://avatars0.githubusercontent.com/u/2501070?v=4", + "profile": "https://github.com/helmesjo", + "contributions": [ + "bug", + "code" + ] + }, + { + "login": "skannan89", + "name": "Kannan", + "avatar_url": "https://avatars0.githubusercontent.com/u/11918764?v=4", + "profile": "https://github.com/skannan89", + "contributions": [ + "bug", + "code" + ] + }, + { + "login": "kraj", + "name": "Khem Raj", + "avatar_url": "https://avatars3.githubusercontent.com/u/465279?v=4", + "profile": "http://himvis.com", + "contributions": [ + "code" + ] + }, + { + "login": "mogigoma", + "name": "Mak Kolybabi", + "avatar_url": "https://avatars2.githubusercontent.com/u/130862?v=4", + "profile": "https://www.mogigoma.com/", + "contributions": [ + "doc" + ] + }, + { + "login": "msoeken", + "name": "Mathias Soeken", + "avatar_url": "https://avatars0.githubusercontent.com/u/1998245?v=4", + "profile": "http://msoeken.github.io", + "contributions": [ + "doc" + ] + }, + { + "login": "nathanhourt", + "name": "Nathan Hourt", + "avatar_url": "https://avatars2.githubusercontent.com/u/271977?v=4", + "profile": "https://github.com/nathanhourt", + "contributions": [ + "bug", + "code" + ] + }, + { + "login": "pleroux0", + "name": "Paul le Roux", + "avatar_url": "https://avatars2.githubusercontent.com/u/39619854?v=4", + "profile": "https://github.com/pleroux0", + "contributions": [ + "code", + "platform" + ] + }, + { + "login": "chfast", + "name": "Paweł Bylica", + "avatar_url": "https://avatars1.githubusercontent.com/u/573380?v=4", + "profile": "https://github.com/chfast", + "contributions": [ + "platform" + ] + }, + { + "login": "peterazmanov", + "name": "Peter Azmanov", + "avatar_url": "https://avatars0.githubusercontent.com/u/15322318?v=4", + "profile": "https://github.com/peterazmanov", + 
"contributions": [ + "code" + ] + }, + { + "login": "delpinux", + "name": "Stéphane Del Pino", + "avatar_url": "https://avatars0.githubusercontent.com/u/35096584?v=4", + "profile": "https://github.com/delpinux", + "contributions": [ + "code" + ] + }, + { + "login": "metopa", + "name": "Viacheslav Kroilov", + "avatar_url": "https://avatars2.githubusercontent.com/u/3974178?v=4", + "profile": "https://github.com/metopa", + "contributions": [ + "code" + ] + }, + { + "login": "ChristosT", + "name": "christos", + "avatar_url": "https://avatars0.githubusercontent.com/u/6725596?v=4", + "profile": "http://cs.odu.edu/~ctsolakis", + "contributions": [ + "code" + ] + }, + { + "login": "deining", + "name": "deining", + "avatar_url": "https://avatars3.githubusercontent.com/u/18169566?v=4", + "profile": "https://github.com/deining", + "contributions": [ + "doc" + ] + }, + { + "login": "elszon", + "name": "elszon", + "avatar_url": "https://avatars0.githubusercontent.com/u/2971495?v=4", + "profile": "https://github.com/elszon", + "contributions": [ + "code" + ] + }, + { + "login": "ncihnegn", + "name": "ncihnegn", + "avatar_url": "https://avatars3.githubusercontent.com/u/12021721?v=4", + "profile": "https://github.com/ncihnegn", + "contributions": [ + "code" + ] + }, + { + "login": "nurelin", + "name": "nurelin", + "avatar_url": "https://avatars3.githubusercontent.com/u/5276274?v=4", + "profile": "https://github.com/nurelin", + "contributions": [ + "code" + ] + }, + { + "login": "ryan4729", + "name": "ryan4729", + "avatar_url": "https://avatars3.githubusercontent.com/u/40183301?v=4", + "profile": "https://github.com/ryan4729", + "contributions": [ + "test" + ] + }, + { + "login": "slurps-mad-rips", + "name": "Isabella Muerte", + "avatar_url": "https://avatars0.githubusercontent.com/u/63051?v=4", + "profile": "https://izzys.casa", + "contributions": [ + "platform" + ] + }, + { + "login": "KOLANICH", + "name": "KOLANICH", + "avatar_url": 
"https://avatars1.githubusercontent.com/u/240344?v=4", + "profile": "https://github.com/KOLANICH", + "contributions": [ + "platform" + ] + }, + { + "login": "jgerityneurala", + "name": "James Gerity", + "avatar_url": "https://avatars2.githubusercontent.com/u/57360646?v=4", + "profile": "https://github.com/jgerityneurala", + "contributions": [ + "doc" + ] + }, + { + "login": "jsoref", + "name": "Josh Soref", + "avatar_url": "https://avatars0.githubusercontent.com/u/2119212?v=4", + "profile": "https://github.com/jsoref", + "contributions": [ + "tool" + ] + }, + { + "login": "geir-t", + "name": "geir-t", + "avatar_url": "https://avatars3.githubusercontent.com/u/35292136?v=4", + "profile": "https://github.com/geir-t", + "contributions": [ + "platform" + ] + }, + { + "login": "certik", + "name": "Ondřej Čertík", + "avatar_url": "https://avatars3.githubusercontent.com/u/20568?v=4", + "profile": "https://ondrejcertik.com/", + "contributions": [ + "bug" + ] + }, + { + "login": "samhocevar", + "name": "Sam Hocevar", + "avatar_url": "https://avatars2.githubusercontent.com/u/245089?v=4", + "profile": "http://sam.hocevar.net/", + "contributions": [ + "code" + ] + }, + { + "login": "rcurtin", + "name": "Ryan Curtin", + "avatar_url": "https://avatars0.githubusercontent.com/u/1845039?v=4", + "profile": "http://www.ratml.org/", + "contributions": [ + "doc" + ] + }, + { + "login": "mbhall88", + "name": "Michael Hall", + "avatar_url": "https://avatars3.githubusercontent.com/u/20403931?v=4", + "profile": "https://mbh.sh", + "contributions": [ + "doc" + ] + }, + { + "login": "ferdymercury", + "name": "ferdymercury", + "avatar_url": "https://avatars3.githubusercontent.com/u/10653970?v=4", + "profile": "https://github.com/ferdymercury", + "contributions": [ + "doc" + ] + }, + { + "login": "jakoblover", + "name": "Jakob Lover", + "avatar_url": "https://avatars0.githubusercontent.com/u/14160441?v=4", + "profile": "https://github.com/jakoblover", + "contributions": [ + "code" + ] + }, + { 
+ "login": "ZeeD26", + "name": "Dominik Steinberger", + "avatar_url": "https://avatars2.githubusercontent.com/u/2487468?v=4", + "profile": "https://github.com/ZeeD26", + "contributions": [ + "code" + ] + }, + { + "login": "dfleury2", + "name": "D. Fleury", + "avatar_url": "https://avatars1.githubusercontent.com/u/4805384?v=4", + "profile": "https://github.com/dfleury2", + "contributions": [ + "code" + ] + }, + { + "login": "dbarowy", + "name": "Dan Barowy", + "avatar_url": "https://avatars3.githubusercontent.com/u/573142?v=4", + "profile": "https://github.com/dbarowy", + "contributions": [ + "doc" + ] + } + ], + "contributorsPerLine": 7, + "skipCi": true +} diff --git a/thirdparty/CLI11/.appveyor.yml b/thirdparty/CLI11/.appveyor.yml new file mode 100644 index 0000000..a4ae118 --- /dev/null +++ b/thirdparty/CLI11/.appveyor.yml @@ -0,0 +1,36 @@ +version: 1.9.1.{build} + +branches: + only: + - master + - v1 + +install: + - git submodule update --init --recursive + - py -3 --version + - set PATH=C:\Python38-x64;C:\Python38-x64\Scripts;%PATH% + - cmake --version + - python --version + - python -m pip --version + - python -m pip install conan + - conan user + - conan --version + +build_script: + - mkdir build + - cd build + - ps: cmake .. -DCLI11_WARNINGS_AS_ERRORS=ON -DCLI11_SINGLE_FILE_TESTS=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_GENERATOR="Visual Studio 14 2015" + - ps: cmake --build . + - cd .. + - conan create . CLIUtils/CLI11 + +test_script: + - cd build + - ps: ctest --output-on-failure -C Debug + +notifications: + - provider: Webhook + url: https://webhooks.gitter.im/e/0185e91c5d989a476d7b + on_build_success: false + on_build_failure: true + on_build_status_changed: true diff --git a/thirdparty/CLI11/.ci/azure-build.yml b/thirdparty/CLI11/.ci/azure-build.yml new file mode 100644 index 0000000..06d60ce --- /dev/null +++ b/thirdparty/CLI11/.ci/azure-build.yml @@ -0,0 +1,11 @@ +steps: + +- task: CMake@1 + inputs: + cmakeArgs: .. 
-DCLI11_WARNINGS_AS_ERRORS=ON -DCLI11_SINGLE_FILE=$(cli11.single) -DCMAKE_CXX_STANDARD=$(cli11.std) -DCLI11_SINGLE_FILE_TESTS=$(cli11.single) -DCMAKE_BUILD_TYPE=$(cli11.build_type) $(cli11.options) + displayName: 'Configure' + +- script: cmake --build . + displayName: 'Build' + workingDirectory: build + diff --git a/thirdparty/CLI11/.ci/azure-cmake.yml b/thirdparty/CLI11/.ci/azure-cmake.yml new file mode 100644 index 0000000..59b6ceb --- /dev/null +++ b/thirdparty/CLI11/.ci/azure-cmake.yml @@ -0,0 +1,16 @@ +steps: + +# Note that silkeh/clang does not include ca-certificates, so check the shasum for verification +- bash: | + wget --no-check-certificate "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" + echo "29faa62fb3a0b6323caa3d9557e1a5f1205614c0d4c5c2a9917f16a74f7eff68 cmake-3.14.3-Linux-x86_64.tar.gz" | shasum -sca 256 + displayName: Download CMake + +- task: ExtractFiles@1 + inputs: + archiveFilePatterns: 'cmake*.tar.gz' + destinationFolder: 'cmake_program' + displayName: Extract CMake + +- bash: echo "##vso[task.prependpath]$(Build.SourcesDirectory)/cmake_program/cmake-3.14.3-Linux-x86_64/bin" + displayName: Add CMake to PATH diff --git a/thirdparty/CLI11/.ci/azure-test.yml b/thirdparty/CLI11/.ci/azure-test.yml new file mode 100644 index 0000000..ec1d1f5 --- /dev/null +++ b/thirdparty/CLI11/.ci/azure-test.yml @@ -0,0 +1,12 @@ +steps: + +- script: ctest --output-on-failure -C $(cli11.build_type) -T test + displayName: 'Test' + workingDirectory: build + +- task: PublishTestResults@2 + inputs: + testResultsFormat: 'cTest' + testResultsFiles: '**/Test.xml' + + diff --git a/thirdparty/CLI11/.ci/make_and_test.sh b/thirdparty/CLI11/.ci/make_and_test.sh new file mode 100755 index 0000000..af8de34 --- /dev/null +++ b/thirdparty/CLI11/.ci/make_and_test.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +echo -en "travis_fold:start:script.build\\r" +echo "Building..." +STD=$1 +shift +set -evx + + +mkdir -p build +cd build +cmake .. 
-DCLI11_WARNINGS_AS_ERRORS=ON -DCLI11_SINGLE_FILE=ON -DCMAKE_CXX_STANDARD=$STD -DCLI11_SINGLE_FILE_TESTS=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER_LAUNCHER=ccache $@ +cmake --build . -- -j2 + +set +evx +echo -en "travis_fold:end:script.build\\r" +echo -en "travis_fold:start:script.test\\r" +echo "Testing..." +set -evx + +ctest --output-on-failure + +set +evx +echo -en "travis_fold:end:script.test\\r" diff --git a/thirdparty/CLI11/.ci/run_codecov.sh b/thirdparty/CLI11/.ci/run_codecov.sh new file mode 100755 index 0000000..28d149a --- /dev/null +++ b/thirdparty/CLI11/.ci/run_codecov.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +echo -en "travis_fold:start:script.build\\r" +echo "Building..." +set -evx + +cd ${TRAVIS_BUILD_DIR} +mkdir -p build +cd build +cmake .. -DCLI11_SINGLE_FILE_TESTS=OFF -DCLI11_EXAMPLES=OFF -DCMAKE_BUILD_TYPE=Coverage +cmake --build . -- -j2 +cmake --build . --target CLI11_coverage + +set +evx +echo -en "travis_fold:end:script.build\\r" +echo -en "travis_fold:start:script.lcov\\r" +echo "Capturing and uploading LCov..." +set -evx + +lcov --directory . 
--capture --output-file coverage.info # capture coverage info +lcov --remove coverage.info '*/tests/*' '*/examples/*' '*gtest*' '*gmock*' '/usr/*' --output-file coverage.info # filter out system +lcov --list coverage.info #debug info +# Uploading report to CodeCov +bash <(curl -s https://codecov.io/bash) || echo "Codecov did not collect coverage reports" + +set +evx +echo -en "travis_fold:end:script.lcov\\r" diff --git a/thirdparty/CLI11/.clang-format b/thirdparty/CLI11/.clang-format new file mode 100644 index 0000000..0879ffa --- /dev/null +++ b/thirdparty/CLI11/.clang-format @@ -0,0 +1,86 @@ +Language: Cpp +BasedOnStyle: LLVM +# AccessModifierOffset: -2 +# AlignAfterOpenBracket: Align +# AlignConsecutiveAssignments: false +# AlignConsecutiveDeclarations: false +# AlignEscapedNewlinesLeft: false +# AlignOperands: true +# AlignTrailingComments: true +# AllowAllParametersOfDeclarationOnNextLine: true +# AllowShortBlocksOnASingleLine: false +# AllowShortCaseLabelsOnASingleLine: false +# AllowShortFunctionsOnASingleLine: All +# AllowShortIfStatementsOnASingleLine: false +# AllowShortLoopsOnASingleLine: false +# AlwaysBreakAfterDefinitionReturnType: None +# AlwaysBreakAfterReturnType: None +# AlwaysBreakBeforeMultilineStrings: false +# AlwaysBreakTemplateDeclarations: false +BinPackArguments: false +BinPackParameters: false +# BraceWrapping: +# AfterClass: false +# AfterControlStatement: false +# AfterEnum: false +# AfterFunction: false +# AfterNamespace: false +# AfterObjCDeclaration: false +# AfterStruct: false +# AfterUnion: false +# BeforeCatch: false +# BeforeElse: false +# IndentBraces: false +# BreakBeforeBinaryOperators: None +# BreakBeforeBraces: Attach +# BreakBeforeTernaryOperators: true +# BreakConstructorInitializersBeforeComma: false +# BreakAfterJavaFieldAnnotations: false +# BreakStringLiterals: true +ColumnLimit: 120 +# CommentPragmas: '^ IWYU pragma:' +# ConstructorInitializerAllOnOneLineOrOnePerLine: false +# ConstructorInitializerIndentWidth: 4 +# 
ContinuationIndentWidth: 4 +# Cpp11BracedListStyle: true +# DerivePointerAlignment: false +# DisableFormat: false +# ExperimentalAutoDetectBinPacking: false +# ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +# IncludeIsMainRegex: '$' +# IndentCaseLabels: false +IndentWidth: 4 +# IndentWrappedFunctionNames: false +# JavaScriptQuotes: Leave +# JavaScriptWrapImports: true +# KeepEmptyLinesAtTheStartOfBlocks: true +# MacroBlockBegin: '' +# MacroBlockEnd: '' +# MaxEmptyLinesToKeep: 1 +# NamespaceIndentation: None +# ObjCBlockIndentWidth: 2 +# ObjCSpaceAfterProperty: false +# ObjCSpaceBeforeProtocolList: true +# PenaltyBreakBeforeFirstCallParameter: 19 +# PenaltyBreakComment: 300 +# PenaltyBreakFirstLessLess: 120 +# PenaltyBreakString: 1000 +# PenaltyExcessCharacter: 1000000 +# PenaltyReturnTypeOnItsOwnLine: 60 +# PointerAlignment: Right +# ReflowComments: true +SortIncludes: true +# SpaceAfterCStyleCast: false +# SpaceAfterTemplateKeyword: true +# SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: Never +# SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +# SpacesInAngles: false +# SpacesInContainerLiterals: true +# SpacesInCStyleCastParentheses: false +# SpacesInParentheses: false +# SpacesInSquareBrackets: false +Standard: Cpp11 +TabWidth: 4 +UseTab: Never diff --git a/thirdparty/CLI11/.clang-tidy b/thirdparty/CLI11/.clang-tidy new file mode 100644 index 0000000..09875c8 --- /dev/null +++ b/thirdparty/CLI11/.clang-tidy @@ -0,0 +1,25 @@ +# Checks that will be implemented in future PRs: +# performance-unnecessary-value-param, hints to ~110 issues. 
Be careful with implementing the suggested changes of this one, as auto-fixes may break the code + +FormatStyle: file + +Checks: ' +-*, +google-*, +-google-runtime-references, +llvm-include-order, +llvm-namespace-comment, +misc-throw-by-value-catch-by-reference, +modernize*, +-modernize-use-trailing-return-type, +readability-container-size-empty, +' + +WarningsAsErrors: '*' + +HeaderFilterRegex: '.*hpp' + +CheckOptions: +- key: google-readability-braces-around-statements.ShortStatementLines + value: '3' + diff --git a/thirdparty/CLI11/.codecov.yml b/thirdparty/CLI11/.codecov.yml new file mode 100644 index 0000000..4181c54 --- /dev/null +++ b/thirdparty/CLI11/.codecov.yml @@ -0,0 +1,4 @@ + +ignore: + - "tests" + - "examples" diff --git a/thirdparty/CLI11/.editorconfig b/thirdparty/CLI11/.editorconfig new file mode 100644 index 0000000..979b049 --- /dev/null +++ b/thirdparty/CLI11/.editorconfig @@ -0,0 +1,11 @@ +root = true + +[*] +indent_style = space +indent_size = 4 +insert_final_newline = true +end_of_line = lf +trim_trailing_whitespace = true + +[*.yml] +indent_size = 2 diff --git a/thirdparty/CLI11/.github/CONTRIBUTING.md b/thirdparty/CLI11/.github/CONTRIBUTING.md new file mode 100644 index 0000000..fcee45d --- /dev/null +++ b/thirdparty/CLI11/.github/CONTRIBUTING.md @@ -0,0 +1,80 @@ +Thanks for considering to write a Pull Request (PR) for CLI11! Here are a few guidelines to get you started: + +Make sure you are comfortable with the license; all contributions are licensed under the original license. + +## Adding functionality +Make sure any new functions you add are are: + +* Documented by `///` documentation for Doxygen +* Mentioned in the instructions in the README, though brief mentions are okay +* Explained in your PR (or previously explained in an Issue mentioned in the PR) +* Completely covered by tests + +In general, make sure the addition is well thought out and does not increase the complexity of CLI11 needlessly. 
+ +## Things you should know: + +* Once you make the PR, tests will run to make sure your code works on all supported platforms +* The test coverage is also measured, and that should remain 100% +* Formatting should be done with pre-commit, otherwise the format check will not pass. However, it is trivial to apply this to your PR, so don't worry about this check. If you do want to run it, see below. +* Everything must pass clang-tidy as well, run with `-DCLI11_CLANG_TIDY=ON` (if you set `-DCLI11_CLANG_TIDY_OPTIONS="-fix"`, make sure you use a single threaded build process, or just build one example target). +* Your changes must also conform to most of the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) rules checked by [cpplint](https://github.com/cpplint/cpplint). For unused cpplint filters and justifications, see [CPPLINT.cfg](/CPPLINT.cfg). + + +## Pre-commit + +Format is handled by pre-commit. You should install it: + +```bash +python3 -m pip install pre-commit +``` + +Then, you can run it on the items you've added to your staging area, or all files: + +``` +pre-commit run +# OR +pre-commit run --all-files +``` + + +And, if you want to always use it, you can install it as a git hook (hence the name, pre-commit): + +```bash +pre-commit install +``` + +## For developers releasing to Conan.io + +This is now done by the CI system on tagged releases. Previously, the steps to make a Conan.io release were: + +```bash +conan remove '*' # optional, I like to be clean +conan create . cliutils/stable +conan upload "*" -r cli11 --all +``` + +Here I've assumed that the remote is `cli11`. + +## For maintainers: remember to add contributions + +In a commit to a PR, just add "`@all-contributors please add for `" or similar (see ). Use `code` for code, `bug` if an issue was submitted, `platform` for packaging stuff, and `doc` for documentation updates. 
+ +To run locally, do: + +```bash +yarn add --dev all-contributors-cli +yarn all-contributors add username code,bug +``` + +## For maintainers: Making a release + +Remember to replace the emoji in the readme, being careful not to replace the ones in all-contributors if any overlap. + +Steps: +* Update changelog if needed +* Update the version in `.appveyor.yml` and `include/CLI/Version.hpp`. +* Find and replace in README: + * Replace " 🆕" and "🆕 " with "" (ignores the description line) + * Check for `\/\/$` (vi syntax) to catch leftover `// 🆕` + * Replace "🚧" with "🆕" (manually ignore the description line) diff --git a/thirdparty/CLI11/.github/actions/quick_cmake/action.yml b/thirdparty/CLI11/.github/actions/quick_cmake/action.yml new file mode 100644 index 0000000..da721a7 --- /dev/null +++ b/thirdparty/CLI11/.github/actions/quick_cmake/action.yml @@ -0,0 +1,18 @@ +name: Quick CMake config +description: 'Runs CMake 3.4+ (if already setup)' +inputs: + args: + description: 'Other arguments' + required: false + default: '' + +runs: + using: composite + steps: + - run: | + mkdir -p build-tmp + touch build-tmp/tmp + rm -r build-tmp/* + (cd build-tmp && cmake .. 
${{ inputs.args}}) + rm -r build-tmp + shell: bash diff --git a/thirdparty/CLI11/.github/dependabot.yml b/thirdparty/CLI11/.github/dependabot.yml new file mode 100644 index 0000000..7327336 --- /dev/null +++ b/thirdparty/CLI11/.github/dependabot.yml @@ -0,0 +1,16 @@ +version: 2 +updates: + # Maintain dependencies for GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + ignore: + # Official actions have moving tags like v1 + # that are used, so they don't need updates here + - dependency-name: "actions/checkout" + - dependency-name: "actions/setup-python" + - dependency-name: "actions/cache" + - dependency-name: "actions/upload-artifact" + - dependency-name: "actions/download-artifact" + - dependency-name: "actions/labeler" diff --git a/thirdparty/CLI11/.github/labeler_merged.yml b/thirdparty/CLI11/.github/labeler_merged.yml new file mode 100644 index 0000000..434ab58 --- /dev/null +++ b/thirdparty/CLI11/.github/labeler_merged.yml @@ -0,0 +1,4 @@ +needs changelog: + - all: ['!CHANGELOG.md'] +needs README: + - all: ['!README.md'] diff --git a/thirdparty/CLI11/.github/workflows/pr_merged.yml b/thirdparty/CLI11/.github/workflows/pr_merged.yml new file mode 100644 index 0000000..6fadc0f --- /dev/null +++ b/thirdparty/CLI11/.github/workflows/pr_merged.yml @@ -0,0 +1,15 @@ +name: PR merged +on: + pull_request_target: + types: [closed] + +jobs: + label-merged: + name: Changelog needed + runs-on: ubuntu-latest + if: github.event.pull_request.merged == true + steps: + - uses: actions/labeler@main + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + configuration-path: .github/labeler_merged.yml diff --git a/thirdparty/CLI11/.github/workflows/tests.yml b/thirdparty/CLI11/.github/workflows/tests.yml new file mode 100644 index 0000000..60f10f9 --- /dev/null +++ b/thirdparty/CLI11/.github/workflows/tests.yml @@ -0,0 +1,182 @@ +name: Tests +on: + push: + branches: + - master + - v* + pull_request: + branches: + - master + +jobs: 
+ pre-commit: + name: Formatting + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - uses: pre-commit/action@v2.0.2 + + cuda-build: + name: CUDA build only + runs-on: ubuntu-latest + container: nvidia/cuda:10.2-devel-ubuntu18.04 + steps: + - uses: actions/checkout@v1 + with: + submodules: true + - name: Add wget + run: apt-get update && apt-get install -y wget + - name: Setup cmake + uses: jwlawson/actions-setup-cmake@v1.8 + - name: Configure + run: cmake -S . -B build -DCLI11_CUDA_TESTS=ON + - name: Build + run: cmake --build build -j2 + + cmake-config: + name: CMake config check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: CMake 3.4 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.4" + - name: Check CMake 3.4 + uses: ./.github/actions/quick_cmake + + - name: CMake 3.5 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.5" + - name: Check CMake 3.5 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.6 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.6" + - name: Check CMake 3.6 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.7 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.7" + - name: Check CMake 3.7 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.8 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.8" + - name: Check CMake 3.8 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.9 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.9" + - name: Check CMake 3.9 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.10 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.10" + - name: Check CMake 3.10 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - 
name: CMake 3.11 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.11" + - name: Check CMake 3.11 (full) + uses: ./.github/actions/quick_cmake + with: + args: -DCLI11_SANITIZERS=ON -DCLI11_BUILD_EXAMPLES_JSON=ON + if: success() || failure() + + - name: CMake 3.12 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.12" + - name: Check CMake 3.12 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.13 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.13" + - name: Check CMake 3.13 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.14 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.14" + - name: Check CMake 3.14 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.15 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.15" + - name: Check CMake 3.15 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.16 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.16" + - name: Check CMake 3.16 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.17 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.17" + - name: Check CMake 3.17 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.18 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.18" + - name: Check CMake 3.18 + uses: ./.github/actions/quick_cmake + if: success() || failure() + + - name: CMake 3.19 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.19" + - name: Check CMake 3.19 (full) + uses: ./.github/actions/quick_cmake + with: + args: -DCLI11_SANITIZERS=ON -DCLI11_BUILD_EXAMPLES_JSON=ON + if: success() || failure() + + - name: CMake 3.20 + uses: jwlawson/actions-setup-cmake@v1.8 + with: + cmake-version: "3.20" + - name: Check CMake 3.20 + uses: 
./.github/actions/quick_cmake + if: success() || failure() + + diff --git a/thirdparty/CLI11/.gitignore b/thirdparty/CLI11/.gitignore new file mode 100644 index 0000000..cc1b9d0 --- /dev/null +++ b/thirdparty/CLI11/.gitignore @@ -0,0 +1,15 @@ +a.out* +*.swp +/*build* +/test_package/build +/Makefile +/CMakeFiles/* +/cmake_install.cmake +/*.kdev4 +/html/* +!/meson.build + +/node_modules/* +/package.json +/yarn.lock +/CLI11.hpp diff --git a/thirdparty/CLI11/.pre-commit-config.yaml b/thirdparty/CLI11/.pre-commit-config.yaml new file mode 100644 index 0000000..f48801a --- /dev/null +++ b/thirdparty/CLI11/.pre-commit-config.yaml @@ -0,0 +1,29 @@ + +repos: +- repo: https://github.com/psf/black + rev: 20.8b1 + hooks: + - id: black + +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.4.0 + hooks: + - id: check-added-large-files + - id: mixed-line-ending + - id: trailing-whitespace + - id: check-merge-conflict + - id: check-case-conflict + - id: check-symlinks + - id: check-yaml + +- repo: local + hooks: + - id: docker-clang-format + name: Docker Clang Format + language: docker_image + types: + - c++ + entry: unibeautify/clang-format:latest + args: + - -style=file + - -i diff --git a/thirdparty/CLI11/.pre-commit-nodocker.yaml b/thirdparty/CLI11/.pre-commit-nodocker.yaml new file mode 100644 index 0000000..5fcaf3e --- /dev/null +++ b/thirdparty/CLI11/.pre-commit-nodocker.yaml @@ -0,0 +1,27 @@ + +repos: +- repo: https://github.com/psf/black + rev: 19.3b0 + hooks: + - id: black +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.3.0 + hooks: + - id: check-added-large-files + - id: mixed-line-ending + - id: trailing-whitespace + - id: check-merge-conflict + - id: check-case-conflict + - id: check-symlinks + - id: check-yaml +- repo: local + hooks: + - id: clang-format + name: Clang Format + language: system + types: + - c++ + entry: clang-format + args: + - -style=file + - -i diff --git a/thirdparty/CLI11/.travis.yml b/thirdparty/CLI11/.travis.yml 
new file mode 100644 index 0000000..d352aed --- /dev/null +++ b/thirdparty/CLI11/.travis.yml @@ -0,0 +1,122 @@ +language: cpp +dist: trusty + +# Exclude ghpages, +# but even better, don't build branch and PR, just PR +# Include tags starting with v and a digit +branches: + only: + - master + - /^v\d/ + +cache: + apt: true + directories: + - "${TRAVIS_BUILD_DIR}/deps/doxygen" + +matrix: + include: + # Default clang + - compiler: clang + script: + - .ci/make_and_test.sh 11 + - .ci/make_and_test.sh 14 + - .ci/make_and_test.sh 17 + + # Docs and clang 3.5 + - compiler: clang + language: node_js + node_js: "7.4.0" + env: + - DEPLOY_MAT=yes + addons: + apt: + packages: + - clang-3.5 + install: + - export CC=clang-3.5 + - export CXX=clang++-3.5 + - npm install gitbook-cli -g + - gitbook fetch 3.2.3 + - gitbook install book + script: + - .ci/make_and_test.sh 11 + after_success: + - export DEPS_DIR="${TRAVIS_BUILD_DIR}/deps" + - . .ci/build_doxygen.sh + - doxygen docs/Doxyfile + - gitbook build book html/book + + # GCC 7 and coverage (8 does not support lcov, wait till 9 and new lcov) + - compiler: gcc + dist: bionic + addons: + apt: + packages: + - curl + - lcov + install: + - DEPS_DIR="${TRAVIS_BUILD_DIR}/deps" + - cd $TRAVIS_BUILD_DIR + - ". .ci/build_lcov.sh" + - ".ci/run_codecov.sh" + script: + - .ci/make_and_test.sh 11 -DCLI11_EXAMPLE_JSON=ON + - .ci/make_and_test.sh 14 -DCLI11_EXAMPLE_JSON=ON + - .ci/make_and_test.sh 17 -DCLI11_EXAMPLE_JSON=ON + + # GCC 4.8 and Conan + - compiler: gcc + dist: bionic + addons: + apt: + packages: + - python3-pip + - python3-setuptools + install: + - python3 -VV + - python3 -m pip install --user conan + - conan user + script: + - .ci/make_and_test.sh 11 + after_success: + - conan create . 
cliutils/stable + - | + if [ "${TRAVIS_TAG}" ] + then + conan remote add origin https://api.bintray.com/conan/cliutils/CLI11 + conan user -p ${BINFROG_API_KEY} -r origin henryiii + conan upload "*" -c -r origin --all + fi + + +install: skip + +script: +- .ci/make_and_test.sh 11 +- .ci/make_and_test.sh 14 + + +deploy: +- provider: pages + skip_cleanup: true + github_token: ${GH_REPO_TOKEN} + keep_history: false + local_dir: ${TRAVIS_BUILD_DIR}/html + on: + branch: master + condition: "$DEPLOY_MAT = yes" + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/bbdb3befce4c00448d24 + on_success: change + on_failure: always + on_start: never + +env: + global: + - secure: cY0OI609iTAxLRYuYQnNMi+H6n0dBwioTAoFXGGRTnngw2V9om3UmY5eUu4HQEQsQZovHdYpNhlSgRmdwQ4UqSp3FGyrwobf0kzacV4bVnMDeXDmHt8RzE5wP/LwDd8elNF6RRYjElY99f0k0FyXVd0fIvuVkGKQECNLOtEk0jQo+4YTh7dhuCxRhBYgTbNiRL6UJynfrcK0YN+DQ+8CJNupu2VxgaEpCSngTfvDHLcddcrXwpvn3MPc3FsDUbtN389ZCIe41qqIL0ATv46DQaTw4FOevyVfRyrBOznONoGCVeAYKL6VBdrk01Fh6aytF5zgI3hKaKobgEn+QFfzR6l68c6APvqA0Qv39iLjuh6KbdIV2YsqXfyt6FBgqP2xZuNEZW1jZ8LxUOLl2I40UEh87nFutvnSbfIzN+FcLrajm2H2jV2kZGNKAMx+4qxkZuXSre4JPkENfJm2WNFAKlqPt4ZSEQarkDYzZPcEr2I9fbGjQYVJICoN4LikCv9K5z7ujpTxCTNbVpQWZcEOT6QQBc6Vml/N/NKAIl9o2OeTLiXCmT31+KQMeO492KYNQ6VmkeqrVhGExOUcJdNyDJV9C+3mSekb3Sq78SneYRKDechkWbMl0ol07wGTdBwQQwgaorjRyn07x1rDxpPr3z19/+eubnpPUW4UQ5MYsjs= + - secure: 
G6H5HA9pPUgsd96A+uvTxbLjR1rcT9NtxsknIkFDfzGDpffn6wVX+kCIQLf9zFDnQnsfYA/4piiuoBN5U5C7HQrh9UCvBVptXjWviea0Y7CRbMJZpw2rPvXWQtrFNzYkaV7kdJ5B0Mmvh6rcH/I8gKFrkdjF7i7sfzWdFWRU5QXfxXOk2n+xCXX6uFemxHH9850XEjVtnU7YYUebQFaoTYLLy05nlt9JaEF84wfJljY/SJX7I9gpNLtizE9MpJylnrwUeL66OqFievmjL3/bWpPUBjUF0WdtXYlVDja7O582FQDs94ofgqeGieGIMQ0VuovpbQOJSdjs5XHZwu2ce6HZxtOhJJqw6xEwbq43ZdofAlJ5GUEOgrr+j25zIDkdzOhliDKJtw5ysmmTUKEcZ36iWbCE0YP/IC42yOV9oOP6UkgbuwpVDdxAFRgLZLahW9Ok+c1PlzIauPxv+jIEI4rSEEJRKZG2JK3TXUdhd58mHBfQMNjKQMF+Y2wCCGjfMO0q4SgvBhYyb4oBTxEqnc2Pzh2DJdNzRFsV7ktsQSRglHGVI+1XTmQ+2kbBzNOQBLjOuRvDZENUhyxPKGZDHyAOMlVvYm8vvWebM1/F3YgDb/tPh33+EGSvpKkCZ5nUxB5e605H6gdYlNKNhuWKlEKTo2/kF0D39gAUCIcGbzw= + - CCACHE_CPP2: yes diff --git a/thirdparty/CLI11/CHANGELOG.md b/thirdparty/CLI11/CHANGELOG.md new file mode 100644 index 0000000..d2a59b4 --- /dev/null +++ b/thirdparty/CLI11/CHANGELOG.md @@ -0,0 +1,637 @@ +## Version 2.0: In progress + +* Built-in config format is TOML compliant now [#435][] + * Support multiline TOML [#528][] +* Support short/positional options in config mode [#443][] +* More powerful containers, `%%` separator [#423][] +* Add a version flag easily [#452][] +* Support atomic types [#520][] +* Add a type validator `CLI::TypeValidator` [#526][] +* Support `->silent()` on subcommands. [#529][] +* Add alias section to help for subcommands [#545][] +* Redesigned MakeSingleFiles to have a higher level of manual control, to support future features. [#546][] +* Moved testing from GTest to Catch2 [#574][] + +* Bugfix: avoid listing helpall as a required flag [#530][] +* Bugfix: avoid a clash with WINDOWS define [#563][] + +* Removed deprecated set commands, use validators instead. 
[#565][] + +* Build: support pkg-config [#523][] + + +[#435]: https://github.com/CLIUtils/CLI11/pull/435 +[#443]: https://github.com/CLIUtils/CLI11/pull/443 +[#423]: https://github.com/CLIUtils/CLI11/pull/423 +[#452]: https://github.com/CLIUtils/CLI11/pull/452 +[#520]: https://github.com/CLIUtils/CLI11/pull/520 +[#523]: https://github.com/CLIUtils/CLI11/pull/523 +[#526]: https://github.com/CLIUtils/CLI11/pull/526 +[#528]: https://github.com/CLIUtils/CLI11/pull/528 +[#529]: https://github.com/CLIUtils/CLI11/pull/529 +[#530]: https://github.com/CLIUtils/CLI11/pull/530 +[#545]: https://github.com/CLIUtils/CLI11/pull/545 +[#546]: https://github.com/CLIUtils/CLI11/pull/546 +[#563]: https://github.com/CLIUtils/CLI11/pull/563 +[#565]: https://github.com/CLIUtils/CLI11/pull/565 +[#574]: https://github.com/CLIUtils/CLI11/pull/574 + + + + +### Version 1.9.1: Backporting fixes + +This is a patch version that backports fixes from the development of 2.0. + +* Support relative inclusion [#475][] +* Fix cases where spaces in paths could break CMake support [#471][] +* Fix an issue with string conversion [#421][] +* Cross-compiling improvement for Conan.io [#430][] +* Fix option group default propagation [#450][] +* Fix for C++20 [#459][] +* Support compiling with RTTI off [#461][] + +[#421]: https://github.com/CLIUtils/CLI11/pull/421 +[#430]: https://github.com/CLIUtils/CLI11/pull/430 +[#450]: https://github.com/CLIUtils/CLI11/pull/450 +[#459]: https://github.com/CLIUtils/CLI11/pull/459 +[#461]: https://github.com/CLIUtils/CLI11/pull/461 +[#471]: https://github.com/CLIUtils/CLI11/pull/471 +[#475]: https://github.com/CLIUtils/CLI11/pull/475 + + +## Version 1.9: Config files and cleanup + +Config file handling was revamped to fix common issues, and now supports reading [TOML](https://github.com/toml-lang/toml). + +Adding options is significantly more powerful with support for things like +`std::tuple` and `std::array`, including with transforms. 
Several new +configuration options were added to facilitate a wider variety of apps. GCC +4.7 is no longer supported. + +* Config files refactored, supports TOML (may become default output in 2.0) [#362][] +* Added two template parameter form of `add_option`, allowing `std::optional` to be supported without a special import [#285][] +* `string_view` now supported in reasonable places [#300][], [#285][] +* `immediate_callback`, `final_callback`, and `parse_complete_callback` added to support controlling the App callback order [#292][], [#313][] +* Multiple positional arguments maintain order if `positionals_at_end` is set. [#306][] +* Pair/tuple/array now supported, and validators indexed to specific components in the objects [#307][], [#310][] +* Footer callbacks supported [#309][] +* Subcommands now support needs (including nameless subcommands) [#317][] +* More flexible type size, more useful `add_complex` [#325][], [#370][] +* Added new validators `CLI::NonNegativeNumber` and `CLI::PositiveNumber` [#342][] +* Transform now supports arrays [#349][] +* Option groups can be hidden [#356][] +* Add `CLI::deprecate_option` and `CLI::retire_option` functions [#358][] +* More flexible and safer Option `default_val` [#387][] +* Backend: Cleaner type traits [#286][] +* Backend: File checking updates [#341][] +* Backend: Using pre-commit to format, checked in GitHub Actions [#336][] +* Backend: Clang-tidy checked again, CMake option now `CL11_CLANG_TIDY` [#390][] +* Backend: Warning cleanup, more checks from klocwork [#350][], Effective C++ [#354][], clang-tidy [#360][], CUDA NVCC [#365][], cross compile [#373][], sign conversion [#382][], and cpplint [#400][] +* Docs: CLI11 Tutorial now hosted in the same repository [#304][], [#318][], [#374][] +* Bugfix: Fixed undefined behavior in `checked_multiply` [#290][] +* Bugfix: `->check()` was adding the name to the wrong validator [#320][] +* Bugfix: Resetting config option works properly [#301][] +* Bugfix: Hidden flags were 
showing up in error printout [#333][] +* Bugfix: Enum conversion no longer broken if stream operator added [#348][] +* Build: The meson build system supported [#299][] +* Build: GCC 4.7 is no longer supported, due mostly to GoogleTest. GCC 4.8+ is now required. [#160][] +* Build: Restructured significant portions of CMake build system [#394][] + +> ### Converting from CLI11 1.8: +> +> * Some deprecated methods dropped +> - `add_set*` should be replaced with `->check`/`->transform` and `CLI::IsMember` since 1.8 +> - `get_defaultval` was replaced by `get_default_str` in 1.8 +> * The true/false 4th argument to `add_option` is expected to be removed in 2.0, use `->capture_default_str()` since 1.8 + +[#160]: https://github.com/CLIUtils/CLI11/pull/160 +[#285]: https://github.com/CLIUtils/CLI11/pull/285 +[#286]: https://github.com/CLIUtils/CLI11/pull/286 +[#290]: https://github.com/CLIUtils/CLI11/pull/290 +[#292]: https://github.com/CLIUtils/CLI11/pull/292 +[#299]: https://github.com/CLIUtils/CLI11/pull/299 +[#300]: https://github.com/CLIUtils/CLI11/pull/300 +[#301]: https://github.com/CLIUtils/CLI11/pull/301 +[#304]: https://github.com/CLIUtils/CLI11/pull/304 +[#306]: https://github.com/CLIUtils/CLI11/pull/306 +[#307]: https://github.com/CLIUtils/CLI11/pull/307 +[#309]: https://github.com/CLIUtils/CLI11/pull/309 +[#310]: https://github.com/CLIUtils/CLI11/pull/310 +[#312]: https://github.com/CLIUtils/CLI11/pull/312 +[#313]: https://github.com/CLIUtils/CLI11/pull/313 +[#317]: https://github.com/CLIUtils/CLI11/pull/317 +[#318]: https://github.com/CLIUtils/CLI11/pull/318 +[#320]: https://github.com/CLIUtils/CLI11/pull/320 +[#325]: https://github.com/CLIUtils/CLI11/pull/325 +[#333]: https://github.com/CLIUtils/CLI11/pull/333 +[#336]: https://github.com/CLIUtils/CLI11/pull/336 +[#342]: https://github.com/CLIUtils/CLI11/pull/342 +[#348]: https://github.com/CLIUtils/CLI11/pull/348 +[#349]: https://github.com/CLIUtils/CLI11/pull/349 +[#350]: 
https://github.com/CLIUtils/CLI11/pull/350 +[#354]: https://github.com/CLIUtils/CLI11/pull/354 +[#356]: https://github.com/CLIUtils/CLI11/pull/356 +[#358]: https://github.com/CLIUtils/CLI11/pull/358 +[#360]: https://github.com/CLIUtils/CLI11/pull/360 +[#362]: https://github.com/CLIUtils/CLI11/pull/362 +[#365]: https://github.com/CLIUtils/CLI11/pull/365 +[#373]: https://github.com/CLIUtils/CLI11/pull/373 +[#374]: https://github.com/CLIUtils/CLI11/pull/374 +[#382]: https://github.com/CLIUtils/CLI11/pull/382 +[#390]: https://github.com/CLIUtils/CLI11/pull/390 +[#394]: https://github.com/CLIUtils/CLI11/pull/394 +[#400]: https://github.com/CLIUtils/CLI11/pull/400 + + +## Version 1.8: Transformers, default strings, and flags + +Set handling has been completely replaced by a new backend that works as a Validator or Transformer. This provides a single interface instead of the 16 different functions in App. It also allows ordered collections to be used, custom functions for filtering, and better help and error messages. You can also use a collection of pairs (like `std::map`) to transform the match into an output. Also new are inverted flags, which can cancel or reduce the count of flags, and can also support general flag types. A new `add_option_fn` lets you more easily program CLI11 options with the types you choose. Vector options now support a custom separator. Apps can now be composed with unnamed subcommand support. The final bool "defaults" flag when creating options has been replaced by `->capture_default_str()` (ending an old limitation in construction made this possible); the old method is still available but may be removed in future versions. 
+ +* Replaced default help capture: `.add_option("name", value, "", True)` becomes `.add_option("name", value)->capture_default_str()` [#242] +* Added `.always_capture_default()` [#242] +* New `CLI::IsMember` validator replaces set validation [#222] +* IsMember also supports container of pairs, transform allows modification of result [#228] +* Added new Transformers, `CLI::AsNumberWithUnit` and `CLI::AsSizeValue` [#253] +* Much more powerful flags with different values [#211], general types [#235] +* `add_option` now supports bool due to unified bool handling [#211] +* Support for composable unnamed subcommands [#216] +* Reparsing is better supported with `.remaining_for_passthrough()` [#265] +* Custom vector separator using `->delimiter(char)` [#209], [#221], [#240] +* Validators added for IP4 addresses and positive numbers [#210] and numbers [#262] +* Minimum required Boost for optional Optionals has been corrected to 1.61 [#226] +* Positionals can stop options from being parsed with `app.positionals_at_end()` [#223] +* Added `validate_positionals` [#262] +* Positional parsing is much more powerful [#251], duplicates supported [#247] +* Validators can be negated with `!` [#230], and now handle tname functions [#228] +* Better enum support and streaming helper [#233] and [#228] +* Cleanup for shadow warnings [#232] +* Better alignment on multiline descriptions [#269] +* Better support for aarch64 [#266] +* Respect `BUILD_TESTING` only if CLI11 is the main project; otherwise, `CLI11_TESTING` must be used [#277] +* Drop auto-detection of experimental optional and boost::optional; must be enabled explicitly (too fragile) [#277] [#279] + +> ### Converting from CLI11 1.7: +> +> * `.add_option(..., true)` should be replaced by `.add_option(...)->capture_default_str()` or `app.option_defaults()->always_capture_default()` can be used +> * `app.add_set("--name", value, {"choice1", "choice2"})` should become `app.add_option("--name", value)->check(CLI::IsMember({"choice1", 
"choice2"}))` +> * The `_ignore_case` version of this can be replaced by adding `CLI::ignore_case` to the argument list in `IsMember` +> * The `_ignore_underscore` version of this can be replaced by adding `CLI::ignore_underscore` to the argument list in `IsMember` +> * The `_ignore_case_underscore` version of this can be replaced by adding both functions listed above to the argument list in `IsMember` +> * If you want an exact match to the original choice after one of the modifier functions matches, use `->transform` instead of `->check` +> * The `_mutable` versions of this can be replaced by passing a pointer or shared pointer into `IsMember` +> * An error with sets now produces a `ValidationError` instead of a `ConversionError` + +[#209]: https://github.com/CLIUtils/CLI11/pull/209 +[#210]: https://github.com/CLIUtils/CLI11/pull/210 +[#211]: https://github.com/CLIUtils/CLI11/pull/211 +[#216]: https://github.com/CLIUtils/CLI11/pull/216 +[#221]: https://github.com/CLIUtils/CLI11/pull/221 +[#222]: https://github.com/CLIUtils/CLI11/pull/222 +[#223]: https://github.com/CLIUtils/CLI11/pull/223 +[#226]: https://github.com/CLIUtils/CLI11/pull/226 +[#228]: https://github.com/CLIUtils/CLI11/pull/228 +[#230]: https://github.com/CLIUtils/CLI11/pull/230 +[#232]: https://github.com/CLIUtils/CLI11/pull/232 +[#233]: https://github.com/CLIUtils/CLI11/pull/233 +[#235]: https://github.com/CLIUtils/CLI11/pull/235 +[#240]: https://github.com/CLIUtils/CLI11/pull/240 +[#242]: https://github.com/CLIUtils/CLI11/pull/242 +[#247]: https://github.com/CLIUtils/CLI11/pull/247 +[#251]: https://github.com/CLIUtils/CLI11/pull/251 +[#253]: https://github.com/CLIUtils/CLI11/pull/253 +[#262]: https://github.com/CLIUtils/CLI11/pull/262 +[#265]: https://github.com/CLIUtils/CLI11/pull/265 +[#266]: https://github.com/CLIUtils/CLI11/pull/266 +[#269]: https://github.com/CLIUtils/CLI11/pull/269 +[#277]: https://github.com/CLIUtils/CLI11/pull/277 +[#279]: https://github.com/CLIUtils/CLI11/pull/279 + + +## 
Version 1.7.1: Quick patch + +This version provides a quick patch for a (correct) warning from GCC 8 for the windows options code. + + +* Fix for Windows style option parsing [#201] +* Improve `add_subcommand` when throwing an exception [#204] +* Better metadata for Conan package [#202] + +[#201]: https://github.com/CLIUtils/CLI11/pull/201 +[#202]: https://github.com/CLIUtils/CLI11/pull/202 +[#204]: https://github.com/CLIUtils/CLI11/pull/204 + +## Version 1.7: Parse breakup + +The parsing procedure now maps much more sensibly to complex, nested subcommand structures. Each phase of the parsing happens on all subcommands before moving on with the next phase of the parse. This allows several features, like required environment variables, to work properly even through subcommand boundaries. +Passing the same subcommand multiple times is better supported. Several new features were added as well, including Windows style option support, parsing strings directly, and ignoring underscores in names. Adding a set that you plan to change later must now be done with `add_mutable_set`. + +* Support Windows style options with `->allow_windows_style_options`. [#187] On by default on Windows. [#190] +* Added `parse(string)` to split up and parse a command-line style string directly. [#186] +* Added `ignore_underscore` and related functions, to ignore underscores when matching names. [#185] +* The default INI Config will now add quotes to strings with spaces [#195] +* The default message now will mention the help-all flag also if present [#197] +* Added `->description` to set Option descriptions [#199] +* Mutating sets (introduced in Version 1.6) now have a clear add method, `add_mutable_set*`, since the set reference should not expire [#200] +* Subcommands now track how many times they were parsed in a parsing process. `count()` with no arguments will return the number of times a subcommand was encountered. 
[#179] +* Parsing is now done in phases: `shortcircuits`, `ini`, `env`, `callbacks`, and `requirements`; all subcommands complete a phase before moving on. [#179] +* Calling parse multiple times is now officially supported without `clear` (automatic). [#179] +* Dropped the mostly undocumented `short_circuit` property, as help flag parsing is a bit more complex, and the default callback behavior of options now works properly. [#179] +* Use the standard `BUILD_TESTING` over `CLI11_TESTING` if defined [#183] +* Cleanup warnings [#191] +* Remove deprecated names: `set_footer`, `set_name`, `set_callback`, and `set_type_name`. Use without the `set_` instead. [#192] + +> ### Converting from CLI11 1.6: +> +> * `->short_circuit()` is no longer needed, just remove it if you were using it - raising an exception will happen in the proper place now without it. +> * `->add_set*` becomes `->add_mutable_set*` if you were using the editable set feature +> * `footer`, `name`, `callback`, and `type_name` must be used instead of the `set_*` versions (deprecated previously). + +[#179]: https://github.com/CLIUtils/CLI11/pull/179 +[#183]: https://github.com/CLIUtils/CLI11/pull/183 +[#185]: https://github.com/CLIUtils/CLI11/pull/185 +[#186]: https://github.com/CLIUtils/CLI11/pull/186 +[#187]: https://github.com/CLIUtils/CLI11/pull/187 +[#190]: https://github.com/CLIUtils/CLI11/pull/190 +[#191]: https://github.com/CLIUtils/CLI11/pull/191 +[#192]: https://github.com/CLIUtils/CLI11/pull/192 +[#197]: https://github.com/CLIUtils/CLI11/pull/197 +[#195]: https://github.com/CLIUtils/CLI11/issues/195 +[#199]: https://github.com/CLIUtils/CLI11/pull/199 +[#200]: https://github.com/CLIUtils/CLI11/pull/200 + +## Version 1.6.2: Help-all + +This version fixes some formatting bugs with help-all. It also adds fixes for several warnings, including an experimental optional error on Clang 7. Several smaller fixes. 
+ +* Fixed help-all formatting [#163] + * Printing help-all on nested command now fixed (App) + * Missing space after help-all restored (Default formatter) + * More detail printed on help all (Default formatter) + * Help-all subcommands get indented with inner blank lines removed (Default formatter) + * `detail::find_and_replace` added to utilities +* Fixed CMake install as subproject with `CLI11_INSTALL` flag. [#156] +* Fixed warning about local variable hiding class member with MSVC [#157] +* Fixed compile error with default settings on Clang 7 and libc++ [#158] +* Fixed special case of `--help` on subcommands (general fix planned for 1.7) [#168] +* Removing an option with links [#179] + +[#156]: https://github.com/CLIUtils/CLI11/issues/156 +[#157]: https://github.com/CLIUtils/CLI11/issues/157 +[#158]: https://github.com/CLIUtils/CLI11/issues/158 +[#163]: https://github.com/CLIUtils/CLI11/pull/163 +[#168]: https://github.com/CLIUtils/CLI11/issues/168 +[#179]: https://github.com/CLIUtils/CLI11/pull/179 + + +## Version 1.6.1: Platform fixes + +This version provides a few fixes for special cases, such as mixing with `Windows.h` and better defaults +for systems like Hunter. The one new feature is the ability to produce "branded" single file output for +providing custom namespaces or custom macro names. + +* Added fix and test for including Windows.h [#145] +* No longer build single file by default if main project, supports systems stuck on Python 2.6 [#149], [#151] +* Branding support for single file output [#150] + +[#145]: https://github.com/CLIUtils/CLI11/pull/145 +[#149]: https://github.com/CLIUtils/CLI11/pull/149 +[#150]: https://github.com/CLIUtils/CLI11/pull/150 +[#151]: https://github.com/CLIUtils/CLI11/pull/151 + +## Version 1.6: Formatting help + +Added a new formatting system [#109]. You can now set the formatter on Apps. This has also simplified the internals of Apps and Options a bit by separating most formatting code. 
+ +* Added `CLI::Formatter` and `formatter` slot for apps, inherited. +* `FormatterBase` is the minimum required. +* `FormatterLambda` provides for the easy addition of an arbitrary function. +* Added `help_all` support (not added by default). + +Changes to the help system (most normal users will not notice this): + +* Renamed `single_name` to `get_name(false, false)` (the default). +* The old `get_name()` is now `get_name(false, true)`. +* The old `get_pname()` is now `get_name(true, false)`. +* Removed `help_*` functions. +* Protected function `_has_help_positional` removed. +* `format_help` can now be chained. +* Added getters for the missing parts of options (help no longer uses any private parts). +* Help flags now use new `short_circuit` property to simplify parsing. [#121] + + +New for Config file reading and writing [#121]: + +* Overridable, bidirectional Config. +* ConfigINI provided and used by default. +* Renamed ini to config in many places. +* Has `config_formatter()` and `get_config_formatter()`. +* Dropped prefix argument from `config_to_str`. +* Added `ConfigItem`. +* Added an example of a custom config format using [nlohmann/json]. [#138] + + +Validators are now much more powerful [#118], all built in validators upgraded to the new form: + +* A subclass of `CLI::Validator` is now also accepted. +* They now can set the type name to things like `PATH` and `INT in [1-4]`. +* Validators can be combined with `&` and `|`. +* Old form simple validators are still accepted. + +Other changes: + +* Fixing `parse(args)`'s `args` setting and ordering after parse. [#141] +* Replaced `set_custom_option` with `type_name` and `type_size` instead of `set_custom_option`. Methods return `this`. [#136] +* Dropped `set_` on Option's `type_name`, `default_str`, and `default_val`. [#136] +* Removed `set_` from App's `failure_message`, `footer`, `callback`, and `name`. [#136] +* Fixed support `N<-1` for `type_size`. 
[#140] +* Added `->each()` to make adding custom callbacks easier. [#126] +* Allow empty options `add_option("-n",{})` to be edited later with `each` [#142] +* Added filter argument to `get_subcommands`, `get_options`; use empty filter `{}` to avoid filtering. +* Added `get_groups()` to get groups. +* Better support for manual options with `get_option`, `set_results`, and `empty`. [#119] +* `lname` and `sname` have getters, added `const get_parent`. [#120] +* Using `add_set` will now capture L-values for sets, allowing further modification. [#113] +* Dropped duplicate way to run `get_type_name` (`get_typeval`). +* Removed `requires` in favor of `needs` (deprecated in last version). [#112] +* Const added to argv. [#126] + +Backend and testing changes: + +* Internally, `type_name` is now a lambda function; for sets, this reads the set live. [#116] +* Cleaner tests without `app.reset()` (and `reset` is now `clear`). [#141] +* Better CMake policy handling. [#110] +* Includes are properly sorted. [#120] +* Testing (only) now uses submodules. 
[#111] + +[#109]: https://github.com/CLIUtils/CLI11/pull/109 +[#110]: https://github.com/CLIUtils/CLI11/pull/110 +[#111]: https://github.com/CLIUtils/CLI11/pull/111 +[#112]: https://github.com/CLIUtils/CLI11/pull/112 +[#113]: https://github.com/CLIUtils/CLI11/issues/113 +[#116]: https://github.com/CLIUtils/CLI11/pull/116 +[#118]: https://github.com/CLIUtils/CLI11/pull/118 +[#119]: https://github.com/CLIUtils/CLI11/pull/119 +[#120]: https://github.com/CLIUtils/CLI11/pull/120 +[#121]: https://github.com/CLIUtils/CLI11/pull/121 +[#126]: https://github.com/CLIUtils/CLI11/pull/126 +[#127]: https://github.com/CLIUtils/CLI11/pull/127 +[#138]: https://github.com/CLIUtils/CLI11/pull/138 +[#140]: https://github.com/CLIUtils/CLI11/pull/140 +[#141]: https://github.com/CLIUtils/CLI11/pull/141 +[#142]: https://github.com/CLIUtils/CLI11/pull/142 + +[nlohmann/json]: https://github.com/nlohmann/json + +### Version 1.5.4: Optionals +This version fixes the optional search in the single file version; some macros were not yet defined when it did the search. You can define the `CLI11_*_OPTIONAL` macros to 0 if needed to eliminate the search. + +### Version 1.5.3: Compiler compatibility +This version fixes older AppleClang compilers by removing the optimization for casting. The minimum version of Boost Optional supported has been clarified to be 1.58. CUDA 7.0 NVCC is now supported. + +### Version 1.5.2: LICENSE in single header mode + +This is a quick patch release that makes LICENSE part of the single header file, making it easier to include. Minor cleanup from codacy. No significant code changes from 1.5.1. + +### Version 1.5.1: Access + +This patch release adds better access to the App programmatically, to assist with writing custom converters to other formats. It also improves the help output, and uses a new feature in CLI11 1.5 to fix an old "quirk" in the way unlimited options and positionals interact. 
+ +* Make mixing unlimited positionals and options more intuitive [#102] +* Add missing getters `get_options` and `get_description` to App [#105] +* The app name now can be set, and will override the auto name if present [#105] +* Add `(REQUIRED)` for required options [#104] +* Print simple name for Needs/Excludes [#104] +* Use Needs instead of Requires in help print [#104] +* Groups now are listed in the original definition order [#106] + +[#102]: https://github.com/CLIUtils/CLI11/issues/102 +[#104]: https://github.com/CLIUtils/CLI11/pull/104 +[#105]: https://github.com/CLIUtils/CLI11/pull/105 +[#106]: https://github.com/CLIUtils/CLI11/pull/106 + + +## Version 1.5: Optionals + +This version introduced support for optionals, along with clarification and examples of custom conversion overloads. Enums now have been dropped from the automatic conversion system, allowing explicit protection for out-of-range ints (or a completely custom conversion). This version has some internal cleanup and improved support for the newest compilers. Several bugs were fixed, as well. + +Note: This is the final release with `requires`, please switch to `needs`. + +* Fix unlimited short options eating two values before checking for positionals when no space present [#90] +* Symmetric exclude text when excluding options, exclude can be called multiple times [#64] +* Support for `std::optional`, `std::experimental::optional`, and `boost::optional` added if `__has_include` is supported [#95] +* All macros/CMake variables now start with `CLI11_` instead of just `CLI_` [#95] +* The internal stream was not being cleared before use in some cases. Fixed. 
[#95] +* Using an enum now requires explicit conversion overload [#97] +* The separator `--` now is removed when it ends unlimited arguments [#100] + +Other, non-user facing changes: + +* Added `Macros.hpp` with better C++ mode discovery [#95] +* Deprecated macros added for all platforms +* C++17 is now tested on supported platforms [#95] +* Informational printout now added to CTest [#95] +* Better single file generation [#95] +* Added support for GTest on MSVC 2017 (but not in C++17 mode, will need next version of GTest) +* Types now have a specific size, separate from the expected number - cleaner and more powerful internally [#92] +* Examples now run as part of testing [#99] + +[#64]: https://github.com/CLIUtils/CLI11/issues/64 +[#90]: https://github.com/CLIUtils/CLI11/issues/90 +[#92]: https://github.com/CLIUtils/CLI11/issues/92 +[#95]: https://github.com/CLIUtils/CLI11/pull/95 +[#97]: https://github.com/CLIUtils/CLI11/pull/97 +[#99]: https://github.com/CLIUtils/CLI11/pull/99 +[#100]: https://github.com/CLIUtils/CLI11/pull/100 + + +## Version 1.4: More feedback + +This version adds lots of smaller fixes and additions after the refactor in version 1.3. More ways to download and use CLI11 in CMake have been added. INI files have improved support. 
+ +* Lexical cast is now more strict than before [#68] and fails on overflow [#84] +* Added `get_parent()` to access the parent from a subcommand +* Added `ExistingPath` validator [#73] +* `app.allow_ini_extras()` added to allow extras in INI files [#70] +* Multiline INI comments now supported +* Descriptions can now be written with `config_to_str` [#66] +* Double printing of error message fixed [#77] +* Renamed `requires` to `needs` to avoid C++20 keyword [#75], [#82] +* MakeSingleHeader now works if outside of git [#78] +* Adding install support for CMake [#79], improved support for `find_package` [#83], [#84] +* Added support for Conan.io [#83] + +[#70]: https://github.com/CLIUtils/CLI11/issues/70 +[#75]: https://github.com/CLIUtils/CLI11/issues/75 + +[#84]: https://github.com/CLIUtils/CLI11/pull/84 +[#83]: https://github.com/CLIUtils/CLI11/pull/83 +[#82]: https://github.com/CLIUtils/CLI11/pull/82 +[#79]: https://github.com/CLIUtils/CLI11/pull/79 +[#78]: https://github.com/CLIUtils/CLI11/pull/78 +[#77]: https://github.com/CLIUtils/CLI11/pull/77 +[#73]: https://github.com/CLIUtils/CLI11/pull/73 +[#68]: https://github.com/CLIUtils/CLI11/pull/68 +[#66]: https://github.com/CLIUtils/CLI11/pull/66 + +## Version 1.3: Refactor + +This version focused on refactoring several key systems to ensure correct behavior in the interaction of different settings. Most caveats about +features only working on the main App have been addressed, and extra arguments have been reworked. Inheritance +of defaults makes configuring CLI11 much easier without having to subclass. Policies add new ways to handle multiple arguments to match your +favorite CLI programs. Error messages and help messages are better and more flexible. Several bugs and odd behaviors in the parser have been fixed. + +* Added a version macro, `CLI11_VERSION`, along with `*_MAJOR`, `*_MINOR`, and `*_PATCH`, for programmatic access to the version. 
+* Reworked the way defaults are set and inherited; explicit control given to user with `->option_defaults()` [#48](https://github.com/CLIUtils/CLI11/pull/48) +* Hidden options now are based on an empty group name, instead of special "hidden" keyword [#48](https://github.com/CLIUtils/CLI11/pull/48) +* `parse` no longer returns (so `CLI11_PARSE` is always usable) [#37](https://github.com/CLIUtils/CLI11/pull/37) +* Added `remaining()` and `remaining_size()` [#37](https://github.com/CLIUtils/CLI11/pull/37) +* `allow_extras` and `prefix_command` are now valid on subcommands [#37](https://github.com/CLIUtils/CLI11/pull/37) +* Added `take_last` to only take last value passed [#40](https://github.com/CLIUtils/CLI11/pull/40) +* Added `multi_option_policy` and shortcuts to provide more control than just a take last policy [#59](https://github.com/CLIUtils/CLI11/pull/59) +* More detailed error messages in a few cases [#41](https://github.com/CLIUtils/CLI11/pull/41) +* Footers can be added to help [#42](https://github.com/CLIUtils/CLI11/pull/42) +* Help flags are easier to customize [#43](https://github.com/CLIUtils/CLI11/pull/43) +* Subcommands now support groups [#46](https://github.com/CLIUtils/CLI11/pull/46) +* `CLI::RuntimeError` added, for easy exit with error codes [#45](https://github.com/CLIUtils/CLI11/pull/45) +* The clang-format script is now no longer "hidden" [#48](https://github.com/CLIUtils/CLI11/pull/48) +* The order is now preserved for subcommands (list and callbacks) [#49](https://github.com/CLIUtils/CLI11/pull/49) +* Tests now run individually, utilizing CMake 3.10 additions if possible [#50](https://github.com/CLIUtils/CLI11/pull/50) +* Failure messages are now customizable, with a shorter default [#52](https://github.com/CLIUtils/CLI11/pull/52) +* Some improvements to error codes [#53](https://github.com/CLIUtils/CLI11/pull/53) +* `require_subcommand` now offers a two-argument form and negative values on the one-argument form are more useful 
[#51](https://github.com/CLIUtils/CLI11/pull/51) +* Subcommands no longer match after the max required number is obtained [#51](https://github.com/CLIUtils/CLI11/pull/51) +* Unlimited options no longer prioritize over remaining/unlimited positionals [#51](https://github.com/CLIUtils/CLI11/pull/51) +* Added `->transform` which modifies the string parsed [#54](https://github.com/CLIUtils/CLI11/pull/54) +* Changed of API in validators to `void(std::string &)` (const for users), throwing providing nicer errors [#54](https://github.com/CLIUtils/CLI11/pull/54) +* Added `CLI::ArgumentMismatch` [#56](https://github.com/CLIUtils/CLI11/pull/56) and fixed missing failure if one arg expected [#55](https://github.com/CLIUtils/CLI11/issues/55) +* Support for minimum unlimited expected arguments [#56](https://github.com/CLIUtils/CLI11/pull/56) +* Single internal arg parse function [#56](https://github.com/CLIUtils/CLI11/pull/56) +* Allow options to be disabled from INI file, rename `add_config` to `set_config` [#60](https://github.com/CLIUtils/CLI11/pull/60) + +> ### Converting from CLI11 1.2: +> +> * `app.parse` no longer returns a vector. Instead, use `app.remaining(true)`. +> * `"hidden"` is no longer a special group name, instead use `""` +> * Validators API has changed to return an error string; use `.empty()` to get the old bool back +> * Use `.set_help_flag` instead of accessing the help pointer directly (discouraged, but not removed yet) +> * `add_config` has been renamed to `set_config` +> * Errors thrown in some cases are slightly more specific + +## Version 1.2: Stability + +This release focuses on making CLI11 behave properly in corner cases, and with config files on the command line. This includes fixes for a variety of reported issues. A few features were added to make life easier, as well; such as a new flag callback and a macro for the parse command. 
+ +* Added functional form of flag [#33](https://github.com/CLIUtils/CLI11/pull/33), automatic on C++14 +* Fixed Config file search if passed on command line [#30](https://github.com/CLIUtils/CLI11/issues/30) +* Added `CLI11_PARSE(app, argc, argv)` macro for simple parse commands (does not support returning arg) +* The name string can now contain spaces around commas [#29](https://github.com/CLIUtils/CLI11/pull/29) +* `set_default_str` now only sets string, and `set_default_val` will evaluate the default string given [#26](https://github.com/CLIUtils/CLI11/issues/26) +* Required positionals now take priority over subcommands [#23](https://github.com/CLIUtils/CLI11/issues/23) +* Extra requirements enforced by Travis + +## Version 1.1: Feedback + +This release incorporates feedback from the release announcement. The examples are slowly being expanded, some corner cases improved, and some new functionality for tricky parsing situations. + +* Added simple support for enumerations, allow non-printable objects [#12](https://github.com/CLIUtils/CLI11/issues/12) +* Added `app.parse_order()` with original parse order ([#13](https://github.com/CLIUtils/CLI11/issues/13), [#16](https://github.com/CLIUtils/CLI11/pull/16)) +* Added `prefix_command()`, which is like `allow_extras` but instantly stops and returns. ([#8](https://github.com/CLIUtils/CLI11/issues/8), [#17](https://github.com/CLIUtils/CLI11/pull/17)) +* Removed Windows warning ([#10](https://github.com/CLIUtils/CLI11/issues/10), [#20](https://github.com/CLIUtils/CLI11/pull/20)) +* Some improvements to CMake, detect Python and no dependencies on Python 2 (like Python 3) ([#18](https://github.com/CLIUtils/CLI11/issues/18), [#21](https://github.com/CLIUtils/CLI11/pull/21)) + +## Version 1.0: Official release + +This is the first stable release for CLI11. Future releases will try to remain backward compatible and will follow semantic versioning if possible. 
There were a few small changes since version 0.9: + +* Cleanup using `clang-tidy` and `clang-format` +* Small improvements to Timers, easier to subclass Error +* Move to 3-Clause BSD license + +## Version 0.9: Polish + +This release focused on cleaning up the most exotic compiler warnings, fixing a few oddities of the config parser, and added a more natural method to check subcommands. + +* Better CMake named target (CLI11) +* More warnings added, fixed +* Ini output now includes `=false` when `default_also` is true +* Ini no longer lists the help pointer +* Added test for inclusion in multiple files and linking, fixed issues (rarely needed for CLI, but nice for tools) +* Support for complex numbers +* Subcommands now test true/false directly or with `->parsed()`, cleaner parse + +## Version 0.8: CLIUtils + +This release moved the repository to the CLIUtils master organization. + +* Moved to CLIUtils on GitHub +* Fixed docs build and a few links + +## Version 0.7: Code coverage 100% + +Lots of small bugs fixed when adding code coverage, better in edge cases. Much more powerful ini support. + +* Allow comments in ini files (lines starting with `;`) +* Ini files support flags, vectors, subcommands +* Added CodeCov code coverage reports +* Lots of small bugfixes related to adding tests to increase coverage to 100% +* Error handling now uses scoped enum in errors +* Reparsing rules changed a little to accommodate Ini files. Callbacks are now called when parsing INI, and reset any time results are added. +* Adding extra utilities in full version only, `Timer` (not needed for parsing, but useful for general CLI applications). +* Better support for custom `add_options` like functions. + +## Version 0.6: Cleanup + +Lots of cleanup and docs additions made it into this release. Parsing is simpler and more robust; fall through option added and works as expected; much more consistent variable names internally. 
+ +* Simplified parsing to use `vector` only +* Fixed fallthrough, made it optional as well (default: off): `.fallthrough()`. +* Added string versions of `->requires()` and `->excludes()` for consistency. +* Renamed protected members for internal consistency, grouped docs. +* Added the ability to add a number to `.require_subcommand()`. + +## Version 0.5: Windows support + +* Allow `Hidden` options. +* Throw `OptionAlreadyAdded` errors for matching subcommands or options, with ignore-case included, tests +* `->ignore_case()` added to subcommands, options, and `add_set_ignore_case`. Subcommands inherit setting from parent App on creation. +* Subcommands now can be "chained", that is, left over arguments can now include subcommands that then get parsed. Subcommands are now a list (`get_subcommands`). Added `got_subcommand(App_or_name)` to check for subcommands. +* Added `.allow_extras()` to disable error on failure. Parse returns a vector of leftover options. Renamed error to `ExtrasError`, and now triggers on extra options too. +* Added `require_subcommand` to `App`, to simplify forcing subcommands. Do **not** do `add_subcommand()->require_subcommand`, since that is the subcommand, not the master `App`. +* Added printout of ini file text given parsed options, skips flags. 
+* Support for quotes and spaces in ini files +* Fixes to allow support for Windows (added Appveyor) (Uses `-`, not `/` syntax) + +## Version 0.4: Ini support + +* Updates to help print +* Removed `run`, please use `parse` unless you subclass and add it +* Supports ini files mixed with command line, tested +* Added Range for further Plumbum compatibility +* Added function to print out ini file + +## Version 0.3: Plumbum compatibility + +* Added `->requires`, `->excludes`, and `->envname` from [Plumbum](http://plumbum.readthedocs.io/en/latest/) +* Supports `->mandatory` from Plumbum +* More tests for help strings, improvements in formatting +* Support type and set syntax in positionals help strings +* Added help groups, with `->group("name")` syntax +* Added initial support for ini file reading with `add_config` option. +* Supports GCC 4.7 again +* Clang 3.5 now required for tests due to googlemock usage, 3.4 should still work otherwise +* Changes `setup` for an explicit help bool in constructor/`add_subcommand` + +## Version 0.2: Leaner and meaner + +* Moved to simpler syntax, where `Option` pointers are returned and operated on +* Removed `make_` style options +* Simplified Validators, now only requires `->check(function)` +* Removed Combiners +* Fixed pointers to Options, stored in `unique_ptr` now +* Added `Option_p` and `App_p`, mostly for internal use +* Startup sequence, including help flag, can be modified by subclasses + +## Version 0.1: First release + +First release before major cleanup. Still has make syntax and combiners; very clever syntax but not the best or most commonly expected way to work. 
diff --git a/thirdparty/CLI11/CLI11.CPack.Description.txt b/thirdparty/CLI11/CLI11.CPack.Description.txt new file mode 100644 index 0000000..1fd074a --- /dev/null +++ b/thirdparty/CLI11/CLI11.CPack.Description.txt @@ -0,0 +1,2 @@ +CLI11 provides all the features you expect in a powerful command line parser, with a beautiful, minimal syntax and no dependencies beyond C++11. It is header only, and comes in a single file form for easy inclusion in projects. It is easy to use for small projects, but powerful enough for complex command line projects, and can be customized for frameworks. + diff --git a/thirdparty/CLI11/CLI11.hpp.in b/thirdparty/CLI11/CLI11.hpp.in new file mode 100644 index 0000000..8de9c53 --- /dev/null +++ b/thirdparty/CLI11/CLI11.hpp.in @@ -0,0 +1,67 @@ +// CLI11: Version {version} +// Originally designed by Henry Schreiner +// https://github.com/CLIUtils/CLI11 +// +// This is a standalone header file generated by MakeSingleHeader.py in CLI11/scripts +// from: {git} +// +// CLI11 {version} Copyright (c) 2017-2020 University of Cincinnati, developed by Henry +// Schreiner under NSF AWARD 1414736. All rights reserved. +// +// Redistribution and use in source and binary forms of CLI11, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// 3. Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Standard combined includes: +{public_includes} + +{version_hpp} + +{macros_hpp} + +{validators_hpp_filesystem} + +namespace {namespace} {{ + +{string_tools_hpp} + +{error_hpp} + +{type_tools_hpp} + +{split_hpp} + +{config_fwd_hpp} + +{validators_hpp} + +{formatter_fwd_hpp} + +{option_hpp} + +{app_hpp} + +{config_hpp} + +{formatter_hpp} + +}} // namespace {namespace} diff --git a/thirdparty/CLI11/CMakeLists.txt b/thirdparty/CLI11/CMakeLists.txt new file mode 100644 index 0000000..ee0228e --- /dev/null +++ b/thirdparty/CLI11/CMakeLists.txt @@ -0,0 +1,327 @@ +cmake_minimum_required(VERSION 3.4) +# Note: this is a header only library. If you have an older CMake than 3.4, +# just add the CLI11/include directory and that's all you need to do. + +# Make sure users don't get warnings on a tested (3.4 to 3.16) version +# of CMake. For most of the policies, the new version is better (hence the change). 
+# We don't use the 3.4...3.17 syntax because of a bug in an older MSVC's +# built-in and modified CMake 3.11 +if(${CMAKE_VERSION} VERSION_LESS 3.17) + cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}) +else() + cmake_policy(VERSION 3.17) +endif() + +set(VERSION_REGEX "#define CLI11_VERSION[ \t]+\"(.+)\"") + +# Read in the line containing the version +file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/CLI/Version.hpp" + VERSION_STRING REGEX ${VERSION_REGEX}) + +# Pick out just the version +string(REGEX REPLACE ${VERSION_REGEX} "\\1" VERSION_STRING "${VERSION_STRING}") + +# Add the project +project(CLI11 LANGUAGES CXX VERSION ${VERSION_STRING}) + +# Print the version number of CMake if this is the main project +if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME) + message(STATUS "CMake ${CMAKE_VERSION}") +endif() + +include(CMakeDependentOption) +include(GNUInstallDirs) +include(CTest) + +if(NOT CMAKE_VERSION VERSION_LESS 3.11) + include(FetchContent) +endif() + +find_package(Doxygen) + +list(APPEND force-libcxx "CMAKE_CXX_COMPILER_ID STREQUAL \"Clang\"") +list(APPEND force-libcxx "CMAKE_SYSTEM_NAME STREQUAL \"Linux\"") +list(APPEND force-libcxx "CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME") + +list(APPEND build-docs "CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME") +list(APPEND build-docs "NOT CMAKE_VERSION VERSION_LESS 3.11") +list(APPEND build-docs "Doxygen_FOUND") + +# Necessary to support paths with spaces, see #457 +if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/docs") + set(docs_EXIST TRUE) +else() + set(docs_EXIST FALSE) +endif() +list(APPEND build-docs "docs_EXIST") + +if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/examples") + set(examples_EXIST TRUE) +else() + set(examples_EXIST FALSE) +endif() + +option(CLI11_WARNINGS_AS_ERRORS "Turn all warnings into errors (for CI)") +option(CLI11_SINGLE_FILE "Generate a single header file") +cmake_dependent_option(CLI11_SANITIZERS + "Download the sanitizers CMake config" OFF + "NOT CMAKE_VERSION VERSION_LESS 3.11" OFF) + 
+cmake_dependent_option(CLI11_BUILD_DOCS + "Build CLI11 documentation" ON + "${build-docs}" OFF) + +cmake_dependent_option(CLI11_BUILD_TESTS + "Build CLI11 tests" ON + "BUILD_TESTING;CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME" OFF) + +cmake_dependent_option(CLI11_BUILD_EXAMPLES + "Build CLI11 examples" ON + "CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME;${examples_EXIST}" OFF) + +cmake_dependent_option(CLI11_BUILD_EXAMPLES_JSON + "Build CLI11 json example" OFF + "CLI11_BUILD_EXAMPLES;NOT CMAKE_VERSION VERSION_LESS 3.11" OFF) + +cmake_dependent_option(CLI11_SINGLE_FILE_TESTS + "Duplicate all the tests for a single file build" OFF + "BUILD_TESTING;CLI11_SINGLE_FILE" OFF) + +cmake_dependent_option(CLI11_INSTALL + "Install the CLI11 folder to include during install process" ON + "CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME" OFF) + +cmake_dependent_option(CLI11_FORCE_LIBCXX + "Force clang to use libc++ instead of libstdc++ (Linux only)" OFF + "${force-libcxx}" OFF) + +cmake_dependent_option(CLI11_CUDA_TESTS + "Build the tests with NVCC to check for warnings there - requires CMake 3.9+" OFF + "CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME" OFF) + +cmake_dependent_option(CLI11_CLANG_TIDY + "Look for and use Clang-Tidy" OFF + "CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME;NOT CMAKE_VERSION VERSION_LESS 3.6" OFF) +set(CLI11_CLANG_TIDY_OPTIONS "" CACHE STRING "Clang tidy options, such as -fix, semicolon separated") + +if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME AND NOT DEFINED CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 11) +endif() + +if(NOT DEFINED CMAKE_CXX_EXTENSIONS) + set(CMAKE_CXX_EXTENSIONS OFF) +endif() + +if(NOT DEFINED CMAKE_CXX_STANDARD_REQUIRED) + set(CMAKE_CXX_STANDARD_REQUIRED ON) +endif() + + +# Allow IDE's to group targets into folders +if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME) + set_property(GLOBAL PROPERTY USE_FOLDERS ON) +endif() + +if(CMAKE_VERSION VERSION_LESS 3.10) + message(STATUS "CMake 3.10+ adds Doxygen support. 
Update CMake to build documentation") +elseif(NOT Doxygen_FOUND) + message(STATUS "Doxygen not found, building docs has been disabled") +endif() + +# Special target that adds warnings. Is not exported. +add_library(CLI11_warnings INTERFACE) + +set(unix-warnings -Wall -Wextra -pedantic -Wshadow -Wsign-conversion -Wswitch-enum) + +# Buggy in GCC 4.8 +if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) + list(APPEND unix-warnings -Weffc++) +endif() + +target_compile_options(CLI11_warnings + INTERFACE + $<$:-stdlib=libc++> + $<$:/W4 $<$:/WX>> + $<$>:${unix-warnings} $<$:-Werror>>) + + + if(NOT CMAKE_VERSION VERSION_LESS 3.13) + target_link_options(CLI11_warnings + INTERFACE + $<$:-stdlib=libc++>) + endif() + + +# Allow IDE's to group targets into folders +add_library(CLI11 INTERFACE) +add_library(CLI11::CLI11 ALIAS CLI11) # for add_subdirectory calls + +# Duplicated because CMake adds the current source dir if you don't. +target_include_directories(CLI11 INTERFACE + $ + $) + + +# To see in IDE, headers must be listed for target +set(header-patterns "${PROJECT_SOURCE_DIR}/include/CLI/*") +if(NOT CMAKE_VERSION VERSION_LESS 3.12) + list(INSERT header-patterns 0 CONFIGURE_DEPENDS) +endif() + +file(GLOB CLI11_headers ${header-patterns}) + +# Allow tests to be run on CUDA + if(CLI11_CUDA_TESTS) + enable_language(CUDA) + + # Print out warning and error numbers + set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcudafe --display_error_number") + endif() + + +# Prepare Clang-Tidy +if(CLI11_CLANG_TIDY) + find_program( + CLANG_TIDY_EXE + NAMES "clang-tidy" + DOC "Path to clang-tidy executable" + REQUIRED + ) + + set(DO_CLANG_TIDY "${CLANG_TIDY_EXE}" ${CLI11_CLANG_TIDY_OPTIONS}) +endif() + + +# This folder should be installed +if(CLI11_INSTALL) + install(DIRECTORY "${PROJECT_SOURCE_DIR}/include/" + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}") + + # Make an export target + install(TARGETS CLI11 EXPORT CLI11Targets) + + # Use find_package on the 
installed package + # Since we have no custom code, we can directly write this + # to Config.cmake (otherwise we'd have a custom config and would + # import Targets.cmake + + # Add the version in a CMake readable way + configure_file("cmake/CLI11ConfigVersion.cmake.in" + "CLI11ConfigVersion.cmake" @ONLY) + + # Make version available in the install + install(FILES "${PROJECT_BINARY_DIR}/CLI11ConfigVersion.cmake" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/CLI11") + + # Install the export target as a file + install(EXPORT CLI11Targets + FILE CLI11Config.cmake + NAMESPACE CLI11:: + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/CLI11") + + # Use find_package on the installed package + export(TARGETS CLI11 + NAMESPACE CLI11:: + FILE CLI11Targets.cmake) + + include(cmake/CLI11GeneratePkgConfig.cmake) + + # Register in the user cmake package registry + export(PACKAGE CLI11) +endif() + +if(CLI11_SINGLE_FILE) + # Single file test + if(CMAKE_VERSION VERSION_LESS 3.12) + find_package(PythonInterp REQUIRED) + add_executable(Python::Interpreter IMPORTED) + set_target_properties(Python::Interpreter + PROPERTIES + IMPORTED_LOCATION "${PYTHON_EXECUTABLE}" + VERSION "${PYTHON_VERSION_STRING}") + else() + find_package(Python COMPONENTS Interpreter REQUIRED) + endif() + + file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/include") + add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/include/CLI11.hpp" + COMMAND Python::Interpreter + "${CMAKE_CURRENT_SOURCE_DIR}/scripts/MakeSingleHeader.py" + ${CLI11_headers} + --main "${CMAKE_CURRENT_SOURCE_DIR}/CLI11.hpp.in" + --output "${CMAKE_CURRENT_BINARY_DIR}/include/CLI11.hpp" + --version "${CLI11_VERSION}" + DEPENDS + "${CMAKE_CURRENT_SOURCE_DIR}/include/CLI/CLI.hpp" + ${CLI11_headers}) + add_custom_target(CLI11-generate-single-file ALL + DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/include/CLI11.hpp") + set_property(TARGET CLI11-generate-single-file PROPERTY FOLDER "Scripts") + install(FILES "${CMAKE_CURRENT_BINARY_DIR}/include/CLI11.hpp" + 
DESTINATION include) + add_library(CLI11_SINGLE INTERFACE) + target_link_libraries(CLI11_SINGLE INTERFACE CLI11) + add_dependencies(CLI11_SINGLE CLI11-generate-single-file) + target_compile_definitions(CLI11_SINGLE INTERFACE -DCLI11_SINGLE_FILE) + target_include_directories(CLI11_SINGLE INTERFACE + $ + $) +endif() + + +if(CLI11_BUILD_TESTS) + add_subdirectory(tests) +endif() + +if(CLI11_BUILD_EXAMPLES) + add_subdirectory(examples) +endif() + +if(CLI11_BUILD_DOCS) + add_subdirectory(docs) +endif() + +# From a build system, this might not be included. +if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/book") + add_subdirectory(book) +endif() + +# Packaging support +set(CPACK_PACKAGE_VENDOR "github.com/CLIUtils/CLI11") +set(CPACK_PACKAGE_CONTACT "https://${CPACK_PACKAGE_VENDOR}") +set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR}) # Automatic in CMake 3.12+ +set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR}) # Automatic in CMake 3.12+ +set(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH}) # Automatic in CMake 3.12+ +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Command line parser with simple and intuitive interface") +set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") +set(CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_SOURCE_DIR}/README.md") +set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/CLI11.CPack.Description.txt") +set(CPACK_SOURCE_GENERATOR "TGZ;ZIP") + +# CPack collects *everything* except what's listed here. 
+set(CPACK_SOURCE_IGNORE_FILES + /.git + /dist + /.*build.* + /\\\\.DS_Store + /.*\\\\.egg-info + /var + /azure-pipelines.yml + /.ci + /docs + /examples + /test_package + /book + /.travis.yml + .swp + /.all-contributorsrc + /.appveyor.yml + /.pre-commit.*yaml +) + +set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "all") +set(CPACK_DEBIAN_COMPRESSION_TYPE "xz") +set(CPACK_DEBIAN_PACKAGE_NAME "libcli11-dev") + +include(CPack) + diff --git a/thirdparty/CLI11/CPPLINT.cfg b/thirdparty/CLI11/CPPLINT.cfg new file mode 100644 index 0000000..0a1758d --- /dev/null +++ b/thirdparty/CLI11/CPPLINT.cfg @@ -0,0 +1,14 @@ +set noparent +linelength=120 # As in .clang-format + +# Unused filters +filter=-build/c++11 # Reports e.g. chrono and thread, which overlap with Chromium's API. Not applicable to general C++ projects. +filter=-build/include_order # Requires unusual include order that encourages creating not self-contained headers +filter=-readability/nolint # Conflicts with clang-tidy +filter=-readability/check # Catch uses CHECK(a == b) (Tests only) +filter=-build/namespaces # Currently using it for one test (Tests only) +filter=-runtime/references # Requires fundamental change of API, don't see need for this +filter=-whitespace/blank_line # Unnecessarily strict with blank lines that otherwise help with readability +filter=-whitespace/indent # Requires strange 3-space indent of private/protected/public markers +filter=-whitespace/parens,-whitespace/braces # Conflict with clang-format + diff --git a/thirdparty/CLI11/LICENSE b/thirdparty/CLI11/LICENSE new file mode 100644 index 0000000..17739d1 --- /dev/null +++ b/thirdparty/CLI11/LICENSE @@ -0,0 +1,25 @@ +CLI11 1.8 Copyright (c) 2017-2019 University of Cincinnati, developed by Henry +Schreiner under NSF AWARD 1414736. All rights reserved. + +Redistribution and use in source and binary forms of CLI11, with or without +modification, are permitted provided that the following conditions are met: + +1. 
Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/thirdparty/CLI11/README.md b/thirdparty/CLI11/README.md new file mode 100644 index 0000000..846d44e --- /dev/null +++ b/thirdparty/CLI11/README.md @@ -0,0 +1,1043 @@ +# CLI11: Command line parser for C++11 + +![CLI11 Logo](./docs/CLI11_300.png) + +[![Build Status Linux and macOS][travis-badge]][travis] +[![Build Status Windows][appveyor-badge]][appveyor] +[![Build Status Azure][azure-badge]][azure] +[![Actions Status][actions-badge]][actions-link] +[![Code Coverage][codecov-badge]][codecov] +[![Codacy Badge][codacy-badge]][codacy-link] +[![Join the chat at https://gitter.im/CLI11gitter/Lobby][gitter-badge]][gitter] +[![License: BSD][license-badge]](./LICENSE) +[![Latest release][releases-badge]][github releases] +[![DOI][doi-badge]][doi-link] +[![Conan.io][conan-badge]][conan-link] +[![Try CLI11 1.9 online][wandbox-badge]][wandbox-link] + +[What's new](./CHANGELOG.md) • +[Documentation][gitbook] • +[API Reference][api-docs] + +CLI11 is a command line parser for C++11 and beyond that provides a rich feature set with a simple and intuitive interface. 
+ +## Table of Contents + +- [Background](#background) + - [Introduction](#introduction) + - [Why write another CLI parser?](#why-write-another-cli-parser) + - [Other parsers](#other-parsers) + - [Features not supported by this library](#features-not-supported-by-this-library) +- [Install](#install) +- [Usage](#usage) + - [Adding options](#adding-options) + - [Option types](#option-types) + - [Example](#example) + - [Option options](#option-options) + - [Validators](#validators) + - [Transforming Validators](#transforming-validators) + - [Validator operations](#validator-operations) + - [Custom Validators](#custom-validators) + - [Querying Validators](#querying-validators) + - [Getting Results](#getting-results) + - [Subcommands](#subcommands) + - [Subcommand options](#subcommand-options) + - [Option groups](#option-groups) + - [Callbacks](#callbacks) + - [Configuration file](#configuration-file) + - [Inheriting defaults](#inheriting-defaults) + - [Formatting](#formatting) + - [Subclassing](#subclassing) + - [How it works](#how-it-works) + - [Utilities](#utilities) + - [Other libraries](#other-libraries) +- [API](#api) +- [Examples](#Examples) +- [Contribute](#contribute) +- [License](#license) + +Features that were added in the last released major version are marked with "🆕". Features only available in master are marked with "🚧". + +## Background + +### Introduction + +CLI11 provides all the features you expect in a powerful command line parser, with a beautiful, minimal syntax and no dependencies beyond C++11. It is header only, and comes in a single file form for easy inclusion in projects. It is easy to use for small projects, but powerful enough for complex command line projects, and can be customized for frameworks. +It is tested on [Travis][], [AppVeyor][], [Azure][], and [GitHub Actions][actions-link], and is used by the [GooFit GPU fitting framework][goofit]. It was inspired by [`plumbum.cli`][plumbum] for Python. 
CLI11 has a user friendly introduction in this README, a more in-depth tutorial [GitBook][], as well as [API documentation][api-docs] generated by Travis. +See the [changelog](./CHANGELOG.md) or [GitHub Releases][] for details for current and past releases. Also see the [Version 1.0 post][], [Version 1.3 post][], or [Version 1.6 post][] for more information. + +You can be notified when new releases are made by subscribing to <https://github.com/CLIUtils/CLI11/releases.atom> on an RSS reader, like Feedly, or use the releases mode of the GitHub watching tool. + +### Why write another CLI parser? + +An acceptable CLI parser library should be all of the following: + +- Easy to include (i.e., header only, one file if possible, **no external requirements**). +- Short, simple syntax: This is one of the main reasons to use a CLI parser, it should make variables from the command line nearly as easy to define as any other variables. If most of your program is hidden in CLI parsing, this is a problem for readability. +- C++11 or better: Should work with GCC 4.8+ (default on CentOS/RHEL 7), Clang 3.4+, AppleClang 7+, NVCC 7.0+, or MSVC 2015+. +- Work on Linux, macOS, and Windows. +- Well tested using [Travis][] (Linux) and [AppVeyor][] (Windows) or [Azure][] (all three). "Well" is defined as having good coverage measured by [CodeCov][]. +- Clear help printing. +- Nice error messages. +- Standard shell idioms supported naturally, like grouping flags, a positional separator, etc. +- Easy to execute, with help, parse errors, etc. providing correct exit and details. +- Easy to extend as part of a framework that provides "applications" to users. +- Usable subcommand syntax, with support for multiple subcommands, nested subcommands, option groups, and optional fallthrough (explained later). +- Ability to add a configuration file (`ini` or `TOML`🆕 format), and produce it as well. +- Produce real values that can be used directly in code, not something you have to pay compute time to look up, for HPC applications. 
+- Work with standard types, simple custom types, and extensible to exotic types. +- Permissively licensed. + +### Other parsers + +
The major CLI parsers for C++ include, with my biased opinions: (click to expand)

+ +| Library | My biased opinion | +| ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Boost Program Options][] | A great library if you already depend on Boost, but its pre-C++11 syntax is really odd and setting up the correct call in the main function is poorly documented (and is nearly a page of code). A simple wrapper for the Boost library was originally developed, but was discarded as CLI11 became more powerful. The idea of capturing a value and setting it originated with Boost PO. [See this comparison.][cli11-po-compare] | +| [The Lean Mean C++ Option Parser][] | One header file is great, but the syntax is atrocious, in my opinion. It was quite impractical to wrap the syntax or to use in a complex project. It seems to handle standard parsing quite well. | +| [TCLAP][] | The not-quite-standard command line parsing causes common shortcuts to fail. It also seems to be poorly supported, with only minimal bugfixes accepted. Header only, but in quite a few files. Has not managed to get enough support to move to GitHub yet. No subcommands. Produces wrapped values. | +| [Cxxopts][] | C++11, single file, and nice CMake support, but requires regex, therefore GCC 4.8 (CentOS 7 default) does not work. Syntax closely based on Boost PO, so not ideal but familiar. | +| [DocOpt][] | Completely different approach to program options in C++11, you write the docs and the interface is generated. Too fragile and specialized. 
| + +After I wrote this, I also found the following libraries: + +| Library | My biased opinion | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [GFlags][] | The Google Commandline Flags library. Uses macros heavily, and is limited in scope, missing things like subcommands. It provides a simple syntax and supports config files/env vars. | +| [GetOpt][] | Very limited C solution with long, convoluted syntax. Does not support much of anything, like help generation. Always available on UNIX, though (but in different flavors). | +| [ProgramOptions.hxx][] | Interesting library, less powerful and no subcommands. Nice callback system. | +| [Args][] | Also interesting, and supports subcommands. I like the optional-like design, but CLI11 is cleaner and provides direct value access, and is less verbose. | +| [Argument Aggregator][] | I'm a big fan of the [fmt][] library, and the try-catch statement looks familiar. :thumbsup: Doesn't seem to support subcommands. | +| [Clara][] | Simple library built for the excellent [Catch][] testing framework. Unique syntax, limited scope. | +| [Argh!][] | Very minimalistic C++11 parser, single header. Don't have many features. No help generation?!?! At least it's exception-free. | +| [CLI][] | Custom language and parser. Huge build-system overkill for very little benefit. Last release in 2009, but still occasionally active. | +| [argparse][] | C++17 single file argument parser. Design seems similar to CLI11 in some ways. The author has several other interesting projects. | + +See [Awesome C++][] for a less-biased list of parsers. You can also find other single file libraries at [Single file libs][]. + +

+
+ +None of these libraries fulfill all the above requirements, or really even come close. As you probably have already guessed, CLI11 does. +So, this library was designed to provide a great syntax, good compiler compatibility, and minimal installation fuss. + +### Features not supported by this library + +There are some other possible "features" that are intentionally not supported by this library: + +- Non-standard variations on syntax, like `-long` options. This is non-standard and should be avoided, so that is enforced by this library. +- Completion of partial options, such as Python's `argparse` supplies for incomplete arguments. It's better not to guess. Most third party command line parsers for python actually reimplement command line parsing rather than using argparse because of this perceived design flaw. +- Autocomplete: This might eventually be added to both Plumbum and CLI11, but it is not supported yet. +- Wide strings / unicode: Since this uses the standard library only, it might be hard to properly implement, but I would be open to suggestions in how to do this. + +## Install + +To use, there are several methods: + +1. All-in-one local header: Copy `CLI11.hpp` from the [most recent release][github releases] into your include directory, and you are set. This is combined from the source files for every release. This includes the entire command parser library, but does not include separate utilities (like `Timer`, `AutoTimer`). The utilities are completely self contained and can be copied separately. +2. All-in-one global header: Like above, but copying the file to a shared folder location like `/opt/CLI11`. Then, the C++ include path has to be extended to point at this folder. With CMake, use `include_directories(/opt/CLI11)` +3. Local headers and target: Use `CLI/*.hpp` files. You could check out the repository as a git submodule, for example. With CMake, you can use `add_subdirectory` and the `CLI11::CLI11` interface target when linking. 
If not using a submodule, you must ensure that the copied files are located inside the same tree directory than your current project, to prevent an error with CMake and `add_subdirectory`. +4. Global headers: Use `CLI/*.hpp` files stored in a shared folder. You could check out the git repository in a system-wide folder, for example `/opt/`. With CMake, you could add to the include path via: +```bash +if(NOT DEFINED CLI11_DIR) +set (CLI11_DIR "/opt/CLI11" CACHE STRING "CLI11 git repository") +endif() +include_directories(${CLI11_DIR}/include) +``` +And then in the source code (adding several headers might be needed to prevent linker errors): +```cpp +#include "CLI/App.hpp" +#include "CLI/Formatter.hpp" +#include "CLI/Config.hpp" +``` +5. Global headers and target: configuring and installing the project is required for linking CLI11 to your project in the same way as you would do with any other external library. With CMake, this step allows using `find_package(CLI11 CONFIG REQUIRED)` and then using the `CLI11::CLI11` target when linking. If `CMAKE_INSTALL_PREFIX` was changed during install to a specific folder like `/opt/CLI11`, then you have to pass `-DCLI11_DIR=/opt/CLI11` when building your current project. You can also use [Conan.io][conan-link] or [Hunter][]. + (These are just conveniences to allow you to use your favorite method of managing packages; it's just header only so including the correct path and + using C++11 is all you really need.) + +To build the tests, checkout the repository and use CMake: + +```bash +mkdir build +cd build +cmake .. +make +GTEST_COLOR=1 CTEST_OUTPUT_ON_FAILURE=1 make test +``` + +
Note: Special instructions for GCC 8

+If you are using GCC 8 and using it in C++17 mode with CLI11, note that CLI11 makes use of the `<filesystem>` header if available, but specifically for this compiler, the `filesystem` library is separate from the standard library and needs to be linked separately. So it is available but CLI11 doesn't use it by default. + +Specifically `libstdc++fs` needs to be added to the linking list and `CLI11_HAS_FILESYSTEM=1` has to be defined. Then the filesystem variant of the Validators could be used on GCC 8. GCC 9+ does not have this issue so the `<filesystem>` is used by default. + +There may also be other cases where a specific library needs to be linked. + +Defining `CLI11_HAS_FILESYSTEM=0` will remove the usage and hence any linking issue. + +In some cases certain clang compilations may require linking against `libc++fs`. These situations have not been encountered so the specific situations requiring them are unknown yet. + 

+
+ +## Usage + +### Adding options + +To set up, add options, and run, your main function will look something like this: + +```cpp +int main(int argc, char** argv) { + CLI::App app{"App description"}; + + std::string filename = "default"; + app.add_option("-f,--file", filename, "A help string"); + + CLI11_PARSE(app, argc, argv); + return 0; +} +``` + +
Note: If you don't like macros, this is what that macro expands to: (click to expand)

+ +```cpp +try { + app.parse(argc, argv); +} catch (const CLI::ParseError &e) { + return app.exit(e); +} +``` + +The try/catch block ensures that `-h,--help` or a parse error will exit with the correct return code (selected from `CLI::ExitCodes`). (The return here should be inside `main`). You should not assume that the option values have been set inside the catch block; for example, help flags intentionally short-circuit all other processing for speed and to ensure required options and the like do not interfere. + +

+
+ +The initialization is just one line, adding options is just two each. The parse macro is just one line (or 5 for the contents of the macro). After the app runs, the filename will be set to the correct value if it was passed, otherwise it will be set to the default. You can check to see if this was passed on the command line with `app.count("--file")`. + +#### Option types + +While all options internally are the same type, there are several ways to add an option depending on what you need. The supported values are: + +```cpp +// Add options +app.add_option(option_name, help_str="") + +app.add_option(option_name, + variable_to_bind_to, // bool, char(see note)🚧, int, float, vector, enum, std::atomic 🚧, or string-like, or anything with a defined conversion from a string or that takes an int 🆕, double 🆕, or string in a constructor. Also allowed are tuples 🆕, std::array 🆕 or std::pair 🆕. Also supported are complex numbers🚧, wrapper types🚧, and containers besides vector🚧 of any other supported type. + help_string="") + +app.add_option_function(option_name, + function , // type can be any type supported by add_option + help_string="") + +app.add_complex(... // Special case: support for complex numbers ⚠️. Complex numbers are now fully supported in the add_option so this function is redundant. + +// char as an option type is supported before 2.0 but in 2.0 it defaulted to allowing single non numerical characters in addition to the numeric values. + +// 🆕 There is a template overload which takes two template parameters the first is the type of object to assign the value to, the second is the conversion type. The conversion type should have a known way to convert from a string, such as any of the types that work in the non-template version. If XC is a std::pair and T is some non pair type. Then a two argument constructor for T is called to assign the value. 
For tuples or other multi element types, XC must be a single type or a tuple like object of the same size as the assignment type +app.add_option(option_name, + T &output, // output must be assignable or constructible from a value of type XC + help_string="") + +// Add flags +app.add_flag(option_name, + help_string="") + +app.add_flag(option_name, + variable_to_bind_to, // bool, int, float, complex, containers, enum, std::atomic 🚧, or string-like, or any singular object with a defined conversion from a string like add_option + help_string="") + +app.add_flag_function(option_name, + function , + help_string="") + +app.add_flag_callback(option_name,function,help_string="") + +// Add subcommands +App* subcom = app.add_subcommand(name, description); + +Option_group *app.add_option_group(name,description); +``` + +An option name must start with a alphabetic character, underscore, a number, '?', or '@'. For long options, after the first character '.', and '-' are also valid characters. For the `add_flag*` functions '{' has special meaning. Names are given as a comma separated string, with the dash or dashes. An option or flag can have as many names as you want, and afterward, using `count`, you can use any of the names, with dashes as needed, to count the options. One of the names is allowed to be given without proceeding dash(es); if present the option is a positional option, and that name will be used on the help line for its positional form. + +The `add_option_function(...` function will typically require the template parameter be given unless a `std::function` object with an exact match is passed. The type can be any type supported by the `add_option` function. The function should throw an error (`CLI::ConversionError` or `CLI::ValidationError` possibly) if the value is not valid. 
+ +🆕 The two parameter template overload can be used in cases where you want to restrict the input such as +``` +double val; +app.add_option<double, unsigned int>("-v",val); +``` +which would first verify the input is convertible to an `unsigned int` before assigning it. Or using some variant type +``` +using vtype=std::variant<int, double, std::string>; + vtype v1; +app.add_option<vtype,std::string>("--vs",v1); +app.add_option<vtype,int>("--vi",v1); +app.add_option<vtype,double>("--vf",v1); +``` +otherwise the output would default to a string. The `add_option` can be used with any integral or floating point types, enumerations, or strings. Or any type that takes an int, double, or std\::string in an assignment operator or constructor. If an object can take multiple varieties of those, std::string takes precedence, then double then int. To better control which one is used or to use another type for the underlying conversions use the two parameter template to directly specify the conversion type. + +Types such as (std or boost) `optional<int>`, `optional<double>`, and `optional<std::string>` and any other wrapper types are supported directly. For purposes of CLI11 wrapper types are those which have a `value_type` definition. See [CLI11 Advanced Topics/Custom Converters][] for information on how you can add your own converters for additional types. + +Vector types can also be used in the two parameter template overload +``` +std::vector<double> v1; +app.add_option<std::vector<double>,int>("--vs",v1); +``` +would load a vector of doubles but ensure all values can be represented as integers. + +Automatic direct capture of the default string is disabled when using the two parameter template. Use `set_default_str(...)` or `->default_function(std::string())` to set the default string or capture function directly for these cases. + +Flag options specified through the `add_flag*` functions allow a syntax for the option names to default particular options to a false value or any other value if some flags are passed. 
For example: + +```cpp +app.add_flag("--flag,!--no-flag",result,"help for flag"); +``` + +specifies that if `--flag` is passed on the command line result will be true or contain a value of 1. If `--no-flag` is +passed `result` will contain false or -1 if `result` is a signed integer type, or 0 if it is an unsigned type. An +alternative form of the syntax is more explicit: `"--flag,--no-flag{false}"`; this is equivalent to the previous +example. This also works for short form options `"-f,!-n"` or `"-f,-n{false}"`. If `variable_to_bind_to` is anything but an integer value the +default behavior is to take the last value given, while if `variable_to_bind_to` is an integer type the behavior will be to sum +all the given arguments and return the result. This can be modified if needed by changing the `multi_option_policy` on each flag (this is not inherited). +The default value can be any value. For example if you wished to define a numerical flag: + +```cpp +app.add_flag("-1{1},-2{2},-3{3}",result,"numerical flag") +``` + +using any of those flags on the command line will result in the specified number in the output. Similar things can be done for string values, and enumerations, as long as the default value can be converted to the given type. + + +On a `C++14` compiler, you can pass a callback function directly to `.add_flag`, while in C++11 mode you'll need to use `.add_flag_function` if you want a callback function. The function will be given the number of times the flag was passed. You can throw a relevant `CLI::ParseError` to signal a failure. + +#### Example + +- `"one,-o,--one"`: Valid as long as not a flag, would create an option that can be specified positionally, or with `-o` or `--one` +- `"this"` Can only be passed positionally +- `"-a,-b,-c"` No limit to the number of non-positional option names + +The add commands return a pointer to an internally stored `Option`. 
+This option can be used directly to check for the count (`->count()`) after parsing to avoid a string based lookup. +⚠️ Deprecated: The `add_*` commands have a final argument than can be set to true, which causes the default value to be captured and printed on the command line with the help flag. Since CLI11 1.8, you can simply add `->capture_default_str()`. + +#### Option options + +Before parsing, you can set the following options: + +- `->required()`: The program will quit if this option is not present. This is `mandatory` in Plumbum, but required options seems to be a more standard term. For compatibility, `->mandatory()` also works. +- `->expected(N)`: Take `N` values instead of as many as possible, only for vector args. If negative, require at least `-N`; end with `--` or another recognized option or subcommand. +- `->type_name(typename)`: Set the name of an Option's type (`type_name_fn` allows a function instead) +- `->type_size(N)`: Set the intrinsic size of an option. The parser will require multiples of this number if negative. +- `->needs(opt)`: This option requires another option to also be present, opt is an `Option` pointer. +- `->excludes(opt)`: This option cannot be given with `opt` present, opt is an `Option` pointer. +- `->envname(name)`: Gets the value from the environment if present and not passed on the command line. +- `->group(name)`: The help group to put the option in. No effect for positional options. Defaults to `"Options"`. `""` will not show up in the help print (hidden). +- `->ignore_case()`: Ignore the case on the command line (also works on subcommands, does not affect arguments). +- `->ignore_underscore()`: Ignore any underscores in the options names (also works on subcommands, does not affect arguments). For example "option_one" will match with "optionone". 
This does not apply to short form options since they only have one character +- `->disable_flag_override()`: From the command line long form flag options can be assigned a value on the command line using the `=` notation `--flag=value`. If this behavior is not desired, the `disable_flag_override()` disables it and will generate an exception if it is done on the command line. The `=` does not work with short form flag options. +- `->allow_extra_args(true/false)`: 🚧 If set to true the option will take an unlimited number of arguments like a vector, if false it will limit the number of arguments to the size of the type used in the option. Default value depends on the nature of the type used, containers default to true, others default to false. +- `->delimiter(char)`: Allows specification of a custom delimiter for separating single arguments into vector arguments, for example specifying `->delimiter(',')` on an option would result in `--opt=1,2,3` producing 3 elements of a vector and the equivalent of --opt 1 2 3 assuming opt is a vector value. +- `->description(str)`: Set/change the description. +- `->multi_option_policy(CLI::MultiOptionPolicy::Throw)`: Set the multi-option policy. Shortcuts available: `->take_last()`, `->take_first()`,`->take_all()`, and `->join()`. This will only affect options expecting 1 argument or bool flags (which do not inherit their default but always start with a specific policy). +- `->check(std::function<std::string(const std::string &)>, validator_name="",validator_description="")`: Define a check function. The function should return a non empty string with the error message if the check fails. +- `->check(Validator)`: Use a Validator object to do the check see [Validators](#validators) for a description of available Validators and how to create new ones. +- `->transform(std::function<std::string(std::string &)>, validator_name="",validator_description="")`: Converts the input string into the output string, in-place in the parsed options. 
+- `->transform(Validator)`: Uses a Validator object to do the transformation see [Validators](#validators) for a description of available Validators and how to create new ones. +- `->each(std::function<void(const std::string &)>)`: Run this function on each value received, as it is received. It should throw a `ValidationError` if an error is encountered. +- `->configurable(false)`: Disable this option from being in a configuration file. +- `->capture_default_str()`: Store the current value attached and display it in the help string. +- `->default_function(std::string())`: Advanced: Change the function that `capture_default_str()` uses. +- `->always_capture_default()`: Always run `capture_default_str()` when creating new options. Only useful on an App's `option_defaults`. +- `->default_str(string)`: Set the default string directly. This string will also be used as a default value if no arguments are passed and the value is requested. +- `->default_val(value)`: 🆕 Generate the default string from a value and validate that the value is also valid. For options that assign directly to a value type the value in that type is also updated. Value must be convertible to a string(one of known types or have a stream operator). +- `->option_text(string)`: Sets the text between the option name and description. + + +These options return the `Option` pointer, so you can chain them together, and even skip storing the pointer entirely. The `each` function takes any function that has the signature `void(const std::string&)`; it should throw a `ValidationError` when validation fails. The help message will have the name of the parent option prepended. Since `each`, `check` and `transform` use the same underlying mechanism, you can chain as many as you want, and they will be executed in order. Operations added through `transform` are executed first in reverse order of addition, and `check` and `each` are run following the transform functions in order of addition. 
If you just want to see the unconverted values, use `.results()` to get the `std::vector` of results. + +On the command line, options can be given as: + +- `-a` (flag) +- `-abc` (flags can be combined) +- `-f filename` (option) +- `-ffilename` (no space required) +- `-abcf filename` (flags and option can be combined) +- `--long` (long flag) +- `--long_flag=true` (long flag with equals to override default value) +- `--file filename` (space) +- `--file=filename` (equals) + +If `allow_windows_style_options()` is specified in the application or subcommand options can also be given as: +- `/a` (flag) +- `/f filename` (option) +- `/long` (long flag) +- `/file filename` (space) +- `/file:filename` (colon) +- `/long_flag:false` (long flag with : to override the default value) += Windows style options do not allow combining short options or values not separated from the short option like with `-` options + +Long flag options may be given with an `=` to allow specifying a false value, or some other value to the flag. See [config files](#configuration-file) for details on the values supported. NOTE: only the `=` or `:` for windows-style options may be used for this, using a space will result in the argument being interpreted as a positional argument. This syntax can override the default values, and can be disabled by using `disable_flag_override()`. + +Extra positional arguments will cause the program to exit, so at least one positional option with a vector is recommended if you want to allow extraneous arguments. +If you set `.allow_extras()` on the main `App`, you will not get an error. You can access the missing options using `remaining` (if you have subcommands, `app.remaining(true)` will get all remaining options, subcommands included). 
+If the remaining arguments are to be processed by another `App` then the function `remaining_for_passthrough()` can be used to get the remaining arguments in reverse order such that `app.parse(vector)` works directly and could even be used inside a subcommand callback. + +You can access a vector of pointers to the parsed options in the original order using `parse_order()`. +If `--` is present in the command line that does not end an unlimited option, then +everything after that is positional only. + +#### Validators +Validators are structures to check or modify inputs, they can be used to verify that an input meets certain criteria or transform it into another value. They are added through the `check` or `transform` functions. The differences between the two functions are that checks do not modify the input whereas transforms can and are executed before any Validators added through `check`. + +CLI11 has several Validators built-in that perform some common checks + +- `CLI::IsMember(...)`: Require an option be a member of a given set. See [Transforming Validators](#transforming-validators) for more details. +- `CLI::Transformer(...)`: Modify the input using a map. See [Transforming Validators](#transforming-validators) for more details. +- `CLI::CheckedTransformer(...)`: Modify the input using a map, and require that the input is either in the set or already one of the outputs of the set. See [Transforming Validators](#transforming-validators) for more details. +- `CLI::AsNumberWithUnit(...)`: Modify the `<NUMBER> <UNIT>` pair by matching the unit and multiplying the number by the corresponding factor. It can be used as a base for transformers, that accept things like size values (`1 KB`) or durations (`0.33 ms`). +- `CLI::AsSizeValue(...)`: Convert inputs like `100b`, `42 KB`, `101 Mb`, `11 Mib` to absolute values. `KB` can be configured to be interpreted as 10^3 or 2^10. +- `CLI::ExistingFile`: Requires that the file exists if given. 
+- `CLI::ExistingDirectory`: Requires that the directory exists. +- `CLI::ExistingPath`: Requires that the path (file or directory) exists. +- `CLI::NonexistentPath`: Requires that the path does not exist. +- `CLI::Range(min,max)`: Requires that the option be between min and max (make sure to use floating point if needed). Min defaults to 0. +- `CLI::Bounded(min,max)`: Modify the input such that it is always between min and max (make sure to use floating point if needed). Min defaults to 0. Will produce an error if conversion is not possible. +- `CLI::PositiveNumber`: Requires the number be greater than 0 +- `CLI::NonNegativeNumber`: Requires the number be greater or equal to 0 +- `CLI::Number`: Requires the input be a number. +- `CLI::ValidIPV4`: Requires that the option be a valid IPv4 string e.g. `'255.255.255.255'`, `'10.1.1.7'`. +- `CLI::TypeValidator<TYPE>`:🚧 Requires that the option be convertible to the specified type e.g. `CLI::TypeValidator<unsigned int>()` would require that the input be convertible to an `unsigned int` regardless of the end conversion. + +These Validators can be used by simply passing the name into the `check` or `transform` methods on an option + +```cpp +->check(CLI::ExistingFile); +->check(CLI::Range(0,10)); +``` + +Validators can be merged using `&` and `|` and inverted using `!`. For example: + +```cpp +->check(CLI::Range(0,10)|CLI::Range(20,30)); +``` + +will produce a check to ensure a value is between 0 and 10 or 20 and 30. + +```cpp +->check(!CLI::PositiveNumber); +``` + +will produce a check for a number less than or equal to 0. + +##### Transforming Validators +There are a few built in Validators that let you transform values if used with the `transform` function. If they also do some checks then they can be used in `check` but some may do nothing in that case. 
+- `CLI::Bounded(min,max)` will bound values between min and max and values outside of that range are limited to min or max, it will fail if the value cannot be converted and produce a `ValidationError` +- The `IsMember` Validator lets you specify a set of predefined options. You can pass any container or copyable pointer (including `std::shared_ptr`) to a container to this Validator; the container just needs to be iterable and have a `::value_type`. The key type should be convertible from a string. You can use an initializer list directly if you like. If you need to modify the set later, the pointer form lets you do that; the type message and check will correctly refer to the current version of the set. The container passed in can be a set, vector, or a map like structure. If used in the `transform` method the output value will be the matching key as it could be modified by filters. +After specifying a set of options, you can also specify "filter" functions of the form `T(T)`, where `T` is the type of the values. The most common choices probably will be `CLI::ignore_case`, `CLI::ignore_underscore`, and `CLI::ignore_space`. These all work on strings but it is possible to define functions that work on other types. +Here are some examples +of `IsMember`: + +- `CLI::IsMember({"choice1", "choice2"})`: Select from exact match to choices. +- `CLI::IsMember({"choice1", "choice2"}, CLI::ignore_case, CLI::ignore_underscore)`: Match things like `Choice_1`, too. +- `CLI::IsMember(std::set<int>({2,3,4}))`: Most containers and types work; you just need `std::begin`, `std::end`, and `::value_type`. +- `CLI::IsMember(std::map<std::string, int>({{"one", 1}, {"two", 2}}))`: You can use maps; in `->transform()` these replace the matched value with the matched key. The value member of the map is not used in `IsMember`, so it can be any type. +- `auto p = std::make_shared<std::vector<std::string>>(std::initializer_list<std::string>({"one", "two"})); CLI::IsMember(p)`: You can modify `p` later. 
+- The `Transformer` and `CheckedTransformer` Validators transform one value into another. Any container or copyable pointer (including `std::shared_ptr`) to a container that generates pairs of values can be passed to these Validators; the container just needs to be iterable and have a `::value_type` that consists of pairs. The key type should be convertible from a string, and the value type should be convertible to a string. You can use an initializer list directly if you like. If you need to modify the map later, the pointer form lets you do that; the description message will correctly refer to the current version of the map. `Transformer` does not do any checking so values not in the map are ignored. `CheckedTransformer` takes an extra step of verifying that the value is either one of the map key values, in which case it is transformed, or one of the expected output values, and if not will generate a `ValidationError`. A Transformer placed using `check` will not do anything. +After specifying a map of options, you can also specify "filter" just like in `CLI::IsMember`. +Here are some examples (`Transformer` and `CheckedTransformer` are interchangeable in the examples) +of `Transformer`: + +- `CLI::Transformer({{"key1", "map1"},{"key2","map2"}})`: Select from key values and produce map values. + +- `CLI::Transformer(std::map<std::string, int>({{"two",2},{"three",3},{"four",4}}))`: most maplike containers work, the `::value_type` needs to produce a pair of some kind. +- `CLI::CheckedTransformer(std::map<std::string, int>({{"one", 1}, {"two", 2}}))`: You can use maps; in `->transform()` these replace the matched key with the value. `CheckedTransformer` also requires that the value either match one of the keys or match one of known outputs. +- `auto p = std::make_shared<TransformPairs<std::string>>(std::initializer_list<std::pair<std::string, std::string>>({{"key1", "map1"},{"key2","map2"}})); CLI::Transformer(p)`: You can modify `p` later. 
`TransformPairs` is an alias for `std::vector<std::pair<std::string, T>>` + +NOTES: If the container used in `IsMember`, `Transformer`, or `CheckedTransformer` has a `find` function like `std::unordered_map` or `std::map` then that function is used to do the searching. If it does not have a `find` function a linear search is performed. If there are filters present, the fast search is performed first, and if that fails a linear search with the filters on the key values is performed. + +##### Validator operations +Validators are copyable and have a few operations that can be performed on them to alter settings. Most of the built in Validators have a default description that is displayed in the help. This can be altered via `.description(validator_description)`. +The name of a Validator, which is useful for later reference from the `get_validator(name)` method of an `Option` can be set via `.name(validator_name)` +The operation function of a Validator can be set via +`.operation(std::function<std::string(std::string &)>)`. The `.active()` function can activate or deactivate a Validator from the operation. A validator can be set to apply only to a specific element of the output. For example in a pair option `std::pair<int, std::string>` the first element may need to be a positive integer while the second may need to be a valid file. The `.application_index(int)` 🆕 function can specify this. It is zero based and negative indices apply to all values. +```cpp +opt->check(CLI::Validator(CLI::PositiveNumber).application_index(0)); +opt->check(CLI::Validator(CLI::ExistingFile).application_index(1)); +``` + +All the validator operation functions return a Validator reference allowing them to be chained. For example + +```cpp +opt->check(CLI::Range(10,20).description("range is limited to sensible values").active(false).name("range")); +``` + +will specify a check on an option with a name "range", but deactivate it for the time being. 
+The check can later be activated through + +```cpp +opt->get_validator("range")->active(); +``` + +##### Custom Validators + +A validator object with a custom function can be created via + +```cpp +CLI::Validator(std::function<std::string(std::string &)>,validator_description,validator_name=""); +``` + +or if the operation function is set later they can be created with + +```cpp +CLI::Validator(validator_description); +``` + + It is also possible to create a subclass of `CLI::Validator`, in which case it can also set a custom description function, and operation function. + +##### Querying Validators + +Once loaded into an Option, a pointer to a named Validator can be retrieved via + +```cpp +opt->get_validator(name); +``` + +This will retrieve a Validator with the given name or throw a `CLI::OptionNotFound` error. If no name is given or name is empty the first unnamed Validator will be returned or the first Validator if there is only one. + +or 🆕 + +```cpp +opt->get_validator(index); +``` + +Which will return a validator in the index it is applied which isn't necessarily the order in which was defined. The pointer can be `nullptr` if an invalid index is given. +Validators have a few functions to query the current values +- `get_description()`: Will return a description string +- `get_name()`: Will return the Validator name +- `get_active()`: Will return the current active state, true if the Validator is active. +- `get_application_index()`: 🆕 Will return the current application index. +- `get_modifying()`: Will return true if the Validator is allowed to modify the input, this can be controlled via the `non_modifying()` method, though it is recommended to let `check` and `transform` option methods manipulate it if needed. + +#### Getting results + +In most cases, the fastest and easiest way is to return the results through a callback or variable specified in one of the `add_*` functions. But there are situations where this is not possible or desired. 
For these cases the results may be obtained through one of the following functions. Please note that these functions will do any type conversions and processing during the call so should not be used in performance critical code: + +- `results()`: Retrieves a vector of strings with all the results in the order they were given. +- `results(variable_to_bind_to)`: Gets the results according to the MultiOptionPolicy and converts them just like the `add_option_function` with a variable. +- `Value=as<type>()`: Returns the result or default value directly as the specified type if possible, can be a vector type to return all results, and a non-vector to get the result according to the MultiOptionPolicy in place. + +### Subcommands + +Subcommands are supported, and can be nested infinitely. To add a subcommand, call the `add_subcommand` method with a name and an optional description. This gives a pointer to an `App` that behaves just like the main app, and can take options or further subcommands. Add `->ignore_case()` to a subcommand to allow any variation of caps to also be accepted. `->ignore_underscore()` is similar, but for underscores. Children inherit the current setting from the parent. You cannot add multiple matching subcommand names at the same level (including `ignore_case` and `ignore_underscore`). + +If you want to require that at least one subcommand is given, use `.require_subcommand()` on the parent app. You can optionally give an exact number of subcommands to require, as well. If you give two arguments, that sets the min and max number allowed. +0 for the max number allowed will allow an unlimited number of subcommands. As a handy shortcut, a single negative value N will set "up to N" values. Limiting the maximum number allows you to keep arguments that match a previous +subcommand name from matching. + +If an `App` (main or subcommand) has been parsed on the command line, `->parsed()` will be true (or convert directly to bool). 
+All `App`s have a `get_subcommands()` method, which returns a list of pointers to the subcommands passed on the command line. A `got_subcommand(App_or_name)` method is also provided that will check to see if an `App` pointer or a string name was collected on the command line. + +For many cases, however, using an app's callback capabilities may be easier. Every app has a set of callbacks that can be executed at various stages of parsing; a `C++` lambda function (with capture to get parsed values) can be used as input to the callback definition function. If you throw `CLI::Success` or `CLI::RuntimeError(return_value)`, you can +even exit the program through the callback. + +Multiple subcommands are allowed, to allow [`Click`][click] like series of commands (order is preserved). The same subcommand can be triggered multiple times but all positional arguments will take precedence over the second and future calls of the subcommand. `->count()` on the subcommand will return the number of times the subcommand was called. The subcommand callback will only be triggered once unless the `.immediate_callback()` flag is set or the callback is specified through the `parse_complete_callback()` function, in which case the callback executes on completion of the subcommand arguments but after the arguments for that subcommand have been parsed, and can be triggered multiple times. The `final_callback()` is triggered only once. + +Subcommands may also have an empty name either by calling `add_subcommand` with an empty string for the name or with no arguments. +Nameless subcommands function similarly to groups in the main `App`. See [Option groups](#option-groups) to see how this might work. If an option is not defined in the main App, all nameless subcommands are checked as well. This allows for the options to be defined in a composable group. 
The `add_subcommand` function has an overload for adding a `shared_ptr` so the subcommand(s) could be defined in different components and merged into a main `App`, or possibly multiple `Apps`. Multiple nameless subcommands are allowed. Callbacks for nameless subcommands are only triggered if any options from the subcommand were parsed. + +#### Subcommand options + +There are several options that are supported on the main app and subcommands and option_groups. These are: + +- `.ignore_case()`: Ignore the case of this subcommand. Inherited by added subcommands, so is usually used on the main `App`. +- `.ignore_underscore()`: Ignore any underscores in the subcommand name. Inherited by added subcommands, so is usually used on the main `App`. +- `.allow_windows_style_options()`: Allow command line options to be parsed in the form of `/s /long /file:file_name.ext` This option does not change how options are specified in the `add_option` calls or the ability to process options in the form of `-s --long --file=file_name.ext`. +- `.fallthrough()`: Allow extra unmatched options and positionals to "fall through" and be matched on a parent option. Subcommands always are allowed to "fall through" as in they will first attempt to match on the current subcommand and if they fail will progressively check parents for matching subcommands. +- `.configurable()`: 🆕 Allow the subcommand to be triggered from a configuration file. By default subcommand options in a configuration file do not trigger a subcommand but will just update default values. +- `.disable()`: Specify that the subcommand is disabled, if given with a bool value it will enable or disable the subcommand or option group. +- `.disabled_by_default()`: Specify that at the start of parsing the subcommand/option_group should be disabled. This is useful for allowing some Subcommands to trigger others. +- `.enabled_by_default()`: Specify that at the start of each parse the subcommand/option_group should be enabled. 
This is useful for allowing some Subcommands to disable others. +- `.silent()`: 🚧 Specify that the subcommand is silent meaning that if used it won't show up in the subcommand list. This allows the use of subcommands as modifiers +- `.validate_positionals()`: Specify that positionals should pass validation before matching. Validation is specified through `transform`, `check`, and `each` for an option. If an argument fails validation it is not an error and matching proceeds to the next available positional or extra arguments. +- `.excludes(option_or_subcommand)`: If given an option pointer or pointer to another subcommand, these subcommands cannot be given together. In the case of options, if the option is passed the subcommand cannot be used and will generate an error. +- `.needs(option_or_subcommand)`: 🆕 If given an option pointer or pointer to another subcommand, the subcommands will require the given option to have been given before this subcommand is validated which occurs prior to execution of any callback or after parsing is completed. +- `.require_option()`: Require 1 or more options or option groups be used. +- `.require_option(N)`: Require `N` options or option groups, if `N>0`, or up to `N` if `N<0`. `N=0` resets to the default to 0 or more. +- `.require_option(min, max)`: Explicitly set min and max allowed options or option groups. Setting `max` to 0 implies unlimited options. +- `.require_subcommand()`: Require 1 or more subcommands. +- `.require_subcommand(N)`: Require `N` subcommands if `N>0`, or up to `N` if `N<0`. `N=0` resets to the default to 0 or more. +- `.require_subcommand(min, max)`: Explicitly set min and max allowed subcommands. Setting `max` to 0 is unlimited. +- `.add_subcommand(name="", description="")`: Add a subcommand, returns a pointer to the internally stored subcommand. +- `.add_subcommand(shared_ptr)`: Add a subcommand by shared_ptr, returns a pointer to the internally stored subcommand. 
+- `.remove_subcommand(App)`: Remove a subcommand from the app or subcommand. +- `.got_subcommand(App_or_name)`: Check to see if a subcommand was received on the command line. +- `.get_subcommands(filter)`: The list of subcommands that match a particular filter function. +- `.add_option_group(name="", description="")`: Add an [option group](#option-groups) to an App, an option group is specialized subcommand intended for containing groups of options or other groups for controlling how options interact. +- `.get_parent()`: Get the parent App or `nullptr` if called on master App. +- `.get_option(name)`: Get an option pointer by option name will throw if the specified option is not available, nameless subcommands are also searched +- `.get_option_no_throw(name)`: Get an option pointer by option name. This function will return a `nullptr` instead of throwing if the option is not available. +- `.get_options(filter)`: Get the list of all defined option pointers (useful for processing the app for custom output formats). +- `.parse_order()`: Get the list of option pointers in the order they were parsed (including duplicates). +- `.formatter(fmt)`: Set a formatter, with signature `std::string(const App*, std::string, AppFormatMode)`. See Formatting for more details. +- `.description(str)`: Set/change the description. +- `.get_description()`: Access the description. +- `.alias(str)`: 🆕 set an alias for the subcommand, this allows subcommands to be called by more than one name. +- `.parsed()`: True if this subcommand was given on the command line. +- `.count()`: Returns the number of times the subcommand was called. +- `.count(option_name)`: Returns the number of times a particular option was called. +- `.count_all()`: Returns the total number of arguments a particular subcommand processed, on the master App it returns the total number of processed commands. +- `.name(name)`: Add or change the name. +- `.callback(void() function)`: Set the callback for an app. 
🆕 either sets the pre_parse_callback or the final_callback depending on the value of `immediate_callback`. See [Subcommand callbacks](#callbacks) for some additional details. +- `.parse_complete_callback(void() function)`: 🆕 Set the callback that runs at the completion of parsing. for subcommands this is executed at the completion of the single subcommand and can be executed multiple times. See [Subcommand callbacks](#callbacks) for some additional details. +- `.final_callback(void() function)`: 🆕 Set the callback that runs at the end of all processing. This is the last thing that is executed before returning. See [Subcommand callbacks](#callbacks) for some additional details. +- `.immediate_callback()`: Specifies whether the callback for a subcommand should be run as a `parse_complete_callback`(true) or `final_callback`(false). When used on the main app 🆕 it will execute the main app callback prior to the callbacks for a subcommand if they do not also have the `immediate_callback` flag set. 🆕 It is preferable to use the `parse_complete_callback` or `final_callback` directly instead of the `callback` and `immediate_callback` if one wishes to control the ordering and timing of callback. Though `immediate_callback` can be used to swap them if that is needed. +- `.pre_parse_callback(void(std::size_t) function)`: Set a callback that executes after the first argument of an application is processed. See [Subcommand callbacks](#callbacks) for some additional details. +- `.allow_extras()`: Do not throw an error if extra arguments are left over. +- `.positionals_at_end()`: Specify that positional arguments occur as the last arguments and throw an error if an unexpected positional is encountered. +- `.prefix_command()`: Like `allow_extras`, but stop immediately on the first unrecognized item. It is ideal for allowing your app or subcommand to be a "prefix" to calling another app. +- `.footer(message)`: Set text to appear at the bottom of the help string. 
+- `.footer(std::string())`: 🆕 Set a callback to generate a string that will appear at the end of the help string. +- `.set_help_flag(name, message)`: Set the help flag name and message, returns a pointer to the created option. +- `.set_help_all_flag(name, message)`: Set the help all flag name and message, returns a pointer to the created option. Expands subcommands. +- `.failure_message(func)`: Set the failure message function. Two provided: `CLI::FailureMessage::help` and `CLI::FailureMessage::simple` (the default). +- `.group(name)`: Set a group name, defaults to `"Subcommands"`. Setting `""` will hide the subcommand. +- `[option_name]`: Retrieve a const pointer to an option given by `option_name`. For example, `app["--flag1"]` will get a pointer to the option for the "--flag1" value, `app["--flag1"]->as()` will get the results of the command line for a flag. The operation will throw an exception if the option name is not valid. + +> Note: if you have a fixed number of required positional options, that will match before subcommand names. `{}` is an empty filter function, and any positional argument will match before repeated subcommand names. + + +#### Callbacks +A subcommand has three optional callbacks that are executed at different stages of processing. The `preparse_callback` is executed once after the first argument of a subcommand or application is processed and gives an argument for the number of remaining arguments to process. For the main app the first argument is considered the program name, for subcommands the first argument is the subcommand name. For option groups and nameless subcommands the first argument is after the first argument or subcommand is processed from that group. +The second callback is executed after parsing. This is known as the `parse_complete_callback`. For subcommands this is executed immediately after parsing and can be executed multiple times if a subcommand is called multiple times. 
On the main app this callback is executed after all the `parse_complete_callback`s for the subcommands are executed but prior to any `final_callback` calls in the subcommand or option groups. If the main app or subcommand has a config file, no data from the config file will be reflected in `parse_complete_callback` on named subcommands 🆕. For `option_group`s the `parse_complete_callback` is executed prior to the `parse_complete_callback` on the main app but after the `config_file` is loaded (if specified). The 🆕 `final_callback` is executed after all processing is complete. After the `parse_complete_callback` is executed on the main app, the used subcommand `final_callback` are executed followed by the "final callback" for option groups. The last thing to execute is the `final_callback` for the `main_app`. +For example say an application was set up like + +```cpp +app.parse_complete_callback(ac1); +app.final_callback(ac2); +auto sub1=app.add_subcommand("sub1")->parse_complete_callback(c1)->preparse_callback(pc1); +auto sub2=app.add_subcommand("sub2")->final_callback(c2)->preparse_callback(pc2); +app.preparse_callback( pa); + +... A bunch of other options + +``` + +Then the command line is given as + +``` +program --opt1 opt1_val sub1 --sub1opt --sub1optb val sub2 --sub2opt sub1 --sub1opt2 sub2 --sub2opt2 val +``` + +- pa will be called prior to parsing any values with an argument of 13. +- pc1 will be called immediately after processing the sub1 command with a value of 10. +- c1 will be called when the `sub2` command is encountered. +- pc2 will be called with value of 6 after the sub2 command is encountered. +- c1 will be called again after the second sub2 command is encountered. +- ac1 will be called after processing of all arguments +- c2 will be called once after processing all arguments. +- ac2 will be called last after completing all lower level callbacks have been executed. + +A subcommand is considered terminated when one of the following conditions are met. 
+1. There are no more arguments to process +2. Another subcommand is encountered that would not fit in an optional slot of the subcommand +3. The `positional_mark` (`--`) is encountered and there are no available positional slots in the subcommand. +4. The `subcommand_terminator` mark (`++`) is encountered + +Prior to executing a `parse_complete_callback`, all contained options are processed before the callback is triggered. If a subcommand with a `parse_complete_callback` is called again, then the contained options are reset, and can be triggered again. + + + +#### Option groups + +The subcommand method + +```cpp +.add_option_group(name,description) +``` + +Will create an option group, and return a pointer to it. The argument for `description` is optional and can be omitted. An option group allows creation of a collection of options, similar to the groups function on options, but with additional controls and requirements. They allow specific sets of options to be composed and controlled as a collective. For an example see [range example](https://github.com/CLIUtils/CLI11/blob/master/examples/ranges.cpp). Option groups are a specialization of an App so all [functions](#subcommand-options) that work with an App or subcommand also work on option groups. Options can be created as part of an option group using the add functions just like a subcommand, or previously created options can be added through + +```cpp +ogroup->add_option(option_pointer); +ogroup->add_options(option_pointer); +ogroup->add_options(option1,option2,option3,...); +``` + +The option pointers used in this function must be options defined in the parent application of the option group otherwise an error will be generated. Subcommands can also be added via + +```cpp +ogroup->add_subcommand(subcom_pointer); +``` + +This results in the subcommand being moved from its parent into the option group. 
+ +Options in an option group are searched for a command line match after any options in the main app, so any positionals in the main app would be matched first. So care must be taken to make sure of the order when using positional arguments and option groups. +Option groups work well with `excludes` and `require_options` methods, as an application will treat an option group as a single option for the purpose of counting and requirements, and an option group will be considered used if any of the options or subcommands contained in it are used. Option groups allow specifying requirements such as requiring 1 of 3 options in one group and 1 of 3 options in a different group. Option groups can contain other groups as well. Disabling an option group will turn off all options within the group. + +The `CLI::TriggerOn` and `CLI::TriggerOff` methods are helper functions to allow the use of options/subcommands from one group to trigger another group on or off. + +```cpp +CLI::TriggerOn(group1_pointer, triggered_group); +CLI::TriggerOff(group2_pointer, disabled_group); +``` + +These functions make use of `preparse_callback`, `enabled_by_default()` and `disabled_by_default`. The triggered group may be a vector of group pointers. These methods should only be used once per group and will override any previous use of the underlying functions. More complex arrangements can be accomplished using similar methodology with a custom `preparse_callback` function that does more. + +Additional helper functions `deprecate_option` 🆕 and `retire_option` 🆕 are available to deprecate or retire options +```cpp +CLI::deprecate_option(option *, replacement_name=""); +CLI::deprecate_option(App,option_name,replacement_name=""); +``` +will specify that the option is deprecated which will display a message in the help and a warning on first usage. Deprecated options function normally but will add a message in the help and display a warning on first use. 
+ +```cpp +CLI::retire_option(App,option *); +CLI::retire_option(App,option_name); +``` +will create an option that does nothing by default and will display a warning on first usage that the option is retired and has no effect. If the option exists it is replaced with a dummy option that takes the same arguments. + +If an empty string is passed as the option group name, the entire group will be hidden in the help results. For example: + +```cpp +auto hidden_group=app.add_option_group(""); +``` +will create a group such that no options in that group are displayed in the help string. + +### Configuration file + +```cpp +app.set_config(option_name="", + default_file_name="", + help_string="Read an ini file", + required=false) +``` + +If this is called with no arguments, it will remove the configuration file option (like `set_help_flag`). Setting a configuration option is special. If it is present, it will be read along with the normal command line arguments. The file will be read if it exists, and does not throw an error unless `required` is `true`. Configuration files are in [TOML] format by default 🚧, though the default reader can also accept files in INI format as well 🆕. It should be noted that CLI11 does not contain a full TOML parser but can read strings from most TOML files and run them through the CLI11 parser. Other formats can be added by an adept user, some variations are available through customization points in the default formatter. 
An example of a TOML file 🆕: + +```toml +# Comments are supported, using a # +# The default section is [default], case insensitive + +value = 1 +str = "A string" +vector = [1,2,3] +str_vector = ["one","two","and three"] + +# Sections map to subcommands +[subcommand] +in_subcommand = Wow +sub.subcommand = true +``` +or equivalently in INI format +```ini +; Comments are supported, using a ; +; The default section is [default], case insensitive + +value = 1 +str = "A string" +vector = 1 2 3 +str_vector = "one" "two" "and three" + +; Sections map to subcommands +[subcommand] +in_subcommand = Wow +sub.subcommand = true +``` + +Spaces before and after the name and argument are ignored. Multiple arguments are separated by spaces. One set of quotes will be removed, preserving spaces (the same way the command line works). Boolean options can be `true`, `on`, `1`, `yes`, `enable`; or `false`, `off`, `0`, `no`, `disable` (case insensitive). Sections (and `.` separated names) are treated as subcommands (note: this does not necessarily mean that subcommand was passed, it just sets the "defaults"). You cannot set positional-only arguments. 🆕 Subcommands can be triggered from configuration files if the `configurable` flag was set on the subcommand. Then the use of `[subcommand]` notation will trigger a subcommand and cause it to act as if it were on the command line. + +To print a configuration file from the passed +arguments, use `.config_to_str(default_also=false, write_description=false)`, where `default_also` will also show any defaulted arguments, and `write_description` will include the app and option descriptions. See [Config files](https://cliutils.github.io/CLI11/book/chapters/config.html) for some additional details. + +If it is desired that multiple configuration be allowed. Use + +```cpp +app.set_config("--config")->expected(1, X); +``` + +Where X is some positive number and will allow up to `X` configuration files to be specified by separate `--config` arguments. 
+ +### Inheriting defaults + +Many of the defaults for subcommands and even options are inherited from their creators. The inherited default values for subcommands are `allow_extras`, `prefix_command`, `ignore_case`, `ignore_underscore`, `fallthrough`, `group`, `footer`, `immediate_callback` and maximum number of required subcommands. The help flag existence, name, and description are inherited, as well. + +Options have defaults for `group`, `required`, `multi_option_policy`, `ignore_case`, `ignore_underscore`, `delimiter`, and `disable_flag_override`. To set these defaults, you should set the `option_defaults()` object, for example: + +```cpp +app.option_defaults()->required(); +// All future options will be required +``` + +The default settings for options are inherited by subcommands, as well. + +### Formatting + +The job of formatting help printouts is delegated to a formatter callable object on Apps and Options. You are free to replace either formatter by calling `formatter(fmt)` on an `App`, where fmt is any copyable callable with the correct signature. +CLI11 comes with a default App formatter functional, `Formatter`. It is customizable; you can set `label(key, value)` to replace the default labels like `REQUIRED`, and `column_width(n)` to set the width of the columns before you add the functional to the app or option. You can also override almost any stage of the formatting process in a subclass of either formatter. If you want to make a new formatter from scratch, you can do +that too; you just need to implement the correct signature. The first argument is a const pointer to the `App` in question. The formatter will get a `std::string` usage name as the second option, and an `AppFormatMode` mode for the final option. It should return a `std::string`. + +The `AppFormatMode` can be `Normal`, `All`, or `Sub`, and it indicates the situation the help was called in. 
`Sub` is optional, but the default formatter uses it to make sure expanded subcommands are called with +their own formatter since you can't access anything but the call operator once a formatter has been set. + +### Subclassing + +The App class was designed to allow toolkits to subclass it, to provide preset default options (see above) and setup/teardown code. Subcommands remain an unsubclassed `App`, since those are not expected to need setup and teardown. The default `App` only adds a help flag, `-h,--help`, that can be removed/replaced using `.set_help_flag(name, help_string)`. You can also set a help-all flag with `.set_help_all_flag(name, help_string)`; this will expand the subcommands (one level only). You can remove options if you have pointers to them using `.remove_option(opt)`. You can add a `pre_callback` override to customize the after parse +but before run behavior, while +still giving the user freedom to `callback` on the main app. + +The most important parse function is `parse(std::vector)`, which takes a reversed list of arguments (so that `pop_back` processes the args in the correct order). `get_help_ptr` and `get_config_ptr` give you access to the help/config option pointers. The standard `parse` manually sets the name from the first argument, so it should not be in this vector. You can also use `parse(string, bool)` to split up and parse a string; the optional boolean should be set to true if you are +including the program name in the string, and false otherwise. + +Also, in a related note, the `App` you get a pointer to is stored in the parent `App` as a `shared_ptr` (similar to `Option`s) and is deleted when the main `App` goes out of scope unless the object has another owner. + +### How it works + +Every `add_` option you have seen so far depends on one method that takes a lambda function. Each of these methods is just making a different lambda function with capture to populate the option. 
The function has full access to the vector of strings, so it knows how many times an option was passed or how many arguments it received. The lambda returns `true` if it could validate the option strings, and +`false` if it failed. + +Other values can be added as long as they support `operator>>` (and defaults can be printed if they support `operator<<`). To add a new type, for example, provide a custom `operator>>` with an `istream` (inside the CLI namespace is fine if you don't want to interfere with an existing `operator>>`). + +If you wanted to extend this to support a completely new type, use a lambda or add a specialization of the `lexical_cast` function template in the namespace `CLI::detail` with the type you need to convert to. Some examples of some new parsers for `complex` that support all of the features of a standard `add_options` call are in [one of the tests](./tests/NewParseTest.cpp). A simpler example is shown below: + +#### Example + +```cpp +app.add_option("--fancy-count", [](std::vector val){ + std::cout << "This option was given " << val.size() << " times." << std::endl; + return true; + }); +``` + +### Utilities + +There are a few other utilities that are often useful in CLI programming. These are in separate headers, and do not appear in `CLI11.hpp`, but are completely independent and can be used as needed. The `Timer`/`AutoTimer` class allows you to easily time a block of code, with custom print output. + +```cpp +{ +CLI::AutoTimer timer {"My Long Process", CLI::Timer::Big}; +some_long_running_process(); +} +``` + +This will create a timer with a title (default: `Timer`), and will customize the output using the predefined `Big` output (default: `Simple`). Because it is an `AutoTimer`, it will print out the time elapsed when the timer is destroyed at the end of the block. If you use `Timer` instead, you can use `to_string` or `std::cout << timer << std::endl;` to print the time. 
The print function can be any function that takes two strings, the title and the time, and returns a formatted +string for printing. + +### Other libraries + +If you use the excellent [Rang][] library to add color to your terminal in a safe, multi-platform way, you can combine it with CLI11 nicely: + +```cpp +std::atexit([](){std::cout << rang::style::reset;}); +try { + app.parse(argc, argv); +} catch (const CLI::ParseError &e) { + std::cout << (e.get_exit_code()==0 ? rang::fg::blue : rang::fg::red); + return app.exit(e); +} +``` + +This will print help in blue, errors in red, and will reset before returning the terminal to the user. + +If you are on a Unix-like system, and you'd like to handle control-c and color, you can add: + +```cpp + #include + void signal_handler(int s) { + std::cout << std::endl << rang::style::reset << rang::fg::red << rang::fg::bold; + std::cout << "Control-C detected, exiting..." << rang::style::reset << std::endl; + std::exit(1); // will call the correct exit func, no unwinding of the stack though + } +``` + +And, in your main function: + +```cpp + // Nice Control-C + struct sigaction sigIntHandler; + sigIntHandler.sa_handler = signal_handler; + sigemptyset(&sigIntHandler.sa_mask); + sigIntHandler.sa_flags = 0; + sigaction(SIGINT, &sigIntHandler, nullptr); +``` + +## API + +The API is [documented here][api-docs]. Also see the [CLI11 tutorial GitBook][gitbook]. + +## Examples + +Several short examples of different features are included in the repository. A brief description of each is included here + + - [callback_passthrough](https://github.com/CLIUtils/CLI11/blob/master/examples/callback_passthrough.cpp): Example of directly passing remaining arguments through to a callback function which generates a CLI11 application based on existing arguments. 
+ - [digit_args](https://github.com/CLIUtils/CLI11/blob/master/examples/digit_args.cpp): Based on [Issue #123](https://github.com/CLIUtils/CLI11/issues/123), uses digit flags to pass a value + - [enum](https://github.com/CLIUtils/CLI11/blob/master/examples/enum.cpp): Using enumerations in an option, and the use of [CheckedTransformer](#transforming-validators) + - [enum_ostream](https://github.com/CLIUtils/CLI11/blob/master/examples/enum_ostream.cpp): In addition to the contents of example enum.cpp, this example shows how a custom ostream operator overrides CLI11's enum streaming. + - [formatter](https://github.com/CLIUtils/CLI11/blob/master/examples/formatter.cpp): Illustrating usage of a custom formatter + - [groups](https://github.com/CLIUtils/CLI11/blob/master/examples/groups.cpp): Example using groups of options for help grouping and a the timer helper class + - [inter_argument_order](https://github.com/CLIUtils/CLI11/blob/master/examples/inter_argument_order.cpp): An app to practice mixing unlimited arguments, but still recover the original order. + - [json](https://github.com/CLIUtils/CLI11/blob/master/examples/json.cpp): Using JSON as a config file parser + - [modhelp](https://github.com/CLIUtils/CLI11/blob/master/examples/modhelp.cpp): How to modify the help flag to do something other than default + - [nested](https://github.com/CLIUtils/CLI11/blob/master/examples/nested.cpp): Nested subcommands + - [option_groups](https://github.com/CLIUtils/CLI11/blob/master/examples/option_groups.cpp): illustrating the use of option groups and a required number of options. 
+ based on [Issue #88](https://github.com/CLIUtils/CLI11/issues/88) to set interacting groups of options + - [positional_arity](https://github.com/CLIUtils/CLI11/blob/master/examples/positional_arity.cpp): Illustrating use of `preparse_callback` to handle situations where the number of arguments can determine which should get parsed, Based on [Issue #166](https://github.com/CLIUtils/CLI11/issues/166) + - [positional_validation](https://github.com/CLIUtils/CLI11/blob/master/examples/positional_validation.cpp): Example of how positional arguments are validated using the `validate_positional` flag, also based on [Issue #166](https://github.com/CLIUtils/CLI11/issues/166) + - [prefix_command](https://github.com/CLIUtils/CLI11/blob/master/examples/prefix_command.cpp): illustrating use of the `prefix_command` flag. + - [ranges](https://github.com/CLIUtils/CLI11/blob/master/examples/ranges.cpp): App to demonstrate exclusionary option groups based on [Issue #88](https://github.com/CLIUtils/CLI11/issues/88) + - [shapes](https://github.com/CLIUtils/CLI11/blob/master/examples/shapes.cpp): illustrating how to set up repeated subcommands Based on [gitter discussion](https://gitter.im/CLI11gitter/Lobby?at=5c7af6b965ffa019ea788cd5) + - [simple](https://github.com/CLIUtils/CLI11/blob/master/examples/simple.cpp): a simple example of how to set up a CLI11 Application with different flags and options + - [subcom_help](https://github.com/CLIUtils/CLI11/blob/master/examples/subcom_help.cpp): configuring help for subcommands + - [subcom_partitioned](https://github.com/CLIUtils/CLI11/blob/master/examples/subcom_partitioned.cpp): Example with a timer and subcommands generated separately and added to the main app later. 
+ - [subcommands](https://github.com/CLIUtils/CLI11/blob/master/examples/subcommands.cpp): Short example of subcommands + - [validators](https://github.com/CLIUtils/CLI11/blob/master/examples/validators.cpp): Example illustrating use of validators + +## Contribute + +To contribute, open an [issue][github issues] or [pull request][github pull requests] on GitHub, or ask a question on [gitter][]. There is also a short note to contributors [here](./.github/CONTRIBUTING.md). +This readme roughly follows the [Standard Readme Style][] and includes a mention of almost every feature of the library. More complex features are documented in more detail in the [CLI11 tutorial GitBook][gitbook]. + +This project was created by [Henry Schreiner](https://github.com/henryiii) and major features were added by [Philip Top](https://github.com/phlptp). Special thanks to all the contributors ([emoji key](https://allcontributors.org/docs/en/emoji-key)): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Henry Schreiner

🐛 📖 💻

Philip Top

🐛 📖 💻

Christoph Bachhuber

💡 💻

Marcus Brinkmann

🐛 💻

Jonas Nilsson

🐛 💻

Doug Johnston

🐛 💻

Lucas Czech

🐛 💻

Rafi Wiener

🐛 💻

Daniel Mensinger

📦

Jesus Briales

💻 🐛

Sean Fisk

🐛 💻

fpeng1985

💻

almikhayl

💻 📦

Andrew Hardin

💻

Anton

💻

Fred Helmesjö

🐛 💻

Kannan

🐛 💻

Khem Raj

💻

Mak Kolybabi

📖

Mathias Soeken

📖

Nathan Hourt

🐛 💻

Paul le Roux

💻 📦

Paweł Bylica

📦

Peter Azmanov

💻

Stéphane Del Pino

💻

Viacheslav Kroilov

💻

christos

💻

deining

📖

elszon

💻

ncihnegn

💻

nurelin

💻

ryan4729

⚠️

Isabella Muerte

📦

KOLANICH

📦

James Gerity

📖

Josh Soref

🔧

geir-t

📦

Ondřej Čertík

🐛

Sam Hocevar

💻

Ryan Curtin

📖

Michael Hall

📖

ferdymercury

📖

Jakob Lover

💻

Dominik Steinberger

💻

D. Fleury

💻

Dan Barowy

📖
+ + + + + + +This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! + +## License + +As of version 1.0, this library is available under a 3-Clause BSD license. See the [LICENSE](./LICENSE) file for details. + +CLI11 was developed at the [University of Cincinnati][] to support of the [GooFit][] library under [NSF Award 1414736][]. Version 0.9 was featured in a [DIANA/HEP][] meeting at CERN ([see the slides][diana slides]). Please give it a try! Feedback is always welcome. + +[doi-badge]: https://zenodo.org/badge/80064252.svg +[doi-link]: https://zenodo.org/badge/latestdoi/80064252 +[azure-badge]: https://dev.azure.com/CLIUtils/CLI11/_apis/build/status/CLIUtils.CLI11?branchName=master +[azure]: https://dev.azure.com/CLIUtils/CLI11 +[travis-badge]: https://img.shields.io/travis/CLIUtils/CLI11/master.svg?label=Linux/macOS +[travis]: https://travis-ci.org/CLIUtils/CLI11 +[appveyor-badge]: https://img.shields.io/appveyor/ci/HenrySchreiner/cli11/master.svg?label=AppVeyor +[appveyor]: https://ci.appveyor.com/project/HenrySchreiner/cli11 +[actions-badge]: https://github.com/CLIUtils/CLI11/workflows/Tests/badge.svg +[actions-link]: https://github.com/CLIUtils/CLI11/actions +[codecov-badge]: https://codecov.io/gh/CLIUtils/CLI11/branch/master/graph/badge.svg +[codecov]: https://codecov.io/gh/CLIUtils/CLI11 +[gitter-badge]: https://badges.gitter.im/CLI11gitter/Lobby.svg +[gitter]: https://gitter.im/CLI11gitter/Lobby +[license-badge]: https://img.shields.io/badge/License-BSD-blue.svg +[conan-badge]: https://api.bintray.com/packages/cliutils/CLI11/CLI11%3Acliutils/images/download.svg +[conan-link]: https://bintray.com/cliutils/CLI11/CLI11%3Acliutils/_latestVersion +[github releases]: https://github.com/CLIUtils/CLI11/releases +[github issues]: https://github.com/CLIUtils/CLI11/issues +[github pull requests]: https://github.com/CLIUtils/CLI11/pulls +[goofit]: https://GooFit.github.io 
+[plumbum]: https://plumbum.readthedocs.io/en/latest/ +[click]: http://click.pocoo.org +[api-docs]: https://CLIUtils.github.io/CLI11/index.html +[rang]: https://github.com/agauniyal/rang +[boost program options]: http://www.boost.org/doc/libs/1_63_0/doc/html/program_options.html +[the lean mean c++ option parser]: http://optionparser.sourceforge.net +[tclap]: http://tclap.sourceforge.net +[cxxopts]: https://github.com/jarro2783/cxxopts +[docopt]: https://github.com/docopt/docopt.cpp +[gflags]: https://gflags.github.io/gflags +[getopt]: https://www.gnu.org/software/libc/manual/html_node/Getopt.html +[diana/hep]: http://diana-hep.org +[nsf award 1414736]: https://nsf.gov/awardsearch/showAward?AWD_ID=1414736 +[university of cincinnati]: http://www.uc.edu +[gitbook]: https://cliutils.github.io/CLI11/book/ +[cli11 advanced topics/custom converters]: https://cliutils.gitlab.io/CLI11Tutorial/chapters/advanced-topics.html#custom-converters +[programoptions.hxx]: https://github.com/Fytch/ProgramOptions.hxx +[argument aggregator]: https://github.com/vietjtnguyen/argagg +[args]: https://github.com/Taywee/args +[argh!]: https://github.com/adishavit/argh +[fmt]: https://github.com/fmtlib/fmt +[catch]: https://github.com/philsquared/Catch +[clara]: https://github.com/philsquared/Clara +[version 1.0 post]: https://iscinumpy.gitlab.io/post/announcing-cli11-10/ +[version 1.3 post]: https://iscinumpy.gitlab.io/post/announcing-cli11-13/ +[version 1.6 post]: https://iscinumpy.gitlab.io/post/announcing-cli11-16/ +[wandbox-badge]: https://img.shields.io/badge/try_1.9-online-blue.svg +[wandbox-link]: https://wandbox.org/permlink/8SirASwhbFQZyDTW +[releases-badge]: https://img.shields.io/github/release/CLIUtils/CLI11.svg +[cli11-po-compare]: https://iscinumpy.gitlab.io/post/comparing-cli11-and-boostpo/ +[diana slides]: https://indico.cern.ch/event/619465/contributions/2507949/attachments/1448567/2232649/20170424-diana-2.pdf +[awesome c++]: 
https://github.com/fffaraz/awesome-cpp/blob/master/README.md#cli +[cli]: https://codesynthesis.com/projects/cli/ +[single file libs]: https://github.com/nothings/single_file_libs/blob/master/README.md +[codacy-badge]: https://api.codacy.com/project/badge/Grade/ac0df3aead2a4421b02070c3f324a0b9 +[codacy-link]: https://www.codacy.com/app/henryiii/CLI11?utm_source=github.com&utm_medium=referral&utm_content=CLIUtils/CLI11&utm_campaign=Badge_Grade +[hunter]: https://docs.hunter.sh/en/latest/packages/pkg/CLI11.html +[standard readme style]: https://github.com/RichardLitt/standard-readme +[argparse]: https://github.com/p-ranav/argparse diff --git a/thirdparty/CLI11/azure-pipelines.yml b/thirdparty/CLI11/azure-pipelines.yml new file mode 100644 index 0000000..c72c748 --- /dev/null +++ b/thirdparty/CLI11/azure-pipelines.yml @@ -0,0 +1,131 @@ +# C/C++ with GCC +# Build your C/C++ project with GCC using make. +# Add steps that publish test results, save build artifacts, deploy, and more: +# https://docs.microsoft.com/azure/devops/pipelines/apps/c-cpp/gcc + +trigger: +- master +- 'v*' + +pr: +- master +- 'v*' + +variables: + cli11.single: ON + cli11.std: 14 + cli11.build_type: Debug + cli11.options: -DCLI11_EXAMPLES_JSON=ON + CMAKE_BUILD_PARALLEL_LEVEL: 4 + +jobs: + +- job: ClangTidy + variables: + CXX_FLAGS: "-Werror -Wcast-align -Wfloat-equal -Wimplicit-atomic-properties -Wmissing-declarations -Woverlength-strings -Wshadow -Wstrict-selector-match -Wundeclared-selector -Wunreachable-code -std=c++11" + cli11.options: -DCLI11_CLANG_TIDY=ON -DCLI11_CLANG_TIDY_OPTIONS="-fix" + cli11.std: 11 + cli11.single: OFF + CMAKE_BUILD_PARALLEL_LEVEL: 1 + pool: + vmImage: 'ubuntu-latest' + container: silkeh/clang:8 + steps: + - template: .ci/azure-cmake.yml + - template: .ci/azure-build.yml + - script: git diff --exit-code --color + displayName: Check tidy + +- job: CppLint + pool: + vmImage: 'ubuntu-latest' + container: sharaku/cpplint:latest + steps: + - bash: cpplint --counting=detailed 
--recursive examples include/CLI tests + displayName: Checking against google style guide + +# TODO: Fix macOS error and windows warning in c++17 mode +- job: Native + strategy: + matrix: + Linux14: + vmImage: 'ubuntu-latest' + macOS17: + vmImage: 'macOS-latest' + cli11.std: 17 + macOS11: + vmImage: 'macOS-latest' + cli11.std: 11 + Windows17: + vmImage: 'vs2017-win2016' + cli11.std: 17 + Windows11: + vmImage: 'vs2017-win2016' + cli11.std: 11 + Windowslatest: + vmImage: 'windows-2019' + cli11.std: 20 + cli11.options: -DCMAKE_CXX_FLAGS="/std:c++latest /EHsc" + Linux17nortti: + vmImage: 'ubuntu-latest' + cli11.std: 17 + cli11.options: -DCMAKE_CXX_FLAGS="-fno-rtti" + pool: + vmImage: $(vmImage) + steps: + - template: .ci/azure-build.yml + - template: .ci/azure-test.yml + +- job: Meson + pool: + vmImage: 'ubuntu-latest' + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '3.6' + - script: python3 -m pip install meson ninja + - script: meson build + displayName: Run meson to generate build + workingDirectory: tests/mesonTest + - script: ninja -C tests/mesonTest/build + displayName: Build with Ninja + - script: ./tests/mesonTest/build/main --help + displayName: Run help + +- job: Docker + variables: + cli11.single: OFF + pool: + vmImage: 'ubuntu-latest' + strategy: + matrix: + gcc9: + containerImage: gcc:9 + cli11.std: 17 + gcc8: + containerImage: gcc:8 + cli11.std: 17 + gcc4.8: + containerImage: gcc:4.8 + cli11.std: 11 + cli11.options: + clang3.4: + containerImage: silkeh/clang:3.4 + cli11.std: 11 + clang8: + containerImage: silkeh/clang:8 + cli11.std: 14 + cli11.options: -DCLI11_FORCE_LIBCXX=ON + clang8_17: + containerImage: silkeh/clang:8 + cli11.std: 17 + cli11.options: -DCLI11_FORCE_LIBCXX=ON + clang10_20: + containerImage: silkeh/clang:10 + cli11.std: 20 + cli11.options: -DCLI11_FORCE_LIBCXX=ON -DCMAKE_CXX_FLAGS=-std=c++20 + container: $[ variables['containerImage'] ] + steps: + - template: .ci/azure-cmake.yml + - template: .ci/azure-build.yml + - 
template: .ci/azure-test.yml diff --git a/thirdparty/CLI11/book/.gitignore b/thirdparty/CLI11/book/.gitignore new file mode 100644 index 0000000..bb77454 --- /dev/null +++ b/thirdparty/CLI11/book/.gitignore @@ -0,0 +1,20 @@ +# Node rules: +## Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +## Dependency directory +## Commenting this out is preferred by some people, see +## https://docs.npmjs.com/misc/faq#should-i-check-my-node_modules-folder-into-git +node_modules + +# Book build output +_book + +# eBook build output +*.epub +*.mobi +*.pdf + +a.out + +*build* diff --git a/thirdparty/CLI11/book/CMakeLists.txt b/thirdparty/CLI11/book/CMakeLists.txt new file mode 100644 index 0000000..e99bde6 --- /dev/null +++ b/thirdparty/CLI11/book/CMakeLists.txt @@ -0,0 +1,10 @@ + +set( + book_sources + README.md + SUMMARY.md +) + +file(GLOB book_chapters RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} chapters/*.md) +add_custom_target(cli_book SOURCES ${book_sources} ${book_chapters}) + diff --git a/thirdparty/CLI11/book/README.md b/thirdparty/CLI11/book/README.md new file mode 100644 index 0000000..79a061f --- /dev/null +++ b/thirdparty/CLI11/book/README.md @@ -0,0 +1,69 @@ +# CLI11: An introduction + +This gitbook is designed to provide an introduction to using the CLI11 library to write your own command line programs. The library is designed to be clean, intuitive, but powerful. There are no requirements beyond C++11 support (and even `` support not required). It works on Mac, Linux, and Windows, and has 100% test coverage on all three systems. You can simply drop in a single header file (`CLI11.hpp` available in [releases]) to use CLI11 in your own application. Other ways to integrate it into a build system are listed in the [README]. + +The library was inspired the Python libraries [Plumbum] and [Click], and incorporates many of their user friendly features. 
The library is extensively documented, with a [friendly introduction][README], this tutorial book, and more technical [API docs]. + +> Feel free to contribute to [this documentation here][CLI11Tutorial] if something can be improved! + +The syntax is simple and scales from a basic application to a massive physics analysis with multiple models and many parameters and switches. For example, this is a simple program that has an optional parameter that defaults to 0: + +```term +gitbook $ ./a.out +Parameter value: 0 + +gitbook $ ./a.out -p 4 +Parameter value: 4 + +gitbook $ ./a.out --help +App description +Usage: ./a.out [OPTIONS] + +Options: + -h,--help Print this help message and exit + -p INT Parameter +``` + +Like any good command line application, help is provided. This program can be implemented in 10 lines: + +[include](code/intro.cpp) + +[Source code](https://github.com/CLIUtils/CLI11/blob/master/book/code/intro.cpp) + +Unlike some other libraries, this is enough to exit correctly and cleanly if help is requested or if incorrect arguments are passed. You can try this example out for yourself. To compile with GCC: + +```term +gitbook:examples $ c++ -std=c++11 intro.cpp +``` + +Much more complicated options are handled elegantly: + +```cpp +std::string file; +app.add_option("-f,--file", file, "Require an existing file") + ->required() + ->check(CLI::ExistingFile); +``` + +You can use any valid type; the above example could have used a `boost::file_system` file instead of a `std::string`. The value is a real value and does not require any special lookups to access. You do not have to risk typos by repeating the values after parsing like some libraries require. The library also handles positional arguments, flags, fixed or unlimited repeating options, interdependent options, flags, custom validators, help groups, and more. + +You can use subcommands, as well. Subcommands support callback lambda functions when parsed, or they can be checked later. 
You can infinitely nest subcommands, and each is a full `App` instance, supporting everything listed above. + +Reading/producing `.ini` files for configuration is also supported, as is using environment variables as input. The base `App` can be subclassed and customized for use in a toolkit (like [GooFit]). All the standard shell idioms, like `--`, work as well. + +CLI11 was developed at the [University of Cincinnati] in support of the [GooFit] library under [NSF Award 1414736][NSF 1414736]. It was featured in a [DIANA/HEP] meeting at CERN. Please give it a try! Feedback is always welcome. + +[GooFit]: https://github.com/GooFit/GooFit +[DIANA/HEP]: http://diana-hep.org +[CLI11]: https://github.com/CLIUtils/CLI11 +[CLI11Tutorial]: https://cliutils.github.io/CLI11/book +[releases]: https://github.com/CLIUtils/CLI11/releases +[API docs]: https://cliutils.github.io/CLI11 +[README]: https://github.com/CLIUtils/CLI11/blob/master/README.md +[NSF 1414736]: https://nsf.gov/awardsearch/showAward?AWD_ID=1414736 +[University of Cincinnati]: http://www.uc.edu +[Plumbum]: http://plumbum.readthedocs.io/en/latest/ +[Click]: https://click.palletsprojects.com/ + + + diff --git a/thirdparty/CLI11/book/SUMMARY.md b/thirdparty/CLI11/book/SUMMARY.md new file mode 100644 index 0000000..cdbc049 --- /dev/null +++ b/thirdparty/CLI11/book/SUMMARY.md @@ -0,0 +1,16 @@ +# Summary + +* [Introduction](/README.md) +* [Installation](/chapters/installation.md) +* [Basics](/chapters/basics.md) +* [Flags](/chapters/flags.md) +* [Options](/chapters/options.md) +* [Validators](/chapters/validators.md) +* [Subcommands and the App](/chapters/subcommands.md) +* [An advanced example](/chapters/an-advanced-example.md) +* [Configuration files](/chapters/config.md) +* [Formatting help output](/chapters/formatting.md) +* [Toolkits](/chapters/toolkits.md) +* [Advanced topics](/chapters/advanced-topics.md) +* [Internals](/chapters/internals.md) + diff --git a/thirdparty/CLI11/book/book.json 
b/thirdparty/CLI11/book/book.json new file mode 100644 index 0000000..f8745d6 --- /dev/null +++ b/thirdparty/CLI11/book/book.json @@ -0,0 +1,16 @@ +{ +"title": "CLI11 Tutorial", +"description": "A set of examples and detailed information about CLI11", +"author": "Henry Schreiner", +"plugins": [ + "include-codeblock", + "term", + "hints" + ], +"pluginsConfig": { + "include-codeblock": { + "unindent": true, + "fixlang": true + } + } +} diff --git a/thirdparty/CLI11/book/chapters/advanced-topics.md b/thirdparty/CLI11/book/chapters/advanced-topics.md new file mode 100644 index 0000000..1a7ef53 --- /dev/null +++ b/thirdparty/CLI11/book/chapters/advanced-topics.md @@ -0,0 +1,138 @@ +# Advanced topics + + +## Environment variables + +Environment variables can be used to fill in the value of an option: + +```cpp +std::string opt; +app.add_option("--my_option", opt)->envname("MY_OPTION"); +``` +If not given on the command line, the environment variable will be checked and read from if it exists. All the standard tools, like default and required, work as expected. +If passed on the command line, this will ignore the environment variable. + +## Needs/excludes + +You can set a network of requirements. For example, if flag a needs flag b but cannot be given with flag c, that would be: + +```cpp +auto a = app.add_flag("-a"); +auto b = app.add_flag("-b"); +auto c = app.add_flag("-c"); + +a->needs(b); +a->excludes(c); +``` + +CLI11 will make sure your network of requirements makes sense, and will throw an error immediately if it does not. + +## Custom option callbacks + +You can make a completely generic option with a custom callback. 
For example, if you wanted to add a complex number (already exists, so please don't actually do this): + +```cpp +CLI::Option * +add_option(CLI::App &app, std::string name, cx &variable, std::string description = "", bool defaulted = false) { + CLI::callback_t fun = [&variable](CLI::results_t res) { + double x, y; + bool worked = CLI::detail::lexical_cast(res[0], x) && CLI::detail::lexical_cast(res[1], y); + if(worked) + variable = cx(x, y); + return worked; + }; + + CLI::Option *opt = app.add_option(name, fun, description, defaulted); + opt->set_custom_option("COMPLEX", 2); + if(defaulted) { + std::stringstream out; + out << variable; + opt->set_default_str(out.str()); + } + return opt; +} +``` + +Then you could use it like this: + +``` +std::complex comp{0, 0}; +add_option(app, "-c,--complex", comp); +``` + +## Custom converters + +You can add your own converters to allow CLI11 to accept more option types in the standard calls. These can only be used for "single" size options (so complex, vector, etc. are a separate topic). If you set up a custom `istringstream& operator <<` overload before include CLI11, you can support different conversions. If you place this in the CLI namespace, you can even keep this from affecting the rest of your code. Here's how you could add `boost::optional` for a compiler that does not have `__has_include`: + +```cpp +// CLI11 already does this if __has_include is defined +#ifndef __has_include + +#include + +// Use CLI namespace to avoid the conversion leaking into your other code +namespace CLI { + +template std::istringstream &operator>>(std::istringstream &in, boost::optional &val) { + T v; + in >> v; + val = v; + return in; +} + +} + +#endif + +#include +``` + +This is an example of how to use the system only; if you are just looking for a way to activate `boost::optional` support on older compilers, you should define `CLI11_BOOST_OPTIONAL` before including a CLI11 file, you'll get the `boost::optional` support. 
+ + +## Custom converters and type names: std::chrono example + +An example of adding a custom converter and typename for `std::chrono` follows: + +```cpp +namespace CLI +{ + template + std::istringstream &operator>>(std::istringstream &in, std::chrono::duration &val) + { + T v; + in >> v; + val = std::chrono::duration(v); + return in; + } + + template + std::stringstream &operator<<(std::stringstream &in, std::chrono::duration &val) + { + in << val.count(); + return in; + } + } + +#include + +namespace CLI +{ + namespace detail + { + template <> + constexpr const char *type_name() + { + return "TIME [H]"; + } + + template <> + constexpr const char *type_name() + { + return "TIME [MIN]"; + } + } +} +``` + +Thanks to Olivier Hartmann for the example. diff --git a/thirdparty/CLI11/book/chapters/an-advanced-example.md b/thirdparty/CLI11/book/chapters/an-advanced-example.md new file mode 100644 index 0000000..2b20dde --- /dev/null +++ b/thirdparty/CLI11/book/chapters/an-advanced-example.md @@ -0,0 +1,33 @@ +# Making a git clone + + + +Let's try our hand at a little `git` clone, called `geet`. It will just print its intent, rather than running actual code, since it's just a demonstration. Let's start by adding an app and requiring 1 subcommand to run: + +[include:"Intro"](../code/geet.cpp) + +Now, let's define the first subcommand, `add`, along with a few options: + +[include:"Add"](../code/geet.cpp) + +Now, let's add `commit`: + +[include:"Commit"](../code/geet.cpp) + +All that's needed now is the parse call. We'll print a little message after the code runs, and then return: + +[include:"Parse"](../code/geet.cpp) + +[Source code](https://github.com/CLIUtils/CLI11/tree/master/book/code/geet.cpp) + +If you compile and run: + +```term +gitbook:examples $ c++ -std=c++11 geet.cpp -o geet +``` + +You'll see it behaves pretty much like `git`. + +## Multi-file App parse code + +This example could be made much nicer if it was split into files, one per subcommand. 
If you simply use shared pointers instead of raw values in the lambda capture, you can tie the lifetime to the lambda function lifetime. CLI11 has a multifile example in its example folder. diff --git a/thirdparty/CLI11/book/chapters/basics.md b/thirdparty/CLI11/book/chapters/basics.md new file mode 100644 index 0000000..aa52cff --- /dev/null +++ b/thirdparty/CLI11/book/chapters/basics.md @@ -0,0 +1,27 @@ +# The Basics + +The simplest CLI11 program looks like this: + +[include](../code/simplest.cpp) + +The first line includes the library; this explicitly uses the single file edition (see [Selecting an edition](/chapters/installation)). + +After entering the main function, you'll see that a `CLI::App` object is created. This is the basis for all interactions with the library. You could optionally provide a description for your app here. + +A normal CLI11 application would define some flags and options next. This is a simplest possible example, so we'll go on. + +The macro `CLI11_PARSE` just runs five simple lines. This internally runs `app.parse(argc, argv)`, which takes the command line info from C++ and parses it. If there is an error, it throws a `ParseError`; if you catch it, you can use `app.exit` with the error as an argument to print a nice message and produce the correct return code for your application. + +If you just use `app.parse` directly, your application will still work, but the stack will not be correctly unwound since you have an uncaught exception, and the command line output will be cluttered, especially for help. + +For this (and most of the examples in this book) we will assume that we have the `CLI11.hpp` file in the current directory and that we are creating an output executable `a.out` on a macOS or Linux system. 
The commands to compile and test this example would be: + +```term +gitbook:examples $ g++ -std=c++11 simplest.cpp +gitbook:examples $ ./a.out -h +Usage: ./a.out [OPTIONS] + +Options: + -h,--help Print this help message and exit +``` + diff --git a/thirdparty/CLI11/book/chapters/config.md b/thirdparty/CLI11/book/chapters/config.md new file mode 100644 index 0000000..219c3d9 --- /dev/null +++ b/thirdparty/CLI11/book/chapters/config.md @@ -0,0 +1,181 @@ +# Accepting configure files + +## Reading a configure file + +You can tell your app to allow configure files with `set_config("--config")`. There are arguments: the first is the option name. If empty, it will clear the config flag. The second item is the default file name. If that is specified, the config will try to read that file. The third item is the help string, with a reasonable default, and the final argument is a boolean (default: false) that indicates that the configuration file is required and an error will be thrown if the file is not found and this is set to true. + +### Extra fields +Sometimes configuration files are used for multiple purposes so CLI11 allows options on how to deal with extra fields + +```cpp +app.allow_config_extras(true); +``` +will allow capture the extras in the extras field of the app. (NOTE: This also sets the `allow_extras` in the app to true) + +```cpp +app.allow_config_extras(false); +``` +will generate an error if there are any extra fields + +for slightly finer control there is a scoped enumeration of the modes +or +```cpp +app.allow_config_extras(CLI::config_extras_mode::ignore); +``` +will completely ignore extra parameters in the config file. This mode is the default. + +```cpp +app.allow_config_extras(CLI::config_extras_mode::capture); +``` +will store the unrecognized options in the app extras fields. 
This option is the closest equivalent to `app.allow_config_extras(true);` with the exception that it does not also set the `allow_extras` flag so using this option without also setting `allow_extras(true)` will generate an error which may or may not be the desired behavior. + +```cpp +app.allow_config_extras(CLI::config_extras_mode::error); +``` +is equivalent to `app.allow_config_extras(false);` + +### Getting the used configuration file name +If it is needed to get the configuration file name used this can be obtained via +`app.get_config_ptr()->as()` or +`app["--config"]->as()` assuming `--config` was the configuration option name. + +## Configure file format + +Here is an example configuration file, in [TOML](https://github.com/toml-lang/toml) format: + +```ini +# Comments are supported, using a # +# The default section is [default], case insensitive + +value = 1 +str = "A string" +vector = [1,2,3] + +# Section map to subcommands +[subcommand] +in_subcommand = Wow +[subcommand.sub] +subcommand = true # could also be give as sub.subcommand=true +``` + +Spaces before and after the name and argument are ignored. Multiple arguments are separated by spaces. One set of quotes will be removed, preserving spaces (the same way the command line works). Boolean options can be `true`, `on`, `1`, `y`, `t`, `+`, `yes`, `enable`; or `false`, `off`, `0`, `no`, `n`, `f`, `-`, `disable`, (case insensitive). Sections (and `.` separated names) are treated as subcommands (note: this does not necessarily mean that subcommand was passed, it just sets the "defaults". If a subcommand is set to `configurable` then passing the subcommand using `[sub]` in a configuration file will trigger the subcommand.) + +CLI11 also supports configuration file in INI format. 
+ +```ini +; Comments are supported, using a ; +; The default section is [default], case insensitive + +value = 1 +str = "A string" +vector = 1 2 3 + +; Section map to subcommands +[subcommand] +in_subcommand = Wow +sub.subcommand = true +``` + +The main differences are in vector notation and comment character. Note: CLI11 is not a full TOML parser as it just reads values as strings. It is possible (but not recommended) to mix notation. + +## Multiple configuration files + +If it is desired that multiple configuration be allowed. Use + +```cpp +app.set_config("--config")->expected(1, X); +``` + +Where X is some positive integer and will allow up to `X` configuration files to be specified by separate `--config` arguments. + +## Writing out a configure file + +To print a configuration file from the passed arguments, use `.config_to_str(default_also=false, write_description=false)`, where `default_also` will also show any defaulted arguments, and `write_description` will include option descriptions and the App description + +```cpp + + CLI::App app; + app.add_option(...); + // several other options + CLI11_PARSE(app, argc, argv); + //the config printout should be after the parse to capture the given arguments + std::cout<to_config(&app,true,true,"sub."); + //prefix can be used to set a prefix before each argument, like "sub." +``` + +### Customization of configure file output +The default config parser/generator has some customization points that allow variations on the TOML format. The default formatter has a base configuration that matches the TOML format. 
It defines 5 characters that define how different aspects of the configuration are handled +```cpp +/// the character used for comments +char commentChar = '#'; +/// the character used to start an array '\0' is a default to not use +char arrayStart = '['; +/// the character used to end an array '\0' is a default to not use +char arrayEnd = ']'; +/// the character used to separate elements in an array +char arraySeparator = ','; +/// the character used to separate the name from the value +char valueDelimiter = '='; +``` + +These can be modified via setter functions + +- ` ConfigBase *comment(char cchar)` Specify the character to start a comment block +- `ConfigBase *arrayBounds(char aStart, char aEnd)` Specify the start and end characters for an array +- `ConfigBase *arrayDelimiter(char aSep)` Specify the delimiter character for an array +- `ConfigBase *valueSeparator(char vSep)` Specify the delimiter between a name and value + +For example, to specify reading a configure file that used `:` to separate name and values + +```cpp +auto config_base=app.get_config_formatter_base(); +config_base->valueSeparator(':'); +``` + +The default configuration file will read INI files, but will write out files in the TOML format. To specify outputting INI formatted files use +```cpp +app.config_formatter(std::make_shared()); +``` +which makes use of a predefined modification of the ConfigBase class which TOML also uses. If a custom formatter is used that is not inheriting from the ConfigBase class, `get_config_formatter_base()` will return a nullptr if RTTI is on (usually the default), or garbage if RTTI is off, so some care must be exercised in its use with custom configurations. + +## Custom formats + +You can invent a custom format and set that instead of the default INI formatter. 
You need to inherit from `CLI::Config` and implement the following two functions: + +```cpp +std::string to_config(const CLI::App *app, bool default_also, bool, std::string) const; +std::vector from_config(std::istream &input) const; +``` + +The `CLI::ConfigItem`s that you return are simple structures with a name, a vector of parents, and a vector of results. An optionally customizable `to_flag` method on the formatter lets you change what happens when a ConfigItem turns into a flag. + +Finally, set your new class as the new config formatter: + +```cpp +app.config_formatter(std::make_shared()); +``` + +See [`examples/json.cpp`](https://github.com/CLIUtils/CLI11/blob/master/examples/json.cpp) for a complete JSON config example. + + +## Triggering Subcommands +Configuration files can be used to trigger subcommands if a subcommand is set to configure. By default, configuration files just set the default values of a subcommand. But if the `configure()` option is set on a subcommand, then if the subcommand is utilized via a `[subname]` block in the configuration file it will act as if it were called from the command line. Subsubcommands can be triggered via [subname.subsubname]. Using the `[[subname]]` will be as if the subcommand were triggered multiple times from the command line. This functionality can allow the configuration file to act as a scripting file. + +For custom configuration files this behavior can be triggered by specifying the parent subcommands in the structure and `++` as the name to open a new subcommand scope and `--` to close it. These names trigger the different callbacks of configurable subcommands. + +## Implementation Notes +The config file input works with any form of the option given: Long, short, positional, or the environment variable name. When generating a config file it will create a name in the following priority. + +1. First long name +1. Positional name +1. First short name +1. 
Environment name diff --git a/thirdparty/CLI11/book/chapters/flags.md b/thirdparty/CLI11/book/chapters/flags.md new file mode 100644 index 0000000..d4f88fb --- /dev/null +++ b/thirdparty/CLI11/book/chapters/flags.md @@ -0,0 +1,126 @@ +# Adding Flags + +The most basic addition to a command line program is a flag. This is simply something that does not take any arguments. Adding a flag in CLI11 is done in one of three ways. + +## Boolean flags + +The simplest way to add a flag is probably a boolean flag: + +```cpp +bool my_flag{false}; +app.add_flag("-f", my_flag, "Optional description"); +``` + +This will bind the flag `-f` to the boolean `my_flag`. After the parsing step, `my_flag` will be `false` if the flag was not found on the command line, or `true` if it was. By default, it will be allowed any number of times, but if you explicitly[^1] request `->take_last(false)`, it will only be allowed once; passing something like `./my_app -f -f` or `./my_app -ff` will throw a `ParseError` with a nice help description. + + +## Integer flags + +If you want to allow multiple flags, simply use any integer-like instead of a bool: + +```cpp +int my_flag{0}; +app.add_flag("-f", my_flag, "Optional description"); +``` + +After the parsing step, `my_flag` will contain the number of times this flag was found on the command line, including 0 if not found. + +## Arbitrary type flags + +CLI11 allows the type of the variable to assign to in the `add_flag` function to be any supported type. This is particularly useful in combination with specifying default values for flags. The allowed types include bool, int, float, vector, enum, or string-like. + +### Default Flag Values + +Flag options specified through the `add_flag*` functions allow a syntax for the option names to default particular options to a false value or any other value if some flags are passed. 
For example: + +```cpp +app.add_flag("--flag,!--no-flag",result,"help for flag"); +``` + +specifies that if `--flag` is passed on the command line result will be true or contain a value of 1. If `--no-flag` is +passed `result` will contain false or -1 if `result` is a signed integer type, or 0 if it is an unsigned type. An +alternative form of the syntax is more explicit: `"--flag,--no-flag{false}"`; this is equivalent to the previous +example. This also works for short form options `"-f,!-n"` or `"-f,-n{false}"`. If `variable_to_bind_to` is anything but an integer value the +default behavior is to take the last value given, while if `variable_to_bind_to` is an integer type the behavior will be to sum +all the given arguments and return the result. This can be modified if needed by changing the `multi_option_policy` on each flag (this is not inherited). +The default value can be any value. For example if you wished to define a numerical flag: + +```cpp +app.add_flag("-1{1},-2{2},-3{3}",result,"numerical flag") +``` + +using any of those flags on the command line will result in the specified number in the output. Similar things can be done for string values, and enumerations, as long as the default value can be converted to the given type. + +## Pure flags + +Every command that starts with `add_`, such as the flag commands, return a pointer to the internally stored `CLI::Option` that describes your addition. If you prefer, you can capture this pointer and use it, and that allows you to skip adding a variable to bind to entirely: + +```cpp +CLI::Option* my_flag = app.add_flag("-f", "Optional description"); +``` + +After parsing, you can use `my_flag->count()` to count the number of times this was found. You can also directly use the value (`*my_flag`) as a bool. `CLI::Option` will be discussed in more detail later. 
+ +## Callback flags + +If you want to define a callback that runs when you make a flag, you can use `add_flag_function` (C++11 or newer) or `add_flag` (C++14 or newer only) to add a callback function. The function should have the signature `void(std::size_t)`. This could be useful for a version printout, etc. + +``` +auto callback = [](int count){std::cout << "This was called " << count << " times";}; +app.add_flag_function("-c", callback, "Optional description"); +``` + + +## Aliases + +The name string, the first item of every `add_` method, can contain as many short and long names as you want, separated by commas. For example, `"-a,--alpha,-b,--beta"` would allow any of those to be recognized on the command line. If you use the same name twice, or if you use the same name in multiple flags, CLI11 will immediately throw a `CLI::ConstructionError` describing your problem (it will not wait until the parsing step). + +If you want to make an option case insensitive, you can use the `->ignore_case()` method on the `CLI::Option` to do that. 
For example, + +```cpp +bool flag{false}; +app.add_flag("--flag", flag) + ->ignore_case(); +``` + +would allow the following to count as passing the flag: + +```term +gitbook $ ./my_app --fLaG +``` + +## Example + +The following program will take several flags: + +[include:"define"](../code/flags.cpp) + +The values would be used like this: + +[include:"usage"](../code/flags.cpp) + +[Source code](https://github.com/CLIUtils/CLI11/tree/master/book/code/flags.cpp) + +If you compile and run: + +```term +gitbook:examples $ g++ -std=c++11 flags.cpp +gitbook:examples $ ./a.out -h +Flag example program +Usage: ./a.out [OPTIONS] + +Options: + -h,--help Print this help message and exit + -b,--bool This is a bool flag + -i,--int This is an int flag + -p,--plain This is a plain flag + +gitbook:examples $ ./a.out -bii --plain -i +The flags program +Bool flag passed +Flag int: 3 +Flag plain: 1 +``` + + +[^1] It will not inherit this from the parent defaults, since this is often useful even if you don't want all options to allow multiple passed options. diff --git a/thirdparty/CLI11/book/chapters/formatting.md b/thirdparty/CLI11/book/chapters/formatting.md new file mode 100644 index 0000000..66dd228 --- /dev/null +++ b/thirdparty/CLI11/book/chapters/formatting.md @@ -0,0 +1,78 @@ +# Formatting help output + +{% hint style='info' %} +New in CLI11 1.6 +{% endhint %} + +## Customizing an existing formatter + +In CLI11, you can control the output of the help printout in full or in part. The default formatter was written in such a way as to be customizable. You can use `app.get_formatter()` to get the current formatter. The formatter you set will be inherited by subcommands that are created after you set the formatter. 
+ +There are several configuration options that you can set: + + +| Set method | Description | Availability | +|------------|-------------|--------------| +| `column_width(width)` | The width of the columns | Both | +| `label(key, value)` | Set a label to a different value | Both | + +Labels will map the built in names and type names from key to value if present. For example, if you wanted to change the width of the columns to 40 and the `REQUIRED` label from `(REQUIRED)` to `(MUST HAVE)`: + +```cpp +app.get_formatter()->column_width(40); +app.get_formatter()->label("REQUIRED", "(MUST HAVE)"); +``` + +## Subclassing + +You can further configure pieces of the code while still keeping most of the formatting intact by subclassing either formatter and replacing any of the methods with your own. The formatters use virtual functions most places, so you are free to add or change anything about them. For example, if you wanted to remove the info that shows up between the option name and the description: + +```cpp +class MyFormatter : public CLI::Formatter { + public: + std::string make_option_opts(const CLI::Option *) const override {return "";} +}; +app.formatter(std::make_shared()); +``` + +Look at the class definitions in `FormatterFwd.hpp` or the method definitions in `Formatter.hpp` to see what methods you have access to and how they are put together. + +## Anatomy of a help message + +This is a normal printout, with `<>` indicating the methods used to produce each line. + +``` + + + + + + + + ... + + + + +``` + +`make_usage` calls `make_option_usage(opt)` on all the positionals to build that part of the line. `make_subcommand` passes the subcommand as the app pointer. + +The `make_groups` print the group name then call `make_option(o)` on the options listed in that group. 
The normal printout for an option looks like this: + +``` + make_option_opts(o) + ┌───┴────┐ + -n,--name (REQUIRED) This is a description +└────┬────┘ └──────────┬──────────┘ +make_option_name(o,p) make_option_desc(o) +``` + +Notes: + +* `*1`: This signature depends on whether the call is from a positional or optional. +* `o` is opt pointer, `p` is true if positional. + + + + diff --git a/thirdparty/CLI11/book/chapters/installation.md b/thirdparty/CLI11/book/chapters/installation.md new file mode 100644 index 0000000..c7bcc18 --- /dev/null +++ b/thirdparty/CLI11/book/chapters/installation.md @@ -0,0 +1,92 @@ +# Installation + +## Single file edition + +```cpp +#include +``` + +This example uses the single file edition of CLI11. You can download `CLI11.hpp` from the latest release and put it into the same folder as your source code, then compile this with C++ enabled. For a larger project, you can just put this in an include folder and you are set. + +## Full edition + +```cpp +#include +``` + +If you want to use CLI11 in its full form, you can also use the original multiple file edition. This has an extra utility (`Timer`), and is does not require that you use a release. The only change to your code would be the include shown above. + +### CMake support for the full edition + +If you use CMake 3.4+ for your project (highly recommended), CLI11 comes with a powerful CMakeLists.txt file that was designed to also be used with `add_subproject`. You can add the repository to your code (preferably as a git submodule), then add the following line to your project (assuming your folder is called CLI11): + +```cmake +add_subdirectory(CLI11) +``` + +Then, you will have a target `CLI11::CLI11` that you can link to with `target_link_libraries`. It will provide the include paths you need for the library. This is the way [GooFit](https://github.com/GooFit/GooFit) uses CLI11, for example. 
+ +You can also configure and optionally install CLI11, and CMake will create the necessary `lib/cmake/CLI11/CLI11Config.cmake` files, so `find_package(CLI11 CONFIG REQUIRED)` also works. + +If you use conan.io, CLI11 supports that too. + +### Running tests on the full edition + +CLI11 has examples and tests that can be accessed using a CMake build on any platform. Simply build and run ctest to run the 200+ tests to ensure CLI11 works on your system. + +As an example of the build system, the following code will download and test CLI11 in a simple Alpine Linux docker container [^1]: + +```term +gitbook:~ $ docker run -it alpine +root:/ # apk add --no-cache g++ cmake make git +fetch ... +root:/ # git clone https://github.com/CLIUtils/CLI11.git +Cloning into 'CLI11' ... +root:/ # cd CLI11 +root:CLI11 # mkdir build +root:CLI11 # cd build +root:build # cmake .. +-- The CXX compiler identification is GNU 6.3.0 ... +root:build # make +Scanning dependencies ... +root:build # make test +[warning]Running tests... +Test project /CLI11/build + Start 1: HelpersTest + 1/10 Test #1: HelpersTest ...................... Passed 0.01 sec + Start 2: IniTest + 2/10 Test #2: IniTest .......................... Passed 0.01 sec + Start 3: SimpleTest + 3/10 Test #3: SimpleTest ....................... Passed 0.01 sec + Start 4: AppTest + 4/10 Test #4: AppTest .......................... Passed 0.02 sec + Start 5: CreationTest + 5/10 Test #5: CreationTest ..................... Passed 0.01 sec + Start 6: SubcommandTest + 6/10 Test #6: SubcommandTest ................... Passed 0.01 sec + Start 7: HelpTest + 7/10 Test #7: HelpTest ......................... Passed 0.01 sec + Start 8: NewParseTest + 8/10 Test #8: NewParseTest ..................... Passed 0.01 sec + Start 9: TimerTest + 9/10 Test #9: TimerTest ........................ Passed 0.24 sec + Start 10: link_test_2 +10/10 Test #10: link_test_2 ...................... 
Passed 0.00 sec + +100% tests passed, 0 tests failed out of 10 + +Total Test time (real) = 0.34 sec +``` + +For the curious, the CMake options and defaults are listed below. Most options default to off if CLI11 is used as a subdirectory in another project. + +| Option | Description | +|--------|-------------| +| `CLI11_SINGLE_FILE=ON` | Build the `CLI11.hpp` file from the sources. Requires Python (version 3 or 2.7). | +| `CLI11_SINGLE_FILE_TESTS=OFF` | Run the tests on the generated single file version as well | +| `CLI11_EXAMPLES=ON` | Build the example programs. | +| `CLI11_TESTING=ON` | Build the tests. | +| `CLI11_CLANG_TIDY=OFF` | Run `clang-tidy` on the examples and headers. Requires CMake 3.6+. | +| `CLI11_CLANG_TIDY_OPTIONS=""` | Options to pass to `clang-tidy`, such as `-fix` (single threaded build only if applying fixes!) | + +[^1]: Docker is being used to create a pristine disposable environment; there is nothing special about this container. Alpine is being used because it is small, modern, and fast. Commands are similar on any other platform. diff --git a/thirdparty/CLI11/book/chapters/internals.md b/thirdparty/CLI11/book/chapters/internals.md new file mode 100644 index 0000000..17bc54a --- /dev/null +++ b/thirdparty/CLI11/book/chapters/internals.md @@ -0,0 +1,45 @@ +# CLI11 Internals + +## Callbacks + +The library was designed to bind to existing variables without requiring typed classes or inheritance. This is accomplished through lambda functions. + +This looks like: + +```cpp +Option* add_option(string name, T item) { + this->function = [&item](string value){ + item = detail::lexical_cast(value); + } +} +``` + +Obviously, you can't access `T` after the `add_` method is over, so it stores the string representation of the default value if it receives the special `true` value as the final argument (not shown above). + +## Parsing + +Parsing follows the following procedure: + +1. `_validate`: Make sure the defined options are self consistent. +2. 
`_parse`: Main parsing routine. See below. +3. `_run_callback`: Run an App callback if present. + +The parsing phase is the most interesting: + +1. `_parse_single`: Run on each entry on the command line and fill the options/subcommands. +2. `_process`: Run the procedure listed below. +3. `_process_extra`: This throws an error if needed on extra arguments that didn't fit in the parse. + +The `_process` procedure runs the following steps; each step is recursive and completes all subcommands before moving to the next step (new in 1.7). This ensures that interactions between options and subcommand options are consistent. + +1. `_process_ini`: This reads an INI file and fills/changes options as needed. +2. `_process_env`: Look for environment variables. +3. `_process_callbacks`: Run the callback functions - this fills out the variables. +4. `_process_help_flags`: Run help flags if present (and quit). +5. `_process_requirements`: Make sure needs/excludes, required number of options present. + +## Exceptions + +The library immediately returns a C++ exception when it detects a problem, such as an incorrect construction or a malformed command line. + + diff --git a/thirdparty/CLI11/book/chapters/options.md b/thirdparty/CLI11/book/chapters/options.md new file mode 100644 index 0000000..04bfd0b --- /dev/null +++ b/thirdparty/CLI11/book/chapters/options.md @@ -0,0 +1,269 @@ +# Options + +## Simple options +The most versatile addition to a command line program is an option. This is like a flag, but it takes an argument. CLI11 handles all the details for many types of options for you, based on their type. To add an option: + + +```cpp +int int_option{0}; +app.add_option("-i", int_option, "Optional description"); +``` + +This will bind the option `-i` to the integer `int_option`. On the command line, a single value that can be converted to an integer will be expected. Non-integer results will fail. If that option is not given, CLI11 will not touch the initial value. 
This allows you to set up defaults by simply setting your value beforehand. If you want CLI11 to display your default value, you can add the optional final argument `true` when you add the option. + +```cpp +int int_option{0}; +app.add_option("-i", int_option, "Optional description", true); +``` + +You can use any C++ int-like type, not just `int`. CLI11 understands the following categories of types: + +| Type | CLI11 | +|-------------|-------| +| number like | Integers, floats, bools, or any type that can be constructed from an integer or floating point number | +| string-like | std\::string, or anything that can be constructed from or assigned a std\::string | +| char | For a single char, single string values are accepted, otherwise longer strings are treated as integral values and a conversion is attempted | +| complex-number | std::complex or any type which has a real(), and imag() operations available, will allow 1 or 2 string definitions like "1+2j" or two arguments "1","2" | +| enumeration | any enum or enum class type is supported through conversion from the underlying type(typically int, though it can be specified otherwise) | +| container-like | a container(like vector) of any available types including other containers | +| wrapper | any other object with a `value_type` static definition where the type specified by `value_type` is one of type in this list | +| tuple | a tuple, pair, or array, or other type with a tuple size and tuple_type operations defined and the members being a type contained in this list | +| function | A function that takes an array of strings and returns a string that describes the conversion failure or empty for success. May be the empty function. (`{}`) | +| streamable | any other type with a `<<` operator will also work | + +By default, CLI11 will assume that an option is optional, and one value is expected if you do not use a vector. You can change this on a specific option using option modifiers. 
+ +## Positional options and aliases + +When you give an option on the command line without a name, that is a positional option. Positional options are accepted in the same order they are defined. So, for example: + +```term +gitbook:examples $ ./a.out one --two three four +``` + +The string `one` would have to be the first positional option. If `--two` is a flag, then the remaining two strings are positional. If `--two` is a one-argument option, then `four` is the second positional. If `--two` accepts two or more arguments, then there are no more positionals. + +To make a positional option, you simply give CLI11 one name that does not start with a dash. You can have as many (non-overlapping) names as you want for an option, but only one positional name. So the following name string is valid: + +```cpp +"-a,-b,--alpha,--beta,mypos" +``` + +This would make two short option aliases, two long option aliases, and the option would also be accepted as a positional. + +## Containers of options + +If you use a vector or other container instead of a plain option, you can accept more than one value on the command line. By default, a container accepts as many options as possible, until the next value that could be a valid option name. You can specify a set number using an option modifier `->expected(N)`. (The default unlimited behavior on vectors is restored with `N=-1`) CLI11 does not differentiate between these two methods for unlimited acceptance options. + +| Separate names | Combined names | +|-------------------|-----------------| +| `--vec 1 --vec 2` | `--vec 1 2` | + +It is also possible to specify a minimum and maximum number through `->expected(Min,Max)`. It is also possible to specify a min and max type size for the elements of the container. In most cases these values will be automatically determined but a user can manually restrict them. 
+ +An example of setting up a vector option: + +```cpp +std::vector int_vec; +app.add_option("--vec", int_vec, "My vector option"); +``` + +Vectors will be replaced by the parsed content if the option is given on the command line. + +A definition of a container for purposes of CLI11 is a type with an `end()`, `insert(...)`, `clear()` and `value_type` definitions. This includes `vector`, `set`, `deque`, `list`, `forward_list`, `map`, `unordered_map` and a few others from the standard library, and many other containers from the boost library. + +### containers of containers +Containers of containers are also supported. +```cpp +std::vector> int_vec; +app.add_option("--vec", int_vec, "My vector of vectors option"); +``` +CLI11 inserts a separator sequence at the start of each argument call to separate the vectors. So unless the separators are injected as part of the command line each call of the option on the command line will result in a separate element of the outer vector. This can be manually controlled via `inject_separator(true|false)` but in nearly all cases this should be left to the defaults. To insert a separator from the command line add a `%%` where the separation should occur. +``` +cmd --vec_of_vec 1 2 3 4 %% 1 2 +``` +would then result in a container of size 2 with the first element containing 4 values and the second 2. + +This separator is also the only way to get values into something like +```cpp +std::pair,std::vector> two_vecs; +app.add_option("--vec", two_vecs, "pair of vectors"); +``` +without calling the argument twice. + +Further levels of nesting containers should compile but intermediate layers will only have a single element in the container, so it is probably not that useful. 
+ +### Nested types +Types can be nested For example +```cpp +std::map> map; +app.add_option("--dict", map, "map of pairs"); +``` + +will require 3 arguments for each invocation, and multiple sets of 3 arguments can be entered for a single invocation on the command line. + +```cpp +std::map>> map; +app.add_option("--dict", map, "map of pairs"); +``` +will result in a requirement for 2 integers on each invocation and absorb an unlimited number of strings including 0. + +## Option modifiers + +When you call `add_option`, you get a pointer to the added option. You can use that to add option modifiers. A full listing of the option modifiers: + +| Modifier | Description | +|----------|-------------| +| `->required()` | The program will quit if this option is not present. This is `mandatory` in Plumbum, but required options seems to be a more standard term. For compatibility, `->mandatory()` also works. | +| `->expected(N)` | Take `N` values instead of as many as possible, mainly for vector args. | +| `->expected(Nmin,Nmax)` | Take between `Nmin` and `Nmax` values. | +| `->type_size(N)` | specify that each block of values would consist of N elements | +| `->type_size(Nmin,Nmax)` | specify that each block of values would consist of between Nmin and Nmax elements | +| `->needs(opt)` | This option requires another option to also be present, opt is an `Option` pointer. | +| `->excludes(opt)` | This option cannot be given with `opt` present, opt is an `Option` pointer. | +| `->envname(name)` | Gets the value from the environment if present and not passed on the command line. | +| `->group(name)` | The help group to put the option in. No effect for positional options. Defaults to `"Options"`. `"Hidden"` will not show up in the help print. | +| `->description(string)` | Set/change the description | +| `->ignore_case()` | Ignore the case on the command line (also works on subcommands, does not affect arguments). 
| +| `->ignore_underscore()` | Ignore any underscores on the command line (also works on subcommands, does not affect arguments). | +| `->allow_extra_args()` | Allow extra argument values to be included when an option is passed. Enabled by default for vector options. | +| `->disable_flag_override()` | specify that flag options cannot be overridden on the command line use `=` | +| `->delimiter('')` | specify a character that can be used to separate elements in a command line argument, default is , common values are ',', and ';' | +| `->multi_option_policy( CLI::MultiOptionPolicy::Throw)` | Sets the policy for handling multiple arguments if the option was received on the command line several times. `Throw`ing an error is the default, but `TakeLast`, `TakeFirst`, `TakeAll`, and `Join` are also available. See the next four lines for shortcuts to set this more easily. | +| `->take_last()` | Only use the last option if passed several times. This is always true by default for bool options, regardless of the app default, but can be set to false explicitly with `->multi_option_policy()`. | +| `->take_first()` | sets `->multi_option_policy(CLI::MultiOptionPolicy::TakeFirst)` | +| `->take_all()` | sets `->multi_option_policy(CLI::MultiOptionPolicy::TakeAll)` | +| `->join()` | sets `->multi_option_policy(CLI::MultiOptionPolicy::Join)`, which uses newlines or the specified delimiter to join all arguments into a single string output. | +| `->join(delim)` | sets `->multi_option_policy(CLI::MultiOptionPolicy::Join)`, which uses `delim` to join all arguments into a single string output. this also sets the delimiter | +| `->check(Validator)` | perform a check on the returned results to verify they meet some criteria. See [Validators](./validators.md) for more info | +| `->transform(Validator)` | Run a transforming validator on each value passed. See [Validators](./validators.md) for more info | +| `->each(void(std::string))` | Run a function on each parsed value, *in order*. 
| +| `->default_str(string)` | set a default string for use in the help and as a default value if no arguments are passed and a value is requested | +| `->default_function(string())` | Advanced: Change the function that `capture_default_str()` uses. | +| `->default_val(value)` | Generate the default string from a value and validate that the value is also valid. For options that assign directly to a value type the value in that type is also updated. Value must be convertible to a string(one of known types or have a stream operator). | + +The `->check(...)` and `->transform(...)` modifiers can also take a callback function of the form `bool function(std::string)` that runs on every value that the option receives, and returns a value that tells CLI11 whether the check passed or failed. + +## Using the `CLI::Option` pointer + +Each of the option creation mechanisms returns a pointer to the internally stored option. If you save that pointer, you can continue to access the option, and change setting on it later. The Option object can also be converted to a bool to see if it was passed, or `->count()` can be used to see how many times the option was passed. Since flags are also options, the same methods work on them. + +```cpp +CLI::Option* opt = app.add_flag("--opt"); + +CLI11_PARSE(app, argv, argc); + +if(* opt) + std::cout << "Flag received " << opt->count() << " times." << std::endl; +``` + +## Inheritance of defaults + +One of CLI11's systems to allow customizability without high levels of verbosity is the inheritance system. You can set default values on the parent `App`, and all options and subcommands created from it remember the default values at the point of creation. The default value for Options, specifically, are accessible through the `option_defaults()` method. There are a number of settings that can be set and inherited: + +* `group`: The group name starts as "Options" +* `required`: If the option must be given. Defaults to `false`. Is ignored for flags. 
+* `multi_option_policy`: What to do if several copies of an option are passed and one value is expected. Defaults to `CLI::MultiOptionPolicy::Throw`. This is also used for bool flags, but they always are created with the value `CLI::MultiOptionPolicy::TakeLast` regardless of the default, so that multiple bool flags does not cause an error. But you can override that flag by flag. +* `ignore_case`: Allow any mixture of cases for the option or flag name +* `ignore_underscore`: Allow any number of underscores in the option or flag name +* `configurable`: Specify whether an option can be configured through a config file +* `disable_flag_override`: do not allow flag values to be overridden on the command line +* `always_capture_default`: specify that the default values should be automatically captured. +* `delimiter`: A delimiter to use for capturing multiple values in a single command line string (e.g. --flag="flag,-flag2,flag3") + +An example of usage: + +``` +app.option_defaults()->ignore_case()->group("Required"); + +app.add_flag("--CaSeLeSs"); +app.get_group() // is "Required" +``` + +Groups are mostly for visual organization, but an empty string for a group name will hide the option. + + +### Windows style options + +You can also set the app setting `app->allow_windows_style_options()` to allow windows style options to also be recognized on the command line: + +* `/a` (flag) +* `/f filename` (option) +* `/long` (long flag) +* `/file filename` (space) +* `/file:filename` (colon) +* `/long_flag:false` (long flag with : to override the default value) + +Windows style options do not allow combining short options or values not separated from the short option like with `-` options. You still specify option names in the same manner as on Linux with single and double dashes when you use the `add_*` functions, and the Linux style on the command line will still work. If a long and a short option share the same name, the option will match on the first one defined. 
+ +## Parse configuration + +How an option and its arguments are parsed depends on a set of controls that are part of the option structure. In most circumstances these controls are set automatically based on the type or function used to create the option and the type the arguments are parsed into. The variables define the size of the underlying type (essentially how many strings make up the type), the expected size (how many groups are expected) and a flag indicating if multiple groups are allowed with a single option. And these interact with the `multi_option_policy` when it comes time to parse. + +### examples +How options manage this is best illustrated through some examples +```cpp +std::string val; +app.add_option("--opt",val,"description"); +``` +creates an option that assigns a value to a `std::string` When this option is constructed it sets a type_size min and max of 1. Meaning that the assignment uses a single string. The Expected size is also set to 1 by default, and `allow_extra_args` is set to false. meaning that each time this option is called 1 argument is expected. This would also be the case if val were a `double`, `int` or any other single argument types. + +now for example +```cpp +std::pair val; +app.add_option("--opt",val,"description"); +``` + +In this case the typesize is automatically detected to be 2 instead of 1, so the parsing would expect 2 arguments associated with the option. + +```cpp +std::vector val; +app.add_option("--opt",val,"description"); +``` + +detects a type size of 1, since the underlying element type is a single string, so the minimum number of strings is 1. But since it is a vector the expected number can be very big. The default for a vector is (1<<30), and the allow_extra_args is set to true. This means that at least 1 argument is expected to follow the option, but arbitrary numbers of arguments may follow. These are checked if they have the form of an option but if not they are added to the argument. 
+ +```cpp +std::vector> val; +app.add_option("--opt",val,"description"); +``` +gets into the complicated cases where the type size is now 3. and the expected max is set to a large number and `allow_extra_args` is set to true. In this case at least 3 arguments are required to follow the option, and subsequent groups must come in groups of three, otherwise an error will result. + +```cpp +bool val{false}; +app.add_flag("--opt",val,"description"); +``` + +Using the add_flag methods for creating options creates an option with an expected size of 0, implying no arguments can be passed. + +```cpp +std::complex val; +app.add_option("--opt",val,"description"); +``` + +triggers the complex number type which has a min of 1 and max of 2, so 1 or 2 strings can be passed. Complex number conversion supports arguments of the form "1+2j" or "1","2", or "1" "2i". The imaginary number symbols `i` and `j` are interchangeable in this context. + + +```cpp +std::vector> val; +app.add_option("--opt",val,"description"); +``` +has a type size of 1 to (1<<30). + +### Customization + +The `type_size(N)`, `type_size(Nmin, Nmax)`, `expected(N)`, `expected(Nmin,Nmax)`, and `allow_extra_args()` can be used to customize an option. For example + +```cpp +std::string val; +auto opt=app.add_flag("--opt{vvv}",val,"description"); +opt->expected(0,1); +``` +will create a hybrid option, that can exist on its own in which case the value "vvv" is used or if a value is given that value will be used. + +There are some additional options that can be specified to modify an option for specific cases +- `->run_callback_for_default()` will specify that the callback should be executed when a default_val is set. This is set automatically when appropriate though it can be turned on or off and any user specified callback for an option will be executed when the default value for an option is set. 
+ +## Unusual circumstances +There are a few cases where some things break down in the type system managing options and definitions. Using the `add_option` method defines a lambda function to extract a default value if required. In most cases this is either straightforward or a failure is detected automatically and handled. But in a few cases a streaming template is available that several layers down may not actually be defined. The conditions in CLI11 cannot detect this circumstance automatically and will result in a compile error. One specific known case is `boost::optional` if the boost optional_io header is included. This header defines a template for all boost optional values even if they do not actually have a streaming operator. For example `boost::optional` does not have a streaming operator but one is detected since it is part of a template. For these cases a secondary method `app->add_option_no_stream(...)` is provided that bypasses this operation completely and should compile in these cases. + diff --git a/thirdparty/CLI11/book/chapters/subcommands.md b/thirdparty/CLI11/book/chapters/subcommands.md new file mode 100644 index 0000000..230cca8 --- /dev/null +++ b/thirdparty/CLI11/book/chapters/subcommands.md @@ -0,0 +1,129 @@ +# Subcommands and the App + +Subcommands are keywords that invoke a new set of options and features. For example, the `git` +command has a long series of subcommands, like `add` and `commit`. Each can have its own options +and implementations. This chapter will focus on implementations that are contained in the same +C++ application, though the system git uses to extend the main command by calling other commands +in separate executables is supported too; that's called "Prefix commands" and is included at the +end of this chapter. + + +## The parent App + +We'll start by discussing the parent `App`. You've already used it quite a bit, to create +options and set option defaults. 
There are several other things you can do with an `App`, however. + +You are given a lot of control over the help output. You can set a footer with `app.footer("My Footer")`. +You can replace the default help print when a `ParseError` is thrown with `app.set_failure_message(CLI::FailureMessage::help)`. +The default is `CLI::FailureMessage::simple`, and you can easily define a new one. Just make a (lambda) function that takes an App pointer +and a reference to an error code (even if you don't use them), and returns a string. + + +## Adding a subcommand + +Subcommands can be added just like an option: + +```cpp +CLI::App* sub = app.add_subcommand("sub", "This is a subcommand"); +``` + +The subcommand should have a name as the first argument, and a little description for the +second argument. A pointer to the internally stored subcommand is provided; you usually will +be capturing that pointer and using it later (though you can use callbacks if you prefer). As +always, feel free to use `auto sub = ...` instead of naming the type. + +You can check to see if the subcommand was received on the command line several ways: + +```cpp +if(*sub) ... +if(sub->parsed()) ... +if(app.got_subcommand(sub)) ... +if(app.got_subcommand("sub")) ... +``` + +You can also get a list of subcommands with `get_subcommands()`, and they will be in parsing order. + +There are a lot of options that you can set on a subcommand; in fact, +subcommands have exactly the same options as your main app, since they are actually +the same class of object (as you may have guessed from the type above). This has the +pleasant side effect of making subcommands infinitely nestable. + +## Required subcommands + +Each App has controls to set the number of subcommands you expect. This is controlled by: + +```cpp +app.require_subcommand(/* min */ 0, /* max */ 1); +``` +If you set the max to 0, CLI11 will allow an unlimited number of subcommands. 
After the (non-unlimited) maximum +is reached, CLI11 will stop trying to match subcommands. So if you pass "`one two`" to a command, and both `one` +and `two` are subcommands, it will depend on the maximum number as to whether the "`two`" is a subcommand or an argument to the +"`one`" subcommand. + +As a shortcut, you can also call the `require_subcommand` method with one argument; that will be the fixed number of subcommands if positive, it +will be the maximum number if negative. Calling it without an argument will set the required subcommands to 1 or more. + +The maximum number of subcommands is inherited by subcommands. This allows you to set the maximum to 1 once at the beginning on the parent app if you only want single subcommands throughout your app. You should keep this in mind if you are dealing with lots of nested subcommands. + +## Using callbacks + +You've already seen how to check to see what subcommands were given. It's often much easier, however, to just define the code you want to run when you are making your parser, and not run a bunch of code after `CLI11_PARSE` to analyse the state (Procedural! Yuck!). You can do that with lambda functions. A `std::function` callback `.callback()` is provided, and CLI11 ensures that all options are prepared and usable by reference capture before entering the callback. An +example is shown below in the `geet` program. + +## Inheritance of defaults + +The following values are inherited when you add a new subcommand. This happens at the point the subcommand is created: + +* The name and description for the help flag +* The footer +* The failure message printer function +* Option defaults +* Allow extras +* Prefix command +* Ignore case +* Ignore underscore +* Allow Windows style options +* Fallthrough +* Group name +* Max required subcommands + +## Special modes + +There are several special modes for Apps and Subcommands. 
+ +### Allow extras + +Normally CLI11 throws an error if you don't match all items given on the command line. However, you can enable `allow_extras()` +to instead store the extra values in `.remaining()`. You can get all remaining options including those in contained subcommands recursively in the original order with `.remaining(true)`. +`.remaining_size()` is also provided; this counts the size but ignores the `--` special separator if present. + +### Fallthrough + +Fallthrough allows an option that does not match in a subcommand to "fall through" to the parent command; if that parent +allows that option, it matches there instead. This was added to allow CLI11 to represent models: + +```term +gitbook:code $ ./my_program my_model_1 --model_flag --shared_flag +``` + +Here, `--shared_flag` was set on the main app, and on the command line it "falls through" `my_model_1` to match on the main app. + +### Prefix command + +This is a special mode that allows "prefix" commands, where the parsing completely stops when it gets to an unknown option. Further unknown options are ignored, even if they could match. Git is the traditional example for prefix commands; if you run git with an unknown subcommand, like "`git thing`", it then calls another command called "`git-thing`" with the remaining options intact. + +### Silent subcommands + +Subcommands can be modified by using the `silent` option. This will prevent the subcommand from showing up in the get_subcommands list. This can be used to make subcommands into modifiers. 
For example, a help subcommand might look like + +```c++ + auto sub1 = app.add_subcommand("help")->silent(); + sub1->parse_complete_callback([]() { throw CLI::CallForHelp(); }); +``` + +This would allow calling help such as: + +```bash +./app help +./app help sub1 +``` diff --git a/thirdparty/CLI11/book/chapters/toolkits.md b/thirdparty/CLI11/book/chapters/toolkits.md new file mode 100644 index 0000000..3b8e93a --- /dev/null +++ b/thirdparty/CLI11/book/chapters/toolkits.md @@ -0,0 +1,30 @@ +# Using CLI11 in a Toolkit + +CLI11 was designed to be integrated into a toolkit, providing a native experience for users. This was used in GooFit to provide `GooFit::Application`, a class designed to make ROOT users feel at home. + +# Custom namespace + +If you want to provide CLI11 in a custom namespace, you'll want to at least put `using CLI::App` in your namespace. You can also include Option, some errors, and validators. You can also put `using namespace CLI` inside your namespace to import everything. + +You may also want to make your own copy of the `CLI11_PARSE` macro. Something like: + +```cpp + #define MYPACKAGE_PARSE(app, argv, argc) \ + try { \ + app.parse(argv, argc); \ + } catch(const CLI::ParseError &e) { \ + return app.exit(e); \ + } +``` + + +# Subclassing App + +If you subclass `App`, you'll just need to do a few things. You'll need a constructor; calling the base `App` constructor is a good idea, but not necessary (it just sets a description and adds a help flag). + +You can call anything you would like to configure in the constructor, like `option_defaults()->take_last()` or `fallthrough()`, and it will be set on all user instances. You can add flags and options, as well. + + +# Virtual functions provided + +You are given a few virtual functions that you can change (only on the main App). `pre_callback` runs right before the callbacks run, letting you print out custom messages at the top of your app. 
diff --git a/thirdparty/CLI11/book/chapters/validators.md b/thirdparty/CLI11/book/chapters/validators.md new file mode 100644 index 0000000..6266a9c --- /dev/null +++ b/thirdparty/CLI11/book/chapters/validators.md @@ -0,0 +1,64 @@ +# Validators + +There are two forms of validators: + +* `transform` validators: mutating +* `check` validators: non-mutating (recommended unless the parsed string must be mutated) + +A transform validator comes in one form, a function with the signature `std::string(std::string)`. +The function will take a string and return the modified version of the string. If there is an error, +the function should throw a `CLI::ValidationError` with the appropriate reason as a message. + +However, `check` validators come in two forms; either a simple function with the const version of the +above signature, `std::string(const std::string &)`, or a subclass of `struct CLI::Validator`. This +structure has two members that a user should set; one (`func_`) is the function to add to the Option +(exactly matching the above function signature, since it will become that function), and the other is +`name_`, and is the type name to set on the Option (unless empty, in which case the typename will be +left unchanged). + +Validators can be combined with `&` and `|`, and they have an `operator()` so that you can call them +as if they were a function. In CLI11, const static versions of the validators are provided so that +the user does not have to call a constructor also. 
+ +An example of a custom validator: + +```cpp +struct LowerCaseValidator : public Validator { + LowerCaseValidator() { + name_ = "LOWER"; + func_ = [](const std::string &str) { + if(CLI::detail::to_lower(str) != str) + return std::string("String is not lower case"); + else + return std::string(); + }; + } +}; +const static LowerCaseValidator Lowercase; +``` + +If you were not interested in the extra features of Validator, you could simply pass the lambda function above to the `->check()` method of `Option`. + +The built-in validators for CLI11 are: + +| Validator | Description | +|---------------------|-------------| +| `ExistingFile` | Check for existing file (returns error message if check fails) | +| `ExistingDirectory` | Check for an existing directory (returns error message if check fails) | +| `ExistingPath` | Check for an existing path | +| `NonexistentPath` | Check for an non-existing path | +| `Range(min=0, max)` | Produce a range (factory). Min and max are inclusive. | + + +And, the protected members that you can set when you make your own are: + +| Type | Member | Description | +|------|--------|-------------| +| `std::function` | `func_` | Core validation function - modifies input and returns "" if successful | +| `std::function` | `desc_function` | Optional description function (uses `description_` instead if not set) | +| `std::string` | `name_` | The name for search purposes | +| `int` (`-1`) | `application_index_` | The element this validator applies to (-1 for all) | +| `bool` (`true`) | `active_` | This can be disabled | +| `bool` (`false`) | `non_modifying_` | Specify that this is a Validator instead of a Transformer | + + diff --git a/thirdparty/CLI11/book/code/CMakeLists.txt b/thirdparty/CLI11/book/code/CMakeLists.txt new file mode 100644 index 0000000..b91a9c7 --- /dev/null +++ b/thirdparty/CLI11/book/code/CMakeLists.txt @@ -0,0 +1,38 @@ +cmake_minimum_required(VERSION 3.11) + +project(CLI11_Examples LANGUAGES CXX) + +# Using CMake 3.11's 
ability to set imported interface targets +add_library(CLI11::CLI11 IMPORTED INTERFACE) +target_include_directories(CLI11::CLI11 INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/../../include") +target_compile_features(CLI11::CLI11 INTERFACE cxx_std_11) + +# Add CTest +enable_testing() + +# Quick function to add the base executable +function(add_cli_exe NAME) + add_executable(${NAME} ${NAME}.cpp) + target_link_libraries(${NAME} CLI11::CLI11) +endfunction() + + +add_cli_exe(simplest) +add_test(NAME simplest COMMAND simplest) + + +add_cli_exe(intro) +add_test(NAME intro COMMAND intro) +add_test(NAME intro_p COMMAND intro -p 5) + + +add_cli_exe(flags) +add_test(NAME flags COMMAND flags) +add_test(NAME flags_bip COMMAND flags -b -i -p) + + +add_cli_exe(geet) +add_test(NAME geet_add COMMAND geet add) +add_test(NAME geet_commit COMMAND geet commit -m "Test") + + diff --git a/thirdparty/CLI11/book/code/flags.cpp b/thirdparty/CLI11/book/code/flags.cpp new file mode 100644 index 0000000..54ac71a --- /dev/null +++ b/thirdparty/CLI11/book/code/flags.cpp @@ -0,0 +1,36 @@ +#include "CLI/CLI.hpp" +#include + +int main(int argc, char **argv) { + using std::cout; + using std::endl; + CLI::App app{"Flag example program"}; + + /// [define] + bool flag_bool; + app.add_flag("--bool,-b", flag_bool, "This is a bool flag"); + + int flag_int; + app.add_flag("-i,--int", flag_int, "This is an int flag"); + + CLI::Option *flag_plain = app.add_flag("--plain,-p", "This is a plain flag"); + /// [define] + + /// [parser] + try { + app.parse(argc, argv); + } catch(const CLI::ParseError &e) { + return app.exit(e); + } + /// [parser] + + /// [usage] + cout << "The flags program" << endl; + if(flag_bool) + cout << "Bool flag passed" << endl; + if(flag_int > 0) + cout << "Flag int: " << flag_int << endl; + if(*flag_plain) + cout << "Flag plain: " << flag_plain->count() << endl; + /// [usage] +} diff --git a/thirdparty/CLI11/book/code/geet.cpp b/thirdparty/CLI11/book/code/geet.cpp new file mode 100644 index 
0000000..a4caf26 --- /dev/null +++ b/thirdparty/CLI11/book/code/geet.cpp @@ -0,0 +1,50 @@ +#include "CLI/CLI.hpp" + +#include + +int main(int argc, char **argv) { + + /// [Intro] + CLI::App app{"Geet, a command line git lookalike that does nothing"}; + app.require_subcommand(1); + /// [Intro] + + /// [Add] + auto add = app.add_subcommand("add", "Add file(s)"); + + bool add_update; + add->add_flag("-u,--update", add_update, "Add updated files only"); + + std::vector add_files; + add->add_option("files", add_files, "Files to add"); + + add->callback([&]() { + std::cout << "Adding:"; + if(add_files.empty()) { + if(add_update) + std::cout << " all updated files"; + else + std::cout << " all files"; + } else { + for(auto file : add_files) + std::cout << " " << file; + } + }); + /// [Add] + + /// [Commit] + auto commit = app.add_subcommand("commit", "Commit files"); + + std::string commit_message; + commit->add_option("-m,--message", commit_message, "A message")->required(); + + commit->callback([&]() { std::cout << "Commit message: " << commit_message; }); + /// [Commit] + + /// [Parse] + CLI11_PARSE(app, argc, argv); + + std::cout << "\nThanks for using geet!\n" << std::endl; + return 0; + /// [Parse] +} diff --git a/thirdparty/CLI11/book/code/intro.cpp b/thirdparty/CLI11/book/code/intro.cpp new file mode 100644 index 0000000..2db50f1 --- /dev/null +++ b/thirdparty/CLI11/book/code/intro.cpp @@ -0,0 +1,15 @@ +#include "CLI/CLI.hpp" +#include + +int main(int argc, char **argv) { + CLI::App app{"App description"}; + + // Define options + int p = 0; + app.add_option("-p", p, "Parameter"); + + CLI11_PARSE(app, argc, argv); + + std::cout << "Parameter value: " << p << std::endl; + return 0; +} diff --git a/thirdparty/CLI11/book/code/simplest.cpp b/thirdparty/CLI11/book/code/simplest.cpp new file mode 100644 index 0000000..a848ff3 --- /dev/null +++ b/thirdparty/CLI11/book/code/simplest.cpp @@ -0,0 +1,11 @@ +#include "CLI/CLI.hpp" + +int main(int argc, char **argv) { + 
CLI::App app; + + // Add new options/flags here + + CLI11_PARSE(app, argc, argv); + + return 0; +} diff --git a/thirdparty/CLI11/cmake/CLI11.pc.in b/thirdparty/CLI11/cmake/CLI11.pc.in new file mode 100644 index 0000000..8d41873 --- /dev/null +++ b/thirdparty/CLI11/cmake/CLI11.pc.in @@ -0,0 +1,9 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=${prefix} +includedir=${prefix}/include + +Name: CLI11 +Description: C++ command line parser +Version: @PROJECT_VERSION@ + +Cflags: -I${includedir} diff --git a/thirdparty/CLI11/cmake/CLI11ConfigVersion.cmake.in b/thirdparty/CLI11/cmake/CLI11ConfigVersion.cmake.in new file mode 100644 index 0000000..49faee5 --- /dev/null +++ b/thirdparty/CLI11/cmake/CLI11ConfigVersion.cmake.in @@ -0,0 +1,13 @@ +# Adapted from write_basic_package_version_file(... COMPATIBILITY AnyNewerVersion) output +# ARCH_INDEPENDENT is only present in cmake 3.14 and onwards + +set(PACKAGE_VERSION "@VERSION_STRING@") + +if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION) + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + set(PACKAGE_VERSION_COMPATIBLE TRUE) + if(PACKAGE_FIND_VERSION STREQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT TRUE) + endif() +endif() diff --git a/thirdparty/CLI11/cmake/CLI11GeneratePkgConfig.cmake b/thirdparty/CLI11/cmake/CLI11GeneratePkgConfig.cmake new file mode 100644 index 0000000..667705d --- /dev/null +++ b/thirdparty/CLI11/cmake/CLI11GeneratePkgConfig.cmake @@ -0,0 +1,6 @@ +configure_file("cmake/CLI11.pc.in" "CLI11.pc" @ONLY) + +install(FILES "${PROJECT_BINARY_DIR}/CLI11.pc" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") + + diff --git a/thirdparty/CLI11/cmake/CodeCoverage.cmake b/thirdparty/CLI11/cmake/CodeCoverage.cmake new file mode 100644 index 0000000..907cf72 --- /dev/null +++ b/thirdparty/CLI11/cmake/CodeCoverage.cmake @@ -0,0 +1,244 @@ +# Copyright (c) 2012 - 2017, Lars Bilke +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# CHANGES: +# +# 2012-01-31, Lars Bilke +# - Enable Code Coverage +# +# 2013-09-17, Joakim Söderberg +# - Added support for Clang. +# - Some additional usage instructions. +# +# 2016-02-03, Lars Bilke +# - Refactored functions to use named parameters +# +# 2017-06-02, Lars Bilke +# - Merged with modified version from github.com/ufz/ogs +# +# +# USAGE: +# +# 1. Copy this file into your cmake modules path. +# +# 2. 
Add the following line to your CMakeLists.txt: +# include(CodeCoverage) +# +# 3. Append necessary compiler flags: +# APPEND_COVERAGE_COMPILER_FLAGS() +# +# 4. If you need to exclude additional directories from the report, specify them +# using the COVERAGE_EXCLUDES variable before calling SETUP_TARGET_FOR_COVERAGE. +# Example: +# set(COVERAGE_EXCLUDES 'dir1/*' 'dir2/*') +# +# 5. Use the functions described below to create a custom make target which +# runs your test executable and produces a code coverage report. +# +# 6. Build a Debug build: +# cmake -DCMAKE_BUILD_TYPE=Debug .. +# make +# make my_coverage_target +# + +include(CMakeParseArguments) + +# Check prereqs +find_program( GCOV_PATH gcov ) +find_program( LCOV_PATH NAMES lcov lcov.bat lcov.exe lcov.perl) +find_program( GENHTML_PATH NAMES genhtml genhtml.perl genhtml.bat ) +find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test) +find_program( SIMPLE_PYTHON_EXECUTABLE python ) + +if(NOT GCOV_PATH) + message(FATAL_ERROR "gcov not found! Aborting...") +endif() # NOT GCOV_PATH + +if("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang") + if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS 3) + message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...") + endif() +elseif(NOT CMAKE_COMPILER_IS_GNUCXX) + message(FATAL_ERROR "Compiler is not GNU gcc! Aborting...") +endif() + +set(COVERAGE_COMPILER_FLAGS "-g -O0 --coverage -fprofile-arcs -ftest-coverage -fno-inline -fno-inline-small-functions -fno-default-inline" + CACHE INTERNAL "") + +set(CMAKE_CXX_FLAGS_COVERAGE + ${COVERAGE_COMPILER_FLAGS} + CACHE STRING "Flags used by the C++ compiler during coverage builds." + FORCE ) +set(CMAKE_C_FLAGS_COVERAGE + ${COVERAGE_COMPILER_FLAGS} + CACHE STRING "Flags used by the C compiler during coverage builds." + FORCE ) +set(CMAKE_EXE_LINKER_FLAGS_COVERAGE + "" + CACHE STRING "Flags used for linking binaries during coverage builds." 
+ FORCE ) +set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE + "" + CACHE STRING "Flags used by the shared libraries linker during coverage builds." + FORCE ) +mark_as_advanced( + CMAKE_CXX_FLAGS_COVERAGE + CMAKE_C_FLAGS_COVERAGE + CMAKE_EXE_LINKER_FLAGS_COVERAGE + CMAKE_SHARED_LINKER_FLAGS_COVERAGE ) + +if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") + message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading") +endif() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug" + +if(CMAKE_C_COMPILER_ID STREQUAL "GNU") + link_libraries(gcov) +else() + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") +endif() + +# Defines a target for running and collection code coverage information +# Builds dependencies, runs the given executable and outputs reports. +# NOTE! The executable should always have a ZERO as exit code otherwise +# the coverage generation will not complete. +# +# SETUP_TARGET_FOR_COVERAGE( +# NAME testrunner_coverage # New target name +# EXECUTABLE testrunner -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR +# DEPENDENCIES testrunner # Dependencies to build first +# ) +function(SETUP_TARGET_FOR_COVERAGE) + + set(options NONE) + set(oneValueArgs NAME) + set(multiValueArgs EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) + cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + if(NOT LCOV_PATH) + message(FATAL_ERROR "lcov not found! Aborting...") + endif() # NOT LCOV_PATH + + if(NOT GENHTML_PATH) + message(FATAL_ERROR "genhtml not found! Aborting...") + endif() # NOT GENHTML_PATH + + # Setup target + add_custom_target(${Coverage_NAME} + + # Cleanup lcov + COMMAND ${LCOV_PATH} --directory . --zerocounters + # Create baseline to make sure untouched files show up in the report + COMMAND ${LCOV_PATH} -c -i -d . -o ${Coverage_NAME}.base + + # Run tests + COMMAND ${Coverage_EXECUTABLE} + + # Capturing lcov counters and generating report + COMMAND ${LCOV_PATH} --directory . 
--capture --output-file ${Coverage_NAME}.info + # add baseline counters + COMMAND ${LCOV_PATH} -a ${Coverage_NAME}.base -a ${Coverage_NAME}.info --output-file ${Coverage_NAME}.total + COMMAND ${LCOV_PATH} --remove ${Coverage_NAME}.total ${COVERAGE_EXCLUDES} --output-file ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned + COMMAND ${GENHTML_PATH} -o ${Coverage_NAME} ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned + COMMAND ${CMAKE_COMMAND} -E remove ${Coverage_NAME}.base ${Coverage_NAME}.total ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned + + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + DEPENDS ${Coverage_DEPENDENCIES} + COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report." + ) + + # Show where to find the lcov info report + add_custom_command(TARGET ${Coverage_NAME} POST_BUILD + COMMAND ; + COMMENT "Lcov code coverage info report saved in ${Coverage_NAME}.info." + ) + + # Show info where to find the report + add_custom_command(TARGET ${Coverage_NAME} POST_BUILD + COMMAND ; + COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report." + ) + +endfunction() # SETUP_TARGET_FOR_COVERAGE + +# Defines a target for running and collection code coverage information +# Builds dependencies, runs the given executable and outputs reports. +# NOTE! The executable should always have a ZERO as exit code otherwise +# the coverage generation will not complete. 
+# +# SETUP_TARGET_FOR_COVERAGE_COBERTURA( +# NAME ctest_coverage # New target name +# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR +# DEPENDENCIES executable_target # Dependencies to build first +# ) +function(SETUP_TARGET_FOR_COVERAGE_COBERTURA) + + set(options NONE) + set(oneValueArgs NAME) + set(multiValueArgs EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES) + cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + if(NOT SIMPLE_PYTHON_EXECUTABLE) + message(FATAL_ERROR "python not found! Aborting...") + endif() # NOT SIMPLE_PYTHON_EXECUTABLE + + if(NOT GCOVR_PATH) + message(FATAL_ERROR "gcovr not found! Aborting...") + endif() # NOT GCOVR_PATH + + # Combine excludes to several -e arguments + set(COBERTURA_EXCLUDES "") + foreach(EXCLUDE ${COVERAGE_EXCLUDES}) + set(COBERTURA_EXCLUDES "-e ${EXCLUDE} ${COBERTURA_EXCLUDES}") + endforeach() + + add_custom_target(${Coverage_NAME} + + # Run tests + ${Coverage_EXECUTABLE} + + # Running gcovr + COMMAND ${GCOVR_PATH} -x -r ${CMAKE_SOURCE_DIR} ${COBERTURA_EXCLUDES} + -o ${Coverage_NAME}.xml + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + DEPENDS ${Coverage_DEPENDENCIES} + COMMENT "Running gcovr to produce Cobertura code coverage report." + ) + + # Show info where to find the report + add_custom_command(TARGET ${Coverage_NAME} POST_BUILD + COMMAND ; + COMMENT "Cobertura code coverage report saved in ${Coverage_NAME}.xml." 
+ ) + +endfunction() # SETUP_TARGET_FOR_COVERAGE_COBERTURA + +function(APPEND_COVERAGE_COMPILER_FLAGS) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE) + message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}") +endfunction() # APPEND_COVERAGE_COMPILER_FLAGS diff --git a/thirdparty/CLI11/conanfile.py b/thirdparty/CLI11/conanfile.py new file mode 100644 index 0000000..377cd01 --- /dev/null +++ b/thirdparty/CLI11/conanfile.py @@ -0,0 +1,48 @@ +from conans import ConanFile, CMake +from conans.tools import load, cross_building +import re + + +def get_version(): + try: + content = load("include/CLI/Version.hpp") + version = re.search(r'#define CLI11_VERSION "(.*)"', content).group(1) + return version + except Exception: + return None + + +class CLI11Conan(ConanFile): + name = "CLI11" + version = get_version() + description = "Command Line Interface toolkit for C++11" + topics = ("cli", "c++11", "parser", "cli11") + url = "https://github.com/CLIUtils/CLI11" + homepage = "https://github.com/CLIUtils/CLI11" + author = "Henry Schreiner " + license = "BSD-3-Clause" + + settings = "os", "compiler", "arch", "build_type" + exports_sources = ( + "LICENSE", + "README.md", + "include/*", + "extern/*", + "cmake/*", + "CMakeLists.txt", + "CLI11.CPack.Description.txt", + "tests/*", + ) + + def build(self): # this is not building a library, just tests + cmake = CMake(self) + cmake.definitions["CLI11_EXAMPLES"] = "OFF" + cmake.definitions["CLI11_SINGLE_FILE"] = "OFF" + cmake.configure() + cmake.build() + if not cross_building(self.settings): + cmake.test() + cmake.install() + + def package_id(self): + self.info.header_only() diff --git a/thirdparty/CLI11/docs/.gitignore b/thirdparty/CLI11/docs/.gitignore new file mode 100644 index 0000000..a243610 --- /dev/null +++ b/thirdparty/CLI11/docs/.gitignore @@ -0,0 +1,2 @@ +/html/* +/latex/* diff --git 
a/thirdparty/CLI11/docs/CLI11.svg b/thirdparty/CLI11/docs/CLI11.svg new file mode 100644 index 0000000..e12b99b --- /dev/null +++ b/thirdparty/CLI11/docs/CLI11.svg @@ -0,0 +1,114 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + diff --git a/thirdparty/CLI11/docs/CLI11_100.png b/thirdparty/CLI11/docs/CLI11_100.png new file mode 100644 index 0000000..d318c48 Binary files /dev/null and b/thirdparty/CLI11/docs/CLI11_100.png differ diff --git a/thirdparty/CLI11/docs/CLI11_300.png b/thirdparty/CLI11/docs/CLI11_300.png new file mode 100644 index 0000000..3dcb425 Binary files /dev/null and b/thirdparty/CLI11/docs/CLI11_300.png differ diff --git a/thirdparty/CLI11/docs/CMakeLists.txt b/thirdparty/CLI11/docs/CMakeLists.txt new file mode 100644 index 0000000..f98ff35 --- /dev/null +++ b/thirdparty/CLI11/docs/CMakeLists.txt @@ -0,0 +1,18 @@ +set(DOXYGEN_EXTRACT_ALL YES) +set(DOXYGEN_BUILTIN_STL_SUPPORT YES) +set(PROJECT_BRIEF "C++11 Command Line Interface Parser") + +file(GLOB DOC_LIST + RELATIVE "${PROJECT_SOURCE_DIR}/include" + "${PROJECT_SOURCE_DIR}/include/CLI/*.hpp" + ) + +doxygen_add_docs(docs + ${DOC_LIST} + "${CMAKE_CURRENT_SOURCE_DIR}/mainpage.md" + WORKING_DIRECTORY + "${PROJECT_SOURCE_DIR}/include" +) + + + diff --git a/thirdparty/CLI11/docs/Doxyfile b/thirdparty/CLI11/docs/Doxyfile new file mode 100644 index 0000000..d08a290 --- /dev/null +++ b/thirdparty/CLI11/docs/Doxyfile @@ -0,0 +1,2475 @@ +# Doxyfile 1.8.13 + +# Designed to be run from the main directory with `doxygen docs/Doxygen` + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] 
+# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "CLI11" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = $(TRAVIS_TAG) + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "C++11 Command Line Interface Parser" + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. 
+ +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. 
+ +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. 
+ +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. 
+ +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". 
You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. 
In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 0. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 0 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. 
By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. 
This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. 
+# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. 
+ +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. 
+ +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. 
+ +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. 
+ +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. 
+ +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. 
+ +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. 
The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = include docs/mainpage.md + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. 
+# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, +# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f \ + *.for \ + *.tcl \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. 
+ +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. 
If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. 
+ +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = docs/mainpage.md + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. 
+ +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. 
+# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). 
+# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. 
+# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. 
For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. 
+ +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. 
For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. 
+ +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side JavaScript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want your formulas to look prettier in the HTML output. 
When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. 
See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , /