This commit is contained in:
Ivan
2022-04-05 11:42:28 +03:00
commit 6dc0eb0fcf
5565 changed files with 1200500 additions and 0 deletions

View File

@@ -0,0 +1,7 @@
---
Language: Cpp
BasedOnStyle: Google
IndentWidth: 2
IncludeBlocks: Preserve
...

80
thirdparty/basalt-headers/.clang-tidy vendored Normal file
View File

@@ -0,0 +1,80 @@
Checks: 'readability-*, -readability-magic-numbers, -readability-function-cognitive-complexity, -readability-else-after-return, -readability-redundant-access-specifiers, performance-*, modernize-*, -modernize-use-trailing-return-type, -modernize-avoid-c-arrays, -modernize-use-nodiscard, -modernize-use-auto, -modernize-pass-by-value, misc-assert-side-effect, -clang-analyzer-osx.*, -clang-analyzer-cplusplus.Move, -clang-analyzer-core.uninitialized.UndefReturn, -clang-analyzer-optin.portability.UnixAPI, -clang-analyzer-unix.Malloc'
WarningsAsErrors: '*'
HeaderFilterRegex: '.*/include/basalt/*'
CheckOptions:
# Classes, structs, ...
- key: readability-identifier-naming.NamespaceCase
value: lower_case
- key: readability-identifier-naming.ClassCase
value: CamelCase
- key: readability-identifier-naming.StructCase
value: CamelCase
- key: readability-identifier-naming.EnumCase
value: CamelCase
- key: readability-identifier-naming.UnionCase
value: CamelCase
- key: readability-identifier-naming.TypedefCase
value: CamelCase
# Variables, member variables, ...
- key: readability-identifier-naming.ParameterCase
value: lower_case
- key: readability-identifier-naming.ParameterIgnoredRegexp
value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*'
- key: readability-identifier-naming.VariableCase
value: lower_case
- key: readability-identifier-naming.VariableIgnoredRegexp
value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*'
- key: readability-identifier-naming.MemberCase
value: lower_case
- key: readability-identifier-naming.PublicMemberCase
value: lower_case
- key: readability-identifier-naming.PublicMemberIgnoredRegexp
value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*'
- key: readability-identifier-naming.ProtectedMemberCase
value: lower_case
- key: readability-identifier-naming.ProtectedMemberSuffix
value: _
- key: readability-identifier-naming.ProtectedMemberIgnoredRegexp
value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*'
- key: readability-identifier-naming.PrivateMemberCase
value: lower_case
- key: readability-identifier-naming.PrivateMemberIgnoredRegexp
value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*'
- key: readability-identifier-naming.PrivateMemberSuffix
value: _
# Functions, methods, ...
- key: readability-identifier-naming.FunctionCase
value: camelBack
- key: readability-identifier-naming.MethodCase
value: camelBack
# Constants
- key: readability-identifier-naming.ConstantPrefix
value: ''
- key: readability-identifier-naming.ConstantCase
value: UPPER_CASE
- key: readability-identifier-naming.ConstantMemberPrefix
value: ''
- key: readability-identifier-naming.ConstantMemberCase
value: lower_case
- key: readability-identifier-naming.ConstantMemberIgnoredRegexp
value: '^.*_$'
- key: readability-identifier-naming.ConstantParameterPrefix
value: ''
- key: readability-identifier-naming.ConstantParameterCase
value: lower_case
- key: readability-identifier-naming.ConstantParameterIgnoredRegexp
value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*'
- key: readability-identifier-naming.LocalConstantParameterPrefix
value: ''
- key: readability-identifier-naming.LocalConstantCase
value: lower_case
- key: readability-identifier-naming.LocalConstantIgnoredRegexp
value: 'Q[12]?_.*|[A-Z]|[JH]_.*|[TR]_[a-z]+_[a-z]+.*'
- key: readability-identifier-naming.ConstexprVariablePrefix
value: ''
- key: readability-identifier-naming.ConstexprVariableCase
value: UPPER_CASE

5
thirdparty/basalt-headers/.gitignore vendored Normal file
View File

@@ -0,0 +1,5 @@
cmake-build*
.idea
CMakeLists.txt.user
build*
public

123
thirdparty/basalt-headers/.gitlab-ci.yml vendored Normal file
View File

@@ -0,0 +1,123 @@
image: vladyslavusenko/b_image_focal:latest

variables:
  GIT_SUBMODULE_STRATEGY: recursive
  BUILD_TYPE: Release

# template for docker builds with ccache
.prepare_docker_template: &prepare_docker_definition
  tags:
    - docker
  before_script:
    - mkdir -p ccache
    - export CCACHE_BASEDIR=${PWD}
    - export CCACHE_DIR=${PWD}/ccache
    - ccache -s
  cache:
    paths:
      - ccache/
    key: ${CI_JOB_NAME}

# template for secondary build & unit test configurations
.compile_and_test_template: &compile_and_test_definition
  stage: build
  script:
    - mkdir build
    - cd build
    - cmake .. -DCMAKE_BUILD_TYPE=${BUILD_TYPE}
    - make -j4
    - ctest

# main build with benchmark and coverage
focal-release-compile:
  <<: *prepare_docker_definition
  stage: build
  script:
    - mkdir build
    - cd build
    - cmake .. -DCMAKE_BUILD_TYPE=${BUILD_TYPE}
    - make -j4
    - ctest
    - ./test/benchmark_camera > ../benchmark_camera.txt
    - cd ../
    - mkdir build_coverage
    - cd build_coverage
    - cmake .. -DCMAKE_BUILD_TYPE=Coverage
    - make -j4
    - ctest
    - lcov --directory . --capture --output-file coverage.info
    - lcov --remove coverage.info '*test/*' '/usr/*' '*thirdparty*' '*googletest*' --output-file coverage.info
    - lcov --list coverage.info
  artifacts:
    paths:
      - benchmark_camera.txt

# NOTE: both templates are merged with a single `<<: [a, b]` list — two
# separate `<<:` lines in one mapping are duplicate keys, which is invalid
# YAML 1.2 and parser-dependent. With a list, earlier entries win on conflict
# (the two templates define disjoint keys here, so the merge is unambiguous).
focal-debug-compile:
  <<: [*prepare_docker_definition, *compile_and_test_definition]
  variables:
    BUILD_TYPE: Debug

focal-relwithdebinfo-compile:
  <<: [*prepare_docker_definition, *compile_and_test_definition]
  variables:
    BUILD_TYPE: RelWithDebInfo

focal-asan-build:
  <<: [*prepare_docker_definition, *compile_and_test_definition]
  variables:
    CC: clang-12
    CXX: clang++-12
    BUILD_TYPE: SanitizerRelWithDebInfo
    # LeakSanitizer doesn't work in (non-privileged) container
    ASAN_OPTIONS: "detect_leaks=0"

bionic-release-compile:
  <<: [*prepare_docker_definition, *compile_and_test_definition]
  image: vladyslavusenko/b_image_bionic:latest

bigsur-relwithdebinfo-compile:
  <<: *compile_and_test_definition
  tags: [macos, "11"]
  variables:
    BUILD_TYPE: RelWithDebInfo

bigsur-brewedclang-asan-build:
  <<: *compile_and_test_definition
  tags: [macos, "11"]
  variables:
    CC: /usr/local/opt/llvm/bin/clang
    CXX: /usr/local/opt/llvm/bin/clang++
    BUILD_TYPE: SanitizerRelWithDebInfo

catalina-asan-build:
  <<: *compile_and_test_definition
  tags: [macos, "10.15"]
  variables:
    BUILD_TYPE: SanitizerRelWithDebInfo

# check if clang-format would make any changes
clang-format:
  tags:
    - docker
  stage: build
  variables:
    GIT_SUBMODULE_STRATEGY: none
  script:
    - ./scripts/clang-format-all.sh
    # check if any files are now modified and error if yes
    - (if git diff --name-only --diff-filter=M | grep '\..pp$'; then echo $'\n Some files are not properly formatted. You can use "./scripts/clang-format-all.sh".\n'; git diff --diff-filter=M; false; fi)

pages:
  tags:
    - docker
  script:
    - doxygen
  artifacts:
    paths:
      - public
  only:
    - master

16
thirdparty/basalt-headers/.gitmodules vendored Normal file
View File

@@ -0,0 +1,16 @@
[submodule "thirdparty/Sophus"]
path = thirdparty/Sophus
# url = https://github.com/strasdat/Sophus.git
url = https://github.com/NikolausDemmel/Sophus.git
[submodule "thirdparty/cereal"]
path = thirdparty/cereal
url = https://github.com/USCiLab/cereal.git
[submodule "test/googletest"]
path = test/googletest
url = https://github.com/google/googletest.git
[submodule "test/benchmark"]
path = test/benchmark
url = https://github.com/google/benchmark.git
[submodule "thirdparty/eigen"]
path = thirdparty/eigen
url = https://gitlab.com/libeigen/eigen.git

131
thirdparty/basalt-headers/CMakeLists.txt vendored Normal file
View File

@@ -0,0 +1,131 @@
cmake_minimum_required(VERSION 3.10...3.18)

include(CMakePackageConfigHelpers)

project(basalt-headers VERSION 0.1.0 LANGUAGES CXX)

include(GNUInstallDirs)
include(CTest)  # note: this adds a BUILD_TESTING which defaults to ON

# Compiler/flag setup only applies when building basalt-headers standalone;
# when consumed via add_subdirectory the parent project's settings are kept.
if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME)
  if(NOT CMAKE_C_COMPILER_LAUNCHER AND NOT CMAKE_CXX_COMPILER_LAUNCHER)
    find_program(CCACHE_PROGRAM ccache)
    if(CCACHE_PROGRAM)
      message(STATUS "Found ccache: ${CCACHE_PROGRAM}")
      set(CMAKE_C_COMPILER_LAUNCHER ${CCACHE_PROGRAM})
      set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_PROGRAM})
    else()
      message(STATUS "Didn't find ccache")
    endif()
  else()
    message(STATUS "Compiler launcher already set. Not configuring ccache.")
    message(STATUS "CMAKE_C_COMPILER_LAUNCHER: ${CMAKE_C_COMPILER_LAUNCHER}")
    message(STATUS "CMAKE_CXX_COMPILER_LAUNCHER: ${CMAKE_CXX_COMPILER_LAUNCHER}")
  endif()

  if(NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release)
  endif()

  if(NOT CXX_MARCH)
    set(CXX_MARCH native)
  endif()

  if(NOT APPLE OR NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
    set(BASALT_HEADERS_MARCH_FLAGS "-march=${CXX_MARCH}")
  else()
    message(STATUS "Running on Apple ${CMAKE_SYSTEM_PROCESSOR}. Disabled -march flag.")
  endif()

  set(CMAKE_CXX_STANDARD 17)
  set(CMAKE_CXX_STANDARD_REQUIRED ON)
  set(CMAKE_CXX_EXTENSIONS OFF)

  # clang-specific compile flags
  if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
    set(BASALT_HEADERS_CXX_FLAGS "${BASALT_HEADERS_CXX_FLAGS} -Wno-exceptions -fcolor-diagnostics -frelaxed-template-template-args -Wno-error=deprecated-declarations")
    if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10)
      # These are disabled to avoid lots of warnings in Eigen code with clang 10
      set(BASALT_HEADERS_CXX_FLAGS "${BASALT_HEADERS_CXX_FLAGS} -Wno-misleading-indentation")
    endif()
  else()
    set(BASALT_HEADERS_CXX_FLAGS "${BASALT_HEADERS_CXX_FLAGS} -Wno-error=maybe-uninitialized")
  endif()

  set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -DEIGEN_INITIALIZE_MATRICES_BY_NAN")  # cmake default: "-g"
  set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DEIGEN_INITIALIZE_MATRICES_BY_NAN")  # cmake default: "-O2 -g -DNDEBUG"
  set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG")  # cmake default: "-O3 -DNDEBUG"
  set(CMAKE_CXX_FLAGS "-Wall -Werror -Wextra ${BASALT_HEADERS_MARCH_FLAGS} ${CMAKE_CXX_FLAGS} ${BASALT_HEADERS_CXX_FLAGS}")
  set(CMAKE_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG} --coverage -fno-inline -fno-inline-small-functions -fno-default-inline")
  set(CMAKE_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG} --coverage")
  set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} --coverage")
  set(CMAKE_CXX_FLAGS_SANITIZERDEBUG "-O0 -g0 -DEIGEN_INITIALIZE_MATRICES_BY_NAN -fno-omit-frame-pointer -fsanitize=address,undefined")
  set(CMAKE_CXX_FLAGS_SANITIZERRELWITHDEBINFO "-O3 -g0 -DEIGEN_INITIALIZE_MATRICES_BY_NAN -fno-omit-frame-pointer -fsanitize=address,undefined")
endif()

# Each dependency can come from a submodule, an external package, or an
# already-defined target of a parent project.
option(BASALT_BUILTIN_EIGEN "Use builtin Eigen from submodule" ON)
if(NOT TARGET Eigen3::Eigen)
  if(BASALT_BUILTIN_EIGEN)
    message(STATUS "Including internal Eigen from submodule")
    # Disable Eigen tests
    set(BUILD_TESTING_PREV_VALUE ${BUILD_TESTING})  # Note: BUILD_TESTING is always defined because we include CTest above
    set(BUILD_TESTING OFF CACHE BOOL "" FORCE)
    set(EIGEN_BUILD_DOC OFF CACHE BOOL "" FORCE)
    add_subdirectory(thirdparty/eigen EXCLUDE_FROM_ALL)
    # Restore previous value of BUILD_TESTING
    set(BUILD_TESTING ${BUILD_TESTING_PREV_VALUE} CACHE BOOL "Build the testing tree." FORCE)
  else()
    message(STATUS "Finding external Eigen")
    find_package(Eigen3 3.4 REQUIRED)
  endif()
else()
  message(STATUS "Eigen already available")
endif()

option(BASALT_BUILTIN_SOPHUS "Use builtin Sophus from submodule" ON)
if(NOT TARGET Sophus::Sophus)
  if(BASALT_BUILTIN_SOPHUS)
    message(STATUS "Including internal Sophus from submodule")
    set(BUILD_SOPHUS_TESTS OFF CACHE BOOL "disable BUILD_SOPHUS_TESTS")
    set(BUILD_SOPHUS_EXAMPLES OFF CACHE BOOL "disable BUILD_SOPHUS_EXAMPLES")
    add_subdirectory(thirdparty/Sophus EXCLUDE_FROM_ALL)
  else()
    message(STATUS "Finding external Sophus")
    find_package(Sophus REQUIRED)
  endif()
else()
  message(STATUS "Sophus already available")
endif()

option(BASALT_BUILTIN_CEREAL "Use builtin Cereal from submodule" ON)
if(NOT TARGET cereal::cereal)
  if(BASALT_BUILTIN_CEREAL)
    message(STATUS "Including internal Cereal from submodule")
    add_subdirectory(thirdparty/cereal EXCLUDE_FROM_ALL)
  else()
    message(STATUS "Finding external Cereal")
    find_package(cereal REQUIRED)
  endif()
else()
  message(STATUS "Cereal already available")
endif()

add_library(basalt-headers INTERFACE)
add_library(basalt::basalt-headers ALIAS basalt-headers)
target_link_libraries(basalt-headers INTERFACE Eigen3::Eigen Sophus::Sophus cereal::cereal)

# Associate target with include directory
target_include_directories(basalt-headers INTERFACE
  "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
  "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>"
)

if((CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME OR BASALT_HEADERS_BUILD_TESTING) AND BUILD_TESTING)
  enable_testing()
  add_subdirectory(test)
endif()

374
thirdparty/basalt-headers/Doxyfile vendored Normal file
View File

@@ -0,0 +1,374 @@
# Doxyfile 1.8.13
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = "Basalt Headers"
PROJECT_NUMBER =
PROJECT_BRIEF =
PROJECT_LOGO = doc/img/basalt_light.png
OUTPUT_DIRECTORY =
CREATE_SUBDIRS = NO
ALLOW_UNICODE_NAMES = NO
OUTPUT_LANGUAGE = English
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
ABBREVIATE_BRIEF = "The $name class" \
"The $name widget" \
"The $name file" \
is \
provides \
specifies \
contains \
represents \
a \
an \
the
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
FULL_PATH_NAMES = YES
STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = NO
QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 4
ALIASES =
TCL_SUBST =
OPTIMIZE_OUTPUT_FOR_C = NO
OPTIMIZE_OUTPUT_JAVA = NO
OPTIMIZE_FOR_FORTRAN = NO
OPTIMIZE_OUTPUT_VHDL = NO
EXTENSION_MAPPING =
MARKDOWN_SUPPORT = YES
TOC_INCLUDE_HEADINGS = 0
AUTOLINK_SUPPORT = YES
BUILTIN_STL_SUPPORT = NO
CPP_CLI_SUPPORT = NO
SIP_SUPPORT = NO
IDL_PROPERTY_SUPPORT = YES
DISTRIBUTE_GROUP_DOC = NO
GROUP_NESTED_COMPOUNDS = NO
SUBGROUPING = YES
INLINE_GROUPED_CLASSES = NO
INLINE_SIMPLE_STRUCTS = NO
TYPEDEF_HIDES_STRUCT = NO
LOOKUP_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
EXTRACT_PACKAGE = NO
EXTRACT_STATIC = NO
EXTRACT_LOCAL_CLASSES = YES
EXTRACT_LOCAL_METHODS = NO
EXTRACT_ANON_NSPACES = NO
HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
HIDE_FRIEND_COMPOUNDS = NO
HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
CASE_SENSE_NAMES = YES
HIDE_SCOPE_NAMES = NO
HIDE_COMPOUND_REFERENCE= NO
SHOW_INCLUDE_FILES = YES
SHOW_GROUPED_MEMB_INC = NO
FORCE_LOCAL_INCLUDES = NO
INLINE_INFO = YES
SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = NO
SORT_MEMBERS_CTORS_1ST = NO
SORT_GROUP_NAMES = NO
SORT_BY_SCOPE_NAME = NO
STRICT_PROTO_MATCHING = NO
GENERATE_TODOLIST = YES
GENERATE_TESTLIST = YES
GENERATE_BUGLIST = YES
GENERATE_DEPRECATEDLIST= YES
ENABLED_SECTIONS =
MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
SHOW_FILES = YES
SHOW_NAMESPACES = YES
FILE_VERSION_FILTER =
LAYOUT_FILE =
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
QUIET = NO
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
WARN_IF_DOC_ERROR = YES
WARN_NO_PARAMDOC = NO
WARN_AS_ERROR = NO
WARN_FORMAT = "$file:$line: $text"
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
INPUT = include/basalt/calibration include/basalt/camera include/basalt/image include/basalt/imu include/basalt/spline include/basalt/utils include/basalt/serialization doc/
INPUT_ENCODING = UTF-8
FILE_PATTERNS = *.c \
*.cc \
*.cxx \
*.cpp \
*.c++ \
*.java \
*.ii \
*.ixx \
*.ipp \
*.i++ \
*.inl \
*.idl \
*.ddl \
*.odl \
*.h \
*.hh \
*.hxx \
*.hpp \
*.h++ \
*.cs \
*.d \
*.php \
*.php4 \
*.php5 \
*.phtml \
*.inc \
*.m \
*.markdown \
*.md \
*.mm \
*.dox \
*.py \
*.pyw \
*.f90 \
*.f95 \
*.f03 \
*.f08 \
*.f \
*.for \
*.tcl \
*.vhd \
*.vhdl \
*.ucf \
*.qsf
RECURSIVE = NO
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS =
EXCLUDE_SYMBOLS =
EXAMPLE_PATH =
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = NO
IMAGE_PATH = doc/img
INPUT_FILTER =
FILTER_PATTERNS =
FILTER_SOURCE_FILES = NO
FILTER_SOURCE_PATTERNS =
USE_MDFILE_AS_MAINPAGE =
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
SOURCE_BROWSER = NO
INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
REFERENCED_BY_RELATION = NO
REFERENCES_RELATION = NO
REFERENCES_LINK_SOURCE = YES
SOURCE_TOOLTIPS = YES
USE_HTAGS = NO
VERBATIM_HEADERS = YES
CLANG_ASSISTED_PARSING = NO
CLANG_OPTIONS =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
ALPHABETICAL_INDEX = YES
COLS_IN_ALPHA_INDEX = 5
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
GENERATE_HTML = YES
HTML_OUTPUT = public
HTML_FILE_EXTENSION = .html
HTML_HEADER =
HTML_FOOTER =
HTML_STYLESHEET =
HTML_EXTRA_STYLESHEET =
HTML_EXTRA_FILES =
HTML_COLORSTYLE_HUE = 220
HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_GAMMA = 80
HTML_TIMESTAMP = NO
HTML_DYNAMIC_SECTIONS = NO
HTML_INDEX_NUM_ENTRIES = 100
GENERATE_DOCSET = NO
DOCSET_FEEDNAME = "Doxygen generated docs"
DOCSET_BUNDLE_ID = org.doxygen.Project
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
DOCSET_PUBLISHER_NAME = Publisher
GENERATE_HTMLHELP = NO
CHM_FILE =
HHC_LOCATION =
GENERATE_CHI = NO
CHM_INDEX_ENCODING =
BINARY_TOC = NO
TOC_EXPAND = NO
GENERATE_QHP = NO
QCH_FILE =
QHP_NAMESPACE = org.doxygen.Project
QHP_VIRTUAL_FOLDER = doc
QHP_CUST_FILTER_NAME =
QHP_CUST_FILTER_ATTRS =
QHP_SECT_FILTER_ATTRS =
QHG_LOCATION =
GENERATE_ECLIPSEHELP = NO
ECLIPSE_DOC_ID = org.doxygen.Project
DISABLE_INDEX = YES
GENERATE_TREEVIEW = YES
ENUM_VALUES_PER_LINE = 4
TREEVIEW_WIDTH = 250
EXT_LINKS_IN_WINDOW = NO
FORMULA_FONTSIZE = 12
FORMULA_TRANSPARENT = YES
USE_MATHJAX = YES
MATHJAX_FORMAT = HTML-CSS
MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest
MATHJAX_EXTENSIONS =
MATHJAX_CODEFILE =
SEARCHENGINE = YES
SERVER_BASED_SEARCH = NO
EXTERNAL_SEARCH = NO
SEARCHENGINE_URL =
SEARCHDATA_FILE = searchdata.xml
EXTERNAL_SEARCH_ID =
EXTRA_SEARCH_MAPPINGS =
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
GENERATE_LATEX = NO
LATEX_OUTPUT = latex
LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
COMPACT_LATEX = NO
PAPER_TYPE = a4
EXTRA_PACKAGES =
LATEX_HEADER =
LATEX_FOOTER =
LATEX_EXTRA_STYLESHEET =
LATEX_EXTRA_FILES =
PDF_HYPERLINKS = YES
USE_PDFLATEX = YES
LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
LATEX_SOURCE_CODE = NO
LATEX_BIB_STYLE = plain
LATEX_TIMESTAMP = NO
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
GENERATE_RTF = NO
RTF_OUTPUT = rtf
COMPACT_RTF = NO
RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
RTF_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
GENERATE_MAN = NO
MAN_OUTPUT = man
MAN_EXTENSION = .3
MAN_SUBDIR =
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
GENERATE_XML = NO
XML_OUTPUT = xml
XML_PROGRAMLISTING = YES
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
PERLMOD_PRETTY = YES
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = NO
EXPAND_ONLY_PREDEF = NO
SEARCH_INCLUDES = YES
INCLUDE_PATH =
INCLUDE_FILE_PATTERNS =
PREDEFINED =
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
TAGFILES =
GENERATE_TAGFILE =
ALLEXTERNALS = NO
EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
CLASS_DIAGRAMS = YES
MSCGEN_PATH =
DIA_PATH =
HIDE_UNDOC_RELATIONS = YES
HAVE_DOT = YES
DOT_NUM_THREADS = 0
DOT_FONTNAME = Helvetica
DOT_FONTSIZE = 10
DOT_FONTPATH =
CLASS_GRAPH = YES
COLLABORATION_GRAPH = YES
GROUP_GRAPHS = YES
UML_LOOK = NO
UML_LIMIT_NUM_FIELDS = 10
TEMPLATE_RELATIONS = NO
INCLUDE_GRAPH = YES
INCLUDED_BY_GRAPH = YES
CALL_GRAPH = NO
CALLER_GRAPH = NO
GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
DOT_IMAGE_FORMAT = png
INTERACTIVE_SVG = NO
DOT_PATH =
DOTFILE_DIRS =
MSCFILE_DIRS =
DIAFILE_DIRS =
PLANTUML_JAR_PATH =
PLANTUML_CFG_FILE =
PLANTUML_INCLUDE_PATH =
DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 0
DOT_TRANSPARENT = NO
DOT_MULTI_TARGETS = NO
GENERATE_LEGEND = YES
DOT_CLEANUP = YES

29
thirdparty/basalt-headers/LICENSE vendored Normal file
View File

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

35
thirdparty/basalt-headers/README.md vendored Normal file
View File

@@ -0,0 +1,35 @@
[![pipeline status](https://gitlab.com/VladyslavUsenko/basalt-headers/badges/master/pipeline.svg)](https://gitlab.com/VladyslavUsenko/basalt-headers/commits/master)
[![coverage report](https://gitlab.com/VladyslavUsenko/basalt-headers/badges/master/coverage.svg)](https://gitlab.com/VladyslavUsenko/basalt-headers/commits/master)
## Basalt Headers
This repository contains reusable components of Basalt project as header-only library ([Documentation](https://vladyslavusenko.gitlab.io/basalt-headers/)). For more information see https://vision.in.tum.de/research/vslam/basalt.
This library includes:
* Camera models.
* Standalone image datatype with support for views, interpolation, gradients, and image pyramids.
* Uniform B-Splines for Rd (d-dimensional vectors), SO(3) and SE(3).
* Preintegration of inertial measurement unit (IMU) data.
* Data types to store IMU-camera calibration.
* Cereal serialization for basalt types as well as some Eigen and Sophus types.
## Related Publications
Implemented camera models:
* **The Double Sphere Camera Model**, V. Usenko and N. Demmel and D. Cremers, In 2018 International Conference on 3D Vision (3DV), [[DOI:10.1109/3DV.2018.00069]](https://doi.org/10.1109/3DV.2018.00069), [[arXiv:1807.08957]](https://arxiv.org/abs/1807.08957).
Implemented IMU preintegration:
* **Visual-Inertial Mapping with Non-Linear Factor Recovery**, V. Usenko, N. Demmel, D. Schubert, J. Stückler, D. Cremers, In IEEE Robotics and Automation Letters (RA-L) [[DOI:10.1109/LRA.2019.2961227]](https://doi.org/10.1109/LRA.2019.2961227), [[arXiv:1904.06504]](https://arxiv.org/abs/1904.06504).
B-spline trajectory representation:
* **Efficient Derivative Computation for Cumulative B-Splines on Lie Groups**, C. Sommer, V. Usenko, D. Schubert, N. Demmel, D. Cremers, In 2020 Conference on Computer Vision and Pattern Recognition (CVPR), [[DOI:10.1109/CVPR42600.2020.01116]](https://doi.org/10.1109/CVPR42600.2020.01116), [[arXiv:1911.08860]](https://arxiv.org/abs/1911.08860).
## Licence
The code is provided under a BSD 3-clause license. See the LICENSE file for details.
Note also the different licenses of thirdparty submodules.
`image.h` is adapted from [Pangolin](https://github.com/stevenlovegrove/Pangolin) by Steven Lovegrove (MIT license).

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.9 KiB

BIN
thirdparty/basalt-headers/doc/img/ds.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 21 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

BIN
thirdparty/basalt-headers/doc/img/kb.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 58 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 21 KiB

View File

@@ -0,0 +1,32 @@
/*!
\mainpage Documentation for Basalt Headers
This is documentation page for the reusable components of the Basalt project extracted as header-only library.
Repository URL: https://gitlab.com/VladyslavUsenko/basalt-headers
This library includes:
- Camera models.
- Standalone image datatype with support for views, interpolation, gradients, and image pyramids.
- Uniform B-Splines for Rd (d-dimensional vectors), SO(3) and SE(3).
- Preintegration of inertial measurement unit (IMU) data.
- Data types to store IMU-camera calibration.
- Cereal serialization for basalt types as well as some Eigen and Sophus types.
## Related Publications
Implemented camera models:
* **The Double Sphere Camera Model**, V. Usenko and N. Demmel and D. Cremers, In 2018 International Conference on 3D Vision (3DV), [[DOI:10.1109/3DV.2018.00069]](https://doi.org/10.1109/3DV.2018.00069), [[arXiv:1807.08957]](https://arxiv.org/abs/1807.08957).
Implemented IMU preintegration:
* **Visual-Inertial Mapping with Non-Linear Factor Recovery**, V. Usenko, N. Demmel, D. Schubert, J. Stückler, D. Cremers, In IEEE Robotics and Automation Letters (RA-L) [[DOI:10.1109/LRA.2019.2961227]](https://doi.org/10.1109/LRA.2019.2961227), [[arXiv:1904.06504]](https://arxiv.org/abs/1904.06504).
B-spline trajectory representation:
* **Efficient Derivative Computation for Cumulative B-Splines on Lie Groups**, C. Sommer, V. Usenko, D. Schubert, N. Demmel, D. Cremers, In [[arXiv:1911.08860]](https://arxiv.org/abs/1911.08860).
## Licence
The code is provided under a BSD 3-clause license. See the LICENSE file for details. Note also the different licenses of thirdparty submodules.
*/

View File

@@ -0,0 +1,219 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Definition of static IMU biases used for calibration
*/
#pragma once
#include <Eigen/Dense>
namespace basalt {
/// @brief Static calibration for accelerometer.
///
/// Calibrates axis scaling and misalignment and has 9 parameters \f$ [b_x,
/// b_y, b_z, s_1, s_2, s_3, s_4, s_5, s_6]^T \f$.
/// \f[
/// a_c = \begin{bmatrix} s_1 + 1 & 0 & 0 \\ s_2 & s_4 + 1 & 0 \\ s_3 & s_5 &
/// s_6 + 1 \\ \end{bmatrix} a_r - \begin{bmatrix} b_x \\ b_y \\ b_z
/// \end{bmatrix}
/// \f] where \f$ a_c \f$ is a calibrated measurement and \f$ a_r \f$ is a
/// raw measurement. When all elements are zero applying calibration results in
/// Identity mapping.
template <typename Scalar>
class CalibAccelBias {
 public:
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
  using Mat33 = Eigen::Matrix<Scalar, 3, 3>;

  /// @brief Default constructor with zero initialization.
  inline CalibAccelBias() { accel_bias_full_.setZero(); }

  /// @brief Set calibration to random values (used in unit-tests).
  inline void setRandom() {
    accel_bias_full_.setRandom();
    accel_bias_full_.template head<3>() /= 10;
    accel_bias_full_.template tail<6>() /= 100;
  }

  /// @brief Return const vector of parameters.
  /// See detailed description in \ref CalibAccelBias.
  inline const Eigen::Matrix<Scalar, 9, 1>& getParam() const {
    return accel_bias_full_;
  }

  /// @brief Return vector of parameters. See detailed description in \ref
  /// CalibAccelBias.
  inline Eigen::Matrix<Scalar, 9, 1>& getParam() { return accel_bias_full_; }

  /// @brief Increment the calibration vector
  ///
  /// @param inc increment vector
  inline void operator+=(const Eigen::Matrix<Scalar, 9, 1>& inc) {
    accel_bias_full_ += inc;
  }

  /// @brief Return bias vector and scale matrix. See detailed description in
  /// \ref CalibAccelBias.
  ///
  /// @param[out] accel_bias bias vector \f$ [b_x, b_y, b_z]^T \f$
  /// @param[out] accel_scale lower-triangular scale/misalignment matrix
  /// (without the added identity)
  inline void getBiasAndScale(Vec3& accel_bias, Mat33& accel_scale) const {
    accel_bias = accel_bias_full_.template head<3>();

    // Parameters s_1..s_3 form the first column; s_4..s_6 fill the rest of
    // the lower triangle, matching the class-level formula.
    accel_scale.setZero();
    accel_scale.col(0) = accel_bias_full_.template segment<3>(3);
    accel_scale(1, 1) = accel_bias_full_(6);
    accel_scale(2, 1) = accel_bias_full_(7);
    accel_scale(2, 2) = accel_bias_full_(8);
  }

  /// @brief Calibrate the measurement. See detailed description in
  /// \ref CalibAccelBias.
  ///
  /// @param raw_measurement raw accelerometer reading
  /// @return calibrated measurement
  inline Vec3 getCalibrated(const Vec3& raw_measurement) const {
    Vec3 accel_bias;
    Mat33 accel_scale;

    getBiasAndScale(accel_bias, accel_scale);

    // (I + S) * a_r - b, written without forming I explicitly.
    return (raw_measurement + accel_scale * raw_measurement - accel_bias);
  }

  /// @brief Invert calibration (used in unit-tests).
  ///
  /// @param calibrated_measurement calibrated accelerometer reading
  /// @return raw measurement
  inline Vec3 invertCalibration(const Vec3& calibrated_measurement) const {
    Vec3 accel_bias;
    Mat33 accel_scale;

    getBiasAndScale(accel_bias, accel_scale);

    // Bugfix: use the Scalar-typed identity. The previous
    // Eigen::Matrix3d::Identity() hard-coded double and failed to compile
    // (type mismatch in operator+) for any Scalar other than double.
    Mat33 accel_scale_inv = (Mat33::Identity() + accel_scale).inverse();

    return accel_scale_inv * (calibrated_measurement + accel_bias);
  }

 private:
  // Full 9-parameter vector [b_x, b_y, b_z, s_1, ..., s_6]^T (see class doc).
  Eigen::Matrix<Scalar, 9, 1> accel_bias_full_;
};
/// @brief Static calibration for gyroscope.
///
/// Calibrates rotation, axis scaling and misalignment and has 12 parameters \f$
/// [b_x, b_y, b_z, s_1, s_2, s_3, s_4, s_5, s_6, s_7, s_8, s_9]^T \f$. \f[
/// \omega_c = \begin{bmatrix} s_1 + 1 & s_4 & s_7 \\ s_2 & s_5 + 1 & s_8 \\ s_3
/// & s_6 & s_9 +1 \\ \end{bmatrix} \omega_r - \begin{bmatrix} b_x \\ b_y
/// \\ b_z \end{bmatrix} \f] where \f$ \omega_c \f$ is a calibrated measurement
/// and \f$ \omega_r \f$ is a raw measurement. When all elements are zero
/// applying calibration results in Identity mapping.
template <typename Scalar>
class CalibGyroBias {
 public:
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
  using Mat33 = Eigen::Matrix<Scalar, 3, 3>;

  /// @brief Default constructor with zero initialization.
  inline CalibGyroBias() { gyro_bias_full_.setZero(); }

  /// @brief Set calibration to random values (used in unit-tests).
  ///
  /// Bias entries are scaled down by 10 and scale/misalignment entries by
  /// 100 to keep the random calibration physically plausible.
  inline void setRandom() {
    gyro_bias_full_.setRandom();
    gyro_bias_full_.template head<3>() /= 10;
    gyro_bias_full_.template tail<9>() /= 100;
  }

  /// @brief Return const vector of parameters.
  /// See detailed description in \ref CalibGyroBias.
  inline const Eigen::Matrix<Scalar, 12, 1>& getParam() const {
    return gyro_bias_full_;
  }

  /// @brief Return vector of parameters.
  /// See detailed description in \ref CalibGyroBias.
  inline Eigen::Matrix<Scalar, 12, 1>& getParam() { return gyro_bias_full_; }

  /// @brief Increment the calibration vector
  ///
  /// @param inc increment vector
  inline void operator+=(const Eigen::Matrix<Scalar, 12, 1>& inc) {
    gyro_bias_full_ += inc;
  }

  /// @brief Return bias vector and scale matrix. See detailed description in
  /// \ref CalibGyroBias.
  ///
  /// Unlike the accelerometer case, the scale matrix is a full 3x3 matrix:
  /// parameters 3..11 fill it column by column (no setZero needed since all
  /// nine entries are written).
  ///
  /// @param[out] gyro_bias bias vector \f$ [b_x, b_y, b_z]^T \f$
  /// @param[out] gyro_scale full scale/misalignment/rotation matrix
  inline void getBiasAndScale(Vec3& gyro_bias, Mat33& gyro_scale) const {
    gyro_bias = gyro_bias_full_.template head<3>();
    gyro_scale.col(0) = gyro_bias_full_.template segment<3>(3);
    gyro_scale.col(1) = gyro_bias_full_.template segment<3>(6);
    gyro_scale.col(2) = gyro_bias_full_.template segment<3>(9);
  }

  /// @brief Calibrate the measurement. See detailed description in
  /// \ref CalibGyroBias.
  ///
  /// @param raw_measurement
  /// @return calibrated measurement
  inline Vec3 getCalibrated(const Vec3& raw_measurement) const {
    Vec3 gyro_bias;
    Mat33 gyro_scale;

    getBiasAndScale(gyro_bias, gyro_scale);

    // (I + S) * raw - bias, written without forming I + S explicitly.
    return (raw_measurement + gyro_scale * raw_measurement - gyro_bias);
  }

  /// @brief Invert calibration (used in unit-tests).
  ///
  /// @param calibrated_measurement
  /// @return raw measurement
  inline Vec3 invertCalibration(const Vec3& calibrated_measurement) const {
    Vec3 gyro_bias;
    Mat33 gyro_scale;

    getBiasAndScale(gyro_bias, gyro_scale);

    // Use the Scalar-dependent Mat33 identity; the previous
    // Eigen::Matrix3d::Identity() only compiled for Scalar == double.
    Mat33 gyro_scale_inv = (Mat33::Identity() + gyro_scale).inverse();

    return gyro_scale_inv * (calibrated_measurement + gyro_bias);
  }

 private:
  // Parameter vector [b_x, b_y, b_z, s_1, ..., s_9]^T.
  Eigen::Matrix<Scalar, 12, 1> gyro_bias_full_;
};
} // namespace basalt

View File

@@ -0,0 +1,194 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Calibration datatypes for multicam-IMU and motion capture calibration
*/
#pragma once
#include <memory>
#include <basalt/spline/rd_spline.h>
#include <basalt/calibration/calib_bias.hpp>
#include <basalt/camera/generic_camera.hpp>
namespace basalt {
/// @brief Struct to store camera-IMU calibration
template <class Scalar>
struct Calibration {
using Ptr = std::shared_ptr<Calibration>;
using SE3 = Sophus::SE3<Scalar>;
using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
/// @brief Default constructor.
Calibration() {
cam_time_offset_ns = 0;
imu_update_rate = 200;
// reasonable defaults
gyro_noise_std.setConstant(0.000282);
accel_noise_std.setConstant(0.016);
accel_bias_std.setConstant(0.001);
gyro_bias_std.setConstant(0.0001);
}
/// @brief Cast to other scalar type
template <class Scalar2>
Calibration<Scalar2> cast() const {
Calibration<Scalar2> new_cam;
for (const auto& v : T_i_c)
new_cam.T_i_c.emplace_back(v.template cast<Scalar2>());
for (const auto& v : intrinsics)
new_cam.intrinsics.emplace_back(v.template cast<Scalar2>());
for (const auto& v : vignette)
new_cam.vignette.emplace_back(v.template cast<Scalar2>());
new_cam.resolution = resolution;
new_cam.cam_time_offset_ns = cam_time_offset_ns;
new_cam.calib_accel_bias.getParam() =
calib_accel_bias.getParam().template cast<Scalar2>();
new_cam.calib_gyro_bias.getParam() =
calib_gyro_bias.getParam().template cast<Scalar2>();
new_cam.imu_update_rate = imu_update_rate;
new_cam.gyro_noise_std = gyro_noise_std.template cast<Scalar2>();
new_cam.accel_noise_std = accel_noise_std.template cast<Scalar2>();
new_cam.gyro_bias_std = gyro_bias_std.template cast<Scalar2>();
new_cam.accel_bias_std = accel_bias_std.template cast<Scalar2>();
return new_cam;
}
/// @brief Vector of transformations from camera to IMU
///
/// Point in camera coordinate frame \f$ p_c \f$ can be transformed to the
/// point in IMU coordinate frame as \f$ p_i = T_{ic} p_c, T_{ic} \in
/// SE(3)\f$
Eigen::aligned_vector<SE3> T_i_c;
/// @brief Vector of camera intrinsics. Can store different camera models. See
/// \ref GenericCamera.
Eigen::aligned_vector<GenericCamera<Scalar>> intrinsics;
/// @brief Camera resolutions.
Eigen::aligned_vector<Eigen::Vector2i> resolution;
/// @brief Vector of splines representing radially symmetric vignetting for
/// each of the camera.
///
/// Splines use time in nanoseconds for evaluation, but in this case we use
/// distance from the optical center in pixels multiplied by 1e9 as a "time"
/// parameter.
std::vector<basalt::RdSpline<1, 4, Scalar>> vignette;
/// @brief Time offset between cameras and IMU in nanoseconds.
///
/// With raw image timestamp \f$ t_r \f$ and this offset \f$ o \f$ we cam get
/// a timestamp aligned with IMU clock as \f$ t_c = t_r + o \f$.
int64_t cam_time_offset_ns;
/// @brief Static accelerometer bias from calibration.
CalibAccelBias<Scalar> calib_accel_bias;
/// @brief Static gyroscope bias from calibration.
CalibGyroBias<Scalar> calib_gyro_bias;
/// @brief IMU update rate.
Scalar imu_update_rate;
/// @brief Continuous time gyroscope noise standard deviation.
Vec3 gyro_noise_std;
/// @brief Continuous time accelerometer noise standard deviation.
Vec3 accel_noise_std;
/// @brief Continuous time bias random walk standard deviation for gyroscope.
Vec3 gyro_bias_std;
/// @brief Continuous time bias random walk standard deviation for
/// accelerometer.
Vec3 accel_bias_std;
/// @brief Dicrete time gyroscope noise standard deviation.
///
/// \f$ \sigma_d = \sigma_c \sqrt{r} \f$, where \f$ r \f$ is IMU update
/// rate.
inline Vec3 dicrete_time_gyro_noise_std() const {
return gyro_noise_std * std::sqrt(imu_update_rate);
}
/// @brief Dicrete time accelerometer noise standard deviation.
///
/// \f$ \sigma_d = \sigma_c \sqrt{r} \f$, where \f$ r \f$ is IMU update
/// rate.
inline Vec3 dicrete_time_accel_noise_std() const {
return accel_noise_std * std::sqrt(imu_update_rate);
}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
/// @brief Struct to store motion capture to IMU calibration
template <class Scalar>
struct MocapCalibration {
  using Ptr = std::shared_ptr<MocapCalibration>;
  using SE3 = Sophus::SE3<Scalar>;

  /// @brief Default constructor; both clock offsets start at zero.
  MocapCalibration() : mocap_time_offset_ns(0), mocap_to_imu_offset_ns(0) {}

  /// @brief Transformation from motion capture origin to the world
  /// (calibration pattern).
  SE3 T_moc_w;

  /// @brief Transformation from the coordinate frame of the markers attached
  /// to the object to the IMU.
  SE3 T_i_mark;

  /// @brief Initial time alignment between IMU and MoCap clocks based on
  /// message arrival time.
  int64_t mocap_time_offset_ns;

  /// @brief Time offset between IMU and motion capture clock.
  int64_t mocap_to_imu_offset_ns;
};
} // namespace basalt

View File

@@ -0,0 +1,303 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Implementation of unified camera model
*/
#pragma once
#include <basalt/camera/camera_static_assert.hpp>
#include <basalt/utils/sophus_utils.hpp>
namespace basalt {
using std::sqrt;
/// @brief Camera model used in the paper "Bundle Adjustment in the Large".
///
/// See https://grail.cs.washington.edu/projects/bal/ for details.
/// This model has N=3 parameters \f$ \mathbf{i} = \left[f, k_1, k_2
/// \right]^T \f$.
///
/// Unlike the original formulation we assume that the POSITIVE z-axis
/// points in camera direction and thus don't include the "minus" in the
/// perspective projection. You need to consider this when loading BAL data.
///
/// Specifically, for the camera frame we assume the positive z axis pointing
/// forward in view direction and in the image, y is pointing down, x to the
/// right. In the original BAL formulation, the camera points in negative z
/// axis, y is up in the image. Thus when loading the data, we invert the y and
/// z camera axes (y also in the image) in the perspective projection, we don't
/// have the "minus" like in the original Snavely model.
///
/// A 3D point P in camera coordinates is mapped to pixel coordinates p':
/// p = [P / P.z]_xy (perspective division)
/// p' = f * r(p) * p (conversion to pixel coordinates)
/// r(p) = 1.0 + k1 * ||p||^2 + k2 * ||p||^4.
///
/// See \ref project and \ref unproject functions for more details.
template <typename Scalar_ = double>
class BalCamera {
 public:
  using Scalar = Scalar_;
  static constexpr int N = 3;  ///< Number of intrinsic parameters.

  using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
  using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
  using VecN = Eigen::Matrix<Scalar, N, 1>;

  using Mat2 = Eigen::Matrix<Scalar, 2, 2>;
  using Mat24 = Eigen::Matrix<Scalar, 2, 4>;
  using Mat2N = Eigen::Matrix<Scalar, 2, N>;
  using Mat42 = Eigen::Matrix<Scalar, 4, 2>;
  using Mat4N = Eigen::Matrix<Scalar, 4, N>;

  /// @brief Default constructor with zero intrinsics
  BalCamera() { param_.setZero(); }

  /// @brief Construct camera model with given vector of intrinsics
  ///
  /// @param[in] p vector of intrinsic parameters [f, k1, k2]
  explicit BalCamera(const VecN& p) { param_ = p; }

  /// @brief Cast to different scalar type
  template <class Scalar2>
  BalCamera<Scalar2> cast() const {
    return BalCamera<Scalar2>(param_.template cast<Scalar2>());
  }

  /// @brief Camera model name
  ///
  /// @return "bal"
  static std::string getName() { return "bal"; }

  /// @brief Project the point and optionally compute Jacobians
  ///
  /// @param[in] p3d point to project
  /// @param[out] proj result of projection
  /// @param[out] d_proj_d_p3d if not nullptr computed Jacobian of projection
  /// with respect to p3d
  /// @param[out] d_proj_d_param point if not nullptr computed Jacobian of
  /// projection with respect to intrinsic parameters
  /// @return if projection is valid
  template <class DerivedPoint3D, class DerivedPoint2D,
            class DerivedJ3D = std::nullptr_t,
            class DerivedJparam = std::nullptr_t>
  inline bool project(const Eigen::MatrixBase<DerivedPoint3D>& p3d,
                      Eigen::MatrixBase<DerivedPoint2D>& proj,
                      DerivedJ3D d_proj_d_p3d = nullptr,
                      DerivedJparam d_proj_d_param = nullptr) const {
    checkProjectionDerivedTypes<DerivedPoint3D, DerivedPoint2D, DerivedJ3D,
                                DerivedJparam, N>();

    // Evaluate a possibly-lazy Eigen expression exactly once.
    const typename EvalOrReference<DerivedPoint3D>::Type p3d_eval(p3d);

    const Scalar& f = param_[0];
    const Scalar& k1 = param_[1];
    const Scalar& k2 = param_[2];

    const Scalar& x = p3d_eval[0];
    const Scalar& y = p3d_eval[1];
    const Scalar& z = p3d_eval[2];

    // Perspective division: p = [P / P.z]_xy.
    const Scalar mx = x / z;
    const Scalar my = y / z;

    // Radial distortion factor r(p) = 1 + k1 * ||p||^2 + k2 * ||p||^4.
    const Scalar mx2 = mx * mx;
    const Scalar my2 = my * my;
    const Scalar r2 = mx2 + my2;
    const Scalar r4 = r2 * r2;
    const Scalar rp = Scalar(1) + k1 * r2 + k2 * r4;

    proj = Vec2(f * mx * rp, f * my * rp);

    // Only points numerically in front of the camera are valid.
    const bool is_valid = z >= Sophus::Constants<Scalar>::epsilonSqrt();

    // Analytic 2x3 Jacobian w.r.t. the 3D point, computed only on request.
    if constexpr (!std::is_same_v<DerivedJ3D, std::nullptr_t>) {
      BASALT_ASSERT(d_proj_d_p3d);
      d_proj_d_p3d->setZero();

      // d r(p) / d r2 = k1 + 2 * k2 * r2, shared by all entries.
      const Scalar tmp = k1 + k2 * Scalar(2) * r2;

      (*d_proj_d_p3d)(0, 0) = f * (rp + Scalar(2) * mx2 * tmp) / z;
      (*d_proj_d_p3d)(1, 1) = f * (rp + Scalar(2) * my2 * tmp) / z;
      (*d_proj_d_p3d)(1, 0) = (*d_proj_d_p3d)(0, 1) =
          f * my * mx * Scalar(2) * tmp / z;
      (*d_proj_d_p3d)(0, 2) = -f * mx * (rp + Scalar(2) * tmp * r2) / z;
      (*d_proj_d_p3d)(1, 2) = -f * my * (rp + Scalar(2) * tmp * r2) / z;
    } else {
      UNUSED(d_proj_d_p3d);
    }

    // Analytic 2xN Jacobian w.r.t. the intrinsics [f, k1, k2].
    if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
      BASALT_ASSERT(d_proj_d_param);
      (*d_proj_d_param).setZero();
      (*d_proj_d_param)(0, 0) = mx * rp;
      (*d_proj_d_param)(1, 0) = my * rp;
      (*d_proj_d_param)(0, 1) = f * mx * r2;
      (*d_proj_d_param)(1, 1) = f * my * r2;
      (*d_proj_d_param)(0, 2) = f * mx * r4;
      (*d_proj_d_param)(1, 2) = f * my * r4;
    } else {
      UNUSED(d_proj_d_param);
    }

    return is_valid;
  }

  /// @brief Unproject the point and optionally compute Jacobians
  ///
  /// @param[in] proj point to unproject
  /// @param[out] p3d result of unprojection (unit-length bearing vector)
  /// @param[out] d_p3d_d_proj must be nullptr — Jacobians for unprojection
  /// are not implemented for this model
  /// @param[out] d_p3d_d_param must be nullptr — Jacobians for unprojection
  /// are not implemented for this model
  /// @return if unprojection is valid (always true for this model)
  template <class DerivedPoint2D, class DerivedPoint3D,
            class DerivedJ2D = std::nullptr_t,
            class DerivedJparam = std::nullptr_t>
  inline bool unproject(const Eigen::MatrixBase<DerivedPoint2D>& proj,
                        Eigen::MatrixBase<DerivedPoint3D>& p3d,
                        DerivedJ2D d_p3d_d_proj = nullptr,
                        DerivedJparam d_p3d_d_param = nullptr) const {
    checkUnprojectionDerivedTypes<DerivedPoint2D, DerivedPoint3D, DerivedJ2D,
                                  DerivedJparam, N>();

    const typename EvalOrReference<DerivedPoint2D>::Type proj_eval(proj);

    const Scalar& f = param_[0];
    const Scalar& k1 = param_[1];
    const Scalar& k2 = param_[2];

    const Scalar& u = proj_eval[0];
    const Scalar& v = proj_eval[1];

    // Undo the focal-length scaling; pp is the distorted normalized point.
    const Vec2 pp(u / f, v / f);

    // Invert the radial distortion p * r(p) = pp with a fixed number (3) of
    // Gauss-Newton iterations, starting from the distorted point itself.
    Vec2 p = pp;

    for (int i = 0; i < 3; i++) {
      const Scalar r2 = p.squaredNorm();
      const Scalar rp = (Scalar(1) + k1 * r2 + k2 * r2 * r2);

      const Vec2 pp_computed = p * rp;

      // 2x2 Jacobian of p * r(p) w.r.t. p (symmetric).
      const Scalar tmp = k1 + k2 * Scalar(2) * r2;
      Mat2 J_p;
      J_p(0, 0) = (rp + Scalar(2) * p[0] * p[0] * tmp);
      J_p(1, 1) = (rp + Scalar(2) * p[1] * p[1] * tmp);
      J_p(1, 0) = J_p(0, 1) = p[0] * p[1] * Scalar(2) * tmp;

      // Gauss-Newton step: solve the normal equations for the residual.
      const Vec2 dp = (J_p.transpose() * J_p).inverse() * J_p.transpose() *
                      (pp_computed - pp);

      p -= dp;
    }

    // Lift to 3D at z = 1 and normalize to a unit bearing vector. For a
    // 4-vector p3d the homogeneous component stays 0 after setZero().
    p3d.setZero();
    p3d[0] = p[0];
    p3d[1] = p[1];
    p3d[2] = 1;
    p3d.normalize();

    BASALT_ASSERT_STREAM(d_p3d_d_proj == nullptr && d_p3d_d_param == nullptr,
                         "Jacobians for unprojection are not implemented");
    UNUSED(d_p3d_d_proj);
    UNUSED(d_p3d_d_param);

    return true;
  }

  /// @brief Set parameters from initialization
  ///
  /// Initializes the camera model to \f$ \left[f_x, 0, 0 \right]^T \f$,
  /// i.e. both distortion coefficients start at zero.
  ///
  /// @param[in] init vector [fx, fy, cx, cy]
  inline void setFromInit(const Vec4& init) {
    param_[0] = init[0];
    param_[1] = 0;
    param_[2] = 0;
  }

  /// @brief Increment intrinsic parameters by inc
  ///
  /// NOTE(review): unlike some other camera models, no clamping to a valid
  /// range is performed here.
  ///
  /// @param[in] inc increment vector
  void operator+=(const VecN& inc) { param_ += inc; }

  /// @brief Returns a const reference to the intrinsic parameters vector
  ///
  /// The order is following: \f$ \left[f, k1, k2 \right]^T \f$
  /// @return const reference to the intrinsic parameters vector
  const VecN& getParam() const { return param_; }

  /// @brief Projections used for unit-tests
  static Eigen::aligned_vector<BalCamera> getTestProjections() {
    Eigen::aligned_vector<BalCamera> res;

    VecN vec1;
    vec1 << 399.752, -3.78048e-05, 5.37738e-07;
    res.emplace_back(vec1);

    return res;
  }

  /// @brief Resolutions used for unit-tests
  static Eigen::aligned_vector<Eigen::Vector2i> getTestResolutions() {
    Eigen::aligned_vector<Eigen::Vector2i> res;
    res.emplace_back(640, 480);
    return res;
  }

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

 private:
  // Intrinsic parameter vector [f, k1, k2]^T.
  VecN param_;
};
} // namespace basalt

View File

@@ -0,0 +1,126 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Static asserts for checking projection and unprojection types
*/
#pragma once
#include <Eigen/Dense>
#include <string_view>
namespace basalt {
/// @brief Helper struct to evaluate lazy Eigen expressions or const reference
/// them if they are Eigen::Matrix or Eigen::Map types
template <class Derived>
struct EvalOrReference {
  // Eigen's eval<> trait: a plain Matrix type for lazy expressions, so the
  // expression is evaluated exactly once at construction.
  using Type = typename Eigen::internal::eval<Derived>::type;
};
/// @brief Helper struct to evaluate lazy Eigen expressions or const reference
/// them if they are Eigen::Matrix or Eigen::Map types
/// Specialization for Eigen::Map: keeps a const reference instead of copying
/// the mapped data.
///
/// NOTE(review): template parameter names were changed from the `_Scalar`
/// style to the trailing-underscore style — identifiers starting with an
/// underscore followed by an uppercase letter are reserved to the C++
/// implementation. This is a purely local rename with no interface change.
template <typename Scalar_, int Rows_, int Cols_, int Options_, int MaxRows_,
          int MaxCols_, int MapOptions_, typename StrideType_>
struct EvalOrReference<Eigen::Map<
    Eigen::Matrix<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>,
    MapOptions_, StrideType_>> {
  using Type = const Eigen::MatrixBase<Eigen::Map<
      Eigen::Matrix<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>,
      MapOptions_, StrideType_>> &;
};
/// @brief Helper function to check that 3D points are 3 or 4 dimensional and
/// the Jacobians have the appropriate shape in the project function
/// @brief Compile-time shape checks for the project() interface.
///
/// 3D points must be 3- (Euclidean) or 4- (homogeneous) dimensional vectors,
/// 2D points must be 2-vectors, and the optional Jacobian pointer types must
/// match those sizes.
template <class DerivedPoint3D, class DerivedPoint2D, class DerivedJ3DPtr,
          class DerivedJparamPtr, int N>
constexpr inline void checkProjectionDerivedTypes() {
  EIGEN_STATIC_ASSERT(DerivedPoint3D::IsVectorAtCompileTime &&
                          (DerivedPoint3D::SizeAtCompileTime == 3 ||
                           DerivedPoint3D::SizeAtCompileTime == 4),
                      THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE)
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(DerivedPoint2D, 2);

  // Jacobian w.r.t. the point: 2 x (3 or 4), matching the point dimension.
  if constexpr (!std::is_same_v<DerivedJ3DPtr, std::nullptr_t>) {
    static_assert(std::is_pointer_v<DerivedJ3DPtr>);
    // C++17 alias form, consistent with the _v traits used in this file.
    using DerivedJ3D = std::remove_pointer_t<DerivedJ3DPtr>;
    EIGEN_STATIC_ASSERT(DerivedJ3D::RowsAtCompileTime == 2 &&
                            int(DerivedJ3D::ColsAtCompileTime) ==
                                int(DerivedPoint3D::SizeAtCompileTime),
                        THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)
  }

  // Jacobian w.r.t. the intrinsics: 2 x N.
  if constexpr (!std::is_same_v<DerivedJparamPtr, std::nullptr_t>) {
    static_assert(std::is_pointer_v<DerivedJparamPtr>);
    using DerivedJparam = std::remove_pointer_t<DerivedJparamPtr>;
    EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(DerivedJparam, 2, N);
  }
}
/// @brief Helper function to check that 3D points are 3 or 4 dimensional and
/// the Jacobians have the appropriate shape in the unproject function
/// @brief Compile-time shape checks for the unproject() interface.
///
/// 2D points must be 2-vectors, 3D points must be 3- (Euclidean) or 4-
/// (homogeneous) dimensional vectors, and the optional Jacobian pointer
/// types must match those sizes.
template <class DerivedPoint2D, class DerivedPoint3D, class DerivedJ2DPtr,
          class DerivedJparamPtr, int N>
constexpr inline void checkUnprojectionDerivedTypes() {
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(DerivedPoint2D, 2);
  EIGEN_STATIC_ASSERT(DerivedPoint3D::IsVectorAtCompileTime &&
                          (DerivedPoint3D::SizeAtCompileTime == 3 ||
                           DerivedPoint3D::SizeAtCompileTime == 4),
                      THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE)

  // Jacobian w.r.t. the pixel: (3 or 4) x 2, matching the point dimension.
  if constexpr (!std::is_same_v<DerivedJ2DPtr, std::nullptr_t>) {
    static_assert(std::is_pointer_v<DerivedJ2DPtr>);
    // C++17 alias form, consistent with the _v traits used in this file.
    using DerivedJ2D = std::remove_pointer_t<DerivedJ2DPtr>;
    EIGEN_STATIC_ASSERT(DerivedJ2D::ColsAtCompileTime == 2 &&
                            int(DerivedJ2D::RowsAtCompileTime) ==
                                int(DerivedPoint3D::SizeAtCompileTime),
                        THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)
  }

  // Jacobian w.r.t. the intrinsics: (3 or 4) x N.
  if constexpr (!std::is_same_v<DerivedJparamPtr, std::nullptr_t>) {
    static_assert(std::is_pointer_v<DerivedJparamPtr>);
    using DerivedJparam = std::remove_pointer_t<DerivedJparamPtr>;
    EIGEN_STATIC_ASSERT(DerivedJparam::ColsAtCompileTime == N &&
                            int(DerivedJparam::RowsAtCompileTime) ==
                                int(DerivedPoint3D::SizeAtCompileTime),
                        THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)
  }
}
} // namespace basalt

View File

@@ -0,0 +1,448 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Implementation of double sphere camera model
*/
#pragma once
#include <basalt/camera/camera_static_assert.hpp>
#include <basalt/utils/sophus_utils.hpp>
namespace basalt {
using std::sqrt;
/// @brief Double Sphere camera model
///
/// \image html ds.png
/// This model has N=6 parameters \f$ \mathbf{i} = \left[f_x, f_y, c_x, c_y,
/// \xi, \alpha \right]^T \f$ with \f$ \xi \in [-1,1], \alpha \in [0,1] \f$. See
/// \ref project and \ref unproject functions for more details.
template <typename Scalar_ = double>
class DoubleSphereCamera {
public:
using Scalar = Scalar_;
static constexpr int N = 6; ///< Number of intrinsic parameters.
using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
using VecN = Eigen::Matrix<Scalar, N, 1>;
using Mat24 = Eigen::Matrix<Scalar, 2, 4>;
using Mat2N = Eigen::Matrix<Scalar, 2, N>;
using Mat42 = Eigen::Matrix<Scalar, 4, 2>;
using Mat4N = Eigen::Matrix<Scalar, 4, N>;
/// @brief Default constructor with zero intrinsics
DoubleSphereCamera() { param_.setZero(); }
/// @brief Construct camera model with given vector of intrinsics
///
/// @param[in] p vector of intrinsic parameters [fx, fy, cx, cy, xi, alpha]
explicit DoubleSphereCamera(const VecN& p) { param_ = p; }
/// @brief Cast to different scalar type
template <class Scalar2>
DoubleSphereCamera<Scalar2> cast() const {
return DoubleSphereCamera<Scalar2>(param_.template cast<Scalar2>());
}
/// @brief Camera model name
///
/// @return "ds"
static std::string getName() { return "ds"; }
/// @brief Project the point and optionally compute Jacobians
///
/// Projection function is defined as follows:
/// \f{align}{
/// \pi(\mathbf{x}, \mathbf{i}) &=
/// \begin{bmatrix}
/// f_x{\frac{x}{\alpha d_2 + (1-\alpha) (\xi d_1 + z)}}
/// \\ f_y{\frac{y}{\alpha d_2 + (1-\alpha) (\xi d_1 + z)}}
/// \\ \end{bmatrix}
/// +
/// \begin{bmatrix}
/// c_x
/// \\ c_y
/// \\ \end{bmatrix},
/// \\ d_1 &= \sqrt{x^2 + y^2 + z^2},
/// \\ d_2 &= \sqrt{x^2 + y^2 + (\xi d_1 + z)^2}.
/// \f}
/// A set of 3D points that results in valid projection is expressed as
/// follows: \f{align}{
/// \Omega &= \{\mathbf{x} \in \mathbb{R}^3 ~|~ z > -w_2 d_1 \}
/// \\ w_2 &= \frac{w_1+\xi}{\sqrt{2w_1\xi + \xi^2 + 1}}
/// \\ w_1 &= \begin{cases} \frac{\alpha}{1-\alpha}, & \mbox{if } \alpha
/// \le 0.5 \\ \frac{1-\alpha}{\alpha} & \mbox{if } \alpha > 0.5
/// \end{cases}
/// \f}
///
/// @param[in] p3d point to project
/// @param[out] proj result of projection
/// @param[out] d_proj_d_p3d if not nullptr computed Jacobian of projection
/// with respect to p3d
/// @param[out] d_proj_d_param point if not nullptr computed Jacobian of
/// projection with respect to intrinsic parameters
/// @return if projection is valid
template <class DerivedPoint3D, class DerivedPoint2D,
          class DerivedJ3D = std::nullptr_t,
          class DerivedJparam = std::nullptr_t>
inline bool project(const Eigen::MatrixBase<DerivedPoint3D>& p3d,
                    Eigen::MatrixBase<DerivedPoint2D>& proj,
                    DerivedJ3D d_proj_d_p3d = nullptr,
                    DerivedJparam d_proj_d_param = nullptr) const {
  checkProjectionDerivedTypes<DerivedPoint3D, DerivedPoint2D, DerivedJ3D,
                              DerivedJparam, N>();

  // Evaluate a possibly-lazy Eigen expression exactly once.
  const typename EvalOrReference<DerivedPoint3D>::Type p3d_eval(p3d);

  const Scalar& fx = param_[0];
  const Scalar& fy = param_[1];
  const Scalar& cx = param_[2];
  const Scalar& cy = param_[3];
  const Scalar& xi = param_[4];
  const Scalar& alpha = param_[5];

  const Scalar& x = p3d_eval[0];
  const Scalar& y = p3d_eval[1];
  const Scalar& z = p3d_eval[2];

  const Scalar xx = x * x;
  const Scalar yy = y * y;
  const Scalar zz = z * z;

  const Scalar r2 = xx + yy;

  // d1 = sqrt(x^2 + y^2 + z^2) (see projection formula above).
  const Scalar d1_2 = r2 + zz;
  const Scalar d1 = sqrt(d1_2);

  // w1, w2 define the valid-projection region Omega = { z > -w2 * d1 }
  // (see the set Omega in the documentation above).
  const Scalar w1 = alpha > Scalar(0.5) ? (Scalar(1) - alpha) / alpha
                                        : alpha / (Scalar(1) - alpha);
  const Scalar w2 =
      (w1 + xi) / sqrt(Scalar(2) * w1 * xi + xi * xi + Scalar(1));
  const bool is_valid = (z > -w2 * d1);

  // k = xi * d1 + z; d2 = sqrt(x^2 + y^2 + k^2).
  const Scalar k = xi * d1 + z;
  const Scalar kk = k * k;

  const Scalar d2_2 = r2 + kk;
  const Scalar d2 = sqrt(d2_2);

  // Common projection denominator: alpha * d2 + (1 - alpha) * k.
  const Scalar norm = alpha * d2 + (Scalar(1) - alpha) * k;

  const Scalar mx = x / norm;
  const Scalar my = y / norm;

  proj[0] = fx * mx + cx;
  proj[1] = fy * my + cy;

  // Analytic 2x3 Jacobian w.r.t. the 3D point, computed only on request.
  if constexpr (!std::is_same_v<DerivedJ3D, std::nullptr_t>) {
    BASALT_ASSERT(d_proj_d_p3d);
    const Scalar norm2 = norm * norm;
    const Scalar xy = x * y;
    const Scalar tt2 = xi * z / d1 + Scalar(1);

    const Scalar d_norm_d_r2 = (xi * (Scalar(1) - alpha) / d1 +
                                alpha * (xi * k / d1 + Scalar(1)) / d2) /
                               norm2;

    const Scalar tmp2 =
        ((Scalar(1) - alpha) * tt2 + alpha * k * tt2 / d2) / norm2;

    d_proj_d_p3d->setZero();
    (*d_proj_d_p3d)(0, 0) = fx * (Scalar(1) / norm - xx * d_norm_d_r2);
    (*d_proj_d_p3d)(1, 0) = -fy * xy * d_norm_d_r2;
    (*d_proj_d_p3d)(0, 1) = -fx * xy * d_norm_d_r2;
    (*d_proj_d_p3d)(1, 1) = fy * (Scalar(1) / norm - yy * d_norm_d_r2);
    (*d_proj_d_p3d)(0, 2) = -fx * x * tmp2;
    (*d_proj_d_p3d)(1, 2) = -fy * y * tmp2;
  } else {
    UNUSED(d_proj_d_p3d);
  }

  // Analytic 2xN Jacobian w.r.t. the intrinsics [fx, fy, cx, cy, xi, alpha].
  if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
    BASALT_ASSERT(d_proj_d_param);
    const Scalar norm2 = norm * norm;

    (*d_proj_d_param).setZero();
    (*d_proj_d_param)(0, 0) = mx;
    (*d_proj_d_param)(0, 2) = Scalar(1);
    (*d_proj_d_param)(1, 1) = my;
    (*d_proj_d_param)(1, 3) = Scalar(1);

    const Scalar tmp4 = (alpha - Scalar(1) - alpha * k / d2) * d1 / norm2;
    const Scalar tmp5 = (k - d2) / norm2;

    (*d_proj_d_param)(0, 4) = fx * x * tmp4;
    (*d_proj_d_param)(1, 4) = fy * y * tmp4;
    (*d_proj_d_param)(0, 5) = fx * x * tmp5;
    (*d_proj_d_param)(1, 5) = fy * y * tmp5;
  } else {
    UNUSED(d_proj_d_param);
  }

  return is_valid;
}
/// @brief Unproject the point and optionally compute Jacobians
///
/// The unprojection function is computed as follows: \f{align}{
///     \pi^{-1}(\mathbf{u}, \mathbf{i}) &=
///     \frac{m_z \xi + \sqrt{m_z^2 + (1 - \xi^2) r^2}}{m_z^2 + r^2}
///     \begin{bmatrix}
///     m_x \\ m_y \\m_z
///     \\ \end{bmatrix}-\begin{bmatrix}
///     0 \\ 0 \\ \xi
///     \\ \end{bmatrix},
///     \\ m_x &= \frac{u - c_x}{f_x},
///     \\ m_y &= \frac{v - c_y}{f_y},
///     \\ r^2 &= m_x^2 + m_y^2,
///     \\ m_z &= \frac{1 - \alpha^2 r^2}{\alpha \sqrt{1 - (2 \alpha - 1)
///     r^2}
///     + 1 - \alpha},
/// \f}
///
/// The valid range of unprojections is \f{align}{
///     \Theta &= \begin{cases}
///     \mathbb{R}^2 & \mbox{if } \alpha \le 0.5
///     \\ \{ \mathbf{u} \in \mathbb{R}^2 ~|~ r^2 \le \frac{1}{2\alpha-1} \} &
///     \mbox{if} \alpha > 0.5 \end{cases}
/// \f}
///
/// @param[in] proj point to unproject
/// @param[out] p3d result of unprojection
/// @param[out] d_p3d_d_proj if not nullptr computed Jacobian of unprojection
/// with respect to proj
/// @param[out] d_p3d_d_param point if not nullptr computed Jacobian of
/// unprojection with respect to intrinsic parameters
/// @return if unprojection is valid
template <class DerivedPoint2D, class DerivedPoint3D,
          class DerivedJ2D = std::nullptr_t,
          class DerivedJparam = std::nullptr_t>
inline bool unproject(const Eigen::MatrixBase<DerivedPoint2D>& proj,
                      Eigen::MatrixBase<DerivedPoint3D>& p3d,
                      DerivedJ2D d_p3d_d_proj = nullptr,
                      DerivedJparam d_p3d_d_param = nullptr) const {
  checkUnprojectionDerivedTypes<DerivedPoint2D, DerivedPoint3D, DerivedJ2D,
                                DerivedJparam, N>();

  const typename EvalOrReference<DerivedPoint2D>::Type proj_eval(proj);

  const Scalar& fx = param_[0];
  const Scalar& fy = param_[1];
  const Scalar& cx = param_[2];
  const Scalar& cy = param_[3];
  const Scalar& xi = param_[4];
  const Scalar& alpha = param_[5];

  // Back-project the pixel onto the normalized image plane.
  const Scalar mx = (proj_eval[0] - cx) / fx;
  const Scalar my = (proj_eval[1] - cy) / fy;

  const Scalar r2 = mx * mx + my * my;

  // For alpha > 0.5 only pixels with r^2 < 1 / (2 alpha - 1) are valid
  // (the \Theta set in the documentation above).
  const bool is_valid =
      !static_cast<bool>(alpha > Scalar(0.5) &&
                         (r2 >= Scalar(1) / (Scalar(2) * alpha - Scalar(1))));

  // NOTE(review): the "xi1"/"xi2" naming appears to denote the two
  // distortion parameters (xi, alpha): xi1_2 = xi^2, xi2_2 = alpha^2.
  const Scalar xi2_2 = alpha * alpha;
  const Scalar xi1_2 = xi * xi;

  const Scalar sqrt2 = sqrt(Scalar(1) - (Scalar(2) * alpha - Scalar(1)) * r2);

  // mz is the m_z term of the unprojection formula above.
  const Scalar norm2 = alpha * sqrt2 + Scalar(1) - alpha;
  const Scalar mz = (Scalar(1) - xi2_2 * r2) / norm2;
  const Scalar mz2 = mz * mz;

  // k is the scale factor applied to (m_x, m_y, m_z) before subtracting
  // the (0, 0, xi) offset.
  const Scalar norm1 = mz2 + r2;
  const Scalar sqrt1 = sqrt(mz2 + (Scalar(1) - xi1_2) * r2);
  const Scalar k = (mz * xi + sqrt1) / norm1;

  p3d.setZero();
  p3d[0] = k * mx;
  p3d[1] = k * my;
  p3d[2] = k * mz - xi;

  if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t> ||
                !std::is_same_v<DerivedJparam, std::nullptr_t>) {
    // Chain-rule building blocks shared by both Jacobians.
    const Scalar norm2_2 = norm2 * norm2;
    const Scalar norm1_2 = norm1 * norm1;

    const Scalar d_mz_d_r2 = (Scalar(0.5) * alpha - xi2_2) *
                                 (r2 * xi2_2 - Scalar(1)) /
                                 (sqrt2 * norm2_2) -
                             xi2_2 / norm2;

    const Scalar d_mz_d_mx = 2 * mx * d_mz_d_r2;
    const Scalar d_mz_d_my = 2 * my * d_mz_d_r2;

    const Scalar d_k_d_mz =
        (norm1 * (xi * sqrt1 + mz) - 2 * mz * (mz * xi + sqrt1) * sqrt1) /
        (norm1_2 * sqrt1);

    const Scalar d_k_d_r2 =
        (xi * d_mz_d_r2 +
         Scalar(0.5) / sqrt1 *
             (Scalar(2) * mz * d_mz_d_r2 + Scalar(1) - xi1_2)) /
            norm1 -
        (mz * xi + sqrt1) * (Scalar(2) * mz * d_mz_d_r2 + Scalar(1)) /
            norm1_2;

    const Scalar d_k_d_mx = d_k_d_r2 * 2 * mx;
    const Scalar d_k_d_my = d_k_d_r2 * 2 * my;

    constexpr int SIZE_3D = DerivedPoint3D::SizeAtCompileTime;

    // c0 / c1 are the Jacobian columns of p3d w.r.t. the pixel coordinates
    // u and v respectively (hence the division by fx and fy).
    Eigen::Matrix<Scalar, SIZE_3D, 1> c0, c1;

    c0.setZero();
    c0[0] = (mx * d_k_d_mx + k);
    c0[1] = my * d_k_d_mx;
    c0[2] = (mz * d_k_d_mx + k * d_mz_d_mx);
    c0 /= fx;

    c1.setZero();
    c1[0] = mx * d_k_d_my;
    c1[1] = (my * d_k_d_my + k);
    c1[2] = (mz * d_k_d_my + k * d_mz_d_my);
    c1 /= fy;

    if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t>) {
      BASALT_ASSERT(d_p3d_d_proj);
      d_p3d_d_proj->col(0) = c0;
      d_p3d_d_proj->col(1) = c1;
    } else {
      UNUSED(d_p3d_d_proj);
    }

    if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
      BASALT_ASSERT(d_p3d_d_param);

      const Scalar d_k_d_xi1 = (mz * sqrt1 - xi * r2) / (sqrt1 * norm1);

      const Scalar d_mz_d_xi2 =
          ((Scalar(1) - r2 * xi2_2) *
               (r2 * alpha / sqrt2 - sqrt2 + Scalar(1)) / norm2 -
           Scalar(2) * r2 * alpha) /
          norm2;

      const Scalar d_k_d_xi2 = d_k_d_mz * d_mz_d_xi2;

      d_p3d_d_param->setZero();

      // Columns 0..3: derivatives w.r.t. fx, fy, cx, cy reuse c0 / c1.
      (*d_p3d_d_param).col(0) = -c0 * mx;
      (*d_p3d_d_param).col(1) = -c1 * my;

      (*d_p3d_d_param).col(2) = -c0;
      (*d_p3d_d_param).col(3) = -c1;

      // Columns 4..5: derivatives w.r.t. xi and alpha.
      (*d_p3d_d_param)(0, 4) = mx * d_k_d_xi1;
      (*d_p3d_d_param)(1, 4) = my * d_k_d_xi1;
      (*d_p3d_d_param)(2, 4) = mz * d_k_d_xi1 - 1;

      (*d_p3d_d_param)(0, 5) = mx * d_k_d_xi2;
      (*d_p3d_d_param)(1, 5) = my * d_k_d_xi2;
      (*d_p3d_d_param)(2, 5) = mz * d_k_d_xi2 + k * d_mz_d_xi2;
    } else {
      UNUSED(d_p3d_d_param);
      UNUSED(d_k_d_mz);
    }
  } else {
    UNUSED(d_p3d_d_proj);
    UNUSED(d_p3d_d_param);
  }

  return is_valid;
}
/// @brief Set parameters from initialization
///
/// Initializes the camera model to \f$ \left[f_x, f_y, c_x, c_y, 0, 0.5
/// \right]^T \f$
///
/// @param[in] init vector [fx, fy, cx, cy]
inline void setFromInit(const Vec4& init) {
param_[0] = init[0];
param_[1] = init[1];
param_[2] = init[2];
param_[3] = init[3];
param_[4] = 0;
param_[5] = 0.5;
}
/// @brief Increment intrinsic parameters by inc and clamp the values to the
/// valid range
///
/// @param[in] inc increment vector
void operator+=(const VecN& inc) {
  param_ += inc;
  // Keep the distortion parameters inside the model's valid domain:
  // xi in [-1, 1], alpha in [0, 1].
  param_[4] = std::clamp(param_[4], Scalar(-1), Scalar(1));
  param_[5] = std::clamp(param_[5], Scalar(0), Scalar(1));
}
/// @brief Returns a const reference to the intrinsic parameters vector
///
/// The order is the following: \f$ \left[f_x, f_y, c_x, c_y, \xi, \alpha
/// \right]^T \f$
/// @return const reference to the intrinsic parameters vector
const VecN& getParam() const { return param_; }
/// @brief Projections used for unit-tests
static Eigen::aligned_vector<DoubleSphereCamera> getTestProjections() {
  Eigen::aligned_vector<DoubleSphereCamera> res;

  VecN vec1;
  // Intrinsics in the order [fx, fy, cx, cy, xi, alpha].
  // NOTE(review): the 0.5 factors suggest these values were rescaled from a
  // full-resolution calibration -- confirm against the original dataset.
  vec1 << 0.5 * 805, 0.5 * 800, 505, 509, 0.5 * -0.150694, 0.5 * 1.48785;
  res.emplace_back(vec1);
  return res;
}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
private:
VecN param_;
};
} // namespace basalt

View File

@@ -0,0 +1,430 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Implementation of extended unified camera model
*/
#pragma once
#include <basalt/camera/camera_static_assert.hpp>
#include <basalt/utils/sophus_utils.hpp>
namespace basalt {
using std::sqrt;
/// @brief Extended unified camera model
///
/// \image html eucm.png
/// This model has N=6 parameters \f$ \mathbf{i} = \left[f_x, f_y, c_x, c_y,
/// \alpha, \beta \right]^T \f$ with \f$ \alpha \in [0,1], \beta > 0 \f$.
/// See \ref project and \ref unproject functions for more details.
template <typename Scalar_>
class ExtendedUnifiedCamera {
 public:
  using Scalar = Scalar_;
  static constexpr int N = 6;  ///< Number of intrinsic parameters.

  using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
  using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
  using VecN = Eigen::Matrix<Scalar, N, 1>;

  using Mat24 = Eigen::Matrix<Scalar, 2, 4>;
  using Mat2N = Eigen::Matrix<Scalar, 2, N>;
  using Mat42 = Eigen::Matrix<Scalar, 4, 2>;
  using Mat4N = Eigen::Matrix<Scalar, 4, N>;

  /// @brief Default constructor with zero intrinsics
  ExtendedUnifiedCamera() { param_.setZero(); }

  /// @brief Construct camera model with given vector of intrinsics
  ///
  /// @param[in] p vector of intrinsic parameters [fx, fy, cx, cy, alpha, beta]
  explicit ExtendedUnifiedCamera(const VecN& p) { param_ = p; }

  /// @brief Cast to different scalar type
  template <class Scalar2>
  ExtendedUnifiedCamera<Scalar2> cast() const {
    return ExtendedUnifiedCamera<Scalar2>(param_.template cast<Scalar2>());
  }

  /// @brief Camera model name
  ///
  /// @return "eucm"
  static std::string getName() { return "eucm"; }

  /// @brief Project the point and optionally compute Jacobians
  ///
  /// Projection function is defined as follows:
  /// \f{align}{
  ///   \pi(\mathbf{x}, \mathbf{i}) &=
  ///   \begin{bmatrix}
  ///   f_x{\frac{x}{\alpha d + (1-\alpha) z}}
  ///   \\ f_y{\frac{y}{\alpha d + (1-\alpha) z}}
  ///   \\ \end{bmatrix}
  ///   +
  ///   \begin{bmatrix}
  ///   c_x
  ///   \\ c_y
  ///   \\ \end{bmatrix},
  ///   \\ d &= \sqrt{\beta(x^2 + y^2) + z^2}.
  /// \f}
  /// A set of 3D points that results in valid projection is expressed as
  /// follows: \f{align}{
  ///   \Omega &= \{\mathbf{x} \in \mathbb{R}^3 ~|~ z > -wd \},
  ///   \\ w &= \begin{cases} \frac{\alpha}{1-\alpha}, & \mbox{if } \alpha \le
  ///   0.5,
  ///   \\ \frac{1-\alpha}{\alpha} & \mbox{if } \alpha > 0.5, \end{cases} \f}
  ///
  /// @param[in] p3d point to project
  /// @param[out] proj result of projection
  /// @param[out] d_proj_d_p3d if not nullptr computed Jacobian of projection
  /// with respect to p3d
  /// @param[out] d_proj_d_param point if not nullptr computed Jacobian of
  /// projection with respect to intrinsic parameters
  /// @return if projection is valid
  template <class DerivedPoint3D, class DerivedPoint2D,
            class DerivedJ3D = std::nullptr_t,
            class DerivedJparam = std::nullptr_t>
  inline bool project(const Eigen::MatrixBase<DerivedPoint3D>& p3d,
                      Eigen::MatrixBase<DerivedPoint2D>& proj,
                      DerivedJ3D d_proj_d_p3d = nullptr,
                      DerivedJparam d_proj_d_param = nullptr) const {
    checkProjectionDerivedTypes<DerivedPoint3D, DerivedPoint2D, DerivedJ3D,
                                DerivedJparam, N>();

    const typename EvalOrReference<DerivedPoint3D>::Type p3d_eval(p3d);

    const Scalar& fx = param_[0];
    const Scalar& fy = param_[1];
    const Scalar& cx = param_[2];
    const Scalar& cy = param_[3];
    const Scalar& alpha = param_[4];
    const Scalar& beta = param_[5];

    const Scalar& x = p3d_eval[0];
    const Scalar& y = p3d_eval[1];
    const Scalar& z = p3d_eval[2];

    // rho = sqrt(beta * (x^2 + y^2) + z^2) is the generalized distance d
    // from the projection formula above.
    const Scalar r2 = x * x + y * y;
    const Scalar rho2 = beta * r2 + z * z;
    const Scalar rho = sqrt(rho2);

    const Scalar norm = alpha * rho + (Scalar(1) - alpha) * z;

    const Scalar mx = x / norm;
    const Scalar my = y / norm;

    proj = Vec2(fx * mx + cx, fy * my + cy);

    // Check if valid: the point must satisfy z > -w * rho (the \Omega set
    // in the documentation above).
    const Scalar w = alpha > Scalar(0.5) ? (Scalar(1) - alpha) / alpha
                                         : alpha / (Scalar(1) - alpha);
    const bool is_valid = (z > -w * rho);

    if constexpr (!std::is_same_v<DerivedJ3D, std::nullptr_t>) {
      BASALT_ASSERT(d_proj_d_p3d);
      // All entries share the common factor 1 / (norm^2 * rho), which is
      // divided out once at the end.
      const Scalar denom = norm * norm * rho;
      const Scalar mid = -(alpha * beta * x * y);
      const Scalar add = norm * rho;
      const Scalar addz = (alpha * z + (Scalar(1) - alpha) * rho);

      (*d_proj_d_p3d).setZero();
      (*d_proj_d_p3d)(0, 0) = fx * (add - x * x * alpha * beta);
      (*d_proj_d_p3d)(1, 0) = fy * mid;
      (*d_proj_d_p3d)(0, 1) = fx * mid;
      (*d_proj_d_p3d)(1, 1) = fy * (add - y * y * alpha * beta);
      (*d_proj_d_p3d)(0, 2) = -fx * x * addz;
      (*d_proj_d_p3d)(1, 2) = -fy * y * addz;
      (*d_proj_d_p3d) /= denom;
    } else {
      UNUSED(d_proj_d_p3d);
    }

    if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
      BASALT_ASSERT(d_proj_d_param);
      const Scalar norm2 = norm * norm;

      (*d_proj_d_param).setZero();
      // Columns 0..3: derivatives w.r.t. fx, fy, cx, cy.
      (*d_proj_d_param)(0, 0) = mx;
      (*d_proj_d_param)(0, 2) = Scalar(1);
      (*d_proj_d_param)(1, 1) = my;
      (*d_proj_d_param)(1, 3) = Scalar(1);

      // Columns 4..5: derivatives w.r.t. alpha and beta.
      const Scalar tmp_x = -fx * x / norm2;
      const Scalar tmp_y = -fy * y / norm2;

      const Scalar tmp4 = (rho - z);
      (*d_proj_d_param)(0, 4) = tmp_x * tmp4;
      (*d_proj_d_param)(1, 4) = tmp_y * tmp4;

      const Scalar tmp5 = Scalar(0.5) * alpha * r2 / rho;
      (*d_proj_d_param)(0, 5) = tmp_x * tmp5;
      (*d_proj_d_param)(1, 5) = tmp_y * tmp5;
    } else {
      UNUSED(d_proj_d_param);
    }

    return is_valid;
  }

  /// @brief Unproject the point and optionally compute Jacobians
  ///
  /// The unprojection function is computed as follows: \f{align}{
  ///     \pi ^ { -1 }(\mathbf{u}, \mathbf{i}) &=
  ///     \frac{1} {
  ///       \sqrt { m_x ^ 2 + m_y ^ 2 + m_z ^ 2 }
  ///     }
  ///     \begin{bmatrix} m_x \\ m_y \\ m_z \\ \end{bmatrix},
  ///     \\ m_x &= \frac{u - c_x}{f_x},
  ///     \\ m_y &= \frac{v - c_y}{f_y},
  ///     \\ r^2 &= m_x^2 + m_y^2,
  ///     \\ m_z &= \frac{1 - \beta \alpha^2 r^2}{\alpha \sqrt{1 - (2\alpha - 1)
  ///     \beta r^2} + (1 - \alpha)}. \f}
  ///
  /// The valid range of unprojections is \f{align}{
  ///    \Theta &= \begin{cases}
  ///    \mathbb{R}^2 & \mbox{if } \alpha \le 0.5
  ///    \\ \{ \mathbf{u} \in \mathbb{R}^2 ~|~ r^2 \le \frac{1}{\beta(2\alpha-1)}
  ///    \} & \mbox{if } \alpha > 0.5 \end{cases}
  /// \f}
  ///
  /// @param[in] proj point to unproject
  /// @param[out] p3d result of unprojection
  /// @param[out] d_p3d_d_proj if not nullptr computed Jacobian of unprojection
  /// with respect to proj
  /// @param[out] d_p3d_d_param point if not nullptr computed Jacobian of
  /// unprojection with respect to intrinsic parameters
  /// @return if unprojection is valid
  template <class DerivedPoint2D, class DerivedPoint3D,
            class DerivedJ2D = std::nullptr_t,
            class DerivedJparam = std::nullptr_t>
  inline bool unproject(const Eigen::MatrixBase<DerivedPoint2D>& proj,
                        Eigen::MatrixBase<DerivedPoint3D>& p3d,
                        DerivedJ2D d_p3d_d_proj = nullptr,
                        DerivedJparam d_p3d_d_param = nullptr) const {
    checkUnprojectionDerivedTypes<DerivedPoint2D, DerivedPoint3D, DerivedJ2D,
                                  DerivedJparam, N>();

    const typename EvalOrReference<DerivedPoint2D>::Type proj_eval(proj);

    const Scalar& fx = param_[0];
    const Scalar& fy = param_[1];
    const Scalar& cx = param_[2];
    const Scalar& cy = param_[3];
    const Scalar& alpha = param_[4];
    const Scalar& beta = param_[5];

    // Back-project the pixel onto the normalized image plane.
    const Scalar mx = (proj_eval[0] - cx) / fx;
    const Scalar my = (proj_eval[1] - cy) / fy;

    const Scalar r2 = mx * mx + my * my;
    const Scalar gamma = Scalar(1) - alpha;

    // Check if valid: alpha - gamma == 2 * alpha - 1, so this is the
    // r^2 < 1 / (beta * (2 alpha - 1)) bound from the docs above.
    const bool is_valid = !static_cast<bool>(
        alpha > Scalar(0.5) && (r2 >= Scalar(1) / ((alpha - gamma) * beta)));

    // k is the m_z term of the unprojection formula above.
    const Scalar tmp1 = (Scalar(1) - alpha * alpha * beta * r2);
    const Scalar tmp_sqrt = sqrt(Scalar(1) - (alpha - gamma) * beta * r2);
    const Scalar tmp2 = (alpha * tmp_sqrt + gamma);

    const Scalar k = tmp1 / tmp2;

    // Normalize (m_x, m_y, m_z) to a unit-length bearing vector.
    const Scalar norm = sqrt(r2 + k * k);

    p3d.setZero();
    p3d(0) = mx / norm;
    p3d(1) = my / norm;
    p3d(2) = k / norm;

    if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t> ||
                  !std::is_same_v<DerivedJparam, std::nullptr_t>) {
      const Scalar norm2 = norm * norm;
      const Scalar tmp2_2 = tmp2 * tmp2;

      const Scalar d_k_d_r2 =
          Scalar(0.5) * alpha * beta *
          (-Scalar(2) * alpha * tmp2 + tmp1 * (alpha - gamma) / tmp_sqrt) /
          tmp2_2;

      const Scalar d_norm_inv_d_r2 =
          -Scalar(0.5) * (Scalar(1) + Scalar(2) * k * d_k_d_r2) / norm2;

      constexpr int SIZE_3D = DerivedPoint3D::SizeAtCompileTime;

      // c0 / c1 are the Jacobian columns of p3d w.r.t. the pixel
      // coordinates u and v respectively (hence the division by fx and fy).
      Eigen::Matrix<Scalar, SIZE_3D, 1> c0, c1;

      c0.setZero();
      c0[0] = (1 + 2 * mx * mx * d_norm_inv_d_r2);
      c0[1] = (2 * my * mx * d_norm_inv_d_r2);
      c0[2] = 2 * mx * (k * d_norm_inv_d_r2 + d_k_d_r2);
      c0 /= fx * norm;

      c1.setZero();
      c1[0] = (2 * my * mx * d_norm_inv_d_r2);
      c1[1] = (1 + 2 * my * my * d_norm_inv_d_r2);
      c1[2] = 2 * my * (k * d_norm_inv_d_r2 + d_k_d_r2);
      c1 /= fy * norm;

      if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t>) {
        BASALT_ASSERT(d_p3d_d_proj);
        d_p3d_d_proj->col(0) = c0;
        d_p3d_d_proj->col(1) = c1;
      } else {
        UNUSED(d_p3d_d_proj);
      }

      if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
        BASALT_ASSERT(d_p3d_d_param);
        d_p3d_d_param->setZero();
        // Columns 0..3: derivatives w.r.t. fx, fy, cx, cy reuse c0 / c1.
        (*d_p3d_d_param).col(0) = -c0 * mx;
        (*d_p3d_d_param).col(1) = -c1 * my;

        (*d_p3d_d_param).col(2) = -c0;
        (*d_p3d_d_param).col(3) = -c1;

        // Columns 4..5: derivatives w.r.t. alpha and beta.
        const Scalar d_k_d_alpha =
            (-Scalar(2) * alpha * beta * r2 * tmp2 -
             (tmp_sqrt - alpha * beta * r2 / tmp_sqrt - Scalar(1)) * tmp1) /
            tmp2_2;

        const Scalar d_k_d_beta =
            alpha * r2 *
            (Scalar(0.5) * tmp1 * (alpha - gamma) / tmp_sqrt - alpha * tmp2) /
            tmp2_2;

        const Scalar d_norm_inv_d_k = -k / norm2;

        (*d_p3d_d_param)(0, 4) = mx * d_norm_inv_d_k * d_k_d_alpha;
        (*d_p3d_d_param)(1, 4) = my * d_norm_inv_d_k * d_k_d_alpha;
        (*d_p3d_d_param)(2, 4) = (k * d_norm_inv_d_k + 1) * d_k_d_alpha;
        d_p3d_d_param->col(4) /= norm;

        (*d_p3d_d_param)(0, 5) = mx * d_norm_inv_d_k * d_k_d_beta;
        (*d_p3d_d_param)(1, 5) = my * d_norm_inv_d_k * d_k_d_beta;
        (*d_p3d_d_param)(2, 5) = (k * d_norm_inv_d_k + 1) * d_k_d_beta;
        d_p3d_d_param->col(5) /= norm;
      } else {
        UNUSED(d_p3d_d_param);
      }
    } else {
      UNUSED(d_p3d_d_proj);
      UNUSED(d_p3d_d_param);
    }

    return is_valid;
  }

  /// @brief Set parameters from initialization
  ///
  /// Initializes the camera model to \f$ \left[f_x, f_y, c_x, c_y, 0.5, 1
  /// \right]^T \f$
  ///
  /// @param[in] init vector [fx, fy, cx, cy]
  inline void setFromInit(const Vec4& init) {
    param_[0] = init[0];
    param_[1] = init[1];
    param_[2] = init[2];
    param_[3] = init[3];
    param_[4] = 0.5;
    param_[5] = 1;
  }

  /// @brief Increment intrinsic parameters by inc and clamp the values to the
  /// valid range
  ///
  /// @param[in] inc increment vector
  void operator+=(const VecN& inc) {
    param_ += inc;
    // alpha in [0, 1], beta > 0
    param_[4] = std::clamp(param_[4], Scalar(0), Scalar(1));
    // Keep beta strictly positive (the model requires beta > 0).
    if (param_[5] < Sophus::Constants<Scalar>::epsilonSqrt()) {
      param_[5] = Sophus::Constants<Scalar>::epsilonSqrt();
    }
  }

  /// @brief Returns a const reference to the intrinsic parameters vector
  ///
  /// The order is the following: \f$ \left[f_x, f_y, c_x, c_y, \alpha, \beta
  /// \right]^T \f$
  /// @return const reference to the intrinsic parameters vector
  const VecN& getParam() const { return param_; }

  /// @brief Projections used for unit-tests
  static Eigen::aligned_vector<ExtendedUnifiedCamera> getTestProjections() {
    Eigen::aligned_vector<ExtendedUnifiedCamera> res;

    VecN vec1;

    // Euroc
    vec1 << 460.76484651566468, 459.4051018049483, 365.8937161309615,
        249.33499869752445, 0.5903365915227143, 1.127468196965374;
    res.emplace_back(vec1);

    // TUM VI 512
    vec1 << 191.14799816648748, 191.13150946585135, 254.95857715233118,
        256.8815466235898, 0.6291060871161842, 1.0418067403139693;
    res.emplace_back(vec1);

    return res;
  }

  /// @brief Resolutions used for unit-tests
  static Eigen::aligned_vector<Eigen::Vector2i> getTestResolutions() {
    Eigen::aligned_vector<Eigen::Vector2i> res;
    res.emplace_back(752, 480);
    res.emplace_back(512, 512);
    return res;
  }

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

 private:
  VecN param_;  ///< Intrinsic parameters: [fx, fy, cx, cy, alpha, beta].
};
} // namespace basalt

View File

@@ -0,0 +1,435 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Implementation of field-of-view camera model
*/
#pragma once
#include <basalt/camera/camera_static_assert.hpp>
#include <basalt/utils/sophus_utils.hpp>
namespace basalt {
using std::sqrt;
/// @brief Field-of-View camera model
///
/// \image html fov.png
/// This model has N=5 parameters \f$ \mathbf{i} = \left[f_x, f_y, c_x, c_y,
/// w \right]^T \f$. See \ref project and \ref unproject
/// functions for more details.
template <typename Scalar_ = double>
class FovCamera {
public:
using Scalar = Scalar_;
static constexpr int N = 5; ///< Number of intrinsic parameters.
using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
using VecN = Eigen::Matrix<Scalar, N, 1>;
using Mat24 = Eigen::Matrix<Scalar, 2, 4>;
using Mat2N = Eigen::Matrix<Scalar, 2, N>;
using Mat42 = Eigen::Matrix<Scalar, 4, 2>;
using Mat4N = Eigen::Matrix<Scalar, 4, N>;
using Mat44 = Eigen::Matrix<Scalar, 4, 4>;
/// @brief Default constructor with zero intrinsics
FovCamera() { param_.setZero(); }
/// @brief Construct camera model with given vector of intrinsics
///
/// @param[in] p vector of intrinsic parameters [fx, fy, cx, cy, w]
explicit FovCamera(const VecN& p) { param_ = p; }
/// @brief Cast to different scalar type
template <class Scalar2>
FovCamera<Scalar2> cast() const {
return FovCamera<Scalar2>(param_.template cast<Scalar2>());
}
/// @brief Camera model name
///
/// @return "fov"
static std::string getName() { return "fov"; }
/// @brief Project the point and optionally compute Jacobians
///
/// Projection function is defined as follows:
/// \f{align}{
/// \DeclareMathOperator{\atantwo}{atan2}
/// \pi(\mathbf{x}, \mathbf{i}) &=
/// \begin{bmatrix} f_x r_d \frac{x} { r_u }
/// \\ f_y r_d \frac{y} { r_u }
/// \\ \end{bmatrix}
/// +
/// \begin{bmatrix}
/// c_x
/// \\ c_y
/// \\ \end{bmatrix},
/// \\ r_u &= \sqrt{x^2 + y^2},
/// \\ r_d &= \frac{\atantwo(2 r_u \tan{\frac{w}{2}}, z)}{w}.
/// \f}
///
/// @param[in] p3d point to project
/// @param[out] proj result of projection
/// @param[out] d_proj_d_p3d if not nullptr computed Jacobian of projection
/// with respect to p3d
/// @param[out] d_proj_d_param point if not nullptr computed Jacobian of
/// projection with respect to intrinsic parameters
/// @return if projection is valid
template <class DerivedPoint3D, class DerivedPoint2D,
class DerivedJ3D = std::nullptr_t,
class DerivedJparam = std::nullptr_t>
inline bool project(const Eigen::MatrixBase<DerivedPoint3D>& p3d,
Eigen::MatrixBase<DerivedPoint2D>& proj,
DerivedJ3D d_proj_d_p3d = nullptr,
DerivedJparam d_proj_d_param = nullptr) const {
checkProjectionDerivedTypes<DerivedPoint3D, DerivedPoint2D, DerivedJ3D,
DerivedJparam, N>();
const typename EvalOrReference<DerivedPoint3D>::Type p3d_eval(p3d);
const Scalar& fx = param_[0];
const Scalar& fy = param_[1];
const Scalar& cx = param_[2];
const Scalar& cy = param_[3];
const Scalar& w = param_[4];
const Scalar& x = p3d_eval[0];
const Scalar& y = p3d_eval[1];
const Scalar& z = p3d_eval[2];
Scalar r2 = x * x + y * y;
Scalar r = sqrt(r2);
Scalar z2 = z * z;
const Scalar tanwhalf = std::tan(w / 2);
const Scalar atan_wrd = std::atan2(2 * tanwhalf * r, z);
Scalar rd = Scalar(1);
Scalar d_rd_d_w = Scalar(0);
Scalar d_rd_d_x = Scalar(0);
Scalar d_rd_d_y = Scalar(0);
Scalar d_rd_d_z = Scalar(0);
Scalar tmp1 = Scalar(1) / std::cos(w / 2);
Scalar d_tanwhalf_d_w = Scalar(0.5) * tmp1 * tmp1;
Scalar tmp = (z2 + Scalar(4) * tanwhalf * tanwhalf * r2);
Scalar d_atan_wrd_d_w = Scalar(2) * r * d_tanwhalf_d_w * z / tmp;
bool is_valid = true;
if (w > Sophus::Constants<Scalar>::epsilonSqrt()) {
if (r2 < Sophus::Constants<Scalar>::epsilonSqrt()) {
if (z < Sophus::Constants<Scalar>::epsilonSqrt()) {
is_valid = false;
}
rd = Scalar(2) * tanwhalf / w;
d_rd_d_w = Scalar(2) * (d_tanwhalf_d_w * w - tanwhalf) / (w * w);
} else {
rd = atan_wrd / (r * w);
d_rd_d_w = (d_atan_wrd_d_w * w - atan_wrd) / (r * w * w);
const Scalar d_r_d_x = x / r;
const Scalar d_r_d_y = y / r;
const Scalar d_atan_wrd_d_x = Scalar(2) * tanwhalf * d_r_d_x * z / tmp;
const Scalar d_atan_wrd_d_y = Scalar(2) * tanwhalf * d_r_d_y * z / tmp;
const Scalar d_atan_wrd_d_z = -Scalar(2) * tanwhalf * r / tmp;
d_rd_d_x = (d_atan_wrd_d_x * r - d_r_d_x * atan_wrd) / (r * r * w);
d_rd_d_y = (d_atan_wrd_d_y * r - d_r_d_y * atan_wrd) / (r * r * w);
d_rd_d_z = d_atan_wrd_d_z / (r * w);
}
}
const Scalar mx = x * rd;
const Scalar my = y * rd;
proj[0] = fx * mx + cx;
proj[1] = fy * my + cy;
if constexpr (!std::is_same_v<DerivedJ3D, std::nullptr_t>) {
BASALT_ASSERT(d_proj_d_p3d);
d_proj_d_p3d->setZero();
(*d_proj_d_p3d)(0, 0) = fx * (d_rd_d_x * x + rd);
(*d_proj_d_p3d)(0, 1) = fx * d_rd_d_y * x;
(*d_proj_d_p3d)(0, 2) = fx * d_rd_d_z * x;
(*d_proj_d_p3d)(1, 0) = fy * d_rd_d_x * y;
(*d_proj_d_p3d)(1, 1) = fy * (d_rd_d_y * y + rd);
(*d_proj_d_p3d)(1, 2) = fy * d_rd_d_z * y;
} else {
UNUSED(d_proj_d_p3d);
UNUSED(d_rd_d_x);
UNUSED(d_rd_d_y);
UNUSED(d_rd_d_z);
}
if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
BASALT_ASSERT(d_proj_d_param);
d_proj_d_param->setZero();
(*d_proj_d_param)(0, 0) = mx;
(*d_proj_d_param)(0, 2) = Scalar(1);
(*d_proj_d_param)(1, 1) = my;
(*d_proj_d_param)(1, 3) = Scalar(1);
(*d_proj_d_param)(0, 4) = fx * x * d_rd_d_w;
(*d_proj_d_param)(1, 4) = fy * y * d_rd_d_w;
} else {
UNUSED(d_proj_d_param);
UNUSED(d_rd_d_w);
}
return is_valid;
}
/// @brief Unproject the point and optionally compute Jacobians
///
/// The unprojection function is computed as follows: \f{align}{
/// \pi^{-1}(\mathbf{u}, \mathbf{i}) &=
/// \begin{bmatrix}
/// m_x \frac{\sin(r_d w)}{ 2 r_d \tan{\frac{w}{2}}}
/// \\ m_y \frac{\sin(r_d w)}{ 2 r_d \tan{\frac{w}{2}}}
/// \\ \cos(r_d w)
/// \\ \end{bmatrix},
/// \\ m_x &= \frac{u - c_x}{f_x},
/// \\ m_y &= \frac{v - c_y}{f_y},
/// \\ r_d &= \sqrt{m_x^2 + m_y^2}.
/// \f}
///
///
/// @param[in] proj point to unproject
/// @param[out] p3d result of unprojection
/// @param[out] d_p3d_d_proj if not nullptr computed Jacobian of unprojection
/// with respect to proj
/// @param[out] d_p3d_d_param point if not nullptr computed Jacobian of
/// unprojection with respect to intrinsic parameters
/// @return if unprojection is valid
template <class DerivedPoint2D, class DerivedPoint3D,
class DerivedJ2D = std::nullptr_t,
class DerivedJparam = std::nullptr_t>
inline bool unproject(const Eigen::MatrixBase<DerivedPoint2D>& proj,
Eigen::MatrixBase<DerivedPoint3D>& p3d,
DerivedJ2D d_p3d_d_proj = nullptr,
DerivedJparam d_p3d_d_param = nullptr) const {
checkUnprojectionDerivedTypes<DerivedPoint2D, DerivedPoint3D, DerivedJ2D,
DerivedJparam, N>();
const typename EvalOrReference<DerivedPoint2D>::Type proj_eval(proj);
const Scalar& fx = param_[0];
const Scalar& fy = param_[1];
const Scalar& cx = param_[2];
const Scalar& cy = param_[3];
const Scalar& w = param_[4];
const Scalar tan_w_2 = std::tan(w / Scalar(2));
const Scalar mul2tanwby2 = tan_w_2 * Scalar(2);
const Scalar mx = (proj_eval[0] - cx) / fx;
const Scalar my = (proj_eval[1] - cy) / fy;
const Scalar rd = sqrt(mx * mx + my * my);
Scalar ru = Scalar(1);
Scalar sin_rd_w = Scalar(0);
Scalar cos_rd_w = Scalar(1);
Scalar d_ru_d_rd = Scalar(0);
Scalar rd_inv = Scalar(1);
if (mul2tanwby2 > Sophus::Constants<Scalar>::epsilonSqrt() &&
rd > Sophus::Constants<Scalar>::epsilonSqrt()) {
sin_rd_w = std::sin(rd * w);
cos_rd_w = std::cos(rd * w);
ru = sin_rd_w / (rd * mul2tanwby2);
rd_inv = Scalar(1) / rd;
d_ru_d_rd =
(w * cos_rd_w * rd - sin_rd_w) * rd_inv * rd_inv / mul2tanwby2;
}
p3d.setZero();
p3d[0] = mx * ru;
p3d[1] = my * ru;
p3d[2] = cos_rd_w;
if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t> ||
!std::is_same_v<DerivedJparam, std::nullptr_t>) {
constexpr int SIZE_3D = DerivedPoint3D::SizeAtCompileTime;
Eigen::Matrix<Scalar, SIZE_3D, 1> c0, c1;
c0.setZero();
c0(0) = (ru + mx * d_ru_d_rd * mx * rd_inv) / fx;
c0(1) = my * d_ru_d_rd * mx * rd_inv / fx;
c0(2) = -sin_rd_w * w * mx * rd_inv / fx;
c1.setZero();
c1(0) = my * d_ru_d_rd * mx * rd_inv / fy;
c1(1) = (ru + my * d_ru_d_rd * my * rd_inv) / fy;
c1(2) = -sin_rd_w * w * my * rd_inv / fy;
if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t>) {
BASALT_ASSERT(d_p3d_d_proj);
d_p3d_d_proj->col(0) = c0;
d_p3d_d_proj->col(1) = c1;
} else {
UNUSED(d_p3d_d_proj);
}
if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
BASALT_ASSERT(d_p3d_d_param);
d_p3d_d_param->setZero();
d_p3d_d_param->col(2) = -c0;
d_p3d_d_param->col(3) = -c1;
d_p3d_d_param->col(0) = -c0 * mx;
d_p3d_d_param->col(1) = -c1 * my;
Scalar tmp = (cos_rd_w - (tan_w_2 * tan_w_2 + Scalar(1)) * sin_rd_w *
rd_inv / (2 * tan_w_2)) /
mul2tanwby2;
(*d_p3d_d_param)(0, 4) = mx * tmp;
(*d_p3d_d_param)(1, 4) = my * tmp;
(*d_p3d_d_param)(2, 4) = -sin_rd_w * rd;
} else {
UNUSED(d_p3d_d_param);
UNUSED(d_ru_d_rd);
}
Scalar norm = p3d.norm();
Scalar norm_inv = Scalar(1) / norm;
Scalar norm_inv2 = norm_inv * norm_inv;
Scalar norm_inv3 = norm_inv2 * norm_inv;
Eigen::Matrix<Scalar, SIZE_3D, SIZE_3D> d_p_norm_d_p;
d_p_norm_d_p.setZero();
d_p_norm_d_p(0, 0) = norm_inv * (Scalar(1) - p3d[0] * p3d[0] * norm_inv2);
d_p_norm_d_p(1, 0) = -p3d[1] * p3d[0] * norm_inv3;
d_p_norm_d_p(2, 0) = -p3d[2] * p3d[0] * norm_inv3;
d_p_norm_d_p(0, 1) = -p3d[1] * p3d[0] * norm_inv3;
d_p_norm_d_p(1, 1) = norm_inv * (Scalar(1) - p3d[1] * p3d[1] * norm_inv2);
d_p_norm_d_p(2, 1) = -p3d[1] * p3d[2] * norm_inv3;
d_p_norm_d_p(0, 2) = -p3d[2] * p3d[0] * norm_inv3;
d_p_norm_d_p(1, 2) = -p3d[2] * p3d[1] * norm_inv3;
d_p_norm_d_p(2, 2) = norm_inv * (Scalar(1) - p3d[2] * p3d[2] * norm_inv2);
if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t>) {
(*d_p3d_d_proj) = d_p_norm_d_p * (*d_p3d_d_proj);
}
if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
(*d_p3d_d_param) = d_p_norm_d_p * (*d_p3d_d_param);
}
} else {
UNUSED(d_p3d_d_proj);
UNUSED(d_p3d_d_param);
UNUSED(d_ru_d_rd);
}
p3d /= p3d.norm();
return true;
}
/// @brief Set parameters from initialization
///
/// Initializes the camera model to \f$ \left[f_x, f_y, c_x, c_y, 1
/// \right]^T \f$
///
/// @param[in] init vector [fx, fy, cx, cy]
inline void setFromInit(const Vec4& init) {
param_[0] = init[0];
param_[1] = init[1];
param_[2] = init[2];
param_[3] = init[3];
param_[4] = 1;
}
/// @brief Increment intrinsic parameters by inc
///
/// @param[in] inc increment vector
void operator+=(const VecN& inc) { param_ += inc; }
/// @brief Returns a const reference to the intrinsic parameters vector
///
/// The order is following: \f$ \left[f_x, f_y, c_x, c_y, k1, k2, k3, k4
/// \right]^T \f$
/// @return const reference to the intrinsic parameters vector
const VecN& getParam() const { return param_; }
/// @brief Projections used for unit-tests
static Eigen::aligned_vector<FovCamera> getTestProjections() {
  // Single parameter set calibrated on the EuRoC dataset.
  VecN euroc_params;
  euroc_params << 379.045, 379.008, 505.512, 509.969, 0.9259487501905697;

  Eigen::aligned_vector<FovCamera> cameras;
  cameras.emplace_back(euroc_params);
  return cameras;
}
/// @brief Resolutions used for unit-tests
static Eigen::aligned_vector<Eigen::Vector2i> getTestResolutions() {
  // Matches the EuRoC image size used by getTestProjections().
  Eigen::aligned_vector<Eigen::Vector2i> resolutions;
  resolutions.emplace_back(752, 480);
  return resolutions;
}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
private:
VecN param_;
};
} // namespace basalt

View File

@@ -0,0 +1,364 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Implementation of generic camera model
*/
#pragma once
#include <basalt/camera/bal_camera.hpp>
#include <basalt/camera/double_sphere_camera.hpp>
#include <basalt/camera/extended_camera.hpp>
#include <basalt/camera/fov_camera.hpp>
#include <basalt/camera/kannala_brandt_camera4.hpp>
#include <basalt/camera/pinhole_camera.hpp>
#include <basalt/camera/unified_camera.hpp>
#include <variant>
namespace basalt {
using std::sqrt;
/// @brief Generic camera model that can store different camera models
///
/// Particular class of camera model is stored as \ref variant and can be casted
/// to specific type using std::visit.
template <typename Scalar_>
class GenericCamera {
  using Scalar = Scalar_;

  using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
  using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
  using VecX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;

  using Mat24 = Eigen::Matrix<Scalar, 2, 4>;
  using Mat42 = Eigen::Matrix<Scalar, 4, 2>;
  using Mat4 = Eigen::Matrix<Scalar, 4, 4>;

  using VariantT =
      std::variant<ExtendedUnifiedCamera<Scalar>, DoubleSphereCamera<Scalar>,
                   KannalaBrandtCamera4<Scalar>, UnifiedCamera<Scalar>,
                   PinholeCamera<Scalar>>;  ///< Possible variants of camera
                                            ///< models.

 public:
  /// @brief Cast to different scalar type
  ///
  /// The stored model is cast element-wise and wrapped into a new
  /// GenericCamera of the target scalar type.
  template <typename Scalar2>
  inline GenericCamera<Scalar2> cast() const {
    GenericCamera<Scalar2> res;
    std::visit([&](const auto& v) { res.variant = v.template cast<Scalar2>(); },
               variant);
    return res;
  }

  /// @brief Number of intrinsic parameters of the currently stored model
  ///
  /// Reads the static constexpr member N of the active variant alternative.
  inline int getN() const {
    int res;
    std::visit([&](const auto& v) { res = v.N; }, variant);
    return res;
  }

  /// @brief Camera model name of the currently stored model (e.g. "pinhole")
  inline std::string getName() const {
    std::string res;
    std::visit([&](const auto& v) { res = v.getName(); }, variant);
    return res;
  }

  /// @brief Set parameters from initialization
  ///
  /// Forwards to setFromInit() of the stored model, which sets [fx, fy, cx,
  /// cy] and resets the remaining parameters to that model's defaults.
  ///
  /// @param[in] init vector [fx, fy, cx, cy]
  inline void setFromInit(const Vec4& init) {
    std::visit([&](auto& v) { return v.setFromInit(init); }, variant);
  }

  /// @brief Increment intrinsic parameters by inc and if necessary clamp the
  /// values to the valid range
  ///
  /// NOTE(review): any clamping is delegated to the stored model's
  /// operator+=; the models visible in this header collection apply the
  /// plain increment only -- confirm which models actually clamp.
  inline void applyInc(const VecX& inc) {
    std::visit([&](auto& v) { return v += inc; }, variant);
  }

  /// @brief Returns a vector of intrinsic parameters
  ///
  /// The order of parameters depends on the stored model.
  /// @return vector of intrinsic parameters vector
  inline VecX getParam() const {
    VecX res;
    std::visit([&](const auto& cam) { res = cam.getParam(); }, variant);
    return res;
  }

  /// @brief Project a single point and optionally compute Jacobian
  ///
  /// **SLOW** function, as it dispatches on the stored variant alternative
  /// (std::visit) for every projection.
  ///
  /// @param[in] p3d point to project
  /// @param[out] proj result of projection
  /// @param[out] d_proj_d_p3d if not nullptr computed Jacobian of projection
  /// with respect to p3d
  /// @return if projection is valid
  template <typename DerivedJ3DPtr = std::nullptr_t>
  inline bool project(const Vec4& p3d, Vec2& proj,
                      DerivedJ3DPtr d_proj_d_p3d = nullptr) const {
    // Unlike the concrete models, the Jacobian argument here is a raw
    // pointer type; verify its pointee has the expected 2x4 shape.
    if constexpr (!std::is_same_v<DerivedJ3DPtr, std::nullptr_t>) {
      static_assert(std::is_pointer_v<DerivedJ3DPtr>);
      using DerivedJ3D = typename std::remove_pointer<DerivedJ3DPtr>::type;
      EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(DerivedJ3D, 2, 4);
    }
    bool res;
    std::visit(
        [&](const auto& cam) { res = cam.project(p3d, proj, d_proj_d_p3d); },
        variant);
    return res;
  }

  /// @brief Unproject a single point and optionally compute Jacobian
  ///
  /// **SLOW** function, as it dispatches on the stored variant alternative
  /// (std::visit) for every unprojection.
  ///
  /// @param[in] proj point to unproject
  /// @param[out] p3d result of unprojection
  /// @param[out] d_p3d_d_proj if not nullptr computed Jacobian of unprojection
  /// with respect to proj
  /// @return if unprojection is valid
  template <typename DerivedJ2DPtr = std::nullptr_t>
  inline bool unproject(const Vec2& proj, Vec4& p3d,
                        DerivedJ2DPtr d_p3d_d_proj = nullptr) const {
    if constexpr (!std::is_same_v<DerivedJ2DPtr, std::nullptr_t>) {
      static_assert(std::is_pointer_v<DerivedJ2DPtr>);
      using DerivedJ2D = typename std::remove_pointer<DerivedJ2DPtr>::type;
      EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(DerivedJ2D, 4, 2);
    }
    bool res;
    std::visit(
        [&](const auto& cam) { res = cam.unproject(proj, p3d, d_p3d_d_proj); },
        variant);
    return res;
  }

  /// @brief Project a single point and optionally compute Jacobian
  ///
  /// **SLOW** function, as it dispatches on the stored variant alternative
  /// (std::visit) for every projection.
  ///
  /// @param[in] p3d point to project
  /// @param[out] proj result of projection
  /// @param[out] d_proj_d_p3d if not nullptr computed Jacobian of projection
  /// with respect to p3d
  /// @return if projection is valid
  template <typename DerivedJ3DPtr = std::nullptr_t>
  inline bool project(const Vec3& p3d, Vec2& proj,
                      DerivedJ3DPtr d_proj_d_p3d = nullptr) const {
    if constexpr (!std::is_same_v<DerivedJ3DPtr, std::nullptr_t>) {
      static_assert(std::is_pointer_v<DerivedJ3DPtr>);
      using DerivedJ3D = typename std::remove_pointer<DerivedJ3DPtr>::type;
      EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(DerivedJ3D, 2, 3);
    }
    bool res;
    std::visit(
        [&](const auto& cam) { res = cam.project(p3d, proj, d_proj_d_p3d); },
        variant);
    return res;
  }

  /// @brief Unproject a single point and optionally compute Jacobian
  ///
  /// **SLOW** function, as it dispatches on the stored variant alternative
  /// (std::visit) for every unprojection.
  ///
  /// @param[in] proj point to unproject
  /// @param[out] p3d result of unprojection
  /// @param[out] d_p3d_d_proj if not nullptr computed Jacobian of unprojection
  /// with respect to proj
  /// @return if unprojection is valid
  template <typename DerivedJ2DPtr = std::nullptr_t>
  inline bool unproject(const Vec2& proj, Vec3& p3d,
                        DerivedJ2DPtr d_p3d_d_proj = nullptr) const {
    if constexpr (!std::is_same_v<DerivedJ2DPtr, std::nullptr_t>) {
      static_assert(std::is_pointer_v<DerivedJ2DPtr>);
      using DerivedJ2D = typename std::remove_pointer<DerivedJ2DPtr>::type;
      EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(DerivedJ2D, 3, 2);
    }
    bool res;
    std::visit(
        [&](const auto& cam) { res = cam.unproject(proj, p3d, d_p3d_d_proj); },
        variant);
    return res;
  }

  /// @brief Project a vector of points
  ///
  /// @param[in] p3d points to project
  /// @param[in] T_c_w transformation from world to camera frame that should be
  /// applied to points before projection
  /// @param[out] proj results of projection
  /// @param[out] proj_success if projection is valid
  inline void project(const Eigen::aligned_vector<Vec3>& p3d, const Mat4& T_c_w,
                      Eigen::aligned_vector<Vec2>& proj,
                      std::vector<bool>& proj_success) const {
    std::visit(
        [&](const auto& cam) {
          proj.resize(p3d.size());
          proj_success.resize(p3d.size());
          for (size_t i = 0; i < p3d.size(); i++) {
            // Promote to homogeneous coordinates so the 4x4 transform
            // applies to the 3-vector.
            proj_success[i] =
                cam.project(T_c_w * p3d[i].homogeneous(), proj[i]);
          }
        },
        variant);
  }

  /// @brief Project a vector of points
  ///
  /// @param[in] p3d points to project
  /// @param[in] T_c_w transformation from world to camera frame that should be
  /// applied to points before projection
  /// @param[out] proj results of projection
  /// @param[out] proj_success if projection is valid
  inline void project(const Eigen::aligned_vector<Vec4>& p3d, const Mat4& T_c_w,
                      Eigen::aligned_vector<Vec2>& proj,
                      std::vector<bool>& proj_success) const {
    std::visit(
        [&](const auto& cam) {
          proj.resize(p3d.size());
          proj_success.resize(p3d.size());
          for (size_t i = 0; i < p3d.size(); i++) {
            proj_success[i] = cam.project(T_c_w * p3d[i], proj[i]);
          }
        },
        variant);
  }

  /// @brief Project a vector of points
  ///
  /// @param[in] p3d points to project
  /// @param[out] proj results of projection
  /// @param[out] proj_success if projection is valid
  inline void project(const Eigen::aligned_vector<Vec4>& p3d,
                      Eigen::aligned_vector<Vec2>& proj,
                      std::vector<bool>& proj_success) const {
    std::visit(
        [&](const auto& cam) {
          proj.resize(p3d.size());
          proj_success.resize(p3d.size());
          for (size_t i = 0; i < p3d.size(); i++) {
            proj_success[i] = cam.project(p3d[i], proj[i]);
          }
        },
        variant);
  }

  /// @brief Project a vector of points, compute polar and azimuthal angles
  ///
  /// @param[in] p3d points to project
  /// @param[in] T_c_w transformation from world to camera frame that should be
  /// applied to points before projection
  /// @param[out] proj results of projection
  /// @param[out] proj_success if projection is valid
  /// @param[out] polar_azimuthal_angle per point: [0] polar angle to the
  /// optical axis, [1] azimuthal angle (see NOTE below)
  inline void project(
      const Eigen::aligned_vector<Vec4>& p3d, const Mat4& T_c_w,
      Eigen::aligned_vector<Vec2>& proj, std::vector<bool>& proj_success,
      Eigen::aligned_vector<Vec2>& polar_azimuthal_angle) const {
    std::visit(
        [&](const auto& cam) {
          proj.resize(p3d.size());
          proj_success.resize(p3d.size());
          polar_azimuthal_angle.resize(p3d.size());
          for (size_t i = 0; i < p3d.size(); i++) {
            Vec4 p3dt = T_c_w * p3d[i];
            proj_success[i] = cam.project(p3dt, proj[i]);

            Scalar r2 = p3dt[0] * p3dt[0] + p3dt[1] * p3dt[1];
            Scalar r = std::sqrt(r2);

            // Polar angle between the point and the optical (z) axis.
            polar_azimuthal_angle[i][0] = std::atan2(r, p3dt[2]);
            // NOTE(review): atan2(x, y) is an unusual argument order for an
            // azimuth (commonly atan2(y, x)) -- confirm the intended
            // convention before relying on the sign/quadrant.
            polar_azimuthal_angle[i][1] = std::atan2(p3dt[0], p3dt[1]);
          }
        },
        variant);
  }

  /// @brief Unproject a vector of points
  ///
  /// @param[in] proj points to unproject
  /// @param[out] p3d results of unprojection
  /// @param[out] unproj_success if unprojection is valid
  inline void unproject(const Eigen::aligned_vector<Vec2>& proj,
                        Eigen::aligned_vector<Vec4>& p3d,
                        std::vector<bool>& unproj_success) const {
    std::visit(
        [&](const auto& cam) {
          p3d.resize(proj.size());
          unproj_success.resize(proj.size());
          // p3d was just resized to proj.size(), so this bound is the same
          // as proj.size().
          for (size_t i = 0; i < p3d.size(); i++) {
            unproj_success[i] = cam.unproject(proj[i], p3d[i]);
          }
        },
        variant);
  }

  /// @brief Construct a particular type of camera model from name
  ///
  /// If no alternative matches @p name, the returned object keeps its
  /// default state (the variant's first alternative, default-constructed).
  static GenericCamera<Scalar> fromString(const std::string& name) {
    GenericCamera<Scalar> res;

    constexpr size_t VARIANT_SIZE = std::variant_size<VariantT>::value;
    // Walk alternative indices from the last down to 0.
    visitAllTypes<VARIANT_SIZE - 1>(res, name);

    return res;
  }

  VariantT variant;  ///< Currently stored camera model.

 private:
  /// @brief Iterate over all possible types of the variant and construct that
  /// type that has a matching name
  ///
  /// Compile-time recursion over alternative indices I, I-1, ..., 0;
  /// terminates when I goes negative.
  template <int I>
  static void visitAllTypes(GenericCamera<Scalar>& res,
                            const std::string& name) {
    if constexpr (I >= 0) {
      using cam_t = typename std::variant_alternative<I, VariantT>::type;
      if (cam_t::getName() == name) {
        cam_t val;
        res.variant = val;
      }
      visitAllTypes<I - 1>(res, name);
    }
  }
};
} // namespace basalt

View File

@@ -0,0 +1,531 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Implementation of Kannala-Brandt camera model
*/
#pragma once
#include <basalt/camera/camera_static_assert.hpp>
#include <basalt/utils/sophus_utils.hpp>
namespace basalt {
using std::cos;
using std::sin;
using std::sqrt;
/// @brief Kannala-Brandt camera model
///
/// \image html kb.png
/// The displacement of the projection from the optical center is proportional
/// to \f$d(\theta)\f$, which is a polynomial function of the angle between the
/// point and optical axis \f$\theta\f$.
/// This model has N=8 parameters \f$ \mathbf{i} = \left[f_x, f_y, c_x, c_y,
/// k_1, k_2, k_3, k_4 \right]^T \f$. See \ref project and \ref unproject
/// functions for more details. This model corresponds to fisheye camera model
/// in OpenCV.
template <typename Scalar_ = double>
class KannalaBrandtCamera4 {
 public:
  using Scalar = Scalar_;
  static constexpr int N = 8;  ///< Number of intrinsic parameters.

  using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
  using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
  using VecN = Eigen::Matrix<Scalar, N, 1>;

  using Mat24 = Eigen::Matrix<Scalar, 2, 4>;
  using Mat2N = Eigen::Matrix<Scalar, 2, N>;

  using Mat42 = Eigen::Matrix<Scalar, 4, 2>;
  using Mat4N = Eigen::Matrix<Scalar, 4, N>;

  /// @brief Default constructor with zero intrinsics
  KannalaBrandtCamera4() { param_.setZero(); }

  /// @brief Construct camera model with given vector of intrinsics
  ///
  /// @param[in] p vector of intrinsic parameters [fx, fy, cx, cy, k1, k2, k3,
  /// k4]
  explicit KannalaBrandtCamera4(const VecN& p) { param_ = p; }

  /// @brief Camera model name
  ///
  /// @return "kb4"
  static std::string getName() { return "kb4"; }

  /// @brief Cast to different scalar type
  template <class Scalar2>
  KannalaBrandtCamera4<Scalar2> cast() const {
    return KannalaBrandtCamera4<Scalar2>(param_.template cast<Scalar2>());
  }

  /// @brief Project the point and optionally compute Jacobians
  ///
  /// Projection function is defined as follows:
  /// \f{align}{
  /// \DeclareMathOperator{\atantwo}{atan2}
  /// \pi(\mathbf{x}, \mathbf{i}) &=
  /// \begin{bmatrix}
  /// f_x ~ d(\theta) ~ \frac{x}{r}
  /// \\ f_y ~ d(\theta) ~ \frac{y}{r}
  /// \\ \end{bmatrix}
  /// +
  /// \begin{bmatrix}
  /// c_x
  /// \\ c_y
  /// \\ \end{bmatrix},
  /// \\ r &= \sqrt{x^2 + y^2},
  /// \\ \theta &= \atantwo(r, z),
  /// \\ d(\theta) &= \theta + k_1 \theta^3 + k_2 \theta^5 + k_3 \theta^7 + k_4
  /// \theta^9.
  /// \f}
  ///
  /// @param[in] p3d point to project
  /// @param[out] proj result of projection
  /// @param[out] d_proj_d_p3d if not nullptr computed Jacobian of projection
  /// with respect to p3d
  /// @param[out] d_proj_d_param point if not nullptr computed Jacobian of
  /// projection with respect to intrinsic parameters
  /// @return if projection is valid
  template <class DerivedPoint3D, class DerivedPoint2D,
            class DerivedJ3D = std::nullptr_t,
            class DerivedJparam = std::nullptr_t>
  inline bool project(const Eigen::MatrixBase<DerivedPoint3D>& p3d,
                      Eigen::MatrixBase<DerivedPoint2D>& proj,
                      DerivedJ3D d_proj_d_p3d = nullptr,
                      DerivedJparam d_proj_d_param = nullptr) const {
    checkProjectionDerivedTypes<DerivedPoint3D, DerivedPoint2D, DerivedJ3D,
                                DerivedJparam, N>();

    const typename EvalOrReference<DerivedPoint3D>::Type p3d_eval(p3d);

    const Scalar& fx = param_[0];
    const Scalar& fy = param_[1];
    const Scalar& cx = param_[2];
    const Scalar& cy = param_[3];
    const Scalar& k1 = param_[4];
    const Scalar& k2 = param_[5];
    const Scalar& k3 = param_[6];
    const Scalar& k4 = param_[7];

    const Scalar& x = p3d_eval[0];
    const Scalar& y = p3d_eval[1];
    const Scalar& z = p3d_eval[2];

    const Scalar r2 = x * x + y * y;
    const Scalar r = sqrt(r2);

    bool is_valid = true;

    if (r > Sophus::Constants<Scalar>::epsilonSqrt()) {
      const Scalar theta = atan2(r, z);
      const Scalar theta2 = theta * theta;

      // d(theta) = theta * (1 + k1 theta^2 + k2 theta^4 + k3 theta^6 +
      // k4 theta^8), evaluated with Horner's scheme.
      Scalar r_theta = k4 * theta2;
      r_theta += k3;
      r_theta *= theta2;
      r_theta += k2;
      r_theta *= theta2;
      r_theta += k1;
      r_theta *= theta2;
      r_theta += 1;
      r_theta *= theta;

      const Scalar mx = x * r_theta / r;
      const Scalar my = y * r_theta / r;

      proj[0] = fx * mx + cx;
      proj[1] = fy * my + cy;

      if constexpr (!std::is_same_v<DerivedJ3D, std::nullptr_t>) {
        BASALT_ASSERT(d_proj_d_p3d);

        // Chain rule through r = sqrt(x^2 + y^2) and theta = atan2(r, z).
        const Scalar d_r_d_x = x / r;
        const Scalar d_r_d_y = y / r;

        const Scalar tmp = (z * z + r2);
        const Scalar d_theta_d_x = d_r_d_x * z / tmp;
        const Scalar d_theta_d_y = d_r_d_y * z / tmp;
        const Scalar d_theta_d_z = -r / tmp;

        // d'(theta) = 1 + 3 k1 theta^2 + 5 k2 theta^4 + 7 k3 theta^6 +
        // 9 k4 theta^8, also evaluated with Horner's scheme.
        Scalar d_r_theta_d_theta = Scalar(9) * k4 * theta2;
        d_r_theta_d_theta += Scalar(7) * k3;
        d_r_theta_d_theta *= theta2;
        d_r_theta_d_theta += Scalar(5) * k2;
        d_r_theta_d_theta *= theta2;
        d_r_theta_d_theta += Scalar(3) * k1;
        d_r_theta_d_theta *= theta2;
        d_r_theta_d_theta += Scalar(1);

        d_proj_d_p3d->setZero();
        (*d_proj_d_p3d)(0, 0) =
            fx *
            (r_theta * r + x * r * d_r_theta_d_theta * d_theta_d_x -
             x * x * r_theta / r) /
            r2;

        (*d_proj_d_p3d)(1, 0) =
            fy * y * (d_r_theta_d_theta * d_theta_d_x * r - x * r_theta / r) /
            r2;

        (*d_proj_d_p3d)(0, 1) =
            fx * x * (d_r_theta_d_theta * d_theta_d_y * r - y * r_theta / r) /
            r2;

        (*d_proj_d_p3d)(1, 1) =
            fy *
            (r_theta * r + y * r * d_r_theta_d_theta * d_theta_d_y -
             y * y * r_theta / r) /
            r2;

        (*d_proj_d_p3d)(0, 2) = fx * x * d_r_theta_d_theta * d_theta_d_z / r;
        (*d_proj_d_p3d)(1, 2) = fy * y * d_r_theta_d_theta * d_theta_d_z / r;
      } else {
        UNUSED(d_proj_d_p3d);
      }

      if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
        BASALT_ASSERT(d_proj_d_param);

        // Columns are [fx, fy, cx, cy, k1, k2, k3, k4]; the k2..k4 columns
        // reuse the k1 column scaled by increasing powers of theta^2.
        (*d_proj_d_param).setZero();
        (*d_proj_d_param)(0, 0) = mx;
        (*d_proj_d_param)(0, 2) = Scalar(1);
        (*d_proj_d_param)(1, 1) = my;
        (*d_proj_d_param)(1, 3) = Scalar(1);

        (*d_proj_d_param)(0, 4) = fx * x * theta * theta2 / r;
        (*d_proj_d_param)(1, 4) = fy * y * theta * theta2 / r;

        d_proj_d_param->col(5) = d_proj_d_param->col(4) * theta2;
        d_proj_d_param->col(6) = d_proj_d_param->col(5) * theta2;
        d_proj_d_param->col(7) = d_proj_d_param->col(6) * theta2;
      } else {
        UNUSED(d_proj_d_param);
      }
    } else {
      // Check that the point is not close to zero norm
      if (z < Sophus::Constants<Scalar>::epsilonSqrt()) {
        is_valid = false;
      }

      // Near the optical axis (tiny r) the model degenerates to the plain
      // pinhole projection, which is what the code below computes.
      proj[0] = fx * x / z + cx;
      proj[1] = fy * y / z + cy;

      if constexpr (!std::is_same_v<DerivedJ3D, std::nullptr_t>) {
        BASALT_ASSERT(d_proj_d_p3d);

        d_proj_d_p3d->setZero();
        const Scalar z2 = z * z;

        (*d_proj_d_p3d)(0, 0) = fx / z;
        (*d_proj_d_p3d)(0, 2) = -fx * x / z2;
        (*d_proj_d_p3d)(1, 1) = fy / z;
        (*d_proj_d_p3d)(1, 2) = -fy * y / z2;
      } else {
        UNUSED(d_proj_d_p3d);
      }

      if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
        BASALT_ASSERT(d_proj_d_param);

        d_proj_d_param->setZero();
        (*d_proj_d_param)(0, 0) = x / z;
        (*d_proj_d_param)(0, 2) = Scalar(1);
        (*d_proj_d_param)(1, 1) = y / z;
        (*d_proj_d_param)(1, 3) = Scalar(1);
      } else {
        UNUSED(d_proj_d_param);
      }
    }

    return is_valid;
  }

  /// @brief Solve for theta using Newton's method.
  ///
  /// Find root of the polynomial \f$ d(\theta) - r_{\theta} = 0 \f$ using
  /// Newton's method (https://en.wikipedia.org/wiki/Newton%27s_method). Used in
  /// \ref unproject function.
  ///
  /// @param ITER number of Newton iterations
  /// @param[in] r_theta target value \f$ r_{\theta} \f$ for which
  /// \f$ d(\theta) = r_{\theta} \f$ is solved; also used as initial guess
  /// @param[out] d_func_d_theta derivative of the function with respect to
  /// theta at the solution
  /// @returns theta - root of the polynomial
  template <int ITER>
  inline Scalar solveTheta(const Scalar& r_theta,
                           Scalar& d_func_d_theta) const {
    const Scalar& k1 = param_[4];
    const Scalar& k2 = param_[5];
    const Scalar& k3 = param_[6];
    const Scalar& k4 = param_[7];

    Scalar theta = r_theta;
    for (int i = ITER; i > 0; i--) {
      Scalar theta2 = theta * theta;

      // func = d(theta), Horner's scheme.
      Scalar func = k4 * theta2;
      func += k3;
      func *= theta2;
      func += k2;
      func *= theta2;
      func += k1;
      func *= theta2;
      func += Scalar(1);
      func *= theta;

      // d_func_d_theta = d'(theta), Horner's scheme.
      d_func_d_theta = Scalar(9) * k4 * theta2;
      d_func_d_theta += Scalar(7) * k3;
      d_func_d_theta *= theta2;
      d_func_d_theta += Scalar(5) * k2;
      d_func_d_theta *= theta2;
      d_func_d_theta += Scalar(3) * k1;
      d_func_d_theta *= theta2;
      d_func_d_theta += Scalar(1);

      // Iteration of Newton method
      theta += (r_theta - func) / d_func_d_theta;
    }

    return theta;
  }

  /// @brief Unproject the point and optionally compute Jacobians
  ///
  /// The unprojection function is computed as follows: \f{align}{
  /// \pi^{-1}(\mathbf{u}, \mathbf{i}) &=
  /// \begin{bmatrix}
  /// \sin(\theta^{*}) ~ \frac{m_x}{r_{\theta}}
  /// \\ \sin(\theta^{*}) ~ \frac{m_y}{r_{\theta}}
  /// \\ \cos(\theta^{*})
  /// \\ \end{bmatrix},
  /// \\ m_x &= \frac{u - c_x}{f_x},
  /// \\ m_y &= \frac{v - c_y}{f_y},
  /// \\ r_{\theta} &= \sqrt{m_x^2 + m_y^2},
  /// \\ \theta^{*} &= d^{-1}(r_{\theta}).
  /// \f}
  ///
  /// \f$\theta^{*}\f$ is the solution of \f$d(\theta)=r_{\theta}\f$. See \ref
  /// solveTheta for details.
  ///
  /// @param[in] proj point to unproject
  /// @param[out] p3d result of unprojection
  /// @param[out] d_p3d_d_proj if not nullptr computed Jacobian of unprojection
  /// with respect to proj
  /// @param[out] d_p3d_d_param point if not nullptr computed Jacobian of
  /// unprojection with respect to intrinsic parameters
  /// @return if unprojection is valid (this implementation always reports
  /// success)
  template <class DerivedPoint2D, class DerivedPoint3D,
            class DerivedJ2D = std::nullptr_t,
            class DerivedJparam = std::nullptr_t>
  inline bool unproject(const Eigen::MatrixBase<DerivedPoint2D>& proj,
                        Eigen::MatrixBase<DerivedPoint3D>& p3d,
                        DerivedJ2D d_p3d_d_proj = nullptr,
                        DerivedJparam d_p3d_d_param = nullptr) const {
    checkUnprojectionDerivedTypes<DerivedPoint2D, DerivedPoint3D, DerivedJ2D,
                                  DerivedJparam, N>();

    const typename EvalOrReference<DerivedPoint2D>::Type proj_eval(proj);

    const Scalar& fx = param_[0];
    const Scalar& fy = param_[1];
    const Scalar& cx = param_[2];
    const Scalar& cy = param_[3];

    const Scalar mx = (proj_eval[0] - cx) / fx;
    const Scalar my = (proj_eval[1] - cy) / fy;

    Scalar theta(0);
    Scalar sin_theta(0);
    Scalar cos_theta(1);
    Scalar thetad = sqrt(mx * mx + my * my);
    Scalar scaling(1);
    Scalar d_func_d_theta(0);

    if (thetad > Sophus::Constants<Scalar>::epsilonSqrt()) {
      // Invert d(theta) = thetad with 3 Newton iterations.
      theta = solveTheta<3>(thetad, d_func_d_theta);

      sin_theta = sin(theta);
      cos_theta = cos(theta);
      scaling = sin_theta / thetad;
    }

    p3d.setZero();
    p3d[0] = mx * scaling;
    p3d[1] = my * scaling;
    p3d[2] = cos_theta;

    if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t> ||
                  !std::is_same_v<DerivedJparam, std::nullptr_t>) {
      Scalar d_thetad_d_mx = Scalar(0);
      Scalar d_thetad_d_my = Scalar(0);
      Scalar d_scaling_d_thetad = Scalar(0);
      Scalar d_cos_d_thetad = Scalar(0);

      Scalar d_scaling_d_k1 = Scalar(0);
      Scalar d_cos_d_k1 = Scalar(0);

      Scalar theta2 = Scalar(0);

      if (thetad > Sophus::Constants<Scalar>::epsilonSqrt()) {
        d_thetad_d_mx = mx / thetad;
        d_thetad_d_my = my / thetad;

        theta2 = theta * theta;

        // d theta / d thetad = 1 / d'(theta) by implicit differentiation of
        // d(theta) = thetad; d_func_d_theta is d'(theta) from solveTheta.
        d_scaling_d_thetad = (thetad * cos_theta / d_func_d_theta - sin_theta) /
                             (thetad * thetad);
        d_cos_d_thetad = sin_theta / d_func_d_theta;

        d_scaling_d_k1 =
            -cos_theta * theta * theta2 / (d_func_d_theta * thetad);
        d_cos_d_k1 = d_cos_d_thetad * theta * theta2;
      }

      const Scalar d_res0_d_mx =
          scaling + mx * d_scaling_d_thetad * d_thetad_d_mx;
      const Scalar d_res0_d_my = mx * d_scaling_d_thetad * d_thetad_d_my;

      const Scalar d_res1_d_mx = my * d_scaling_d_thetad * d_thetad_d_mx;
      const Scalar d_res1_d_my =
          scaling + my * d_scaling_d_thetad * d_thetad_d_my;

      const Scalar d_res2_d_mx = -d_cos_d_thetad * d_thetad_d_mx;
      const Scalar d_res2_d_my = -d_cos_d_thetad * d_thetad_d_my;

      constexpr int SIZE_3D = DerivedPoint3D::SizeAtCompileTime;
      // c0 / c1 are the Jacobian columns w.r.t. the pixel coordinates u / v
      // (mx, my scaled by 1/fx, 1/fy); reused below for the param Jacobian.
      Eigen::Matrix<Scalar, SIZE_3D, 1> c0, c1;

      c0.setZero();
      c0(0) = d_res0_d_mx / fx;
      c0(1) = d_res1_d_mx / fx;
      c0(2) = d_res2_d_mx / fx;

      c1.setZero();
      c1(0) = d_res0_d_my / fy;
      c1(1) = d_res1_d_my / fy;
      c1(2) = d_res2_d_my / fy;

      if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t>) {
        BASALT_ASSERT(d_p3d_d_proj);
        d_p3d_d_proj->col(0) = c0;
        d_p3d_d_proj->col(1) = c1;
      } else {
        UNUSED(d_p3d_d_proj);
      }

      if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
        BASALT_ASSERT(d_p3d_d_param);

        // Columns are [fx, fy, cx, cy, k1, k2, k3, k4]; the k2..k4 columns
        // reuse the k1 column scaled by increasing powers of theta^2.
        d_p3d_d_param->setZero();
        d_p3d_d_param->col(2) = -c0;
        d_p3d_d_param->col(3) = -c1;

        d_p3d_d_param->col(0) = -c0 * mx;
        d_p3d_d_param->col(1) = -c1 * my;

        (*d_p3d_d_param)(0, 4) = mx * d_scaling_d_k1;
        (*d_p3d_d_param)(1, 4) = my * d_scaling_d_k1;
        (*d_p3d_d_param)(2, 4) = d_cos_d_k1;

        d_p3d_d_param->col(5) = d_p3d_d_param->col(4) * theta2;
        d_p3d_d_param->col(6) = d_p3d_d_param->col(5) * theta2;
        d_p3d_d_param->col(7) = d_p3d_d_param->col(6) * theta2;
      } else {
        UNUSED(d_p3d_d_param);
        UNUSED(d_scaling_d_k1);
        UNUSED(d_cos_d_k1);
      }
    } else {
      UNUSED(d_p3d_d_proj);
      UNUSED(d_p3d_d_param);
    }

    return true;
  }

  /// @brief Increment intrinsic parameters by inc
  ///
  /// NOTE: the increment is applied without clamping.
  ///
  /// @param[in] inc increment vector
  void operator+=(const VecN& inc) { param_ += inc; }

  /// @brief Returns a const reference to the intrinsic parameters vector
  ///
  /// The order is following: \f$ \left[f_x, f_y, c_x, c_y, k_1, k_2, k_3, k_4
  /// \right]^T \f$
  /// @return const reference to the intrinsic parameters vector
  const VecN& getParam() const { return param_; }

  /// @brief Set parameters from initialization
  ///
  /// Initializes the camera model to \f$ \left[f_x, f_y, c_x, c_y, 0, 0, 0, 0
  /// \right]^T \f$
  ///
  /// @param[in] init vector [fx, fy, cx, cy]
  inline void setFromInit(const Vec4& init) {
    param_[0] = init[0];
    param_[1] = init[1];
    param_[2] = init[2];
    param_[3] = init[3];
    param_[4] = 0;
    param_[5] = 0;
    param_[6] = 0;
    param_[7] = 0;
  }

  /// @brief Projections used for unit-tests
  static Eigen::aligned_vector<KannalaBrandtCamera4> getTestProjections() {
    Eigen::aligned_vector<KannalaBrandtCamera4> res;

    VecN vec1;
    // EuRoC-like calibration.
    vec1 << 379.045, 379.008, 505.512, 509.969, 0.00693023, -0.0013828,
        -0.000272596, -0.000452646;
    res.emplace_back(vec1);

    return res;
  }

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

 private:
  VecN param_;  ///< Intrinsics [fx, fy, cx, cy, k1, k2, k3, k4].
};
} // namespace basalt

View File

@@ -0,0 +1,324 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Implementation of pinhole camera model
*/
#pragma once
#include <basalt/camera/camera_static_assert.hpp>
#include <basalt/utils/sophus_utils.hpp>
namespace basalt {
using std::sqrt;
/// @brief Pinhole camera model
///
/// This model has N=4 parameters \f$ \mathbf{i} = \left[f_x, f_y, c_x, c_y
/// \right]^T \f$ with. See \ref
/// project and \ref unproject functions for more details.
template <typename Scalar_ = double>
class PinholeCamera {
public:
using Scalar = Scalar_;
static constexpr int N = 4; ///< Number of intrinsic parameters.
using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
using VecN = Eigen::Matrix<Scalar, N, 1>;
using Mat24 = Eigen::Matrix<Scalar, 2, 4>;
using Mat2N = Eigen::Matrix<Scalar, 2, N>;
using Mat42 = Eigen::Matrix<Scalar, 4, 2>;
using Mat4N = Eigen::Matrix<Scalar, 4, N>;
/// @brief Default constructor with zero intrinsics
PinholeCamera() { param_.setZero(); }
/// @brief Construct camera model with given vector of intrinsics
///
/// @param[in] p vector of intrinsic parameters [fx, fy, cx, cy]
explicit PinholeCamera(const VecN& p) { param_ = p; }
/// @brief Cast to different scalar type
/// @brief Cast to different scalar type
///
/// Re-creates the model with all intrinsics converted to Scalar2.
template <class Scalar2>
PinholeCamera<Scalar2> cast() const {
  return PinholeCamera<Scalar2>(param_.template cast<Scalar2>());
}
/// @brief Camera model name
///
/// @return "pinhole"
static std::string getName() { return "pinhole"; }
/// @brief Project the point and optionally compute Jacobians
///
/// Projection function is defined as follows:
/// \f{align}{
/// \pi(\mathbf{x}, \mathbf{i}) &=
/// \begin{bmatrix}
/// f_x{\frac{x}{z}}
/// \\ f_y{\frac{y}{z}}
/// \\ \end{bmatrix}
/// +
/// \begin{bmatrix}
/// c_x
/// \\ c_y
/// \\ \end{bmatrix}.
/// \f}
/// A set of 3D points that results in valid projection is expressed as
/// follows: \f{align}{
/// \Omega &= \{\mathbf{x} \in \mathbb{R}^3 ~|~ z > 0 \}
/// \f}
///
/// @param[in] p3d point to project
/// @param[out] proj result of projection
/// @param[out] d_proj_d_p3d if not nullptr computed Jacobian of projection
/// with respect to p3d
/// @param[out] d_proj_d_param point if not nullptr computed Jacobian of
/// projection with respect to intrinsic parameters
/// @return if projection is valid
template <class DerivedPoint3D, class DerivedPoint2D,
          class DerivedJ3D = std::nullptr_t,
          class DerivedJparam = std::nullptr_t>
inline bool project(const Eigen::MatrixBase<DerivedPoint3D>& p3d,
                    Eigen::MatrixBase<DerivedPoint2D>& proj,
                    DerivedJ3D d_proj_d_p3d = nullptr,
                    DerivedJparam d_proj_d_param = nullptr) const {
  checkProjectionDerivedTypes<DerivedPoint3D, DerivedPoint2D, DerivedJ3D,
                              DerivedJparam, N>();

  const typename EvalOrReference<DerivedPoint3D>::Type p3d_eval(p3d);

  // Intrinsics [fx, fy, cx, cy].
  const Scalar& fx = param_[0];
  const Scalar& fy = param_[1];
  const Scalar& cx = param_[2];
  const Scalar& cy = param_[3];

  const Scalar& px = p3d_eval[0];
  const Scalar& py = p3d_eval[1];
  const Scalar& pz = p3d_eval[2];

  // Only points safely in front of the camera yield a valid projection; the
  // pixel value itself is still written unconditionally, as in all models
  // of this family.
  const bool is_valid = pz >= Sophus::Constants<Scalar>::epsilonSqrt();

  proj[0] = fx * px / pz + cx;
  proj[1] = fy * py / pz + cy;

  if constexpr (!std::is_same_v<DerivedJ3D, std::nullptr_t>) {
    BASALT_ASSERT(d_proj_d_p3d);
    // Standard pinhole Jacobian w.r.t. the 3D point.
    const Scalar pz2 = pz * pz;
    d_proj_d_p3d->setZero();
    (*d_proj_d_p3d)(0, 0) = fx / pz;
    (*d_proj_d_p3d)(0, 2) = -fx * px / pz2;
    (*d_proj_d_p3d)(1, 1) = fy / pz;
    (*d_proj_d_p3d)(1, 2) = -fy * py / pz2;
  } else {
    UNUSED(d_proj_d_p3d);
  }

  if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
    BASALT_ASSERT(d_proj_d_param);
    // Jacobian w.r.t. [fx, fy, cx, cy].
    d_proj_d_param->setZero();
    (*d_proj_d_param)(0, 0) = px / pz;
    (*d_proj_d_param)(0, 2) = Scalar(1);
    (*d_proj_d_param)(1, 1) = py / pz;
    (*d_proj_d_param)(1, 3) = Scalar(1);
  } else {
    UNUSED(d_proj_d_param);
  }

  return is_valid;
}
  /// @brief Unproject the point and optionally compute Jacobians
  ///
  /// The unprojection function is computed as follows: \f{align}{
  ///    \pi^{-1}(\mathbf{u}, \mathbf{i}) &=
  ///    \frac{1}{\sqrt{m_x^2 + m_y^2 + 1}}
  ///    \begin{bmatrix}
  ///    m_x \\ m_y \\ 1
  ///    \\ \end{bmatrix}
  ///    \\ m_x &= \frac{u - c_x}{f_x},
  ///    \\ m_y &= \frac{v - c_y}{f_y}.
  /// \f}
  ///
  ///
  /// @param[in] proj point to unproject
  /// @param[out] p3d result of unprojection (unit-length direction)
  /// @param[out] d_p3d_d_proj if not nullptr computed Jacobian of
  /// unprojection with respect to proj
  /// @param[out] d_p3d_d_param point if not nullptr computed Jacobian of
  /// unprojection with respect to intrinsic parameters
  /// @return if unprojection is valid (always true for the pinhole model)
  template <class DerivedPoint2D, class DerivedPoint3D,
            class DerivedJ2D = std::nullptr_t,
            class DerivedJparam = std::nullptr_t>
  inline bool unproject(const Eigen::MatrixBase<DerivedPoint2D>& proj,
                        Eigen::MatrixBase<DerivedPoint3D>& p3d,
                        DerivedJ2D d_p3d_d_proj = nullptr,
                        DerivedJparam d_p3d_d_param = nullptr) const {
    // Compile-time sanity check on the sizes of the derived Eigen types.
    checkUnprojectionDerivedTypes<DerivedPoint2D, DerivedPoint3D, DerivedJ2D,
                                  DerivedJparam, N>();
    const typename EvalOrReference<DerivedPoint2D>::Type proj_eval(proj);
    const Scalar& fx = param_[0];
    const Scalar& fy = param_[1];
    const Scalar& cx = param_[2];
    const Scalar& cy = param_[3];
    // Normalized image-plane coordinates.
    const Scalar mx = (proj_eval[0] - cx) / fx;
    const Scalar my = (proj_eval[1] - cy) / fy;
    const Scalar r2 = mx * mx + my * my;
    // norm_inv scales (mx, my, 1) to unit length.
    const Scalar norm = sqrt(Scalar(1) + r2);
    const Scalar norm_inv = Scalar(1) / norm;
    p3d.setZero();
    p3d[0] = mx * norm_inv;
    p3d[1] = my * norm_inv;
    p3d[2] = norm_inv;
    if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t> ||
                  !std::is_same_v<DerivedJparam, std::nullptr_t>) {
      // d(1/sqrt(1+r2))/d(r2), shared by all Jacobian columns below.
      const Scalar d_norm_inv_d_r2 =
          -Scalar(0.5) * norm_inv * norm_inv * norm_inv;
      constexpr int SIZE_3D = DerivedPoint3D::SizeAtCompileTime;
      // c0 / c1 are d(p3d)/d(mx) and d(p3d)/d(my), already divided by the
      // focal lengths (chain rule through mx = (u-cx)/fx, my = (v-cy)/fy).
      Eigen::Matrix<Scalar, SIZE_3D, 1> c0, c1;
      c0.setZero();
      c0(0) = (norm_inv + 2 * mx * mx * d_norm_inv_d_r2) / fx;
      c0(1) = (2 * my * mx * d_norm_inv_d_r2) / fx;
      c0(2) = 2 * mx * d_norm_inv_d_r2 / fx;
      c1.setZero();
      c1(0) = (2 * my * mx * d_norm_inv_d_r2) / fy;
      c1(1) = (norm_inv + 2 * my * my * d_norm_inv_d_r2) / fy;
      c1(2) = 2 * my * d_norm_inv_d_r2 / fy;
      if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t>) {
        BASALT_ASSERT(d_p3d_d_proj);
        d_p3d_d_proj->col(0) = c0;
        d_p3d_d_proj->col(1) = c1;
      } else {
        UNUSED(d_p3d_d_proj);
      }
      if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
        BASALT_ASSERT(d_p3d_d_param);
        // d(mx)/d(cx) = -1/fx and d(mx)/d(fx) = -mx/fx, hence the reuse of
        // c0 / c1 with the extra factors below.
        d_p3d_d_param->col(2) = -c0;
        d_p3d_d_param->col(3) = -c1;
        d_p3d_d_param->col(0) = -c0 * mx;
        d_p3d_d_param->col(1) = -c1 * my;
      } else {
        UNUSED(d_p3d_d_param);
      }
    } else {
      UNUSED(d_p3d_d_proj);
      UNUSED(d_p3d_d_param);
    }
    return true;
  }
/// @brief Set parameters from initialization
///
/// Initializes the camera model to \f$ \left[f_x, f_y, c_x, c_y, \right]^T
/// \f$
///
/// @param[in] init vector [fx, fy, cx, cy]
inline void setFromInit(const Vec4& init) {
param_[0] = init[0];
param_[1] = init[1];
param_[2] = init[2];
param_[3] = init[3];
}
  /// @brief Increment intrinsic parameters by inc
  ///
  /// No clamping is applied (all four pinhole parameters are unconstrained).
  ///
  /// @param[in] inc increment vector [dfx, dfy, dcx, dcy]
  void operator+=(const VecN& inc) { param_ += inc; }
  /// @brief Returns a const reference to the intrinsic parameters vector
  ///
  /// The order is following: \f$ \left[f_x, f_y, c_x, c_y \right]^T \f$
  /// @return const reference to the intrinsic parameters vector (valid only
  /// while this object is alive)
  const VecN& getParam() const { return param_; }
/// @brief Projections used for unit-tests
static Eigen::aligned_vector<PinholeCamera> getTestProjections() {
Eigen::aligned_vector<PinholeCamera> res;
VecN vec1;
// Euroc
vec1 << 460.76484651566468, 459.4051018049483, 365.8937161309615,
249.33499869752445;
res.emplace_back(vec1);
// TUM VI 512
vec1 << 191.14799816648748, 191.13150946585135, 254.95857715233118,
256.8815466235898;
res.emplace_back(vec1);
return res;
}
/// @brief Resolutions used for unit-tests
static Eigen::aligned_vector<Eigen::Vector2i> getTestResolutions() {
Eigen::aligned_vector<Eigen::Vector2i> res;
res.emplace_back(752, 480);
res.emplace_back(512, 512);
return res;
}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
private:
VecN param_;
};
} // namespace basalt

View File

@@ -0,0 +1,155 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Implementation of stereographic parametrization for unit vectors
*/
#pragma once
#include <Eigen/Dense>
namespace basalt {
/// @brief Stereographic projection for minimal representation of unit vectors
///
/// \image html stereographic.png
/// The projection is defined on the entire
/// sphere, except at one point: the projection point. Where it is defined, the
/// mapping is smooth and bijective. The illustrations shows 3 examples of unit
/// vectors parametrized with a point in the 2D plane. See
/// \ref project and \ref unproject functions for more details.
template <typename Scalar_ = double>
class StereographicParam {
 public:
  using Scalar = Scalar_;

  using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
  using Vec4 = Eigen::Matrix<Scalar, 4, 1>;

  using Mat24 = Eigen::Matrix<Scalar, 2, 4>;
  using Mat42 = Eigen::Matrix<Scalar, 4, 2>;

  /// @brief Map a direction vector to its 2D stereographic parameters
  ///
  /// Projection function is defined as follows:
  /// \f{align}{
  ///    \pi(\mathbf{x}) &=
  ///    \begin{bmatrix}
  ///    {\frac{x}{d + z}}
  ///    \\ {\frac{y}{d + z}}
  ///    \\ \end{bmatrix},
  ///    \\ d &= \sqrt{x^2 + y^2 + z^2}.
  /// \f}
  ///
  /// @param[in] p3d point to project [x,y,z] (4th component ignored)
  /// @param[out] d_r_d_p if not nullptr, filled with the 2x4 Jacobian of the
  /// projection with respect to p3d (last column is zero)
  /// @return 2D parameters of the corresponding direction vector
  static inline Vec2 project(const Vec4& p3d, Mat24* d_r_d_p = nullptr) {
    // Euclidean norm d of (x, y, z); the projection divides by d + z.
    const Scalar d = p3d.template head<3>().norm();
    const Scalar denom = p3d[2] + d;
    const Scalar denom_inv = Scalar(1) / denom;

    Vec2 uv(p3d[0] * denom_inv, p3d[1] * denom_inv);

    if (d_r_d_p) {
      const Scalar denom_inv2 = denom_inv * denom_inv;
      // Common factor -1 / (d * (d + z)^2) of the derivative terms.
      const Scalar scale = -denom_inv2 / d;

      d_r_d_p->setZero();
      (*d_r_d_p)(0, 0) = denom_inv + p3d[0] * p3d[0] * scale;
      (*d_r_d_p)(0, 1) = p3d[0] * p3d[1] * scale;
      (*d_r_d_p)(0, 2) = p3d[0] * denom * scale;
      (*d_r_d_p)(0, 3) = Scalar(0);
      (*d_r_d_p)(1, 0) = p3d[0] * p3d[1] * scale;
      (*d_r_d_p)(1, 1) = denom_inv + p3d[1] * p3d[1] * scale;
      (*d_r_d_p)(1, 2) = p3d[1] * denom * scale;
      (*d_r_d_p)(1, 3) = Scalar(0);
    }

    return uv;
  }

  /// @brief Recover the unit direction vector from its 2D parameters
  ///
  /// The unprojection function is computed as follows: \f{align}{
  ///    \pi^{-1}(\mathbf{u}) &=
  ///    \frac{2}{1 + u^2 + v^2}
  ///    \begin{bmatrix}
  ///    u \\ v \\ 1
  ///    \\ \end{bmatrix}-\begin{bmatrix}
  ///    0 \\ 0 \\ 1
  ///    \\ \end{bmatrix}.
  /// \f}
  ///
  /// @param[in] proj 2D parameters [u,v]
  /// @param[out] d_r_d_p if not nullptr, filled with the 4x2 Jacobian of the
  /// unprojection with respect to proj (last row is zero)
  /// @return unit-length direction vector with homogeneous component 0
  static inline Vec4 unproject(const Vec2& proj, Mat42* d_r_d_p = nullptr) {
    const Scalar u2 = proj[0] * proj[0];
    const Scalar v2 = proj[1] * proj[1];
    const Scalar r2 = u2 + v2;

    // Common scale 2 / (1 + r^2) of the unprojected vector.
    const Scalar scale = Scalar(2) / (Scalar(1) + r2);

    Vec4 dir(proj[0] * scale, proj[1] * scale, scale - Scalar(1), Scalar(0));

    if (d_r_d_p) {
      const Scalar scale2 = scale * scale;
      const Scalar uv = proj[0] * proj[1];

      (*d_r_d_p)(0, 0) = (scale - u2 * scale2);
      (*d_r_d_p)(0, 1) = -uv * scale2;
      (*d_r_d_p)(1, 0) = -uv * scale2;
      (*d_r_d_p)(1, 1) = (scale - v2 * scale2);
      (*d_r_d_p)(2, 0) = -proj[0] * scale2;
      (*d_r_d_p)(2, 1) = -proj[1] * scale2;
      (*d_r_d_p)(3, 0) = Scalar(0);
      (*d_r_d_p)(3, 1) = Scalar(0);
    }

    return dir;
  }
};
} // namespace basalt

View File

@@ -0,0 +1,408 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Implementation of unified camera model
*/
#pragma once
#include <basalt/camera/camera_static_assert.hpp>
#include <basalt/utils/sophus_utils.hpp>
namespace basalt {
using std::sqrt;
/// @brief Unified camera model
///
/// \image html ucm.png
/// This model has N=5 parameters \f$ \mathbf{i} = \left[f_x, f_y, c_x, c_y,
/// \alpha \right]^T \f$ with \f$ \alpha \in [0,1] \f$.
/// See \ref project and \ref unproject functions for more details.
template <typename Scalar_ = double>
class UnifiedCamera {
 public:
  using Scalar = Scalar_;
  static constexpr int N = 5;  ///< Number of intrinsic parameters.

  using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
  using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
  using VecN = Eigen::Matrix<Scalar, N, 1>;

  using Mat24 = Eigen::Matrix<Scalar, 2, 4>;
  using Mat2N = Eigen::Matrix<Scalar, 2, N>;
  using Mat42 = Eigen::Matrix<Scalar, 4, 2>;
  using Mat4N = Eigen::Matrix<Scalar, 4, N>;

  /// @brief Default constructor with zero intrinsics
  UnifiedCamera() { param_.setZero(); }

  /// @brief Construct camera model with given vector of intrinsics
  ///
  /// @param[in] p vector of intrinsic parameters [fx, fy, cx, cy, alpha]
  explicit UnifiedCamera(const VecN& p) { param_ = p; }

  /// @brief Cast to different scalar type
  template <class Scalar2>
  UnifiedCamera<Scalar2> cast() const {
    return UnifiedCamera<Scalar2>(param_.template cast<Scalar2>());
  }

  /// @brief Camera model name
  ///
  /// @return "ucm"
  static std::string getName() { return "ucm"; }

  /// @brief Project the point and optionally compute Jacobians
  ///
  /// Projection function is defined as follows:
  /// \f{align}{
  ///    \pi(\mathbf{x}, \mathbf{i}) &=
  ///    \begin{bmatrix}
  ///    f_x{\frac{x}{\alpha d + (1-\alpha) z}}
  ///    \\ f_y{\frac{y}{\alpha d + (1-\alpha) z}}
  ///    \\ \end{bmatrix}
  ///    +
  ///    \begin{bmatrix}
  ///    c_x
  ///    \\ c_y
  ///    \\ \end{bmatrix},
  ///    \\ d &= \sqrt{x^2 + y^2 + z^2}.
  /// \f}
  /// A set of 3D points that results in valid projection is expressed as
  /// follows: \f{align}{
  ///    \Omega &= \{\mathbf{x} \in \mathbb{R}^3 ~|~ z > -wd \},
  ///    \\ w &= \begin{cases} \frac{\alpha}{1-\alpha}, & \mbox{if } \alpha \le
  ///    0.5,
  ///    \\ \frac{1-\alpha}{\alpha} & \mbox{if } \alpha > 0.5, \end{cases} \f}
  ///
  /// @param[in] p3d point to project
  /// @param[out] proj result of projection
  /// @param[out] d_proj_d_p3d if not nullptr computed Jacobian of projection
  /// with respect to p3d
  /// @param[out] d_proj_d_param point if not nullptr computed Jacobian of
  /// projection with respect to intrinsic parameters
  /// @return if projection is valid
  template <class DerivedPoint3D, class DerivedPoint2D,
            class DerivedJ3D = std::nullptr_t,
            class DerivedJparam = std::nullptr_t>
  inline bool project(const Eigen::MatrixBase<DerivedPoint3D>& p3d,
                      Eigen::MatrixBase<DerivedPoint2D>& proj,
                      DerivedJ3D d_proj_d_p3d = nullptr,
                      DerivedJparam d_proj_d_param = nullptr) const {
    // Compile-time size check on the derived types; mirrors the identical
    // check in unproject() below (it was previously missing here).
    checkProjectionDerivedTypes<DerivedPoint3D, DerivedPoint2D, DerivedJ3D,
                                DerivedJparam, N>();

    const typename EvalOrReference<DerivedPoint3D>::Type p3d_eval(p3d);

    const Scalar& fx = param_[0];
    const Scalar& fy = param_[1];
    const Scalar& cx = param_[2];
    const Scalar& cy = param_[3];
    const Scalar& alpha = param_[4];

    const Scalar& x = p3d_eval[0];
    const Scalar& y = p3d_eval[1];
    const Scalar& z = p3d_eval[2];

    const Scalar r2 = x * x + y * y;
    const Scalar rho2 = r2 + z * z;
    const Scalar rho = sqrt(rho2);

    // Denominator alpha * d + (1 - alpha) * z of the UCM projection.
    const Scalar norm = alpha * rho + (Scalar(1) - alpha) * z;

    const Scalar mx = x / norm;
    const Scalar my = y / norm;

    proj = Vec2(fx * mx + cx, fy * my + cy);

    // Check if valid: point must lie in Omega (see class docs), i.e. in
    // front of the model-dependent half-space z > -w * d.
    const Scalar w = alpha > Scalar(0.5) ? (Scalar(1) - alpha) / alpha
                                         : alpha / (Scalar(1) - alpha);
    const bool is_valid = (z > -w * rho);

    if constexpr (!std::is_same_v<DerivedJ3D, std::nullptr_t>) {
      BASALT_ASSERT(d_proj_d_p3d);

      // 2x3 Jacobian (multiplied through by 1 / (norm^2 * rho) at the end).
      const Scalar denom = norm * norm * rho;
      const Scalar mid = -(alpha * x * y);
      const Scalar add = norm * rho;
      const Scalar addz = (alpha * z + (Scalar(1) - alpha) * rho);

      d_proj_d_p3d->setZero();
      (*d_proj_d_p3d)(0, 0) = fx * (add - x * x * alpha);
      (*d_proj_d_p3d)(1, 0) = fy * mid;
      (*d_proj_d_p3d)(0, 1) = fx * mid;
      (*d_proj_d_p3d)(1, 1) = fy * (add - y * y * alpha);
      (*d_proj_d_p3d)(0, 2) = -fx * x * addz;
      (*d_proj_d_p3d)(1, 2) = -fy * y * addz;
      (*d_proj_d_p3d) /= denom;
    } else {
      UNUSED(d_proj_d_p3d);
    }

    if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
      BASALT_ASSERT(d_proj_d_param);

      const Scalar norm2 = norm * norm;

      d_proj_d_param->setZero();
      (*d_proj_d_param)(0, 0) = mx;
      (*d_proj_d_param)(0, 2) = Scalar(1);
      (*d_proj_d_param)(1, 1) = my;
      (*d_proj_d_param)(1, 3) = Scalar(1);

      // d norm / d alpha = rho - z, propagated through the division.
      const Scalar tmp_x = -fx * x / norm2;
      const Scalar tmp_y = -fy * y / norm2;
      const Scalar tmp4 = (rho - z);

      (*d_proj_d_param)(0, 4) = tmp_x * tmp4;
      (*d_proj_d_param)(1, 4) = tmp_y * tmp4;
    } else {
      UNUSED(d_proj_d_param);
    }

    return is_valid;
  }

  /// @brief Unproject the point and optionally compute Jacobians
  ///
  /// The unprojection function is computed as follows: \f{align}{
  ///     \pi ^ { -1 }(\mathbf{u}, \mathbf{i}) &=
  ///    \frac{\xi + \sqrt{1 + (1 - \xi ^ 2) r ^ 2}} {
  ///      1 + r ^ 2
  ///    }
  ///    \begin{bmatrix} m_x \\ m_y \\ 1 \\ \end{bmatrix} -
  ///    \begin {bmatrix} 0 \\ 0 \\ \xi \\ \end{bmatrix},
  ///    \\ m_x &= \frac{u - c_x}{f_x}(1-\alpha),
  ///    \\ m_y &= \frac{v - c_y}{f_y}(1-\alpha),
  ///    \\ r^2 &= m_x^2 + m_y^2,
  ///    \\ \xi &= \frac{\alpha}{1-\alpha}.
  /// \f}
  ///
  /// The valid range of unprojections is \f{align}{
  ///    \Theta &=
  ///    \begin{cases}
  ///    \mathbb{R}^2 & \mbox{if } \alpha \le 0.5
  ///    \\ \{ \mathbf{u} \in \mathbb{R}^2 ~|~ r^2 \le
  ///    \frac{(1-\alpha)^2}{2\alpha
  ///    - 1} \}  & \mbox{if } \alpha > 0.5 \end{cases}
  /// \f}
  ///
  /// @param[in] proj point to unproject
  /// @param[out] p3d result of unprojection
  /// @param[out] d_p3d_d_proj if not nullptr computed Jacobian of
  /// unprojection with respect to proj
  /// @param[out] d_p3d_d_param point if not nullptr computed Jacobian of
  /// unprojection with respect to intrinsic parameters
  /// @return if unprojection is valid
  template <class DerivedPoint2D, class DerivedPoint3D,
            class DerivedJ2D = std::nullptr_t,
            class DerivedJparam = std::nullptr_t>
  inline bool unproject(const Eigen::MatrixBase<DerivedPoint2D>& proj,
                        Eigen::MatrixBase<DerivedPoint3D>& p3d,
                        DerivedJ2D d_p3d_d_proj = nullptr,
                        DerivedJparam d_p3d_d_param = nullptr) const {
    checkUnprojectionDerivedTypes<DerivedPoint2D, DerivedPoint3D, DerivedJ2D,
                                  DerivedJparam, N>();

    const typename EvalOrReference<DerivedPoint2D>::Type proj_eval(proj);

    const Scalar& fx = param_[0];
    const Scalar& fy = param_[1];
    const Scalar& cx = param_[2];
    const Scalar& cy = param_[3];
    const Scalar& alpha = param_[4];

    const Scalar& u = proj_eval[0];
    const Scalar& v = proj_eval[1];

    const Scalar xi = alpha / (Scalar(1) - alpha);

    const Scalar mxx = (u - cx) / fx;
    const Scalar myy = (v - cy) / fy;
    const Scalar mx = (Scalar(1) - alpha) * mxx;
    const Scalar my = (Scalar(1) - alpha) * myy;

    const Scalar r2 = mx * mx + my * my;

    // Check if valid: for alpha > 0.5 the argument of the square root in n
    // below is non-negative only while r2 <= (1 - alpha)^2 / (2 alpha - 1),
    // which is exactly the set Theta in the documentation above. The
    // previous bound 1 / (2 alpha - 1) missed the (1 - alpha)^2 factor and
    // reported NaN unprojections as valid.
    const Scalar one_m_alpha = Scalar(1) - alpha;
    const bool is_valid = !static_cast<bool>(
        (alpha > Scalar(0.5)) &&
        (r2 >=
         one_m_alpha * one_m_alpha / (Scalar(2) * alpha - Scalar(1))));

    const Scalar xi2 = xi * xi;

    const Scalar n = sqrt(Scalar(1) + (Scalar(1) - xi2) * (r2));
    const Scalar m = (Scalar(1) + r2);

    // Scale factor k = (xi + n) / (1 + r2) applied to (mx, my, 1).
    const Scalar k = (xi + n) / m;

    p3d.setZero();
    p3d[0] = k * mx;
    p3d[1] = k * my;
    p3d[2] = k - xi;

    if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t> ||
                  !std::is_same_v<DerivedJparam, std::nullptr_t>) {
      const Scalar dk_dmx = -Scalar(2) * mx * (n + xi) / (m * m) +
                            mx * (Scalar(1) - xi2) / (n * m);
      const Scalar dk_dmy = -Scalar(2) * my * (n + xi) / (m * m) +
                            my * (Scalar(1) - xi2) / (n * m);

      constexpr int SIZE_3D = DerivedPoint3D::SizeAtCompileTime;
      // c0 / c1 are d(p3d)/d(u) and d(p3d)/d(v) after the chain rule
      // through mx, my (hence the focal division and (1 - alpha) factor).
      Eigen::Matrix<Scalar, SIZE_3D, 1> c0, c1;

      c0.setZero();
      c0(0) = (dk_dmx * mx + k) / fx;
      c0(1) = dk_dmx * my / fx;
      c0(2) = dk_dmx / fx;

      c1.setZero();
      c1(0) = dk_dmy * mx / fy;
      c1(1) = (dk_dmy * my + k) / fy;
      c1(2) = dk_dmy / fy;

      c0 *= (1 - alpha);
      c1 *= (1 - alpha);

      if constexpr (!std::is_same_v<DerivedJ2D, std::nullptr_t>) {
        BASALT_ASSERT(d_p3d_d_proj);
        d_p3d_d_proj->col(0) = c0;
        d_p3d_d_proj->col(1) = c1;
      } else {
        UNUSED(d_p3d_d_proj);
      }

      if constexpr (!std::is_same_v<DerivedJparam, std::nullptr_t>) {
        BASALT_ASSERT(d_p3d_d_param);

        const Scalar d_xi_d_alpha =
            Scalar(1) / ((Scalar(1) - alpha) * (Scalar(1) - alpha));
        const Scalar d_m_d_alpha =
            -Scalar(2) * (Scalar(1) - alpha) * (mxx * mxx + myy * myy);
        const Scalar d_n_d_alpha = -(mxx * mxx + myy * myy) / n;

        const Scalar dk_d_alpha =
            ((d_xi_d_alpha + d_n_d_alpha) * m - d_m_d_alpha * (xi + n)) /
            (m * m);

        d_p3d_d_param->setZero();
        d_p3d_d_param->col(0) = -mxx * c0;
        d_p3d_d_param->col(1) = -myy * c1;
        d_p3d_d_param->col(2) = -c0;
        d_p3d_d_param->col(3) = -c1;

        (*d_p3d_d_param)(0, 4) = dk_d_alpha * mx - k * mxx;
        (*d_p3d_d_param)(1, 4) = dk_d_alpha * my - k * myy;
        (*d_p3d_d_param)(2, 4) = dk_d_alpha - d_xi_d_alpha;
      } else {
        UNUSED(d_p3d_d_param);
      }
    } else {
      UNUSED(d_p3d_d_proj);
      UNUSED(d_p3d_d_param);
    }

    return is_valid;
  }

  /// @brief Set parameters from initialization
  ///
  /// Initializes the camera model to \f$ \left[f_x, f_y, c_x, c_y, 0.5
  /// \right]^T \f$
  ///
  /// @param[in] init vector [fx, fy, cx, cy]
  inline void setFromInit(const Vec4& init) {
    param_[0] = init[0];
    param_[1] = init[1];
    param_[2] = init[2];
    param_[3] = init[3];
    param_[4] = 0.5;
  }

  /// @brief Increment intrinsic parameters by inc and clamp the values to the
  /// valid range
  ///
  /// @param[in] inc increment vector
  void operator+=(const VecN& inc) {
    param_ += inc;
    // alpha in [0, 1]
    param_[4] = std::clamp(param_[4], Scalar(0), Scalar(1));
  }

  /// @brief Returns a const reference to the intrinsic parameters vector
  ///
  /// The order is following: \f$ \left[f_x, f_y, c_x, c_y, \alpha \right]^T
  /// \f$
  /// @return const reference to the intrinsic parameters vector
  const VecN& getParam() const { return param_; }

  /// @brief Projections used for unit-tests
  static Eigen::aligned_vector<UnifiedCamera> getTestProjections() {
    Eigen::aligned_vector<UnifiedCamera> res;

    VecN vec1;

    // Euroc
    vec1 << 460.76484651566468, 459.4051018049483, 365.8937161309615,
        249.33499869752445, 0.5903365915227143;
    res.emplace_back(vec1);

    // TUM VI 512
    vec1 << 191.14799816648748, 191.13150946585135, 254.95857715233118,
        256.8815466235898, 0.6291060871161842;
    res.emplace_back(vec1);

    return res;
  }

  /// @brief Resolutions used for unit-tests
  static Eigen::aligned_vector<Eigen::Vector2i> getTestResolutions() {
    Eigen::aligned_vector<Eigen::Vector2i> res;

    res.emplace_back(752, 480);
    res.emplace_back(512, 512);

    return res;
  }

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

 private:
  VecN param_;
};
} // namespace basalt

View File

@@ -0,0 +1,615 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Image datatype with views, interpolation, gradients
*/
// This file is adapted from Pangolin. Original license:
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2011 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <memory>
#include <Eigen/Dense>
#include <basalt/utils/assert.h>
// Renamed Pangolin defines to avoid clash
#define BASALT_HOST_DEVICE
#define BASALT_EXTENSION_IMAGE
#ifdef BASALT_ENABLE_BOUNDS_CHECKS
// Forward the full argument list to BASALT_ASSERT. The previous definition
// used "##__VA_ARGS__", which is a token paste with nothing on its left and
// does not preprocess; the bounds-checked build could never compile.
#define BASALT_BOUNDS_ASSERT(...) BASALT_ASSERT(__VA_ARGS__)
#else
#define BASALT_BOUNDS_ASSERT(...) ((void)0)
#endif
namespace basalt {
/// @brief Helper class for copying objects.
///
/// Wraps a const reference to an object so it can be passed through APIs
/// without being copied element-wise. NOTE(review): the constructor is
/// implicit and the wrapper stores only a reference, so the wrapped object
/// must outlive the CopyObject instance.
template <typename T>
struct CopyObject {
  CopyObject(const T& obj) : obj(obj) {}
  const T& obj;  // borrowed, not owned
};
/// @brief Copy a 2D block of bytes between two pitched (row-padded) buffers.
///
/// If neither side has row padding the whole block is contiguous and a single
/// memcpy suffices; otherwise each row's payload is copied individually.
///
/// @param dst destination buffer
/// @param dst_pitch_bytes bytes between starts of consecutive dst rows
/// @param src source buffer
/// @param src_pitch_bytes bytes between starts of consecutive src rows
/// @param width_bytes payload bytes per row (must not exceed either pitch)
/// @param height number of rows
inline void PitchedCopy(char* dst, unsigned int dst_pitch_bytes,
                        const char* src, unsigned int src_pitch_bytes,
                        unsigned int width_bytes, unsigned int height) {
  const bool both_contiguous =
      (dst_pitch_bytes == width_bytes) && (src_pitch_bytes == width_bytes);
  if (both_contiguous) {
    // No padding anywhere: one flat copy covers the whole block.
    std::memcpy(dst, src, height * width_bytes);
    return;
  }
  // Padded rows: copy each row's payload, then advance by the pitches.
  for (unsigned int row = 0; row < height; ++row) {
    std::memcpy(dst, src, width_bytes);
    dst += dst_pitch_bytes;
    src += src_pitch_bytes;
  }
}
/// @brief Image class that supports sub-images, interpolation, element access.
template <typename T>
struct Image {
  using PixelType = T;

  /// Default-constructed image: null data pointer, zero size (invalid).
  inline Image() : pitch(0), ptr(0), w(0), h(0) {}

  /// Wrap an externally owned buffer; the Image does NOT take ownership.
  ///
  /// @param ptr first pixel of the buffer
  /// @param w width in pixels
  /// @param h height in rows
  /// @param pitch bytes between starts of consecutive rows (>= w*sizeof(T))
  inline Image(T* ptr, size_t w, size_t h, size_t pitch)
      : pitch(pitch), ptr(ptr), w(w), h(h) {}

  /// Total byte extent of the buffer, including row padding.
  BASALT_HOST_DEVICE inline size_t SizeBytes() const { return pitch * h; }

  /// Number of pixels (w * h), excluding padding.
  BASALT_HOST_DEVICE inline size_t Area() const { return w * h; }

  /// True when a buffer is attached.
  BASALT_HOST_DEVICE inline bool IsValid() const { return ptr != 0; }

  /// True when rows are stored back-to-back with no padding between them.
  BASALT_HOST_DEVICE inline bool IsContiguous() const {
    return w * sizeof(T) == pitch;
  }
  //////////////////////////////////////////////////////
  // Iterators
  //////////////////////////////////////////////////////

  /// Pointer to the first pixel.
  BASALT_HOST_DEVICE inline T* begin() { return ptr; }

  /// Pointer one past the last pixel of the last row.
  /// NOTE(review): begin()/end() only form a valid flat range over all
  /// pixels when the image is contiguous; with row padding the range also
  /// covers the padding bytes of intermediate rows.
  BASALT_HOST_DEVICE inline T* end() { return RowPtr(h - 1) + w; }

  BASALT_HOST_DEVICE inline const T* begin() const { return ptr; }

  BASALT_HOST_DEVICE inline const T* end() const { return RowPtr(h - 1) + w; }

  /// Number of pixels (same as Area()).
  BASALT_HOST_DEVICE inline size_t size() const { return w * h; }
//////////////////////////////////////////////////////
// Image transforms
//////////////////////////////////////////////////////
template <typename UnaryOperation>
BASALT_HOST_DEVICE inline void Transform(UnaryOperation unary_op) {
BASALT_ASSERT(IsValid());
for (size_t y = 0; y < h; ++y) {
T* el = RowPtr(y);
const T* el_end = el + w;
for (; el != el_end; ++el) {
*el = unary_op(*el);
}
}
}
  /// Set every pixel to val.
  BASALT_HOST_DEVICE inline void Fill(const T& val) {
    Transform([&](const T&) { return val; });
  }

  /// Replace every pixel equal to oldval with newval.
  BASALT_HOST_DEVICE inline void Replace(const T& oldval, const T& newval) {
    Transform([&](const T& val) { return (val == oldval) ? newval : val; });
  }
inline void Memset(unsigned char v = 0) {
BASALT_ASSERT(IsValid());
if (IsContiguous()) {
std::memset((char*)ptr, v, pitch * h);
} else {
for (size_t y = 0; y < h; ++y) {
std::memset((char*)RowPtr(y), v, pitch);
}
}
}
inline void CopyFrom(const Image<T>& img) {
if (IsValid() && img.IsValid()) {
BASALT_ASSERT(w >= img.w && h >= img.h);
PitchedCopy((char*)ptr, pitch, (char*)img.ptr, img.pitch,
std::min(img.w, w) * sizeof(T), std::min(img.h, h));
} else if (img.IsValid() != IsValid()) {
BASALT_ASSERT(false && "Cannot copy from / to an unasigned image.");
}
}
  //////////////////////////////////////////////////////
  // Reductions
  //////////////////////////////////////////////////////

  /// @brief Left fold of binary_op over all pixels in row-major order.
  ///
  /// @param init initial accumulator value
  /// @param binary_op callable (T accumulator, T pixel) -> T
  /// @return the folded value
  template <typename BinaryOperation>
  BASALT_HOST_DEVICE inline T Accumulate(const T init,
                                         BinaryOperation binary_op) {
    BASALT_ASSERT(IsValid());
    T val = init;
    for (size_t y = 0; y < h; ++y) {
      T* el = RowPtr(y);
      const T* el_end = el + w;
      for (; el != el_end; ++el) {
        val = binary_op(val, *el);
      }
    }
    return val;
  }
std::pair<T, T> MinMax() const {
BASALT_ASSERT(IsValid());
std::pair<T, T> minmax(std::numeric_limits<T>::max(),
std::numeric_limits<T>::lowest());
for (size_t r = 0; r < h; ++r) {
const T* ptr = RowPtr(r);
const T* end = ptr + w;
while (ptr != end) {
minmax.first = std::min(*ptr, minmax.first);
minmax.second = std::max(*ptr, minmax.second);
++ptr;
}
}
return minmax;
}
template <typename Tout = T>
Tout Sum() const {
return Accumulate((T)0,
[](const T& lhs, const T& rhs) { return lhs + rhs; });
}
  /// @brief Mean pixel value: Sum<Tout>() divided by the pixel count.
  ///
  /// For integral Tout the division truncates; pass a floating-point Tout
  /// for a fractional mean.
  template <typename Tout = T>
  Tout Mean() const {
    return Sum<Tout>() / Area();
  }
  //////////////////////////////////////////////////////
  // Direct Pixel Access
  //////////////////////////////////////////////////////

  /// Pointer to the first pixel of row y (y is not bounds-checked).
  BASALT_HOST_DEVICE inline T* RowPtr(size_t y) {
    return (T*)((unsigned char*)(ptr) + y * pitch);
  }

  BASALT_HOST_DEVICE inline const T* RowPtr(size_t y) const {
    return (T*)((unsigned char*)(ptr) + y * pitch);
  }

  /// Pixel at column x, row y. Bounds are checked only when
  /// BASALT_ENABLE_BOUNDS_CHECKS is defined.
  BASALT_HOST_DEVICE inline T& operator()(size_t x, size_t y) {
    BASALT_BOUNDS_ASSERT(InBounds(x, y));
    return RowPtr(y)[x];
  }

  BASALT_HOST_DEVICE inline const T& operator()(size_t x, size_t y) const {
    BASALT_BOUNDS_ASSERT(InBounds(x, y));
    return RowPtr(y)[x];
  }

  /// Pixel at p = (x, y) for any vector-like type with operator[].
  template <typename TVec>
  BASALT_HOST_DEVICE inline T& operator()(const TVec& p) {
    BASALT_BOUNDS_ASSERT(InBounds(p[0], p[1]));
    return RowPtr(p[1])[p[0]];
  }

  template <typename TVec>
  BASALT_HOST_DEVICE inline const T& operator()(const TVec& p) const {
    BASALT_BOUNDS_ASSERT(InBounds(p[0], p[1]));
    return RowPtr(p[1])[p[0]];
  }

  /// Linear access relative to ptr. NOTE(review): this indexes the raw
  /// buffer, so for padded images ix is not a pixel number once it crosses
  /// a row boundary.
  BASALT_HOST_DEVICE inline T& operator[](size_t ix) {
    BASALT_BOUNDS_ASSERT(InImage(ptr + ix));
    return ptr[ix];
  }

  BASALT_HOST_DEVICE inline const T& operator[](size_t ix) const {
    BASALT_BOUNDS_ASSERT(InImage(ptr + ix));
    return ptr[ix];
  }
  //////////////////////////////////////////////////////
  // Interpolated Pixel Access
  //////////////////////////////////////////////////////

  /// Bilinear interpolation at p = (x, y); see interp(S, S).
  template <typename S>
  inline S interp(const Eigen::Matrix<S, 2, 1>& p) const {
    return interp<S>(p[0], p[1]);
  }

  /// Interpolated value plus gradient at p = (x, y); see interpGrad(S, S).
  template <typename S>
  inline Eigen::Matrix<S, 3, 1> interpGrad(
      const Eigen::Matrix<S, 2, 1>& p) const {
    return interpGrad<S>(p[0], p[1]);
  }

  /// @brief Bilinear interpolation of the four pixels surrounding (x, y).
  ///
  /// Reads pixels (ix, iy) .. (ix+1, iy+1), so (x, y) must be at least one
  /// pixel away from the right and bottom borders. NOTE(review): the int
  /// conversion truncates toward zero, so negative coordinates are
  /// presumably not supported — callers should pass non-negative x, y.
  template <typename S>
  inline S interp(S x, S y) const {
    static_assert(std::is_floating_point_v<S>,
                  "interpolation / gradient only makes sense "
                  "for floating point result type");
    // Integer corner and fractional offsets within the cell.
    int ix = x;
    int iy = y;
    S dx = x - ix;
    S dy = y - iy;
    S ddx = S(1.0) - dx;
    S ddy = S(1.0) - dy;
    return ddx * ddy * (*this)(ix, iy) + ddx * dy * (*this)(ix, iy + 1) +
           dx * ddy * (*this)(ix + 1, iy) + dx * dy * (*this)(ix + 1, iy + 1);
  }
  /// @brief Bilinearly interpolated value and central-difference gradient.
  ///
  /// Evaluates the bilinear interpolant at (x, y) and at the same fractional
  /// offset one pixel to each side, returning [value, d/dx, d/dy]. Reads the
  /// neighbourhood (ix-1 .. ix+2, iy-1 .. iy+2), so (x, y) must be at least
  /// one pixel from the left/top and two pixels from the right/bottom
  /// borders.
  template <typename S>
  inline Eigen::Matrix<S, 3, 1> interpGrad(S x, S y) const {
    static_assert(std::is_floating_point_v<S>,
                  "interpolation / gradient only makes sense "
                  "for floating point result type");
    // Integer corner and fractional offsets within the cell.
    int ix = x;
    int iy = y;
    S dx = x - ix;
    S dy = y - iy;
    S ddx = S(1.0) - dx;
    S ddy = S(1.0) - dy;
    Eigen::Matrix<S, 3, 1> res;
    const T& px0y0 = (*this)(ix, iy);
    const T& px1y0 = (*this)(ix + 1, iy);
    const T& px0y1 = (*this)(ix, iy + 1);
    const T& px1y1 = (*this)(ix + 1, iy + 1);
    // Interpolated value at (x, y).
    res[0] = ddx * ddy * px0y0 + ddx * dy * px0y1 + dx * ddy * px1y0 +
             dx * dy * px1y1;
    // Interpolated values one pixel to the left and right; their central
    // difference is the x-gradient.
    const T& pxm1y0 = (*this)(ix - 1, iy);
    const T& pxm1y1 = (*this)(ix - 1, iy + 1);
    S res_mx = ddx * ddy * pxm1y0 + ddx * dy * pxm1y1 + dx * ddy * px0y0 +
               dx * dy * px0y1;
    const T& px2y0 = (*this)(ix + 2, iy);
    const T& px2y1 = (*this)(ix + 2, iy + 1);
    S res_px = ddx * ddy * px1y0 + ddx * dy * px1y1 + dx * ddy * px2y0 +
               dx * dy * px2y1;
    res[1] = S(0.5) * (res_px - res_mx);
    // Interpolated values one pixel up and down for the y-gradient.
    const T& px0ym1 = (*this)(ix, iy - 1);
    const T& px1ym1 = (*this)(ix + 1, iy - 1);
    S res_my = ddx * ddy * px0ym1 + ddx * dy * px0y0 + dx * ddy * px1ym1 +
               dx * dy * px1y0;
    const T& px0y2 = (*this)(ix, iy + 2);
    const T& px1y2 = (*this)(ix + 1, iy + 2);
    S res_py = ddx * ddy * px0y1 + ddx * dy * px0y2 + dx * ddy * px1y1 +
               dx * dy * px1y2;
    res[2] = S(0.5) * (res_py - res_my);
    return res;
  }
//////////////////////////////////////////////////////
// Bounds Checking
//////////////////////////////////////////////////////
  /// @brief Check whether a raw pointer lies inside the image buffer.
  /// NOTE(review): only tests the byte range [ptr, RowPtr(h)); for images
  /// with row padding (pitch > w * sizeof(T)) a pointer into the padding also
  /// passes — confirm this is intended.
  BASALT_HOST_DEVICE
  bool InImage(const T* ptest) const {
    return ptr <= ptest && ptest < RowPtr(h);
  }
BASALT_HOST_DEVICE inline bool InBounds(int x, int y) const {
return 0 <= x && x < (int)w && 0 <= y && y < (int)h;
}
BASALT_HOST_DEVICE inline bool InBounds(float x, float y,
float border) const {
return border <= x && x < (w - border - 1) && border <= y &&
y < (h - border - 1);
}
  /// @brief True iff 2D point p is at least `border` away from the image
  /// edges.
  ///
  /// For floating-point scalars an extra one-pixel margin (offset) is
  /// subtracted on the far side, matching the float overload above.
  template <typename Derived>
  BASALT_HOST_DEVICE inline bool InBounds(
      const Eigen::MatrixBase<Derived>& p,
      const typename Derived::Scalar border) const {
    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived, 2);
    using Scalar = typename Derived::Scalar;
    Scalar offset(0);
    if constexpr (std::is_floating_point_v<Scalar>) {
      offset = Scalar(1);
    }
    return border <= p[0] && p[0] < ((int)w - border - offset) &&
           border <= p[1] && p[1] < ((int)h - border - offset);
  }
//////////////////////////////////////////////////////
// Obtain slices / subimages
//////////////////////////////////////////////////////
  /// @brief Read-only view of the rectangle [x, x+width) x [y, y+height).
  /// The view shares memory with this image (same pitch, no copy).
  BASALT_HOST_DEVICE inline const Image<const T> SubImage(size_t x, size_t y,
                                                          size_t width,
                                                          size_t height) const {
    BASALT_ASSERT((x + width) <= w && (y + height) <= h);
    return Image<const T>(RowPtr(y) + x, width, height, pitch);
  }
  /// @brief Mutable view of the rectangle [x, x+width) x [y, y+height).
  /// The view shares memory with this image (same pitch, no copy).
  BASALT_HOST_DEVICE inline Image<T> SubImage(size_t x, size_t y, size_t width,
                                              size_t height) {
    BASALT_ASSERT((x + width) <= w && (y + height) <= h);
    return Image<T>(RowPtr(y) + x, width, height, pitch);
  }
  /// @brief View of row y as a w x 1 image.
  /// NOTE(review): declared const and thus calls the const SubImage overload
  /// returning Image<const T>, while the return type here is Image<T> —
  /// presumably Image provides the needed conversion; confirm.
  BASALT_HOST_DEVICE inline Image<T> Row(int y) const {
    return SubImage(0, y, w, 1);
  }
  /// @brief View of column x as a 1 x h image.
  /// NOTE(review): same const-overload / return-type mismatch as \ref Row —
  /// confirm the Image<const T> -> Image<T> conversion is intended.
  BASALT_HOST_DEVICE inline Image<T> Col(int x) const {
    return SubImage(x, 0, 1, h);
  }
//////////////////////////////////////////////////////
// Data mangling
//////////////////////////////////////////////////////
  /// @brief View of this image with pixels reinterpreted as TRecast.
  /// Asserts that the pixel sizes match; the view shares memory.
  template <typename TRecast>
  BASALT_HOST_DEVICE inline Image<TRecast> Reinterpret() const {
    BASALT_ASSERT_STREAM(sizeof(TRecast) == sizeof(T),
                         "sizeof(TRecast) must match sizeof(T): "
                             << sizeof(TRecast) << " != " << sizeof(T));
    return UnsafeReinterpret<TRecast>();
  }
  /// @brief View of this image with pixels reinterpreted as TRecast,
  /// WITHOUT any size check (w/h/pitch are kept as-is).
  template <typename TRecast>
  BASALT_HOST_DEVICE inline Image<TRecast> UnsafeReinterpret() const {
    return Image<TRecast>((TRecast*)ptr, w, h, pitch);
  }
//////////////////////////////////////////////////////
// Deprecated methods
//////////////////////////////////////////////////////
// PANGOLIN_DEPRECATED inline
  /// @brief Deprecated constructor taking the pointer last (legacy Pangolin
  /// argument order). Does not take ownership of ptr.
  Image(size_t w, size_t h, size_t pitch, T* ptr)
      : pitch(pitch), ptr(ptr), w(w), h(h) {}
// Use RAII/move aware pangolin::ManagedImage instead
// PANGOLIN_DEPRECATED inline
  /// @brief Free the buffer with global operator delete (deprecated).
  /// Only valid for memory obtained via \ref Alloc; safe to call when ptr is
  /// already null.
  void Dealloc() {
    if (ptr) {
      ::operator delete(ptr);
      ptr = nullptr;
    }
  }
// Use RAII/move aware pangolin::ManagedImage instead
// PANGOLIN_DEPRECATED inline
  /// @brief Allocate an uninitialized buffer of h * pitch bytes with global
  /// operator new (deprecated). Frees any previous \ref Alloc buffer first.
  void Alloc(size_t w, size_t h, size_t pitch) {
    Dealloc();
    this->w = w;
    this->h = h;
    this->pitch = pitch;
    this->ptr = (T*)::operator new(h* pitch);
  }
//////////////////////////////////////////////////////
// Data members
//////////////////////////////////////////////////////
  size_t pitch;  ///< Row stride in bytes
  T* ptr;        ///< Pointer to the first pixel (not owned by Image)
  size_t w;      ///< Width in pixels
  size_t h;      ///< Height in pixels
  BASALT_EXTENSION_IMAGE
};
/// @brief Default allocator type used by ManagedImage.
template <class T>
using DefaultImageAllocator = std::allocator<T>;
/// @brief Image that manages its own memory, storing a strong pointer to its
/// memory
template <typename T, class Allocator = DefaultImageAllocator<T>>
class ManagedImage : public Image<T> {
 public:
  using PixelType = T;
  using Ptr = std::shared_ptr<ManagedImage<T, Allocator>>;
  /// Destructor: releases the owned buffer.
  inline ~ManagedImage() { Deallocate(); }
  /// Null (empty) image; no allocation.
  inline ManagedImage() {}
  /// Single-row image of width w (pitch = w * sizeof(T)).
  inline ManagedImage(size_t w)
      : Image<T>(Allocator().allocate(w), w, 1, w * sizeof(T)) {}
  /// w x h image with tightly packed rows (pitch = w * sizeof(T)).
  inline ManagedImage(size_t w, size_t h)
      : Image<T>(Allocator().allocate(w * h), w, h, w * sizeof(T)) {}
  /// w x h image with an explicit row pitch in bytes. The "+ 1" over-allocates
  /// one element to compensate for rounding down when pitch_bytes is not a
  /// multiple of sizeof(T).
  /// NOTE(review): Deallocate() passes (h * pitch) / sizeof(T) as the element
  /// count, one less than allocated here — harmless for std::allocator, but
  /// technically mismatched; confirm if a size-checking allocator is used.
  inline ManagedImage(size_t w, size_t h, size_t pitch_bytes)
      : Image<T>(Allocator().allocate((h * pitch_bytes) / sizeof(T) + 1), w, h,
                 pitch_bytes) {}
  // Not copy constructable
  inline ManagedImage(const ManagedImage<T>& other) = delete;
  // Move constructor
  inline ManagedImage(ManagedImage<T, Allocator>&& img) {
    *this = std::move(img);
  }
  // Move assignment: frees the current buffer and steals img's buffer.
  // Note: only img.ptr is nulled; img.w/h/pitch keep stale values.
  inline void operator=(ManagedImage<T, Allocator>&& img) {
    Deallocate();
    Image<T>::pitch = img.pitch;
    Image<T>::ptr = img.ptr;
    Image<T>::w = img.w;
    Image<T>::h = img.h;
    img.ptr = nullptr;
  }
  // Explicit copy constructor (via CopyObject wrapper)
  template <typename TOther>
  ManagedImage(const CopyObject<TOther>& other) {
    CopyFrom(other.obj);
  }
  // Explicit copy assignment (via CopyObject wrapper)
  template <typename TOther>
  void operator=(const CopyObject<TOther>& other) {
    CopyFrom(other.obj);
  }
  /// Swap buffer and dimensions with another image.
  inline void Swap(ManagedImage<T>& img) {
    std::swap(img.pitch, Image<T>::pitch);
    std::swap(img.ptr, Image<T>::ptr);
    std::swap(img.w, Image<T>::w);
    std::swap(img.h, Image<T>::h);
  }
  /// Deep-copy from img, reallocating first if the size differs.
  inline void CopyFrom(const Image<T>& img) {
    if (!Image<T>::IsValid() || Image<T>::w != img.w || Image<T>::h != img.h) {
      Reinitialise(img.w, img.h);
    }
    Image<T>::CopyFrom(img);
  }
  /// Reallocate to w x h (tightly packed) unless already exactly that size.
  /// Existing pixel data is NOT preserved when reallocation happens.
  inline void Reinitialise(size_t w, size_t h) {
    if (!Image<T>::ptr || Image<T>::w != w || Image<T>::h != h) {
      *this = ManagedImage<T, Allocator>(w, h);
    }
  }
  /// Reallocate to w x h with an explicit pitch unless already matching.
  inline void Reinitialise(size_t w, size_t h, size_t pitch) {
    if (!Image<T>::ptr || Image<T>::w != w || Image<T>::h != h ||
        Image<T>::pitch != pitch) {
      *this = ManagedImage<T, Allocator>(w, h, pitch);
    }
  }
  /// Free the owned buffer (no-op when empty).
  inline void Deallocate() {
    if (Image<T>::ptr) {
      Allocator().deallocate(Image<T>::ptr,
                             (Image<T>::h * Image<T>::pitch) / sizeof(T));
      Image<T>::ptr = nullptr;
    }
  }
  /// Take ownership of another image's buffer, reinterpreting its pixel type.
  /// NOTE(review): the buffer will later be freed with THIS image's
  /// Allocator, which must be compatible with AllocOther — confirm at call
  /// sites.
  template <typename TOther, typename AllocOther>
  inline void OwnAndReinterpret(ManagedImage<TOther, AllocOther>&& img) {
    Deallocate();
    Image<T>::pitch = img.pitch;
    Image<T>::ptr = (T*)img.ptr;
    Image<T>::w = img.w;
    Image<T>::h = img.h;
    img.ptr = nullptr;
  }
  /// Element-wise conversion from an image with a different pixel type.
  template <typename T1>
  inline void ConvertFrom(const ManagedImage<T1>& img) {
    Reinitialise(img.w, img.h);
    for (size_t j = 0; j < img.h; j++) {
      T* this_row = this->RowPtr(j);
      const T1* other_row = img.RowPtr(j);
      for (size_t i = 0; i < img.w; i++) {
        this_row[i] = T(other_row[i]);
      }
    }
  }
  /// Element-wise subtraction; img is assumed to have the same dimensions.
  inline void operator-=(const ManagedImage<T>& img) {
    for (size_t j = 0; j < img.h; j++) {
      T* this_row = this->RowPtr(j);
      const T* other_row = img.RowPtr(j);
      for (size_t i = 0; i < img.w; i++) {
        this_row[i] -= other_row[i];
      }
    }
  }
};
} // namespace basalt

View File

@@ -0,0 +1,200 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Image pyramid implementation stored as mipmap
*/
#pragma once
#include <basalt/image/image.h>
namespace basalt {
/// @brief Image pyramid that stores levels as mipmap
///
/// \image html mipmap.jpeg
/// Computes image pyramid (see \ref subsample) and stores it as a mipmap
/// (https://en.wikipedia.org/wiki/Mipmap).
template <typename T, class Allocator = DefaultImageAllocator<T>>
class ManagedImagePyr {
 public:
  using PixelType = T;
  using Ptr = std::shared_ptr<ManagedImagePyr<T, Allocator>>;
  /// @brief Default constructor.
  inline ManagedImagePyr() {}
  /// @brief Construct image pyramid from other image.
  ///
  /// @param other image to use for the pyramid level 0
  /// @param num_levels number of levels for the pyramid
  inline ManagedImagePyr(const ManagedImage<T>& other, size_t num_levels) {
    setFromImage(other, num_levels);
  }
  /// @brief Set image pyramid from other image.
  ///
  /// Allocates a mipmap of size (1.5 * other.w) x other.h: level 0 occupies
  /// the left part; coarser levels are stacked vertically in the right part.
  ///
  /// @param other image to use for the pyramid level 0
  /// @param num_levels number of levels for the pyramid
  inline void setFromImage(const ManagedImage<T>& other, size_t num_levels) {
    orig_w = other.w;
    image.Reinitialise(other.w + other.w / 2, other.h);
    image.Fill(0);
    lvl_internal(0).CopyFrom(other);
    // Each iteration subsamples level i into level i + 1.
    for (size_t i = 0; i < num_levels; i++) {
      const Image<const T> l = lvl(i);
      Image<T> lp1 = lvl_internal(i + 1);
      subsample(l, lp1);
    }
  }
  /// @brief Extrapolate image after border with reflection.
  ///
  /// Reflects x at the upper bound h - 1 without duplicating the border
  /// pixel (BORDER_REFLECT_101 convention); assumes 0 <= x <= 2 * (h - 1).
  static inline int border101(int x, int h) {
    return h - 1 - std::abs(h - 1 - x);
  }
  /// @brief Subsample the image twice in each direction.
  ///
  /// Subsampling is done by convolution with Gaussian kernel
  /// \f[
  /// \frac{1}{256}
  /// \begin{bmatrix}
  ///   1 & 4 & 6 & 4 & 1
  /// \\4 & 16 & 24 & 16 & 4
  /// \\6 & 24 & 36 & 24 & 6
  /// \\4 & 16 & 24 & 16 & 4
  /// \\1 & 4 & 6 & 4 & 1
  /// \\ \end{bmatrix}
  /// \f]
  /// and removing every even-numbered row and column.
  static void subsample(const Image<const T>& img, Image<T>& img_sub) {
    static_assert(std::is_same<T, uint16_t>::value ||
                  std::is_same<T, uint8_t>::value);
    constexpr int kernel[5] = {1, 4, 6, 4, 1};
    // accumulator
    // NOTE(review): tmp is stored transposed (img_sub.h rows of img.w ints)
    // so that the second pass below again reads contiguously along rows.
    ManagedImage<int> tmp(img_sub.h, img.w);
    // Vertical convolution
    {
      for (int r = 0; r < int(img_sub.h); r++) {
        // std::abs reflects past the top border; border101 reflects past the
        // bottom border.
        const T* row_m2 = img.RowPtr(std::abs(2 * r - 2));
        const T* row_m1 = img.RowPtr(std::abs(2 * r - 1));
        const T* row = img.RowPtr(2 * r);
        const T* row_p1 = img.RowPtr(border101(2 * r + 1, img.h));
        const T* row_p2 = img.RowPtr(border101(2 * r + 2, img.h));
        for (int c = 0; c < int(img.w); c++) {
          tmp(r, c) = kernel[0] * int(row_m2[c]) + kernel[1] * int(row_m1[c]) +
                      kernel[2] * int(row[c]) + kernel[3] * int(row_p1[c]) +
                      kernel[4] * int(row_p2[c]);
        }
      }
    }
    // Horizontal convolution
    {
      for (int c = 0; c < int(img_sub.w); c++) {
        const int* row_m2 = tmp.RowPtr(std::abs(2 * c - 2));
        const int* row_m1 = tmp.RowPtr(std::abs(2 * c - 1));
        const int* row = tmp.RowPtr(2 * c);
        const int* row_p1 = tmp.RowPtr(border101(2 * c + 1, tmp.h));
        const int* row_p2 = tmp.RowPtr(border101(2 * c + 2, tmp.h));
        for (int r = 0; r < int(tmp.w); r++) {
          int val_int = kernel[0] * row_m2[r] + kernel[1] * row_m1[r] +
                        kernel[2] * row[r] + kernel[3] * row_p1[r] +
                        kernel[4] * row_p2[r];
          // Normalize by 256 (two passes of the 16-sum kernel) with rounding.
          T val = ((val_int + (1 << 7)) >> 8);
          img_sub(c, r) = val;
        }
      }
    }
  }
  /// @brief Return const image of the certain level
  ///
  /// @param lvl level to return
  /// @return const image with the pyramid level
  inline const Image<const T> lvl(size_t lvl) const {
    // Level 0 is the left part of the mipmap; levels >= 1 start at
    // x = orig_w, stacked vertically.
    size_t x = (lvl == 0) ? 0 : orig_w;
    size_t y = (lvl <= 1) ? 0 : (image.h - (image.h >> (lvl - 1)));
    size_t width = (orig_w >> lvl);
    size_t height = (image.h >> lvl);
    return image.SubImage(x, y, width, height);
  }
  /// @brief Return const image of underlying mipmap
  ///
  /// @return const image of the underlying mipmap representation which can
  /// be for example used for visualization
  inline const Image<const T> mipmap() const {
    return image.SubImage(0, 0, image.w, image.h);
  }
  /// @brief Return coordinate offset of the image in the mipmap image.
  ///
  /// @param lvl level to return
  /// @return offset coordinates (2x1 vector)
  template <typename S>
  inline Eigen::Matrix<S, 2, 1> lvl_offset(size_t lvl) {
    size_t x = (lvl == 0) ? 0 : orig_w;
    size_t y = (lvl <= 1) ? 0 : (image.h - (image.h >> (lvl - 1)));
    return Eigen::Matrix<S, 2, 1>(x, y);
  }
 protected:
  /// @brief Return image of the certain level
  ///
  /// @param lvl level to return
  /// @return image with the pyramid level
  inline Image<T> lvl_internal(size_t lvl) {
    size_t x = (lvl == 0) ? 0 : orig_w;
    size_t y = (lvl <= 1) ? 0 : (image.h - (image.h >> (lvl - 1)));
    size_t width = (orig_w >> lvl);
    size_t height = (image.h >> lvl);
    return image.SubImage(x, y, width, height);
  }
  size_t orig_w;          ///< Width of the original image (level 0)
  ManagedImage<T> image;  ///< Pyramid image stored as a mipmap
};
} // namespace basalt

View File

@@ -0,0 +1,283 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Type definitions for IMU preintegration
*/
#pragma once
#include <basalt/utils/sophus_utils.hpp>
#include <memory>
namespace basalt {
constexpr size_t POSE_SIZE = 6;  ///< Dimensionality of the pose state
constexpr size_t POSE_VEL_SIZE =
    9;  ///< Dimensionality of the pose-velocity state
constexpr size_t POSE_VEL_BIAS_SIZE =
    15;  ///< Dimensionality of the pose-velocity-bias state
/// @brief State that consists of an SE(3) pose at a certain time.
template <class Scalar_>
struct PoseState {
  using Scalar = Scalar_;
  using VecN = Eigen::Matrix<Scalar, POSE_SIZE, 1>;
  using Vec6 = Eigen::Matrix<Scalar, 6, 1>;
  using SO3 = Sophus::SO3<Scalar>;
  using SE3 = Sophus::SE3<Scalar>;

  /// @brief Default constructor: identity pose, timestamp 0.
  PoseState() : t_ns(0) {}

  /// @brief Constructor with timestamp and pose.
  ///
  /// @param t_ns timestamp of the state in nanoseconds
  /// @param T_w_i transformation from the body frame to the world frame
  PoseState(int64_t t_ns, const SE3& T_w_i) : t_ns(t_ns), T_w_i(T_w_i) {}

  /// @brief Create a copy of this state with a different scalar type.
  template <class Scalar2>
  PoseState<Scalar2> cast() const {
    PoseState<Scalar2> res;
    res.t_ns = t_ns;
    res.T_w_i = T_w_i.template cast<Scalar2>();
    return res;
  }

  /// @brief Apply increment to the pose (see \ref incPose for details).
  ///
  /// @param[in] inc 6x1 increment vector
  void applyInc(const VecN& inc) { incPose(inc, T_w_i); }

  /// @brief Apply increment to a pose.
  ///
  /// The increment vector consists of translational and rotational parts \f$
  /// [\upsilon, \omega]^T \f$. Given the current pose \f$ R \in
  /// SO(3), p \in \mathbb{R}^3\f$ the updated pose is: \f{align}{ R' &=
  /// \exp(\omega) R
  /// \\ p' &= p + \upsilon
  /// \f}
  /// The increment is consistent with \ref
  /// Se3Spline::applyInc.
  ///
  /// @param[in] inc 6x1 increment vector
  /// @param[in,out] T the pose to update
  inline static void incPose(const Vec6& inc, SE3& T) {
    // Rotation and translation updates are independent of each other.
    T.so3() = SO3::exp(inc.template tail<3>()) * T.so3();
    T.translation() += inc.template head<3>();
  }

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  int64_t t_ns;  ///< timestamp of the state in nanoseconds
  SE3 T_w_i;     ///< pose of the state
};
/// @brief State that consists of SE(3) pose and linear velocity at a certain
/// time.
template <class Scalar_>
struct PoseVelState : public PoseState<Scalar_> {
  using Scalar = Scalar_;
  using VecN = Eigen::Matrix<Scalar, POSE_VEL_SIZE, 1>;
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
  using SE3 = Sophus::SE3<Scalar>;

  /// @brief Default constructor: identity pose, zero velocity.
  PoseVelState() { vel_w_i.setZero(); }

  /// @brief Constructor with timestamp, pose and linear velocity.
  ///
  /// @param t_ns timestamp of the state in nanoseconds
  /// @param T_w_i transformation from the body frame to the world frame
  /// @param vel_w_i linear velocity in world coordinate frame
  PoseVelState(int64_t t_ns, const SE3& T_w_i, const Vec3& vel_w_i)
      : PoseState<Scalar>(t_ns, T_w_i), vel_w_i(vel_w_i) {}

  /// @brief Create a copy of this state with a different scalar type.
  template <class Scalar2>
  PoseVelState<Scalar2> cast() const {
    PoseVelState<Scalar2> res;
    static_cast<PoseState<Scalar2>&>(res) =
        PoseState<Scalar>::template cast<Scalar2>();
    res.vel_w_i = vel_w_i.template cast<Scalar2>();
    return res;
  }

  /// @brief Apply increment to the state.
  ///
  /// For the pose part see \ref incPose; the velocity is updated by simple
  /// addition.
  /// @param[in] inc 9x1 increment vector [trans, rot, vel]
  void applyInc(const VecN& inc) {
    vel_w_i += inc.template tail<3>();
    PoseState<Scalar>::applyInc(inc.template head<6>());
  }

  /// @brief Compute difference to other state.
  ///
  /// The result `inc` satisfies `p0.applyInc(p0.diff(p1)) == p1`:
  /// ```
  /// PoseVelState::VecN inc;
  /// PoseVelState p0, p1;
  /// // Initialize p0 and inc
  /// p1 = p0;
  /// p1.applyInc(inc);
  /// p0.diff(p1) == inc; // Should be true.
  /// ```
  /// @param other state to compute difference.
  VecN diff(const PoseVelState<Scalar>& other) const {
    VecN d;
    d.template segment<3>(0) =
        other.T_w_i.translation() - this->T_w_i.translation();
    d.template segment<3>(3) =
        (other.T_w_i.so3() * this->T_w_i.so3().inverse()).log();
    d.template tail<3>() = other.vel_w_i - vel_w_i;
    return d;
  }

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  Vec3 vel_w_i;  ///< Linear velocity of the state
};
/// @brief State that consists of SE(3) pose, linear velocity, gyroscope and
/// accelerometer biases at a certain time.
template <class Scalar_>
struct PoseVelBiasState : public PoseVelState<Scalar_> {
  using Scalar = Scalar_;
  using Ptr = std::shared_ptr<PoseVelBiasState<Scalar>>;
  using VecN = Eigen::Matrix<Scalar, POSE_VEL_BIAS_SIZE, 1>;
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
  using SO3 = Sophus::SO3<Scalar>;
  using SE3 = Sophus::SE3<Scalar>;
  /// @brief Default constructor with Identity pose and zero other values.
  PoseVelBiasState() {
    bias_gyro.setZero();
    bias_accel.setZero();
  }
  /// @brief Constructor with timestamp, pose, linear velocity, gyroscope and
  /// accelerometer biases.
  ///
  /// @param t_ns timestamp of the state in nanoseconds
  /// @param T_w_i transformation from the body frame to the world frame
  /// @param vel_w_i linear velocity in world coordinate frame
  /// @param bias_gyro gyroscope bias
  /// @param bias_accel accelerometer bias
  PoseVelBiasState(int64_t t_ns, const SE3& T_w_i, const Vec3& vel_w_i,
                   const Vec3& bias_gyro, const Vec3& bias_accel)
      : PoseVelState<Scalar>(t_ns, T_w_i, vel_w_i),
        bias_gyro(bias_gyro),
        bias_accel(bias_accel) {}
  /// @brief Create copy with different Scalar type.
  template <class Scalar2>
  PoseVelBiasState<Scalar2> cast() const {
    PoseVelBiasState<Scalar2> a;
    static_cast<PoseVelState<Scalar2>&>(a) =
        PoseVelState<Scalar>::template cast<Scalar2>();
    a.bias_gyro = bias_gyro.template cast<Scalar2>();
    a.bias_accel = bias_accel.template cast<Scalar2>();
    return a;
  }
  /// @brief Apply increment to the state.
  ///
  /// For pose see \ref incPose. For velocity and biases simple addition.
  /// @param[in] inc 15x1 increment vector [trans, rot, vel, bias_gyro,
  /// bias_accel]
  void applyInc(const VecN& inc) {
    PoseVelState<Scalar>::applyInc(inc.template head<9>());
    bias_gyro += inc.template segment<3>(9);
    bias_accel += inc.template segment<3>(12);
  }
  /// @brief Compute difference to other state.
  ///
  /// ```
  /// PoseVelBiasState::VecN inc;
  /// PoseVelBiasState p0, p1;
  /// // Initialize p0 and inc
  /// p1 = p0;
  /// p1.applyInc(inc);
  /// p0.diff(p1) == inc; // Should be true.
  /// ```
  /// @param other state to compute difference.
  VecN diff(const PoseVelBiasState<Scalar>& other) const {
    VecN res;
    // Fix: the `template` disambiguator is required here because `res` has a
    // type dependent on `Scalar`; without it conforming compilers (GCC,
    // Clang) parse `segment<9>` as comparison expressions and fail. This
    // matches the sibling methods above, which already use `template`.
    res.template segment<9>(0) = PoseVelState<Scalar>::diff(other);
    res.template segment<3>(9) = other.bias_gyro - bias_gyro;
    res.template segment<3>(12) = other.bias_accel - bias_accel;
    return res;
  }
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  Vec3 bias_gyro;   ///< Gyroscope bias
  Vec3 bias_accel;  ///< Accelerometer bias
};
/// @brief Timestamped gyroscope and accelerometer measurements.
template <class Scalar_>
struct ImuData {
  using Scalar = Scalar_;
  using Ptr = std::shared_ptr<ImuData>;
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;

  int64_t t_ns;  ///< timestamp in nanoseconds
  Vec3 accel;    ///< Accelerometer measurement
  Vec3 gyro;     ///< Gyroscope measurement

  /// @brief Default constructor: zero timestamp and zero measurements.
  ImuData() : t_ns(0) {
    gyro.setZero();
    accel.setZero();
  }

  /// @brief Create a copy of this measurement with a different scalar type.
  template <class Scalar2>
  ImuData<Scalar2> cast() const {
    ImuData<Scalar2> res;
    res.t_ns = t_ns;
    res.gyro = gyro.template cast<Scalar2>();
    res.accel = accel.template cast<Scalar2>();
    return res;
  }

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
} // namespace basalt

View File

@@ -0,0 +1,371 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief IMU preintegration
*/
#pragma once
#include <cmath>
#include <limits>

#include <basalt/imu/imu_types.h>
#include <basalt/utils/assert.h>
#include <basalt/utils/sophus_utils.hpp>
namespace basalt {
/// @brief Integrated pseudo-measurement that combines several consecutive IMU
/// measurements.
template <class Scalar_>
class IntegratedImuMeasurement {
public:
using Scalar = Scalar_;
using Ptr = std::shared_ptr<IntegratedImuMeasurement<Scalar>>;
using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
using VecN = Eigen::Matrix<Scalar, POSE_VEL_SIZE, 1>;
using Mat3 = Eigen::Matrix<Scalar, 3, 3>;
using MatNN = Eigen::Matrix<Scalar, POSE_VEL_SIZE, POSE_VEL_SIZE>;
using MatN3 = Eigen::Matrix<Scalar, POSE_VEL_SIZE, 3>;
using MatN6 = Eigen::Matrix<Scalar, POSE_VEL_SIZE, 6>;
using SO3 = Sophus::SO3<Scalar>;
/// @brief Propagate current state given ImuData and optionally compute
/// Jacobians.
///
/// @param[in] curr_state current state
/// @param[in] data IMU data
/// @param[out] next_state predicted state
/// @param[out] d_next_d_curr Jacobian of the predicted state with respect
/// to current state
/// @param[out] d_next_d_accel Jacobian of the predicted state with respect
/// accelerometer measurement
/// @param[out] d_next_d_gyro Jacobian of the predicted state with respect
/// gyroscope measurement
inline static void propagateState(const PoseVelState<Scalar>& curr_state,
const ImuData<Scalar>& data,
PoseVelState<Scalar>& next_state,
MatNN* d_next_d_curr = nullptr,
MatN3* d_next_d_accel = nullptr,
MatN3* d_next_d_gyro = nullptr) {
BASALT_ASSERT_STREAM(
data.t_ns > curr_state.t_ns,
"data.t_ns " << data.t_ns << " curr_state.t_ns " << curr_state.t_ns);
int64_t dt_ns = data.t_ns - curr_state.t_ns;
Scalar dt = dt_ns * Scalar(1e-9);
SO3 R_w_i_new_2 =
curr_state.T_w_i.so3() * SO3::exp(Scalar(0.5) * dt * data.gyro);
Mat3 RR_w_i_new_2 = R_w_i_new_2.matrix();
Vec3 accel_world = RR_w_i_new_2 * data.accel;
next_state.t_ns = data.t_ns;
next_state.T_w_i.so3() = curr_state.T_w_i.so3() * SO3::exp(dt * data.gyro);
next_state.vel_w_i = curr_state.vel_w_i + accel_world * dt;
next_state.T_w_i.translation() = curr_state.T_w_i.translation() +
curr_state.vel_w_i * dt +
0.5 * accel_world * dt * dt;
if (d_next_d_curr) {
d_next_d_curr->setIdentity();
d_next_d_curr->template block<3, 3>(0, 6).diagonal().setConstant(dt);
d_next_d_curr->template block<3, 3>(6, 3) = SO3::hat(-accel_world * dt);
d_next_d_curr->template block<3, 3>(0, 3) =
d_next_d_curr->template block<3, 3>(6, 3) * dt * Scalar(0.5);
}
if (d_next_d_accel) {
d_next_d_accel->setZero();
d_next_d_accel->template block<3, 3>(0, 0) =
Scalar(0.5) * RR_w_i_new_2 * dt * dt;
d_next_d_accel->template block<3, 3>(6, 0) = RR_w_i_new_2 * dt;
}
if (d_next_d_gyro) {
d_next_d_gyro->setZero();
Mat3 Jr;
Sophus::rightJacobianSO3(dt * data.gyro, Jr);
Mat3 Jr2;
Sophus::rightJacobianSO3(Scalar(0.5) * dt * data.gyro, Jr2);
d_next_d_gyro->template block<3, 3>(3, 0) =
next_state.T_w_i.so3().matrix() * Jr * dt;
d_next_d_gyro->template block<3, 3>(6, 0) =
SO3::hat(-accel_world * dt) * RR_w_i_new_2 * Jr2 * Scalar(0.5) * dt;
d_next_d_gyro->template block<3, 3>(0, 0) =
Scalar(0.5) * dt * d_next_d_gyro->template block<3, 3>(6, 0);
}
}
/// @brief Default constructor.
IntegratedImuMeasurement() noexcept {
cov_.setZero();
d_state_d_ba_.setZero();
d_state_d_bg_.setZero();
bias_gyro_lin_.setZero();
bias_accel_lin_.setZero();
}
/// @brief Constructor with start time and bias estimates.
IntegratedImuMeasurement(int64_t start_t_ns, const Vec3& bias_gyro_lin,
const Vec3& bias_accel_lin) noexcept
: start_t_ns_(start_t_ns),
bias_gyro_lin_(bias_gyro_lin),
bias_accel_lin_(bias_accel_lin) {
cov_.setZero();
d_state_d_ba_.setZero();
d_state_d_bg_.setZero();
}
/// @brief Integrate IMU data
///
/// @param[in] data IMU data
/// @param[in] accel_cov diagonal of accelerometer noise covariance matrix
/// @param[in] gyro_cov diagonal of gyroscope noise covariance matrix
void integrate(const ImuData<Scalar>& data, const Vec3& accel_cov,
const Vec3& gyro_cov) {
ImuData<Scalar> data_corrected = data;
data_corrected.t_ns -= start_t_ns_;
data_corrected.accel -= bias_accel_lin_;
data_corrected.gyro -= bias_gyro_lin_;
PoseVelState<Scalar> new_state;
MatNN F;
MatN3 A;
MatN3 G;
propagateState(delta_state_, data_corrected, new_state, &F, &A, &G);
delta_state_ = new_state;
cov_ = F * cov_ * F.transpose() +
A * accel_cov.asDiagonal() * A.transpose() +
G * gyro_cov.asDiagonal() * G.transpose();
sqrt_cov_inv_computed_ = false;
d_state_d_ba_ = -A + F * d_state_d_ba_;
d_state_d_bg_ = -G + F * d_state_d_bg_;
}
/// @brief Predict state given this pseudo-measurement
///
/// @param[in] state0 current state
/// @param[in] g gravity vector
/// @param[out] state1 predicted state
void predictState(const PoseVelState<Scalar>& state0, const Vec3& g,
PoseVelState<Scalar>& state1) const {
Scalar dt = delta_state_.t_ns * Scalar(1e-9);
state1.T_w_i.so3() = state0.T_w_i.so3() * delta_state_.T_w_i.so3();
state1.vel_w_i =
state0.vel_w_i + g * dt + state0.T_w_i.so3() * delta_state_.vel_w_i;
state1.T_w_i.translation() =
state0.T_w_i.translation() + state0.vel_w_i * dt +
Scalar(0.5) * g * dt * dt +
state0.T_w_i.so3() * delta_state_.T_w_i.translation();
}
/// @brief Compute residual between two states given this pseudo-measurement
/// and optionally compute Jacobians.
///
/// @param[in] state0 initial state
/// @param[in] g gravity vector
/// @param[in] state1 next state
/// @param[in] curr_bg current estimate of gyroscope bias
/// @param[in] curr_ba current estimate of accelerometer bias
/// @param[out] d_res_d_state0 if not nullptr, Jacobian of the residual with
/// respect to state0
/// @param[out] d_res_d_state1 if not nullptr, Jacobian of the residual with
/// respect to state1
/// @param[out] d_res_d_bg if not nullptr, Jacobian of the residual with
/// respect to gyroscope bias
/// @param[out] d_res_d_ba if not nullptr, Jacobian of the residual with
/// respect to accelerometer bias
/// @return residual
VecN residual(const PoseVelState<Scalar>& state0, const Vec3& g,
const PoseVelState<Scalar>& state1, const Vec3& curr_bg,
const Vec3& curr_ba, MatNN* d_res_d_state0 = nullptr,
MatNN* d_res_d_state1 = nullptr, MatN3* d_res_d_bg = nullptr,
MatN3* d_res_d_ba = nullptr) const {
Scalar dt = delta_state_.t_ns * Scalar(1e-9);
VecN res;
VecN bg_diff;
VecN ba_diff;
bg_diff = d_state_d_bg_ * (curr_bg - bias_gyro_lin_);
ba_diff = d_state_d_ba_ * (curr_ba - bias_accel_lin_);
BASALT_ASSERT(ba_diff.template segment<3>(3).isApproxToConstant(0));
Mat3 R0_inv = state0.T_w_i.so3().inverse().matrix();
Vec3 tmp =
R0_inv * (state1.T_w_i.translation() - state0.T_w_i.translation() -
state0.vel_w_i * dt - Scalar(0.5) * g * dt * dt);
res.template segment<3>(0) =
tmp - (delta_state_.T_w_i.translation() +
bg_diff.template segment<3>(0) + ba_diff.template segment<3>(0));
res.template segment<3>(3) =
(SO3::exp(bg_diff.template segment<3>(3)) * delta_state_.T_w_i.so3() *
state1.T_w_i.so3().inverse() * state0.T_w_i.so3())
.log();
Vec3 tmp2 = R0_inv * (state1.vel_w_i - state0.vel_w_i - g * dt);
res.template segment<3>(6) =
tmp2 - (delta_state_.vel_w_i + bg_diff.template segment<3>(6) +
ba_diff.template segment<3>(6));
if (d_res_d_state0 || d_res_d_state1) {
Mat3 J;
Sophus::rightJacobianInvSO3(res.template segment<3>(3), J);
if (d_res_d_state0) {
d_res_d_state0->setZero();
d_res_d_state0->template block<3, 3>(0, 0) = -R0_inv;
d_res_d_state0->template block<3, 3>(0, 3) = SO3::hat(tmp) * R0_inv;
d_res_d_state0->template block<3, 3>(3, 3) = J * R0_inv;
d_res_d_state0->template block<3, 3>(6, 3) = SO3::hat(tmp2) * R0_inv;
d_res_d_state0->template block<3, 3>(0, 6) = -R0_inv * dt;
d_res_d_state0->template block<3, 3>(6, 6) = -R0_inv;
}
if (d_res_d_state1) {
d_res_d_state1->setZero();
d_res_d_state1->template block<3, 3>(0, 0) = R0_inv;
d_res_d_state1->template block<3, 3>(3, 3) = -J * R0_inv;
d_res_d_state1->template block<3, 3>(6, 6) = R0_inv;
}
}
if (d_res_d_ba) {
*d_res_d_ba = -d_state_d_ba_;
}
if (d_res_d_bg) {
d_res_d_bg->setZero();
*d_res_d_bg = -d_state_d_bg_;
Mat3 J;
Sophus::leftJacobianInvSO3(res.template segment<3>(3), J);
d_res_d_bg->template block<3, 3>(3, 0) =
J * d_state_d_bg_.template block<3, 3>(3, 0);
}
return res;
}
/// @brief Time duretion of preintegrated measurement in nanoseconds.
int64_t get_dt_ns() const { return delta_state_.t_ns; }
/// @brief Start time of preintegrated measurement in nanoseconds.
int64_t get_start_t_ns() const { return start_t_ns_; }
/// @brief Inverse of the measurement covariance matrix
///
/// Computed lazily on first access from the cached square-root factor S,
/// returning S^T S. Marked-mutable cache members allow this to be const.
inline MatNN get_cov_inv() const {
if (!sqrt_cov_inv_computed_) {
compute_sqrt_cov_inv();
sqrt_cov_inv_computed_ = true;
}
return sqrt_cov_inv_.transpose() * sqrt_cov_inv_;
}
/// @brief Square root inverse of the measurement covariance matrix
///
/// Lazily computes and caches the factor; subsequent calls return the cache.
inline const MatNN& get_sqrt_cov_inv() const {
if (!sqrt_cov_inv_computed_) {
compute_sqrt_cov_inv();
sqrt_cov_inv_computed_ = true;
}
return sqrt_cov_inv_;
}
/// @brief Measurement covariance matrix (as accumulated during integration).
const MatNN& get_cov() const { return cov_; }
// Just for testing...
/// @brief Delta state accumulated by the preintegration.
const PoseVelState<Scalar>& getDeltaState() const { return delta_state_; }
/// @brief Jacobian of delta state with respect to accelerometer bias
const MatN3& get_d_state_d_ba() const { return d_state_d_ba_; }
/// @brief Jacobian of delta state with respect to gyroscope bias
const MatN3& get_d_state_d_bg() const { return d_state_d_bg_; }
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
private:
/// @brief Helper function to compute square root of the inverse covariance
///
/// Uses the LDLT factorization cov_ = P^T L D L^T P and forms
/// S = D^{-1/2} L^{-1} P, so that S^T S = cov_^{-1}. Diagonal entries that
/// are numerically zero (or negative) are mapped to zero, which yields a
/// pseudo-inverse for a rank-deficient covariance instead of producing inf.
void compute_sqrt_cov_inv() const {
sqrt_cov_inv_.setIdentity();
auto ldlt = cov_.ldlt();
// Apply the permutation P, then solve with the unit lower-triangular L
// in place, so sqrt_cov_inv_ now holds L^{-1} P.
sqrt_cov_inv_ = ldlt.transpositionsP() * sqrt_cov_inv_;
ldlt.matrixL().solveInPlace(sqrt_cov_inv_);
VecN D_inv_sqrt;
for (size_t i = 0; i < POSE_VEL_SIZE; i++) {
if (ldlt.vectorD()[i] < std::numeric_limits<Scalar>::min()) {
D_inv_sqrt[i] = 0;
} else {
D_inv_sqrt[i] = Scalar(1.0) / sqrt(ldlt.vectorD()[i]);
}
}
// Scale the rows by D^{-1/2} to finish S = D^{-1/2} L^{-1} P.
sqrt_cov_inv_ = D_inv_sqrt.asDiagonal() * sqrt_cov_inv_;
}
int64_t start_t_ns_{0}; ///< Integration start time in nanoseconds
PoseVelState<Scalar> delta_state_; ///< Delta state
MatNN cov_; ///< Measurement covariance
mutable MatNN
sqrt_cov_inv_; ///< Cached square root inverse of measurement covariance
mutable bool sqrt_cov_inv_computed_{
false}; ///< If the cached square root inverse
///< covariance is computed
MatN3 d_state_d_ba_, d_state_d_bg_; ///< Jacobians of delta state w.r.t. accelerometer / gyroscope bias
Vec3 bias_gyro_lin_, bias_accel_lin_; ///< Bias values used during integration (linearization points) — NOTE(review): usage not visible in this chunk; confirm against the integration code
};
} // namespace basalt

View File

@@ -0,0 +1,211 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Serialization for Eigen and Sophus types
*/
#pragma once
#include <Eigen/Dense>
#include <sophus/se3.hpp>
#include <sophus/sim3.hpp>
#include <cereal/archives/binary.hpp>
#include <cereal/archives/json.hpp>
namespace cereal {
// NOTE: Serialization functions for non-basalt types (for now Eigen and Sophus)
// are provided in a separate header to make them available to other libraries
// depending on basalt-headers. Beware that it is not possible to have different
// and incompatible definitions for these same types in other libraries or
// executables that also include basalt-headers, as this would lead to undefined
// behaviour. See
// https://groups.google.com/d/topic/cerealcpp/WswQi_Sh-bw/discussion for a more
// detailed discussion and possible workarounds.
// For binary-archives, don't save a size tag for compact representation.
template <class Archive, class _Scalar, int _Rows, int _Cols, int _Options,
          int _MaxRows, int _MaxCols>
std::enable_if_t<(_Rows > 0) && (_Cols > 0) &&
                 !traits::is_text_archive<Archive>::value>
serialize(
    Archive& archive,
    Eigen::Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& m) {
  // Element-wise, row-by-row traversal; the same code path serves both
  // loading and saving since cereal's serialize() is bidirectional.
  for (int r = 0; r < _Rows; ++r) {
    for (int c = 0; c < _Cols; ++c) {
      archive(m(r, c));
    }
  }
  // Note: if we can break binary compatibility, we might want to consider the
  // following. However, this would only work for compact Scalar types that can
  // be serialized / deserialized by memcopy, such as double.
  // archive(binary_data(m.data(), _Rows * _Cols * sizeof(_Scalar)));
}
// For text-archives, save size-tag even for constant size matrices, to ensure
// that the serialization is more compact (e.g. for JSON with size tag is uses a
// simple array, whereas without, it stores a list of pairs like ("value0", v0).
template <class Archive, class _Scalar, int _Rows, int _Cols, int _Options,
int _MaxRows, int _MaxCols>
std::enable_if_t<(_Rows > 0) && (_Cols > 0) &&
traits::is_text_archive<Archive>::value>
serialize(
Archive& archive,
Eigen::Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& m) {
size_type s = static_cast<size_type>(_Rows * _Cols);
archive(make_size_tag(s));
if (s != _Rows * _Cols) {
throw std::runtime_error("matrix has incorrect length");
}
for (size_t i = 0; i < _Rows; i++) {
for (size_t j = 0; j < _Cols; j++) {
archive(m(i, j));
}
}
}
template <class Archive, class _Scalar, int _Cols, int _Options, int _MaxRows,
          int _MaxCols>
std::enable_if_t<(_Cols > 0), void> save(
    Archive& archive, const Eigen::Matrix<_Scalar, Eigen::Dynamic, _Cols,
                                          _Options, _MaxRows, _MaxCols>& m) {
  // Store the total element count, then the elements row by row.
  archive(make_size_tag(static_cast<size_type>(m.size())));
  const int rows = m.rows();
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < _Cols; ++c) {
      archive(m(r, c));
    }
  }
}
template <class Archive, class _Scalar, int _Cols, int _Options, int _MaxRows,
int _MaxCols>
std::enable_if_t<(_Cols > 0), void> load(
Archive& archive,
Eigen::Matrix<_Scalar, Eigen::Dynamic, _Cols, _Options, _MaxRows, _MaxCols>&
m) {
size_type size;
archive(make_size_tag(size));
m.resize(Eigen::Index(size) / _Cols, _Cols);
for (int i = 0; i < m.rows(); i++) {
for (int j = 0; j < _Cols; j++) {
archive(m(i, j));
}
}
}
template <class Archive, class _Scalar, int _Rows, int _Options, int _MaxRows,
          int _MaxCols>
std::enable_if_t<(_Rows > 0), void> save(
    Archive& archive, const Eigen::Matrix<_Scalar, _Rows, Eigen::Dynamic,
                                          _Options, _MaxRows, _MaxCols>& m) {
  // Store the total element count, then the elements row by row.
  archive(make_size_tag(static_cast<size_type>(m.size())));
  const int cols = m.cols();
  for (int r = 0; r < _Rows; ++r) {
    for (int c = 0; c < cols; ++c) {
      archive(m(r, c));
    }
  }
}
template <class Archive, class _Scalar, int _Rows, int _Options, int _MaxRows,
int _MaxCols>
std::enable_if_t<(_Rows > 0), void> load(
Archive& archive,
Eigen::Matrix<_Scalar, _Rows, Eigen::Dynamic, _Options, _MaxRows, _MaxCols>&
m) {
size_type size;
archive(make_size_tag(size));
m.resize(_Rows, Eigen::Index(size) / _Rows);
for (int i = 0; i < _Rows; i++) {
for (int j = 0; j < m.cols(); j++) {
archive(m(i, j));
}
}
}
template <class Archive, class _Scalar, int _Options, int _MaxRows,
          int _MaxCols>
void save(Archive& archive,
          const Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic, _Options,
                              _MaxRows, _MaxCols>& m) {
  // Both dimensions are dynamic, so store row and column counts as two
  // separate size tags before the row-major element stream.
  archive(make_size_tag(static_cast<size_type>(m.rows())));
  archive(make_size_tag(static_cast<size_type>(m.cols())));
  for (int r = 0; r < m.rows(); ++r) {
    for (int c = 0; c < m.cols(); ++c) {
      archive(m(r, c));
    }
  }
}
template <class Archive, class _Scalar, int _Options, int _MaxRows,
          int _MaxCols>
void load(Archive& archive,
          Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic, _Options,
                        _MaxRows, _MaxCols>& m) {
  // Read back the two dimension tags, resize, then fill row by row.
  size_type rows;
  size_type cols;
  archive(make_size_tag(rows));
  archive(make_size_tag(cols));
  m.resize(rows, cols);
  for (int r = 0; r < m.rows(); ++r) {
    for (int c = 0; c < m.cols(); ++c) {
      archive(m(r, c));
    }
  }
}
template <class Archive>
void serialize(Archive& ar, Sophus::SE3d& p) {
  // Translation is stored as (px, py, pz); the rotation quaternion is stored
  // component-wise via the underlying SO3 data array (indices 0..3 map to the
  // names qx, qy, qz, qw below).
  auto& t = p.translation();
  double* q = p.so3().data();
  ar(cereal::make_nvp("px", t[0]), cereal::make_nvp("py", t[1]),
     cereal::make_nvp("pz", t[2]), cereal::make_nvp("qx", q[0]),
     cereal::make_nvp("qy", q[1]), cereal::make_nvp("qz", q[2]),
     cereal::make_nvp("qw", q[3]));
}
template <class Archive>
void serialize(Archive& ar, Sophus::Sim3d& p) {
  // Translation as (px, py, pz); the RxSO3 part (rotation + scale) is stored
  // component-wise via its data array (indices 0..3 map to qx, qy, qz, qw).
  auto& t = p.translation();
  double* q = p.rxso3().data();
  ar(cereal::make_nvp("px", t[0]), cereal::make_nvp("py", t[1]),
     cereal::make_nvp("pz", t[2]), cereal::make_nvp("qx", q[0]),
     cereal::make_nvp("qy", q[1]), cereal::make_nvp("qz", q[2]),
     cereal::make_nvp("qw", q[3]));
}
} // namespace cereal

View File

@@ -0,0 +1,266 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Serialization for basalt types
*/
#pragma once
#include <basalt/serialization/eigen_io.h>
#include <basalt/calibration/calibration.hpp>
#include <basalt/camera/bal_camera.hpp>
#include <cereal/archives/binary.hpp>
#include <cereal/archives/json.hpp>
#include <cereal/types/deque.hpp>
#include <cereal/types/memory.hpp>
#include <cereal/types/string.hpp>
#include <cereal/types/vector.hpp>
namespace cereal {
template <class Archive, class Scalar>
inline void save(Archive& ar, const basalt::GenericCamera<Scalar>& cam) {
  // Dispatch on the concrete model held by the variant; store its type name
  // followed by the model-specific intrinsics. The lambda parameter is named
  // differently from the outer argument to avoid shadowing.
  std::visit(
      [&](const auto& concrete) {
        ar(cereal::make_nvp("camera_type", concrete.getName()));
        ar(cereal::make_nvp("intrinsics", concrete));
      },
      cam.variant);
}
template <class Archive, class Scalar>
inline void load(Archive& ar, basalt::GenericCamera<Scalar>& cam) {
  // Read the stored type name, construct a camera of that model, then fill
  // in its intrinsics through the variant.
  std::string cam_type;
  ar(cereal::make_nvp("camera_type", cam_type));
  cam = basalt::GenericCamera<Scalar>::fromString(cam_type);
  std::visit(
      [&](auto& concrete) { ar(cereal::make_nvp("intrinsics", concrete)); },
      cam.variant);
}
template <class Archive, class Scalar>
inline void save(Archive& ar, const basalt::KannalaBrandtCamera4<Scalar>& cam) {
  // Store the 8 intrinsic parameters under human-readable names.
  const auto& p = cam.getParam();
  ar(cereal::make_nvp("fx", p[0]), cereal::make_nvp("fy", p[1]),
     cereal::make_nvp("cx", p[2]), cereal::make_nvp("cy", p[3]),
     cereal::make_nvp("k1", p[4]), cereal::make_nvp("k2", p[5]),
     cereal::make_nvp("k3", p[6]), cereal::make_nvp("k4", p[7]));
}
template <class Archive, class Scalar>
inline void load(Archive& ar, basalt::KannalaBrandtCamera4<Scalar>& cam) {
Eigen::Matrix<Scalar, 8, 1> intr;
ar(cereal::make_nvp("fx", intr[0]), cereal::make_nvp("fy", intr[1]),
cereal::make_nvp("cx", intr[2]), cereal::make_nvp("cy", intr[3]),
cereal::make_nvp("k1", intr[4]), cereal::make_nvp("k2", intr[5]),
cereal::make_nvp("k3", intr[6]), cereal::make_nvp("k4", intr[7]));
cam = basalt::KannalaBrandtCamera4<Scalar>(intr);
}
template <class Archive, class Scalar>
inline void save(Archive& ar,
                 const basalt::ExtendedUnifiedCamera<Scalar>& cam) {
  // Store the 6 intrinsic parameters under human-readable names.
  const auto& p = cam.getParam();
  ar(cereal::make_nvp("fx", p[0]), cereal::make_nvp("fy", p[1]),
     cereal::make_nvp("cx", p[2]), cereal::make_nvp("cy", p[3]),
     cereal::make_nvp("alpha", p[4]), cereal::make_nvp("beta", p[5]));
}
template <class Archive, class Scalar>
inline void load(Archive& ar, basalt::ExtendedUnifiedCamera<Scalar>& cam) {
Eigen::Matrix<Scalar, 6, 1> intr;
ar(cereal::make_nvp("fx", intr[0]), cereal::make_nvp("fy", intr[1]),
cereal::make_nvp("cx", intr[2]), cereal::make_nvp("cy", intr[3]),
cereal::make_nvp("alpha", intr[4]), cereal::make_nvp("beta", intr[5]));
cam = basalt::ExtendedUnifiedCamera<Scalar>(intr);
}
template <class Archive, class Scalar>
inline void save(Archive& ar, const basalt::UnifiedCamera<Scalar>& cam) {
  // Store the 5 intrinsic parameters under human-readable names.
  const auto& p = cam.getParam();
  ar(cereal::make_nvp("fx", p[0]), cereal::make_nvp("fy", p[1]),
     cereal::make_nvp("cx", p[2]), cereal::make_nvp("cy", p[3]),
     cereal::make_nvp("alpha", p[4]));
}
template <class Archive, class Scalar>
inline void load(Archive& ar, basalt::UnifiedCamera<Scalar>& cam) {
Eigen::Matrix<Scalar, 5, 1> intr;
ar(cereal::make_nvp("fx", intr[0]), cereal::make_nvp("fy", intr[1]),
cereal::make_nvp("cx", intr[2]), cereal::make_nvp("cy", intr[3]),
cereal::make_nvp("alpha", intr[4]));
cam = basalt::UnifiedCamera<Scalar>(intr);
}
template <class Archive, class Scalar>
inline void save(Archive& ar, const basalt::PinholeCamera<Scalar>& cam) {
  // Store the 4 intrinsic parameters under human-readable names.
  const auto& p = cam.getParam();
  ar(cereal::make_nvp("fx", p[0]), cereal::make_nvp("fy", p[1]),
     cereal::make_nvp("cx", p[2]), cereal::make_nvp("cy", p[3]));
}
template <class Archive, class Scalar>
inline void load(Archive& ar, basalt::PinholeCamera<Scalar>& cam) {
Eigen::Matrix<Scalar, 4, 1> intr;
ar(cereal::make_nvp("fx", intr[0]), cereal::make_nvp("fy", intr[1]),
cereal::make_nvp("cx", intr[2]), cereal::make_nvp("cy", intr[3]));
cam = basalt::PinholeCamera<Scalar>(intr);
}
template <class Archive, class Scalar>
inline void save(Archive& ar, const basalt::DoubleSphereCamera<Scalar>& cam) {
  // Store the 6 intrinsic parameters under human-readable names.
  const auto& p = cam.getParam();
  ar(cereal::make_nvp("fx", p[0]), cereal::make_nvp("fy", p[1]),
     cereal::make_nvp("cx", p[2]), cereal::make_nvp("cy", p[3]),
     cereal::make_nvp("xi", p[4]), cereal::make_nvp("alpha", p[5]));
}
template <class Archive, class Scalar>
inline void load(Archive& ar, basalt::DoubleSphereCamera<Scalar>& cam) {
Eigen::Matrix<Scalar, 6, 1> intr;
ar(cereal::make_nvp("fx", intr[0]), cereal::make_nvp("fy", intr[1]),
cereal::make_nvp("cx", intr[2]), cereal::make_nvp("cy", intr[3]),
cereal::make_nvp("xi", intr[4]), cereal::make_nvp("alpha", intr[5]));
cam = basalt::DoubleSphereCamera<Scalar>(intr);
}
template <class Archive, class Scalar>
inline void save(Archive& ar, const basalt::FovCamera<Scalar>& cam) {
  // Store the 5 intrinsic parameters under human-readable names.
  const auto& p = cam.getParam();
  ar(cereal::make_nvp("fx", p[0]), cereal::make_nvp("fy", p[1]),
     cereal::make_nvp("cx", p[2]), cereal::make_nvp("cy", p[3]),
     cereal::make_nvp("w", p[4]));
}
template <class Archive, class Scalar>
inline void load(Archive& ar, basalt::FovCamera<Scalar>& cam) {
Eigen::Matrix<Scalar, 5, 1> intr;
ar(cereal::make_nvp("fx", intr[0]), cereal::make_nvp("fy", intr[1]),
cereal::make_nvp("cx", intr[2]), cereal::make_nvp("cy", intr[3]),
cereal::make_nvp("w", intr[4]));
cam = basalt::FovCamera<Scalar>(intr);
}
template <class Archive, class Scalar>
inline void save(Archive& ar, const basalt::BalCamera<Scalar>& cam) {
  // Store the 3 intrinsic parameters under human-readable names.
  const auto& p = cam.getParam();
  ar(cereal::make_nvp("f", p[0]), cereal::make_nvp("k1", p[1]),
     cereal::make_nvp("k2", p[2]));
}
template <class Archive, class Scalar>
inline void load(Archive& ar, basalt::BalCamera<Scalar>& cam) {
Eigen::Matrix<Scalar, 3, 1> intr;
ar(cereal::make_nvp("f", intr[0]), cereal::make_nvp("k1", intr[1]),
cereal::make_nvp("k2", intr[2]));
cam = basalt::BalCamera<Scalar>(intr);
}
/// @brief Save an RdSpline: start time (ns), knot spacing (ns), then the
/// knot sequence. The call order defines the wire format and must match
/// the corresponding load() below.
template <class Archive, class Scalar, int DIM, int ORDER>
inline void save(Archive& ar,
const basalt::RdSpline<DIM, ORDER, Scalar>& spline) {
ar(spline.minTimeNs());
ar(spline.getTimeIntervalNs());
ar(spline.getKnots());
}
template <class Archive, class Scalar, int DIM, int ORDER>
inline void load(Archive& ar, basalt::RdSpline<DIM, ORDER, Scalar>& spline) {
int64_t start_t_ns;
int64_t dt_ns;
Eigen::aligned_deque<Eigen::Matrix<Scalar, DIM, 1>> knots;
ar(start_t_ns);
ar(dt_ns);
ar(knots);
basalt::RdSpline<DIM, ORDER, Scalar> new_spline(dt_ns, start_t_ns);
for (const auto& k : knots) {
new_spline.knotsPushBack(k);
}
spline = new_spline;
}
template <class Archive, class Scalar>
inline void serialize(Archive& ar, basalt::Calibration<Scalar>& cam) {
ar(cereal::make_nvp("T_imu_cam", cam.T_i_c),
cereal::make_nvp("intrinsics", cam.intrinsics),
cereal::make_nvp("resolution", cam.resolution),
cereal::make_nvp("calib_accel_bias", cam.calib_accel_bias.getParam()),
cereal::make_nvp("calib_gyro_bias", cam.calib_gyro_bias.getParam()),
cereal::make_nvp("imu_update_rate", cam.imu_update_rate),
cereal::make_nvp("accel_noise_std", cam.accel_noise_std),
cereal::make_nvp("gyro_noise_std", cam.gyro_noise_std),
cereal::make_nvp("accel_bias_std", cam.accel_bias_std),
cereal::make_nvp("gyro_bias_std", cam.gyro_bias_std),
cereal::make_nvp("cam_time_offset_ns", cam.cam_time_offset_ns),
cereal::make_nvp("vignette", cam.vignette));
}
template <class Archive, class Scalar>
inline void serialize(Archive& ar, basalt::MocapCalibration<Scalar>& cam) {
ar(cereal::make_nvp("T_mocap_world", cam.T_moc_w),
cereal::make_nvp("T_imu_marker", cam.T_i_mark),
cereal::make_nvp("mocap_time_offset_ns", cam.mocap_time_offset_ns),
cereal::make_nvp("mocap_to_imu_offset_ns", cam.mocap_to_imu_offset_ns));
}
} // namespace cereal

View File

@@ -0,0 +1,115 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Generic local parametrization for Sophus Lie group types to be used with
ceres.
*/
/**
File adapted from Sophus
Copyright 2011-2017 Hauke Strasdat
2012-2017 Steven Lovegrove
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*/
#pragma once
#include <ceres/local_parameterization.h>
#include <sophus/se3.hpp>
namespace basalt {
/// @brief Local parametrization for ceres that can be used with Sophus Lie
/// group implementations.
///
/// @tparam Groupd a double-precision Sophus group type (e.g. Sophus::SE3d)
/// providing num_parameters, DoF, exp() and Dx_this_mul_exp_x_at_0().
template <class Groupd>
class LieLocalParameterization : public ceres::LocalParameterization {
 public:
  // modernize: defaulted virtual destructor, override on all virtuals
  // (matches the project's clang-tidy modernize-* configuration).
  ~LieLocalParameterization() override = default;

  using Tangentd = typename Groupd::Tangent;

  /// @brief plus operation for Ceres
  ///
  /// T * exp(x)
  ///
  bool Plus(double const* T_raw, double const* delta_raw,
            double* T_plus_delta_raw) const override {
    Eigen::Map<Groupd const> const T(T_raw);
    Eigen::Map<Tangentd const> const delta(delta_raw);
    Eigen::Map<Groupd> T_plus_delta(T_plus_delta_raw);
    T_plus_delta = T * Groupd::exp(delta);
    return true;
  }

  ///@brief Jacobian of plus operation for Ceres
  ///
  /// Dx T * exp(x) with x=0
  ///
  bool ComputeJacobian(double const* T_raw,
                       double* jacobian_raw) const override {
    Eigen::Map<Groupd const> T(T_raw);
    // Ceres expects a row-major (num_parameters x DoF) Jacobian buffer.
    Eigen::Map<Eigen::Matrix<double, Groupd::num_parameters, Groupd::DoF,
                             Eigen::RowMajor>>
        jacobian(jacobian_raw);
    jacobian = T.Dx_this_mul_exp_x_at_0();
    return true;
  }

  ///@brief Global size (number of parameters of the group representation)
  int GlobalSize() const override { return Groupd::num_parameters; }

  ///@brief Local size (degrees of freedom of the group)
  int LocalSize() const override { return Groupd::DoF; }
};
} // namespace basalt

View File

@@ -0,0 +1,236 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Helper for implementing Lie group and Euclidean b-splines in ceres.
*/
#pragma once
#include <basalt/spline/spline_common.h>
#include <Eigen/Dense>
namespace basalt {
/// @brief Helper for implementing Lie group and Euclidean b-splines in ceres of
/// order N
///
/// All members are static: the struct acts as an order-parameterized
/// namespace, with shared precomputed blending/coefficient matrices.
///
/// See [[arXiv:1911.08860]](https://arxiv.org/abs/1911.08860) for more details.
template <int _N>
struct CeresSplineHelper {
static constexpr int N = _N; // Order of the spline.
static constexpr int DEG = _N - 1; // Degree of the spline.
using MatN = Eigen::Matrix<double, _N, _N>;
using VecN = Eigen::Matrix<double, _N, 1>;
static const MatN blending_matrix_; ///< Blending matrix (Euclidean form).
static const MatN cumulative_blending_matrix_; ///< Cumulative blending matrix (Lie group form).
static const MatN base_coefficients_; ///< Polynomial base coefficients for all derivative orders.
/// @brief Vector of derivatives of time polynomial.
///
/// Computes a derivative of \f$ \begin{bmatrix}1 & t & t^2 & \dots &
/// t^{N-1}\end{bmatrix} \f$ with respect to time. For example, the first
/// derivative would be \f$ \begin{bmatrix}0 & 1 & 2 t & \dots & (N-1)
/// t^{N-2}\end{bmatrix} \f$.
/// @param Derivative derivative to evaluate
/// @param[out] res_const vector to store the result
/// @param[in] t time to evaluate the polynomial at
template <int Derivative, class Derived>
static inline void baseCoeffsWithTime(
const Eigen::MatrixBase<Derived>& res_const, double t) {
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived, N);
// Standard Eigen output-parameter idiom: accept a const reference so
// expression types bind, then cast away constness to write the result.
Eigen::MatrixBase<Derived>& res =
const_cast<Eigen::MatrixBase<Derived>&>(res_const);
res.setZero();
// Derivatives of order >= N are identically zero, so res stays zero then.
if (Derivative < N) {
res[Derivative] = base_coefficients_(Derivative, Derivative);
double _t = t;
for (int j = Derivative + 1; j < N; j++) {
res[j] = base_coefficients_(Derivative, j) * _t;
_t = _t * t;
}
}
}
/// @brief Evaluate Lie group cumulative B-spline and time derivatives.
///
/// @param[in] sKnots array of pointers of the spline knots. The size of each
/// knot should be GroupT::num_parameters: 4 for SO(3) and 7 for SE(3).
/// @param[in] u normalized time to compute value of the spline
/// @param[in] inv_dt inverse of the time spacing in seconds between spline
/// knots
/// @param[out] transform_out if not nullptr return the value of the spline
/// @param[out] vel_out if not nullptr velocity (first time derivative) in the
/// body frame
/// @param[out] accel_out if not nullptr acceleration (second time derivative)
/// in the body frame
/// @param[out] jerk_out if not nullptr jerk (third time derivative) in the
/// body frame
template <class T, template <class> class GroupT>
static inline void evaluate_lie(
T const* const* sKnots, const double u, const double inv_dt,
GroupT<T>* transform_out = nullptr,
typename GroupT<T>::Tangent* vel_out = nullptr,
typename GroupT<T>::Tangent* accel_out = nullptr,
typename GroupT<T>::Tangent* jerk_out = nullptr) {
using Group = GroupT<T>;
using Tangent = typename GroupT<T>::Tangent;
using Adjoint = typename GroupT<T>::Adjoint;
// Cumulative blending coefficients of the time polynomial and of as many
// of its time derivatives as the requested outputs need; each derivative
// order picks up one factor of inv_dt.
VecN p, coeff, dcoeff, ddcoeff, dddcoeff;
CeresSplineHelper<N>::template baseCoeffsWithTime<0>(p, u);
coeff = CeresSplineHelper<N>::cumulative_blending_matrix_ * p;
if (vel_out || accel_out || jerk_out) {
CeresSplineHelper<N>::template baseCoeffsWithTime<1>(p, u);
dcoeff = inv_dt * CeresSplineHelper<N>::cumulative_blending_matrix_ * p;
if (accel_out || jerk_out) {
CeresSplineHelper<N>::template baseCoeffsWithTime<2>(p, u);
ddcoeff = inv_dt * inv_dt *
CeresSplineHelper<N>::cumulative_blending_matrix_ * p;
if (jerk_out) {
CeresSplineHelper<N>::template baseCoeffsWithTime<3>(p, u);
dddcoeff = inv_dt * inv_dt * inv_dt *
CeresSplineHelper<N>::cumulative_blending_matrix_ * p;
}
}
}
// The cumulative form starts from the first knot ...
if (transform_out) {
Eigen::Map<Group const> const p00(sKnots[0]);
*transform_out = p00;
}
Tangent rot_vel, rot_accel, rot_jerk;
if (vel_out || accel_out || jerk_out) rot_vel.setZero();
if (accel_out || jerk_out) rot_accel.setZero();
if (jerk_out) rot_jerk.setZero();
// ... and accumulates weighted relative increments between consecutive
// knots, propagating the derivative terms through the adjoint of each
// factor and Lie brackets for the higher derivatives.
for (int i = 0; i < DEG; i++) {
Eigen::Map<Group const> const p0(sKnots[i]);
Eigen::Map<Group const> const p1(sKnots[i + 1]);
Group r01 = p0.inverse() * p1;
Tangent delta = r01.log();
Group exp_kdelta = Group::exp(delta * coeff[i + 1]);
if (transform_out) (*transform_out) *= exp_kdelta;
if (vel_out || accel_out || jerk_out) {
Adjoint A = exp_kdelta.inverse().Adj();
rot_vel = A * rot_vel;
Tangent rot_vel_current = delta * dcoeff[i + 1];
rot_vel += rot_vel_current;
if (accel_out || jerk_out) {
rot_accel = A * rot_accel;
Tangent accel_lie_bracket =
Group::lieBracket(rot_vel, rot_vel_current);
rot_accel += ddcoeff[i + 1] * delta + accel_lie_bracket;
if (jerk_out) {
rot_jerk = A * rot_jerk;
rot_jerk += dddcoeff[i + 1] * delta +
Group::lieBracket(ddcoeff[i + 1] * rot_vel +
2 * dcoeff[i + 1] * rot_accel -
dcoeff[i + 1] * accel_lie_bracket,
delta);
}
}
}
}
if (vel_out) *vel_out = rot_vel;
if (accel_out) *accel_out = rot_accel;
if (jerk_out) *jerk_out = rot_jerk;
}
/// @brief Evaluate Euclidean B-spline or time derivatives.
///
/// @param[in] sKnots array of pointers of the spline knots. The size of each
/// knot should be DIM.
/// @param[in] u normalized time to compute value of the spline
/// @param[in] inv_dt inverse of the time spacing in seconds between spline
/// knots
/// @param[out] vec_out if DERIV=0 returns value of the spline, otherwise
/// corresponding derivative.
template <class T, int DIM, int DERIV>
static inline void evaluate(T const* const* sKnots, const double u,
const double inv_dt,
Eigen::Matrix<T, DIM, 1>* vec_out) {
if (!vec_out) return;
using VecD = Eigen::Matrix<T, DIM, 1>;
VecN p, coeff;
CeresSplineHelper<N>::template baseCoeffsWithTime<DERIV>(p, u);
// Each time derivative contributes one 1/dt factor, hence inv_dt^DERIV.
coeff =
std::pow(inv_dt, DERIV) * CeresSplineHelper<N>::blending_matrix_ * p;
// The spline value is a weighted sum of the N control points.
vec_out->setZero();
for (int i = 0; i < N; i++) {
Eigen::Map<VecD const> const p(sKnots[i]);
(*vec_out) += coeff[i] * p;
}
}
};
// Out-of-class definitions of the static matrices; each is computed once per
// instantiated spline order N by the helpers from spline_common.h.
template <int _N>
const typename CeresSplineHelper<_N>::MatN
CeresSplineHelper<_N>::base_coefficients_ =
basalt::computeBaseCoefficients<_N, double>();
template <int _N>
const typename CeresSplineHelper<_N>::MatN
CeresSplineHelper<_N>::blending_matrix_ =
basalt::computeBlendingMatrix<_N, double, false>();
template <int _N>
const typename CeresSplineHelper<_N>::MatN
CeresSplineHelper<_N>::cumulative_blending_matrix_ =
basalt::computeBlendingMatrix<_N, double, true>();
} // namespace basalt

View File

@@ -0,0 +1,363 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Uniform B-spline for euclidean vectors
*/
#pragma once
#include <basalt/spline/spline_common.h>
#include <basalt/utils/assert.h>
#include <basalt/utils/sophus_utils.hpp>
#include <Eigen/Dense>
#include <array>
namespace basalt {
/// @brief Uniform B-spline for Euclidean vectors with dimension DIM of order
/// N
///
/// For example, in the particular case scalar values and order N=5, for a time
/// \f$t \in [t_i, t_{i+1})\f$ the value of \f$p(t)\f$ depends only on 5 control
/// points at \f$[t_i, t_{i+1}, t_{i+2}, t_{i+3}, t_{i+4}]\f$. To
/// simplify calculations we transform time to uniform representation \f$s(t) =
/// (t - t_0)/\Delta t \f$, such that control points transform into \f$ s_i \in
/// [0,..,N] \f$. We define function \f$ u(t) = s(t)-s_i \f$ to be a time since
/// the start of the segment. Following the matrix representation of De Boor -
/// Cox formula, the value of the function can be
/// evaluated as follows: \f{align}{
/// p(u(t)) &=
/// \begin{pmatrix} p_{i}\\ p_{i+1}\\ p_{i+2}\\ p_{i+3}\\ p_{i+4}
/// \end{pmatrix}^T M_5 \begin{pmatrix} 1 \\ u \\ u^2 \\ u^3 \\ u^4
/// \end{pmatrix},
/// \f}
/// where \f$ p_{i} \f$ are knots and \f$ M_5 \f$ is a blending matrix computed
/// using \ref computeBlendingMatrix \f{align}{
/// M_5 = \frac{1}{4!}
/// \begin{pmatrix} 1 & -4 & 6 & -4 & 1 \\ 11 & -12 & -6 & 12 & -4 \\11 &
/// 12 & -6 & -12 & 6 \\ 1 & 4 & 6 & 4 & -4 \\ 0 & 0 & 0 & 0
/// & 1 \end{pmatrix}.
/// \f}
/// Given this formula, we can evaluate derivatives with respect to time
/// (velocity, acceleration) in the following way:
/// \f{align}{
/// p'(u(t)) &= \frac{1}{\Delta t}
/// \begin{pmatrix} p_{i}\\ p_{i+1}\\ p_{i+2}\\ p_{i+3}\\ p_{i+4}
/// \end{pmatrix}^T
/// M_5
/// \begin{pmatrix} 0 \\ 1 \\ 2u \\ 3u^2 \\ 4u^3 \end{pmatrix},
/// \f}
/// \f{align}{
/// p''(u(t)) &= \frac{1}{\Delta t^2}
/// \begin{pmatrix} p_{i}\\ p_{i+1}\\ p_{i+2}\\ p_{i+3}\\ p_{i+4}
/// \end{pmatrix}^T
/// M_5
/// \begin{pmatrix} 0 \\ 0 \\ 2 \\ 6u \\ 12u^2 \end{pmatrix}.
/// \f}
/// Higher time derivatives are evaluated similarly. This class supports
/// vector values for knots \f$ p_{i} \f$. The corresponding derivative vector
/// on the right is computed using \ref baseCoeffsWithTime.
///
/// See [[arXiv:1911.08860]](https://arxiv.org/abs/1911.08860) for more details.
template <int _DIM, int _N, typename _Scalar = double>
class RdSpline {
 public:
  static constexpr int N = _N;        ///< Order of the spline.
  static constexpr int DEG = _N - 1;  ///< Degree of the spline.
  static constexpr int DIM = _DIM;    ///< Dimension of Euclidean vector space.

  static constexpr _Scalar NS_TO_S = 1e-9;  ///< Nanosecond to second conversion
  static constexpr _Scalar S_TO_NS = 1e9;   ///< Second to nanosecond conversion

  using MatN = Eigen::Matrix<_Scalar, _N, _N>;  ///< N x N matrix type.
  using VecN = Eigen::Matrix<_Scalar, _N, 1>;   ///< N x 1 vector type.

  using VecD = Eigen::Matrix<_Scalar, _DIM, 1>;     ///< DIM x 1 vector type.
  using MatD = Eigen::Matrix<_Scalar, _DIM, _DIM>;  ///< DIM x DIM matrix type.

  /// @brief Struct to store the Jacobian of the spline
  ///
  /// Since B-spline of order N has local support (only N knots influence the
  /// value) the Jacobian is zero for all knots except maximum N for value and
  /// all derivatives.
  struct JacobianStruct {
    size_t
        start_idx;  ///< Start index of the non-zero elements of the Jacobian.
    std::array<_Scalar, N> d_val_d_knot;  ///< Value of nonzero Jacobians.
  };

  /// @brief Default constructor
  ///
  /// NOTE(review): leaves pow_inv_dt_ uninitialized; evaluate() is only
  /// meaningful after construction with a time interval or after cast() from
  /// a properly constructed spline.
  RdSpline() = default;

  /// @brief Constructor with knot interval and start time
  ///
  /// @param[in] time_interval_ns knot time interval in nanoseconds (used as a
  /// divisor below, so it must be nonzero)
  /// @param[in] start_time_ns start time of the spline in nanoseconds
  RdSpline(int64_t time_interval_ns, int64_t start_time_ns = 0)
      : dt_ns_(time_interval_ns), start_t_ns_(start_time_ns) {
    // Cache pow_inv_dt_[k] == (1/dt)^k with dt in seconds; these scale the
    // k-th time derivative in evaluate().
    pow_inv_dt_[0] = 1.0;
    pow_inv_dt_[1] = S_TO_NS / dt_ns_;

    for (size_t i = 2; i < N; i++) {
      pow_inv_dt_[i] = pow_inv_dt_[i - 1] * pow_inv_dt_[1];
    }
  }

  /// @brief Cast to different scalar type
  ///
  /// @return copy of this spline with knots and cached powers converted to
  /// Scalar2
  template <typename Scalar2>
  inline RdSpline<_DIM, _N, Scalar2> cast() const {
    RdSpline<_DIM, _N, Scalar2> res;

    res.dt_ns_ = dt_ns_;
    res.start_t_ns_ = start_t_ns_;

    // Relies on implicit conversion _Scalar -> Scalar2 for the cached powers.
    for (int i = 0; i < _N; i++) {
      res.pow_inv_dt_[i] = pow_inv_dt_[i];
    }

    for (const auto& k : knots_) {
      res.knots_.emplace_back(k.template cast<Scalar2>());
    }

    return res;
  }

  /// @brief Set start time for spline
  ///
  /// @param[in] start_time_ns start time of the spline in nanoseconds
  inline void setStartTimeNs(int64_t start_time_ns) {
    start_t_ns_ = start_time_ns;
  }

  /// @brief Maximum time represented by spline
  ///
  /// NOTE(review): knots_.size() is unsigned, so this underflows when fewer
  /// than N-1 knots are present — callers are expected to keep at least N
  /// knots in the spline.
  ///
  /// @return maximum time represented by spline in nanoseconds
  int64_t maxTimeNs() const {
    return start_t_ns_ + (knots_.size() - N + 1) * dt_ns_ - 1;
  }

  /// @brief Minimum time represented by spline
  ///
  /// @return minimum time represented by spline in nanoseconds
  int64_t minTimeNs() const { return start_t_ns_; }

  /// @brief Generate random trajectory
  ///
  /// @param[in] n number of knots to generate
  /// @param[in] static_init if true the first N knots will be the same
  /// resulting in static initial condition
  void genRandomTrajectory(int n, bool static_init = false) {
    if (static_init) {
      VecD rnd = VecD::Random() * 5;

      // First N identical knots make value constant (all derivatives zero)
      // at the start of the spline.
      for (int i = 0; i < N; i++) {
        knots_.push_back(rnd);
      }
      for (int i = 0; i < n - N; i++) {
        knots_.push_back(VecD::Random() * 5);
      }
    } else {
      for (int i = 0; i < n; i++) {
        knots_.push_back(VecD::Random() * 5);
      }
    }
  }

  /// @brief Add knot to the end of the spline
  ///
  /// @param[in] knot knot to add
  inline void knotsPushBack(const VecD& knot) { knots_.push_back(knot); }

  /// @brief Remove knot from the back of the spline
  inline void knotsPopBack() { knots_.pop_back(); }

  /// @brief Return the first knot of the spline
  ///
  /// @return first knot of the spline
  inline const VecD& knotsFront() const { return knots_.front(); }

  /// @brief Remove first knot of the spline and increase the start time
  inline void knotsPopFront() {
    // Dropping the first knot shifts the valid time range forward by one
    // knot interval.
    start_t_ns_ += dt_ns_;
    knots_.pop_front();
  }

  /// @brief Resize container with knots
  ///
  /// @param[in] n number of knots
  inline void resize(size_t n) { knots_.resize(n); }

  /// @brief Return reference to the knot with index i
  ///
  /// @param i index of the knot
  /// @return reference to the knot
  inline VecD& getKnot(int i) { return knots_[i]; }

  /// @brief Return const reference to the knot with index i
  ///
  /// @param i index of the knot
  /// @return const reference to the knot
  inline const VecD& getKnot(int i) const { return knots_[i]; }

  /// @brief Return const reference to deque with knots
  ///
  /// @return const reference to deque with knots
  const Eigen::aligned_deque<VecD>& getKnots() const { return knots_; }

  /// @brief Return time interval in nanoseconds
  ///
  /// @return time interval in nanoseconds
  int64_t getTimeIntervalNs() const { return dt_ns_; }

  /// @brief Evaluate value or derivative of the spline
  ///
  /// @param Derivative derivative to evaluate (0 for value)
  /// @param[in] time_ns time for evaluating of the spline in nanoseconds
  /// @param[out] J if not nullptr, return the Jacobian of the value with
  /// respect to knots
  /// @return value of the spline or derivative. Euclidean vector of dimension
  /// DIM.
  template <int Derivative = 0>
  VecD evaluate(int64_t time_ns, JacobianStruct* J = nullptr) const {
    int64_t st_ns = (time_ns - start_t_ns_);

    BASALT_ASSERT_STREAM(st_ns >= 0, "st_ns " << st_ns << " time_ns " << time_ns
                                              << " start_t_ns " << start_t_ns_);

    // s: index of the first knot of the active segment;
    // u: normalized time within the segment, in [0, 1).
    int64_t s = st_ns / dt_ns_;
    double u = double(st_ns % dt_ns_) / double(dt_ns_);

    BASALT_ASSERT_STREAM(s >= 0, "s " << s);
    BASALT_ASSERT_STREAM(
        size_t(s + N) <= knots_.size(),
        "s " << s << " N " << N << " knots.size() " << knots_.size());

    VecN p;
    baseCoeffsWithTime<Derivative>(p, u);

    // Blend the power-basis derivative vector and scale by (1/dt)^Derivative
    // to convert from normalized time u to real time.
    VecN coeff = pow_inv_dt_[Derivative] * (BLENDING_MATRIX * p);

    // std::cerr << "p " << p.transpose() << std::endl;
    // std::cerr << "coeff " << coeff.transpose() << std::endl;

    VecD res;
    res.setZero();

    for (int i = 0; i < N; i++) {
      res += coeff[i] * knots_[s + i];

      if (J) {
        J->d_val_d_knot[i] = coeff[i];
      }
    }

    if (J) {
      J->start_idx = s;
    }

    return res;
  }

  /// @brief Alias for first derivative of spline. See \ref evaluate.
  inline VecD velocity(int64_t time_ns, JacobianStruct* J = nullptr) const {
    return evaluate<1>(time_ns, J);
  }

  /// @brief Alias for second derivative of spline. See \ref evaluate.
  inline VecD acceleration(int64_t time_ns, JacobianStruct* J = nullptr) const {
    return evaluate<2>(time_ns, J);
  }

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

 protected:
  /// @brief Vector of derivatives of time polynomial.
  ///
  /// Computes a derivative of \f$ \begin{bmatrix}1 & t & t^2 & \dots &
  /// t^{N-1}\end{bmatrix} \f$ with respect to time. For example, the first
  /// derivative would be \f$ \begin{bmatrix}0 & 1 & 2 t & \dots & (N-1)
  /// t^{N-2}\end{bmatrix} \f$.
  /// @param Derivative derivative to evaluate
  /// @param[out] res_const vector to store the result
  /// @param[in] t
  template <int Derivative, class Derived>
  static void baseCoeffsWithTime(const Eigen::MatrixBase<Derived>& res_const,
                                 _Scalar t) {
    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived, N);
    // Eigen idiom: outputs are passed as const references and the constness
    // is cast away so expression types can bind.
    Eigen::MatrixBase<Derived>& res =
        const_cast<Eigen::MatrixBase<Derived>&>(res_const);

    res.setZero();

    if (Derivative < N) {
      res[Derivative] = BASE_COEFFICIENTS(Derivative, Derivative);

      _Scalar ti = t;
      for (int j = Derivative + 1; j < N; j++) {
        res[j] = BASE_COEFFICIENTS(Derivative, j) * ti;
        ti = ti * t;
      }
    }
  }

  template <int, int, typename>
  friend class RdSpline;

  static const MatN
      BLENDING_MATRIX;  ///< Blending matrix. See \ref computeBlendingMatrix.

  static const MatN BASE_COEFFICIENTS;  ///< Base coefficients matrix.
                                        ///< See \ref computeBaseCoefficients.

  Eigen::aligned_deque<VecD> knots_;    ///< Knots
  int64_t dt_ns_{0};                    ///< Knot interval in nanoseconds
  int64_t start_t_ns_{0};               ///< Start time in nanoseconds
  std::array<_Scalar, _N> pow_inv_dt_;  ///< Array with inverse powers of dt
};
/// Out-of-line storage for the static base-coefficients matrix.
/// See \ref computeBaseCoefficients.
template <int _DIM, int _N, typename _Scalar>
const typename RdSpline<_DIM, _N, _Scalar>::MatN
    RdSpline<_DIM, _N, _Scalar>::BASE_COEFFICIENTS =
        computeBaseCoefficients<_N, _Scalar>();

/// Out-of-line storage for the static blending matrix.
/// See \ref computeBlendingMatrix.
template <int _DIM, int _N, typename _Scalar>
const typename RdSpline<_DIM, _N, _Scalar>::MatN
    RdSpline<_DIM, _N, _Scalar>::BLENDING_MATRIX =
        computeBlendingMatrix<_N, _Scalar, false>();
} // namespace basalt

View File

@@ -0,0 +1,552 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Uniform B-spline for SE(3)
*/
#pragma once
#include <basalt/spline/rd_spline.h>
#include <basalt/spline/so3_spline.h>
#include <basalt/utils/assert.h>

#include <basalt/calibration/calib_bias.hpp>

#include <array>
#include <cstdint>
#include <iostream>
namespace basalt {
/// @brief Uniform B-spline for SE(3) of order N. Internally uses an SO(3) (\ref
/// So3Spline) spline for rotation and 3D Euclidean spline (\ref RdSpline) for
/// translation (split representation).
///
/// See [[arXiv:1911.08860]](https://arxiv.org/abs/1911.08860) for more details.
template <int _N, typename _Scalar = double>
class Se3Spline {
public:
static constexpr int N = _N; ///< Order of the spline.
static constexpr int DEG = _N - 1; ///< Degree of the spline.
using MatN = Eigen::Matrix<_Scalar, _N, _N>;
using VecN = Eigen::Matrix<_Scalar, _N, 1>;
using VecNp1 = Eigen::Matrix<_Scalar, _N + 1, 1>;
using Vec3 = Eigen::Matrix<_Scalar, 3, 1>;
using Vec6 = Eigen::Matrix<_Scalar, 6, 1>;
using Vec9 = Eigen::Matrix<_Scalar, 9, 1>;
using Vec12 = Eigen::Matrix<_Scalar, 12, 1>;
using Mat3 = Eigen::Matrix<_Scalar, 3, 3>;
using Mat6 = Eigen::Matrix<_Scalar, 6, 6>;
using Mat36 = Eigen::Matrix<_Scalar, 3, 6>;
using Mat39 = Eigen::Matrix<_Scalar, 3, 9>;
using Mat312 = Eigen::Matrix<_Scalar, 3, 12>;
using Matrix3Array = std::array<Mat3, N>;
using Matrix36Array = std::array<Mat36, N>;
using Matrix6Array = std::array<Mat6, N>;
using SO3 = Sophus::SO3<_Scalar>;
using SE3 = Sophus::SE3<_Scalar>;
using PosJacobianStruct = typename RdSpline<3, N, _Scalar>::JacobianStruct;
using SO3JacobianStruct = typename So3Spline<N, _Scalar>::JacobianStruct;
/// @brief Struct to store the accelerometer residual Jacobian with
/// respect to knots
struct AccelPosSO3JacobianStruct {
size_t start_idx;
std::array<Mat36, N> d_val_d_knot;
};
/// @brief Struct to store the pose Jacobian with respect to knots
struct PosePosSO3JacobianStruct {
size_t start_idx;
std::array<Mat6, N> d_val_d_knot;
};
/// @brief Constructor with knot interval and start time
///
/// @param[in] time_interval_ns knot time interval in nanoseconds
/// @param[in] start_time_ns start time of the spline in nanoseconds
Se3Spline(int64_t time_interval_ns, int64_t start_time_ns = 0)
: pos_spline_(time_interval_ns, start_time_ns),
so3_spline_(time_interval_ns, start_time_ns),
dt_ns_(time_interval_ns) {}
/// @brief Gererate random trajectory
///
/// @param[in] n number of knots to generate
/// @param[in] static_init if true the first N knots will be the same
/// resulting in static initial condition
void genRandomTrajectory(int n, bool static_init = false) {
so3_spline_.genRandomTrajectory(n, static_init);
pos_spline_.genRandomTrajectory(n, static_init);
}
/// @brief Set the knot to particular SE(3) pose
///
/// @param[in] pose SE(3) pose
/// @param[in] i index of the knot
void setKnot(const Sophus::SE3d &pose, int i) {
so3_spline_.getKnot(i) = pose.so3();
pos_spline_.getKnot(i) = pose.translation();
}
/// @brief Reset spline to have num_knots initialized at pose
///
/// @param[in] pose SE(3) pose
/// @param[in] num_knots number of knots to initialize
void setKnots(const Sophus::SE3d &pose, int num_knots) {
so3_spline_.resize(num_knots);
pos_spline_.resize(num_knots);
for (int i = 0; i < num_knots; i++) {
so3_spline_.getKnot(i) = pose.so3();
pos_spline_.getKnot(i) = pose.translation();
}
}
/// @brief Reset spline to the knots from other spline
///
/// @param[in] other spline to copy knots from
void setKnots(const Se3Spline<N, _Scalar> &other) {
BASALT_ASSERT(other.dt_ns_ == dt_ns_);
BASALT_ASSERT(other.pos_spline_.getKnots().size() ==
other.pos_spline_.getKnots().size());
size_t num_knots = other.pos_spline_.getKnots().size();
so3_spline_.resize(num_knots);
pos_spline_.resize(num_knots);
for (size_t i = 0; i < num_knots; i++) {
so3_spline_.getKnot(i) = other.so3_spline_.getKnot(i);
pos_spline_.getKnot(i) = other.pos_spline_.getKnot(i);
}
}
/// @brief Add knot to the end of the spline
///
/// @param[in] knot knot to add
inline void knotsPushBack(const SE3 &knot) {
so3_spline_.knotsPushBack(knot.so3());
pos_spline_.knotsPushBack(knot.translation());
}
/// @brief Remove knot from the back of the spline
inline void knotsPopBack() {
so3_spline_.knotsPopBack();
pos_spline_.knotsPopBack();
}
/// @brief Return the first knot of the spline
///
/// @return first knot of the spline
inline SE3 knotsFront() const {
SE3 res(so3_spline_.knots_front(), pos_spline_.knots_front());
return res;
}
/// @brief Remove first knot of the spline and increase the start time
inline void knotsPopFront() {
so3_spline_.knots_pop_front();
pos_spline_.knots_pop_front();
BASALT_ASSERT(so3_spline_.minTimeNs() == pos_spline_.minTimeNs());
BASALT_ASSERT(so3_spline_.getKnots().size() ==
pos_spline_.getKnots().size());
}
/// @brief Return the last knot of the spline
///
/// @return last knot of the spline
SE3 getLastKnot() {
BASALT_ASSERT(so3_spline_.getKnots().size() ==
pos_spline_.getKnots().size());
SE3 res(so3_spline_.getKnots().back(), pos_spline_.getKnots().back());
return res;
}
/// @brief Return knot with index i
///
/// @param i index of the knot
/// @return knot
SE3 getKnot(size_t i) const {
SE3 res(getKnotSO3(i), getKnotPos(i));
return res;
}
/// @brief Return reference to the SO(3) knot with index i
///
/// @param i index of the knot
/// @return reference to the SO(3) knot
inline SO3 &getKnotSO3(size_t i) { return so3_spline_.getKnot(i); }
/// @brief Return const reference to the SO(3) knot with index i
///
/// @param i index of the knot
/// @return const reference to the SO(3) knot
inline const SO3 &getKnotSO3(size_t i) const {
return so3_spline_.getKnot(i);
}
/// @brief Return reference to the position knot with index i
///
/// @param i index of the knot
/// @return reference to the position knot
inline Vec3 &getKnotPos(size_t i) { return pos_spline_.getKnot(i); }
/// @brief Return const reference to the position knot with index i
///
/// @param i index of the knot
/// @return const reference to the position knot
inline const Vec3 &getKnotPos(size_t i) const {
return pos_spline_.getKnot(i);
}
/// @brief Set start time for spline
///
/// @param[in] start_time_ns start time of the spline in nanoseconds
inline void setStartTimeNs(int64_t s) {
so3_spline_.setStartTimeNs(s);
pos_spline_.setStartTimeNs(s);
}
/// @brief Apply increment to the knot
///
/// The incremernt vector consists of translational and rotational parts \f$
/// [\upsilon, \omega]^T \f$. Given the current pose of the knot \f$ R \in
/// SO(3), p \in \mathbb{R}^3\f$ the updated pose is: \f{align}{ R' &=
/// \exp(\omega) R
/// \\ p' &= p + \upsilon
/// \f}
/// The increment is consistent with \ref
/// PoseState::applyInc.
///
/// @param[in] i index of the knot
/// @param[in] inc 6x1 increment vector
template <typename Derived>
void applyInc(int i, const Eigen::MatrixBase<Derived> &inc) {
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived, 6);
pos_spline_.getKnot(i) += inc.template head<3>();
so3_spline_.getKnot(i) =
SO3::exp(inc.template tail<3>()) * so3_spline_.getKnot(i);
}
/// @brief Maximum time represented by spline
///
/// @return maximum time represented by spline in nanoseconds
int64_t maxTimeNs() const {
BASALT_ASSERT_STREAM(so3_spline_.maxTimeNs() == pos_spline_.maxTimeNs(),
"so3_spline.maxTimeNs() " << so3_spline_.maxTimeNs()
<< " pos_spline.maxTimeNs() "
<< pos_spline_.maxTimeNs());
return pos_spline_.maxTimeNs();
}
/// @brief Minimum time represented by spline
///
/// @return minimum time represented by spline in nanoseconds
int64_t minTimeNs() const {
BASALT_ASSERT_STREAM(so3_spline_.minTimeNs() == pos_spline_.minTimeNs(),
"so3_spline.minTimeNs() " << so3_spline_.minTimeNs()
<< " pos_spline.minTimeNs() "
<< pos_spline_.minTimeNs());
return pos_spline_.minTimeNs();
}
/// @brief Number of knots in the spline
size_t numKnots() const { return pos_spline_.getKnots().size(); }
/// @brief Linear acceleration in the world frame.
///
/// @param[in] time_ns time to evaluate linear acceleration in nanoseconds
inline Vec3 transAccelWorld(int64_t time_ns) const {
return pos_spline_.acceleration(time_ns);
}
/// @brief Linear velocity in the world frame.
///
/// @param[in] time_ns time to evaluate linear velocity in nanoseconds
inline Vec3 transVelWorld(int64_t time_ns) const {
return pos_spline_.velocity(time_ns);
}
/// @brief Rotational velocity in the body frame.
///
/// @param[in] time_ns time to evaluate rotational velocity in nanoseconds
inline Vec3 rotVelBody(int64_t time_ns) const {
return so3_spline_.velocityBody(time_ns);
}
/// @brief Evaluate pose.
///
/// @param[in] time_ns time to evaluate pose in nanoseconds
/// @return SE(3) pose at time_ns
SE3 pose(int64_t time_ns) const {
SE3 res;
res.so3() = so3_spline_.evaluate(time_ns);
res.translation() = pos_spline_.evaluate(time_ns);
return res;
}
/// @brief Evaluate pose and compute Jacobian.
///
/// @param[in] time_ns time to evaluate pose in nanoseconds
/// @param[out] J Jacobian of the pose with respect to knots
/// @return SE(3) pose at time_ns
Sophus::SE3d pose(int64_t time_ns, PosePosSO3JacobianStruct *J) const {
Sophus::SE3d res;
typename So3Spline<_N, _Scalar>::JacobianStruct Jr;
typename RdSpline<3, N, _Scalar>::JacobianStruct Jp;
res.so3() = so3_spline_.evaluate(time_ns, &Jr);
res.translation() = pos_spline_.evaluate(time_ns, &Jp);
if (J) {
Eigen::Matrix3d RT = res.so3().inverse().matrix();
J->start_idx = Jr.start_idx;
for (int i = 0; i < N; i++) {
J->d_val_d_knot[i].setZero();
J->d_val_d_knot[i].template topLeftCorner<3, 3>() =
RT * Jp.d_val_d_knot[i];
J->d_val_d_knot[i].template bottomRightCorner<3, 3>() =
RT * Jr.d_val_d_knot[i];
}
}
return res;
}
/// @brief Evaluate pose and compute time Jacobian.
///
/// @param[in] time_ns time to evaluate pose in nanoseconds
/// @param[out] J Jacobian of the pose with time
void d_pose_d_t(int64_t time_ns, Vec6 &J) const {
J.template head<3>() =
so3_spline_.evaluate(time_ns).inverse() * transVelWorld(time_ns);
J.template tail<3>() = rotVelBody(time_ns);
}
/// @brief Evaluate gyroscope residual.
///
/// @param[in] time_ns time of the measurement
/// @param[in] measurement gyroscope measurement
/// @param[in] gyro_bias_full gyroscope calibration
/// @return gyroscope residual
Vec3 gyroResidual(int64_t time_ns, const Vec3 &measurement,
const CalibGyroBias<_Scalar> &gyro_bias_full) const {
return so3_spline_.velocityBody(time_ns) -
gyro_bias_full.getCalibrated(measurement);
}
/// @brief Evaluate gyroscope residual and compute Jacobians.
///
/// @param[in] time_ns time of the measurement
/// @param[in] measurement gyroscope measurement
/// @param[in] gyro_bias_full gyroscope calibration
/// @param[out] J_knots Jacobian with respect to SO(3) spline knots
/// @param[out] J_bias Jacobian with respect to gyroscope calibration
/// @return gyroscope residual
Vec3 gyroResidual(int64_t time_ns, const Vec3 &measurement,
const CalibGyroBias<_Scalar> &gyro_bias_full,
SO3JacobianStruct *J_knots,
Mat312 *J_bias = nullptr) const {
if (J_bias) {
J_bias->setZero();
J_bias->template block<3, 3>(0, 0).diagonal().array() = 1.0;
J_bias->template block<3, 3>(0, 3).diagonal().array() = -measurement[0];
J_bias->template block<3, 3>(0, 6).diagonal().array() = -measurement[1];
J_bias->template block<3, 3>(0, 9).diagonal().array() = -measurement[2];
}
return so3_spline_.velocityBody(time_ns, J_knots) -
gyro_bias_full.getCalibrated(measurement);
}
/// @brief Evaluate accelerometer residual.
///
/// @param[in] time_ns time of the measurement
/// @param[in] measurement accelerometer measurement
/// @param[in] accel_bias_full accelerometer calibration
/// @param[in] g gravity
/// @return accelerometer residual
Vec3 accelResidual(int64_t time_ns, const Eigen::Vector3d &measurement,
const CalibAccelBias<_Scalar> &accel_bias_full,
const Eigen::Vector3d &g) const {
Sophus::SO3d R = so3_spline_.evaluate(time_ns);
Eigen::Vector3d accel_world = pos_spline_.acceleration(time_ns);
return R.inverse() * (accel_world + g) -
accel_bias_full.getCalibrated(measurement);
}
/// @brief Evaluate accelerometer residual and Jacobians.
///
/// @param[in] time_ns time of the measurement
/// @param[in] measurement accelerometer measurement
/// @param[in] accel_bias_full accelerometer calibration
/// @param[in] g gravity
/// @param[out] J_knots Jacobian with respect to spline knots
/// @param[out] J_bias Jacobian with respect to accelerometer calibration
/// @param[out] J_g Jacobian with respect to gravity
/// @return accelerometer residual
Vec3 accelResidual(int64_t time_ns, const Vec3 &measurement,
const CalibAccelBias<_Scalar> &accel_bias_full,
const Vec3 &g, AccelPosSO3JacobianStruct *J_knots,
Mat39 *J_bias = nullptr, Mat3 *J_g = nullptr) const {
typename So3Spline<_N, _Scalar>::JacobianStruct Jr;
typename RdSpline<3, N, _Scalar>::JacobianStruct Jp;
Sophus::SO3d R = so3_spline_.evaluate(time_ns, &Jr);
Eigen::Vector3d accel_world = pos_spline_.acceleration(time_ns, &Jp);
Eigen::Matrix3d RT = R.inverse().matrix();
Eigen::Matrix3d tmp = RT * Sophus::SO3d::hat(accel_world + g);
BASALT_ASSERT_STREAM(
Jr.start_idx == Jp.start_idx,
"Jr.start_idx " << Jr.start_idx << " Jp.start_idx " << Jp.start_idx);
BASALT_ASSERT_STREAM(
so3_spline_.getKnots().size() == pos_spline_.getKnots().size(),
"so3_spline.getKnots().size() " << so3_spline_.getKnots().size()
<< " pos_spline.getKnots().size() "
<< pos_spline_.getKnots().size());
J_knots->start_idx = Jp.start_idx;
for (int i = 0; i < N; i++) {
J_knots->d_val_d_knot[i].template topLeftCorner<3, 3>() =
RT * Jp.d_val_d_knot[i];
J_knots->d_val_d_knot[i].template bottomRightCorner<3, 3>() =
tmp * Jr.d_val_d_knot[i];
}
if (J_bias) {
J_bias->setZero();
J_bias->template block<3, 3>(0, 0).diagonal().array() = 1.0;
J_bias->template block<3, 3>(0, 3).diagonal().array() = -measurement[0];
(*J_bias)(1, 6) = -measurement[1];
(*J_bias)(2, 7) = -measurement[1];
(*J_bias)(2, 8) = -measurement[2];
}
if (J_g) {
(*J_g) = RT;
}
Vec3 res =
RT * (accel_world + g) - accel_bias_full.getCalibrated(measurement);
return res;
}
/// @brief Evaluate position residual.
///
/// @param[in] time_ns time of the measurement
/// @param[in] measured_position position measurement
/// @param[out] Jp if not nullptr, Jacobian with respect to knos of the
/// position spline
/// @return position residual
Sophus::Vector3d positionResidual(int64_t time_ns,
const Vec3 &measured_position,
PosJacobianStruct *Jp = nullptr) const {
return pos_spline_.evaluate(time_ns, Jp) - measured_position;
}
/// @brief Evaluate orientation residual.
///
/// @param[in] time_ns time of the measurement
/// @param[in] measured_orientation orientation measurement
/// @param[out] Jr if not nullptr, Jacobian with respect to knos of the
/// SO(3) spline
/// @return orientation residual
Sophus::Vector3d orientationResidual(int64_t time_ns,
const SO3 &measured_orientation,
SO3JacobianStruct *Jr = nullptr) const {
Sophus::Vector3d res =
(so3_spline_.evaluate(time_ns, Jr) * measured_orientation.inverse())
.log();
if (Jr) {
Eigen::Matrix3d Jrot;
Sophus::leftJacobianSO3(res, Jrot);
for (int i = 0; i < N; i++) {
Jr->d_val_d_knot[i] = Jrot * Jr->d_val_d_knot[i];
}
}
return res;
}
/// @brief Print knots for debugging.
inline void printKnots() const {
for (size_t i = 0; i < pos_spline_.getKnots().size(); i++) {
std::cout << i << ": p:" << pos_spline_.getKnot(i).transpose() << " q: "
<< so3_spline_.getKnot(i).unit_quaternion().coeffs().transpose()
<< std::endl;
}
}
/// @brief Print position knots for debugging.
inline void printPosKnots() const {
for (size_t i = 0; i < pos_spline_.getKnots().size(); i++) {
std::cout << pos_spline_.getKnot(i).transpose() << std::endl;
}
}
/// @brief Knot time interval in nanoseconds.
inline int64_t getDtNs() const { return dt_ns_; }
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
private:
RdSpline<3, _N, _Scalar> pos_spline_; ///< Position spline
So3Spline<_N, _Scalar> so3_spline_; ///< Orientation spline
int64_t dt_ns_; ///< Knot interval in nanoseconds
};
} // namespace basalt

View File

@@ -0,0 +1,790 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Uniform cumulative B-spline for SO(3)
*/
#pragma once
#include <basalt/spline/spline_common.h>
#include <basalt/utils/assert.h>
#include <basalt/utils/sophus_utils.hpp>
#include <Eigen/Dense>
#include <sophus/so3.hpp>
#include <array>
namespace basalt {
/// @brief Uniform cumulative B-spline for SO(3) of order N
///
/// For example, in the particular case scalar values and order N=5, for a time
/// \f$t \in [t_i, t_{i+1})\f$ the value of \f$p(t)\f$ depends only on 5 control
/// points at \f$[t_i, t_{i+1}, t_{i+2}, t_{i+3}, t_{i+4}]\f$. To
/// simplify calculations we transform time to uniform representation \f$s(t) =
/// (t - t_0)/\Delta t \f$, such that control points transform into \f$ s_i \in
/// [0,..,N] \f$. We define function \f$ u(t) = s(t)-s_i \f$ to be a time since
/// the start of the segment. Following the cumulative matrix representation of
/// De Boor - Cox formula, the value of the function can be evaluated as
/// follows: \f{align}{
/// R(u(t)) &= R_i
/// \prod_{j=1}^{4}{\exp(k_{j}\log{(R_{i+j-1}^{-1}R_{i+j})})},
/// \\ \begin{pmatrix} k_0 \\ k_1 \\ k_2 \\ k_3 \\ k_4 \end{pmatrix}^T &=
/// M_{c5} \begin{pmatrix} 1 \\ u \\ u^2 \\ u^3 \\ u^4
/// \end{pmatrix},
/// \f}
/// where \f$ R_{i} \in SO(3) \f$ are knots and \f$ M_{c5} \f$ is a cumulative
/// blending matrix computed using \ref computeBlendingMatrix \f{align}{
/// M_{c5} = \frac{1}{4!}
/// \begin{pmatrix} 24 & 0 & 0 & 0 & 0 \\ 23 & 4 & -6 & 4 & -1 \\ 12 & 16 & 0
/// & -8 & 3 \\ 1 & 4 & 6 & 4 & -3 \\ 0 & 0 & 0 & 0 & 1 \end{pmatrix}.
/// \f}
///
/// See [[arXiv:1911.08860]](https://arxiv.org/abs/1911.08860) for more details.
template <int _N, typename _Scalar = double>
class So3Spline {
public:
static constexpr int N = _N; ///< Order of the spline.
static constexpr int DEG = _N - 1; ///< Degree of the spline.
static constexpr _Scalar NS_TO_S = 1e-9; ///< Nanosecond to second conversion
static constexpr _Scalar S_TO_NS = 1e9; ///< Second to nanosecond conversion
using MatN = Eigen::Matrix<_Scalar, _N, _N>;
using VecN = Eigen::Matrix<_Scalar, _N, 1>;
using Vec3 = Eigen::Matrix<_Scalar, 3, 1>;
using Mat3 = Eigen::Matrix<_Scalar, 3, 3>;
using SO3 = Sophus::SO3<_Scalar>;
/// @brief Struct to store the Jacobian of the spline
///
/// Since B-spline of order N has local support (only N knots influence the
/// value) the Jacobian is zero for all knots except maximum N for value and
/// all derivatives.
  struct JacobianStruct {
    size_t start_idx;  ///< Start index of the non-zero elements of the
                       ///< Jacobian.
    std::array<Mat3, _N> d_val_d_knot;  ///< Value of nonzero Jacobians.
  };
/// @brief Constructor with knot interval and start time
///
/// @param[in] time_interval_ns knot time interval in nanoseconds
/// @param[in] start_time_ns start time of the spline in nanoseconds
So3Spline(int64_t time_interval_ns, int64_t start_time_ns = 0)
: dt_ns_(time_interval_ns), start_t_ns_(start_time_ns) {
pow_inv_dt_[0] = 1.0;
pow_inv_dt_[1] = S_TO_NS / dt_ns_;
pow_inv_dt_[2] = pow_inv_dt_[1] * pow_inv_dt_[1];
pow_inv_dt_[3] = pow_inv_dt_[2] * pow_inv_dt_[1];
}
/// @brief Maximum time represented by spline
///
/// @return maximum time represented by spline in nanoseconds
int64_t maxTimeNs() const {
return start_t_ns_ + (knots_.size() - N + 1) * dt_ns_ - 1;
}
/// @brief Minimum time represented by spline
///
/// @return minimum time represented by spline in nanoseconds
int64_t minTimeNs() const { return start_t_ns_; }
/// @brief Gererate random trajectory
///
/// @param[in] n number of knots to generate
/// @param[in] static_init if true the first N knots will be the same
/// resulting in static initial condition
void genRandomTrajectory(int n, bool static_init = false) {
if (static_init) {
Vec3 rnd = Vec3::Random() * M_PI;
for (int i = 0; i < N; i++) {
knots_.push_back(SO3::exp(rnd));
}
for (int i = 0; i < n - N; i++) {
knots_.push_back(knots_.back() * SO3::exp(Vec3::Random() * M_PI / 2));
}
} else {
knots_.push_back(SO3::exp(Vec3::Random() * M_PI));
for (int i = 1; i < n; i++) {
knots_.push_back(knots_.back() * SO3::exp(Vec3::Random() * M_PI / 2));
}
}
}
/// @brief Set start time for spline
///
/// @param[in] start_time_ns start time of the spline in nanoseconds
inline void setStartTimeNs(int64_t s) { start_t_ns_ = s; }
/// @brief Add knot to the end of the spline
///
/// @param[in] knot knot to add
inline void knotsPushBack(const SO3& knot) { knots_.push_back(knot); }
/// @brief Remove knot from the back of the spline
inline void knotsPopBack() { knots_.pop_back(); }
/// @brief Return the first knot of the spline
///
/// @return first knot of the spline
inline const SO3& knotsFront() const { return knots_.front(); }
/// @brief Remove first knot of the spline and increase the start time
inline void knotsPopFront() {
start_t_ns_ += dt_ns_;
knots_.pop_front();
}
/// @brief Resize containter with knots
///
/// @param[in] n number of knots
inline void resize(size_t n) { knots_.resize(n); }
/// @brief Return reference to the knot with index i
///
/// @param i index of the knot
/// @return reference to the knot
inline SO3& getKnot(int i) { return knots_[i]; }
/// @brief Return const reference to the knot with index i
///
/// @param i index of the knot
/// @return const reference to the knot
inline const SO3& getKnot(int i) const { return knots_[i]; }
/// @brief Return const reference to deque with knots
///
/// @return const reference to deque with knots
const Eigen::aligned_deque<SO3>& getKnots() const { return knots_; }
/// @brief Return time interval in nanoseconds
///
/// @return time interval in nanoseconds
int64_t getTimeIntervalNs() const { return dt_ns_; }
/// @brief Evaluate SO(3) B-spline
///
/// @param[in] time_ns time for evaluating the value of the spline in
/// nanoseconds
/// @param[out] J if not nullptr, return the Jacobian of the value with
/// respect to knots
/// @return SO(3) value of the spline
SO3 evaluate(int64_t time_ns, JacobianStruct* J = nullptr) const {
int64_t st_ns = (time_ns - start_t_ns_);
BASALT_ASSERT_STREAM(st_ns >= 0, "st_ns " << st_ns << " time_ns " << time_ns
<< " start_t_ns " << start_t_ns_);
int64_t s = st_ns / dt_ns_;
double u = double(st_ns % dt_ns_) / double(dt_ns_);
BASALT_ASSERT_STREAM(s >= 0, "s " << s);
BASALT_ASSERT_STREAM(
size_t(s + N) <= knots_.size(),
"s " << s << " N " << N << " knots.size() " << knots_.size());
VecN p;
baseCoeffsWithTime<0>(p, u);
VecN coeff = BLENDING_MATRIX * p;
SO3 res = knots_[s];
Mat3 J_helper;
if (J) {
J->start_idx = s;
J_helper.setIdentity();
}
for (int i = 0; i < DEG; i++) {
const SO3& p0 = knots_[s + i];
const SO3& p1 = knots_[s + i + 1];
SO3 r01 = p0.inverse() * p1;
Vec3 delta = r01.log();
Vec3 kdelta = delta * coeff[i + 1];
if (J) {
Mat3 Jl_inv_delta;
Mat3 Jl_k_delta;
Sophus::leftJacobianInvSO3(delta, Jl_inv_delta);
Sophus::leftJacobianSO3(kdelta, Jl_k_delta);
J->d_val_d_knot[i] = J_helper;
J_helper = coeff[i + 1] * res.matrix() * Jl_k_delta * Jl_inv_delta *
p0.inverse().matrix();
J->d_val_d_knot[i] -= J_helper;
}
res *= SO3::exp(kdelta);
}
if (J) {
J->d_val_d_knot[DEG] = J_helper;
}
return res;
}
/// @brief Evaluate rotational velocity (first time derivative) of SO(3)
/// B-spline in the body frame
/// First, let's note that for scalars \f$ k, \Delta k \f$ the following
/// holds: \f$ \exp((k+\Delta k)\phi) = \exp(k\phi)\exp(\Delta k\phi), \phi
/// \in \mathbb{R}^3\f$. This is due to the fact that rotations around the
/// same axis are commutative.
///
/// Let's take SO(3) B-spline with N=3 as an example. The evolution in time of
/// rotation from the body frame to the world frame is described with \f[
/// R_{wb}(t) = R(t) = R_i \exp(k_1(t) \log(R_{i}^{-1}R_{i+1})) \exp(k_2(t)
/// \log(R_{i+1}^{-1}R_{i+2})), \f] where \f$ k_1, k_2 \f$ are spline
/// coefficients (see detailed description of \ref So3Spline). Since
/// expressions under logmap do not depend on time we can rename them to
/// constants.
/// \f[ R(t) = R_i \exp(k_1(t) ~ d_1) \exp(k_2(t) ~ d_2). \f]
///
/// With linear approximation of the spline coefficient evolution over time
/// \f$ k_1(t_0 + \Delta t) = k_1(t_0) + k_1'(t_0)\Delta t \f$ we can write
/// \f{align}
/// R(t_0 + \Delta t) &= R_i \exp( (k_1(t_0) + k_1'(t_0) \Delta t) ~ d_1)
/// \exp((k_2(t_0) + k_2'(t_0) \Delta t) ~ d_2)
/// \\ &= R_i \exp(k_1(t_0) ~ d_1) \exp(k_1'(t_0)~ d_1 \Delta t )
/// \exp(k_2(t_0) ~ d_2) \exp(k_2'(t_0) ~ d_2 \Delta t )
/// \\ &= R_i \exp(k_1(t_0) ~ d_1)
/// \exp(k_2(t_0) ~ d_2) \exp(R_{a}^T k_1'(t_0)~ d_1 \Delta t )
/// \exp(k_2'(t_0) ~ d_2 \Delta t )
/// \\ &= R_i \exp(k_1(t_0) ~ d_1)
/// \exp(k_2(t_0) ~ d_2) \exp((R_{a}^T k_1'(t_0)~ d_1 +
/// k_2'(t_0) ~ d_2) \Delta t )
/// \\ &= R(t_0) \exp((R_{a}^T k_1'(t_0)~ d_1 +
/// k_2'(t_0) ~ d_2) \Delta t )
/// \\ &= R(t_0) \exp( \omega \Delta t ),
/// \f} where \f$ \Delta t \f$ is small, \f$ R_{a} \in SO(3) = \exp(k_2(t_0) ~
/// d_2) \f$ and \f$ \omega \f$ is the rotational velocity in the body frame.
/// More explicitly we have the formula for rotational velocity in the body
/// frame \f[ \omega = R_{a}^T k_1'(t_0)~ d_1 + k_2'(t_0) ~ d_2. \f]
/// Derivatives of spline coefficients can be computed with \ref
/// baseCoeffsWithTime similar to \ref RdSpline (detailed description). With
/// the recursive formula computations generalize to different orders of
/// spline N.
///
/// @param[in] time_ns time for evaluating velocity of the spline in
/// nanoseconds
/// @return rotational velocity (3x1 vector)
Vec3 velocityBody(int64_t time_ns) const {
int64_t st_ns = (time_ns - start_t_ns_);
BASALT_ASSERT_STREAM(st_ns >= 0, "st_ns " << st_ns << " time_ns " << time_ns
<< " start_t_ns " << start_t_ns_);
int64_t s = st_ns / dt_ns_;
double u = double(st_ns % dt_ns_) / double(dt_ns_);
BASALT_ASSERT_STREAM(s >= 0, "s " << s);
BASALT_ASSERT_STREAM(
size_t(s + N) <= knots_.size(),
"s " << s << " N " << N << " knots.size() " << knots_.size());
VecN p;
baseCoeffsWithTime<0>(p, u);
VecN coeff = BLENDING_MATRIX * p;
baseCoeffsWithTime<1>(p, u);
VecN dcoeff = pow_inv_dt_[1] * BLENDING_MATRIX * p;
Vec3 rot_vel;
rot_vel.setZero();
for (int i = 0; i < DEG; i++) {
const SO3& p0 = knots_[s + i];
const SO3& p1 = knots_[s + i + 1];
SO3 r01 = p0.inverse() * p1;
Vec3 delta = r01.log();
rot_vel = SO3::exp(-delta * coeff[i + 1]) * rot_vel;
rot_vel += delta * dcoeff[i + 1];
}
return rot_vel;
}
/// @brief Evaluate rotational velocity (first time derivative) of SO(3)
/// B-spline in the body frame
///
/// @param[in] time_ns time for evaluating velocity of the spline in
/// nanoseconds
/// @param[out] J if not nullptr, return the Jacobian of the rotational
/// velocity in body frame with respect to knots
/// @return rotational velocity (3x1 vector)
Vec3 velocityBody(int64_t time_ns, JacobianStruct* J) const {
int64_t st_ns = (time_ns - start_t_ns_);
BASALT_ASSERT_STREAM(st_ns >= 0, "st_ns " << st_ns << " time_ns " << time_ns
<< " start_t_ns " << start_t_ns_);
int64_t s = st_ns / dt_ns_;
double u = double(st_ns % dt_ns_) / double(dt_ns_);
BASALT_ASSERT_STREAM(s >= 0, "s " << s);
BASALT_ASSERT_STREAM(
size_t(s + N) <= knots_.size(),
"s " << s << " N " << N << " knots.size() " << knots_.size());
VecN p;
baseCoeffsWithTime<0>(p, u);
VecN coeff = BLENDING_MATRIX * p;
baseCoeffsWithTime<1>(p, u);
VecN dcoeff = pow_inv_dt_[1] * BLENDING_MATRIX * p;
Vec3 delta_vec[DEG];
Mat3 R_tmp[DEG];
SO3 accum;
SO3 exp_k_delta[DEG];
Mat3 Jr_delta_inv[DEG];
Mat3 Jr_kdelta[DEG];
for (int i = DEG - 1; i >= 0; i--) {
const SO3& p0 = knots_[s + i];
const SO3& p1 = knots_[s + i + 1];
SO3 r01 = p0.inverse() * p1;
delta_vec[i] = r01.log();
Sophus::rightJacobianInvSO3(delta_vec[i], Jr_delta_inv[i]);
Jr_delta_inv[i] *= p1.inverse().matrix();
Vec3 k_delta = coeff[i + 1] * delta_vec[i];
Sophus::rightJacobianSO3(-k_delta, Jr_kdelta[i]);
R_tmp[i] = accum.matrix();
exp_k_delta[i] = Sophus::SO3d::exp(-k_delta);
accum *= exp_k_delta[i];
}
Mat3 d_vel_d_delta[DEG];
d_vel_d_delta[0] = dcoeff[1] * R_tmp[0] * Jr_delta_inv[0];
Vec3 rot_vel = delta_vec[0] * dcoeff[1];
for (int i = 1; i < DEG; i++) {
d_vel_d_delta[i] =
R_tmp[i - 1] * SO3::hat(rot_vel) * Jr_kdelta[i] * coeff[i + 1] +
R_tmp[i] * dcoeff[i + 1];
d_vel_d_delta[i] *= Jr_delta_inv[i];
rot_vel = exp_k_delta[i] * rot_vel + delta_vec[i] * dcoeff[i + 1];
}
if (J) {
J->start_idx = s;
for (int i = 0; i < N; i++) {
J->d_val_d_knot[i].setZero();
}
for (int i = 0; i < DEG; i++) {
J->d_val_d_knot[i] -= d_vel_d_delta[i];
J->d_val_d_knot[i + 1] += d_vel_d_delta[i];
}
}
return rot_vel;
}
/// @brief Evaluate rotational acceleration (second time derivative) of SO(3)
/// B-spline in the body frame
///
/// @param[in] time_ns time for evaluating acceleration of the spline in
/// nanoseconds
/// @param[out] vel_body if not nullptr, return the rotational velocity in the
/// body frame (3x1 vector) (side computation)
/// @return rotational acceleration (3x1 vector)
Vec3 accelerationBody(int64_t time_ns, Vec3* vel_body = nullptr) const {
int64_t st_ns = (time_ns - start_t_ns_);
BASALT_ASSERT_STREAM(st_ns >= 0, "st_ns " << st_ns << " time_ns " << time_ns
<< " start_t_ns " << start_t_ns_);
int64_t s = st_ns / dt_ns_;
double u = double(st_ns % dt_ns_) / double(dt_ns_);
BASALT_ASSERT_STREAM(s >= 0, "s " << s);
BASALT_ASSERT_STREAM(
size_t(s + N) <= knots_.size(),
"s " << s << " N " << N << " knots.size() " << knots_.size());
VecN p;
baseCoeffsWithTime<0>(p, u);
VecN coeff = BLENDING_MATRIX * p;
baseCoeffsWithTime<1>(p, u);
VecN dcoeff = pow_inv_dt_[1] * BLENDING_MATRIX * p;
baseCoeffsWithTime<2>(p, u);
VecN ddcoeff = pow_inv_dt_[2] * BLENDING_MATRIX * p;
SO3 r_accum;
Vec3 rot_vel;
rot_vel.setZero();
Vec3 rot_accel;
rot_accel.setZero();
for (int i = 0; i < DEG; i++) {
const SO3& p0 = knots_[s + i];
const SO3& p1 = knots_[s + i + 1];
SO3 r01 = p0.inverse() * p1;
Vec3 delta = r01.log();
SO3 rot = SO3::exp(-delta * coeff[i + 1]);
rot_vel = rot * rot_vel;
Vec3 vel_current = dcoeff[i + 1] * delta;
rot_vel += vel_current;
rot_accel = rot * rot_accel;
rot_accel += ddcoeff[i + 1] * delta + rot_vel.cross(vel_current);
}
if (vel_body) {
*vel_body = rot_vel;
}
return rot_accel;
}
/// @brief Evaluate rotational acceleration (second time derivative) of SO(3)
/// B-spline in the body frame
///
/// @param[in] time_ns time for evaluating acceleration of the spline in
/// nanoseconds
/// @param[out] J_accel if not nullptr, return the Jacobian of the rotational
/// acceleration in body frame with respect to knots
/// @param[out] vel_body if not nullptr, return the rotational velocity in the
/// body frame (3x1 vector) (side computation)
/// @param[out] J_vel if not nullptr, return the Jacobian of the rotational
/// velocity in the body frame (side computation)
/// @return rotational acceleration (3x1 vector)
Vec3 accelerationBody(int64_t time_ns, JacobianStruct* J_accel,
Vec3* vel_body = nullptr,
JacobianStruct* J_vel = nullptr) const {
BASALT_ASSERT(J_accel);
int64_t st_ns = (time_ns - start_t_ns_);
BASALT_ASSERT_STREAM(st_ns >= 0, "st_ns " << st_ns << " time_ns " << time_ns
<< " start_t_ns " << start_t_ns_);
int64_t s = st_ns / dt_ns_;
double u = double(st_ns % dt_ns_) / double(dt_ns_);
BASALT_ASSERT_STREAM(s >= 0, "s " << s);
BASALT_ASSERT_STREAM(
size_t(s + N) <= knots_.size(),
"s " << s << " N " << N << " knots.size() " << knots_.size());
VecN p;
baseCoeffsWithTime<0>(p, u);
VecN coeff = BLENDING_MATRIX * p;
baseCoeffsWithTime<1>(p, u);
VecN dcoeff = pow_inv_dt_[1] * BLENDING_MATRIX * p;
baseCoeffsWithTime<2>(p, u);
VecN ddcoeff = pow_inv_dt_[2] * BLENDING_MATRIX * p;
Vec3 delta_vec[DEG];
Mat3 exp_k_delta[DEG];
Mat3 Jr_delta_inv[DEG];
Mat3 Jr_kdelta[DEG];
Vec3 rot_vel;
rot_vel.setZero();
Vec3 rot_accel;
rot_accel.setZero();
Vec3 rot_vel_arr[DEG];
Vec3 rot_accel_arr[DEG];
for (int i = 0; i < DEG; i++) {
const SO3& p0 = knots_[s + i];
const SO3& p1 = knots_[s + i + 1];
SO3 r01 = p0.inverse() * p1;
delta_vec[i] = r01.log();
Sophus::rightJacobianInvSO3(delta_vec[i], Jr_delta_inv[i]);
Jr_delta_inv[i] *= p1.inverse().matrix();
Vec3 k_delta = coeff[i + 1] * delta_vec[i];
Sophus::rightJacobianSO3(-k_delta, Jr_kdelta[i]);
exp_k_delta[i] = Sophus::SO3d::exp(-k_delta).matrix();
rot_vel = exp_k_delta[i] * rot_vel;
Vec3 vel_current = dcoeff[i + 1] * delta_vec[i];
rot_vel += vel_current;
rot_accel = exp_k_delta[i] * rot_accel;
rot_accel += ddcoeff[i + 1] * delta_vec[i] + rot_vel.cross(vel_current);
rot_vel_arr[i] = rot_vel;
rot_accel_arr[i] = rot_accel;
}
Mat3 d_accel_d_delta[DEG];
Mat3 d_vel_d_delta[DEG];
d_vel_d_delta[DEG - 1] = coeff[DEG] * exp_k_delta[DEG - 1] *
SO3::hat(rot_vel_arr[DEG - 2]) *
Jr_kdelta[DEG - 1] +
Mat3::Identity() * dcoeff[DEG];
d_accel_d_delta[DEG - 1] =
coeff[DEG] * exp_k_delta[DEG - 1] * SO3::hat(rot_accel_arr[DEG - 2]) *
Jr_kdelta[DEG - 1] +
Mat3::Identity() * ddcoeff[DEG] +
dcoeff[DEG] * (SO3::hat(rot_vel_arr[DEG - 1]) -
SO3::hat(delta_vec[DEG - 1]) * d_vel_d_delta[DEG - 1]);
Mat3 pj;
pj.setIdentity();
Vec3 sj;
sj.setZero();
for (int i = DEG - 2; i >= 0; i--) {
sj += dcoeff[i + 2] * pj * delta_vec[i + 1];
pj *= exp_k_delta[i + 1];
d_vel_d_delta[i] = Mat3::Identity() * dcoeff[i + 1];
if (i >= 1) {
d_vel_d_delta[i] += coeff[i + 1] * exp_k_delta[i] *
SO3::hat(rot_vel_arr[i - 1]) * Jr_kdelta[i];
}
d_accel_d_delta[i] =
Mat3::Identity() * ddcoeff[i + 1] +
dcoeff[i + 1] * (SO3::hat(rot_vel_arr[i]) -
SO3::hat(delta_vec[i]) * d_vel_d_delta[i]);
if (i >= 1) {
d_accel_d_delta[i] += coeff[i + 1] * exp_k_delta[i] *
SO3::hat(rot_accel_arr[i - 1]) * Jr_kdelta[i];
}
d_vel_d_delta[i] = pj * d_vel_d_delta[i];
d_accel_d_delta[i] =
pj * d_accel_d_delta[i] - SO3::hat(sj) * d_vel_d_delta[i];
}
if (J_vel) {
J_vel->start_idx = s;
for (int i = 0; i < N; i++) {
J_vel->d_val_d_knot[i].setZero();
}
for (int i = 0; i < DEG; i++) {
Mat3 val = d_vel_d_delta[i] * Jr_delta_inv[i];
J_vel->d_val_d_knot[i] -= val;
J_vel->d_val_d_knot[i + 1] += val;
}
}
if (J_accel) {
J_accel->start_idx = s;
for (int i = 0; i < N; i++) {
J_accel->d_val_d_knot[i].setZero();
}
for (int i = 0; i < DEG; i++) {
Mat3 val = d_accel_d_delta[i] * Jr_delta_inv[i];
J_accel->d_val_d_knot[i] -= val;
J_accel->d_val_d_knot[i + 1] += val;
}
}
if (vel_body) {
*vel_body = rot_vel;
}
return rot_accel;
}
/// @brief Evaluate rotational jerk (third time derivative) of SO(3)
/// B-spline in the body frame
///
/// @param[in] time_ns time for evaluating jerk of the spline in
/// nanoseconds
/// @param[out] vel_body if not nullptr, return the rotational velocity in the
/// body frame (3x1 vector) (side computation)
/// @param[out] accel_body if not nullptr, return the rotational acceleration
/// in the body frame (3x1 vector) (side computation)
/// @return rotational jerk (3x1 vector)
Vec3 jerkBody(int64_t time_ns, Vec3* vel_body = nullptr,
Vec3* accel_body = nullptr) const {
int64_t st_ns = (time_ns - start_t_ns_);
BASALT_ASSERT_STREAM(st_ns >= 0, "st_ns " << st_ns << " time_ns " << time_ns
<< " start_t_ns " << start_t_ns_);
int64_t s = st_ns / dt_ns_;
double u = double(st_ns % dt_ns_) / double(dt_ns_);
BASALT_ASSERT_STREAM(s >= 0, "s " << s);
BASALT_ASSERT_STREAM(
size_t(s + N) <= knots_.size(),
"s " << s << " N " << N << " knots.size() " << knots_.size());
VecN p;
baseCoeffsWithTime<0>(p, u);
VecN coeff = BLENDING_MATRIX * p;
baseCoeffsWithTime<1>(p, u);
VecN dcoeff = pow_inv_dt_[1] * BLENDING_MATRIX * p;
baseCoeffsWithTime<2>(p, u);
VecN ddcoeff = pow_inv_dt_[2] * BLENDING_MATRIX * p;
baseCoeffsWithTime<3>(p, u);
VecN dddcoeff = pow_inv_dt_[3] * BLENDING_MATRIX * p;
Vec3 rot_vel;
rot_vel.setZero();
Vec3 rot_accel;
rot_accel.setZero();
Vec3 rot_jerk;
rot_jerk.setZero();
for (int i = 0; i < DEG; i++) {
const SO3& p0 = knots_[s + i];
const SO3& p1 = knots_[s + i + 1];
SO3 r01 = p0.inverse() * p1;
Vec3 delta = r01.log();
SO3 rot = SO3::exp(-delta * coeff[i + 1]);
rot_vel = rot * rot_vel;
Vec3 vel_current = dcoeff[i + 1] * delta;
rot_vel += vel_current;
rot_accel = rot * rot_accel;
Vec3 rot_vel_cross_vel_current = rot_vel.cross(vel_current);
rot_accel += ddcoeff[i + 1] * delta + rot_vel_cross_vel_current;
rot_jerk = rot * rot_jerk;
rot_jerk += dddcoeff[i + 1] * delta +
(ddcoeff[i + 1] * rot_vel + 2 * dcoeff[i + 1] * rot_accel -
dcoeff[i + 1] * rot_vel_cross_vel_current)
.cross(delta);
}
if (vel_body) {
*vel_body = rot_vel;
}
if (accel_body) {
*accel_body = rot_accel;
}
return rot_jerk;
}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
protected:
/// @brief Vector of derivatives of time polynomial.
///
/// Computes a derivative of \f$ \begin{bmatrix}1 & t & t^2 & \dots &
/// t^{N-1}\end{bmatrix} \f$ with repect to time. For example, the first
/// derivative would be \f$ \begin{bmatrix}0 & 1 & 2 t & \dots & (N-1)
/// t^{N-2}\end{bmatrix} \f$.
/// @param Derivative derivative to evaluate
/// @param[out] res_const vector to store the result
/// @param[in] t
template <int Derivative, class Derived>
static void baseCoeffsWithTime(const Eigen::MatrixBase<Derived>& res_const,
_Scalar t) {
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived, N);
Eigen::MatrixBase<Derived>& res =
const_cast<Eigen::MatrixBase<Derived>&>(res_const);
res.setZero();
if (Derivative < N) {
res[Derivative] = BASE_COEFFICIENTS(Derivative, Derivative);
_Scalar ti = t;
for (int j = Derivative + 1; j < N; j++) {
res[j] = BASE_COEFFICIENTS(Derivative, j) * ti;
ti = ti * t;
}
}
}
static const MatN
BLENDING_MATRIX; ///< Blending matrix. See \ref computeBlendingMatrix.
static const MatN BASE_COEFFICIENTS; ///< Base coefficients matrix.
///< See \ref computeBaseCoefficients.
Eigen::aligned_deque<SO3> knots_; ///< Knots
int64_t dt_ns_; ///< Knot interval in nanoseconds
int64_t start_t_ns_; ///< Start time in nanoseconds
std::array<_Scalar, 4> pow_inv_dt_; ///< Array with inverse powers of dt
}; // namespace basalt
// Out-of-line definition of the base-coefficient matrix; row k holds the
// coefficients of the k-th derivative of the monomial basis.
template <int _N, typename _Scalar>
const typename So3Spline<_N, _Scalar>::MatN
    So3Spline<_N, _Scalar>::BASE_COEFFICIENTS =
        computeBaseCoefficients<_N, _Scalar>();
// Out-of-line definition of the cumulative blending matrix (note the `true`
// template argument) used by all evaluation routines of So3Spline.
template <int _N, typename _Scalar>
const typename So3Spline<_N, _Scalar>::MatN
    So3Spline<_N, _Scalar>::BLENDING_MATRIX =
        computeBlendingMatrix<_N, _Scalar, true>();
} // namespace basalt

View File

@@ -0,0 +1,137 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Common functions for B-spline evaluation
*/
#pragma once
#include <cmath>
#include <cstdint>

#include <Eigen/Dense>
namespace basalt {
/// @brief Compute binomial coefficient ("n choose k").
///
/// Evaluates the number of k-element subsets of an n-element set via the
/// multiplicative formula; every intermediate value is an exact integer.
/// @param[in] n size of the set
/// @param[in] k number of chosen elements
/// @return binomial coefficient, or 0 when k > n
constexpr inline uint64_t binomialCoefficient(uint64_t n, uint64_t k) {
  if (k > n) {
    return 0;
  }
  uint64_t result = 1;
  uint64_t numerator = n;
  // After the d-th iteration, result equals C(n, d) exactly.
  for (uint64_t d = 1; d <= k; ++d) {
    result *= numerator--;
    result /= d;
  }
  return result;
}
/// @brief Compute blending matrix for uniform B-spline evaluation.
///
/// Fills the matrix with the closed-form entries of the uniform B-spline
/// basis matrix and optionally converts it to the cumulative form used for
/// Lie-group splines (each row accumulates all rows below it).
///
/// @param _N order of the spline
/// @param _Scalar scalar type to use
/// @param _Cumulative if the spline should be cumulative
template <int _N, typename _Scalar = double, bool _Cumulative = false>
Eigen::Matrix<_Scalar, _N, _N> computeBlendingMatrix() {
  Eigen::Matrix<double, _N, _N> m;
  m.setZero();

  // Closed-form entries of the (non-cumulative) blending matrix, up to the
  // common 1/(_N-1)! factor applied at the end. Requires <cmath> for
  // std::pow.
  for (int i = 0; i < _N; ++i) {
    for (int j = 0; j < _N; ++j) {
      double sum = 0;

      for (int s = j; s < _N; ++s) {
        sum += std::pow(-1.0, s - j) * binomialCoefficient(_N, s - j) *
               std::pow(_N - s - 1.0, _N - 1.0 - i);
      }
      m(j, i) = binomialCoefficient(_N - 1, _N - 1 - i) * sum;
    }
  }

  // Cumulative form: processed top-down, so row i becomes the sum of the
  // *original* rows i..N-1. Branch resolved at compile time since
  // _Cumulative is a template parameter.
  if constexpr (_Cumulative) {
    for (int i = 0; i < _N; i++) {
      for (int j = i + 1; j < _N; j++) {
        m.row(i) += m.row(j);
      }
    }
  }

  // Divide by (_N-1)!, computed exactly in integer arithmetic.
  uint64_t factorial = 1;
  for (int i = 2; i < _N; ++i) {
    factorial *= i;
  }

  return (m / factorial).template cast<_Scalar>();
}
/// @brief Compute base coefficient matrix for polynomials of size N.
///
/// Row k holds the coefficients of the k-th derivative of the monomial basis
/// \f$ \begin{bmatrix}1 & t & t^2 & \dots & t^{N-1} \end{bmatrix} \f$.
/// For _N=5 we get the following matrix: \f[ \begin{bmatrix}
/// 1 & 1 & 1 & 1 & 1
/// \\0 & 1 & 2 & 3 & 4
/// \\0 & 0 & 2 & 6 & 12
/// \\0 & 0 & 0 & 6 & 24
/// \\0 & 0 & 0 & 0 & 24
/// \\ \end{bmatrix}
/// \f]
/// Functions \ref RdSpline::baseCoeffsWithTime and \ref
/// So3Spline::baseCoeffsWithTime use this matrix to compute derivatives of the
/// time polynomial.
///
/// @param _N order of the polynomial
/// @param _Scalar scalar type to use
template <int _N, typename _Scalar = double>
Eigen::Matrix<_Scalar, _N, _N> computeBaseCoefficients() {
  Eigen::Matrix<double, _N, _N> coeffs;
  coeffs.setZero();
  coeffs.row(0).setOnes();

  constexpr int DEG = _N - 1;
  int remaining_order = DEG;
  // Each row is obtained by differentiating the previous one term by term.
  for (int derivative = 1; derivative < _N; derivative++) {
    for (int col = DEG - remaining_order; col < _N; col++) {
      coeffs(derivative, col) =
          (remaining_order - DEG + col) * coeffs(derivative - 1, col);
    }
    remaining_order--;
  }
  return coeffs.template cast<_Scalar>();
}
} // namespace basalt

View File

@@ -0,0 +1,126 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Assertions used in the project
*/
#pragma once
#include <iostream>
namespace basalt {
// Mark a variable or expression as intentionally unused (silences warnings).
#define UNUSED(x) (void)(x)
// GCC/Clang attribute for functions that never return to the caller.
#define BASALT_ATTRIBUTE_NORETURN __attribute__((noreturn))
/// @brief Print the failed assertion expression and its source location to
/// stderr, then abort the process. Used by the BASALT_ASSERT* macros below.
inline BASALT_ATTRIBUTE_NORETURN void assertionFailed(char const* expr,
                                                      char const* function,
                                                      char const* file,
                                                      long line) {
  std::cerr << "***** Assertion (" << expr << ") failed in " << function
            << ":\n"
            << file << ':' << line << ":" << std::endl;
  std::abort();
}
/// @brief Like assertionFailed, but additionally prints a user-supplied
/// message after the location. Used by BASALT_ASSERT_MSG.
inline BASALT_ATTRIBUTE_NORETURN void assertionFailedMsg(char const* expr,
                                                         char const* msg,
                                                         char const* function,
                                                         char const* file,
                                                         long line) {
  std::cerr << "***** Assertion (" << expr << ") failed in " << function
            << ":\n"
            << file << ':' << line << ": " << msg << std::endl;
  std::abort();
}
/// @brief Print a fatal-error notice with source location to stderr and
/// abort. Used by BASALT_LOG_FATAL_STREAM.
inline BASALT_ATTRIBUTE_NORETURN void logFatal(char const* function,
                                               char const* file, long line) {
  std::cerr << "***** Fatal error in " << function << ":\n"
            << file << ':' << line << ":" << std::endl;
  std::abort();
}
/// @brief Print a fatal-error notice with a message and source location to
/// stderr and abort. Used by BASALT_LOG_FATAL.
inline BASALT_ATTRIBUTE_NORETURN void logFatalMsg(char const* msg,
                                                  char const* function,
                                                  char const* file, long line) {
  std::cerr << "***** Fatal error in " << function << ":\n"
            << file << ':' << line << ": " << msg << std::endl;
  std::abort();
}
} // namespace basalt
// Branch-prediction hint: the asserted condition is expected to be true.
#define BASALT_LIKELY(x) __builtin_expect(x, 1)
#if defined(BASALT_DISABLE_ASSERTS)
// Assertions compile to no-ops when BASALT_DISABLE_ASSERTS is defined.
#define BASALT_ASSERT(expr) ((void)0)
#define BASALT_ASSERT_MSG(expr, msg) ((void)0)
#define BASALT_ASSERT_STREAM(expr, msg) ((void)0)
#else
// Abort via assertionFailed() with the stringified expression and the call
// site's location when expr evaluates to false.
#define BASALT_ASSERT(expr)                                              \
  (BASALT_LIKELY(!!(expr))                                               \
       ? ((void)0)                                                       \
       : ::basalt::assertionFailed(#expr, __PRETTY_FUNCTION__, __FILE__, \
                                   __LINE__))
// Like BASALT_ASSERT, but also forwards a C-string message.
#define BASALT_ASSERT_MSG(expr, msg)                                   \
  (BASALT_LIKELY(!!(expr))                                             \
       ? ((void)0)                                                     \
       : ::basalt::assertionFailedMsg(#expr, msg, __PRETTY_FUNCTION__, \
                                      __FILE__, __LINE__))
// Like BASALT_ASSERT, but msg may be an ostream expression; it is printed to
// stderr before aborting.
#define BASALT_ASSERT_STREAM(expr, msg)                                   \
  (BASALT_LIKELY(!!(expr))                                                \
       ? ((void)0)                                                        \
       : (std::cerr << msg << std::endl,                                  \
          ::basalt::assertionFailed(#expr, __PRETTY_FUNCTION__, __FILE__, \
                                    __LINE__)))
#endif
// Print msg with the call site's location and abort unconditionally.
#define BASALT_LOG_FATAL(msg) \
  ::basalt::logFatalMsg(msg, __PRETTY_FUNCTION__, __FILE__, __LINE__)
// Stream variant of BASALT_LOG_FATAL.
#define BASALT_LOG_FATAL_STREAM(msg) \
  (std::cerr << msg << std::endl,    \
   ::basalt::logFatal(__PRETTY_FUNCTION__, __FILE__, __LINE__))

View File

@@ -0,0 +1,65 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Definition of the standard containers with Eigen allocator.
*/
#pragma once
#include <deque>
#include <map>
#include <unordered_map>
#include <vector>
#include <Eigen/Dense>
namespace Eigen {
/// std::vector with an Eigen-aligned allocator, required when storing
/// fixed-size vectorizable Eigen types by value.
template <typename T>
using aligned_vector = std::vector<T, Eigen::aligned_allocator<T>>;
/// std::deque with an Eigen-aligned allocator.
template <typename T>
using aligned_deque = std::deque<T, Eigen::aligned_allocator<T>>;
/// std::map whose stored pairs use an Eigen-aligned allocator.
template <typename K, typename V>
using aligned_map = std::map<K, V, std::less<K>,
                             Eigen::aligned_allocator<std::pair<K const, V>>>;
/// std::unordered_map whose stored pairs use an Eigen-aligned allocator.
template <typename K, typename V>
using aligned_unordered_map =
    std::unordered_map<K, V, std::hash<K>, std::equal_to<K>,
                       Eigen::aligned_allocator<std::pair<K const, V>>>;
} // namespace Eigen

View File

@@ -0,0 +1,26 @@
#pragma once
#include <cstddef>
#include <functional>
#include <type_traits>
namespace basalt {
// Helper whose ::value is always false but formally depends on T; lets a
// static_assert fire only when the enclosing template branch is instantiated
// (works around static_assert(false, ...) being ill-formed).
template <class T>
struct dependent_false : std::false_type {};

/// @brief Fold the hash of `value` into the running hash `seed`.
///
/// Simple hash_combine, see e.g. here:
/// https://github.com/HowardHinnant/hash_append/issues/7
/// Uses a golden-ratio constant sized to std::size_t; the 32-bit branch is
/// kept for completeness. Requires <functional> (std::hash) and
/// <type_traits> (std::false_type).
///
/// @param[in,out] seed accumulated hash, updated in place
/// @param[in] value value whose std::hash is mixed into seed
template <class T>
inline void hash_combine(std::size_t& seed, const T& value) {
  // Not sure we ever need 32bit, but here it is...
  if constexpr (sizeof(std::size_t) == 4) {
    seed ^= std::hash<T>{}(value) + 0x9e3779b9U + (seed << 6) + (seed >> 2);
  } else if constexpr (sizeof(std::size_t) == 8) {
    seed ^= std::hash<T>{}(value) + 0x9e3779b97f4a7c15LLU + (seed << 12) +
            (seed >> 4);
  } else {
    static_assert(dependent_false<T>::value, "hash_combine not implemented");
  }
}
} // namespace basalt

View File

@@ -0,0 +1,538 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt-headers.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file
@brief Useful utilities to work with SO(3) and SE(3) groups from Sophus.
*/
#pragma once
#include <sophus/se3.hpp>
#include <sophus/sim3.hpp>
#include <basalt/utils/assert.h>
#include <basalt/utils/eigen_utils.hpp>
namespace Sophus {
/// @brief Decoupled version of logmap for SE(3)
///
/// For an SE(3) element
/// \f[
/// \begin{pmatrix} R & t \\ 0 & 1 \end{pmatrix} \in SE(3),
/// \f]
/// returns \f$ (t, \log(R)) \in \mathbb{R}^6 \f$. The translation is returned
/// as-is, i.e. it is not coupled with the rotation (unlike the standard SE(3)
/// logmap).
///
/// @param[in] se3 SE(3) member
/// @return decoupled tangent vector (6x1): translation first, then \f$ \log(R) \f$
template <typename Scalar>
inline typename SE3<Scalar>::Tangent se3_logd(const SE3<Scalar> &se3) {
  typename SE3<Scalar>::Tangent tangent;
  tangent.template head<3>() = se3.translation();
  tangent.template tail<3>() = se3.so3().log();
  return tangent;
}
/// @brief Decoupled version of expmap for SE(3)
///
/// For a tangent vector \f$ (\upsilon, \omega) \in \mathbb{R}^6 \f$ returns
/// \f[
/// \begin{pmatrix} \exp(\omega) & \upsilon \\ 0 & 1 \end{pmatrix} \in SE(3),
/// \f]
/// where \f$ \exp(\omega) \in SO(3) \f$. The translation part \f$ \upsilon \f$
/// is used directly, i.e. it is not coupled with the rotation.
///
/// @param[in] upsilon_omega decoupled tangent vector (6x1): translation first,
/// then rotation
/// @return SE(3) member
template <typename Derived>
inline SE3<typename Derived::Scalar> se3_expd(
    const Eigen::MatrixBase<Derived> &upsilon_omega) {
  // Compile-time check: argument must be a fixed-size 6-vector.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived, 6);
  using Scalar = typename Derived::Scalar;
  const SO3<Scalar> rotation =
      SO3<Scalar>::exp(upsilon_omega.template tail<3>());
  return SE3<Scalar>(rotation, upsilon_omega.template head<3>());
}
/// @brief Decoupled version of logmap for Sim(3)
///
/// For a Sim(3) element
/// \f[
/// \begin{pmatrix} sR & t \\ 0 & 1 \end{pmatrix} \in Sim(3),
/// \f]
/// returns \f$ (t, \log(R), \log(s)) \in \mathbb{R}^7 \f$. Here rotation and
/// scale are not coupled with translation. Rotation and scale are commutative
/// anyway.
///
/// @param[in] sim3 Sim(3) member
/// @return decoupled tangent vector (7x1): translation first, then the RxSO(3)
/// log (rotation and scale)
template <typename Scalar>
inline typename Sim3<Scalar>::Tangent sim3_logd(const Sim3<Scalar> &sim3) {
  typename Sim3<Scalar>::Tangent tangent;
  tangent.template head<3>() = sim3.translation();
  tangent.template tail<4>() = sim3.rxso3().log();
  return tangent;
}
/// @brief Decoupled version of expmap for Sim(3)
///
/// For a tangent vector \f$ (\upsilon, \omega, \sigma) \in \mathbb{R}^7 \f$
/// returns
/// \f[
/// \begin{pmatrix} \exp(\sigma)\exp(\omega) & \upsilon \\ 0 & 1 \end{pmatrix}
/// \in Sim(3),
/// \f]
/// where \f$ \exp(\omega) \in SO(3) \f$. The translation part \f$ \upsilon \f$
/// is used directly, i.e. rotation and scale are not coupled with translation.
/// Rotation and scale are commutative anyway.
///
/// @param[in] upsilon_omega_sigma decoupled tangent vector (7x1): translation
/// first, then the RxSO(3) tangent (rotation and scale)
/// @return Sim(3) member
template <typename Derived>
inline Sim3<typename Derived::Scalar> sim3_expd(
    const Eigen::MatrixBase<Derived> &upsilon_omega_sigma) {
  // Compile-time check: argument must be a fixed-size 7-vector.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived, 7);
  using Scalar = typename Derived::Scalar;
  const RxSO3<Scalar> rotation_scale =
      RxSO3<Scalar>::exp(upsilon_omega_sigma.template tail<4>());
  return Sim3<Scalar>(rotation_scale, upsilon_omega_sigma.template head<3>());
}
// Note on the use of const_cast in the following functions: The output
// parameter is only marked 'const' to make the C++ compiler accept a temporary
// expression (e.g. an Eigen block) as argument. These functions cast the
// constness away with const_cast, so it is not actually honored. See:
// https://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html
/// @brief Right Jacobian for SO(3)
///
/// For \f$ \exp(x) \in SO(3) \f$ provides a Jacobian that approximates the sum
/// under expmap with a right multiplication of expmap for small \f$ \epsilon
/// \f$. Can be used to compute: \f$ \exp(\phi + \epsilon) \approx \exp(\phi)
/// \exp(J_{\phi} \epsilon)\f$
/// @param[in] phi rotation vector (3x1 vector)
/// @param[out] J_phi right Jacobian of SO(3) at phi (3x3 matrix)
template <typename Derived1, typename Derived2>
inline void rightJacobianSO3(const Eigen::MatrixBase<Derived1> &phi,
                             const Eigen::MatrixBase<Derived2> &J_phi) {
  // Compile-time shape checks: phi must be a 3-vector, J_phi a 3x3 matrix.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived1);
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived2);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived1, 3);
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived2, 3, 3);
  using Scalar = typename Derived1::Scalar;
  // Output is only 'const' to accept temporary Eigen expressions; cast the
  // constness away (see note above these functions).
  Eigen::MatrixBase<Derived2> &J =
      const_cast<Eigen::MatrixBase<Derived2> &>(J_phi);
  Scalar phi_norm2 = phi.squaredNorm();
  Eigen::Matrix<Scalar, 3, 3> phi_hat = Sophus::SO3<Scalar>::hat(phi);
  Eigen::Matrix<Scalar, 3, 3> phi_hat2 = phi_hat * phi_hat;
  // Closed form: J = I - (1 - cos|phi|)/|phi|^2 * phi_hat
  //                    + (|phi| - sin|phi|)/|phi|^3 * phi_hat^2
  J.setIdentity();
  if (phi_norm2 > Sophus::Constants<Scalar>::epsilon()) {
    Scalar phi_norm = std::sqrt(phi_norm2);
    Scalar phi_norm3 = phi_norm2 * phi_norm;
    J -= phi_hat * (1 - std::cos(phi_norm)) / phi_norm2;
    J += phi_hat2 * (phi_norm - std::sin(phi_norm)) / phi_norm3;
  } else {
    // Taylor expansion around 0 (avoids division by a near-zero norm)
    J -= phi_hat / 2;
    J += phi_hat2 / 6;
  }
}
/// @brief Right Inverse Jacobian for SO(3)
///
/// For \f$ \exp(x) \in SO(3) \f$ provides an inverse Jacobian that approximates
/// the logmap of the right multiplication of expmap of the arguments with a sum
/// for small \f$ \epsilon \f$. Can be used to compute: \f$ \log
/// (\exp(\phi) \exp(\epsilon)) \approx \phi + J_{\phi} \epsilon\f$
/// @param[in] phi rotation vector (3x1 vector); norm must be in [0, pi]
/// @param[out] J_phi inverse right Jacobian of SO(3) at phi (3x3 matrix)
template <typename Derived1, typename Derived2>
inline void rightJacobianInvSO3(const Eigen::MatrixBase<Derived1> &phi,
                                const Eigen::MatrixBase<Derived2> &J_phi) {
  // Compile-time shape checks: phi must be a 3-vector, J_phi a 3x3 matrix.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived1);
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived2);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived1, 3);
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived2, 3, 3);
  using Scalar = typename Derived1::Scalar;
  // Output is only 'const' to accept temporary Eigen expressions; cast the
  // constness away (see note above these functions).
  Eigen::MatrixBase<Derived2> &J =
      const_cast<Eigen::MatrixBase<Derived2> &>(J_phi);
  Scalar phi_norm2 = phi.squaredNorm();
  Eigen::Matrix<Scalar, 3, 3> phi_hat = Sophus::SO3<Scalar>::hat(phi);
  Eigen::Matrix<Scalar, 3, 3> phi_hat2 = phi_hat * phi_hat;
  // Closed form: J = I + phi_hat/2 + c(|phi|) * phi_hat^2, with the scalar
  // multiplier c computed below depending on the magnitude of the angle.
  J.setIdentity();
  J += phi_hat / 2;
  if (phi_norm2 > Sophus::Constants<Scalar>::epsilon()) {
    Scalar phi_norm = std::sqrt(phi_norm2);
    // We require that the angle is in range [0, pi]. We check if we are close
    // to pi and apply a Taylor expansion to scalar multiplier of phi_hat2.
    // Technically, log(exp(phi)exp(epsilon)) is not continuous / differentiable
    // at phi=pi, but we still aim to return a reasonable value for all valid
    // inputs.
    BASALT_ASSERT(phi_norm <= M_PI + Sophus::Constants<Scalar>::epsilon());
    if (phi_norm < M_PI - Sophus::Constants<Scalar>::epsilonSqrt()) {
      // regular case for range (0,pi)
      J += phi_hat2 * (1 / phi_norm2 - (1 + std::cos(phi_norm)) /
                                           (2 * phi_norm * std::sin(phi_norm)));
    } else {
      // 0th-order Taylor expansion around pi (sin(|phi|) -> 0 there, so the
      // regular expression would be numerically unstable)
      J += phi_hat2 / (M_PI * M_PI);
    }
  } else {
    // Taylor expansion around 0 (avoids division by a near-zero norm)
    J += phi_hat2 / 12;
  }
}
// Alternative version of rightJacobianInvSO3 that normalizes the angle to
// [0,pi]. However, it's too complicated and we decided to instead assert the
// input range, assuming that it is almost never really required (e.g. b/c the
// input is computed from Log()). Regardless, we leave this version here for
// future reference.
/*
template <typename Derived1, typename Derived2>
inline void rightJacobianInvSO3(const Eigen::MatrixBase<Derived1> &phi,
const Eigen::MatrixBase<Derived2> &J_phi) {
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived1);
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived2);
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived1, 3);
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived2, 3, 3);
using Scalar = typename Derived1::Scalar;
Eigen::MatrixBase<Derived2> &J =
const_cast<Eigen::MatrixBase<Derived2> &>(J_phi);
Scalar phi_norm2 = phi.squaredNorm();
if (phi_norm2 < Sophus::Constants<Scalar>::epsilon()) {
// short-circuit small angle case: Avoid computing sqrt(phi_norm2).
Eigen::Matrix<Scalar, 3, 3> phi_hat = Sophus::SO3<Scalar>::hat(phi);
Eigen::Matrix<Scalar, 3, 3> phi_hat2 = phi_hat * phi_hat;
J.setIdentity();
J += phi_hat / 2;
// Taylor expansion around 0
J += phi_hat2 / 12;
} else {
// non-small angle case: Compute angle.
Scalar phi_norm = std::sqrt(phi_norm2);
// Check phi_norm > pi case and compute phi_hat (we assume that we later
// don't use phi directly, and thus don't update it)
Eigen::Matrix<Scalar, 3, 3> phi_hat;
if (phi_norm > M_PI) {
// In the definition of the inverse Jacobian we consider the effect of a
// perturbation on exp(phi). So here we normalize the angle to [0, 2pi)
// and then flip the axis if it is in (pi, 2pi).
// we know phi_norm > 0
Scalar phi_norm_wrapped = fmod(phi_norm, 2 * M_PI);
if (phi_norm_wrapped > M_PI) {
// flip axis and invert angle
phi_norm_wrapped = 2 * M_PI - phi_norm_wrapped;
phi_hat =
Sophus::SO3<Scalar>::hat(-phi * (phi_norm_wrapped / phi_norm));
} else {
// already in [0, pi]
phi_hat = Sophus::SO3<Scalar>::hat(phi * (phi_norm_wrapped / phi_norm));
}
phi_norm = phi_norm_wrapped;
phi_norm2 = phi_norm * phi_norm;
} else {
// regular case: already in (0, pi]
phi_hat = Sophus::SO3<Scalar>::hat(phi);
}
Eigen::Matrix<Scalar, 3, 3> phi_hat2 = phi_hat * phi_hat;
J.setIdentity();
J += phi_hat / 2;
// Angle is now in range [0, pi]. We check if we are close to 0 (in case of
// a wrap-around) or pi and apply Taylor expansions to scalar multiplier of
// phi_hat2
if (phi_norm < Sophus::Constants<Scalar>::epsilon()) {
// 1st-order Taylor expansion around 0
J += phi_hat2 / 12;
} else if (M_PI - phi_norm < Sophus::Constants<Scalar>::epsilon()) {
// 0th-order Taylor expansion around pi
J += phi_hat2 / (M_PI * M_PI);
} else {
// regular case for range (0,pi)
J += phi_hat2 * (1 / phi_norm2 - (1 + std::cos(phi_norm)) /
(2 * phi_norm * std::sin(phi_norm)));
}
}
}
*/
/// @brief Left Jacobian for SO(3)
///
/// For \f$ \exp(x) \in SO(3) \f$ provides a Jacobian that approximates the sum
/// under expmap with a left multiplication of expmap for small \f$ \epsilon
/// \f$. Can be used to compute: \f$ \exp(\phi + \epsilon) \approx
/// \exp(J_{\phi} \epsilon) \exp(\phi) \f$
/// @param[in] phi rotation vector (3x1 vector)
/// @param[out] J_phi left Jacobian of SO(3) at phi (3x3 matrix)
template <typename Derived1, typename Derived2>
inline void leftJacobianSO3(const Eigen::MatrixBase<Derived1> &phi,
                            const Eigen::MatrixBase<Derived2> &J_phi) {
  // Compile-time shape checks: phi must be a 3-vector, J_phi a 3x3 matrix.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived1);
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived2);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived1, 3);
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived2, 3, 3);
  using Scalar = typename Derived1::Scalar;
  // Output is only 'const' to accept temporary Eigen expressions; cast the
  // constness away (see note above these functions).
  Eigen::MatrixBase<Derived2> &J =
      const_cast<Eigen::MatrixBase<Derived2> &>(J_phi);
  Scalar phi_norm2 = phi.squaredNorm();
  Eigen::Matrix<Scalar, 3, 3> phi_hat = Sophus::SO3<Scalar>::hat(phi);
  Eigen::Matrix<Scalar, 3, 3> phi_hat2 = phi_hat * phi_hat;
  // Closed form: J = I + (1 - cos|phi|)/|phi|^2 * phi_hat
  //                    + (|phi| - sin|phi|)/|phi|^3 * phi_hat^2
  // (same as the right Jacobian except for the sign of the phi_hat term)
  J.setIdentity();
  if (phi_norm2 > Sophus::Constants<Scalar>::epsilon()) {
    Scalar phi_norm = std::sqrt(phi_norm2);
    Scalar phi_norm3 = phi_norm2 * phi_norm;
    J += phi_hat * (1 - std::cos(phi_norm)) / phi_norm2;
    J += phi_hat2 * (phi_norm - std::sin(phi_norm)) / phi_norm3;
  } else {
    // Taylor expansion around 0 (avoids division by a near-zero norm)
    J += phi_hat / 2;
    J += phi_hat2 / 6;
  }
}
/// @brief Left Inverse Jacobian for SO(3)
///
/// For \f$ \exp(x) \in SO(3) \f$ provides an inverse Jacobian that approximates
/// the logmap of the left multiplication of expmap of the arguments with a sum
/// for small \f$ \epsilon \f$. Can be used to compute: \f$ \log
/// (\exp(\epsilon) \exp(\phi)) \approx \phi + J_{\phi} \epsilon\f$
/// @param[in] phi rotation vector (3x1 vector); norm must be in [0, pi]
/// @param[out] J_phi inverse left Jacobian of SO(3) at phi (3x3 matrix)
template <typename Derived1, typename Derived2>
inline void leftJacobianInvSO3(const Eigen::MatrixBase<Derived1> &phi,
                               const Eigen::MatrixBase<Derived2> &J_phi) {
  // Compile-time shape checks: phi must be a 3-vector, J_phi a 3x3 matrix.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived1);
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived2);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived1, 3);
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived2, 3, 3);
  using Scalar = typename Derived1::Scalar;
  // Output is only 'const' to accept temporary Eigen expressions; cast the
  // constness away (see note above these functions).
  Eigen::MatrixBase<Derived2> &J =
      const_cast<Eigen::MatrixBase<Derived2> &>(J_phi);
  Scalar phi_norm2 = phi.squaredNorm();
  Eigen::Matrix<Scalar, 3, 3> phi_hat = Sophus::SO3<Scalar>::hat(phi);
  Eigen::Matrix<Scalar, 3, 3> phi_hat2 = phi_hat * phi_hat;
  // Closed form: J = I - phi_hat/2 + c(|phi|) * phi_hat^2
  // (same as the inverse right Jacobian except for the sign of phi_hat/2)
  J.setIdentity();
  J -= phi_hat / 2;
  if (phi_norm2 > Sophus::Constants<Scalar>::epsilon()) {
    Scalar phi_norm = std::sqrt(phi_norm2);
    // We require that the angle is in range [0, pi]. We check if we are close
    // to pi and apply a Taylor expansion to scalar multiplier of phi_hat2.
    // Technically, log(exp(epsilon)exp(phi)) is not continuous /
    // differentiable at phi=pi, but we still aim to return a reasonable value
    // for all valid inputs.
    BASALT_ASSERT(phi_norm <= M_PI + Sophus::Constants<Scalar>::epsilon());
    if (phi_norm < M_PI - Sophus::Constants<Scalar>::epsilonSqrt()) {
      // regular case for range (0,pi)
      J += phi_hat2 * (1 / phi_norm2 - (1 + std::cos(phi_norm)) /
                                           (2 * phi_norm * std::sin(phi_norm)));
    } else {
      // 0th-order Taylor expansion around pi (sin(|phi|) -> 0 there, so the
      // regular expression would be numerically unstable)
      J += phi_hat2 / (M_PI * M_PI);
    }
  } else {
    // Taylor expansion around 0 (avoids division by a near-zero norm)
    J += phi_hat2 / 12;
  }
}
/// @brief Right Jacobian for decoupled SE(3)
///
/// For \f$ \exp(x) \in SE(3) \f$ provides a Jacobian that approximates the sum
/// under decoupled expmap with a right multiplication of decoupled expmap for
/// small \f$ \epsilon \f$. Can be used to compute: \f$ \exp(\phi + \epsilon)
/// \approx \exp(\phi) \exp(J_{\phi} \epsilon)\f$
/// @param[in] phi decoupled tangent vector (6x1: translation, rotation)
/// @param[out] J_phi block-diagonal right Jacobian (6x6 matrix)
template <typename Derived1, typename Derived2>
inline void rightJacobianSE3Decoupled(
    const Eigen::MatrixBase<Derived1> &phi,
    const Eigen::MatrixBase<Derived2> &J_phi) {
  // Compile-time shape checks: phi must be a 6-vector, J_phi a 6x6 matrix.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived1);
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived2);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived1, 6);
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived2, 6, 6);
  using Scalar = typename Derived1::Scalar;
  // Output is only 'const' to accept temporaries; cast the constness away
  // (see note above these functions).
  Eigen::MatrixBase<Derived2> &jac =
      const_cast<Eigen::MatrixBase<Derived2> &>(J_phi);
  const Eigen::Matrix<Scalar, 3, 1> omega = phi.template tail<3>();
  jac.setZero();
  // Translation block: inverse of the rotation exp(omega).
  jac.template topLeftCorner<3, 3>() =
      Sophus::SO3<Scalar>::exp(omega).inverse().matrix();
  // Rotation block: right Jacobian of SO(3).
  rightJacobianSO3(omega, jac.template bottomRightCorner<3, 3>());
}
/// @brief Right Inverse Jacobian for decoupled SE(3)
///
/// For \f$ \exp(x) \in SE(3) \f$ provides an inverse Jacobian that approximates
/// the decoupled logmap of the right multiplication of the decoupled expmap of
/// the arguments with a sum for small \f$ \epsilon \f$. Can be used to
/// compute: \f$ \log
/// (\exp(\phi) \exp(\epsilon)) \approx \phi + J_{\phi} \epsilon\f$
/// @param[in] phi decoupled tangent vector (6x1: translation, rotation)
/// @param[out] J_phi block-diagonal inverse right Jacobian (6x6 matrix)
template <typename Derived1, typename Derived2>
inline void rightJacobianInvSE3Decoupled(
    const Eigen::MatrixBase<Derived1> &phi,
    const Eigen::MatrixBase<Derived2> &J_phi) {
  // Compile-time shape checks: phi must be a 6-vector, J_phi a 6x6 matrix.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived1);
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived2);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived1, 6);
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived2, 6, 6);
  using Scalar = typename Derived1::Scalar;
  // Output is only 'const' to accept temporaries; cast the constness away
  // (see note above these functions).
  Eigen::MatrixBase<Derived2> &jac =
      const_cast<Eigen::MatrixBase<Derived2> &>(J_phi);
  const Eigen::Matrix<Scalar, 3, 1> omega = phi.template tail<3>();
  jac.setZero();
  // Translation block: rotation matrix exp(omega).
  jac.template topLeftCorner<3, 3>() = Sophus::SO3<Scalar>::exp(omega).matrix();
  // Rotation block: inverse right Jacobian of SO(3).
  rightJacobianInvSO3(omega, jac.template bottomRightCorner<3, 3>());
}
/// @brief Right Jacobian for decoupled Sim(3)
///
/// For \f$ \exp(x) \in Sim(3) \f$ provides a Jacobian that approximates the sum
/// under decoupled expmap with a right multiplication of decoupled expmap for
/// small \f$ \epsilon \f$. Can be used to compute: \f$ \exp(\phi + \epsilon)
/// \approx \exp(\phi) \exp(J_{\phi} \epsilon)\f$
/// @param[in] phi decoupled tangent vector (7x1: translation, rotation, scale)
/// @param[out] J_phi block-diagonal right Jacobian (7x7 matrix)
template <typename Derived1, typename Derived2>
inline void rightJacobianSim3Decoupled(
    const Eigen::MatrixBase<Derived1> &phi,
    const Eigen::MatrixBase<Derived2> &J_phi) {
  // Compile-time shape checks: phi must be a 7-vector, J_phi a 7x7 matrix.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived1);
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived2);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived1, 7);
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived2, 7, 7);
  using Scalar = typename Derived1::Scalar;
  // Output is only 'const' to accept temporaries; cast the constness away
  // (see note above these functions).
  Eigen::MatrixBase<Derived2> &jac =
      const_cast<Eigen::MatrixBase<Derived2> &>(J_phi);
  const Eigen::Matrix<Scalar, 4, 1> omega_sigma = phi.template tail<4>();
  jac.setZero();
  // Translation block: inverse of the scaled rotation exp(omega, sigma).
  jac.template topLeftCorner<3, 3>() =
      Sophus::RxSO3<Scalar>::exp(omega_sigma).inverse().matrix();
  // Rotation block: right Jacobian of SO(3) for the rotation part only.
  rightJacobianSO3(omega_sigma.template head<3>(),
                   jac.template block<3, 3>(3, 3));
  // Scale is additive in the decoupled tangent: identity Jacobian.
  jac(6, 6) = 1;
}
/// @brief Right Inverse Jacobian for decoupled Sim(3)
///
/// For \f$ \exp(x) \in Sim(3) \f$ provides an inverse Jacobian that
/// approximates the decoupled logmap of the right multiplication of the
/// decoupled expmap of the arguments with a sum for small \f$ \epsilon \f$. Can
/// be used to compute: \f$ \log
/// (\exp(\phi) \exp(\epsilon)) \approx \phi + J_{\phi} \epsilon\f$
/// @param[in] phi decoupled tangent vector (7x1: translation, rotation, scale)
/// @param[out] J_phi block-diagonal inverse right Jacobian (7x7 matrix)
template <typename Derived1, typename Derived2>
inline void rightJacobianInvSim3Decoupled(
    const Eigen::MatrixBase<Derived1> &phi,
    const Eigen::MatrixBase<Derived2> &J_phi) {
  // Compile-time shape checks: phi must be a 7-vector, J_phi a 7x7 matrix.
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived1);
  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived2);
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived1, 7);
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived2, 7, 7);
  using Scalar = typename Derived1::Scalar;
  // Output is only 'const' to accept temporaries; cast the constness away
  // (see note above these functions).
  Eigen::MatrixBase<Derived2> &jac =
      const_cast<Eigen::MatrixBase<Derived2> &>(J_phi);
  const Eigen::Matrix<Scalar, 4, 1> omega_sigma = phi.template tail<4>();
  jac.setZero();
  // Translation block: scaled rotation matrix exp(omega, sigma).
  jac.template topLeftCorner<3, 3>() =
      Sophus::RxSO3<Scalar>::exp(omega_sigma).matrix();
  // Rotation block: inverse right Jacobian of SO(3) for the rotation part.
  rightJacobianInvSO3(omega_sigma.template head<3>(),
                      jac.template block<3, 3>(3, 3));
  // Scale is additive in the decoupled tangent: identity Jacobian.
  jac(6, 6) = 1;
}
} // namespace Sophus

View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Format all source files in the project.
# Optionally take folder as argument; default is full include and src dirs.
set -e

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Space-separated list of folders to format (intentionally not quoted below so
# that the default, which contains multiple directories, is word-split).
FOLDER="${1:-$SCRIPT_DIR/../include $SCRIPT_DIR/../test/src $SCRIPT_DIR/../test/include}"

CLANG_FORMAT_COMMANDS="clang-format-12 clang-format-11 clang-format-10 clang-format"

# find the first available command:
for CMD in $CLANG_FORMAT_COMMANDS; do
    if hash "$CMD" 2>/dev/null; then
        CLANG_FORMAT_CMD="$CMD"
        break
    fi
done

# Quote the variable: with an unset variable the unquoted test '[ -z $VAR ]'
# only works by accident, and a value with spaces would break it.
if [ -z "$CLANG_FORMAT_CMD" ]; then
    echo "clang-format not installed..."
    exit 1
fi

# clang format check version
MAJOR_VERSION_NEEDED=10

MAJOR_VERSION_DETECTED="$($CLANG_FORMAT_CMD -version | sed -n -E 's/.*version ([0-9]+).*/\1/p')"
if [ -z "$MAJOR_VERSION_DETECTED" ]; then
    echo "Failed to parse major version ($($CLANG_FORMAT_CMD -version))"
    exit 1
fi

echo "clang-format version $MAJOR_VERSION_DETECTED ($($CLANG_FORMAT_CMD -version))"

if [ "$MAJOR_VERSION_DETECTED" -lt "$MAJOR_VERSION_NEEDED" ]; then
    echo "Looks like your clang format is too old; need at least version $MAJOR_VERSION_NEEDED"
    exit 1
fi

# $FOLDER is deliberately unquoted: it may contain several directories.
find $FOLDER -iname "*.?pp" -or -iname "*.h" | xargs "$CLANG_FORMAT_CMD" -verbose -i

View File

@@ -0,0 +1,50 @@
cmake_minimum_required(VERSION 3.10...3.18)

# Bundled googletest (skip if a parent project already provides it).
if(NOT TARGET gtest_main)
  add_subdirectory(googletest EXCLUDE_FROM_ALL)
endif(NOT TARGET gtest_main)

include_directories(include)

add_executable(test_image src/test_image.cpp)
target_link_libraries(test_image gtest gtest_main basalt::basalt-headers)

add_executable(test_spline src/test_spline.cpp)
target_link_libraries(test_spline gtest gtest_main basalt::basalt-headers)

add_executable(test_spline_se3 src/test_spline_se3.cpp)
target_link_libraries(test_spline_se3 gtest gtest_main basalt::basalt-headers)

add_executable(test_camera src/test_camera.cpp)
target_link_libraries(test_camera gtest gtest_main basalt::basalt-headers)

add_executable(test_sophus src/test_sophus.cpp)
target_link_libraries(test_sophus gtest gtest_main basalt::basalt-headers)

add_executable(test_preintegration src/test_preintegration.cpp)
target_link_libraries(test_preintegration gtest gtest_main basalt::basalt-headers)

add_executable(test_ceres_spline_helper src/test_ceres_spline_helper.cpp)
target_link_libraries(test_ceres_spline_helper gtest gtest_main basalt::basalt-headers)

# benchmarks (currently doesn't work on macOS and with clang)
if(NOT APPLE AND "${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
  set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "ENABLE tests")
  set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "ENABLE exceptions")
  set(BENCHMARK_ENABLE_GTEST_TESTS OFF CACHE BOOL "ENABLE gtests")
  set(BENCHMARK_ENABLE_INSTALL OFF CACHE BOOL "ENABLE install")
  add_subdirectory(benchmark EXCLUDE_FROM_ALL)

  add_executable(benchmark_camera src/benchmark_camera.cpp)
  target_link_libraries(benchmark_camera benchmark::benchmark basalt::basalt-headers)
endif()

include(GoogleTest) # for gtest_discover_test

enable_testing()
gtest_discover_tests(test_image)
gtest_discover_tests(test_spline)
# fix: these two executables were built but never registered with CTest
gtest_discover_tests(test_spline_se3)
gtest_discover_tests(test_camera)
gtest_discover_tests(test_sophus)
gtest_discover_tests(test_preintegration)
gtest_discover_tests(test_ceres_spline_helper)

View File

@@ -0,0 +1,5 @@
---
Language: Cpp
BasedOnStyle: Google
PointerAlignment: Left
...

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Build and install a sanitizer-instrumented libc++/libc++abi from LLVM trunk.
# Expects C_COMPILER, COMPILER, and LIBCXX_SANITIZER to be set by the caller
# (e.g. the CI workflow); BUILD_32_BITS is optional.

# Checkout LLVM sources
git clone --depth=1 https://github.com/llvm/llvm-project.git llvm-project

# Setup libc++ options
if [ -z "$BUILD_32_BITS" ]; then
  export BUILD_32_BITS=OFF && echo disabling 32 bit build
fi

# Build and install libc++ and libc++abi with the sanitizer from
# LIBCXX_SANITIZER. NOTE(review): the stable ABI is used here
# (LIBCXX_ABI_UNSTABLE=OFF) — the original comment claimed the unstable ABI;
# flip the flag if unstable-ABI sanitizer coverage is actually desired.
cd ./llvm-project
cmake -DCMAKE_C_COMPILER=${C_COMPILER} \
      -DCMAKE_CXX_COMPILER=${COMPILER} \
      -DCMAKE_BUILD_TYPE=RelWithDebInfo \
      -DCMAKE_INSTALL_PREFIX=/usr \
      -DLIBCXX_ABI_UNSTABLE=OFF \
      -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \
      -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \
      -DLLVM_ENABLE_PROJECTS='libcxx;libcxxabi' \
      -S llvm -B llvm-build -G "Unix Makefiles"

# Build just the two runtime libraries, then install them system-wide.
make -C llvm-build -j3 cxx cxxabi
sudo make -C llvm-build install-cxx install-cxxabi
cd ..

View File

@@ -0,0 +1,32 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG]"
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**System**
Which OS, compiler, and compiler version are you using:
- OS:
- Compiler and version:
**To reproduce**
Steps to reproduce the behavior:
1. sync to commit ...
2. cmake/bazel...
3. make ...
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.

View File

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: "[FR]"
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

View File

@@ -0,0 +1,30 @@
name: bazel
on:
push: {}
pull_request: {}
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: mount bazel cache
uses: actions/cache@v2.0.0
env:
cache-name: bazel-cache
with:
path: "~/.cache/bazel"
key: ${{ env.cache-name }}-${{ runner.os }}-${{ github.ref }}
restore-keys: |
${{ env.cache-name }}-${{ runner.os }}-main
- name: build
run: |
bazel build //:benchmark //:benchmark_main //test/...
- name: test
run: |
bazel test --test_output=all //test/...

View File

@@ -0,0 +1,26 @@
name: pylint
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
pylint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.8
uses: actions/setup-python@v1
with:
python-version: 3.8
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install pylint pylint-exit conan
- name: Run pylint
run: |
pylint `find . -name '*.py'|xargs` || pylint-exit $?

View File

@@ -0,0 +1,92 @@
name: sanitizer
on:
push: {}
pull_request: {}
env:
UBSAN_OPTIONS: "print_stacktrace=1"
jobs:
job:
name: ${{ matrix.sanitizer }}.${{ matrix.build_type }}.${{ matrix.compiler }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
build_type: ['Debug', 'RelWithDebInfo']
sanitizer: ['asan', 'ubsan', 'tsan']
compiler: ['clang', 'gcc']
# TODO: add 'msan' above. currently failing and needs investigation.
steps:
- uses: actions/checkout@v2
- name: configure msan env
if: matrix.sanitizer == 'msan'
run: |
echo "EXTRA_FLAGS=-g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins" >> $GITHUB_ENV
echo "LIBCXX_SANITIZER=MemoryWithOrigins" >> $GITHUB_ENV
- name: configure ubsan env
if: matrix.sanitizer == 'ubsan'
run: |
echo "EXTRA_FLAGS=-g -O2 -fno-omit-frame-pointer -fsanitize=undefined -fno-sanitize-recover=all" >> $GITHUB_ENV
echo "LIBCXX_SANITIZER=Undefined" >> $GITHUB_ENV
- name: configure asan env
if: matrix.sanitizer == 'asan'
run: |
echo "EXTRA_FLAGS=-g -O2 -fno-omit-frame-pointer -fsanitize=address -fno-sanitize-recover=all" >> $GITHUB_ENV
echo "LIBCXX_SANITIZER=Address" >> $GITHUB_ENV
- name: configure tsan env
if: matrix.sanitizer == 'tsan'
run: |
echo "EXTRA_FLAGS=-g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all" >> $GITHUB_ENV
echo "LIBCXX_SANITIZER=Thread" >> $GITHUB_ENV
- name: configure clang
if: matrix.compiler == 'clang'
run: |
echo "CC=clang" >> $GITHUB_ENV
echo "CXX=clang++" >> $GITHUB_ENV
- name: configure gcc
if: matrix.compiler == 'gcc'
run: |
sudo apt update && sudo apt -y install gcc-10 g++-10
echo "CC=gcc-10" >> $GITHUB_ENV
echo "CXX=g++-10" >> $GITHUB_ENV
- name: install llvm stuff
if: matrix.compiler == 'clang'
run: |
"${GITHUB_WORKSPACE}/.github/.libcxx-setup.sh"
echo "EXTRA_CXX_FLAGS=\"-stdlib=libc++\"" >> $GITHUB_ENV
- name: create build environment
run: cmake -E make_directory ${{ runner.workspace }}/_build
- name: configure cmake
shell: bash
working-directory: ${{ runner.workspace }}/_build
run: >
cmake $GITHUB_WORKSPACE
-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF
-DBENCHMARK_ENABLE_LIBPFM=OFF
-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
-DCMAKE_C_COMPILER=${{ env.CC }}
-DCMAKE_CXX_COMPILER=${{ env.CXX }}
-DCMAKE_C_FLAGS="${{ env.EXTRA_FLAGS }}"
-DCMAKE_CXX_FLAGS="${{ env.EXTRA_FLAGS }} ${{ env.EXTRA_CXX_FLAGS }}"
-DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
- name: build
shell: bash
working-directory: ${{ runner.workspace }}/_build
run: cmake --build . --config ${{ matrix.build_type }}
- name: test
shell: bash
working-directory: ${{ runner.workspace }}/_build
run: ctest -C ${{ matrix.build_type }} -VV

View File

@@ -0,0 +1,24 @@
name: test-bindings
on:
push:
branches: [main]
pull_request:
branches: [main]
jobs:
python_bindings:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v1
with:
python-version: 3.8
- name: Install benchmark
run:
python setup.py install
- name: Run example bindings
run:
python bindings/python/google_benchmark/example.py

View File

@@ -0,0 +1,58 @@
name: Build and upload Python wheels

on:
  workflow_dispatch:

jobs:
  build_sdist:
    name: Build source distribution
    runs-on: ubuntu-latest
    steps:
      - name: Check out repo
        uses: actions/checkout@v2

      - name: Install Python 3.9
        uses: actions/setup-python@v2
        with:
          # quoted: unquoted version numbers are parsed as floats
          python-version: "3.9"

      - name: Build and check sdist
        run: |
          python setup.py sdist
          ls -al dist/

      - name: Upload sdist
        uses: actions/upload-artifact@v2
        with:
          name: dist
          path: dist/*.tar.gz

  build_wheels:
    name: Build google-benchmark wheels on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      # let runner finish even if there is a failure
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    steps:
      - name: Check out Google Benchmark
        uses: actions/checkout@v2

      - name: Set up Python 3.9
        uses: actions/setup-python@v2
        with:
          python-version: "3.9"

      - name: Build Python wheels on ${{ matrix.os }}
        env:
          CIBW_BUILD: 'cp36-* cp37-* cp38-* cp39-*'
        run: |
          # fix: package name was duplicated ("cibuildwheelcibuildwheel"),
          # which made this install step fail
          pip install cibuildwheel==2.0.0a2
          python -m cibuildwheel --output-dir wheelhouse

      - name: Upload wheels
        uses: actions/upload-artifact@v2
        with:
          name: dist
          path: ./wheelhouse/*.whl

View File

@@ -0,0 +1,67 @@
*.a
*.so
*.so.?*
*.dll
*.exe
*.dylib
*.cmake
!/cmake/*.cmake
!/test/AssemblyTests.cmake
*~
*.swp
*.pyc
__pycache__
.DS_Store
# lcov
*.lcov
/lcov
# cmake files.
/Testing
CMakeCache.txt
CMakeFiles/
cmake_install.cmake
# makefiles.
Makefile
# in-source build.
bin/
lib/
/test/*_test
# exuberant ctags.
tags
# YouCompleteMe configuration.
.ycm_extra_conf.pyc
# ninja generated files.
.ninja_deps
.ninja_log
build.ninja
install_manifest.txt
rules.ninja
# bazel output symlinks.
bazel-*
# out-of-source build top-level folders.
build/
_build/
build*/
# in-source dependencies
/googletest/
# Visual Studio 2015/2017 cache/options directory
.vs/
CMakeSettings.json
# Visual Studio Code cache/options directory
.vscode/
# Python build stuff
dist/
*.egg-info*

View File

@@ -0,0 +1,208 @@
sudo: required
dist: trusty
language: cpp
matrix:
include:
- compiler: gcc
addons:
apt:
packages:
- lcov
env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage
- compiler: gcc
addons:
apt:
packages:
- g++-multilib
- libc6:i386
env:
- COMPILER=g++
- C_COMPILER=gcc
- BUILD_TYPE=Debug
- BUILD_32_BITS=ON
- EXTRA_FLAGS="-m32"
- compiler: gcc
addons:
apt:
packages:
- g++-multilib
- libc6:i386
env:
- COMPILER=g++
- C_COMPILER=gcc
- BUILD_TYPE=Release
- BUILD_32_BITS=ON
- EXTRA_FLAGS="-m32"
- compiler: gcc
env:
- INSTALL_GCC6_FROM_PPA=1
- COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug
- ENABLE_SANITIZER=1
- EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold"
# Clang w/ libc++
- compiler: clang
dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1
- EXTRA_CXX_FLAGS="-stdlib=libc++"
- compiler: clang
dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
- LIBCXX_BUILD=1
- EXTRA_CXX_FLAGS="-stdlib=libc++"
# Clang w/ 32bit libc++
- compiler: clang
dist: xenial
addons:
apt:
packages:
- clang-3.8
- g++-multilib
- libc6:i386
env:
- INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1
- BUILD_32_BITS=ON
- EXTRA_FLAGS="-m32"
- EXTRA_CXX_FLAGS="-stdlib=libc++"
# Clang w/ 32bit libc++
- compiler: clang
dist: xenial
addons:
apt:
packages:
- clang-3.8
- g++-multilib
- libc6:i386
env:
- INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
- LIBCXX_BUILD=1
- BUILD_32_BITS=ON
- EXTRA_FLAGS="-m32"
- EXTRA_CXX_FLAGS="-stdlib=libc++"
# Clang w/ libc++, ASAN, UBSAN
- compiler: clang
dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address"
- ENABLE_SANITIZER=1
- EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all"
- EXTRA_CXX_FLAGS="-stdlib=libc++"
- UBSAN_OPTIONS=print_stacktrace=1
# Clang w/ libc++ and MSAN
- compiler: clang
dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
- LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins
- ENABLE_SANITIZER=1
- EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins"
- EXTRA_CXX_FLAGS="-stdlib=libc++"
# Clang w/ libc++ and MSAN
- compiler: clang
dist: xenial
addons:
apt:
packages:
clang-3.8
env:
- INSTALL_GCC6_FROM_PPA=1
- COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo
- LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread
- ENABLE_SANITIZER=1
- EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all"
- EXTRA_CXX_FLAGS="-stdlib=libc++"
# Apple Clang on macOS, 32-bit Release build
- os: osx
  osx_image: xcode8.3
  compiler: clang
  env:
    - COMPILER=clang++
    - BUILD_TYPE=Release
    - BUILD_32_BITS=ON
    - EXTRA_FLAGS="-m32"

before_script:
  # Build the requested libc++ flavour before configuring (see .libcxx-setup.sh).
  - if [ -n "${LIBCXX_BUILD}" ]; then
      source .libcxx-setup.sh;
    fi
  # Assembly tests are switched off for sanitizer runs.
  - if [ -n "${ENABLE_SANITIZER}" ]; then
      export EXTRA_OPTIONS="-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF";
    else
      export EXTRA_OPTIONS="";
    fi
  - mkdir -p build && cd build

before_install:
  # Default to a 64-bit build unless the matrix entry opted in to 32-bit.
  - if [ -z "$BUILD_32_BITS" ]; then
      export BUILD_32_BITS=OFF && echo disabling 32 bit build;
    fi
  # The ubuntu-toolchain-r PPA provides the g++-6 installed in 'install' below.
  - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then
      sudo add-apt-repository -y "ppa:ubuntu-toolchain-r/test";
      sudo apt-get update --option Acquire::Retries=100 --option Acquire::http::Timeout="60";
    fi

install:
  - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then
      travis_wait sudo -E apt-get -yq --no-install-suggests --no-install-recommends install g++-6;
    fi
  # FileCheck (from llvm-3.9-tools) is used by the 64-bit-only assembly tests.
  - if [ "${TRAVIS_OS_NAME}" == "linux" -a "${BUILD_32_BITS}" == "OFF" ]; then
      travis_wait sudo -E apt-get -y --no-install-suggests --no-install-recommends install llvm-3.9-tools;
      sudo cp /usr/lib/llvm-3.9/bin/FileCheck /usr/local/bin/;
    fi
  # cpp-coveralls is the uploader invoked in 'after_success' for Coverage builds.
  - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
      PATH=~/.local/bin:${PATH};
      pip install --user --upgrade pip;
      travis_wait pip install --user cpp-coveralls;
    fi
  - if [ "${C_COMPILER}" == "gcc-7" -a "${TRAVIS_OS_NAME}" == "osx" ]; then
      rm -f /usr/local/include/c++;
      brew update;
      travis_wait brew install gcc@7;
    fi
  # Bazel powers the second test pass in 'script' below.
  - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
      sudo apt-get update -qq;
      sudo apt-get install -qq unzip cmake3;
      wget https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-linux-x86_64.sh --output-document bazel-installer.sh;
      travis_wait sudo bash bazel-installer.sh;
    fi
  - if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
      curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-darwin-x86_64.sh;
      travis_wait sudo bash bazel-installer.sh;
    fi

script:
  # CMake configure/build + ctest run, then an independent Bazel test pass.
  - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_C_FLAGS="${EXTRA_FLAGS}" -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS} ${EXTRA_CXX_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} ..
  - make
  - ctest -C ${BUILD_TYPE} --output-on-failure
  - bazel test -c dbg --define google_benchmark.have_regex=posix --announce_rc --verbose_failures --test_output=errors --keep_going //test/...

after_success:
  # Upload coverage data for Linux Coverage builds.
  - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
      coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .;
    fi

View File

@@ -0,0 +1,115 @@
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Werror',
'-pedantic-errors',
'-std=c++0x',
'-fno-strict-aliasing',
'-O3',
'-DNDEBUG',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-I', 'include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# Load the compilation database when a folder is configured above; otherwise
# 'database' stays None and the static 'flags' list is used instead.
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# Source-file extensions tried when mapping a header to its sibling source.
SOURCE_EXTENSIONS = [ '.cc' ]
def DirectoryOfThisScript():
  """Return the absolute path of the directory containing this config file."""
  script_path = os.path.abspath( __file__ )
  return os.path.dirname( script_path )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of |flags| with relative include paths anchored at
  |working_directory|.

  Handles both separated forms ('-I', 'dir') and fused forms ('-Idir',
  '--sysroot=dir'). Without a working directory, |flags| is returned
  unchanged (as a new list). Empty flags are dropped.
  """
  if not working_directory:
    return list( flags )
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolutized = []
  expect_path = False
  for original in flags:
    updated = original
    if expect_path:
      # Previous flag was a bare path option; this token is its argument.
      expect_path = False
      if not original.startswith( '/' ):
        updated = os.path.join( working_directory, original )
    for prefix in path_flags:
      if original == prefix:
        expect_path = True
        break
      if original.startswith( prefix ):
        # Fused form: rebase the path portion after the option prefix.
        tail = original[ len( prefix ): ]
        updated = prefix + os.path.join( working_directory, tail )
        break
    if updated:
      absolutized.append( updated )
  return absolutized
def IsHeaderFile( filename ):
  """Return True when |filename| carries a C/C++ header extension."""
  _, extension = os.path.splitext( filename )
  return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  """Look up compile flags for |filename| in the compilation database.

  The compile_commands.json generated by CMake has no entries for headers,
  so for a header we query the database for a sibling source file (same
  basename, extension from SOURCE_EXTENSIONS) and reuse its flags.
  Returns None when no usable entry is found for a header.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )
  stem = os.path.splitext( filename )[ 0 ]
  for extension in SOURCE_EXTENSIONS:
    candidate = stem + extension
    if os.path.exists( candidate ):
      info = database.GetCompilationInfoForFile( candidate )
      if info.compiler_flags_:
        return info
  return None
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the compile flags to use for |filename|.

  Prefers the compilation database when one was loaded; otherwise falls
  back to the static 'flags' list, rebased onto this script's directory.
  Returns None when the database has no usable entry for the file.
  """
  if database:
    # NOTE: compilation_info.compiler_flags_ is a "list-like" StringVec
    # object, not a real Python list.
    info = GetCompilationInfoForFile( filename )
    if not info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      info.compiler_flags_,
      info.compiler_working_dir_ )
  else:
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, DirectoryOfThisScript() )
  return {
    'flags': final_flags,
    'do_cache': True
  }

View File

@@ -0,0 +1,60 @@
# This is the official list of benchmark authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
#
# Please keep the list sorted.
Albert Pretorius <pretoalb@gmail.com>
Alex Steele <steeleal123@gmail.com>
Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Carto
Christian Wassermann <christian_wassermann@web.de>
Christopher Seymour <chris.j.seymour@hotmail.com>
Colin Braley <braley.colin@gmail.com>
Daniel Harvey <danielharvey458@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dirac Research
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Backus <eric_backus@alum.mit.edu>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Gergő Szitár <szitar.gergo@gmail.com>
Google Inc.
International Business Machines Corporation
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
Jordan Williams <jwillikers@protonmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
MongoDB Inc.
Nick Hutchinson <nshutchinson@gmail.com>
Norman Heino <norman.heino@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Roman Lebedev <lebedev.ri@gmail.com>
Sayan Bhattacharjee <aero.sayan@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Steinar H. Gunderson <sgunderson@bigfoot.com>
Stripe, Inc.
Tobias Schmidt <tobias.schmidt@in.tum.de>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>
Min-Yih Hsu <yihshyng223@gmail.com>

View File

@@ -0,0 +1,54 @@
load("@rules_cc//cc:defs.bzl", "cc_library")

licenses(["notice"])

# select()-able setting matching QNX builds (cpu x64_qnx).
config_setting(
    name = "qnx",
    constraint_values = ["@platforms//os:qnx"],
    values = {
        "cpu": "x64_qnx",
    },
    visibility = [":__subpackages__"],
)

# select()-able setting matching Windows builds; used below to pick the
# shlwapi link option instead of -pthread.
config_setting(
    name = "windows",
    constraint_values = ["@platforms//os:windows"],
    values = {
        "cpu": "x64_windows",
    },
    visibility = [":__subpackages__"],
)

# The benchmark library itself. Excludes the default main() so that users
# can supply their own (see :benchmark_main below).
cc_library(
    name = "benchmark",
    srcs = glob(
        [
            "src/*.cc",
            "src/*.h",
        ],
        exclude = ["src/benchmark_main.cc"],
    ),
    hdrs = ["include/benchmark/benchmark.h"],
    linkopts = select({
        ":windows": ["-DEFAULTLIB:shlwapi.lib"],
        "//conditions:default": ["-pthread"],
    }),
    strip_include_prefix = "include",
    visibility = ["//visibility:public"],
)

# Convenience target providing the stock main() for benchmark binaries.
cc_library(
    name = "benchmark_main",
    srcs = ["src/benchmark_main.cc"],
    hdrs = ["include/benchmark/benchmark.h"],
    strip_include_prefix = "include",
    visibility = ["//visibility:public"],
    deps = [":benchmark"],
)

# Internal headers, exposed only to the //test package.
cc_library(
    name = "benchmark_internal_headers",
    hdrs = glob(["src/*.h"]),
    visibility = ["//test:__pkg__"],
)

View File

@@ -0,0 +1,313 @@
cmake_minimum_required (VERSION 3.5.1)
# Opt in to NEW behaviour for each of these policies whenever the running
# CMake version knows about them.
foreach(p
  CMP0048 # OK to clear PROJECT_VERSION on project()
  CMP0054 # CMake 3.1
  CMP0056 # export EXE_LINKER_FLAGS to try_run
  CMP0057 # Support no if() IN_LIST operator
  CMP0063 # Honor visibility properties for all targets
  CMP0077 # Allow option() overrides in importing projects
  )
  if(POLICY ${p})
    cmake_policy(SET ${p} NEW)
  endif()
endforeach()
project (benchmark VERSION 1.6.0 LANGUAGES CXX)
option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
if(NOT MSVC)
option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
else()
set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC)" FORCE)
endif()
option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON)
# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which
# may require downloading the source code.
option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree building of unmet dependencies" OFF)
# This option can be used to disable building and running unit tests which depend on gtest
# in cases where it is not possible to build or find a valid version of gtest.
option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)
option(BENCHMARK_ENABLE_LIBPFM "Enable performance counters provided by libpfm" OFF)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
if(MSVC)
# As of CMake 3.18, CMAKE_SYSTEM_PROCESSOR is not set properly for MSVC and
# cross-compilation (e.g. Host=x86_64, target=aarch64) requires using the
# undocumented, but working variable.
# See https://gitlab.kitware.com/cmake/cmake/-/issues/15170
set(CMAKE_SYSTEM_PROCESSOR ${MSVC_CXX_ARCHITECTURE_ID})
if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ARM")
set(CMAKE_CROSSCOMPILING TRUE)
endif()
endif()
# Default for BENCHMARK_ENABLE_ASSEMBLY_TESTS; flipped to ON by the function
# below only when the configuration can actually run the assembly tests.
set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
# Decide whether the assembly verification tests can be enabled: requires a
# non-coverage build, a non-MSVC 64-bit x86_64 toolchain, and LLVM FileCheck.
function(should_enable_assembly_tests)
  if(CMAKE_BUILD_TYPE)
    string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
    if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
      # FIXME: The --coverage flag needs to be removed when building assembly
      # tests for this to work.
      return()
    endif()
  endif()
  if (MSVC)
    return()
  elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
    return()
  elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
    # FIXME: Make these work on 32 bit builds
    return()
  elseif(BENCHMARK_BUILD_32_BITS)
    # FIXME: Make these work on 32 bit builds
    return()
  endif()
  # The assembly tests verify compiler output with LLVM's FileCheck tool.
  find_program(LLVM_FILECHECK_EXE FileCheck)
  if (LLVM_FILECHECK_EXE)
    set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE)
    message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}")
  else()
    message(STATUS "Failed to find LLVM FileCheck")
    return()
  endif()
  set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE)
endfunction()
should_enable_assembly_tests()
# This option disables the building and running of the assembly verification tests
option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests"
${ENABLE_ASSEMBLY_TESTS_DEFAULT})
# Make sure we can import our CMake functions
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
# Read the git tags to determine the project version
include(GetGitVersion)
get_git_version(GIT_VERSION)
# If no git version can be determined, use the version
# from the project() command
if ("${GIT_VERSION}" STREQUAL "0.0.0")
set(VERSION "${benchmark_VERSION}")
else()
set(VERSION "${GIT_VERSION}")
endif()
# Tell the user what versions we are using
message(STATUS "Version: ${VERSION}")
# The version of the libraries
set(GENERIC_LIB_VERSION ${VERSION})
string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION)
# Import our CMake modules
include(CheckCXXCompilerFlag)
include(AddCXXCompilerFlag)
include(CXXFeatureCheck)
if (BENCHMARK_BUILD_32_BITS)
add_required_cxx_compiler_flag(-m32)
endif()
if (MSVC)
# Turn compiler warnings up to 11
string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
add_cxx_compiler_flag(-EHs-)
add_cxx_compiler_flag(-EHa-)
add_definitions(-D_HAS_EXCEPTIONS=0)
endif()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL")
set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG")
set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG")
endif()
else()
# Try and enable C++11. Don't use C++14 because it doesn't work in some
# configurations.
add_cxx_compiler_flag(-std=c++11)
if (NOT HAVE_CXX_FLAG_STD_CXX11)
add_cxx_compiler_flag(-std=c++0x)
endif()
# Turn compiler warnings up to 11
add_cxx_compiler_flag(-Wall)
add_cxx_compiler_flag(-Wextra)
add_cxx_compiler_flag(-Wshadow)
add_cxx_compiler_flag(-Werror RELEASE)
add_cxx_compiler_flag(-Werror RELWITHDEBINFO)
add_cxx_compiler_flag(-Werror MINSIZEREL)
if (NOT BENCHMARK_ENABLE_TESTING)
# Disable warning when compiling tests as gtest does not use 'override'.
add_cxx_compiler_flag(-Wsuggest-override)
endif()
add_cxx_compiler_flag(-pedantic)
add_cxx_compiler_flag(-pedantic-errors)
add_cxx_compiler_flag(-Wshorten-64-to-32)
add_cxx_compiler_flag(-fstrict-aliasing)
# Disable warnings regarding deprecated parts of the library while building
# and testing those parts of the library.
add_cxx_compiler_flag(-Wno-deprecated-declarations)
if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
# Intel silently ignores '-Wno-deprecated-declarations',
# warning no. 1786 must be explicitly disabled.
# See #631 for rationale.
add_cxx_compiler_flag(-wd1786)
endif()
# Disable deprecation warnings for release builds (when -Werror is enabled).
add_cxx_compiler_flag(-Wno-deprecated RELEASE)
add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO)
add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL)
if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
add_cxx_compiler_flag(-fno-exceptions)
endif()
if (HAVE_CXX_FLAG_FSTRICT_ALIASING)
if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing
add_cxx_compiler_flag(-Wstrict-aliasing)
endif()
endif()
# ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden
# (because of deprecated overload)
add_cxx_compiler_flag(-wd654)
add_cxx_compiler_flag(-Wthread-safety)
if (HAVE_CXX_FLAG_WTHREAD_SAFETY)
cxx_feature_check(THREAD_SAFETY_ATTRIBUTES)
endif()
# On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a
# predefined macro, which turns on all of the wonderful libc extensions.
# However g++ doesn't do this in Cygwin so we have to define it ourselves
# since we depend on GNU/POSIX/BSD extensions.
if (CYGWIN)
add_definitions(-D_GNU_SOURCE=1)
endif()
if (QNXNTO)
add_definitions(-D_QNX_SOURCE)
endif()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
add_cxx_compiler_flag(-flto)
add_cxx_compiler_flag(-Wno-lto-type-mismatch)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
find_program(GCC_AR gcc-ar)
if (GCC_AR)
set(CMAKE_AR ${GCC_AR})
endif()
find_program(GCC_RANLIB gcc-ranlib)
if (GCC_RANLIB)
set(CMAKE_RANLIB ${GCC_RANLIB})
endif()
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
include(llvm-toolchain)
endif()
endif()
# Coverage build type
set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}"
CACHE STRING "Flags used by the C++ compiler during coverage builds."
FORCE)
set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}"
CACHE STRING "Flags used for linking binaries during coverage builds."
FORCE)
set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}"
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
FORCE)
mark_as_advanced(
BENCHMARK_CXX_FLAGS_COVERAGE
BENCHMARK_EXE_LINKER_FLAGS_COVERAGE
BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE)
set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.")
add_cxx_compiler_flag(--coverage COVERAGE)
endif()
if (BENCHMARK_USE_LIBCXX)
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
add_cxx_compiler_flag(-stdlib=libc++)
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR
"${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
add_cxx_compiler_flag(-nostdinc++)
message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
# Adding -nodefaultlibs directly to CMAKE_<TYPE>_LINKER_FLAGS will break
# configuration checks such as 'find_package(Threads)'
list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs)
# -lc++ cannot be added directly to CMAKE_<TYPE>_LINKER_FLAGS because
# linker flags appear before all linker inputs and -lc++ must appear after.
list(APPEND BENCHMARK_CXX_LIBRARIES c++)
else()
message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler")
endif()
endif(BENCHMARK_USE_LIBCXX)
set(EXTRA_CXX_FLAGS "")
if (WIN32 AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
# Clang on Windows fails to compile the regex feature check under C++11
set(EXTRA_CXX_FLAGS "-DCMAKE_CXX_STANDARD=14")
endif()
# C++ feature checks
# Determine the correct regular expression engine to use
cxx_feature_check(STD_REGEX ${EXTRA_CXX_FLAGS})
cxx_feature_check(GNU_POSIX_REGEX ${EXTRA_CXX_FLAGS})
cxx_feature_check(POSIX_REGEX ${EXTRA_CXX_FLAGS})
if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(FATAL_ERROR "Failed to determine the source files for the regular expression backend")
endif()
if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX
AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(WARNING "Using std::regex with exceptions disabled is not fully supported")
endif()
cxx_feature_check(STEADY_CLOCK)
# Ensure we have pthreads
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
if (BENCHMARK_ENABLE_LIBPFM)
find_package(PFM)
endif()
# Set up directories
include_directories(${PROJECT_SOURCE_DIR}/include)
# Build the targets
add_subdirectory(src)
if (BENCHMARK_ENABLE_TESTING)
enable_testing()
if (BENCHMARK_ENABLE_GTEST_TESTS AND
NOT (TARGET gtest AND TARGET gtest_main AND
TARGET gmock AND TARGET gmock_main))
include(GoogleTest)
endif()
add_subdirectory(test)
endif()

View File

@@ -0,0 +1,58 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**, it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
Once your CLA is submitted (or if you already submitted one for
another Google project), make a commit adding yourself to the
[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
of your first [pull request][].
[AUTHORS]: AUTHORS
[CONTRIBUTORS]: CONTRIBUTORS
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and setup a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages are able to be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[pull request]: https://help.github.com/articles/creating-a-pull-request

View File

@@ -0,0 +1,85 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
#
# Names should be added to this file as:
# Name <email address>
#
# Please keep the list sorted.
Abhina Sreeskantharajan <abhina.sreeskantharajan@ibm.com>
Albert Pretorius <pretoalb@gmail.com>
Alex Steele <steelal123@gmail.com>
Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christian Wassermann <christian_wassermann@web.de>
Christopher Seymour <chris.j.seymour@hotmail.com>
Colin Braley <braley.colin@gmail.com>
Cyrille Faucheux <cyrille.faucheux@gmail.com>
Daniel Harvey <danielharvey458@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dominic Hamon <dma@stripysock.com> <dominic@google.com>
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Backus <eric_backus@alum.mit.edu>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Fanbo Meng <fanbo.meng@ibm.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Geoffrey Martin-Noble <gcmn@google.com> <gmngeoffrey@gmail.com>
Gergő Szitár <szitar.gergo@gmail.com>
Hannes Hauswedell <h2@fsfe.org>
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
John Millikin <jmillikin@stripe.com>
Jordan Williams <jwillikers@protonmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kai Wolf <kai.wolf@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
Norman Heino <norman.heino@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Pascal Leroy <phl@google.com>
Paul Redmond <paul.redmond@gmail.com>
Pierre Phaneuf <pphaneuf@google.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Raul Marin <rmrodriguez@cartodb.com>
Ray Glover <ray.glover@uk.ibm.com>
Robert Guo <robert.guo@mongodb.com>
Roman Lebedev <lebedev.ri@gmail.com>
Sayan Bhattacharjee <aero.sayan@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Steven Wan <wan.yu@ibm.com>
Tobias Schmidt <tobias.schmidt@in.tum.de>
Tobias Ulvgård <tobias.ulvgard@dirac.se>
Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>
Min-Yih Hsu <yihshyng223@gmail.com>

View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,216 @@
# Benchmark
[![build-and-test](https://github.com/google/benchmark/workflows/build-and-test/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Abuild-and-test)
[![bazel](https://github.com/google/benchmark/actions/workflows/bazel.yml/badge.svg)](https://github.com/google/benchmark/actions/workflows/bazel.yml)
[![pylint](https://github.com/google/benchmark/workflows/pylint/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Apylint)
[![test-bindings](https://github.com/google/benchmark/workflows/test-bindings/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Atest-bindings)
[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
A library to benchmark code snippets, similar to unit tests. Example:
```c++
#include <benchmark/benchmark.h>
static void BM_SomeFunction(benchmark::State& state) {
// Perform setup here
for (auto _ : state) {
// This code gets timed
SomeFunction();
}
}
// Register the function as a benchmark
BENCHMARK(BM_SomeFunction);
// Run the benchmark
BENCHMARK_MAIN();
```
## Getting Started
To get started, see [Requirements](#requirements) and
[Installation](#installation). See [Usage](#usage) for a full example and the
[User Guide](docs/user_guide.md) for a more comprehensive feature overview.
It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/docs/primer.md)
as some of the structural aspects of the APIs are similar.
## Resources
[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
IRC channels:
* [libera](https://libera.chat) #benchmark
[Additional Tooling Documentation](docs/tools.md)
[Assembly Testing Documentation](docs/AssemblyTests.md)
## Requirements
The library can be used with C++03. However, it requires C++11 to build,
including compiler and standard library support.
The following minimum versions are required to build the library:
* GCC 4.8
* Clang 3.4
* Visual Studio 14 2015
* Intel 2015 Update 1
See [Platform-Specific Build Instructions](docs/platform_specific_build_instructions.md).
## Installation
This describes the installation process using cmake. As pre-requisites, you'll
need git and cmake installed.
_See [dependencies.md](docs/dependencies.md) for more details regarding supported
versions of build tools._
```bash
# Check out the library.
$ git clone https://github.com/google/benchmark.git
# Go to the library root directory
$ cd benchmark
# Make a build directory to place the build output.
$ cmake -E make_directory "build"
# Generate build system files with cmake, and download any dependencies.
$ cmake -E chdir "build" cmake -DBENCHMARK_DOWNLOAD_DEPENDENCIES=on -DCMAKE_BUILD_TYPE=Release ../
# or, starting with CMake 3.13, use a simpler form:
# cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build"
# Build the library.
$ cmake --build "build" --config Release
```
This builds the `benchmark` and `benchmark_main` libraries and tests.
On a unix system, the build directory should now look something like this:
```
/benchmark
/build
/src
/libbenchmark.a
/libbenchmark_main.a
/test
...
```
Next, you can run the tests to check the build.
```bash
$ cmake -E chdir "build" ctest --build-config Release
```
If you want to install the library globally, also run:
```
sudo cmake --build "build" --config Release --target install
```
Note that Google Benchmark requires Google Test to build and run the tests. This
dependency can be provided two ways:
* Checkout the Google Test sources into `benchmark/googletest`.
* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
configuration as above, the library will automatically download and build
any required dependencies.
If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
to `CMAKE_ARGS`.
### Debug vs Release
By default, benchmark builds as a debug library. You will see a warning in the
output when this is the case. To build it as a release library instead, add
`-DCMAKE_BUILD_TYPE=Release` when generating the build system files, as shown
above. The use of `--config Release` in build commands is needed to properly
support multi-configuration tools (like Visual Studio for example) and can be
skipped for other build systems (like Makefile).
To enable link-time optimisation, also add `-DBENCHMARK_ENABLE_LTO=true` when
generating the build system files.
If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
cache variables, if autodetection fails.
If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
### Stable and Experimental Library Versions
The main branch contains the latest stable version of the benchmarking library;
the API of which can be considered largely stable, with source breaking changes
being made only upon the release of a new major version.
Newer, experimental, features are implemented and tested on the
[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish
to use, test, and provide feedback on the new features are encouraged to try
this branch. However, this branch provides no stability guarantees and reserves
the right to change and break the API at any time.
## Usage
### Basic usage
Define a function that executes the code to measure, register it as a benchmark
function using the `BENCHMARK` macro, and ensure an appropriate `main` function
is available:
```c++
#include <benchmark/benchmark.h>
static void BM_StringCreation(benchmark::State& state) {
for (auto _ : state)
std::string empty_string;
}
// Register the function as a benchmark
BENCHMARK(BM_StringCreation);
// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
std::string x = "hello";
for (auto _ : state)
std::string copy(x);
}
BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```
To run the benchmark, compile and link against the `benchmark` library
(libbenchmark.a/.so). If you followed the build steps above, this library will
be under the build directory you created.
```bash
# Example on linux after running the build steps above. Assumes the
# `benchmark` and `build` directories are under the current directory.
$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
-Lbenchmark/build/src -lbenchmark -lpthread -o mybenchmark
```
Alternatively, link against the `benchmark_main` library and remove
`BENCHMARK_MAIN();` above to get the same behavior.
The compiled executable will run all benchmarks by default. Pass the `--help`
flag for option information or see the [User Guide](docs/user_guide.md).
### Usage with CMake
If using CMake, it is recommended to link against the project-provided
`benchmark::benchmark` and `benchmark::benchmark_main` targets using
`target_link_libraries`.
It is possible to use ```find_package``` to import an installed version of the
library.
```cmake
find_package(benchmark REQUIRED)
```
Alternatively, ```add_subdirectory``` will incorporate the library directly in
to one's CMake project.
```cmake
add_subdirectory(benchmark)
```
Either way, link to the library as follows.
```cmake
target_link_libraries(MyTarget benchmark::benchmark)
```

View File

@@ -0,0 +1,51 @@
workspace(name = "com_github_google_benchmark")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

# Bazel C++ rules, pinned to a specific commit for reproducible builds.
http_archive(
    name = "rules_cc",
    strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912",
    urls = ["https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.zip"],
    sha256 = "d7dc12c1d5bc1a87474de8e3d17b7731a4dcebcfb8aa3990fe8ac7734ef12f2f",
)

# Abseil, used by the library and tests.
http_archive(
    name = "com_google_absl",
    sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111",
    strip_prefix = "abseil-cpp-20200225.2",
    urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"],
)

# GoogleTest, required to build and run the test suite.
http_archive(
    name = "com_google_googletest",
    strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
    urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
    sha256 = "8f827dd550db8b4fdf73904690df0be9fccc161017c9038a724bc9a0617a1bc8",
)

# pybind11, used by the Python bindings; built with a custom BUILD file.
http_archive(
    name = "pybind11",
    build_file = "@//bindings/python:pybind11.BUILD",
    sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d",
    strip_prefix = "pybind11-2.4.3",
    urls = ["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"],
)

# Local Python headers for building the extension module.
new_local_repository(
    name = "python_headers",
    build_file = "@//bindings/python:python_headers.BUILD",
    path = "/usr/include/python3.6",  # May be overwritten by setup.py.
)

# Python rules plus pip dependencies from requirements.txt.
http_archive(
    name = "rules_python",
    url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz",
    sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0",
)

load("@rules_python//python:pip.bzl", pip3_install="pip_install")

pip3_install(
    name = "py_deps",
    requirements = "//:requirements.txt",
)

View File

@@ -0,0 +1,2 @@
# GitHub Pages (Jekyll) configuration for the project documentation site.
theme: jekyll-theme-midnight
markdown: GFM

View File

@@ -0,0 +1,50 @@
# AppVeyor CI configuration: Windows builds with MSVC 2015/2017 and MinGW.
version: '{build}'

image: Visual Studio 2017

configuration:
  - Debug
  - Release

environment:
  matrix:
    - compiler: msvc-15-seh
      generator: "Visual Studio 15 2017"

    - compiler: msvc-15-seh
      generator: "Visual Studio 15 2017 Win64"

    - compiler: msvc-14-seh
      generator: "Visual Studio 14 2015"

    - compiler: msvc-14-seh
      generator: "Visual Studio 14 2015 Win64"

    # MinGW build needs the 2015 worker image and the toolchain on PATH.
    - compiler: gcc-5.3.0-posix
      generator: "MinGW Makefiles"
      cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin'
      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015

matrix:
  # Stop remaining jobs as soon as one job fails.
  fast_finish: true

install:
  # git bash conflicts with MinGW makefiles
  - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%")
  - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%")

build_script:
  - md _build -Force
  - cd _build
  - echo %configuration%
  - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON ..
  - cmake --build . --config %configuration%

test_script:
  - ctest --build-config %configuration% --timeout 300 --output-on-failure

artifacts:
  # Keep CMake logs and test result XML for failed-build diagnosis.
  - path: '_build/CMakeFiles/*.log'
    name: logs
  - path: '_build/Testing/**/*.xml'
    name: test_results

View File

@@ -0,0 +1,3 @@
# Expose the custom .BUILD files and build_defs.bzl so the WORKSPACE can
# reference them for external repositories (pybind11, python_headers).
exports_files(glob(["*.BUILD"]))
exports_files(["build_defs.bzl"])

View File

@@ -0,0 +1,38 @@
load("//bindings/python:build_defs.bzl", "py_extension")

# Public Python package wrapping the native _benchmark extension.
py_library(
    name = "google_benchmark",
    srcs = ["__init__.py"],
    visibility = ["//visibility:public"],
    deps = [
        ":_benchmark",
        # pip; absl:app
    ],
)

# Native pybind11 extension module exposing the C++ benchmark API.
py_extension(
    name = "_benchmark",
    srcs = ["benchmark.cc"],
    # pybind11 requires exceptions; strict aliasing is disabled for its casts.
    copts = [
        "-fexceptions",
        "-fno-strict-aliasing",
    ],
    features = ["-use_header_modules"],
    deps = [
        "//:benchmark",
        "@pybind11",
        "@python_headers",
    ],
)

# Smoke test: runs the example benchmarks under Python 3.
py_test(
    name = "example",
    srcs = ["example.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    visibility = ["//visibility:public"],
    deps = [
        ":google_benchmark",
    ],
)

View File

@@ -0,0 +1,158 @@
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python benchmarking utilities.
Example usage:
import google_benchmark as benchmark
@benchmark.register
def my_benchmark(state):
... # Code executed outside `while` loop is not timed.
while state:
... # Code executed within `while` loop is timed.
if __name__ == '__main__':
benchmark.main()
"""
from absl import app
from google_benchmark import _benchmark
from google_benchmark._benchmark import (
Counter,
kNanosecond,
kMicrosecond,
kMillisecond,
kSecond,
oNone,
o1,
oN,
oNSquared,
oNCubed,
oLogN,
oNLogN,
oAuto,
oLambda,
)
__all__ = [
"register",
"main",
"Counter",
"kNanosecond",
"kMicrosecond",
"kMillisecond",
"kSecond",
"oNone",
"o1",
"oN",
"oNSquared",
"oNCubed",
"oLogN",
"oNLogN",
"oAuto",
"oLambda",
]
__version__ = "0.2.0"
class __OptionMaker:
    """A stateless class to collect benchmark options.

    Collects all decorator calls like @option.range(start=0, limit=1<<5)
    without applying them; they are replayed later by `register`.
    """

    class Options:
        """Pure data class to store options calls, along with the benchmarked function."""

        def __init__(self, func):
            # The function being benchmarked.
            self.func = func
            # Recorded (builder_name, args, kwargs) tuples, in decoration order
            # (innermost decorator first).
            self.builder_calls = []

    @classmethod
    def make(cls, func_or_options):
        """Make Options from Options or the benchmarked function."""
        if isinstance(func_or_options, cls.Options):
            return func_or_options
        return cls.Options(func_or_options)

    def __getattr__(self, builder_name):
        """Append an option call to the Options.

        Any attribute access (e.g. `option.range`) produces a builder method;
        the builder records its arguments rather than applying them.
        """

        # The function that gets returned on @option.range(start=0, limit=1<<5).
        def __builder_method(*args, **kwargs):

            # The decorator that gets called, either with the benchmarked
            # function or the previous Options.
            def __decorator(func_or_options):
                options = self.make(func_or_options)
                options.builder_calls.append((builder_name, args, kwargs))
                # The decorator returns Options so it is not technically a decorator
                # and needs a final call to @register
                return options

            return __decorator

        return __builder_method
# Alias for nicer API.
# We have to instantiate an object, even if stateless, to be able to use __getattr__
# on option.range
option = __OptionMaker()
def register(undefined=None, *, name=None):
    """Register a function (or accumulated Options) for benchmarking.

    Supports both bare use (@register) and keyword use
    (@register(name="custom_name")).
    """
    if undefined is None:
        # Called with parentheses, e.g. @register(name=...): return the
        # decorator that will receive the function.
        def deferred_register(func_or_options):
            return register(func_or_options, name=name)

        return deferred_register

    # `undefined` is either the benchmarked function itself (simple case) or
    # an Options instance produced by @option._ decorators.
    options = __OptionMaker.make(undefined)
    benchmark_name = options.func.__name__ if name is None else name

    # Register the benchmark, then replay every recorded @option._ call on
    # the returned builder. Calls are replayed in reverse recording order so
    # they take effect in source order (decorators stack bottom-up).
    builder = _benchmark.RegisterBenchmark(benchmark_name, options.func)
    for method_name, args, kwargs in reversed(options.builder_calls):
        getattr(builder, method_name)(*args, **kwargs)

    # Hand back the original function: registration does not wrap it.
    return options.func
def _flags_parser(argv):
    # Let the C++ benchmark library consume its own flags first; the
    # remaining arguments are handed to absl for standard flag parsing.
    argv = _benchmark.Initialize(argv)
    return app.parse_flags_with_usage(argv)
def _run_benchmarks(argv):
    # absl passes through unparsed positional arguments; none are expected.
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")
    return _benchmark.RunSpecifiedBenchmarks()
def main(argv=None):
    # Run all registered benchmarks via absl, using the custom flags parser
    # so benchmark-specific flags reach the C++ library.
    return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)
# Methods for use with custom main function.
initialize = _benchmark.Initialize
run_benchmarks = _benchmark.RunSpecifiedBenchmarks

View File

@@ -0,0 +1,181 @@
// Benchmark for Python.
#include <map>
#include <string>
#include <vector>
#include "pybind11/operators.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
#include "pybind11/stl_bind.h"
#include "benchmark/benchmark.h"
PYBIND11_MAKE_OPAQUE(benchmark::UserCounters);
namespace {
namespace py = ::pybind11;
// Forwards the Python argv to benchmark::Initialize and returns the
// arguments that the benchmark library did not consume.
std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
  // The `argv` pointers here become invalid when this function returns, but
  // benchmark holds the pointer to `argv[0]`. We create a static copy of it
  // so it persists, and replace the pointer below.
  static std::string executable_name(argv[0]);
  std::vector<char*> ptrs;
  ptrs.reserve(argv.size());
  for (auto& arg : argv) {
    ptrs.push_back(const_cast<char*>(arg.c_str()));
  }
  ptrs[0] = const_cast<char*>(executable_name.c_str());
  int argc = static_cast<int>(argv.size());
  // benchmark::Initialize compacts the argv array in place and updates argc.
  benchmark::Initialize(&argc, ptrs.data());
  // Copy the surviving (unconsumed) arguments back into std::strings.
  std::vector<std::string> remaining_argv;
  remaining_argv.reserve(argc);
  for (int i = 0; i < argc; ++i) {
    remaining_argv.emplace_back(ptrs[i]);
  }
  return remaining_argv;
}
// Registers a Python callable under `name`; the callable receives a pointer
// to the benchmark::State for each run. The py::function is captured by
// value so the Python object stays alive for the benchmark's lifetime.
benchmark::internal::Benchmark* RegisterBenchmark(const char* name,
                                                  py::function f) {
  return benchmark::RegisterBenchmark(
      name, [f](benchmark::State& state) { f(&state); });
}
// Python bindings for the C++ benchmark library: time-unit and complexity
// enums, the Benchmark builder, Counter, State, and module entry points.
PYBIND11_MODULE(_benchmark, m) {
  using benchmark::TimeUnit;
  py::enum_<TimeUnit>(m, "TimeUnit")
      .value("kNanosecond", TimeUnit::kNanosecond)
      .value("kMicrosecond", TimeUnit::kMicrosecond)
      .value("kMillisecond", TimeUnit::kMillisecond)
      .value("kSecond", TimeUnit::kSecond)
      .export_values();

  using benchmark::BigO;
  py::enum_<BigO>(m, "BigO")
      .value("oNone", BigO::oNone)
      .value("o1", BigO::o1)
      .value("oN", BigO::oN)
      .value("oNSquared", BigO::oNSquared)
      .value("oNCubed", BigO::oNCubed)
      .value("oLogN", BigO::oLogN)
      // Fixed: previously bound "oNLogN" to BigO::oLogN, so requesting
      // O(N log N) complexity from Python silently got O(log N).
      .value("oNLogN", BigO::oNLogN)
      .value("oAuto", BigO::oAuto)
      .value("oLambda", BigO::oLambda)
      .export_values();

  using benchmark::internal::Benchmark;
  py::class_<Benchmark>(m, "Benchmark")
      // For methods returning a pointer to the current object, reference
      // return policy is used to ask pybind not to take ownership of the
      // returned object and avoid calling delete on it.
      // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
      //
      // For methods taking a const std::vector<...>&, a copy is created
      // because it is bound to a Python list.
      // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
      .def("unit", &Benchmark::Unit, py::return_value_policy::reference)
      .def("arg", &Benchmark::Arg, py::return_value_policy::reference)
      .def("args", &Benchmark::Args, py::return_value_policy::reference)
      .def("range", &Benchmark::Range, py::return_value_policy::reference,
           py::arg("start"), py::arg("limit"))
      .def("dense_range", &Benchmark::DenseRange,
           py::return_value_policy::reference, py::arg("start"),
           py::arg("limit"), py::arg("step") = 1)
      .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference)
      .def("args_product", &Benchmark::ArgsProduct,
           py::return_value_policy::reference)
      .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference)
      .def("arg_names", &Benchmark::ArgNames,
           py::return_value_policy::reference)
      .def("range_pair", &Benchmark::RangePair,
           py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"),
           py::arg("lo2"), py::arg("hi2"))
      .def("range_multiplier", &Benchmark::RangeMultiplier,
           py::return_value_policy::reference)
      .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference)
      .def("iterations", &Benchmark::Iterations,
           py::return_value_policy::reference)
      .def("repetitions", &Benchmark::Repetitions,
           py::return_value_policy::reference)
      .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly,
           py::return_value_policy::reference, py::arg("value") = true)
      .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly,
           py::return_value_policy::reference, py::arg("value") = true)
      .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime,
           py::return_value_policy::reference)
      .def("use_real_time", &Benchmark::UseRealTime,
           py::return_value_policy::reference)
      .def("use_manual_time", &Benchmark::UseManualTime,
           py::return_value_policy::reference)
      // Complexity is overloaded in C++; bind the enum-taking overload via an
      // explicit member-function-pointer cast.
      .def(
          "complexity",
          (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity,
          py::return_value_policy::reference,
          py::arg("complexity") = benchmark::oAuto);

  using benchmark::Counter;
  py::class_<Counter> py_counter(m, "Counter");

  py::enum_<Counter::Flags>(py_counter, "Flags")
      .value("kDefaults", Counter::Flags::kDefaults)
      .value("kIsRate", Counter::Flags::kIsRate)
      .value("kAvgThreads", Counter::Flags::kAvgThreads)
      .value("kAvgThreadsRate", Counter::Flags::kAvgThreadsRate)
      .value("kIsIterationInvariant", Counter::Flags::kIsIterationInvariant)
      .value("kIsIterationInvariantRate",
             Counter::Flags::kIsIterationInvariantRate)
      .value("kAvgIterations", Counter::Flags::kAvgIterations)
      .value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate)
      .value("kInvert", Counter::Flags::kInvert)
      .export_values()
      // Allow combining flags with `|` from Python.
      .def(py::self | py::self);

  py::enum_<Counter::OneK>(py_counter, "OneK")
      .value("kIs1000", Counter::OneK::kIs1000)
      .value("kIs1024", Counter::OneK::kIs1024)
      .export_values();

  py_counter
      .def(py::init<double, Counter::Flags, Counter::OneK>(),
           py::arg("value") = 0., py::arg("flags") = Counter::kDefaults,
           py::arg("k") = Counter::kIs1000)
      .def(py::init([](double value) { return Counter(value); }))
      .def_readwrite("value", &Counter::value)
      .def_readwrite("flags", &Counter::flags)
      .def_readwrite("oneK", &Counter::oneK);
  // Let plain Python numbers be used wherever a Counter is expected.
  py::implicitly_convertible<py::float_, Counter>();
  py::implicitly_convertible<py::int_, Counter>();

  py::bind_map<benchmark::UserCounters>(m, "UserCounters");

  using benchmark::State;
  py::class_<State>(m, "State")
      // `while state:` in Python drives the benchmark loop via KeepRunning.
      .def("__bool__", &State::KeepRunning)
      .def_property_readonly("keep_running", &State::KeepRunning)
      .def("pause_timing", &State::PauseTiming)
      .def("resume_timing", &State::ResumeTiming)
      .def("skip_with_error", &State::SkipWithError)
      .def_property_readonly("error_occurred", &State::error_occurred)
      .def("set_iteration_time", &State::SetIterationTime)
      .def_property("bytes_processed", &State::bytes_processed,
                    &State::SetBytesProcessed)
      .def_property("complexity_n", &State::complexity_length_n,
                    &State::SetComplexityN)
      .def_property("items_processed", &State::items_processed,
                    &State::SetItemsProcessed)
      .def("set_label", (void(State::*)(const char*)) & State::SetLabel)
      .def("range", &State::range, py::arg("pos") = 0)
      .def_property_readonly("iterations", &State::iterations)
      .def_readwrite("counters", &State::counters)
      .def_property_readonly("thread_index", &State::thread_index)
      .def_property_readonly("threads", &State::threads);

  m.def("Initialize", Initialize);
  m.def("RegisterBenchmark", RegisterBenchmark,
        py::return_value_policy::reference);
  m.def("RunSpecifiedBenchmarks",
        []() { benchmark::RunSpecifiedBenchmarks(); });
}
} // namespace

View File

@@ -0,0 +1,136 @@
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Python using C++ benchmark framework.
To run this example, you must first install the `google_benchmark` Python package.
To install using `setup.py`, download and extract the `google_benchmark` source.
In the extracted directory, execute:
python setup.py install
"""
import random
import time
import google_benchmark as benchmark
from google_benchmark import Counter
@benchmark.register
def empty(state):
    """Measure the overhead of an empty benchmark loop."""
    while state:
        pass
@benchmark.register
def sum_million(state):
    """Time summing the integers 0..999999."""
    while state:
        sum(range(1_000_000))
@benchmark.register
def pause_timing(state):
    """Pause timing every iteration."""
    while state:
        # Construct a list of random ints every iteration without timing it
        state.pause_timing()
        random_list = [random.randint(0, 100) for _ in range(100)]
        state.resume_timing()
        # Time the in place sorting algorithm
        random_list.sort()
@benchmark.register
def skipped(state):
    """Demonstrate skipping a benchmark with an error message."""
    if True:  # Test some predicate here.
        state.skip_with_error("some error")
        return  # NOTE: You must explicitly return, or benchmark will continue.

    ...  # Benchmark code would be here.
@benchmark.register
def manual_timing(state):
    """Report manually measured wall-clock time per iteration."""
    while state:
        # Manually count Python CPU time
        start = time.perf_counter()  # perf_counter_ns() in Python 3.7+

        # Something to benchmark
        time.sleep(0.01)

        end = time.perf_counter()
        state.set_iteration_time(end - start)
@benchmark.register
def custom_counters(state):
    """Collect custom metrics using benchmark.Counter."""
    num_foo = 0.0
    while state:
        # Benchmark some code here
        pass
        # Collect some custom metric named foo
        num_foo += 0.13

    # Automatic Counter from numbers.
    state.counters["foo"] = num_foo
    # Set a counter as a rate.
    state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
    # Set a counter as an inverse of rate.
    state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert)
    # Set a counter as a thread-average quantity.
    state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
    # There's also a combined flag:
    state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)
@benchmark.register
@benchmark.option.measure_process_cpu_time()
@benchmark.option.use_real_time()
def with_options(state):
    """Sum a million integers, measuring process CPU time and real time."""
    while state:
        sum(range(1_000_000))
@benchmark.register(name="sum_million_microseconds")
@benchmark.option.unit(benchmark.kMicrosecond)
def with_options2(state):
    """Same benchmark as above, but registered under a custom name and
    reported in microseconds."""
    while state:
        sum(range(1_000_000))
@benchmark.register
@benchmark.option.arg(100)
@benchmark.option.arg(1000)
def passing_argument(state):
    """Run the benchmark once per registered argument (100 and 1000)."""
    while state:
        sum(range(state.range(0)))
@benchmark.register
@benchmark.option.range(8, limit=8 << 10)
def using_range(state):
    """Run the benchmark over a geometric range of input sizes."""
    while state:
        sum(range(state.range(0)))
@benchmark.register
@benchmark.option.range_multiplier(2)
@benchmark.option.range(1 << 10, 1 << 18)
@benchmark.option.complexity(benchmark.oN)
def computing_complexity(state):
    """Estimate asymptotic complexity (expected O(N)) from a size sweep."""
    while state:
        sum(range(state.range(0)))
    # Report the input size so the library can fit the complexity curve.
    state.complexity_n = state.range(0)
if __name__ == "__main__":
benchmark.main()

View File

@@ -0,0 +1,20 @@
# Custom BUILD file for the external pybind11 archive (header-only library).
cc_library(
    name = "pybind11",
    hdrs = glob(
        include = [
            "include/pybind11/*.h",
            "include/pybind11/detail/*.h",
        ],
        # Deprecated / optional headers not needed by the bindings.
        exclude = [
            "include/pybind11/common.h",
            "include/pybind11/eigen.h",
        ],
    ),
    copts = [
        "-fexceptions",
        "-Wno-undefined-inline",
        "-Wno-pragma-once-outside-header",
    ],
    includes = ["include"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,6 @@
# Wraps the local CPython headers (see new_local_repository in WORKSPACE) so
# the extension module can include <Python.h>.
cc_library(
    name = "python_headers",
    hdrs = glob(["**/*.h"]),
    includes = ["."],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,2 @@
absl-py>=0.7.1

View File

@@ -0,0 +1,78 @@
# - Adds a compiler flag if it is supported by the compiler
#
# This function checks that the supplied compiler flag is supported and then
# adds it to the corresponding compiler flags
#
# add_cxx_compiler_flag(<FLAG> [<VARIANT>])
#
# - Example
#
# include(AddCXXCompilerFlag)
# add_cxx_compiler_flag(-Wall)
# add_cxx_compiler_flag(-no-strict-aliasing RELEASE)
# Requires CMake 2.6+
if(__add_cxx_compiler_flag)
return()
endif()
set(__add_cxx_compiler_flag INCLUDED)
include(CheckCXXCompilerFlag)
# Turns a compiler flag (e.g. "-Wall") into a sanitized identifier
# (e.g. "HAVE_CXX_FLAG_WALL") suitable for use as a CMake variable name,
# storing it in OUTPUT in the caller's scope.
function(mangle_compiler_flag FLAG OUTPUT)
  string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG)
  # '+' has no uppercase form; map it to 'X' so flags like -std=c++11 stay
  # distinct after sanitization.
  string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG})
  # Replace every remaining non-identifier character with '_' ...
  string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
  # ... and collapse runs of underscores into one.
  string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
  set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE)
endfunction(mangle_compiler_flag)
# Checks that FLAG is accepted by the C++ compiler and, if so, appends it to
# CMAKE_CXX_FLAGS, or to CMAKE_CXX_FLAGS_<VARIANT> when the optional second
# argument names a build variant (e.g. RELEASE).
function(add_cxx_compiler_flag FLAG)
  mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
  # check_cxx_compiler_flag() honors CMAKE_REQUIRED_FLAGS: temporarily add
  # the candidate flag for the probe, then restore the previous value.
  set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
  set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
  check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
  set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
  if(${MANGLED_FLAG})
    if(ARGC GREATER 1)
      set(VARIANT ${ARGV1})
      string(TOUPPER "_${VARIANT}" VARIANT)
    else()
      set(VARIANT "")
    endif()
    # NOTE(review): BENCHMARK_CXX_FLAGS${VARIANT} is folded in as well —
    # presumably project-wide extra flags set by the caller; confirm it is
    # defined wherever this function is used.
    set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
  endif()
endfunction()
# Like add_cxx_compiler_flag(), but the flag is mandatory: on success it is
# added to the compiler, all three linker flag sets, and CMAKE_REQUIRED_FLAGS
# (so later probes also use it); on failure configuration aborts.
function(add_required_cxx_compiler_flag FLAG)
  mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
  # Probe with the flag active, then restore the previous required flags.
  set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
  set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
  check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
  set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
  if(${MANGLED_FLAG})
    if(ARGC GREATER 1)
      set(VARIANT ${ARGV1})
      string(TOUPPER "_${VARIANT}" VARIANT)
    else()
      set(VARIANT "")
    endif()
    set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
    # A required flag (e.g. a sanitizer) must also reach every link step.
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
    set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE)
  else()
    message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler")
  endif()
endfunction()
# Probes whether the compiler recognizes the warning flag FLAG, recording the
# result in the mangled HAVE_CXX_FLAG_* variable. Unlike the add_* helpers
# above, this only records the result — nothing is added to any flag set.
function(check_cxx_warning_flag FLAG)
  mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
  set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
  # Add -Werror to ensure the compiler generates an error if the warning flag
  # doesn't exist.
  set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}")
  check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
  set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
endfunction()

View File

@@ -0,0 +1,69 @@
# - Compile and run code to check for C++ features
#
# This functions compiles a source file under the `cmake` folder
# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake
# environment
#
# cxx_feature_check(<FLAG> [<VARIANT>])
#
# - Example
#
# include(CXXFeatureCheck)
# cxx_feature_check(STD_REGEX)
# Requires CMake 2.8.12+
if(__cxx_feature_check)
return()
endif()
set(__cxx_feature_check INCLUDED)
# cxx_feature_check(<FILE> [<extra-linker-flags>])
#
# Compiles (and, when not cross-compiling, runs) cmake/<file>.cpp. On success
# it sets HAVE_<FILE> in the caller's scope and adds -DHAVE_<FILE> as a
# compile definition. An optional second argument supplies extra linker
# flags for the probe.
#
# Fix: the non-cross-compiling branch printed "Performing Test <FEATURE>"
# a second time (it is already printed before the branch); the duplicate
# status message has been removed.
function(cxx_feature_check FILE)
  string(TOLOWER ${FILE} FILE)
  string(TOUPPER ${FILE} VAR)
  string(TOUPPER "HAVE_${VAR}" FEATURE)
  # Short-circuit: the user (or a previous configure run) already decided.
  if (DEFINED HAVE_${VAR})
    set(HAVE_${VAR} 1 PARENT_SCOPE)
    add_definitions(-DHAVE_${VAR})
    return()
  endif()

  if (ARGC GREATER 1)
    message(STATUS "Enabling additional flags: ${ARGV1}")
    list(APPEND BENCHMARK_CXX_LINKER_FLAGS ${ARGV1})
  endif()

  if (NOT DEFINED COMPILE_${FEATURE})
    message(STATUS "Performing Test ${FEATURE}")
    if(CMAKE_CROSSCOMPILING)
      # The probe cannot run on the host: compile-only, and optimistically
      # treat a successful compile as a successful run (RUN_* == 0).
      try_compile(COMPILE_${FEATURE}
              ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
              CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
              LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
      if(COMPILE_${FEATURE})
        message(WARNING
              "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
        set(RUN_${FEATURE} 0 CACHE INTERNAL "")
      else()
        set(RUN_${FEATURE} 1 CACHE INTERNAL "")
      endif()
    else()
      # try_run() stores the probe's exit status in RUN_* (0 == works).
      try_run(RUN_${FEATURE} COMPILE_${FEATURE}
              ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
              CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
              LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
    endif()
  endif()

  if(RUN_${FEATURE} EQUAL 0)
    message(STATUS "Performing Test ${FEATURE} -- success")
    set(HAVE_${VAR} 1 PARENT_SCOPE)
    add_definitions(-DHAVE_${VAR})
  else()
    if(NOT COMPILE_${FEATURE})
      message(STATUS "Performing Test ${FEATURE} -- failed to compile")
    else()
      message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run")
    endif()
  endif()
endfunction()

View File

@@ -0,0 +1 @@
include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")

View File

@@ -0,0 +1,58 @@
# - Returns a version string from Git tags
#
# This function inspects the annotated git tags for the project and returns a string
# into a CMake variable
#
# get_git_version(<var>)
#
# - Example
#
# include(GetGitVersion)
# get_git_version(GIT_VERSION)
#
# Requires CMake 2.8.11+
find_package(Git)
if(__get_git_version)
return()
endif()
set(__get_git_version INCLUDED)
# get_git_version(<var>)
#
# Stores a normalized version string derived from `git describe` into <var>
# in the caller's scope; falls back to "0.0.0" when git is unavailable.
function(get_git_version var)
  if(GIT_EXECUTABLE)
      # Describe the closest annotated v<MAJOR>.<MINOR>.<PATCH> tag.
      execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
          WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
          RESULT_VARIABLE status
          OUTPUT_VARIABLE GIT_DESCRIBE_VERSION
          ERROR_QUIET)
      # Non-zero status: no matching tag, or not a git checkout.
      if(status)
          set(GIT_DESCRIBE_VERSION "v0.0.0")
      endif()

      string(STRIP ${GIT_DESCRIBE_VERSION} GIT_DESCRIBE_VERSION)
      # "v1.2.3-4-g<sha>" (commits past the tag) -> "1.2.3.4";
      # a plain "v1.2.3" just loses the leading "v".
      if(GIT_DESCRIBE_VERSION MATCHES v[^-]*-)
         string(REGEX REPLACE "v([^-]*)-([0-9]+)-.*" "\\1.\\2" GIT_VERSION ${GIT_DESCRIBE_VERSION})
      else()
         string(REGEX REPLACE "v(.*)" "\\1" GIT_VERSION ${GIT_DESCRIBE_VERSION})
      endif()

      # Work out if the repository is dirty
      execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
          WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
          OUTPUT_QUIET
          ERROR_QUIET)
      execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
          WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
          OUTPUT_VARIABLE GIT_DIFF_INDEX
          ERROR_QUIET)
      string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
      # Only the human-readable describe string gets the -dirty suffix; the
      # normalized GIT_VERSION returned to the caller stays numeric.
      if (${GIT_DIRTY})
          set(GIT_DESCRIBE_VERSION "${GIT_DESCRIBE_VERSION}-dirty")
      endif()
      message(STATUS "git version: ${GIT_DESCRIBE_VERSION} normalized to ${GIT_VERSION}")
  else()
      set(GIT_VERSION "0.0.0")
  endif()

  set(${var} ${GIT_VERSION} PARENT_SCOPE)
endfunction()

View File

@@ -0,0 +1,44 @@
# Download and unpack googletest at configure time
# This script configures and builds a small helper project (generated from
# GoogleTest.cmake.in) that either reuses a local googletest tree or fetches
# one, then adds the resulting source tree to this build.
set(GOOGLETEST_PREFIX "${benchmark_BINARY_DIR}/third_party/googletest")
configure_file(${benchmark_SOURCE_DIR}/cmake/GoogleTest.cmake.in ${GOOGLETEST_PREFIX}/CMakeLists.txt @ONLY)

set(GOOGLETEST_PATH "${CMAKE_CURRENT_SOURCE_DIR}/googletest" CACHE PATH "") # Mind the quotes

# Configure the generated helper project.
execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}"
  -DALLOW_DOWNLOADING_GOOGLETEST=${BENCHMARK_DOWNLOAD_DEPENDENCIES} -DGOOGLETEST_PATH:PATH=${GOOGLETEST_PATH} .
  RESULT_VARIABLE result
  WORKING_DIRECTORY ${GOOGLETEST_PREFIX}
)

if(result)
  message(FATAL_ERROR "CMake step for googletest failed: ${result}")
endif()

# Build it; this is what actually downloads/unpacks googletest.
execute_process(
  COMMAND ${CMAKE_COMMAND} --build .
  RESULT_VARIABLE result
  WORKING_DIRECTORY ${GOOGLETEST_PREFIX}
)

if(result)
  message(FATAL_ERROR "Build step for googletest failed: ${result}")
endif()

# Prevent overriding the parent project's compiler/linker
# settings on Windows
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)

# The helper project wrote the resolved source/binary dirs into this file.
include(${GOOGLETEST_PREFIX}/googletest-paths.cmake)

# googletest doesn't seem to want to stay build warning clean so let's not hurt ourselves.
add_compile_options(-w)

# Add googletest directly to our build. This defines
# the gtest and gtest_main targets.
add_subdirectory(${GOOGLETEST_SOURCE_DIR}
                 ${GOOGLETEST_BINARY_DIR}
                 EXCLUDE_FROM_ALL)

# Mark googletest headers as SYSTEM includes so consumers don't inherit its
# warnings.
set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(gtest_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest_main,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(gmock_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock_main,INTERFACE_INCLUDE_DIRECTORIES>)

View File

@@ -0,0 +1,58 @@
# Helper project (instantiated from GoogleTest.cmake.in): locates a local
# googletest source tree, or — if allowed — downloads one, and records the
# resolved source/binary directories in googletest-paths.cmake for the parent
# build to include.
cmake_minimum_required(VERSION 2.8.12)

project(googletest-download NONE)

# Enable ExternalProject CMake module
include(ExternalProject)

option(ALLOW_DOWNLOADING_GOOGLETEST "If googletest src tree is not found in location specified by GOOGLETEST_PATH, do fetch the archive from internet" OFF)

set(GOOGLETEST_PATH "/usr/src/googletest" CACHE PATH
                    "Path to the googletest root tree. Should contain googletest and googlemock subdirs. And CMakeLists.txt in root, and in both of these subdirs")

# Download and install GoogleTest

message(STATUS "Looking for Google Test sources")
message(STATUS "Looking for Google Test sources in ${GOOGLETEST_PATH}")
# A usable local tree must have CMakeLists.txt at the root and in both the
# googletest and googlemock subdirectories.
if(EXISTS "${GOOGLETEST_PATH}"            AND IS_DIRECTORY "${GOOGLETEST_PATH}"            AND EXISTS "${GOOGLETEST_PATH}/CMakeLists.txt" AND
   EXISTS "${GOOGLETEST_PATH}/googletest" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googletest" AND EXISTS "${GOOGLETEST_PATH}/googletest/CMakeLists.txt" AND
   EXISTS "${GOOGLETEST_PATH}/googlemock" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googlemock" AND EXISTS "${GOOGLETEST_PATH}/googlemock/CMakeLists.txt")
  message(STATUS "Found Google Test in ${GOOGLETEST_PATH}")

  # No download/configure/build: just point SOURCE_DIR at the existing tree.
  ExternalProject_Add(
    googletest
    PREFIX            "${CMAKE_BINARY_DIR}"
    DOWNLOAD_DIR      "${CMAKE_BINARY_DIR}/download"
    SOURCE_DIR        "${GOOGLETEST_PATH}" # use existing src dir.
    BINARY_DIR        "${CMAKE_BINARY_DIR}/build"
    CONFIGURE_COMMAND ""
    BUILD_COMMAND     ""
    INSTALL_COMMAND   ""
    TEST_COMMAND      ""
  )
else()
  if(NOT ALLOW_DOWNLOADING_GOOGLETEST)
    message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable BENCHMARK_DOWNLOAD_DEPENDENCIES, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.")
  else()
    message(WARNING "Did not find Google Test sources! Fetching from web...")
    ExternalProject_Add(
      googletest
      GIT_REPOSITORY    https://github.com/google/googletest.git
      GIT_TAG           master
      PREFIX            "${CMAKE_BINARY_DIR}"
      STAMP_DIR         "${CMAKE_BINARY_DIR}/stamp"
      DOWNLOAD_DIR      "${CMAKE_BINARY_DIR}/download"
      SOURCE_DIR        "${CMAKE_BINARY_DIR}/src"
      BINARY_DIR        "${CMAKE_BINARY_DIR}/build"
      CONFIGURE_COMMAND ""
      BUILD_COMMAND     ""
      INSTALL_COMMAND   ""
      TEST_COMMAND      ""
    )
  endif()
endif()

# Export the resolved paths for the parent build (HandleGTest.cmake) to read.
ExternalProject_Get_Property(googletest SOURCE_DIR BINARY_DIR)
file(WRITE googletest-paths.cmake
"set(GOOGLETEST_SOURCE_DIR \"${SOURCE_DIR}\")
set(GOOGLETEST_BINARY_DIR \"${BINARY_DIR}\")
")

View File

@@ -0,0 +1,12 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
Name: @PROJECT_NAME@
Description: Google microbenchmark framework
Version: @VERSION@
Libs: -L${libdir} -lbenchmark
Libs.private: -lpthread
Cflags: -I${includedir}

View File

@@ -0,0 +1,12 @@
#include <gnuregex.h>
#include <string>
// Feature probe for the GNU regex API (<gnuregex.h>): exits 0 iff an
// extended-syntax pattern compiles and matches the sample string.
int main() {
  std::string str = "test0159";
  regex_t re;
  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
  if (ec != 0) {
    return ec;
  }
  // Fix: free the compiled pattern before returning — the original leaked
  // regcomp's allocation, unlike the matching posix_regex probe.
  int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
  regfree(&re);
  return ret;
}

View File

@@ -0,0 +1,8 @@
find_package(LLVMAr REQUIRED)
set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE)
find_package(LLVMNm REQUIRED)
set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE)
find_package(LLVMRanLib REQUIRED)
set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE)

View File

@@ -0,0 +1,14 @@
#include <regex.h>
#include <string>
// Feature probe for the POSIX regex API (<regex.h>): exits 0 iff an
// extended-syntax pattern compiles and matches the sample string; on a
// compilation error the regcomp error code is returned instead.
int main() {
  const std::string sample = "test0159";
  regex_t pattern;
  const int compile_status =
      regcomp(&pattern, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
  if (compile_status != 0) {
    return compile_status;
  }
  const bool matched = regexec(&pattern, sample.c_str(), 0, nullptr, 0) == 0;
  regfree(&pattern);
  return matched ? 0 : -1;
}

View File

@@ -0,0 +1,3 @@
macro(split_list listname)
string(REPLACE ";" " " ${listname} "${${listname}}")
endmacro()

View File

@@ -0,0 +1,10 @@
#include <regex>
#include <string>
// Feature probe for C++11 <regex>: exits 0 iff std::regex supports POSIX
// extended syntax and finds a match in the sample string.
int main() {
  const std::string str = "test0159";
  std::regex re;
  // NOTE(review): default-construct then assign (rather than direct-init) —
  // presumably this also probes std::regex's assignment operator; confirm
  // before simplifying.
  re = std::regex("^[a-z]+[0-9]+$",
                  std::regex_constants::extended | std::regex_constants::nosubs);
  return std::regex_search(str, re) ? 0 : -1;
}

View File

@@ -0,0 +1,7 @@
#include <chrono>
// Feature probe: this translation unit compiles only when the toolchain
// provides std::chrono::steady_clock. The result is never inspected.
int main() {
  using Clock = std::chrono::steady_clock;
  const Clock::time_point now = Clock::now();
  static_cast<void>(now);  // suppress unused-variable warnings
}

View File

@@ -0,0 +1,4 @@
// Feature probe for thread-safety annotations: the unit compiles only if
// ../src/mutex.h builds cleanly with HAVE_THREAD_SAFETY_ATTRIBUTES defined.
// main() itself deliberately does nothing.
#define HAVE_THREAD_SAFETY_ATTRIBUTES
#include "../src/mutex.h"

int main() {}

View File

@@ -0,0 +1,147 @@
# Assembly Tests
The Benchmark library provides a number of functions whose primary
purpose is to affect assembly generation, including `DoNotOptimize`
and `ClobberMemory`. In addition there are other functions,
such as `KeepRunning`, for which generating good assembly is paramount.
For these functions it's important to have tests that verify the
correctness and quality of the implementation. This requires testing
the code generated by the compiler.
This document describes how the Benchmark library tests compiler output,
as well as how to properly write new tests.
## Anatomy of a Test
Writing a test has two steps:
* Write the code you want to generate assembly for.
* Add `// CHECK` lines to match against the verified assembly.
Example:
```c++
// CHECK-LABEL: test_add:
extern "C" int test_add() {
extern int ExternInt;
return ExternInt + 1;
// CHECK: movl ExternInt(%rip), %eax
// CHECK: addl %eax
// CHECK: ret
}
```
#### LLVM Filecheck
[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
is used to test the generated assembly against the `// CHECK` lines
specified in the tests source file. Please see the documentation
linked above for information on how to write `CHECK` directives.
#### Tips and Tricks:
* Tests should match the minimal amount of output required to establish
correctness. `CHECK` directives don't have to match on the exact next line
after the previous match, so tests should omit checks for unimportant
bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
can be used to ensure a match occurs exactly after the previous match).
* The tests are compiled with `-O3 -g0`. So we're only testing the
optimized output.
* The assembly output is further cleaned up using `tools/strip_asm.py`.
This removes comments, assembler directives, and unused labels before
the test is run.
* The generated and stripped assembly file for a test is output under
`<build-directory>/test/<test-name>.s`
* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
to specify lines that should only match in certain situations.
The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
are only expected to match Clang or GCC's output respectively. Normal
`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
`CHECK` lines)
* Use `extern "C"` to disable name mangling for specific functions. This
makes them easier to name in the `CHECK` lines.
## Problems Writing Portable Tests
Writing tests which check the code generated by a compiler are
inherently non-portable. Different compilers and even different compiler
versions may generate entirely different code. The Benchmark tests
must tolerate this.
LLVM Filecheck provides a number of mechanisms to help write
"more portable" tests; including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).
#### Capturing Variables
For example, say GCC stores a variable in a register but Clang stores
it in memory. To write a test that tolerates both cases we "capture"
the destination of the store, and then use the captured expression
to write the remainder of the test.
```c++
// CHECK-LABEL: test_div_no_op_into_shr:
extern "C" int test_div_no_op_into_shr(int value) {
int divisor = 2;
benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
return value / divisor;
// CHECK: movl $2, [[DEST:.*]]
// CHECK: idivl [[DEST]]
// CHECK: ret
}
```
#### Using Regular Expressions to Match Differing Output
Often tests require testing assembly lines which may subtly differ
between compilers or compiler versions. A common example of this
is matching stack frame addresses. In this case regular expressions
can be used to match the differing bits of output. For example:
```c++
int ExternInt;
struct Point { int x, y, z; };
// CHECK-LABEL: test_store_point:
extern "C" void test_store_point() {
Point p{ExternInt, ExternInt, ExternInt};
benchmark::DoNotOptimize(p);
// CHECK: movl ExternInt(%rip), %eax
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: ret
}
```
## Current Requirements and Limitations
The tests require Filecheck to be installed along the `PATH` of the
build machine. Otherwise the tests will be disabled.
Additionally, as mentioned in the previous section, codegen tests are
inherently non-portable. Currently the tests are limited to:
* x86_64 targets.
* Compiled with GCC or Clang
Further work could be done, at least on a limited basis, to extend the
tests to other architectures and compilers (using `CHECK` prefixes).
Furthermore, the tests fail for builds which specify additional flags
that modify code generation, including `--coverage` or `-fsanitize=`.

View File

@@ -0,0 +1 @@
theme: jekyll-theme-minimal

View File

@@ -0,0 +1,18 @@
# Build tool dependency policy
To ensure the broadest compatibility when building the benchmark library, but
still allow forward progress, we require any build tooling to be available for:
* Debian stable AND
* The last two Ubuntu LTS releases
Currently, this means using build tool versions that are available for Ubuntu
16.04 (Xenial), Ubuntu 18.04 (Bionic), and Debian stretch.
_Note, [travis](.travis.yml) runs under Ubuntu 14.04 (Trusty) for linux builds._
## cmake
The current supported version is cmake 3.5.1 as of 2018-06-06.
_Note, this version is also available for Ubuntu 14.04, the previous Ubuntu LTS
release, as `cmake3`._

View File

@@ -0,0 +1,10 @@
# Benchmark
* [Assembly Tests](AssemblyTests.md)
* [Dependencies](dependencies.md)
* [Perf Counters](perf_counters.md)
* [Platform Specific Build Instructions](platform_specific_build_instructions.md)
* [Random Interleaving](random_interleaving.md)
* [Releasing](releasing.md)
* [Tools](tools.md)
* [User Guide](user_guide.md)

View File

@@ -0,0 +1,34 @@
<a name="perf-counters" />
# User-Requested Performance Counters
When running benchmarks, the user may choose to request collection of
performance counters. This may be useful in investigation scenarios - narrowing
down the cause of a regression; or verifying that the underlying cause of a
performance improvement matches expectations.
This feature is available if:
* The benchmark is run on an architecture featuring a Performance Monitoring
Unit (PMU),
* The benchmark is compiled with support for collecting counters. Currently,
this requires [libpfm](http://perfmon2.sourceforge.net/) be available at build
time
The feature does not require modifying benchmark code. Counter collection is
handled at the boundaries where timer collection is also handled.
To opt-in:
* Install `libpfm4-dev`, e.g. `apt-get install libpfm4-dev`.
* Enable the cmake flag BENCHMARK_ENABLE_LIBPFM.
To use, pass a comma-separated list of counter names through the
`--benchmark_perf_counters` flag. The names are decoded through libpfm - meaning,
they are platform specific, but some (e.g. `CYCLES` or `INSTRUCTIONS`) are
mapped by libpfm to platform-specifics - see libpfm
[documentation](http://perfmon2.sourceforge.net/docs.html) for more details.
The counter values are reported back through the [User Counters](../README.md#custom-counters)
mechanism, meaning, they are available in all the formats (e.g. JSON) supported
by User Counters.

View File

@@ -0,0 +1,48 @@
# Platform Specific Build Instructions
## Building with GCC
When the library is built using GCC it is necessary to link with the pthread
library due to how GCC implements `std::thread`. Failing to link to pthread will
lead to runtime exceptions (unless you're using libc++), not linker errors. See
[issue #67](https://github.com/google/benchmark/issues/67) for more details. You
can link to pthread by adding `-pthread` to your linker command. Note, you can
also use `-lpthread`, but there are potential issues with ordering of command
line parameters if you use that.
On QNX, the pthread library is part of libc and usually included automatically
(see
[`pthread_create()`](https://www.qnx.com/developers/docs/7.1/index.html#com.qnx.doc.neutrino.lib_ref/topic/p/pthread_create.html)).
There's no separate pthread library to link.
## Building with Visual Studio 2015 or 2017
The `shlwapi` library (`-lshlwapi`) is required to support a call to `CPUInfo` which reads the registry. Either add `shlwapi.lib` under `[ Configuration Properties > Linker > Input ]`, or use the following:
```
// Alternatively, can add libraries using linker options.
#ifdef _WIN32
#pragma comment ( lib, "Shlwapi.lib" )
#ifdef _DEBUG
#pragma comment ( lib, "benchmarkd.lib" )
#else
#pragma comment ( lib, "benchmark.lib" )
#endif
#endif
```
Can also use the graphical version of CMake:
* Open `CMake GUI`.
* Under `Where to build the binaries`, same path as source plus `build`.
* Under `CMAKE_INSTALL_PREFIX`, same path as source plus `install`.
* Click `Configure`, `Generate`, `Open Project`.
* If build fails, try deleting entire directory and starting again, or unticking options to build less.
## Building with Intel 2015 Update 1 or Intel System Studio Update 4
See instructions for building with Visual Studio. Once built, right click on the solution and change the build to Intel.
## Building on Solaris
If you're running benchmarks on solaris, you'll want the kstat library linked in
too (`-lkstat`).

View File

@@ -0,0 +1,13 @@
<a name="interleaving" />
# Random Interleaving
[Random Interleaving](https://github.com/google/benchmark/issues/1051) is a
technique to lower run-to-run variance. It randomly interleaves repetitions of a
microbenchmark with repetitions from other microbenchmarks in the same benchmark
test. Data shows it is able to lower run-to-run variance by
[40%](https://github.com/google/benchmark/issues/1051) on average.
To use, you mainly need to set `--benchmark_enable_random_interleaving=true`,
and optionally specify non-zero repetition count `--benchmark_repetitions=9`
and optionally decrease the per-repetition time `--benchmark_min_time=0.1`.

View File

@@ -0,0 +1,22 @@
# How to release
* Make sure you're on main and synced to HEAD
* Ensure the project builds and tests run (sanity check only, obviously)
* `parallel -j0 exec ::: test/*_test` can help ensure everything at least
passes
* Prepare release notes
* `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
commits between the last annotated tag and HEAD
* Pick the most interesting.
* Create one last commit that updates the version saved in `CMakeLists.txt` to the release version you're creating. (This version will be used if benchmark is installed from the archive you'll be creating in the next step.)
```
project (benchmark VERSION 1.5.3 LANGUAGES CXX)
```
* Create a release through github's interface
* Note this will create a lightweight tag.
* Update this to an annotated tag:
* `git pull --tags`
* `git tag -a -f <tag> <tag>`
* `git push --force origin`

Some files were not shown because too many files have changed in this diff Show More