initial commit

This commit is contained in:
2022-08-05 08:23:25 +03:00
commit 5ecdc6abcf
387 changed files with 3010095 additions and 0 deletions

137
.clang-format Normal file
View File

@@ -0,0 +1,137 @@
---
# clang-format configuration for the project (LLVM base style with local tweaks:
# attached braces, 2-space indent, 140-column limit).
Language: Cpp
# BasedOnStyle: LLVM
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignConsecutiveMacros: false
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Right
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: Never
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortLambdasOnASingleLine: All
AllowShortIfStatementsOnASingleLine: Never
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: MultiLine
BinPackArguments: true
BinPackParameters: true
# Attached ("K&R") braces everywhere; see also BreakBeforeBraces below.
BraceWrapping:
  AfterCaseLabel: false
  AfterClass: false
  AfterControlStatement: false
  AfterEnum: false
  AfterFunction: false
  AfterNamespace: false
  AfterObjCDeclaration: false
  AfterStruct: false
  AfterUnion: false
  AfterExternBlock: false
  BeforeCatch: false
  BeforeElse: false
  IndentBraces: false
  SplitEmptyFunction: true
  SplitEmptyRecord: true
  SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 140
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DeriveLineEnding: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
  - foreach
  - Q_FOREACH
  - BOOST_FOREACH
IncludeBlocks: Preserve
IncludeCategories:
  - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
    Priority: 2
    SortPriority: 0
  - Regex: '^(<|"(gtest|gmock|isl|json)/)'
    Priority: 3
    SortPriority: 0
  - Regex: '.*'
    Priority: 1
    SortPriority: 0
IncludeIsMainRegex: '(Test)?$'
IncludeIsMainSourceRegex: ''
IndentCaseLabels: false
IndentGotoLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Auto
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SpaceBeforeSquareBrackets: false
Standard: Latest
StatementMacros:
  - Q_UNUSED
  - QT_REQUIRE_VERSION
TabWidth: 2
UseCRLF: false
UseTab: Never
...

36
.github/workflows/build.yml vendored Normal file
View File

@@ -0,0 +1,36 @@
# CI without ROS: builds ov_msckf as a plain CMake project on the latest Ubuntu
# runner, using the distro-packaged Ceres solver (source build kept commented out).
name: ROS Free Workflow
on:
  push:
    branches: [ master ]
  pull_request:
jobs:
  build:
    name: "Ubuntu Latest"
    runs-on: ubuntu-latest
    steps:
      - name: Code Checkout
        uses: actions/checkout@v2
      - name: Installing Ceres Solver
        run: |
          sudo apt-get update
          sudo apt-get install -y cmake libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev libsuitesparse-dev libceres-dev
          # cd ..
          # git clone https://ceres-solver.googlesource.com/ceres-solver
          # cd ceres-solver
          # git checkout tags/2.0.0
          # mkdir build
          # cd build
          # cmake ..
          # sudo make install
          # cd ../../
      - name: Configure and Build
        run: |
          sudo apt-get update
          sudo apt-get install -y libeigen3-dev libopencv-dev libboost-all-dev
          mkdir build
          cd build
          cmake ../ov_msckf/
          make

71
.github/workflows/build_ros1.yml vendored Normal file
View File

@@ -0,0 +1,71 @@
# CI for ROS1: for each supported Ubuntu release (16.04/18.04/20.04), build the
# matching Docker image, compile the catkin workspace inside it, then smoke-test
# by running the ov_msckf simulator. Fixed misspelled step names ("Enviroment").
name: ROS 1 Workflow
on:
  push:
    branches: [ master ]
  pull_request:
jobs:
  build_1604:
    name: "ROS1 Ubuntu 16.04"
    runs-on: ubuntu-latest
    steps:
      - name: Code Checkout
        uses: actions/checkout@v2
      # Re-arrange the checkout into a catkin src/ layout, then build the image.
      - name: Create Workspace and Docker Image
        run: |
          export REPO=$(basename $GITHUB_REPOSITORY) &&
          cd $GITHUB_WORKSPACE/.. && mkdir src/ &&
          mv $REPO/ src/ && mkdir $REPO/ && mv src/ $REPO/ && cd $REPO/ &&
          docker build -t openvins -f $GITHUB_WORKSPACE/src/$REPO/Dockerfile_ros1_16_04 .
      - name: Echo Environment
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "echo $ROS_DISTRO && echo $ROS_VERSION"
      - name: Run Build in Docker
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && catkin build"
      - name: Run OpenVINS Simulation!
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && source devel/setup.bash && roslaunch ov_msckf simulation.launch verbosity:=WARNING"
  build_1804:
    name: "ROS1 Ubuntu 18.04"
    runs-on: ubuntu-latest
    steps:
      - name: Code Checkout
        uses: actions/checkout@v2
      - name: Create Workspace and Docker Image
        run: |
          export REPO=$(basename $GITHUB_REPOSITORY) &&
          cd $GITHUB_WORKSPACE/.. && mkdir src/ &&
          mv $REPO/ src/ && mkdir $REPO/ && mv src/ $REPO/ && cd $REPO/ &&
          docker build -t openvins -f $GITHUB_WORKSPACE/src/$REPO/Dockerfile_ros1_18_04 .
      - name: Echo Environment
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "echo $ROS_DISTRO && echo $ROS_VERSION"
      - name: Run Build in Docker
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && catkin build"
      - name: Run OpenVINS Simulation!
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && source devel/setup.bash && roslaunch ov_msckf simulation.launch verbosity:=WARNING"
  build_2004:
    name: "ROS1 Ubuntu 20.04"
    runs-on: ubuntu-latest
    steps:
      - name: Code Checkout
        uses: actions/checkout@v2
      - name: Create Workspace and Docker Image
        run: |
          export REPO=$(basename $GITHUB_REPOSITORY) &&
          cd $GITHUB_WORKSPACE/.. && mkdir src/ &&
          mv $REPO/ src/ && mkdir $REPO/ && mv src/ $REPO/ && cd $REPO/ &&
          docker build -t openvins -f $GITHUB_WORKSPACE/src/$REPO/Dockerfile_ros1_20_04 .
      - name: Echo Environment
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "echo $ROS_DISTRO && echo $ROS_VERSION"
      - name: Run Build in Docker
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && catkin build"
      - name: Run OpenVINS Simulation!
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && source devel/setup.bash && roslaunch ov_msckf simulation.launch verbosity:=WARNING"

51
.github/workflows/build_ros2.yml vendored Normal file
View File

@@ -0,0 +1,51 @@
# CI for ROS2: build the Docker image per Ubuntu release (18.04/20.04), run a
# colcon build inside it, and (where it works) run the ov_msckf simulator.
# Fixed misspelled step names ("Enviroment").
name: ROS 2 Workflow
on:
  push:
    branches: [ master ]
  pull_request:
jobs:
  build_1804:
    name: "ROS2 Ubuntu 18.04"
    runs-on: ubuntu-latest
    steps:
      - name: Code Checkout
        uses: actions/checkout@v2
      - name: Create Workspace and Docker Image
        run: |
          export REPO=$(basename $GITHUB_REPOSITORY) &&
          cd $GITHUB_WORKSPACE/.. && mkdir src/ &&
          mv $REPO/ src/ && mkdir $REPO/ && mv src/ $REPO/ && cd $REPO/ &&
          docker build -t openvins -f $GITHUB_WORKSPACE/src/$REPO/Dockerfile_ros2_18_04 .
      - name: Echo Environment
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "echo $ROS_DISTRO && echo $ROS_VERSION"
      - name: Run Build in Docker
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && colcon build"
      # THIS SEEMS TO FAIL WITH 245 ERROR, NOT SURE WHY!!!!!
      #- name: Run OpenVINS Simulation!
      #  run: |
      #    docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && source install/setup.bash && ros2 run ov_msckf run_simulation src/open_vins/config/rpng_sim/estimator_config.yaml"
  build_2004:
    name: "ROS2 Ubuntu 20.04"
    runs-on: ubuntu-latest
    steps:
      - name: Code Checkout
        uses: actions/checkout@v2
      - name: Create Workspace and Docker Image
        run: |
          export REPO=$(basename $GITHUB_REPOSITORY) &&
          cd $GITHUB_WORKSPACE/.. && mkdir src/ &&
          mv $REPO/ src/ && mkdir $REPO/ && mv src/ $REPO/ && cd $REPO/ &&
          docker build -t openvins -f $GITHUB_WORKSPACE/src/$REPO/Dockerfile_ros2_20_04 .
      - name: Echo Environment
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "echo $ROS_DISTRO && echo $ROS_VERSION"
      - name: Run Build in Docker
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && colcon build"
      - name: Run OpenVINS Simulation!
        run: |
          docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws openvins /bin/bash -c "cd /catkin_ws && source install/setup.bash && ros2 run ov_msckf run_simulation src/open_vins/config/rpng_sim/estimator_config.yaml"

9
.gitignore vendored Normal file
View File

@@ -0,0 +1,9 @@
# Out-of-source build directories (CMake, CLion cmake-build-*, catkin)
cmake-build-*
build
Build
# Editor backup files
*.*~
# JetBrains IDE project folder
.idea
# Generated documentation output
# NOTE(review): spelled "doxgen" (no 'y') — presumably matches the actual
# output directory name used by the docs scripts; verify before "fixing".
doxgen_generated
# Vim swap files
*.swp
*.swo
doc

51
Dockerfile_ros1_16_04 Normal file
View File

@@ -0,0 +1,51 @@
# CI/development image: ROS1 Kinetic on Ubuntu 16.04.
FROM osrf/ros:kinetic-desktop-full
# =========================================================
# =========================================================
# Are you looking for how to use this docker file?
# - https://docs.openvins.com/dev-docker.html
# - https://docs.docker.com/get-started/
# - http://wiki.ros.org/docker/Tutorials/Docker
# =========================================================
# =========================================================
# Dependencies we use; catkin tools is a very good build system.
# Also some helper utilities for fast in-terminal edits (nano etc).
RUN apt-get update && apt-get install -y libeigen3-dev nano git
RUN sudo apt-get install -y python-catkin-tools
# Ceres solver install and setup
# NOTE: need to use an older version as eigen is very outdated here!!!
# NOTE: https://github.com/ceres-solver/ceres-solver/issues/541
RUN sudo apt-get install -y cmake libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev libsuitesparse-dev libceres-dev
# Source build of Ceres kept for reference (distro package is used instead).
# ENV CERES_VERSION="2.0.0"
# RUN git clone https://ceres-solver.googlesource.com/ceres-solver && \
#     cd ceres-solver && \
#     git checkout tags/${CERES_VERSION} && \
#     git reset --hard e51e9b46f6 && \
#     mkdir build && cd build && \
#     cmake .. && \
#     make -j$(nproc) install && \
#     rm -rf ../../ceres-solver
# Seems this has Python 2.7 installed on it...
RUN apt-get update && apt-get install -y python2.7-dev python-matplotlib python-numpy python-psutil python-tk
# Install deps needed for clion remote debugging
# https://blog.jetbrains.com/clion/2020/01/using-docker-with-clion/
# RUN sed -i '6i\source "/catkin_ws/devel/setup.bash"\' /ros_entrypoint.sh
RUN apt-get update && apt-get install -y ssh build-essential gcc g++ \
    gdb clang cmake rsync tar python && apt-get clean
# Minimal sshd config so CLion can attach remotely (debug-only; permissive on purpose).
RUN ( \
    echo 'LogLevel DEBUG2'; \
    echo 'PermitRootLogin yes'; \
    echo 'PasswordAuthentication yes'; \
    echo 'Subsystem sftp /usr/lib/openssh/sftp-server'; \
  ) > /etc/ssh/sshd_config_test_clion \
  && mkdir /run/sshd
# Throwaway login for the remote-debug session (user/password).
RUN useradd -m user && yes password | passwd user
RUN usermod -s /bin/bash user
# Run sshd in the foreground as the container entry command.
CMD ["/usr/sbin/sshd", "-D", "-e", "-f", "/etc/ssh/sshd_config_test_clion"]

48
Dockerfile_ros1_18_04 Normal file
View File

@@ -0,0 +1,48 @@
# CI/development image: ROS1 Melodic on Ubuntu 18.04.
FROM osrf/ros:melodic-desktop-full
# =========================================================
# =========================================================
# Are you looking for how to use this docker file?
# - https://docs.openvins.com/dev-docker.html
# - https://docs.docker.com/get-started/
# - http://wiki.ros.org/docker/Tutorials/Docker
# =========================================================
# =========================================================
# Dependencies we use; catkin tools is a very good build system.
# Also some helper utilities for fast in-terminal edits (nano etc).
RUN apt-get update && apt-get install -y libeigen3-dev nano git
RUN sudo apt-get install -y python-catkin-tools
# Ceres solver install and setup
RUN sudo apt-get install -y cmake libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev libsuitesparse-dev libceres-dev
# Source build of Ceres kept for reference (distro package is used instead).
# ENV CERES_VERSION="2.0.0"
# RUN git clone https://ceres-solver.googlesource.com/ceres-solver && \
#     cd ceres-solver && \
#     git checkout tags/${CERES_VERSION} && \
#     mkdir build && cd build && \
#     cmake .. && \
#     make -j$(nproc) install && \
#     rm -rf ../../ceres-solver
# Seems this has Python 3.6 installed on it...
RUN apt-get update && apt-get install -y python3-dev python3-matplotlib python3-numpy python3-psutil python3-tk
# Install deps needed for clion remote debugging
# https://blog.jetbrains.com/clion/2020/01/using-docker-with-clion/
# RUN sed -i '6i\source "/catkin_ws/devel/setup.bash"\' /ros_entrypoint.sh
RUN apt-get update && apt-get install -y ssh build-essential gcc g++ \
    gdb clang cmake rsync tar python && apt-get clean
# Minimal sshd config so CLion can attach remotely (debug-only; permissive on purpose).
RUN ( \
    echo 'LogLevel DEBUG2'; \
    echo 'PermitRootLogin yes'; \
    echo 'PasswordAuthentication yes'; \
    echo 'Subsystem sftp /usr/lib/openssh/sftp-server'; \
  ) > /etc/ssh/sshd_config_test_clion \
  && mkdir /run/sshd
# Throwaway login for the remote-debug session (user/password).
RUN useradd -m user && yes password | passwd user
RUN usermod -s /bin/bash user
# Run sshd in the foreground as the container entry command.
CMD ["/usr/sbin/sshd", "-D", "-e", "-f", "/etc/ssh/sshd_config_test_clion"]

48
Dockerfile_ros1_20_04 Normal file
View File

@@ -0,0 +1,48 @@
# CI/development image: ROS1 Noetic on Ubuntu 20.04.
FROM osrf/ros:noetic-desktop-full
# =========================================================
# =========================================================
# Are you looking for how to use this docker file?
# - https://docs.openvins.com/dev-docker.html
# - https://docs.docker.com/get-started/
# - http://wiki.ros.org/docker/Tutorials/Docker
# =========================================================
# =========================================================
# Dependencies we use; catkin tools is a very good build system.
# Also some helper utilities for fast in-terminal edits (nano etc).
RUN apt-get update && apt-get install -y libeigen3-dev nano git
RUN sudo apt-get install -y python3-catkin-tools python3-osrf-pycommon
# Ceres solver install and setup
RUN sudo apt-get install -y cmake libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev libsuitesparse-dev libceres-dev
# Source build of Ceres kept for reference (distro package is used instead).
# ENV CERES_VERSION="2.0.0"
# RUN git clone https://ceres-solver.googlesource.com/ceres-solver && \
#     cd ceres-solver && \
#     git checkout tags/${CERES_VERSION} && \
#     mkdir build && cd build && \
#     cmake .. && \
#     make -j$(nproc) install && \
#     rm -rf ../../ceres-solver
# Seems this has Python 3.8 installed on it...
RUN apt-get update && apt-get install -y python3-dev python3-matplotlib python3-numpy python3-psutil python3-tk
# Install deps needed for clion remote debugging
# https://blog.jetbrains.com/clion/2020/01/using-docker-with-clion/
# RUN sed -i '6i\source "/catkin_ws/devel/setup.bash"\' /ros_entrypoint.sh
RUN apt-get update && apt-get install -y ssh build-essential gcc g++ \
    gdb clang cmake rsync tar python && apt-get clean
# Minimal sshd config so CLion can attach remotely (debug-only; permissive on purpose).
RUN ( \
    echo 'LogLevel DEBUG2'; \
    echo 'PermitRootLogin yes'; \
    echo 'PasswordAuthentication yes'; \
    echo 'Subsystem sftp /usr/lib/openssh/sftp-server'; \
  ) > /etc/ssh/sshd_config_test_clion \
  && mkdir /run/sshd
# Throwaway login for the remote-debug session (user/password).
RUN useradd -m user && yes password | passwd user
RUN usermod -s /bin/bash user
# Run sshd in the foreground as the container entry command.
CMD ["/usr/sbin/sshd", "-D", "-e", "-f", "/etc/ssh/sshd_config_test_clion"]

47
Dockerfile_ros2_18_04 Normal file
View File

@@ -0,0 +1,47 @@
# CI/development image: ROS2 Dashing on Ubuntu 18.04.
FROM osrf/ros:dashing-desktop
# =========================================================
# =========================================================
# Are you looking for how to use this docker file?
# - https://docs.openvins.com/dev-docker.html
# - https://docs.docker.com/get-started/
# - http://wiki.ros.org/docker/Tutorials/Docker
# =========================================================
# =========================================================
# Dependencies we use, plus some helper utilities for
# fast in-terminal edits (nano etc).
RUN apt-get update && apt-get install -y libeigen3-dev nano git
# Ceres solver install and setup
RUN sudo apt-get install -y cmake libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev libsuitesparse-dev libceres-dev
# Source build of Ceres kept for reference (distro package is used instead).
# ENV CERES_VERSION="2.0.0"
# RUN git clone https://ceres-solver.googlesource.com/ceres-solver && \
#     cd ceres-solver && \
#     git checkout tags/${CERES_VERSION} && \
#     mkdir build && cd build && \
#     cmake .. && \
#     make -j$(nproc) install && \
#     rm -rf ../../ceres-solver
# Seems this has Python 3.6 installed on it...
RUN apt-get update && apt-get install -y python3-dev python3-matplotlib python3-numpy python3-psutil python3-tk
# Install deps needed for clion remote debugging
# https://blog.jetbrains.com/clion/2020/01/using-docker-with-clion/
# RUN sed -i '6i\source "/catkin_ws/install/setup.bash"\' /ros_entrypoint.sh
RUN apt-get update && apt-get install -y ssh build-essential gcc g++ \
    gdb clang cmake rsync tar python && apt-get clean
# Minimal sshd config so CLion can attach remotely (debug-only; permissive on purpose).
RUN ( \
    echo 'LogLevel DEBUG2'; \
    echo 'PermitRootLogin yes'; \
    echo 'PasswordAuthentication yes'; \
    echo 'Subsystem sftp /usr/lib/openssh/sftp-server'; \
  ) > /etc/ssh/sshd_config_test_clion \
  && mkdir /run/sshd
# Throwaway login for the remote-debug session (user/password).
RUN useradd -m user && yes password | passwd user
RUN usermod -s /bin/bash user
# Run sshd in the foreground as the container entry command.
CMD ["/usr/sbin/sshd", "-D", "-e", "-f", "/etc/ssh/sshd_config_test_clion"]

47
Dockerfile_ros2_20_04 Normal file
View File

@@ -0,0 +1,47 @@
# CI/development image: ROS2 Galactic on Ubuntu 20.04.
FROM osrf/ros:galactic-desktop
# =========================================================
# =========================================================
# Are you looking for how to use this docker file?
# - https://docs.openvins.com/dev-docker.html
# - https://docs.docker.com/get-started/
# - http://wiki.ros.org/docker/Tutorials/Docker
# =========================================================
# =========================================================
# Dependencies we use, plus some helper utilities for
# fast in-terminal edits (nano etc).
RUN apt-get update && apt-get install -y libeigen3-dev nano git
# Ceres solver install and setup
RUN sudo apt-get install -y cmake libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev libsuitesparse-dev libceres-dev
# Source build of Ceres kept for reference (distro package is used instead).
# ENV CERES_VERSION="2.0.0"
# RUN git clone https://ceres-solver.googlesource.com/ceres-solver && \
#     cd ceres-solver && \
#     git checkout tags/${CERES_VERSION} && \
#     mkdir build && cd build && \
#     cmake .. && \
#     make -j$(nproc) install && \
#     rm -rf ../../ceres-solver
# Seems this has Python 3.8 installed on it...
RUN apt-get update && apt-get install -y python3-dev python3-matplotlib python3-numpy python3-psutil python3-tk
# Install deps needed for clion remote debugging
# https://blog.jetbrains.com/clion/2020/01/using-docker-with-clion/
# RUN sed -i '6i\source "/catkin_ws/install/setup.bash"\' /ros_entrypoint.sh
RUN apt-get update && apt-get install -y ssh build-essential gcc g++ \
    gdb clang cmake rsync tar python && apt-get clean
# Minimal sshd config so CLion can attach remotely (debug-only; permissive on purpose).
RUN ( \
    echo 'LogLevel DEBUG2'; \
    echo 'PermitRootLogin yes'; \
    echo 'PasswordAuthentication yes'; \
    echo 'Subsystem sftp /usr/lib/openssh/sftp-server'; \
  ) > /etc/ssh/sshd_config_test_clion \
  && mkdir /run/sshd
# Throwaway login for the remote-debug session (user/password).
RUN useradd -m user && yes password | passwd user
RUN usermod -s /bin/bash user
# Run sshd in the foreground as the container entry command.
CMD ["/usr/sbin/sshd", "-D", "-e", "-f", "/etc/ssh/sshd_config_test_clion"]

2421
Doxyfile Normal file

File diff suppressed because it is too large Load Diff

26
Doxyfile-mcss Normal file
View File

@@ -0,0 +1,26 @@
# m.css variant of the Doxygen config: inherits the main Doxyfile, then
# switches to XML output (consumed by m.css) and themes the HTML.
# Lines starting with "##!" are m.css-specific directives, not Doxygen options.
@INCLUDE = Doxyfile
GENERATE_HTML = NO
GENERATE_XML = YES
XML_PROGRAMLISTING = NO
HTML_EXTRA_STYLESHEET = \
https://fonts.googleapis.com/css?family=Source+Sans+Pro:400,400i,600,600i%7CSource+Code+Pro:400,400i,600&subset=latin-ext \
docs/css/m-udel+documentation.compiled.css \
docs/css/custom.css
## No need to expose to-do list or bug list in public docs.
GENERATE_TODOLIST = NO
GENERATE_BUGLIST = NO
##! M_THEME_COLOR = #2f73a3
##! M_FAVICON = docs/img/favicon-light.png
##! M_LINKS_NAVBAR1 = pages annotated namespaceov__core namespaceov__type namespaceov__msckf namespaceov__init namespaceov__eval
##! M_LINKS_NAVBAR2 = \
##! "<a href=\"https://github.com/rpng/open_vins/">GitHub</a>"
##! M_SEARCH_DOWNLOAD_BINARY = NO
##! M_PAGE_FINE_PRINT = "<p>Generated by <a href="https://doxygen.org/">Doxygen</a> {doxygen_version} and <a href="https://mcss.mosra.cz/">m.css</a>.</p>"

674
LICENSE Normal file
View File

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

173
ReadMe.md Normal file
View File

@@ -0,0 +1,173 @@
# OpenVINS
[![ROS 1 Workflow](https://github.com/rpng/open_vins/actions/workflows/build_ros1.yml/badge.svg)](https://github.com/rpng/open_vins/actions/workflows/build_ros1.yml)
[![ROS 2 Workflow](https://github.com/rpng/open_vins/actions/workflows/build_ros2.yml/badge.svg)](https://github.com/rpng/open_vins/actions/workflows/build_ros2.yml)
[![ROS Free Workflow](https://github.com/rpng/open_vins/actions/workflows/build.yml/badge.svg)](https://github.com/rpng/open_vins/actions/workflows/build.yml)
Welcome to the OpenVINS project!
The OpenVINS project houses some core computer vision code along with a state-of-the-art filter-based visual-inertial
estimator. The core filter is an [Extended Kalman filter](https://en.wikipedia.org/wiki/Extended_Kalman_filter) which
fuses inertial information with sparse visual feature tracks. These visual feature tracks are fused leveraging
the [Multi-State Constraint Kalman Filter (MSCKF)](https://ieeexplore.ieee.org/document/4209642) sliding window
formulation which allows for 3D features to update the state estimate without directly estimating the feature states in
the filter. Inspired by graph-based optimization systems, the included filter has modularity allowing for convenient
covariance management with a proper type-based state system. Please take a look at the feature list below for full
details on what the system supports.
* Github project page - https://github.com/rpng/open_vins
* Documentation - https://docs.openvins.com/
* Getting started guide - https://docs.openvins.com/getting-started.html
* Publication reference - http://udel.edu/~pgeneva/downloads/papers/c10.pdf
## News / Events
* **March 14, 2022** - Initial dynamic initialization open sourcing, asynchronous subscription to inertial readings and publishing of odometry, support for lower frequency feature tracking. See v2.6 [PR#232](https://github.com/rpng/open_vins/pull/232) for details.
* **December 13, 2021** - New YAML configuration system, ROS2 support, Docker images, robust static initialization based on disparity, internal logging system to reduce verbosity, image transport publishers, dynamic number of features support, and other small fixes. See
v2.5 [PR#209](https://github.com/rpng/open_vins/pull/209) for details.
* **July 19, 2021** - Camera classes, masking support, alignment utility, and other small fixes. See
v2.4 [PR#186](https://github.com/rpng/open_vins/pull/186) for details.
* **December 1, 2020** - Released improved memory management, active feature pointcloud publishing, limiting number of
features in update to bound compute, and other small fixes. See
v2.3 [PR#117](https://github.com/rpng/open_vins/pull/117) for details.
* **November 18, 2020** - Released groundtruth generation utility package, [vicon2gt](https://github.com/rpng/vicon2gt)
to enable creation of groundtruth trajectories in a motion capture room for evaluating VIO methods.
* **July 7, 2020** - Released zero velocity update for vehicle applications and direct initialization when standing
still. See [PR#79](https://github.com/rpng/open_vins/pull/79) for details.
* **May 18, 2020** - Released secondary pose graph example
repository [ov_secondary](https://github.com/rpng/ov_secondary) based
on [VINS-Fusion](https://github.com/HKUST-Aerial-Robotics/VINS-Fusion). OpenVINS now publishes marginalized feature
track, feature 3d position, and first camera intrinsics and extrinsics.
See [PR#66](https://github.com/rpng/open_vins/pull/66) for details and discussion.
* **April 3, 2020** - Released [v2.0](https://github.com/rpng/open_vins/releases/tag/v2.0) update to the codebase with
some key refactoring, ros-free building, improved dataset support, and single inverse depth feature representation.
Please check out the [release page](https://github.com/rpng/open_vins/releases/tag/v2.0) for details.
* **January 21, 2020** - Our paper has been accepted for presentation in [ICRA 2020](https://www.icra2020.org/). We look
forward to seeing everybody there! We have also added links to a few videos of the system running on different
datasets.
* **October 23, 2019** - OpenVINS placed first in the [IROS 2019 FPV Drone Racing VIO Competition
](http://rpg.ifi.uzh.ch/uzh-fpv.html). We will be giving a short presentation at
the [workshop](https://wp.nyu.edu/workshopiros2019mav/) at 12:45pm in Macau on November 8th.
* **October 1, 2019** - We will be presenting at the [Visual-Inertial Navigation: Challenges and Applications
](http://udel.edu/~ghuang/iros19-vins-workshop/index.html) workshop at [IROS 2019](https://www.iros2019.org/). The
submitted workshop paper can be found at [this](http://udel.edu/~ghuang/iros19-vins-workshop/papers/06.pdf) link.
* **August 21, 2019** - Open sourced [ov_maplab](https://github.com/rpng/ov_maplab) for interfacing OpenVINS with
the [maplab](https://github.com/ethz-asl/maplab) library.
* **August 15, 2019** - Initial release of OpenVINS repository and documentation website!
## Project Features
* Sliding window visual-inertial MSCKF
* Modular covariance type system
* Comprehensive documentation and derivations
* Extendable visual-inertial simulator
* On manifold SE(3) b-spline
* Arbitrary number of cameras
* Arbitrary sensor rate
* Automatic feature generation
* Six different feature representations
1. Global XYZ
2. Global inverse depth
3. Anchored XYZ
4. Anchored inverse depth
5. Anchored MSCKF inverse depth
6. Anchored single inverse depth
* Calibration of sensor intrinsics and extrinsics
* Camera to IMU transform
* Camera to IMU time offset
* Camera intrinsics
* Environmental SLAM feature
* OpenCV ARUCO tag SLAM features
* Sparse feature SLAM features
* Visual tracking support
* Monocular camera
* Stereo camera
* Binocular (synchronized) cameras
* KLT or descriptor based
* Masked tracking
* Static and dynamic state initialization
* Zero velocity detection and updates
* Out of the box evaluation on EurocMav, TUM-VI, UZH-FPV, KAIST Urban and VIO datasets
* Extensive evaluation suite (ATE, RPE, NEES, RMSE, etc..)
## Codebase Extensions
* **[vicon2gt](https://github.com/rpng/vicon2gt)** - This utility was created to generate groundtruth trajectories using
a motion capture system (e.g. Vicon or OptiTrack) for use in evaluating visual-inertial estimation systems.
Specifically we calculate the inertial IMU state (full 15 dof) at camera frequency rate and generate a groundtruth
trajectory similar to those provided by the EurocMav datasets. Performs fusion of inertial and motion capture
information and estimates all unknown spatial-temporal calibrations between the two sensors.
* **[ov_maplab](https://github.com/rpng/ov_maplab)** - This codebase contains the interface wrapper for exporting
visual-inertial runs from [OpenVINS](https://github.com/rpng/open_vins) into the ViMap structure taken
by [maplab](https://github.com/ethz-asl/maplab). The state estimates and raw images are appended to the ViMap as
OpenVINS runs through a dataset. After completion of the dataset, features are re-extracted and triangulated with
maplab's feature system. This can be used to merge multi-session maps, or to perform a batch optimization after first
running the data through OpenVINS. Some examples have been provided along with a helper script to export trajectories
into the standard groundtruth format.
* **[ov_secondary](https://github.com/rpng/ov_secondary)** - This is an example secondary thread which provides loop
closure in a loosely coupled manner for [OpenVINS](https://github.com/rpng/open_vins). This is a modification of the
code originally developed by the HKUST aerial robotics group and can be found in
their [VINS-Fusion](https://github.com/HKUST-Aerial-Robotics/VINS-Fusion) repository. Here we stress that this is a
loosely coupled method, thus no information is returned to the estimator to improve the underlying OpenVINS odometry.
This codebase has been modified in a few key areas including: exposing more loop closure parameters, subscribing to
camera intrinsics, simplifying configuration such that only topics need to be supplied, and some tweaks to the loop
closure detection to improve frequency.
## Demo Videos
<a href="http://www.youtube.com/watch?v=KCX51GvYGss">
<img src="https://raw.githubusercontent.com/rpng/open_vins/master/docs/youtube/KCX51GvYGss.jpg" width="120" height="90" />
</a>
<a href="http://www.youtube.com/watch?v=Lc7VQHngSuQ">
<img src="https://raw.githubusercontent.com/rpng/open_vins/master/docs/youtube/Lc7VQHngSuQ.jpg" width="120" height="90" />
</a>
<a href="http://www.youtube.com/watch?v=vaia7iPaRW8">
<img src="https://raw.githubusercontent.com/rpng/open_vins/master/docs/youtube/vaia7iPaRW8.jpg" width="120" height="90" />
</a>
<a href="http://www.youtube.com/watch?v=MCzTF9ye2zw">
<img src="https://raw.githubusercontent.com/rpng/open_vins/master/docs/youtube/MCzTF9ye2zw.jpg" width="120" height="90"/>
</a>
<a href="http://www.youtube.com/watch?v=eSQLWcNrx_I">
<img src="https://raw.githubusercontent.com/rpng/open_vins/master/docs/youtube/eSQLWcNrx_I.jpg" width="120" height="90" />
</a>
<br/>
<a href="http://www.youtube.com/watch?v=187AXuuGNNw">
<img src="https://raw.githubusercontent.com/rpng/open_vins/master/docs/youtube/187AXuuGNNw.jpg" width="120" height="90" />
</a>
<a href="http://www.youtube.com/watch?v=oUoLlrFryk0">
<img src="https://raw.githubusercontent.com/rpng/open_vins/master/docs/youtube/oUoLlrFryk0.jpg" width="120" height="90" />
</a>
<a href="http://www.youtube.com/watch?v=ExPIGwORm4E">
<img src="https://raw.githubusercontent.com/rpng/open_vins/master/docs/youtube/ExPIGwORm4E.jpg" width="120" height="90" />
</a>
<a href="http://www.youtube.com/watch?v=lXHl-qgLGl8">
<img src="https://raw.githubusercontent.com/rpng/open_vins/master/docs/youtube/lXHl-qgLGl8.jpg" width="120" height="90" />
</a>
## Credit / Licensing
This code was written by the [Robot Perception and Navigation Group (RPNG)](https://sites.udel.edu/robot/) at the
University of Delaware. If you have any issues with the code please open an issue on our github page with relevant
implementation details and references. For researchers that have leveraged or compared to this work, please cite the
following:
```txt
@Conference{Geneva2020ICRA,
Title = {{OpenVINS}: A Research Platform for Visual-Inertial Estimation},
Author = {Patrick Geneva and Kevin Eckenhoff and Woosik Lee and Yulin Yang and Guoquan Huang},
Booktitle = {Proc. of the IEEE International Conference on Robotics and Automation},
Year = {2020},
Address = {Paris, France},
Url = {\url{https://github.com/rpng/open_vins}}
}
```
The codebase and documentation are licensed under the [GNU General Public License v3 (GPL-3)](https://www.gnu.org/licenses/gpl-3.0.txt).
You must preserve the copyright and license notices in your derivative work and make available the complete source code with modifications under the same license ([see this](https://choosealicense.com/licenses/gpl-3.0/); this is not legal advice).

View File

@@ -0,0 +1,118 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if first-estimate Jacobians should be used (enable for good consistency)
use_imuavg: true # if using discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints between pairs
max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking)
calib_cam_extrinsics: true # if the transform between camera and IMU should be optimized R_ItoC, p_CinI
calib_cam_intrinsics: true # if camera intrinsics should be optimized (focal, center, distortion)
calib_cam_timeoffset: true # if timeoffset between camera and IMU should be optimized
max_clones: 11 # how many clones in the sliding window
max_slam: 50 # number of features in our state vector
max_slam_in_update: 25 # update can be split into sequential updates of batches, how many in a batch
max_msckf_in_update: 40 # how many MSCKF features to use in the update
dt_slam_delay: 1 # delay before initializing (helps with stability from bad initialization...)
gravity_mag: 9.81 # magnitude of gravity in this location
feat_rep_msckf: "GLOBAL_3D"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: true
zupt_chi2_multipler: 0 # set to 0 for only disp-based
zupt_max_velocity: 0.1
zupt_noise_multiplier: 50
zupt_max_disparity: 1.5 # set to 0 for only imu-based
zupt_only_at_beginning: true
# ==================================================================
# ==================================================================
init_window_time: 2.0 # how many seconds to collect initialization information
init_imu_thresh: 1.5 # threshold for variance of the accelerometer to detect a "jerk" in motion
init_max_disparity: 1.5 # max disparity to consider the platform stationary (dependent on resolution)
init_max_features: 25 # how many features to track during initialization (saves on computation)
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 10.0
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-12
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false # if we want to record timing information of the method
record_timing_filepath: "/tmp/traj_timing.txt" # https://docs.openvins.com/eval-timing.html#eval-ov-timing-flame
# if we want to save the simulation state and its diagonal covariance
# use this with rosrun ov_eval error_simulation
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 200
fast_threshold: 20
grid_x: 20
grid_y: 20
min_px_dist: 15
knn_ratio: 0.70
track_frequency: 21.0
downsample_cameras: false # will downsample image in half if true
multi_threading: true # if should enable opencv multi threading
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 1
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 1
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: false
# imu and camera spatial-temporal calibration
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,16 @@
%YAML:1.0
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /imu0
time_offset: 0.0
update_rate: 200.0

View File

@@ -0,0 +1,28 @@
%YAML:1.0
cam0:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975]
- [0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768]
- [-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05]
distortion_model: radtan
intrinsics: [458.654, 457.296, 367.215, 248.375] #fu, fv, cu, cv
resolution: [752, 480]
rostopic: /cam0/image_raw
cam1:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [0.0125552670891, -0.999755099723, 0.0182237714554, -0.0198435579556]
- [0.999598781151, 0.0130119051815, 0.0251588363115, 0.0453689425024]
- [-0.0253898008918, 0.0179005838253, 0.999517347078, 0.00786212447038]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [-0.28368365,0.07451284,-0.00010473,-3.55590700e-05]
distortion_model: radtan
intrinsics: [457.587, 456.134, 379.999, 255.238] #fu, fv, cu, cv
resolution: [752, 480]
rostopic: /cam1/image_raw

View File

@@ -0,0 +1,122 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "DEBUG" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if we should use first-estimate Jacobians (enable for good consistency)
use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints
max_cameras: 2
calib_cam_extrinsics: false
calib_cam_intrinsics: true
calib_cam_timeoffset: true
max_clones: 12
max_slam: 50
max_slam_in_update: 25
max_msckf_in_update: 50
dt_slam_delay: 1
#gravity_mag: 9.79858
gravity_mag: 9.81
feat_rep_msckf: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: true
zupt_chi2_multipler: 1 # set to 0 for only disp-based
zupt_max_velocity: 0.1
zupt_noise_multiplier: 5
zupt_max_disparity: 0.4 # set to 0 for only imu-based
zupt_only_at_beginning: false
# ==================================================================
# ==================================================================
init_window_time: 2.0
init_imu_thresh: 0.5 #0.5
init_max_disparity: 1.5
init_max_features: 25
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 0.0 # traj is mostly straight line motion
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-20 # traj is mostly straight line motion
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false
record_timing_filepath: "/tmp/traj_timing.txt"
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 200
fast_threshold: 50
grid_x: 20
grid_y: 15
min_px_dist: 30
knn_ratio: 0.65
track_frequency: 21.0
downsample_cameras: false
multi_threading: true
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
fi_max_dist: 500
fi_max_baseline: 800
fi_max_cond_number: 20000
fi_triangulate_1d: false
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 1
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 1
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: false
# imu and camera spatial-temporal calibration
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,23 @@
%YAML:1.0 # need to specify the file type at the top!
# MTI-100 series converted from data sheet, guess on bias random walk
# https://www.xsens.com/hubfs/Downloads/usermanual/MTi_usermanual.pdf
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 2.0000e-03 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 3.0000e-03 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
# <param name="gyroscope_noise_density" type="double" value="1.7453e-04" /> <!-- 1.7453e-04 -->
# <param name="gyroscope_random_walk" type="double" value="1.0000e-05" />
# <param name="accelerometer_noise_density" type="double" value="5.8860e-03" /> <!-- 5.8860e-04 -->
# <param name="accelerometer_random_walk" type="double" value="1.0000e-04" />
model: calibrated
rostopic: /imu/data_raw
time_offset: 0.0
update_rate: 500.0

View File

@@ -0,0 +1,28 @@
%YAML:1.0 # need to specify the file type at the top!
cam0:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [-0.00413,-0.01966,0.99980,1.73944]
- [-0.99993,-0.01095,-0.00435,0.27803]
- [0.01103,-0.99975,-0.01962,-0.08785]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [-5.6143027800000002e-02,1.3952563200000001e-01,-1.2155906999999999e-03,-9.7281389999999998e-04]
distortion_model: radtan
intrinsics: [8.1690378992770002e+02,8.1156803828490001e+02,6.0850726281690004e+02,2.6347599764440002e+02]
resolution: [1280, 560]
rostopic: /stereo/left/image_raw
cam1:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [-0.00768,-0.01509,0.99986,1.73376]
- [-0.99988,-0.01305,-0.00788,-0.19706]
- [0.01317,-0.99980,-0.01499,-0.08271]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [-5.4921981799999998e-02,1.4243657430000001e-01,7.5412299999999996e-05,-6.7560530000000001e-04]
distortion_model: radtan
intrinsics: [8.1378205539589999e+02,8.0852165574269998e+02,6.1386419539320002e+02,2.4941049348650000e+02]
resolution: [1280, 560]
rostopic: /stereo/right/image_raw

View File

@@ -0,0 +1,122 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if first-estimate Jacobians should be used (enable for good consistency)
use_imuavg: true # if using discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # NEED TO USE STEREO! OTHERWISE CAN'T RECOVER SCALE!!!!!! DEGENERATE MOTION!!!
max_cameras: 2 # NEED TO USE STEREO! OTHERWISE CAN'T RECOVER SCALE!!!!!! DEGENERATE MOTION!!!
calib_cam_extrinsics: false # disable since this is a degenerate motion
calib_cam_intrinsics: true
calib_cam_timeoffset: false # disable since this is a degenerate motion
max_clones: 11 # how many clones in the sliding window
max_slam: 50 # number of features in our state vector
max_slam_in_update: 25 # update can be split into sequential updates of batches, how many in a batch
max_msckf_in_update: 40 # how many MSCKF features to use in the update
dt_slam_delay: 1 # delay before initializing (helps with stability from bad initialization...)
gravity_mag: 9.81 # magnitude of gravity in this location
feat_rep_msckf: "GLOBAL_3D"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: true
zupt_chi2_multipler: 0 # set to 0 for only disp-based
zupt_max_velocity: 0.02
zupt_noise_multiplier: 10
zupt_max_disparity: 0.20 # set to 0 for only imu-based
zupt_only_at_beginning: false
# ==================================================================
# ==================================================================
init_window_time: 2.0 # how many seconds to collect initialization information
init_imu_thresh: 1.5 # threshold for variance of the accelerometer to detect a "jerk" in motion
init_max_disparity: 0.20 # max disparity to consider the platform stationary (dependent on resolution)
init_max_features: 25 # how many features to track during initialization (saves on computation)
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 5.0
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-15
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false # if we want to record timing information of the method
record_timing_filepath: "/tmp/traj_timing.txt" # https://docs.openvins.com/eval-timing.html#eval-ov-timing-flame
# if we want to save the simulation state and its diagonal covariance
# use this with rosrun ov_eval error_simulation
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 200
fast_threshold: 30
grid_x: 20
grid_y: 20
min_px_dist: 15
knn_ratio: 0.70
track_frequency: 10.0
downsample_cameras: false # will downsample image in half if true
multi_threading: true # if should enable opencv multi threading
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
fi_max_dist: 10.0
fi_max_baseline: 200
fi_max_cond_number: 25000
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 1.2
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 1.2
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: false
# imu and camera spatial-temporal calibration
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,20 @@
%YAML:1.0 # need to specify the file type at the top!
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 0.00333388 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 0.00047402 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
gyroscope_noise_density: 0.00005770 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 0.00001565 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
# accelerometer_noise_density: 0.07 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
# accelerometer_random_walk: 0.009 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
# gyroscope_noise_density: 0.001 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
# gyroscope_random_walk: 0.0003 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /mavros/imu/data
time_offset: 0.0
update_rate: 100.0

View File

@@ -0,0 +1,93 @@
%YAML:1.0 # need to specify the file type at the top!
cam0:
T_cam_imu:
- - -0.04030123999740945
- -0.9989998755524683
- 0.01936643232049068
- 0.02103955032447366
- - 0.026311325355146964
- -0.020436499663524704
- -0.9994448777394171
- -0.038224929976612206
- - 0.9988410905708309
- -0.0397693113802049
- 0.027108627033059024
- -0.1363488241088845
- - 0.0
- 0.0
- 0.0
- 1.0
cam_overlaps:
- 1
camera_model: pinhole
distortion_coeffs:
- 0.006896928127777268
- -0.009144207062654397
- 0.000254113977103925
- 0.0021434982252719545
distortion_model: radtan
intrinsics:
- 380.9229090195708
- 380.29264802262736
- 324.68121181846755
- 224.6741321466431
resolution:
- 640
- 480
rostopic: /camera/infra1/image_rect_raw
timeshift_cam_imu: -0.029958533056650416
cam1:
T_cam_imu:
- - -0.03905752472566068
- -0.9990498568899562
- 0.019336318430946575
- -0.02909273113160158
- - 0.025035478432625047
- -0.020323396666370924
- -0.9994799569614147
- -0.03811090793611019
- - 0.99892328763622
- -0.03855311914877835
- 0.02580547271309183
- -0.13656684822705098
- - 0.0
- 0.0
- 0.0
- 1.0
T_cn_cnm1:
- - 0.9999992248836708
- 6.384241340452582e-05
- 0.0012434452955667624
- -0.049960282472300055
- - -6.225102643531651e-05
- 0.9999991790958949
- -0.0012798173093508036
- -5.920119010064575e-05
- - -0.001243525981443161
- 0.0012797389115975439
- 0.9999984079544582
- -0.00014316003395349448
- - 0.0
- 0.0
- 0.0
- 1.0
cam_overlaps:
- 0
camera_model: pinhole
distortion_coeffs:
- 0.007044055287844759
- -0.010251485722185347
- 0.0006674304399871926
- 0.001678899816379666
distortion_model: radtan
intrinsics:
- 380.95187095303424
- 380.3065956074995
- 324.0678433553536
- 225.9586983198407
resolution:
- 640
- 480
rostopic: /camera/infra2/image_rect_raw
timeshift_cam_imu: -0.030340187355085417

View File

@@ -0,0 +1,154 @@
<launch>
<!-- mono or stereo and what ros bag to play -->
<arg name="max_cameras" default="2" />
<arg name="use_stereo" default="true" />
<arg name="bag_start" default="40" />
<arg name="dataset" default="rotation" /> <!-- circle, circle_fast, circle_head, square, square_fast, square_head -->
<arg name="bag" default="/media/patrick/RPNG\ FLASH\ 3/KAIST_VIO/$(arg dataset).bag" />
<arg name="bag_gt" default="$(find ov_data)/kaist_vio/$(arg dataset).txt" /> <!-- $(find ov_data)/kaist_vio/$(arg dataset).txt -->
<!-- imu starting thresholds -->
<arg name="init_window_time" default="1.5" />
<arg name="init_imu_thresh" default="0.1" />
<!-- saving trajectory path and timing information -->
<arg name="dosave" default="false" />
<arg name="dotime" default="false" />
<arg name="path_est" default="/tmp/traj_estimate.txt" />
<arg name="path_time" default="/tmp/traj_timing.txt" />
<!-- MASTER NODE! -->
<!-- <node name="run_subscribe_msckf" pkg="ov_msckf" type="run_subscribe_msckf" output="screen" clear_params="true" required="true" launch-prefix="gdb -ex run &#45;&#45;args">-->
<node name="run_subscribe_msckf" pkg="ov_msckf" type="run_subscribe_msckf" output="screen" clear_params="true" required="true">
<!-- bag topics -->
<param name="topic_imu" type="string" value="/mavros/imu/data" />
<param name="topic_camera0" type="string" value="/camera/infra1/image_rect_raw" />
<param name="topic_camera1" type="string" value="/camera/infra2/image_rect_raw" />
<!-- world/filter parameters -->
<param name="use_fej" type="bool" value="true" />
<param name="use_imuavg" type="bool" value="true" />
<param name="use_rk4int" type="bool" value="true" />
<param name="use_stereo" type="bool" value="$(arg use_stereo)" />
<param name="calib_cam_extrinsics" type="bool" value="false" /> <!-- degenerate motion causes this to fail (could use a smaller prior...) -->
<param name="calib_cam_intrinsics" type="bool" value="true" />
<param name="calib_cam_timeoffset" type="bool" value="true" />
<param name="calib_camimu_dt" type="double" value="-0.029958533056650416" />
<param name="max_clones" type="int" value="11" />
<param name="max_slam" type="int" value="50" />
<param name="max_slam_in_update" type="int" value="25" /> <!-- 25 seems to work well -->
<param name="max_msckf_in_update" type="int" value="40" />
<param name="max_cameras" type="int" value="$(arg max_cameras)" />
<param name="dt_slam_delay" type="double" value="1" />
<param name="init_window_time" type="double" value="$(arg init_window_time)" />
<param name="init_imu_thresh" type="double" value="$(arg init_imu_thresh)" />
<param name="gravity_mag" type="double" value="9.79858" /> <!-- 9.79858 -->
<param name="feat_rep_msckf" type="string" value="GLOBAL_3D" />
<param name="feat_rep_slam" type="string" value="ANCHORED_FULL_INVERSE_DEPTH" />
<param name="feat_rep_aruco" type="string" value="ANCHORED_FULL_INVERSE_DEPTH" />
<!-- zero velocity update parameters -->
<!-- inertial and disparity based detection (inertial is key for dynamic environments) -->
<param name="try_zupt" type="bool" value="true" />
<param name="zupt_chi2_multipler" type="double" value="0" /> <!-- set to 0 for only disp-based -->
<param name="zupt_max_velocity" type="double" value="0.1" />
<param name="zupt_noise_multiplier" type="double" value="50" />
<param name="zupt_max_disparity" type="double" value="0.2" /> <!-- set to 0 for only imu-based -->
<param name="zupt_only_at_beginning" type="bool" value="false" />
<!-- timing statistics recording -->
<param name="record_timing_information" type="bool" value="$(arg dotime)" />
<param name="record_timing_filepath" type="string" value="$(arg path_time)" />
<!-- tracker/extractor properties -->
<param name="use_klt" type="bool" value="true" />
<param name="num_pts" type="int" value="250" />
<param name="fast_threshold" type="int" value="15" />
<param name="grid_x" type="int" value="5" />
<param name="grid_y" type="int" value="3" />
<param name="min_px_dist" type="int" value="8" />
<param name="knn_ratio" type="double" value="0.70" />
<param name="downsample_cameras" type="bool" value="false" />
<param name="multi_threading" type="bool" value="true" />
<param name="histogram_method" type="string" value="HISTOGRAM" /> <!-- NONE, HISTOGRAM, CLAHE -->
<param name="fi_max_baseline" type="double" value="120" />
<param name="fi_max_cond_number" type="double" value="90000" />
<!-- aruco tag/mapping properties -->
<param name="use_aruco" type="bool" value="false" />
<param name="num_aruco" type="int" value="1024" />
<param name="downsize_aruco" type="bool" value="true" />
<!-- sensor noise values / update -->
<param name="up_msckf_sigma_px" type="double" value="1" />
<param name="up_msckf_chi2_multipler" type="double" value="1" />
<param name="up_slam_sigma_px" type="double" value="1" />
<param name="up_slam_chi2_multipler" type="double" value="1" />
<param name="up_aruco_sigma_px" type="double" value="1" />
<param name="up_aruco_chi2_multipler" type="double" value="1" />
<param name="gyroscope_noise_density" type="double" value="0.01" />
<param name="gyroscope_random_walk" type="double" value="0.0001" />
<param name="accelerometer_noise_density" type="double" value="0.1" />
<param name="accelerometer_random_walk" type="double" value="0.001" />
<!-- camera intrinsics -->
<rosparam param="cam0_wh">[640, 480]</rosparam>
<rosparam param="cam1_wh">[640, 480]</rosparam>
<param name="cam0_is_fisheye" type="bool" value="false" />
<param name="cam1_is_fisheye" type="bool" value="false" />
<rosparam param="cam0_k">[391.117,394.147,319.642,224.178]</rosparam>
<rosparam param="cam0_d">[0.00675,-0.00562,0.03121,-0.00828]</rosparam>
<rosparam param="cam1_k">[380.95187095303424, 380.3065956074995, 324.0678433553536, 225.9586983198407]</rosparam>
<rosparam param="cam1_d">[0.007044055287844759, -0.010251485722185347, 0.0006674304399871926, 0.001678899816379666]</rosparam>
<!-- camera extrinsics -->
<rosparam param="T_C0toI">
[
-0.0403012399974094,0.026311325355147,0.998841090570831,0.138044476707325,
-0.998999875552468,-0.0204364996635247,-0.0397693113802049,0.0148148255449128,
0.0193664323204907,-0.999444877739417,0.027108627033059,-0.0349149420753215,
0,0,0,1
]
</rosparam>
<rosparam param="T_C1toI">
[
-0.0390575247256606,0.025035478432625,0.99892328763622,0.136237639761255,
-0.999049856889956,-0.0203233966663709,-0.0385531191487784,-0.0351047099443363,
0.0193363184309466,-0.999479956961414,0.0258054727130918,-0.0340043702351212,
0,0,0,1
]
</rosparam>
</node>
<!-- play the dataset -->
<node pkg="rosbag" type="play" name="rosbag" args="-d 1 -s $(arg bag_start) $(arg bag)" required="true"/>
<!-- path viz of aligned gt -->
<node name="live_align_trajectory" pkg="ov_eval" type="live_align_trajectory" output="log" clear_params="true">
<param name="alignment_type" type="str" value="posyaw" />
<param name="path_gt" type="str" value="$(arg bag_gt)" />
</node>
<!-- record the trajectory if enabled -->
<group if="$(arg dosave)">
<node name="recorder_estimate" pkg="ov_eval" type="pose_to_file" output="screen" required="true">
<param name="topic" type="str" value="/ov_msckf/poseimu" />
<param name="topic_type" type="str" value="PoseWithCovarianceStamped" />
<param name="output" type="str" value="$(arg path_est)" />
</node>
</group>
<!-- &lt;!&ndash; record the groundtruth if enabled &ndash;&gt;-->
<!-- <node name="gt" pkg="ov_eval" type="pose_to_file" output="screen" required="true">-->
<!-- <param name="topic" type="str" value="/pose_transformed" />-->
<!-- <param name="topic_type" type="str" value="PoseStamped" />-->
<!-- <param name="output" type="str" value="$(arg bag_gt)" />-->
<!-- </node>-->
</launch>

View File

@@ -0,0 +1,115 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if we should use first-estimate Jacobians (enable for good consistency)
use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints
max_cameras: 2
calib_cam_extrinsics: true
calib_cam_intrinsics: true
calib_cam_timeoffset: true
max_clones: 11
max_slam: 50
max_slam_in_update: 25
max_msckf_in_update: 50
dt_slam_delay: 2
gravity_mag: 9.81
feat_rep_msckf: "GLOBAL_3D"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: true
zupt_chi2_multipler: 0 # set to 0 for only disp-based
zupt_max_velocity: 0.1
zupt_noise_multiplier: 50
zupt_max_disparity: 0.5 # set to 0 for only imu-based
zupt_only_at_beginning: true
# ==================================================================
# ==================================================================
init_window_time: 2.0
init_imu_thresh: 1.2
init_max_disparity: 1.5
init_max_features: 25
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 10.0
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-20
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false
record_timing_filepath: "/tmp/traj_timing.txt"
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 150
fast_threshold: 30
grid_x: 20
grid_y: 20
min_px_dist: 30
knn_ratio: 0.85
track_frequency: 21.0
downsample_cameras: false
multi_threading: true
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: true
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 1
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 1
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 8
# masks for our images
use_mask: false
# imu and camera spatial-temporal calibration
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,16 @@
%YAML:1.0
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /imu0
time_offset: 0.0
update_rate: 200.0

View File

@@ -0,0 +1,28 @@
%YAML:1.0
cam0:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [-0.99997806, -0.00600501, 0.0027968, 0.03847796]
- [0.00601449, -0.99997615, 0.00339343, -0.0045601]
- [0.00277635, 0.00341018, 0.99999033, 0.00418038]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [-0.292031518680717,0.08753155838496009,0.0009568457669165753,2.3463489813256424e-05]
distortion_model: radtan
intrinsics: [470.0502737897896,468.7574814232544,405.80799445368035,245.2879780490104] #fu, fv, cu, cv
resolution: [752, 480]
rostopic: /cam0/image_raw
cam1:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [-0.99984421, -0.00389232, 0.01721638, -0.07075565]
- [0.00394176, -0.9999882, 0.0028389, -0.00418534]
- [0.01720512, 0.00290632, 0.99984776, 0.00388861]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [-0.2847596229068525,0.07908861097045865,0.0011071433505703875,0.0005094909873658998]
distortion_model: radtan
intrinsics: [472.98384780424163,471.9917417027018,382.1928744696835,268.2536666120421] #fu, fv, cu, cv
resolution: [752, 480]
rostopic: /cam1/image_raw

View File

@@ -0,0 +1,123 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if we should use first-estimate Jacobians (enable for good consistency)
use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints
max_cameras: 2
calib_cam_extrinsics: false
calib_cam_intrinsics: true
calib_cam_timeoffset: true
max_clones: 12
max_slam: 50
max_slam_in_update: 25
max_msckf_in_update: 50
dt_slam_delay: 1
gravity_mag: 9.80114
feat_rep_msckf: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: true
zupt_chi2_multipler: 1 # set to 0 for only disp-based
zupt_max_velocity: 0.5
zupt_noise_multiplier: 10
zupt_max_disparity: 0.4 # set to 0 for only imu-based
zupt_only_at_beginning: false
# ==================================================================
# ==================================================================
#init_window_time: 1.0
init_window_time: 2.0
init_imu_thresh: 0.5
init_max_disparity: 1.5
init_max_features: 25
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 0.0 # traj is mostly straight line motion
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-20 # traj is mostly straight line motion
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false
record_timing_filepath: "/tmp/traj_timing.txt"
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 200
fast_threshold: 20
grid_x: 20
grid_y: 20
min_px_dist: 20
knn_ratio: 0.65
track_frequency: 21.0
downsample_cameras: false
multi_threading: true
histogram_method: "CLAHE" # NONE, HISTOGRAM, CLAHE
fi_max_dist: 150
fi_max_baseline: 200
fi_max_cond_number: 20000
fi_triangulate_1d: false
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 2
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 2
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: true
mask0: "../../ov_data/masks/ironsides0.png" #relative to current file
mask1: "../../ov_data/masks/ironsides1.png" #relative to current file
# imu and camera spatial-temporal
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,16 @@
%YAML:1.0
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 0.0027052931930236323 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 1.3054568211204843e-04 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
gyroscope_noise_density: 1.1186830841306218e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 8.997530210630026e-07 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /imu0
time_offset: 0.0
update_rate: 200.0

View File

@@ -0,0 +1,31 @@
%YAML:1.0
cam0:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [0.99992127, -0.0078594, 0.0097819, -0.05845078]
- [0.00784873, 0.99996856, 0.00112822, -0.00728728]
- [-0.00979046, -0.00105136, 0.99995152, 0.0623674]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [-0.03149689493503132, 0.07696336480701078, -0.06608854732019281, 0.019667561645120218]
distortion_model: equidistant
intrinsics: [276.4850207717928, 278.0310503180516, 314.5836189313042, 240.16980920673427] #fu, fv, cu, cv
resolution: [640, 480]
rostopic: /cam0/image_raw
timeshift_cam_imu: 0.00621
cam1:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [0.99995933, 0.00327998, 0.00840069, 0.00793529]
- [-0.00328309, 0.99999455, 0.000356, -0.00716413]
- [-0.00839948, -0.00038357, 0.99996465, 0.06245421]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [-0.02998039058251529, 0.07202819722706337, -0.06178718820631651, 0.017655045017816777]
distortion_model: equidistant
intrinsics: [277.960323846132, 279.4348778432714, 322.404194404853, 236.72685252691352] #fu, fv, cu, cv
resolution: [640, 480]
rostopic: /cam1/image_raw
timeshift_cam_imu: 0.00621

View File

@@ -0,0 +1,128 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if we should use first-estimate Jacobians (enable for good consistency)
use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints
max_cameras: 2
calib_cam_extrinsics: true
calib_cam_intrinsics: true
calib_cam_timeoffset: true
max_clones: 11
max_slam: 50
max_slam_in_update: 25
max_msckf_in_update: 10
dt_slam_delay: 2
gravity_mag: 9.81
feat_rep_msckf: "GLOBAL_3D"
feat_rep_slam: "GLOBAL_3D"
feat_rep_aruco: "GLOBAL_3D"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: false
zupt_chi2_multipler: 0 # set to 0 for only disp-based
zupt_max_velocity: 0.1
zupt_noise_multiplier: 50
zupt_max_disparity: 0.5 # set to 0 for only imu-based
zupt_only_at_beginning: true
# ==================================================================
# ==================================================================
init_window_time: 2.0
init_imu_thresh: 1.0
init_max_disparity: 1.5
init_max_features: 15
init_dyn_mle_opt_calib: false # if we should optimize calibration during initialization (not recommended)
init_dyn_mle_max_iter: 50 # how many iterations the MLE refinement should use (zero to skip the MLE)
init_dyn_mle_max_time: 0.5 # how many seconds the MLE should be completed in
init_dyn_mle_max_threads: 6 # how many threads the MLE should use
init_dyn_num_pose: 6 # number of poses to use within our window time (evenly spaced)
init_dyn_min_deg: 15.0 # orientation change needed to try to init
init_dyn_inflation_ori: 10 # what to inflate the recovered q_GtoI covariance by
init_dyn_inflation_vel: 100 # what to inflate the recovered v_IinG covariance by
init_dyn_inflation_bg: 10 # what to inflate the recovered bias_g covariance by
init_dyn_inflation_ba: 100 # what to inflate the recovered bias_a covariance by
init_dyn_min_rec_cond: 1e-15 # reciprocal condition number thresh for info inversion
init_dyn_bias_g: [0.0, 0.0, 0.0] # initial gyroscope bias guess
init_dyn_bias_a: [0.0, 0.0, 0.0] # initial accelerometer bias guess
# ==================================================================
# ==================================================================
record_timing_information: false
record_timing_filepath: "/tmp/traj_timing.txt"
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 250
fast_threshold: 15
grid_x: 20
grid_y: 20
min_px_dist: 15
knn_ratio: 0.70
track_frequency: 21.0
downsample_cameras: false
multi_threading: false
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 1
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 1
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: false
# imu and camera spatial-temporal
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"
# ==================================================================
# ==================================================================
sim_seed_state_init: 0
sim_seed_preturb: 0
sim_seed_measurements: 0
sim_do_perturbation: false
sim_traj_path: "src/open_vins/ov_data/sim/tum_corridor1_512_16_okvis.txt"
sim_distance_threshold: 1.2
sim_freq_cam: 10
sim_freq_imu: 400
sim_min_feature_gen_dist: 5.0
sim_max_feature_gen_dist: 7.0

View File

@@ -0,0 +1,16 @@
%YAML:1.0
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /imu0
time_offset: 0.0
update_rate: 200.0

View File

@@ -0,0 +1,28 @@
%YAML:1.0
cam0:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975]
- [0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768]
- [-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05]
distortion_model: radtan
intrinsics: [458.654, 457.296, 367.215, 248.375] #fu, fv, cu, cv
resolution: [752, 480]
rostopic: /cam0/image_raw
cam1:
T_imu_cam: #rotation from camera to IMU R_CtoI, position of camera in IMU p_CinI
- [0.0125552670891, -0.999755099723, 0.0182237714554, -0.0198435579556]
- [0.999598781151, 0.0130119051815, 0.0251588363115, 0.0453689425024]
- [-0.0253898008918, 0.0179005838253, 0.999517347078, 0.00786212447038]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [-0.28368365,0.07451284,-0.00010473,-3.55590700e-05]
distortion_model: radtan
intrinsics: [457.587, 456.134, 379.999, 255.238] #fu, fv, cu, cv
resolution: [752, 480]
rostopic: /cam1/image_raw

View File

@@ -0,0 +1,117 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if we should use first-estimate Jacobians (enable for good consistency)
use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints
max_cameras: 2
calib_cam_extrinsics: true
calib_cam_intrinsics: true
calib_cam_timeoffset: true
max_clones: 11
max_slam: 50
max_slam_in_update: 25
max_msckf_in_update: 40
dt_slam_delay: 2
gravity_mag: 9.80766
feat_rep_msckf: "GLOBAL_3D"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: false
zupt_chi2_multipler: 0 # set to 0 for only disp-based
zupt_max_velocity: 0.1
zupt_noise_multiplier: 50
zupt_max_disparity: 2.0 # set to 0 for only imu-based
zupt_only_at_beginning: true
# ==================================================================
# ==================================================================
init_window_time: 2.0
init_imu_thresh: 0.45 # room1-5:0.45, room6:0.25
init_max_disparity: 5.0
init_max_features: 25
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 20.0
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-20
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false
record_timing_filepath: "/tmp/traj_timing.txt"
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 150
fast_threshold: 20
grid_x: 20
grid_y: 20
min_px_dist: 15
knn_ratio: 0.65
track_frequency: 21.0
downsample_cameras: false
multi_threading: true
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 1
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 1
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: true
mask0: "../../ov_data/masks/tumvi0.png" #relative to current file
mask1: "../../ov_data/masks/tumvi1.png" #relative to current file
# imu and camera spatial-temporal
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,16 @@
%YAML:1.0 # need to specify the file type at the top!
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 0.0028 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 0.00086 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
gyroscope_noise_density: 0.00016 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 2.2e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /imu0
time_offset: 0.0
update_rate: 200.0

View File

@@ -0,0 +1,30 @@
%YAML:1.0 # need to specify the file type at the top!
cam0:
T_cam_imu: #rotation from IMU to camera R_ItoC, position of IMU in camera p_IinC
- [-0.9995250378696743, 0.029615343885863205, -0.008522328211654736, 0.04727988224914392]
- [0.0075019185074052044, -0.03439736061393144, -0.9993800792498829, -0.047443232143367084]
- [-0.02989013031643309, -0.998969345370175, 0.03415885127385616, -0.0681999605066297]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [0.0034823894022493434, 0.0007150348452162257, -0.0020532361418706202, 0.00020293673591811182]
distortion_model: equidistant
intrinsics: [190.97847715128717, 190.9733070521226, 254.93170605935475, 256.8974428996504]
resolution: [512, 512]
rostopic: /cam0/image_raw
cam1:
T_cam_imu: #rotation from IMU to camera R_ItoC, position of IMU in camera p_IinC
- [-0.9995110484978581, 0.030299116376600627, -0.0077218830287333565, -0.053697434688869734]
- [0.008104079263822521, 0.012511643720192351, -0.9998888851620987, -0.046131737923635924]
- [-0.030199136245891378, -0.9994625667418545, -0.012751072573940885, -0.07149261284195751]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [0.0034003170790442797, 0.001766278153469831, -0.00266312569781606, 0.0003299517423931039]
distortion_model: equidistant
intrinsics: [190.44236969414825, 190.4344384721956, 252.59949716835982, 254.91723064636983]
resolution: [512, 512]
rostopic: /cam1/image_raw

View File

@@ -0,0 +1,116 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if we should use first-estimate Jacobians (enable for good consistency)
use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints
max_cameras: 2
calib_cam_extrinsics: false
calib_cam_intrinsics: true
calib_cam_timeoffset: true
max_clones: 11
max_slam: 50
max_slam_in_update: 25
max_msckf_in_update: 30
dt_slam_delay: 2
gravity_mag: 9.81
feat_rep_msckf: "GLOBAL_3D"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: true
zupt_chi2_multipler: 0 # set to 0 for only disp-based
zupt_max_velocity: 0.5
zupt_noise_multiplier: 20
zupt_max_disparity: 0.5 # set to 0 for only imu-based
zupt_only_at_beginning: false
# ==================================================================
# ==================================================================
init_window_time: 2.0
init_imu_thresh: 0.0
init_max_disparity: 1.5
init_max_features: 25
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 20.0
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-20
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false
record_timing_filepath: "/tmp/traj_timing.txt"
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 100
fast_threshold: 65
grid_x: 20
grid_y: 20
min_px_dist: 15
knn_ratio: 0.70
track_frequency: 31.0
downsample_cameras: false
multi_threading: true
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 2
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 2
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: false
# imu and camera spatial-temporal
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,20 @@
%YAML:1.0 # need to specify the file type at the top!
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
# accelerometer_noise_density: 0.1 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
# accelerometer_random_walk: 0.002 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
# gyroscope_noise_density: 0.05 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
# gyroscope_random_walk: 4.0e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /snappy_imu
time_offset: 0.0
update_rate: 200.0

View File

@@ -0,0 +1,37 @@
%YAML:1.0 # need to specify the file type at the top!
cam0:
T_cam_imu:
- [-0.028228787368606456, -0.999601488301944, 1.2175294828553618e-05, 0.02172388268966517]
- [0.014401251861751119, -0.00041887083271471837, -0.9998962088597202, -6.605455433829172e-05]
- [0.999497743623523, -0.028225682131089447, 0.014407337010089172, -0.00048817563004522853]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [-0.013721808247486035, 0.020727425669427896, -0.012786476702685545,
0.0025242267320687625]
distortion_model: equidistant
intrinsics: [278.66723066149086, 278.48991409740296, 319.75221200593535, 241.96858910358173]
resolution: [640, 480]
rostopic: /snappy_cam/stereo_l
timeshift_cam_imu: -0.016684572091862235
cam1:
T_cam_imu:
- [-0.011823057800830705, -0.9998701444077991, -0.010950325390841398, -0.057904961033265645]
- [0.011552991631909482, 0.01081376681432078, -0.9998747875767439, 0.00043766687615362694]
- [0.9998633625093938, -0.011948086424720228, 0.011423639621249038, -0.00039944945687402214]
- [0.0, 0.0, 0.0, 1.0]
T_cn_cnm1:
- [0.9998053017199788, 0.011197738450911484, 0.01624713224548414, -0.07961594300469246]
- [-0.011147758116324, 0.9999328574031386, -0.0031635699090552883, 0.0007443452072558462]
- [-0.016281466199246444, 0.00298183486707869, 0.9998630018753686, 0.0004425529195268342]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [-0.008456929295619607, 0.011407590938612062, -0.006951788325762078,
0.0015368127092821786]
distortion_model: equidistant
intrinsics: [277.61640629770613, 277.63749695723294, 314.8944703346039, 236.04310050462587]
resolution: [640, 480]
rostopic: /snappy_cam/stereo_r
timeshift_cam_imu: -0.016591431247074982

View File

@@ -0,0 +1,116 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if we should use first-estimate Jacobians (enable for good consistency)
use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints
max_cameras: 2
calib_cam_extrinsics: false
calib_cam_intrinsics: true
calib_cam_timeoffset: true
max_clones: 11
max_slam: 50
max_slam_in_update: 25
max_msckf_in_update: 30
dt_slam_delay: 2
gravity_mag: 9.81
feat_rep_msckf: "GLOBAL_3D"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: true
zupt_chi2_multipler: 0 # set to 0 for only disp-based
zupt_max_velocity: 0.5
zupt_noise_multiplier: 20
zupt_max_disparity: 0.5 # set to 0 for only imu-based
zupt_only_at_beginning: false
# ==================================================================
# ==================================================================
init_window_time: 2.0
init_imu_thresh: 0.0
init_max_disparity: 1.5
init_max_features: 25
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 20.0
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-20
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false
record_timing_filepath: "/tmp/traj_timing.txt"
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 100
fast_threshold: 65
grid_x: 20
grid_y: 20
min_px_dist: 15
knn_ratio: 0.70
track_frequency: 31.0
downsample_cameras: false
multi_threading: true
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 2
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 2
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: false
# imu and camera spatial-temporal
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,20 @@
%YAML:1.0 # need to specify the file type at the top!
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
# accelerometer_noise_density: 0.1 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
# accelerometer_random_walk: 0.002 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
# gyroscope_noise_density: 0.05 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
# gyroscope_random_walk: 4.0e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /snappy_imu
time_offset: 0.0
update_rate: 200.0

View File

@@ -0,0 +1,37 @@
%YAML:1.0 # need to specify the file type at the top!
cam0:
T_cam_imu:
- [-0.027256691772188965, -0.9996260641688061, 0.0021919370477445077, 0.02422852666805565]
- [-0.7139206120417471, 0.017931469899155242, -0.6999970157716363, 0.008974432843748055]
- [0.6996959571525168, -0.020644471939022302, -0.714142404092339, -0.000638971731537894]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [-6.545154718304953e-06, -0.010379525898159981, 0.014935312423953146,
-0.005639061406567785]
distortion_model: equidistant
intrinsics: [275.46015578667294, 274.9948095922592, 315.958384100568, 242.7123497822731]
resolution: [640, 480]
rostopic: /snappy_cam/stereo_l
timeshift_cam_imu: -0.01484888826656275
cam1:
T_cam_imu:
- [-0.01749277298389329, -0.9997914625864506, -0.010537278233961556, -0.05569997768397372]
- [-0.7090991957246053, 0.019835234209851005, -0.7048296915614142, 0.00884826894411553]
- [0.7048917175822481, -0.004857450265962848, -0.7092982952614942, -0.0019997713120269607]
- [0.0, 0.0, 0.0, 1.0]
T_cn_cnm1:
- [0.9998713028057842, 0.0019367839962957476, 0.015925661468614183, -0.07993259169905036]
- [-0.0020450612045710957, 0.9999748874354272, 0.0067854420026733295, -7.205400304978925e-05]
- [-0.01591211959893364, -0.006817137687752675, 0.9998501543149417, -0.0009141881692325364]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [-0.012138050918285051, 0.02244029339184358, -0.013753165428754275,
0.002725090438517269]
distortion_model: equidistant
intrinsics: [274.4628309070672, 273.9261674470783, 315.93654481793794, 235.779167375461]
resolution: [640, 480]
rostopic: /snappy_cam/stereo_r
timeshift_cam_imu: -0.014950736007814259

View File

@@ -0,0 +1,118 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if we should use first-estimate Jacobians (enable for good consistency)
use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints
max_cameras: 2
calib_cam_extrinsics: false
calib_cam_intrinsics: true
calib_cam_timeoffset: true
max_clones: 11
max_slam: 50
max_slam_in_update: 25
max_msckf_in_update: 30
dt_slam_delay: 2
gravity_mag: 9.81
feat_rep_msckf: "GLOBAL_3D"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: true
zupt_chi2_multipler: 0 # set to 0 for only disp-based
zupt_max_velocity: 0.5
zupt_noise_multiplier: 20
zupt_max_disparity: 0.5 # set to 0 for only imu-based
zupt_only_at_beginning: false
# ==================================================================
# ==================================================================
init_window_time: 2.0
init_imu_thresh: 0.0
init_max_disparity: 1.5
init_max_features: 25
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 20.0
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-20
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false
record_timing_filepath: "/tmp/traj_timing.txt"
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 100
fast_threshold: 65
grid_x: 20
grid_y: 20
min_px_dist: 15
knn_ratio: 0.70
track_frequency: 31.0
downsample_cameras: false
multi_threading: true
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 2
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 2
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: true
mask0: "../../ov_data/masks/uzhfpv_outdoor_mask0.png" #relative to current file
mask1: "../../ov_data/masks/uzhfpv_outdoor_mask1.png" #relative to current file
# imu and camera spatial-temporal
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,20 @@
%YAML:1.0 # need to specify the file type at the top!
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ] ( accel bias diffusion )
gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
# accelerometer_noise_density: 0.1 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
# accelerometer_random_walk: 0.002 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
# gyroscope_noise_density: 0.05 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
# gyroscope_random_walk: 4.0e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /snappy_imu
time_offset: 0.0
update_rate: 200.0

View File

@@ -0,0 +1,37 @@
%YAML:1.0 # need to specify the file type at the top!
cam0:
T_cam_imu:
- [-0.03179778293757218, -0.9994933985910031, -0.001359107523862424, 0.021115239798621798]
- [0.012827844120885779, 0.0009515801497960164, -0.9999172670328424, -0.0008992998316121829]
- [0.9994120008362244, -0.03181258663210035, 0.012791087377928778, -0.009491094814035777]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [-0.005719912631104124, 0.004742449009601135, 0.0012060658036136048,
-0.001580292679344826]
distortion_model: equidistant
intrinsics: [277.4786896484645, 277.42548548840034, 320.1052053576385, 242.10083077857894]
resolution: [640, 480]
rostopic: /snappy_cam/stereo_l
timeshift_cam_imu: -0.007999243205055177
cam1:
T_cam_imu:
- [-0.011450159873389598, -0.9998746482793399, -0.010935335712288774, -0.05828448770624624]
- [0.009171247533644289, 0.010830579777447058, -0.9998992883087583, -0.0002362068202437068]
- [0.999892385238307, -0.01154929737910465, 0.009046086032012068, -0.00947464531803495]
- [0.0, 0.0, 0.0, 1.0]
T_cn_cnm1:
- [0.9997470623689986, 0.009836089265916417, 0.020225296846065624, -0.07919358086270675]
- [-0.00975774768296796, 0.9999445171722606, -0.0039684930755682956, 0.000831414953842084]
- [-0.020263209141547188, 0.0037701359508940783, 0.9997875716521978, 0.00044568632114983057]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [-0.009025009906076716, 0.009967427035376123, -0.0029538969814842117,
-0.0003503551771748748]
distortion_model: equidistant
intrinsics: [276.78679780974477, 276.79332134030807, 314.2862327340746, 236.51313088043128]
resolution: [640, 480]
rostopic: /snappy_cam/stereo_r
timeshift_cam_imu: -0.007983859928063504

View File

@@ -0,0 +1,116 @@
%YAML:1.0 # need to specify the file type at the top!
verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT
use_fej: true # if we should use first-estimate Jacobians (enable for good consistency)
use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it
use_rk4int: true # if rk4 integration should be used (overrides imu averaging)
use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints
max_cameras: 2
calib_cam_extrinsics: false
calib_cam_intrinsics: true
calib_cam_timeoffset: true
max_clones: 11
max_slam: 50
max_slam_in_update: 25
max_msckf_in_update: 30
dt_slam_delay: 2
gravity_mag: 9.81
feat_rep_msckf: "GLOBAL_3D"
feat_rep_slam: "ANCHORED_MSCKF_INVERSE_DEPTH"
feat_rep_aruco: "ANCHORED_MSCKF_INVERSE_DEPTH"
# zero velocity update parameters we can use
# we support either IMU-based or disparity detection.
try_zupt: true
zupt_chi2_multipler: 0 # set to 0 for only disp-based
zupt_max_velocity: 0.5
zupt_noise_multiplier: 20
zupt_max_disparity: 0.5 # set to 0 for only imu-based
zupt_only_at_beginning: false
# ==================================================================
# ==================================================================
init_window_time: 2.0
init_imu_thresh: 0.0
init_max_disparity: 1.5
init_max_features: 25
init_dyn_mle_opt_calib: false
init_dyn_mle_max_iter: 50
init_dyn_mle_max_time: 0.05
init_dyn_mle_max_threads: 6
init_dyn_num_pose: 6
init_dyn_min_deg: 20.0
init_dyn_inflation_ori: 10
init_dyn_inflation_vel: 100
init_dyn_inflation_bg: 10
init_dyn_inflation_ba: 100
init_dyn_min_rec_cond: 1e-20
init_dyn_bias_g: [0.0, 0.0, 0.0]
init_dyn_bias_a: [0.0, 0.0, 0.0]
# ==================================================================
# ==================================================================
record_timing_information: false
record_timing_filepath: "/tmp/traj_timing.txt"
save_total_state: false
filepath_est: "/tmp/ov_estimate.txt"
filepath_std: "/tmp/ov_estimate_std.txt"
filepath_gt: "/tmp/ov_groundtruth.txt"
# ==================================================================
# ==================================================================
# our front-end feature tracking parameters
# we have a KLT and descriptor based (KLT is better implemented...)
use_klt: true
num_pts: 100
fast_threshold: 65
grid_x: 20
grid_y: 20
min_px_dist: 15
knn_ratio: 0.70
track_frequency: 31.0
downsample_cameras: false
multi_threading: true
histogram_method: "HISTOGRAM" # NONE, HISTOGRAM, CLAHE
# aruco tag tracker for the system
# DICT_6X6_1000 from https://chev.me/arucogen/
use_aruco: false
num_aruco: 1024
downsize_aruco: true
# ==================================================================
# ==================================================================
# camera noises and chi-squared threshold multipliers
up_msckf_sigma_px: 2
up_msckf_chi2_multipler: 1
up_slam_sigma_px: 2
up_slam_chi2_multipler: 1
up_aruco_sigma_px: 1
up_aruco_chi2_multipler: 1
# masks for our images
use_mask: false
# imu and camera spatial-temporal
# imu config should also have the correct noise values
relative_config_imu: "kalibr_imu_chain.yaml"
relative_config_imucam: "kalibr_imucam_chain.yaml"

View File

@@ -0,0 +1,20 @@
%YAML:1.0 # need to specify the file type at the top!
imu0:
T_i_b:
- [1.0, 0.0, 0.0, 0.0]
- [0.0, 1.0, 0.0, 0.0]
- [0.0, 0.0, 1.0, 0.0]
- [0.0, 0.0, 0.0, 1.0]
accelerometer_noise_density: 2.0000e-3 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ] ( accel bias diffusion )
gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
# accelerometer_noise_density: 0.1 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" )
# accelerometer_random_walk: 0.002 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion )
# gyroscope_noise_density: 0.05 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" )
# gyroscope_random_walk: 4.0e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion )
model: calibrated
rostopic: /snappy_imu
time_offset: 0.0
update_rate: 200.0

View File

@@ -0,0 +1,37 @@
%YAML:1.0 # need to specify the file type at the top!
cam0:
T_cam_imu:
- [-0.024041523213909927, -0.9996640790624955, 0.009681642096550924, 0.02023430742078562]
- [-0.7184527320882621, 0.010542697330412382, -0.6954958830129113, 0.008311861463499775]
- [0.6951601807615744, -0.023676582632001453, -0.7184648512755534, -0.026628438421085154]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [1]
camera_model: pinhole
distortion_coeffs: [-0.017811595366268803, 0.04897078939103475, -0.041363300782847834,
0.011440891936886532]
distortion_model: equidistant
intrinsics: [275.3385453506587, 275.0852058534152, 315.7697752181792, 233.72625444124952]
resolution: [640, 480]
rostopic: /snappy_cam/stereo_l
timeshift_cam_imu: -0.008637511810764048
cam1:
T_cam_imu:
- [-0.004527750456351745, -0.9999560749011355, -0.008206567133703047, -0.05986676424716047]
- [-0.7208238256076104, 0.008951751262681593, -0.6930605158178762, 0.008989928313050033]
- [0.6931035362139012, 0.0027774840496477826, -0.7208326946456712, -0.026595921269512067]
- [0.0, 0.0, 0.0, 1.0]
T_cn_cnm1:
- [0.9996495696908625, -0.0015816259006504233, 0.026424160845286468, -0.07937720055807065]
- [0.0016709946890799124, 0.9999929578963755, -0.0033603473630593734, 0.0005548331594719445]
- [-0.026418659951183095, 0.0034033244279300045, 0.9996451729434878, 0.0005293439870858398]
- [0.0, 0.0, 0.0, 1.0]
cam_overlaps: [0]
camera_model: pinhole
distortion_coeffs: [0.027860492621377443, -0.027723581962855317, 0.0375199775145906,
-0.018152613898714216]
distortion_model: equidistant
intrinsics: [273.2895238376505, 273.35830490745764, 314.60557378520133, 251.0359907029701]
resolution: [640, 480]
rostopic: /snappy_cam/stereo_r
timeshift_cam_imu: -0.008613446015312496

38
docs/00-page-order.dox Normal file
View File

@@ -0,0 +1,38 @@
/* This is here just to order the pages in some reasonable way. */
/**
@page getting-started Getting Started
@page propagation IMU Propagation Derivations
@page fej First-Estimate Jacobian Estimators
@page update Measurement Update Derivations
@page simulation Visual-Inertial Simulator
@page evaluation System Evaluation
@page dev-welcome Developer and Internals
*/

233
docs/bib/extra.bib Normal file
View File

@@ -0,0 +1,233 @@
@article{Burri2016IJRR,
title = {The EuRoC micro aerial vehicle datasets},
author = {Burri, Michael and Nikolic, Janosch and Gohl, Pascal and Schneider, Thomas and Rehder, Joern and Omari, Sammy and Achtelik, Markus W and Siegwart, Roland},
year = 2016,
journal = {The International Journal of Robotics Research},
publisher = {SAGE Publications Sage UK: London, England},
volume = 35,
number = 10,
pages = {1157--1163}
}
@misc{ceres-solver,
title = {Ceres Solver},
author = {Sameer Agarwal and Keir Mierle and Others},
howpublished = {\url{http://ceres-solver.org}},
url = {http://ceres-solver.org}
}
@inproceedings{Davidson2009ENC,
title = {Improved vehicle positioning in urban environment through integration of GPS and low-cost inertial sensors},
author = {Davidson, Pavel and Hautam{\"a}ki, Jani and Collin, Jussi and Takala, Jarmo},
year = 2009,
booktitle = {Proceedings of the European Navigation Conference (ENC), Naples, Italy},
pages = {3--6},
url = {http://www.tkt.cs.tut.fi/research/nappo_files/1_C2.pdf}
}
@inproceedings{Delmerico2018ICRA,
title = {A benchmark comparison of monocular visual-inertial odometry algorithms for flying robots},
author = {Delmerico, Jeffrey and Scaramuzza, Davide},
year = 2018,
booktitle = {2018 IEEE International Conference on Robotics and Automation (ICRA)},
pages = {2502--2509},
url = {http://rpg.ifi.uzh.ch/docs/ICRA18_Delmerico.pdf},
organization = {IEEE}
}
@inproceedings{Delmerico2019ICRA,
title = {Are we ready for autonomous drone racing? the UZH-FPV drone racing dataset},
author = {Delmerico, Jeffrey and Cieslewski, Titus and Rebecq, Henri and Faessler, Matthias and Scaramuzza, Davide},
year = 2019,
booktitle = {2019 International Conference on Robotics and Automation (ICRA)},
pages = {6713--6719},
url = {http://rpg.ifi.uzh.ch/docs/ICRA19_Delmerico.pdf},
organization = {IEEE}
}
@inproceedings{Dong2012IROS,
title = {Estimator initialization in vision-aided inertial navigation with unknown camera-IMU calibration},
author = {Dong-Si, Tue-Cuong and Mourikis, Anastasios I},
year = 2012,
booktitle = {2012 IEEE/RSJ International Conference on Intelligent Robots and Systems},
pages = {1064--1071},
organization = {IEEE}
}
@article{Eckenhoff2019IJRR,
title = {Closed-form preintegration methods for graph-based visual-inertial navigation},
author = {Kevin Eckenhoff and Patrick Geneva and Guoquan Huang},
year = 2019,
journal = {International Journal of Robotics Research},
volume = 38,
number = 5,
doi = {10.1177/0278364919835021},
url = {https://doi.org/10.1177/0278364919835021}
}
@inproceedings{Furgale2013IROS,
title = {Unified temporal and spatial calibration for multi-sensor systems},
author = {Furgale, Paul and Rehder, Joern and Siegwart, Roland},
year = 2013,
booktitle = {2013 IEEE/RSJ International Conference on Intelligent Robots and Systems},
pages = {1280--1286},
organization = {IEEE}
}
@article{Huang2010IJRR,
title = {Observability-based rules for designing consistent EKF SLAM estimators},
author = {Huang, Guoquan P and Mourikis, Anastasios I and Roumeliotis, Stergios I},
year = 2010,
journal = {The International Journal of Robotics Research},
publisher = {SAGE Publications Sage UK: London, England},
volume = 29,
number = 5,
pages = {502--528},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.228.5851&rep=rep1&type=pdf}
}
@article{Jeon2021RAL,
title = {Run your visual-inertial odometry on NVIDIA Jetson: Benchmark tests on a micro aerial vehicle},
author = {Jeon, Jinwoo and Jung, Sungwook and Lee, Eungchang and Choi, Duckyu and Myung, Hyun},
year = 2021,
journal = {IEEE Robotics and Automation Letters},
publisher = {IEEE},
volume = 6,
number = 3,
pages = {5332--5339}
}
@article{Jeong2019IJRR,
title = {Complex urban dataset with multi-level sensors from highly diverse urban environments},
author = {Jeong, Jinyong and Cho, Younggun and Shin, Young-Sik and Roh, Hyunchul and Kim, Ayoung},
year = 2019,
journal = {The International Journal of Robotics Research},
publisher = {SAGE Publications Sage UK: London, England},
volume = 38,
number = 6,
pages = {642--657}
}
@book{Kay1993,
title = {Fundamentals of statistical signal processing},
author = {Kay, Steven M},
year = 1993,
publisher = {Prentice Hall PTR},
url = {http://users.isr.ist.utl.pt/~pjcro/temp/Fundamentals\%20Of\%20Statistical\%20Signal\%20Processing\%2D\%2DEstimation\%20Theory-Kay.pdf}
}
@inproceedings{Lee2020IROS,
title = {Visual-inertial-wheel odometry with online calibration},
author = {Lee, Woosik and Eckenhoff, Kevin and Yang, Yulin and Geneva, Patrick and Huang, Guoquan},
year = 2020,
booktitle = {2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {4559--4566},
url = {http://udel.edu/~ghuang/papers/tr_wheel-vio.pdf},
organization = {IEEE}
}
@article{Li2013IJRR,
title = {High-precision, consistent EKF-based visual-inertial odometry},
author = {Li, Mingyang and Mourikis, Anastasios I},
year = 2013,
journal = {The International Journal of Robotics Research},
publisher = {SAGE Publications Sage UK: London, England},
volume = 32,
number = 6,
pages = {690--711}
}
@phdthesis{Li2014THESIS,
title = {Visual-inertial odometry on resource-constrained systems},
author = {Li, Mingyang},
year = 2014,
url = {https://escholarship.org/uc/item/4nn0j264},
school = {UC Riverside}
}
@book{Maybeck1982STOC,
title = {Stochastic models, estimation, and control},
author = {Maybeck, Peter S},
year = 1982,
publisher = {Academic press},
volume = 3,
url = {https://books.google.com/books?id=L_YVMUJKNQUC}
}
@inproceedings{Mourikis2007ICRA,
title = {A multi-state constraint Kalman filter for vision-aided inertial navigation},
author = {Mourikis, Anastasios I and Roumeliotis, Stergios I},
year = 2007,
booktitle = {Proceedings 2007 IEEE International Conference on Robotics and Automation},
pages = {3565--3572},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.437.1085&rep=rep1&type=pdf},
organization = {IEEE}
}
@article{Mueggler2018TRO,
title = {Continuous-Time Visual-Inertial Odometry for Event Cameras},
author = {E. Mueggler and G. Gallego and H. Rebecq and D. Scaramuzza},
year = 2018,
journal = {IEEE Transactions on Robotics},
pages = {1--16},
url = {http://rpg.ifi.uzh.ch/docs/TRO18_Mueggler.pdf}
}
@article{Patron2015IJCV,
title = {A spline-based trajectory representation for sensor fusion and rolling shutter cameras},
author = {Patron-Perez, Alonso and Lovegrove, Steven and Sibley, Gabe},
year = 2015,
journal = {International Journal of Computer Vision},
publisher = {Springer},
volume = 113,
number = 3,
pages = {208--219}
}
@article{Qin2018TRO,
title = {Vins-mono: A robust and versatile monocular visual-inertial state estimator},
author = {Qin, Tong and Li, Peiliang and Shen, Shaojie},
year = 2018,
journal = {IEEE Transactions on Robotics},
publisher = {IEEE},
volume = 34,
number = 4,
pages = {1004--1020},
url = {https://arxiv.org/pdf/1708.03852.pdf}
}
@article{Ramanandan2011TITS,
title = {Inertial navigation aiding by stationary updates},
author = {Ramanandan, Arvind and Chen, Anning and Farrell, Jay A},
year = 2011,
journal = {IEEE Transactions on Intelligent Transportation Systems},
publisher = {IEEE},
volume = 13,
number = 1,
pages = {235--248}
}
@inproceedings{Schubert2018IROS,
title = {The TUM VI benchmark for evaluating visual-inertial odometry},
author = {Schubert, David and Goll, Thore and Demmel, Nikolaus and Usenko, Vladyslav and St{\"u}ckler, J{\"o}rg and Cremers, Daniel},
year = 2018,
booktitle = {2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {1680--1687},
url = {https://arxiv.org/pdf/1804.06120.pdf},
organization = {IEEE}
}
@article{Trawny2005TR,
title = {Indirect Kalman filter for 3D attitude estimation},
author = {Trawny, Nikolas and Roumeliotis, Stergios I},
year = 2005,
journal = {University of Minnesota, Dept. of Comp. Sci. \& Eng., Tech. Rep},
volume = 2,
pages = 2005,
url = {http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf}
}
@inproceedings{Wagstaff2017IPIN,
title = {Improving foot-mounted inertial navigation through real-time motion classification},
author = {Wagstaff, Brandon and Peretroukhin, Valentin and Kelly, Jonathan},
year = 2017,
booktitle = {2017 International Conference on Indoor Positioning and Indoor Navigation (IPIN)},
pages = {1--8},
url = {https://arxiv.org/pdf/1707.01152.pdf},
organization = {IEEE}
}
@inproceedings{Wu2017ICRA,
title = {Vins on wheels},
author = {Wu, Kejian J and Guo, Chao X and Georgiou, Georgios and Roumeliotis, Stergios I},
year = 2017,
booktitle = {2017 IEEE International Conference on Robotics and Automation (ICRA)},
pages = {5155--5162},
url = {http://mars.cs.umn.edu/papers/KejianWu_VINSonWheels.pdf},
organization = {IEEE}
}
@inproceedings{Zhang2018IROS,
title = {A tutorial on quantitative trajectory evaluation for visual(-inertial) odometry},
author = {Zhang, Zichao and Scaramuzza, Davide},
year = 2018,
booktitle = {2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {7244--7251},
url = {http://rpg.ifi.uzh.ch/docs/IROS18_Zhang.pdf},
organization = {IEEE}
}

45
docs/css/custom.css Normal file
View File

@@ -0,0 +1,45 @@
/* don't indent each paragraph since we frequently use equations */
main p {
text-indent: 0rem !important;
}
/* if we have two paragraphs after each other, then indent */
main p + p {
text-indent: 1.5rem !important;
}
/* if we have paragraph after header, then indent */
main h1 + p, h2 + p, h3 + p {
text-indent: 1.5rem !important;
}
/* math equations by default have some extra space at the top */
/*div .m-math {*/
/* margin-top: -1.5rem;*/
/*}*/
/* handle youtube images on the readme page */
a .m-image {
display: inline !important;
}
/* do not lowercase the title of the project */
body > header > nav a#m-navbar-brand, body > header > nav #m-navbar-brand a {
text-transform: none;
}
/* REMOVE BAD M.CSS THEME SETTING: */
/* light grey text is hard to see */
.m-doc-template, dl.m-doc dd, ul.m-doc li > span.m-doc {
color: #1a1a1a;
}
.m-doc-template a, dl.m-doc dd a, ul.m-doc li > span.m-doc a {
color: #1a1a1a;
}
.m-block.m-dim, .m-text.m-dim, .m-label.m-flat.m-dim {
color: #1a1a1a;
}

File diff suppressed because it is too large Load Diff

66
docs/dev-coding-style.dox Normal file
View File

@@ -0,0 +1,66 @@
/**
@page dev-coding-style Coding Style Guide
@section coding-style-overview Overview
Please note that if you have a good excuse to either break the rules or modify
them, feel free to do it (and update this guide accordingly, if appropriate).
Nothing is worse than a rule that hurts productivity instead of improving it.
In general, the main aim of this style is:
- Vertical and horizontal compression, fitting more code on a screen while keeping the code readable.
- Do not need to enforce line wrapping if clarity is impacted (i.e. Jacobians)
- Consistent indentation and formatting rules to ensure readability (4 space tabbing)
The codebase is coded in [snake_case](https://en.wikipedia.org/wiki/Snake_case) with accessor and getter function for most classes (there are a few exceptions).
We leverage the [Doxygen](http://www.doxygen.nl/) documentation system with a post-processing script from [m.css](https://mcss.mosra.cz/documentation/doxygen/).
Please see @ref dev-docs for details on how our documentation is generated.
All functions should be commented both internally and externally with a focus on being as clear to a developer or user that is reading the code or documentation website.
@section coding-style-naming Naming Conventions
We have particular naming style for our orientations and positions that should be consistent throughout the project.
In general rotation matrices are the only exception of a variable that starts with a capital letter.
Both position and orientation variables should contain the frame of references.
@code{.cpp}
Eigen::Matrix<double,3,3> R_ItoC; //=> SO(3) rotation from IMU to camera frame
Eigen::Matrix<double,4,1> q_ItoC; //=> JPL quaternion rot from IMU to the camera
Eigen::Vector3d p_IinC; //=> 3d position of the IMU frame in the camera frame
Eigen::Vector3d p_FinG; //=> position of feature F in the global frame G
@endcode
@section coding-style-printing Print Statements
The code uses a simple print statement level logic that allows the user to enable and disable the verbosity.
In general the user can specify the following (see [ov_core/src/utils/print.h](ov_core/src/utils/print.h) file for details):
- PrintLevel::ALL : All PRINT_XXXX will output to the console
- PrintLevel::DEBUG : "DEBUG", "INFO", "WARNING" and "ERROR" will be printed. "ALL" will be silenced
- PrintLevel::INFO : "INFO", "WARNING" and "ERROR" will be printed. "ALL" and "DEBUG" will be silenced
- PrintLevel::WARNING : "WARNING" and "ERROR" will be printed. "ALL", "DEBUG" and "INFO" will be silenced
- PrintLevel::ERROR : Only "ERROR" will be printed. All the rest are silenced
- PrintLevel::SILENT : All PRINT_XXXX will be silenced.
To use these, you can specify the following using the [printf](https://www.cplusplus.com/reference/cstdio/printf/) standard input logic.
A user can also specify colors (see [ov_core/src/utils/colors.h](ov_core/src/utils/colors.h) file for details):
@code{.cpp}
PRINT_ALL("the value is %.2f\n", variable);
PRINT_DEBUG("the value is %.2f\n", variable);
PRINT_INFO("the value is %.2f\n", variable);
PRINT_WARNING("the value is %.2f\n", variable);
PRINT_ERROR(RED "the value is %.2f\n" RESET, variable);
@endcode
*/

224
docs/dev-docker.dox Normal file
View File

@@ -0,0 +1,224 @@
/**
@page dev-docker Building with Docker
@tableofcontents
@section dev-docker-install Installing Docker
This will differ on which operating system you have installed, this guide is for linux-based systems.
Please take a look at the official Docker [Get Docker](https://docs.docker.com/get-docker/) guide.
There is also a guide from ROS called [getting started with ROS and Docker](http://wiki.ros.org/docker/Tutorials/Docker).
On Ubuntu one should be able to do the following to get docker:
@code{.shell-session}
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io
@endcode
From there we can install [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-docker) to allow for the docker to use our GPU and for easy GUI pass through.
You might also want to check out [this](https://roboticseabass.wordpress.com/2021/04/21/docker-and-ros/) blogpost for some more details.
@code{.shell-session}
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \
&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt-get update
sudo apt-get install -y nvidia-docker2
sudo systemctl restart docker
sudo docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi #to verify install
@endcode
From this point we should be able to "test" that everything is working ok.
First on the host machine we need to allow for x11 windows to connect.
@code{.shell-session}
xhost +
@endcode
We can now run the following command which should open gazebo GUI on your main desktop window.
@code{.shell-session}
docker run -it --net=host --gpus all \
--env="NVIDIA_DRIVER_CAPABILITIES=all" \
--env="DISPLAY" \
--env="QT_X11_NO_MITSHM=1" \
--volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \
osrf/ros:noetic-desktop-full \
bash -it -c "roslaunch gazebo_ros empty_world.launch"
@endcode
Alternatively we can launch directly into a bash shell and run commands from in there.
This basically gives you a terminal in the docker container.
@code{.shell-session}
docker run -it --net=host --gpus all \
--env="NVIDIA_DRIVER_CAPABILITIES=all" \
--env="DISPLAY" \
--env="QT_X11_NO_MITSHM=1" \
--volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \
osrf/ros:noetic-desktop-full \
bash
rviz # you should be able to launch rviz once in bash
@endcode
@section dev-docker-openvins Running OpenVINS with Docker
Clone the OpenVINS repository, build the container and then launch it.
The [Dockerfile](https://docs.docker.com/engine/reference/builder/) will not build the repo by default, thus you will need to build the project.
We have a few docker files for each version of ROS and operating system we support.
In the following we will use the [Dockerfile_ros1_20_04](https://github.com/rpng/open_vins/blob/master/Dockerfile_ros1_20_04) which is for a ROS1 install with a 20.04 system.
@m_class{m-block m-warning}
@par Use a Workspace Directory Mount
Here it is important to note that we are going to create a dedicated ROS *workspace* which will then be mounted into the docker container.
Thus if you are going to develop packages alongside OpenVINS you would make sure you have cloned your source code into the same workspace.
The workspace local folder will be mounted to `/catkin_ws/` in the docker, thus all changes from the host are mirrored.
@code{.shell-session}
mkdir -p ~/workspace/catkin_ws_ov/src
cd ~/workspace/catkin_ws_ov/src
git clone https://github.com/rpng/open_vins.git
cd open_vins
export VERSION=ros1_20_04 # which docker file version you want (ROS1 vs ROS2 and ubuntu version)
docker build -t ov_$VERSION -f Dockerfile_$VERSION .
@endcode
If the dockerfile breaks, you can remove the image and reinstall using the following:
@code{.shell-session}
docker image list
docker image rm ov_ros1_20_04 --force
@endcode
From here it is a good idea to create a nice helper command which will launch the docker and also pass the GUI to your host machine.
Here you can append it to the bottom of the ~/.bashrc so that we always have it on startup or just run the two commands on each restart
@m_class{m-block m-warning}
@par Directory Binding
You will need to specify *absolute directory paths* to the workspace and dataset folders on the host you want to bind.
Bind mounts are used to ensure that the host directory is directly used and all edits made on the host are sync'ed with
the docker container. See the docker [bind mounts](https://docs.docker.com/storage/bind-mounts/) documentation. You
can add and remove mounts from this command as you see the need.
@code{.shell-session}
nano ~/.bashrc # add to the bashrc file
xhost + &> /dev/null
export DOCKER_CATKINWS=/home/username/workspace/catkin_ws_ov
export DOCKER_DATASETS=/home/username/datasets
alias ov_docker="docker run -it --net=host --gpus all \
--env=\"NVIDIA_DRIVER_CAPABILITIES=all\" --env=\"DISPLAY\" \
--env=\"QT_X11_NO_MITSHM=1\" --volume=\"/tmp/.X11-unix:/tmp/.X11-unix:rw\" \
--mount type=bind,source=$DOCKER_CATKINWS,target=/catkin_ws \
--mount type=bind,source=$DOCKER_DATASETS,target=/datasets $1"
source ~/.bashrc # after you save and exit
@endcode
Now we can launch RVIZ and also compile the OpenVINS codebase.
From two different terminals on the host machine one can run the following (ROS 1):
@code{.shell-session}
ov_docker ov_ros1_20_04 roscore
ov_docker ov_ros1_20_04 rosrun rviz rviz -d /catkin_ws/src/open_vins/ov_msckf/launch/display.rviz
@endcode
To actually get a bash environment that we can use to build and run things with we can do the following.
Note that any install or changes to operating system variables will not persist, thus only edit within your workspace which is linked as a volume.
@code{.shell-session}
ov_docker ov_ros1_20_04 bash
@endcode
Now once inside the docker with the bash shell we can build and launch an example simulation:
@code{.shell-session}
cd catkin_ws
catkin build
source devel/setup.bash
rosrun ov_eval plot_trajectories none src/open_vins/ov_data/sim/udel_gore.txt
roslaunch ov_msckf simulation.launch
@endcode
And a version for ROS 2 we can do the following:
@code{.shell-session}
cd catkin_ws
colcon build --event-handlers console_cohesion+
source install/setup.bash
ros2 run ov_eval plot_trajectories none src/open_vins/ov_data/sim/udel_gore.txt
ros2 run ov_msckf run_simulation src/open_vins/config/rpng_sim/estimator_config.yaml
@endcode
@m_class{m-block m-danger}
@par Real-time Performance
On my machine, running inside of the docker container is not real-time in nature. I am not sure why this is the case;
if someone knows whether something is set up incorrectly, please open a GitHub issue. Thus it is recommended to only use
the "serial" nodes which allows for the same parameters to be used as when installing directly on an OS.
@section dev-docker-clion Using Jetbrains Clion and Docker
@image html clion_docker.png width=100%
Jetbrains provides some instructions on their site and a YouTube video.
Basically, Clion needs to be configured to use an external compile service and this service
needs to be exposed from the docker container.
I still recommend users compile with `catkin build` directly in the docker, but this will allow for debugging and syntax insights.
- https://blog.jetbrains.com/clion/2020/01/using-docker-with-clion/
- https://www.youtube.com/watch?v=h69XLiMtCT8
After building the OpenVINS image (as above) we can do the following which will start a detached process in the docker.
This process will allow us to connect Clion to it.
@code{.shell-session}
export DOCKER_CATKINWS=/home/username/workspace/catkin_ws_ov # NOTE: should already be set in your bashrc
export DOCKER_DATASETS=/home/username/datasets # NOTE: should already be set in your bashrc
docker run -d --cap-add sys_ptrace -p127.0.0.1:2222:22 \
--mount type=bind,source=$DOCKER_CATKINWS,target=/catkin_ws \
--mount type=bind,source=$DOCKER_DATASETS,target=/datasets \
--name clion_remote_env ov_ros1_20_04
@endcode
We can now change Clion to use the docker remote:
1. In short, you should add a new Toolchain entry in settings under Build, Execution, Deployment as a Remote Host type.
2. Click in the Credentials section and fill out the SSH credentials we set-up in the Dockerfile
- Host: localhost
- Port: 2222
- Username: user
- Password: password
- CMake: /usr/local/bin/cmake
3. Make sure the found CMake is the custom one installed and not the system one (greater than 3.12)
4. Add a CMake profile that uses this toolchain and you're done.
5. Change build target to be this new CMake profile (optionally just edit / delete the default)
To add support for ROS you will need to manually set environmental variables in the CMake profile.
These were generated by going into the ROS workspace, building a package, and then looking at `printenv` output.
It should be under `Settings > Build,Execution,Deployment > CMake > (your profile) > Environment`.
This might be a brittle method, but not sure what else to do... (also see [this](https://www.allaban.me/posts/2020/08/ros2-setup-ide-docker/) blog post).
You will need to edit the ROS version (`noetic` is used below) to fit whatever docker container you are using.
@code{.shell-session}
LD_PATH_LIB=/catkin_ws/devel/lib:/opt/ros/noetic/lib;PYTHON_EXECUTABLE=/usr/bin/python3;PYTHON_INCLUDE_DIR=/usr/include/python3.8;ROS_VERSION=1;CMAKE_PREFIX_PATH=/catkin_ws/devel:/opt/ros/noetic;LD_LIBRARY_PATH=/catkin_ws/devel/lib:/opt/ros/noetic/lib;PATH=/opt/ros/noetic/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin;PKG_CONFIG_PATH=/catkin_ws/devel/lib/pkgconfig:/opt/ros/noetic/lib/pkgconfig;PYTHONPATH=/opt/ros/noetic/lib/python3/dist-packages;ROSLISP_PACKAGE_DIRECTORIES=/catkin_ws/devel/share/common-lisp;ROS_PACKAGE_PATH=/catkin_ws/src/open_vins/ov_core:/catkin_ws/src/open_vins/ov_data:/catkin_ws/src/open_vins/ov_eval:/catkin_ws/src/open_vins/ov_msckf:/opt/ros/noetic/share
@endcode
When you build in Clion you should see in `docker stats` that the `clion_remote_env` is building the files and maxing out the CPU during this process.
Clion should send the source files to the remote server and then on build should build and run it remotely within the docker container.
A user might also want to edit `Build,Execution,Deployment > Deployment` settings to exclude certain folders from copying over.
See this [jetbrains documentation page](https://www.jetbrains.com/help/clion/remote-projects-support.html) for more details.
*/

98
docs/dev-docs.dox Normal file
View File

@@ -0,0 +1,98 @@
/**
@page dev-docs Documentation Guide
@section developers-pdfmanual Building the Latex PDF Reference Manual
1. You will need to install doxygen and texlive with all packages
- `sudo apt-get update`
- `sudo apt-get install doxygen texlive-full`
- You will likely need new version of [doxygen](https://stackoverflow.com/a/50986638/7718197) 1.9.4 to fix ghostscript errors
2. Go into the project folder and generate latex files
- `cd open_vins/`
- `doxygen`
- This should run, and will stop if there are any latex errors
- On my Ubuntu 20.04 this installs version "2019.20200218"
3. Generate a PDF from the latex files
- `cd doxgen_generated/latex/`
- `make`
- File should be called `refman.pdf`
@section developers-addpage Adding a Website Page
1. Add a `doc/pagename.dox` file, copy up-to-date license header.
2. Add a @c \@page definition and title to your page
3. If the page is top-level, list it in `doc/00-page-order.dox` to ensure it
gets listed at a proper place
4. If the page is not top-level, list it using @c \@subpage in its parent page
5. Leverage @c \@section and @c \@subsection to separate the page
@section latex Math / Latex Equations
- More details on how to format your documentation can be found here:
- http://www.doxygen.nl/manual/formulas.html
- https://mcss.mosra.cz/css/components/#math
- https://mcss.mosra.cz/css/components/
- Use the inline commands for latex `\f $ <formula_here> \f $` (no space between f and $)
- Use block to have equation centered on page `\f [ <big_formula_here> \f ]` (no space between f and [])
@section developers-building Building the Documentation Site
1. Clone the m.css repository which has scripts to build
- Ensure that it is **not** in the same folder as your open_vins
- `git clone https://github.com/mosra/m.css`
- This repository contains the script that will generate our documentation
2. You will need to install python3.6
- `sudo add-apt-repository ppa:deadsnakes/ppa`
- `sudo apt-get update`
- `sudo apt-get install python3.6`
- `curl https://bootstrap.pypa.io/get-pip.py | sudo python3.6`
- `sudo -H pip3.6 install jinja2 Pygments`
- `sudo apt install texlive-base texlive-latex-extra texlive-fonts-extra texlive-fonts-recommended`
3. To use the bibtex citing you need to have
- bibtex executable in search path
- perl executable in search path
- http://www.doxygen.nl/manual/commands.html#cmdcite
4. Go into the documentation folder and build
- `cd m.css/documentation/`
- `python3.6 doxygen.py <path_to_Doxyfile-mcss>`
5. If you run into errors, ensure your python path is set to use the 3.6 libraries
- ` export PYTHONPATH=/usr/local/lib/python3.6/dist-packages/`
6. You might need to comment out the `enable_async=True` flag in the doxygen.py file
7. This should then build the documentation website
8. Open the html page `/open_vins/doxgen_generated/html/index.html` to view documentation
@section developers-theme Custom m.css Theme
1. This is based on the [m.css docs](https://mcss.mosra.cz/css/themes/#make-your-own) for custom theme
2. Files needed are in `open_vins/docs/mcss_theme/`
3. Copy the following files into the `m.css/css/` folder
- m-theme-udel.css
- pygments-tango.css
- m-udel.css
4. Most settings are in the `m-theme-udel.css` file
5. To generate / compile the edited theme do:
- `python3.6 postprocess.py m-udel.css m-documentation.css -o m-udel+documentation.compiled.css`
- Copy this generated file into `open_vins/docs/css/`
- Regenerate the website and it should change the theme
@section developers-figures Creating Figures
- One of the editors we use is [IPE](http://ipe.otfried.org/) which is available on different platforms
- We use the ubuntu 16.04 version 7.1.10
- Create your figure in IPE then save it to disk (i.e. should have a `file.ipe`)
- Use the command line utility `iperender` to convert into a svg
- `iperender -svg -transparent file.ipe file.svg`
*/

59
docs/dev-index.dox Normal file
View File

@@ -0,0 +1,59 @@
/**
@page dev-index Covariance Index Internals
@section dev-index-types Type System
The type system that the ov_msckf leverages was inspired by graph-based optimization frameworks such as [Georgia Tech Smoothing and Mapping library (GTSAM)](https://github.com/borglab/gtsam).
As compared to manually knowing where in the covariance state elements are, the interaction is abstracted away from the user and is replaced by a straight forward way to access the desired elements.
The ov_msckf::State is owner of these types and thus after creation one should not delete these externally.
@code{.cpp}
class Type {
protected:
// Current best estimate
Eigen::MatrixXd _value;
// Location of error state in covariance
int _id = -1;
// Dimension of error state
int _size = -1;
};
@endcode
A type is defined by its location in its covariance, its current estimate and its error state size.
The current value does not have to be a vector, but could be a matrix in the case of an SO(3) rotation group type object.
The error state needs to be a vector and thus a type will need to define the mapping between this error state and its manifold representation.
@section dev-index-update Type-based EKF Update
To show the power of this type-based indexing system, we will go through how we compute the EKF update.
The specific method we will be looking at is the @ref ov_msckf::StateHelper::EKFUpdate() which takes in the state, vector of types, Jacobian, residual, and measurement covariance.
As compared to passing a Jacobian matrix that is the full size of state wide, we instead leverage this type system by passing a "small" Jacobian that has only the state elements that the measurements are a function of.
For example, if we have a global 3D SLAM feature update (implemented in @ref ov_msckf::UpdaterSLAM) our Jacobian will only be a function of the newest clone and the feature.
This means that our Jacobian is only of size 9 as compared to the size our state.
In addition to the matrix containing the Jacobian elements, we need to know what order this Jacobian is in, thus we pass a vector of types which correspond to the state elements our Jacobian is a function of.
Thus in our example, it would contain two types: our newest clone @ref ov_type::PoseJPL and current landmark feature @ref ov_type::Landmark with our Jacobian being the type size of the pose plus the type size of the landmark in width.
*/

157
docs/dev-profiling.dox Normal file
View File

@@ -0,0 +1,157 @@
/**
@page dev-profiling System Profiling
@tableofcontents
@section dev-profiling-compute Profiling Processing Time
One way (besides inserting timing statements into the code) is to leverage a profiler such as [valgrind](https://www.valgrind.org/).
This tool allows for recording of the call stack of the system.
To use this with a ROS node, we can do the following (based on [this](http://wiki.ros.org/roslaunch/Tutorials/Roslaunch%20Nodes%20in%20Valgrind%20or%20GDB) guide):
- Edit `roslaunch ov_msckf pgeneva_serial_eth.launch` launch file
- Append `launch-prefix="valgrind --tool=callgrind --callgrind-out-file=/tmp/callgrind.txt"` to your ROS node. This will cause the node to run with valgrind.
- Change the bag length to be only 10 or so seconds (since profiling is slow)
@code{.shell-session}
sudo apt install valgrind
roslaunch ov_msckf pgeneva_serial_eth.launch
@endcode
After running the profiling program we will want to visualize it.
There are some good tools for that, specifically we are using [gprof2dot](https://github.com/jrfonseca/gprof2dot) and [xdot.py](https://github.com/jrfonseca/xdot.py).
First we will post-process it into a xdot graph format and then visualize it for inspection.
@image html example_callgrind.png width=80%
@code{.shell-session}
// install viz programs
apt-get install python3 graphviz
apt-get install gir1.2-gtk-3.0 python3-gi python3-gi-cairo graphviz
pip install gprof2dot xdot
// actually process and then viz call file
gprof2dot --format callgrind --strip /tmp/callgrind.txt --output /tmp/callgrind.xdot
xdot /tmp/callgrind.xdot
@endcode
@section dev-profiling-leaks Memory Leaks
One can leverage a profiler such as [valgrind](https://www.valgrind.org/) to perform memory leak check of the codebase.
Ensure you have installed the `valgrind` package (see above).
We can change the node launch file as follows:
- Edit `roslaunch ov_msckf pgeneva_serial_eth.launch` launch file
- Append `launch-prefix="valgrind --tool=memcheck --leak-check=yes"` to your ROS node. This will cause the node to run with valgrind.
- Change the bag length to be only 10 or so seconds (since profiling is slow)
This [page](https://web.stanford.edu/class/archive/cs/cs107/cs107.1206/resources/valgrind.html) has some nice support material for FAQ.
An example loss is shown below which was found by memcheck.
@code{.text}
==5512== 1,578,860 (24 direct, 1,578,836 indirect) bytes in 1 blocks are definitely lost in loss record 6,585 of 6,589
==5512== at 0x4C3017F: operator new(unsigned long) (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
....
==5512== by 0x543F868: operator[] (unordered_map.h:973)
==5512== by 0x543F868: ov_core::TrackKLT::feed_stereo(double, cv::Mat&, cv::Mat&, unsigned long, unsigned long) (TrackKLT.cpp:165)
==5512== by 0x4EF8C52: ov_msckf::VioManager::feed_measurement_stereo(double, cv::Mat&, cv::Mat&, unsigned long, unsigned long) (VioManager.cpp:245)
==5512== by 0x1238A9: main (ros_serial_msckf.cpp:247)
@endcode
@section dev-profiling-compiler Compiler Profiling
Here is a small guide on how to perform compiler profiling for building of the codebase.
This should be used to try to minimize compile times which in general hurt developer productivity.
It is recommended to read the following pages, of which this is a condensed form:
- https://aras-p.info/blog/2019/01/16/time-trace-timeline-flame-chart-profiler-for-Clang/
- https://aras-p.info/blog/2019/09/28/Clang-Build-Analyzer/
First we need to ensure we have a compiler that can profile the build time.
Clang greater than 9 should work, but we have only tested with 11.
We can get the [latest Clang](https://apt.llvm.org/) by using the follow auto-install script:
@code{.shell-session}
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
export CC=/usr/bin/clang-11
export CXX=/usr/bin/clang++-11
@endcode
We then need to clone the analyzer repository, which allows for summary generation.
@code{.shell-session}
git clone https://github.com/aras-p/ClangBuildAnalyzer
cd ClangBuildAnalyzer
cmake . && make
@endcode
We can finally build our ROS package and time how long it takes.
Note that we are using [catkin tools](https://catkin-tools.readthedocs.io/en/latest/) to build here.
The prefix *CBA* means to run the command in the ClangBuildAnalyzer repository clone folder.
While the prefix *WS* means run in the root of your ROS workspace.
@code{.shell-session}
(WS) cd ~/workspace/
(WS) catkin clean -y && mkdir build
(CBA) ./ClangBuildAnalyzer --start ~/workspace/build/
(WS) export CC=/usr/bin/clang-11 && export CXX=/usr/bin/clang++-11
(WS) catkin build ov_msckf -DCMAKE_CXX_FLAGS="-ftime-trace"
(CBA) ./ClangBuildAnalyzer --stop ~/workspace/build/ capture_file.bin
(CBA) ./ClangBuildAnalyzer --analyze capture_file.bin > timing_results.txt
@endcode
The `time-trace` flag should generate a bunch of .json files in your build folder.
These can be opened in your chrome browser `chrome://tracing` for viewing.
In general the ClangBuildAnalyzer is more useful for finding what files take long.
An example output of what is generated in the timing_results.txt file is:
@code{.text}
Analyzing build trace from 'capture_file.bin'...
**** Time summary:
Compilation (86 times):
Parsing (frontend): 313.9 s
Codegen & opts (backend): 222.9 s
**** Files that took longest to parse (compiler frontend):
13139 ms: /build//ov_msckf/CMakeFiles/ov_msckf_lib.dir/src/update/UpdaterSLAM.cpp.o
12843 ms: /build//ov_msckf/CMakeFiles/run_serial_msckf.dir/src/ros_serial_msckf.cpp.o
...
**** Functions that took longest to compile:
1639 ms: main (/src/open_vins/ov_eval/src/error_comparison.cpp)
1337 ms: ov_core::BsplineSE3::get_acceleration(double, Eigen::Matrix<double, ... (/src/open_vins/ov_core/src/sim/BsplineSE3.cpp)
1156 ms: ov_eval::ResultSimulation::plot_state(bool, double) (/src/open_vins/ov_eval/src/calc/ResultSimulation.cpp)
...
*** Expensive headers:
27505 ms: /src/open_vins/ov_core/src/track/TrackBase.h (included 12 times, avg 2292 ms), included via:
TrackKLT.cpp.o TrackKLT.h (4372 ms)
TrackBase.cpp.o (4297 ms)
TrackSIM.cpp.o TrackSIM.h (4252 ms)
...
@endcode
Some key methods to reduce compile times are as follows:
- Only include headers that are required for your class
- Don't include headers in your header files `.h` that are only required in your `.cpp` source files.
- Consider [forward declarations](https://www.wikiwand.com/en/Forward_declaration) of methods and types
- Ensure you are using an include guard in your headers
*/

22
docs/dev-roadmap.dox Normal file
View File

@@ -0,0 +1,22 @@
/**
@page dev-roadmap Future Roadmap
The initial release provides the library foundation which contains a filter-based visual-inertial localization solution.
This can be used in a wide range of scenarios and the type-based index system allows for others to easily add new features and develop on top of this system.
Here is a list of future directions, although nothing is set in stone, that we are interested in taking:
- Creation of a secondary graph-based thread that loosely introduces loop closures (akin to the second thread of VINS-Mono and others) which should allow for drift free long-term localization.
- Large scale offline batch graph optimization which leverages the trajectory of the ov_msckf as its initial guess and then optimizes both the map and trajectory.
- Incorporate our prior work in preintegration @cite Eckenhoff2019IJRR into the same framework structure to allow for easy extensibility. Focus on sparsification and marginalization to allow for realtime computation.
- Leverage this sliding-window batch method to perform SFM initialization of all methods.
- Support for arbitrary Schmidt'ing of state elements allowing for modeling of their prior uncertainties but without optimizing their values online.
- More advanced IMU integration schemes, quantification of the integration noises to handle low frequency readings, and modeling of the IMU intrinsics.
*/

49
docs/dev-ros1-to-ros2.dox Normal file
View File

@@ -0,0 +1,49 @@
/**
@page dev-ros1-to-ros2 ROS1 to ROS2 Bag Conversion Guide
@section gs-ros1-to-ros2-option-1 rosbags
[rosbags](https://gitlab.com/ternaris/rosbags) is the simplest utility which does not depend on ROS installs at all.
ROS bag conversion is a hard problem since you need to have both ROS1 and ROS2 dependencies.
This is what was used to generate the converted ROS2 bag files for standard datasets.
To do a conversion of a bag file we can do the following:
@code{.shell-session}
pip install rosbags
rosbags-convert V1_01_easy.bag
@endcode
@section dev-ros1-to-ros2-option-2 rosbag2 play
To do this conversion you will need to have both ROS1 and ROS2 installed on your system.
Also ensure that you have installed all dependencies and backends required.
The main [rosbag2](https://github.com/ros2/rosbag2) readme has a lot of good details.
@code{.shell-session}
sudo apt-get install ros-$ROS2_DISTRO-ros2bag ros-$ROS2_DISTRO-rosbag2*
sudo apt install ros-$ROS2_DISTRO-rosbag2-bag-v2-plugins
@endcode
From here we can do the following.
This is based on [this issue](https://github.com/ros2/rosbag2/issues/139#issuecomment-516167831).
You might run into issues with the .so files being corrupted (see [this issue](https://github.com/ros2/rosbag2/issues/94))
Not sure if there is a fix besides building it from scratch yourself.
@code{.shell-session}
source_ros1
source_ros2
ros2 bag play -s rosbag_v2 V1_01_easy.bag
@endcode
*/

14
docs/dev-welcome.dox Normal file
View File

@@ -0,0 +1,14 @@
/**
@page dev-welcome Internals and Developers
- @subpage dev-coding-style --- General coding styles and conventions
- @subpage dev-docs --- Developer guide on how documentation can be built
- @subpage dev-ros1-to-ros2 --- Some notes on ROS bag conversion
- @subpage dev-index --- Description of the covariance index system
- @subpage dev-profiling --- Some notes on performing profiling
- @subpage dev-roadmap --- Where we plan to go in the future
*/

262
docs/eval-error.dox Normal file
View File

@@ -0,0 +1,262 @@
/**
@page eval-error Filter Error Evaluation Methods
@tableofcontents
@m_class{m-note m-warning}
@par Installation Warning
If you plan to use the included plotting from the cpp code, you will need to make sure that you have matplotlib and python 2.7 installed. We use [matplotlib-cpp](https://github.com/lava/matplotlib-cpp) to call this external library and generate the desired figures. Please see @ref gs-install-oveval for more details on the exact install.
@section eval-ov-collection Collection
The first step in any evaluation is to first collect the estimated trajectory of the proposed systems.
Since we are interested in robotic application of our estimators we want to record the estimate at the current timestep (as compared to a "smoothed" output or one that includes loop-closures from future timesteps).
Within the ROS framework, this means that we just need to publish the current estimate at the current timestep.
We recommend using the following @ref ov_eval::Recorder utility for recording the estimator output directly into a text file.
Works with topics of type `PoseWithCovarianceStamped`, `PoseStamped`, `TransformStamped`, and `Odometry`.
@code{.xml}
<node name="recorder_estimate" pkg="ov_eval" type="pose_to_file" output="screen">
<param name="topic" type="str" value="/ov_msckf/poseimu" />
<param name="topic_type" type="str" value="PoseWithCovarianceStamped" />
<param name="output" type="str" value="/home/user/data/traj_log.txt" />
</node>
@endcode
@section eval-ov-transform Transformation
We now need to ensure both our estimated trajectory and groundtruth are in the correct formats for us to read in.
We need things to be in the RPE text file format (i.e. time(s), px, py, pz, qx, qy, qz, qw).
We have a nice helper script that will transform ASL / EuRoC groundtruth files to the correct format.
By default the EuRoC groundtruth has the timestamp in nanoseconds and the quaternion is in an incorrect order (i.e. time(ns), px, py, pz, qw, qx, qy, qz).
A user can either process all CSV files in a given folder, or just a specific one.
@code{.shell-session}
rosrun ov_eval format_convert folder/path/
rosrun ov_eval format_convert file.csv
@endcode
In addition we have a specific folder structure that is assumed.
We store trajectories by first their algorithm name and then a folder for each dataset this algorithm was run on.
The folder names of the datasets need to match the groundtruth trajectory files which should be in their own separate folder.
Please see the example recorded datasets for how to structure your folders.
@code{.txt}
truth/
    dataset_name_1.txt
    dataset_name_2.txt
algorithms/
open_vins/
dataset_name_1/
run1.txt
run2.txt
run3.txt
dataset_name_2/
run1.txt
run2.txt
run3.txt
okvis_stereo/
dataset_name_1/
run1.txt
run2.txt
run3.txt
dataset_name_2/
run1.txt
run2.txt
run3.txt
vins_mono/
dataset_name_1/
run1.txt
run2.txt
run3.txt
dataset_name_2/
run1.txt
run2.txt
run3.txt
@endcode
@section eval-ov-plot Processing & Plotting
Now that we have our data recorded and in the correct format we can now work on processing and plotting it.
In the next few sections we detail how to do this for absolute trajectory error, relative pose error, normalized estimation error squared, and bounded root mean squared error plots.
We will first process the data into a set of output text files which a user can then use to plot the results in their program or language of choice.
The align mode of all the following commands can be of type `posyaw`, `posyawsingle`, `se3`, `se3single`, `sim3`, and `none`.
@subsection eval-ov-plot-plot Script "plot_trajectories"
To plot the data we can use the following command which will plot a 2d xy and z-time position plots.
It will use the filename as the name in the legend, so you can change that to change the legend or edit the code.
@code{.shell-session}
rosrun ov_eval plot_trajectories <align_mode> <file_gt.txt> ... <file_est9.txt>
rosrun ov_eval plot_trajectories posyaw 1565371553_estimate.txt truths/V1_01_easy.txt
@endcode
@image html eval/plot_xy.png width=70%
@image html eval/plot_z.png width=70%
@subsection eval-ov-plot-singlerun Script "error_singlerun"
The single run script will plot statistics and also 3\f$\sigma\f$ bounds if available.
One can use this to see consistency of the estimator or debug how the current run has gone.
It also reports to console the average RMSE and RPE values for this run along with the number of samples.
To change the RPE distances you will need to edit the code currently.
@code{.shell-session}
rosrun ov_eval error_singlerun <align_mode> <file_gt.txt> <file_est.txt>
rosrun ov_eval error_singlerun posyaw 1565371553_estimate.txt truths/V1_01_easy.txt
@endcode
Example output:
@code{.shell-session}
[ INFO] [1583422165.069376426]: [COMP]: 2813 poses in 1565371553_estimate => length of 57.36 meters
[ INFO] [1583422165.091423722]: ======================================
[ INFO] [1583422165.091438299]: Absolute Trajectory Error
[ INFO] [1583422165.091445338]: ======================================
[ INFO] [1583422165.091453099]: rmse_ori = 0.677 | rmse_pos = 0.055
[ INFO] [1583422165.091459679]: mean_ori = 0.606 | mean_pos = 0.051
[ INFO] [1583422165.091466321]: min_ori = 0.044 | min_pos = 0.001
[ INFO] [1583422165.091474211]: max_ori = 1.856 | max_pos = 0.121
[ INFO] [1583422165.091481730]: std_ori = 0.302 | std_pos = 0.021
[ INFO] [1583422165.127869924]: ======================================
[ INFO] [1583422165.127891080]: Relative Pose Error
[ INFO] [1583422165.127898322]: ======================================
[ INFO] [1583422165.127908551]: seg 8 - median_ori = 0.566 | median_pos = 0.068 (2484 samples)
[ INFO] [1583422165.127919603]: seg 16 - median_ori = 0.791 | median_pos = 0.060 (2280 samples)
[ INFO] [1583422165.127927646]: seg 24 - median_ori = 0.736 | median_pos = 0.070 (2108 samples)
[ INFO] [1583422165.127934904]: seg 32 - median_ori = 0.715 | median_pos = 0.071 (1943 samples)
[ INFO] [1583422165.127942178]: seg 40 - median_ori = 0.540 | median_pos = 0.063 (1792 samples)
@endcode
@image html eval/singlerun.png width=80%
@subsection eval-ov-plot-dataset Script "error_dataset"
This dataset script will evaluate how a series of algorithms compare on a single dataset.
Normally this is used if you just have a single dataset you want to compare algorithms on, or compare a bunch of variations of your algorithm to a simulated trajectory.
In the console it will output the ATE 3D and 2D, along with the 3D RPE and 3D NEES for each method after it performs alignment.
To change the RPE distances you will need to edit the code currently.
@code{.shell-session}
rosrun ov_eval error_dataset <align_mode> <file_gt.txt> <folder_algorithms>
rosrun ov_eval error_dataset posyaw truths/V1_01_easy.txt algorithms/
@endcode
Example output:
@code{.shell-session}
[ INFO] [1583422654.333642977]: ======================================
[ INFO] [1583422654.333915102]: [COMP]: processing mono_ov_slam algorithm
[ INFO] [1583422654.362655719]: [TRAJ]: q_ESTtoGT = 0.000, 0.000, -0.129, 0.992 | p_ESTinGT = 0.978, 2.185, 0.932 | s = 1.00
....
[ INFO] [1583422654.996859432]: [TRAJ]: q_ESTtoGT = 0.000, 0.000, -0.137, 0.991 | p_ESTinGT = 0.928, 2.166, 0.957 | s = 1.00
[ INFO] [1583422655.041009388]: ATE: mean_ori = 0.684 | mean_pos = 0.057
[ INFO] [1583422655.041031730]: ATE: std_ori = 0.14938 | std_pos = 0.01309
[ INFO] [1583422655.041039552]: ATE 2D: mean_ori = 0.552 | mean_pos = 0.053
[ INFO] [1583422655.041046362]: ATE 2D: std_ori = 0.17786 | std_pos = 0.01421
[ INFO] [1583422655.044187033]: RPE: seg 7 - mean_ori = 0.543 | mean_pos = 0.065 (25160 samples)
[ INFO] [1583422655.047047771]: RPE: seg 14 - mean_ori = 0.593 | mean_pos = 0.060 (23470 samples)
[ INFO] [1583422655.049672955]: RPE: seg 21 - mean_ori = 0.664 | mean_pos = 0.081 (22050 samples)
[ INFO] [1583422655.052090494]: RPE: seg 28 - mean_ori = 0.732 | mean_pos = 0.083 (20520 samples)
[ INFO] [1583422655.054294322]: RPE: seg 35 - mean_ori = 0.793 | mean_pos = 0.090 (18960 samples)
[ INFO] [1583422655.055676035]: RMSE: mean_ori = 0.644 | mean_pos = 0.056
[ INFO] [1583422655.056987984]: RMSE 2D: mean_ori = 0.516 | mean_pos = 0.052
[ INFO] [1583422655.058269163]: NEES: mean_ori = 793.646 | mean_pos = 13.095
[ INFO] [1583422656.182660653]: ======================================
[ INFO] [1583422656.183065588]: [COMP]: processing mono_ov_vio algorithm
[ INFO] [1583422656.209545279]: [TRAJ]: q_ESTtoGT = 0.000, 0.000, -0.148, 0.989 | p_ESTinGT = 0.931, 2.169, 0.971 | s = 1.00
....
[ INFO] [1583422656.791523636]: [TRAJ]: q_ESTtoGT = 0.000, 0.000, -0.149, 0.989 | p_ESTinGT = 0.941, 2.163, 0.974 | s = 1.00
[ INFO] [1583422656.835407991]: ATE: mean_ori = 0.639 | mean_pos = 0.076
[ INFO] [1583422656.835433475]: ATE: std_ori = 0.05800 | std_pos = 0.00430
[ INFO] [1583422656.835446222]: ATE 2D: mean_ori = 0.514 | mean_pos = 0.070
[ INFO] [1583422656.835457020]: ATE 2D: std_ori = 0.07102 | std_pos = 0.00492
[ INFO] [1583422656.838656567]: RPE: seg 7 - mean_ori = 0.614 | mean_pos = 0.092 (25160 samples)
[ INFO] [1583422656.841540191]: RPE: seg 14 - mean_ori = 0.634 | mean_pos = 0.092 (23470 samples)
[ INFO] [1583422656.844219466]: RPE: seg 21 - mean_ori = 0.632 | mean_pos = 0.115 (22050 samples)
[ INFO] [1583422656.846646272]: RPE: seg 28 - mean_ori = 0.696 | mean_pos = 0.119 (20520 samples)
[ INFO] [1583422656.848862913]: RPE: seg 35 - mean_ori = 0.663 | mean_pos = 0.154 (18960 samples)
[ INFO] [1583422656.850321777]: RMSE: mean_ori = 0.600 | mean_pos = 0.067
[ INFO] [1583422656.851673985]: RMSE 2D: mean_ori = 0.479 | mean_pos = 0.060
[ INFO] [1583422656.853037942]: NEES: mean_ori = 625.447 | mean_pos = 10.629
[ INFO] [1583422658.194763413]: ======================================
....
@endcode
@image html eval/dataset.png width=100%
@subsection eval-ov-plot-comparison Script "error_comparison"
This compares all methods to each other on a series of datasets.
For example, you run a bunch of methods on all the EurocMav datasets and then want to compare.
This will do the RPE over all trajectories, and an ATE for each dataset.
It will print the ATE and RPE for each method on each dataset in the console.
Then following the @ref eval-metrics, these are averaged over all the runs and datasets.
Finally at the end it outputs a nice latex table which can be directly used in a paper.
To change the RPE distances you will need to edit the code currently.
@code{.shell-session}
rosrun ov_eval error_comparison <align_mode> <folder_groundtruth> <folder_algorithms>
rosrun ov_eval error_comparison posyaw truths/ algorithms/
@endcode
Example output:
@code{.shell-session}
[ INFO] [1583425216.054023187]: [COMP]: 2895 poses in V1_01_easy.txt => length of 58.35 meters
[ INFO] [1583425216.092355692]: [COMP]: 16702 poses in V1_02_medium.txt => length of 75.89 meters
[ INFO] [1583425216.133532429]: [COMP]: 20932 poses in V1_03_difficult.txt => length of 78.98 meters
[ INFO] [1583425216.179616651]: [COMP]: 22401 poses in V2_01_easy.txt => length of 36.50 meters
[ INFO] [1583425216.225299463]: [COMP]: 23091 poses in V2_02_medium.txt => length of 83.23 meters
[ INFO] [1583425216.225660364]: ======================================
[ INFO] [1583425223.560550101]: [COMP]: processing mono_ov_vio algorithm
[ INFO] [1583425223.560632706]: [COMP]: processing mono_ov_vio algorithm => V1_01_easy dataset
[ INFO] [1583425224.236769465]: [COMP]: processing mono_ov_vio algorithm => V1_02_medium dataset
[ INFO] [1583425224.855489521]: [COMP]: processing mono_ov_vio algorithm => V1_03_difficult dataset
[ INFO] [1583425225.659183593]: [COMP]: processing mono_ov_vio algorithm => V2_01_easy dataset
[ INFO] [1583425226.442217424]: [COMP]: processing mono_ov_vio algorithm => V2_02_medium dataset
[ INFO] [1583425227.366004330]: ======================================
....
[ INFO] [1583425261.724448413]: ============================================
[ INFO] [1583425261.724469372]: ATE LATEX TABLE
[ INFO] [1583425261.724481841]: ============================================
& \textbf{V1\_01\_easy} & \textbf{V1\_02\_medium} & \textbf{V1\_03\_difficult}
& \textbf{V2\_01\_easy} & \textbf{V2\_02\_medium} & \textbf{Average} \\\hline
mono\_ov\_slam & 0.699 / 0.058 & 1.675 / 0.076 & 2.542 / 0.063 & 0.773 / 0.124 & 1.538 / 0.074 & 1.445 / 0.079 \\
mono\_ov\_vio & 0.642 / 0.076 & 1.766 / 0.096 & 2.391 / 0.344 & 1.164 / 0.121 & 1.248 / 0.106 & 1.442 / 0.148 \\
....
[ INFO] [1583425261.724647970]: ============================================
[ INFO] [1583425261.724655060]: ============================================
[ INFO] [1583425261.724661046]: RPE LATEX TABLE
[ INFO] [1583425261.724666910]: ============================================
& \textbf{8m} & \textbf{16m} & \textbf{24m} & \textbf{32m} & \textbf{40m} & \textbf{48m} \\\hline
mono\_ov\_slam & 0.661 / 0.074 & 0.802 / 0.086 & 0.979 / 0.097 & 1.061 / 0.105 & 1.145 / 0.120 & 1.289 / 0.122 \\
mono\_ov\_vio & 0.826 / 0.094 & 1.039 / 0.106 & 1.215 / 0.111 & 1.283 / 0.132 & 1.342 / 0.151 & 1.425 / 0.184 \\
....
[ INFO] [1583425262.514587296]: ============================================
@endcode
@image html eval/comparison.png width=100%
*/

34
docs/eval-home.dox Normal file
View File

@@ -0,0 +1,34 @@
/**
@page evaluation System Evaluation
The goal of our evaluation is to ensure fair comparison to other methods and our own.
The actual metrics we use can be found on the @ref eval-metrics page.
Using our metrics we wish to provide insight into *why* our method does better and in what ways (as no method will outperform in all aspects).
Since we are also interested in applying the systems to real robotic applications, the realtime performance is also a key metric we need to investigate.
Timing of different system components is also key to removing bottlenecks and seeing where performance improvements or estimator approximations might help reduce complexity.
The key metrics we are interested in evaluating are the following:
- Absolute Trajectory Error (ATE)
- Relative Pose Error (RPE)
- Root Mean Squared Error (RMSE)
- Normalized Estimation Error Squared (NEES)
- Estimator Component Timing
- System Hardware Usage (memory and computation)
@section evaluation-more System Evaluation Guides
- @subpage eval-metrics --- Definitions of different metrics for estimator accuracy.
- @subpage eval-error --- Error evaluation methods for evaluating system performance.
- @subpage eval-timing --- Timing of estimator components and complexity.
*/

83
docs/eval-metric.dox Normal file
View File

@@ -0,0 +1,83 @@
/**
@page eval-metrics Filter Evaluation Metrics
@tableofcontents
@section eval-ate Absolute Trajectory Error (ATE)
The Absolute Trajectory Error (ATE) is given by the simple difference between the estimated trajectory and groundtruth after it has been aligned so that it has minimal error.
First the "best" transform between the groundtruth and estimate is computed, after which the error is computed at every timestep and then averaged.
We recommend reading Zhang and Scaramuzza @cite Zhang2018IROS paper for details.
For a given dataset with \f$N\f$ runs of the same algorithm with \f$K\f$ pose measurements, we can compute the following for an aligned estimated trajectory \f$\hat{\mathbf{x}}^+\f$:
\f{align*}{
e_{ATE} &= \frac{1}{N} \sum_{i=1}^{N} \sqrt{ \frac{1}{K} \sum_{k=1}^{K} ||\mathbf{x}_{k,i} \boxminus \hat{\mathbf{x}}^+_{k,i}||^2_{2} }
\f}
@section eval-rpe Relative Pose Error (RPE)
The Relative Pose Error (RPE) is calculated for segments of the dataset and allows for introspection of how localization solutions drift as the length of the trajectory increases.
The other key advantage over ATE error is that it is less sensitive to jumps in estimation error due to sampling the trajectory over many smaller segments.
This allows for a much fairer comparison of methods and is what we recommend all authors publish results for.
We recommend reading Zhang and Scaramuzza @cite Zhang2018IROS paper for details.
We first define a set of segment lengths \f$\mathcal{D} = [d_1,~d_2,\cdots,~d_V]\f$ which we compute the relative error for.
We can define the relative error for a trajectory split into \f$D_i\f$ segments of \f$d_i\f$ length as follows:
\f{align*}{
\tilde{\mathbf{x}}_{r} &= \mathbf{x}_{k} \boxminus \mathbf{x}_{k+d_i} \\
e_{rpe,d_i} &= \frac{1}{D_i} \sum_{k=1}^{D_i} ||\tilde{\mathbf{x}}_{r} \boxminus \hat{\tilde{\mathbf{x}}}_{r}||_{2}
\f}
@section eval-rmse Root Mean Squared Error (RMSE)
A useful tool when evaluating a system on a *single* dataset is the Root Mean Squared Error (RMSE) plot.
This plots the RMSE at every timestep of the trajectory and thus can provide insight into timesteps where the estimation performance suffers.
For a given dataset with \f$N\f$ runs of the same algorithm we can compute the following at each timestep \f$k\f$:
\f{align*}{
e_{rmse,k} &= \sqrt{ \frac{1}{N} \sum_{i=1}^{N} ||\mathbf{x}_{k,i} \boxminus \hat{\mathbf{x}}_{k,i}||^2_{2} }
\f}
@section eval-nees Normalized Estimation Error Squared (NEES)
Normalized Estimation Error Squared (NEES) is a standard way to characterize if the estimator is being consistent or not.
In general NEES is just the normalized error, which on average should equal the degrees of freedom of the state variables.
Thus in the case of position and orientation we should get a NEES of three at every timestep.
To compute the average NEES for a dataset with \f$N\f$ runs of the same algorithm we can compute the following at each timestep \f$k\f$:
\f{align*}{
e_{nees,k} &= \frac{1}{N} \sum_{i=1}^{N} (\mathbf{x}_{k,i} \boxminus \hat{\mathbf{x}}_{k,i})^\top \mathbf{P}^{-1}_{k,i} (\mathbf{x}_{k,i} \boxminus \hat{\mathbf{x}}_{k,i})
\f}
@section eval-singlerun Single Run Consistency
When looking at a *single run* and wishing to see if the system is consistent, it is interesting to look at its error with respect to its estimated uncertainty.
Specifically we plot the error and the estimator \f$3\sigma\f$ bound.
This provides insight into if the estimator is becoming over confident at certain timesteps.
Note this is for each component of the state, thus we need to plot x,y,z and orientation independently.
We can directly compute the error at timestep \f$k\f$:
\f{align*}{
\mathbf{e}_k &= \mathbf{x}_{k} \boxminus \hat{\mathbf{x}}_{k} \\
&\textrm{where} ~~\mathbf{e}_k\sim \mathcal N (0, \mathbf P)
\f}
*/

172
docs/eval-timing.dox Normal file
View File

@@ -0,0 +1,172 @@
/**
@page eval-timing Filter Timing Analysis
@tableofcontents
@m_class{m-note m-warning}
@par Installation Warning
If you plan to use the included plotting from the cpp code, you will need to make sure that you have matplotlib and python 2.7 installed. We use [matplotlib-cpp](https://github.com/lava/matplotlib-cpp) to call this external library and generate the desired figures. Please see @ref gs-install-oveval for more details on the exact install.
@section eval-ov-timing-collection Collection
To profile the different parts of the system we record the timing information from directly inside the @ref ov_msckf::VioManager.
The file should be comma separated format, with the first column being the timing, and the last column being the total time (units are all in seconds).
The middle columns should describe how much each component takes (whose names are extracted from the header of the csv file).
You can use the below tools as long as you follow this format, and add or remove components as you see fit to the middle columns.
To evaluate the computational load (*not computation time*), we have a python script that leverages the [psutil](https://github.com/giampaolo/psutil) python package to record percent CPU and memory consumption.
This can be included as an additional node in the launch file which only needs the node which you want the reported information of.
This will poll the node for its percent memory, percent cpu, and total number of threads that it uses.
This can be useful if you wish to compare different methods on the same platform, but doesn't make sense to use this to compare the running of the same algorithm or different algorithms *across* different hardware devices.
@code{.xml}
<node name="recorder_timing" pkg="ov_eval" type="pid_ros.py" output="screen">
<param name="nodes" type="str" value="/run_subscribe_msckf" />
<param name="output" type="str" value="/tmp/psutil_log.txt" />
</node>
@endcode
It is also important to note that if the estimator has multiple nodes, you can subscribe to them all by specifying their names as a comma separated string.
For example to evaluate the computation needed for [VINS-Mono](https://github.com/HKUST-Aerial-Robotics/VINS-Mono/blob/master/vins_estimator/launch/euroc.launch) multi-node system we can do:
@code{.xml}
<node name="recorder_timing" pkg="ov_eval" type="pid_ros.py" output="screen">
<param name="nodes" type="str" value="/feature_tracker,/vins_estimator,/pose_graph" />
<param name="output" type="str" value="/tmp/psutil_log.txt" />
</node>
@endcode
@section eval-ov-timing-plot Processing & Plotting
@subsection eval-ov-timing-flame Script "timing_flamegraph"
The flame graph script looks to recreate a [FlameGraph](https://github.com/brendangregg/FlameGraph) of the key components of the system.
While we do not trace all functions, the key "top level" function times are recorded to file to allow for insight into what is taking the majority of the computation.
The file should be comma separated format, with the first column being the timing, and the last column being the total time.
The middle columns should describe how much each component takes (whose names are extracted from the header of the csv file).
@code{.shell-session}
rosrun ov_eval timing_flamegraph <file_times.txt>
rosrun ov_eval timing_flamegraph timing_mono_ethV101.txt
@endcode
Example output:
@code{.shell-session}
[TIME]: loaded 2817 timestamps from file (7 categories)!!
mean_time = 0.0037 | std = 0.0011 | 99th = 0.0063 | max = 0.0254 (tracking)
mean_time = 0.0004 | std = 0.0001 | 99th = 0.0006 | max = 0.0014 ( propagation)
mean_time = 0.0032 | std = 0.0022 | 99th = 0.0083 | max = 0.0309 (msckf update)
mean_time = 0.0034 | std = 0.0013 | 99th = 0.0063 | max = 0.0099 (slam update)
mean_time = 0.0012 | std = 0.0017 | 99th = 0.0052 | max = 0.0141 (slam delayed)
mean_time = 0.0009 | std = 0.0003 | 99th = 0.0015 | max = 0.0027 (marginalization)
mean_time = 0.0128 | std = 0.0035 | 99th = 0.0208 | max = 0.0403 (total)
@endcode
@image html eval/timing_flame.png width=90%
@subsection eval-ov-timing-compare Script "timing_comparison"
This script is used to compare the run-time of different runs of the algorithm.
This takes in the same file as the flame graph and is recorded in the @ref ov_msckf::VioManager.
The file should be comma separated format, with the first column being the timing, and the last column being the total time.
The middle columns should describe how much each component takes (whose names are extracted from the header of the csv file).
@code{.shell-session}
rosrun ov_eval timing_comparison <file_times1.txt> ... <file_timesN.txt>
rosrun ov_eval timing_comparison timing_mono_ethV101.txt timing_stereo_ethV101.txt
@endcode
Example output:
@code{.shell-session}
======================================
[TIME]: loading data for timing_mono
[TIME]: loaded 2817 timestamps from file (7 categories)!!
mean_time = 0.0037 | std = 0.0011 | 99th = 0.0063 | max = 0.0254 (tracking)
mean_time = 0.0004 | std = 0.0001 | 99th = 0.0006 | max = 0.0014 ( propagation)
mean_time = 0.0032 | std = 0.0022 | 99th = 0.0083 | max = 0.0309 (msckf update)
mean_time = 0.0034 | std = 0.0013 | 99th = 0.0063 | max = 0.0099 (slam update)
mean_time = 0.0012 | std = 0.0017 | 99th = 0.0052 | max = 0.0141 (slam delayed)
mean_time = 0.0009 | std = 0.0003 | 99th = 0.0015 | max = 0.0027 (marginalization)
mean_time = 0.0128 | std = 0.0035 | 99th = 0.0208 | max = 0.0403 (total)
======================================
======================================
[TIME]: loading data for timing_stereo
[TIME]: loaded 2817 timestamps from file (7 categories)!!
mean_time = 0.0077 | std = 0.0020 | 99th = 0.0124 | max = 0.0219 (tracking)
mean_time = 0.0004 | std = 0.0001 | 99th = 0.0007 | max = 0.0023 ( propagation)
mean_time = 0.0081 | std = 0.0047 | 99th = 0.0189 | max = 0.0900 (msckf update)
mean_time = 0.0063 | std = 0.0023 | 99th = 0.0117 | max = 0.0151 (slam update)
mean_time = 0.0020 | std = 0.0026 | 99th = 0.0079 | max = 0.0205 (slam delayed)
mean_time = 0.0019 | std = 0.0005 | 99th = 0.0031 | max = 0.0052 (marginalization)
mean_time = 0.0263 | std = 0.0063 | 99th = 0.0410 | max = 0.0979 (total)
======================================
@endcode
@image html eval/timing_compare.png width=90%
@subsection eval-ov-timing-percentages Script "timing_percentages"
This utility allows for comparing the resources used by the algorithm on a specific platform.
An example usage would be how the memory and cpu requirements increase as the state size grows or as more cameras are added.
You will need to record the format using the `pid_ros.py` node (see @ref eval-ov-timing-collection for details on how to use it).
Remember that 100\% cpu usage means that it takes one cpu thread to support the system, while 200\% means it takes two cpu threads to support the system (see [this link](https://superuser.com/a/457634/707974) for an explanation).
The folder structure needed is as follows:
@code{.txt}
psutil_logs/
ov_mono/
run1.txt
run2.txt
run3.txt
ov_stereo/
run1.txt
run2.txt
run3.txt
@endcode
@code{.shell-session}
rosrun ov_eval timing_percentages <timings_folder>
rosrun ov_eval timing_percentages psutil_logs/
@endcode
Example output:
@code{.shell-session}
======================================
[COMP]: processing ov_mono algorithm
loaded 149 timestamps from file!!
PREC: mean_cpu = 83.858 +- 17.130
PREC: mean_mem = 0.266 +- 0.019
THR: mean_threads = 12.893 +- 0.924
======================================
======================================
[COMP]: processing ov_stereo algorithm
loaded 148 timestamps from file!!
PREC: mean_cpu = 111.007 +- 16.519
PREC: mean_mem = 5.139 +- 2.889
THR: mean_threads = 12.905 +- 0.943
======================================
@endcode
@image html eval/timing_percent.png width=90%
*/

206
docs/fej.dox Normal file
View File

@@ -0,0 +1,206 @@
/**
@page fej First Estimate Jacobian (FEJ) Estimation
@tableofcontents
@section fej-sys-evolution EKF Linearized Error-State System
When developing an extended Kalman filter (EKF), one needs to linearize the nonlinear motion and measurement models about some linearization point.
This linearization is one of the sources of error causing inaccuracies in the estimates (in addition to, for example, model errors and measurement noise).
Let us consider the following linearized error-state visual-inertial system:
\f{align*}{
\tilde{\mathbf{x}}_{k|k-1} &= \mathbf{\Phi}_{(k,k-1)}~\tilde{\mathbf{x}}_{k-1|k-1} + \mathbf{G}_{k}\mathbf{w}_{k} \\
\tilde{\mathbf{z}}_{k} &= \mathbf{H}_{k}~\tilde{\mathbf{x}}_{k|k-1}+\mathbf{n}_{k}
\f}
where the state contains the inertial navigation state and a single environmental feature
(noting that we do not include biases to simplify the derivations):
\f{align*}{
\mathbf{x}_k &=
\begin{bmatrix}
{}_G^{I_{k}} \bar{q}{}^{\top}
& {}^G\mathbf{p}_{{I}_{k}}^{\top}
& {}^G\mathbf{v}_{{I}_{k}}^{\top}
& {}^G\mathbf{p}_{f}^{\top}
\end{bmatrix}^{\top}
\f}
Note that we use the left quaternion error state
(see [[Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf)] @cite Trawny2005TR for details).
For simplicity we assume that the camera and IMU frame have an identity transform.
We can compute the measurement Jacobian of a given feature based on the perspective projection camera model
at the *k*-th timestep as follows:
\f{align*}{
\mathbf{H}_{k} &=
\mathbf H_{proj,k}~\mathbf H_{state,k} \\
&=
\begin{bmatrix}
\frac{1}{{}^Iz} & 0 & \frac{-{}^Ix}{({}^Iz)^2} \\
0 & \frac{1}{{}^Iz} & \frac{-{}^Iy}{({}^Iz)^2} \\
\end{bmatrix}
\begin{bmatrix}
\lfloor {}^{I_k}_{G}\mathbf{R}({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k}) \times\rfloor &
-{}^{I_k}_{G}\mathbf{R} &
\mathbf 0_{3\times3} &
{}^{I_k}_{G}\mathbf{R}
\end{bmatrix} \\
&=
\mathbf H_{proj,k}~
{}^{I_k}_{G}\mathbf{R}
\begin{bmatrix}
\lfloor ({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k}) \times\rfloor {}^{I_k}_{G}\mathbf{R}^\top &
-\mathbf I_{3\times3} &
\mathbf 0_{3\times3} &
\mathbf I_{3\times3}
\end{bmatrix}
\f}
The state-transition (or system Jacobian) matrix from timestep *k-1* to *k* as (see [@ref propagation] for more details):
\f{align*}{
\mathbf{\Phi}_{(k,k-1)}&=
\begin{bmatrix}
{}^{I_{k}}_{I_{k-1}}\mathbf R
& \mathbf 0_{3\times3}
& \mathbf 0_{3\times3}
& \mathbf 0_{3\times3} \\[1em]
\empty
-{}^{I_{k-1}}_{G}\mathbf{R}^\top \lfloor \boldsymbol\alpha(k,k-1) \times\rfloor
& \mathbf I_{3\times3}
& (t_{k}-t_{k-1})\mathbf I_{3\times3}
& \mathbf 0_{3\times3} \\[1em]
\empty
-{}^{I_{k-1}}_{G}\mathbf{R}^\top \lfloor \boldsymbol\beta(k,k-1) \times\rfloor
& \mathbf 0_{3\times3}
& \mathbf I_{3\times3}
& \mathbf 0_{3\times3} \\[1em]
\empty
\mathbf 0_{3\times3}
& \mathbf 0_{3\times3}
& \mathbf 0_{3\times3}
& \mathbf I_{3\times3}
\end{bmatrix} \\[2em]
\boldsymbol\alpha(k,k-1) &= \int_{t_{k-1}}^{t_k} \int_{t_{k-1}}^{s} {}^{I_{k-1}}_{\tau}\mathbf R (\mathbf a(\tau)-\mathbf b_a - \mathbf w_a) d\tau ds \\
\boldsymbol\beta(k,k-1) &= \int_{t_{k-1}}^{t_k} {}^{I_{k-1}}_{\tau}\mathbf R (\mathbf a(\tau)-\mathbf b_a - \mathbf w_a) d\tau
\f}
where \f$\mathbf a(\tau)\f$ is the true acceleration at time \f$\tau \f$, \f${}^{I_{k}}_{I_{k-1}}\mathbf R\f$ is computed using the gyroscope angular velocity measurements, and \f${}^{G}\mathbf g = [0 ~ 0 ~ 9.81]^\top\f$ is gravity in the global frame of reference.
During propagation one would need to solve these integrals using either analytical or numerical integration,
while we here are interested in how the state evolves in order to examine its observability.
@section fej-sys-observability Linearized System Observability
The observability matrix of this linearized system is defined by:
\f{align*}{
\mathcal{O}=
\begin{bmatrix}
\mathbf{H}_{0}\mathbf{\Phi}_{(0,0)} \\
\mathbf{H}_{1}\mathbf{\Phi}_{(1,0)} \\
\mathbf{H}_{2}\mathbf{\Phi}_{(2,0)} \\
\vdots \\
\mathbf{H}_{k}\mathbf{\Phi}_{(k,0)} \\
\end{bmatrix}
\f}
where \f$\mathbf{H}_{k}\f$ is the measurement Jacobian at timestep *k* and \f$\mathbf{\Phi}_{(k,0)}\f$ is the compounded state transition (system Jacobian) matrix from timestep 0 to k.
For a given block row of this matrix, we have:
\f{align*}{
\mathbf{H}_{k}\mathbf{\Phi}_{(k,0)} &=
\empty
\mathbf H_{proj,k}~
{}^{I_k}_{G}\mathbf{R}
\begin{bmatrix}
\boldsymbol\Gamma_1 &
\boldsymbol\Gamma_2 &
\boldsymbol\Gamma_3 &
\boldsymbol\Gamma_4
\end{bmatrix} \\[2em]
\empty
\empty
\boldsymbol\Gamma_1 &=
\Big\lfloor \Big({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k} + {}^{I_{0}}_{G}\mathbf{R}^\top \boldsymbol\alpha(k,0) \Big) \times\Big\rfloor {}^{I_0}_{G}\mathbf{R}^\top \\
&=
\Big\lfloor \Big({}^{G}\mathbf{p}_f- {}^{G}\mathbf{p}_{I_0}-{}^{G}\mathbf{v}_{I_0}(t_k-t_0)-\frac{1}{2}{}^G\mathbf{g}(t_k-t_0)^2 \Big) \times\Big\rfloor {}^{I_0}_{G}\mathbf{R}^\top
\\
\boldsymbol\Gamma_2 &= -\mathbf I_{3\times3} \\
\boldsymbol\Gamma_3 &= -(t_{k}-t_0) \mathbf I_{3\times3} \\
\boldsymbol\Gamma_4 &= \mathbf I_{3\times3}
\f}
We now verify the following nullspace which corresponds to the global yaw about gravity and global IMU and feature positions:
\f{align*}{
\mathcal{N}_{vins} &=
\begin{bmatrix}
{}^{I_{0}}_{G}\mathbf{R}{}^G\mathbf{g} & \mathbf 0_{3\times3} \\
-\lfloor {}^{G}\mathbf{p}_{I_0} \times\rfloor{}^G\mathbf{g} & \mathbf{I}_{3\times3} \\
-\lfloor {}^{G}\mathbf{v}_{I_0} \times\rfloor{}^G\mathbf{g} & \mathbf{0}_{3\times3} \\
-\lfloor {}^{G}\mathbf{p}_{f} \times\rfloor{}^G\mathbf{g} & \mathbf{I}_{3\times3} \\
\end{bmatrix}
\f}
It is not difficult to verify that \f$ \mathbf{H}_{k}\mathbf{\Phi}_{(k,0)}\mathcal{N}_{vins} = \mathbf 0 \f$.
Thus this is a nullspace of the system,
which clearly shows that there are four unobservable directions (global yaw and position) of visual-inertial systems.
@section fej-fej First Estimate Jacobians
The main idea of First-Estimate Jacobian (FEJ) approaches is to ensure that the state transition and Jacobian matrices are evaluated at correct linearization points such that the above observability analysis will hold true.
For those interested in the technical details please take a look at: @cite Huang2010IJRR and @cite Li2013IJRR.
Let us first consider a small thought experiment of how the standard Kalman filter computes its state transition matrix.
From a timestep zero to one it will use the current estimates from state zero forward in time.
At the next timestep after it updates the state with measurements from other sensors, it will compute the state transition with the updated values to evolve the state to timestep two.
This causes a mismatch in the "continuity" of the state transition matrix, which when multiplied sequentially should represent the evolution from time zero to time two.
\f{align*}{
\mathbf{\Phi}_{(k+1,k-1)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k-1|k-1}) \neq
\mathbf{\Phi}_{(k+1,k)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k|k}) ~
\mathbf{\Phi}_{(k,k-1)}(\mathbf{x}_{k|k-1},\mathbf{x}_{k-1|k-1})
\f}
As shown above, we wish to compute the state transition matrix from the *k-1* timestep given all *k-1* measurements up until the current propagated timestep *k+1* given all *k* measurements.
The right side of the above equation is how one would normally perform this in a Kalman filter framework.
\f$\mathbf{\Phi}_{(k,k-1)}(\mathbf{x}_{k|k-1},\mathbf{x}_{k-1|k-1})\f$ corresponds to propagating from the *k-1* update time to the *k* timestep.
One would then normally perform the *k*'th update to the state and then propagate from this **updated** state to the newest timestep (i.e. the \f$ \mathbf{\Phi}_{(k+1,k)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k|k}) \f$ state transition matrix).
This clearly is different than if one was to compute the state transition from time *k-1* to the *k+1* timestep, as the second state transition is evaluated at the different \f$\mathbf{x}_{k|k}\f$ linearization point!
To fix this, we can change the linearization point we evaluate these at:
\f{align*}{
\mathbf{\Phi}_{(k+1,k-1)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k-1|k-1}) =
\mathbf{\Phi}_{(k+1,k)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k|k-1}) ~
\mathbf{\Phi}_{(k,k-1)}(\mathbf{x}_{k|k-1},\mathbf{x}_{k-1|k-1})
\f}
We also need to ensure that our measurement Jacobians match the linearization point of the state transition matrix.
Thus they also need to be evaluated at the \f$\mathbf{x}_{k|k-1}\f$ linearization point instead of the \f$\mathbf{x}_{k|k}\f$ that one would normally use.
This gives way to the name FEJ since we will evaluate the Jacobians at the same linearization point to ensure that the nullspace remains valid.
For example if we evaluated the \f$\mathbf H_k \f$ Jacobian with a different \f$ {}^G\mathbf{p}_f \f$ at each timestep then the nullspace would not hold past the first time instance.
*/

45
docs/getting-started.dox Normal file
View File

@@ -0,0 +1,45 @@
/**
@page getting-started Getting started
Welcome to the OpenVINS project!
The following guides will help new users through the downloading of the software and running on datasets that we support.
Additionally, we provide information on how to get your own sensors running on our system and have a guide on how we perform calibration.
Please feel free to open an issue if you find any missing or areas that could be clarified.
@section highlevel High-level overview
From a high level the system is built on a few key algorithms.
At the center we have the ov_core which contains a lot of standard computer vision algorithms and utilities that anybody can use.
Specifically it stores the following large components:
- Sparse feature visual tracking (KLT and descriptor-based)
- Fundamental math types used to represent states
- Initialization procedures
- Multi-sensor simulator that generates synthetic measurements
This ov_core library is used by the ov_msckf system which contains our filter-based estimator.
Within this we have the state, its manager, type system, prediction, and update algorithms.
We encourage users to look at the specific documentation for a detailed view of what we support.
The ov_eval library has a bunch of evaluation methods and scripts that one can use to generate research results for publication.
@section getting-started-more Getting Started Guides
- @subpage gs-installing --- Installation guide for OpenVINS and dependencies
- @subpage dev-docker --- Installing with Docker instead of from source
- @subpage gs-tutorial --- Simple tutorial on getting OpenVINS running out of the box.
- @subpage gs-datasets --- Links to supported datasets and configuration files
- @subpage gs-calibration --- Guide on how to calibrate your own visual-inertial sensors.
*/

115
docs/gs-calibration.dox Normal file
View File

@@ -0,0 +1,115 @@
/**
@page gs-calibration Sensor Calibration
@section gs-visual-inertial-sensor Visual-Inertial Sensors
One may ask why use a visual-inertial sensor?
The main reason is because of the complementary nature of the two different sensing modalities.
The camera provides high density external measurements of the environment,
while the IMU measures internal ego-motion of the sensor platform.
The IMU is crucial in providing robustness to the estimator while also providing system scale in the case of a monocular camera.
However, there are some challenges when leveraging the IMU in estimation.
An IMU requires estimating additional bias terms and requires highly accurate calibration between the camera and IMU.
Additionally small errors in the relative timestamps between the sensors can also degrade performance very quickly in dynamic trajectories.
Within this *OpenVINS* project we address these by advocating the *online* estimation of these extrinsic and time offset parameters between the cameras and IMU.
@image html rig_hydra_old.jpg width=60%
@section gs-calib-cam-static Camera Intrinsic Calibration (Offline)
The first task is to calibrate the camera intrinsic values such as the focal length, camera center, and distortion coefficients.
Our group often uses the [Kalibr](https://github.com/ethz-asl/kalibr/) @cite Furgale2013IROS calibration toolbox to perform both intrinsic and extrinsic offline calibrations, with the following steps:
1. Clone and build the [Kalibr](https://github.com/ethz-asl/kalibr/) toolbox
2. Print out a calibration board to use (we normally use the [Aprilgrid 6x6 0.8x0.8 m (A0 page)](https://drive.google.com/file/d/0B0T1sizOvRsUdjFJem9mQXdiMTQ/edit?usp=sharing))
3. Ensure that your sensor driver is publishing onto ROS with correct timestamps.
4. Sensor preparations
- Limit motion blur by decreasing exposure time
- Publish at low framerate to allow for larger variance in dataset (2-5hz)
- Ensure that your calibration board can be viewed in all areas of the image
- Ensure that your sensor is in focus (can use their *kalibr\_camera\_focus* or just manually)
5. Record a ROS bag and ensure that the calibration board can be seen from different orientations, distances, and in each part of the image plane. You can either move the calibration board and keep the camera still or move the camera and keep the calibration board stationary.
6. Finally run the calibration
- Use the kalibr\_calibrate\_cameras with your specified topic
- Depending on amount of distortion, use the *pinhole-equi* for fisheye, or if a low distortion then use the *pinhole-radtan*
- Depending on how many frames are in your dataset this can take on the upwards of a few hours.
7. Inspect the final result, pay close attention to the final reprojection error graphs, with a good calibration having less than < 0.2-0.5 pixel reprojection errors.
@section gs-calib-imu-static IMU Intrinsic Calibration (Offline)
The other important intrinsic calibration is to compute the inertial sensor intrinsic noise characteristics,
which are needed for the batch optimization to calibrate the camera to IMU transform and in any VINS estimator so that we can properly probabilistically fuse the images and inertial readings.
Unfortunately, there is no mature open sourced toolbox to find these values, while one can try our [kalibr_allan](https://github.com/rpng/kalibr_allan) project, which is not optimized though.
Specifically we are estimating the following noise parameters:
Parameter | YAML element | Symbol | Units
--- | --- | --- | ---
Gyroscope "white noise" | `gyroscope_noise_density` | \f$\sigma_g\f$ | \f$\frac{rad}{s}\frac{1}{\sqrt{Hz}}\f$
Accelerometer "white noise" | `accelerometer_noise_density` | \f$\sigma_a\f$ | \f$\frac{m}{s^2}\frac{1}{\sqrt{Hz}}\f$
Gyroscope "random walk" | `gyroscope_random_walk` | \f$\sigma_{b_g}\f$ | \f$\frac{rad}{s^2}\frac{1}{\sqrt{Hz}}\f$
Accelerometer "random walk" | `accelerometer_random_walk` | \f$\sigma_{b_a}\f$ | \f$\frac{m}{s^3}\frac{1}{\sqrt{Hz}}\f$
The standard way as explained in
[[IEEE Standard Specification Format Guide and Test Procedure for Single-Axis Interferometric Fiber Optic Gyros](https://ieeexplore.ieee.org/document/660628/) (page 71, section C)]
is that we can compute an [Allan variance](https://en.wikipedia.org/wiki/Allan_variance) plot of the sensor readings over different observation times (see below).
@image html allanvar_gyro.png width=75%
As shown in the above figure, if we compute the Allan variance we can look at the value of a line at \f$\tau=1\f$ with a -1/2 slope fitted to the left side of the plot to get the white noise of the sensor.
Similarly, a line with a 1/2 slope fitted to the right side can be evaluated at \f$\tau=3\f$ to get the random walk noise.
We have a package that can do this in matlab, but actual verification and conversion into a C++ codebase has yet to be done.
Please refer to our [[kalibr_allan](https://github.com/rpng/kalibr_allan)] github project for details on how to generate this plot for your sensor and calculate the values.
Note that one may need to inflate the calculated values by 10-20 times to get usable sensor values.
@section gs-calib-cam-dynamic Dynamic IMU-Camera Calibration (Offline)
After obtaining the intrinsic calibration of both the camera and IMU, we can now perform dynamic calibration of the transform between the two sensors.
For this we again leverage the [[Kalibr](https://github.com/ethz-asl/kalibr/) calibration toolbox].
For these collected datasets, it is important to minimize the motion blur in the camera while also ensuring that you excite all axes of the IMU.
One needs to have at least one translational motion along with two degrees of orientation change for these calibration parameters to be observable (please see our recent paper on why: [[Degenerate Motion Analysis for Aided INS With Online Spatial and Temporal Sensor Calibration](https://ieeexplore.ieee.org/abstract/document/8616792)]).
We recommend having as much change in orientation as possible in order to ensure convergence.
1. Clone and build the [Kalibr](https://github.com/ethz-asl/kalibr/) toolbox
2. Print out a calibration board to use (we normally use the [Aprilgrid 6x6 0.8x0.8 m (A0 page)](https://drive.google.com/file/d/0B0T1sizOvRsUdjFJem9mQXdiMTQ/edit?usp=sharing))
3. Ensure that your sensor driver is publishing onto ROS with correct timestamps.
4. Sensor preparations
- Limit motion blur by decreasing exposure time
- Publish at high-ish framerate (20-30hz)
- Publish your inertial reading at a reasonable rate (200-500hz)
5. Record a ROS bag and ensure that the calibration board can be seen from different orientations, distances, and mostly in the center of the image. You should move in *smooth* non-jerky motions with a trajectory that excites as many orientation and translational directions as possible at the same time. A 30-60 second dataset is normally enough to allow for calibration.
6. Finally run the calibration
- Use the *kalibr\_calibrate\_imu\_camera*
- Input your static calibration file which will have the camera topics in it
- You will need to make an [imu.yaml](https://drive.google.com/file/d/0B0T1sizOvRsUSk9ReDlid0VSY3M/edit?usp=sharing) file with your noise parameters.
- Depending on how many frames are in your dataset this can take on the upwards of half a day.
7. Inspect the final result. You will want to make sure that the spline fitted to the inertial reading was properly fitted. Ensure that your estimated biases do not leave your 3-sigma bounds. If they do your trajectory was too dynamic, or your noise values are not good. Sanity check your final rotation and translation with hand-measured values.
*/

243
docs/gs-datasets.dox Normal file
View File

@@ -0,0 +1,243 @@
/**
@page gs-datasets Supported Datasets
@tableofcontents
@section gs-data-euroc The EuRoC MAV Dataset
The ETH ASL [EuRoC MAV dataset](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) @cite Burri2016IJRR is one of the most used datasets in the visual-inertial / simultaneous localization and mapping (SLAM) research literature.
The reason for this is the synchronised inertial+camera sensor data and the high quality groundtruth.
The dataset contains different sequences of varying difficulty of a Micro Aerial Vehicle (MAV) flying in an indoor room.
Monochrome stereo images are collected by two Aptina MT9V034 global shutter cameras at 20 frames per second, while an ADIS16448 MEMS inertial unit provides linear accelerations and angular velocities at a rate of 200 samples per second.
We recommend that most users start testing on this dataset before moving on to the other datasets that our system support or before trying with your own collected data.
The machine hall datasets have the MAV being picked up in the beginning and then set down, we normally skip this part, but it should be able to be handled by the filter if SLAM features are enabled.
Please take a look at the [run_ros_eth.sh](https://github.com/rpng/open_vins/blob/master/ov_msckf/scripts/run_ros_eth.sh) script for some reasonable default values (they might still need to be tuned).
@m_class{m-block m-warning}
@par Groundtruth on V1_01_easy
We have found that the groundtruth on the V1_01_easy dataset is not accurate in its orientation estimate.
We have recomputed this by optimizing the inertial and vicon readings in a graph to get the trajectory of the imu.
You can find the output at this [link](https://drive.google.com/drive/folders/1d62Q_RQwHzKLcIdUlTeBmojr7j0UL4sM?usp=sharing) and is what we normally use to evaluate the error on this dataset.
@m_div{m-text-center}
| Dataset Name | Length (m) | Dataset Link | Groundtruth Traj. | Config |
|-------------:|--------|--------------|------------------|------------------|
| Vicon Room 1 01 | 58 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/vicon_room1/V1_01_easy/V1_01_easy.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Vicon Room 1 02 | 76 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/vicon_room1/V1_02_medium/V1_02_medium.bag) , [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing)| [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Vicon Room 1 03 | 79 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/vicon_room1/V1_03_difficult/V1_03_difficult.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Vicon Room 2 01 | 37 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/vicon_room2/V2_01_easy/V2_01_easy.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Vicon Room 2 02 | 83 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/vicon_room2/V2_02_medium/V2_02_medium.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Vicon Room 2 03 | 86 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/vicon_room2/V2_03_difficult/V2_03_difficult.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Machine Hall 01 | 80 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/machine_hall/MH_01_easy/MH_01_easy.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Machine Hall 02 | 73 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/machine_hall/MH_02_easy/MH_02_easy.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Machine Hall 03 | 131 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/machine_hall/MH_03_medium/MH_03_medium.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Machine Hall 04 | 92 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/machine_hall/MH_04_difficult/MH_04_difficult.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
| Machine Hall 05 | 98 | [rosbag](http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/machine_hall/MH_05_difficult/MH_05_difficult.bag), [rosbag2](https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/euroc_mav) | [config](https://github.com/rpng/open_vins/blob/master/config/euroc_mav) |
@m_enddiv
@section gs-data-tumvi TUM Visual-Inertial Dataset
The TUM [Visual-Inertial Dataset](https://vision.in.tum.de/data/datasets/visual-inertial-dataset) @cite Schubert2018IROS is a more recent dataset that was presented to provide a way to evaluate state-of-the-art visual inertial odometry approaches.
As compared to the EuRoC MAV datasets, this dataset provides photometric calibration of the cameras which has not been available in any other visual-inertial dataset for researchers.
Monochrome stereo images are collected by two IDS uEye UI-3241LE-M-GL global shutter cameras at 20 frames per second, while a Bosch BMI160 inertial unit provides linear accelerations and angular velocities at a rate of 200 samples per second.
Not all datasets have groundtruth available throughout the entire trajectory as the motion capture system is limited to the starting and ending room.
There are quite a few very challenging outdoor handheld datasets which are a challenging direction for research.
Note that we focus on the room datasets as full 6 dof pose collection is available over the total trajectory.
@m_class{m-block m-warning}
@par Filter Initialization from Standstill
These datasets have very non-static starts, as they are handheld, and the standstill initialization has issues handling this.
Thus careful tuning of the imu initialization threshold is typically needed to ensure that the initialized orientation and the zero velocity assumption are valid.
Please take a look at the [run_ros_tumvi.sh](https://github.com/rpng/open_vins/blob/master/ov_msckf/scripts/run_ros_tumvi.sh) script for some reasonable default values (they might still need to be tuned).
@m_div{m-text-center}
| Dataset Name | Length (m) | Dataset Link | Groundtruth Traj. | Config |
|-------------:|--------|--------------|------------------|------------------|
| room1 | 147 | [rosbag](http://vision.in.tum.de/tumvi/calibrated/512_16/dataset-room1_512_16.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/tum_vi) | [config](https://github.com/rpng/open_vins/blob/master/config/tum_vi) |
| room2 | 142 | [rosbag](http://vision.in.tum.de/tumvi/calibrated/512_16/dataset-room2_512_16.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/tum_vi) | [config](https://github.com/rpng/open_vins/blob/master/config/tum_vi) |
| room3 | 136 | [rosbag](http://vision.in.tum.de/tumvi/calibrated/512_16/dataset-room3_512_16.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/tum_vi) | [config](https://github.com/rpng/open_vins/blob/master/config/tum_vi) |
| room4 | 69 | [rosbag](http://vision.in.tum.de/tumvi/calibrated/512_16/dataset-room4_512_16.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/tum_vi) | [config](https://github.com/rpng/open_vins/blob/master/config/tum_vi) |
| room5 | 132 | [rosbag](http://vision.in.tum.de/tumvi/calibrated/512_16/dataset-room5_512_16.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/tum_vi) | [config](https://github.com/rpng/open_vins/blob/master/config/tum_vi) |
| room6 | 67 | [rosbag](http://vision.in.tum.de/tumvi/calibrated/512_16/dataset-room6_512_16.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/tum_vi) | [config](https://github.com/rpng/open_vins/blob/master/config/tum_vi) |
@m_enddiv
@section gs-data-rpng RPNG OpenVINS Dataset
In addition to the community maintained datasets, we have also released a few datasets.
Please cite the OpenVINS paper if you use any of these datasets in your works.
Here are the specifics of the sensors that each dataset uses:
- ArUco Datasets:
- Core visual-inertial sensor is the [VI-Sensor](https://furgalep.github.io/bib/nikolic_icra14.pdf)
- Stereo global shutter images at 20 Hz
- ADIS16448 IMU at 200 Hz
- Kalibr calibration file can be found [here](https://drive.google.com/file/d/1I0C-z3ZrTKne4bdbgBI6CtH1Rk4EQim0/view?usp=sharing)
- Ironsides Datasets:
- Core visual-inertial sensor is the [ironsides](https://arxiv.org/pdf/1710.00893v1.pdf)
  - Has two [Reach RTK](https://docs.emlid.com/reach/) units, one subscribed to a base station for corrections
- Stereo global shutter fisheye images at 20 Hz
- InvenSense IMU at 200 Hz
- GPS fixes at 5 Hz (/reach01/tcpfix has corrections from [NYSNet](https://cors.dot.ny.gov/sbc))
- Kalibr calibration file can be found [here](https://drive.google.com/file/d/1bhn0GrIYNEeAabQAbWoP8l_514cJ0KrZ/view?usp=sharing)
@m_class{m-block m-warning}
@par Monocular Camera
Currently there are issues with running with a monocular camera on the Ironsides Neighborhood car datasets.
This is likely due to the near-constant velocity and "smoothness" of the trajectory.
Please refer to @cite Lee2020IROS and @cite Wu2017ICRA for details.
Most of these datasets do not have perfect calibration parameters, and some are not time synchronised.
Thus, please ensure that you have enabled online calibration of these parameters.
Additionally, there is no groundtruth for these datasets, but some do include GPS messages if you wish to compare relative to something.
@m_div{m-text-center}
| Dataset Name | Length (m) | Dataset Link | Groundtruth Traj. | Config |
|-------------:|--------|--------------|------------------|------------------|
| ArUco Room 01 | 27 | [rosbag](https://drive.google.com/file/d/1ytjo8V6pCroaVd8-QSop7R4DbsvvKyRQ/view?usp=sharing) | none | [config aruco](https://github.com/rpng/open_vins/blob/master/config/rpng_aruco) |
| ArUco Room 02 | 93 | [rosbag](https://drive.google.com/file/d/1l_hnPUW6ufqxPtrLqRRHHI4mfGRZB1ha/view?usp=sharing) | none | [config aruco](https://github.com/rpng/open_vins/blob/master/config/rpng_aruco) |
| ArUco Hallway 01 | 190 | [rosbag](https://drive.google.com/file/d/1FQBo3uHqRd0qm8GUb50Q-sj5gukcwaoU/view?usp=sharing) | none | [config aruco](https://github.com/rpng/open_vins/blob/master/config/rpng_aruco) |
| ArUco Hallway 02 | 105 | [rosbag](https://drive.google.com/file/d/1oAbnV3MPOeaUSjnSc3g8t-pWV1nVjbys/view?usp=sharing) | none | [config aruco](https://github.com/rpng/open_vins/blob/master/config/rpng_aruco) |
| Neighborhood 01 | 2300 | [rosbag](https://drive.google.com/file/d/1N07SDbaLEkq9pVEvi6oiHpavaRuFs3j2/view?usp=sharing) | none | [config ironsides](https://github.com/rpng/open_vins/blob/master/config/rpng_ironsides) |
| Neighborhood 02 | 7400 | [rosbag](https://drive.google.com/file/d/1QEUi40sO8OkVXEGF5JojiiZMHMSiSqtg/view?usp=sharing) | none | [config ironsides](https://github.com/rpng/open_vins/blob/master/config/rpng_ironsides) |
@m_enddiv
@section gs-data-uzhfpv UZH-FPV Drone Racing Dataset
The [UZH-FPV Drone Racing Dataset](https://fpv.ifi.uzh.ch/) @cite Schubert2018IROS is a dataset focused on high-speed aggressive 6dof motion with very high levels of optical flow as compared to other datasets.
A FPV drone racing quadrotor has on board a Qualcomm Snapdragon Flight board which can provide inertial measurements and has two 640x480 grayscale global shutter fisheye cameras attached.
The groundtruth is collected with a Leica Nova MS60 laser tracker.
There are four total sensor configurations and calibrations provided, including: indoor forward facing stereo, indoor 45 degree stereo, outdoor forward facing, and outdoor 45 degree.
A top speed of 12.8 m/s (28 mph) is reached in the indoor scenarios, and 23.4 m/s (52 mph) is reached in the outdoor datasets.
Each of these datasets is picked up in the beginning and then set down, we normally skip this part, but it should be able to be handled by the filter if SLAM features are enabled.
Please take a look at the [run_ros_uzhfpv.sh](https://github.com/rpng/open_vins/blob/master/ov_msckf/scripts/run_ros_uzhfpv.sh) script for some reasonable default values (they might still need to be tuned).
@m_class{m-block m-warning}
@par Dataset Groundtruthing
Only the Absolute Trajectory Error (ATE) should be used as a metric for this dataset.
This is due to inaccurate groundtruth orientation estimates which are explained in their [report](https://fpv.ifi.uzh.ch/wp-content/uploads/2020/11/Ground-Truth-Rotation-Issue-Report.pdf) on the issue.
The basic summary is that it is hard to get accurate orientation information due to the point-based Leica measurements used to groundtruth.
@m_div{m-text-center}
| Dataset Name | Length (m) | Dataset Link | Groundtruth Traj. | Config |
|-------------:|--------|--------------|------------------|------------------|
| Indoor 5 | 157 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_forward_5_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor) |
| Indoor 6 | 204 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_forward_6_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor) |
| Indoor 7 | 314 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_forward_7_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor) |
| Indoor 9 | 136 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_forward_9_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor) |
| Indoor 10 | 129 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_forward_10_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor) |
| Indoor 45deg 2 | 207 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_45_2_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor_45) |
| Indoor 45deg 4 | 164 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_45_4_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor_45) |
| Indoor 45deg 12 | 112 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_45_12_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor_45) |
| Indoor 45deg 13 | 159 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_45_13_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor_45) |
| Indoor 45deg 14 | 211 | [rosbag](http://rpg.ifi.uzh.ch/datasets/uzh-fpv-newer-versions/v2/indoor_45_14_snapdragon_with_gt.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/uzh_fpv) | [config](https://github.com/rpng/open_vins/blob/master/config/uzhfpv_indoor_45) |
@m_enddiv
@section gs-data-kaist KAIST Urban Dataset
The [KAIST urban dataset](https://sites.google.com/view/complex-urban-dataset) @cite Jeong2019IJRR is a dataset focus on autonomous driving and localization in challenging complex urban environments.
The dataset was collected in Korea with a vehicle equipped with a stereo camera pair, 2d SICK LiDARs, 3d Velodyne LiDAR, Xsens IMU, fiber optic gyro (FoG), wheel encoders, and RTK GPS.
The camera is 10 Hz, while the Xsens IMU is 100 Hz sensing rate.
A groundtruth "baseline" trajectory is also provided which is the resulting output from fusion of the FoG, RTK GPS, and wheel encoders.
@m_class{m-block m-warning}
@par Dynamic Environments
A challenging open research question is being able to handle dynamic objects seen from the cameras.
By default we rely on our tracking 8 point RANSAC to handle these dynamic objects.
In most of the KAIST datasets the majority of the scene can be taken up by other moving vehicles, thus the performance can suffer.
Please be aware of this fact.
We recommend converting the KAIST file format into a ROS bag format.
If you are using ROS2 then you should first convert into a ROS1 then convert following the @ref dev-ros1-to-ros2 .
Follow the instructions on the [kaist2bag](https://github.com/rpng/kaist2bag) repository:
@code{.shell-session}
git clone https://github.com/irapkaist/irp_sen_msgs.git
git clone https://github.com/rpng/kaist2bag.git
@endcode
@m_class{m-block m-warning}
@par Monocular Camera
Currently there are issues with running with a monocular camera on this dataset.
This is likely due to the near-constant velocity and "smoothness" of the trajectory.
Please refer to @cite Lee2020IROS and @cite Wu2017ICRA for details.
You can also try to use the [file_player](https://github.com/irapkaist/file_player) to publish live.
It is important to *disable* the "skip stop section" to ensure that we have continuous sensor feeds.
Typically we process the datasets at 1.5x rate so we get a ~20 Hz image feed and the datasets can be processed in a more efficient manner.
@m_div{m-text-center}
| Dataset Name | Length (km) | Dataset Link | Groundtruth Traj. | Example Launch |
|-------------:|--------|--------------|------------------|------------------|
| Urban 28 | 11.47 | [download](https://sites.google.com/view/complex-urban-dataset/download-lidar-stereo?authuser=0) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist) |
| Urban 38 | 11.42 | [download](https://sites.google.com/view/complex-urban-dataset/download-lidar-stereo?authuser=0) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist) |
| Urban 39 | 11.06 | [download](https://sites.google.com/view/complex-urban-dataset/download-lidar-stereo?authuser=0) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist) |
@m_enddiv
@section gs-data-kaist-vio KAIST VIO Dataset
The [KAIST VIO dataset](https://github.com/zinuok/kaistviodataset) @cite Jeon2021RAL is a dataset of a MAV in an indoor 3.15 x 3.60 x 2.50 meter environment which undergoes various trajectory motions.
The camera is an Intel RealSense D435i at 25 Hz, while the IMU is 100 Hz sensing rate from the Pixhawk 4 unit.
A groundtruth "baseline" trajectory is also provided from an OptiTrack Mocap system at 50 Hz, the bag files have the marker body frame to IMU frame already applied.
This topic has been provided in ov_data for convenience's sake.
@m_div{m-text-center}
| Dataset Name | Length (m) | Dataset Link | Groundtruth Traj. | Example Launch |
|-------------:|--------|--------------|------------------|------------------|
| circle | 29.99 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/circle/circle.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| circle_fast | 64.15 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/circle/circle_fast.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| circle_head | 35.05 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/circle/circle_head.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| infinite | 29.35 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/infinite/infinite.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| infinite_fast | 54.24 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/infinite/infinite_fast.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| infinite_head | 37.45 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/infinite/infinite_head.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| rotation | 7.82 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/rotation/rotation.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| rotation_fast | 14.55 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/rotation/rotation_fast.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| square | 41.94 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/square/square.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| square_fast | 44.07 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/square/square_fast.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
| square_head | 50.00 | [download](https://urserver.kaist.ac.kr/publicdata/KAIST_VIO_Dataset/square/square_head.bag) | [link](https://github.com/rpng/open_vins/tree/master/ov_data/kaist_vio) | [config](https://github.com/rpng/open_vins/blob/master/config/kaist_vio) |
@m_enddiv
*/

221
docs/gs-installing.dox Normal file
View File

@@ -0,0 +1,221 @@
/**
@page gs-installing Installation Guide
@tableofcontents
@section gs-install-ros ROS Dependency
Our codebase is built on top of the [Robot Operating System (ROS)](https://www.ros.org/) and has been tested building on Ubuntu 16.04, 18.04, 20.04 systems with ROS Kinetic, Melodic, and Noetic.
We also recommend installing the [catkin_tools](https://github.com/catkin/catkin_tools) build for easy ROS building.
All ROS installs include [OpenCV](https://github.com/opencv/opencv), but if you need to build OpenCV from source ensure you build the contributed modules as we use Aruco feature extraction.
See the [opencv_contrib](https://github.com/opencv/opencv_contrib) readme on how to configure your cmake command when you build the core OpenCV library.
We have tested building with OpenCV 3.2, 3.3, 3.4, 4.2, and 4.5.
Please see the official instructions to install ROS:
- [Ubuntu 16.04 ROS 1 Kinetic](http://wiki.ros.org/kinetic/Installation/Ubuntu) (uses OpenCV 3.3)
- [Ubuntu 18.04 ROS 1 Melodic](http://wiki.ros.org/melodic/Installation/Ubuntu) (uses OpenCV 3.2)
- [Ubuntu 20.04 ROS 1 Noetic](http://wiki.ros.org/noetic/Installation/Ubuntu) (uses OpenCV 4.2)
- [Ubuntu 18.04 ROS 2 Dashing](https://docs.ros.org/en/dashing/) (uses OpenCV 3.2)
- [Ubuntu 20.04 ROS 2 Galactic](https://docs.ros.org/en/galactic/) (uses OpenCV 4.2)
We do support ROS-free builds, but don't recommend using this interface as we have limited support for it.
You will need to ensure you have installed OpenCV 3 or 4, Eigen3, and Ceres which are the only dependencies.
For Ubuntu linux-based system the system dependencies are:
@code{.shell-session}
sudo apt-get install libeigen3-dev libboost-all-dev libceres-dev
@endcode
If ROS is not found on the system, one can use command line options to run the simulation without any visualization or `cmake -DENABLE_ROS=OFF ..`.
If you are using the ROS-free interface, you will need to properly construct the @ref ov_msckf::VioManagerOptions struct with proper information and feed inertial and image data into the correct functions.
The simulator binary `run_simulation` can give you an example on how to do this.
@subsection gs-install-ros-1 ROS1 Install
To install we can perform the following:
@code{.shell-session}
sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
sudo apt-get update
export ROS1_DISTRO=noetic # kinetic=16.04, melodic=18.04, noetic=20.04
sudo apt-get install ros-$ROS1_DISTRO-desktop-full
sudo apt-get install python-catkin-tools # ubuntu 16.04, 18.04
sudo apt-get install python3-catkin-tools python3-osrf-pycommon # ubuntu 20.04
sudo apt-get install libeigen3-dev libboost-all-dev libceres-dev
@endcode
If you only have ROS1 on your system and are not cross installing ROS2, then you can run the following to append this to your bashrc file.
Every time a terminal is opened, this will load the ROS1 environmental variables required to find all dependencies for building and system installed packages.
@code{.shell-session}
echo "source /opt/ros/$ROS1_DISTRO/setup.bash" >> ~/.bashrc
source ~/.bashrc
@endcode
Otherwise, if you want to also install ROS2, you must *NOT* have a global source.
Instead we can have a nice helper command which can be used when we build a ROS1 workspace.
Additionally, the `source_devel` command can be used when in your workspace root to source built packages.
Once appended simply run `source_ros1` to load your ROS1 environmental variables.
@code{.shell-session}
echo "alias source_ros1=\"source /opt/ros/$ROS1_DISTRO/setup.bash\"" >> ~/.bashrc
echo "alias source_devel=\"source devel/setup.bash\"" >> ~/.bashrc
source ~/.bashrc
@endcode
@subsection gs-install-ros-2 ROS2 Install
To install we can perform the following:
@code{.shell-session}
sudo apt update && sudo apt install curl gnupg lsb-release
sudo curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/ros2.list > /dev/null
sudo apt-get update
export ROS2_DISTRO=galactic # dashing=18.04, galactic=20.04
sudo apt install ros-$ROS2_DISTRO-desktop
sudo apt-get install ros-$ROS2_DISTRO-ros2bag ros-$ROS2_DISTRO-rosbag2* # rosbag utilities (seems to be separate)
sudo apt-get install libeigen3-dev libboost-all-dev libceres-dev
@endcode
If you only have ROS2 on your system and are not cross installing ROS1, then you can run the following to append this to your bashrc file.
Every time a terminal is opened, this will load the ROS2 environmental variables required to find all dependencies for building and system installed packages.
@code{.shell-session}
echo "source /opt/ros/$ROS2_DISTRO/setup.bash" >> ~/.bashrc
source ~/.bashrc
@endcode
Otherwise, if you want to also install ROS1, you must *NOT* have a global source.
Instead we can have a nice helper command which can be used when we build a ROS2 workspace.
Additionally, the `source_install` command can be used when in your workspace root to source built packages.
Once appended simply run `source_ros2` to load your ROS2 environmental variables.
@code{.shell-session}
echo "alias source_ros2=\"source /opt/ros/$ROS2_DISTRO/setup.bash\"" >> ~/.bashrc
echo "alias source_install=\"source install/setup.bash\"" >> ~/.bashrc
source ~/.bashrc
@endcode
@section gs-install-openvins Cloning the OpenVINS Project
Now that we have ROS installed we can setup a catkin workspace and build the project!
If you did not install the catkin_tools build system, you should be able to build using the standard `catkin_make` command that is included with ROS.
If you run into any problems please google search the issue first and if you are unable to find a solution please open an issue on our github page.
After the build is successful please follow the @ref gs-tutorial guide on getting a dataset and running the system.
There are additional options that users might be interested in.
Configure these with `catkin build -D<option_name>=OFF` or `cmake -D<option_name>=ON ..` in the ROS free case.
- `ENABLE_ROS` - (default ON) - Enable or disable building with ROS (if it is found)
- `ENABLE_ARUCO_TAGS` - (default ON) - Enable or disable aruco tag (disable if no contrib modules)
- `BUILD_OV_EVAL` - (default ON) - Enable or disable building of ov_eval
- `DISABLE_MATPLOTLIB` - (default OFF) - Disable or enable matplotlib plot scripts in ov_eval
@code{.shell-session}
mkdir -p ~/workspace/catkin_ws_ov/src/
cd ~/workspace/catkin_ws_ov/src/
git clone https://github.com/rpng/open_vins/
cd ..
catkin build # ROS1
colcon build # ROS2
colcon build --event-handlers console_cohesion+ --packages-select ov_core ov_init ov_msckf ov_eval # ROS2 with verbose output
@endcode
If you do not have ROS installed, then you can do the following:
@code{.shell-session}
cd ~/github/
git clone https://github.com/rpng/open_vins/
cd open_vins/ov_msckf/
mkdir build
cd build
cmake ..
make -j4
@endcode
@section gs-install-oveval Additional Evaluation Requirements
If you want to use the plotting utility wrapper of [matplotlib-cpp](https://github.com/lava/matplotlib-cpp) to generate plots directly from running the cpp code in ov_eval you will need to make sure you have a valid Python 2.7 or 3 install of matplotlib.
On ubuntu 16.04 you can do the following command which should take care of everything you need.
If you can't link properly, make sure you can call it from Python normally (i.e. that your Python environment is not broken).
You can disable this visualization if it is broken for you by passing the -DDISABLE_MATPLOTLIB=ON parameter to your catkin build.
Additionally if you wish to record CPU and memory usage of the node, you will need to install the [psutil](https://github.com/giampaolo/psutil) library.
@code{.shell-session}
sudo apt-get install python2.7-dev python-matplotlib python-numpy python-psutil # for python2 systems
sudo apt-get install python3-dev python3-matplotlib python3-numpy python3-psutil python3-tk # for python3 systems
catkin build -DDISABLE_MATPLOTLIB=OFF # build with viz (default)
catkin build -DDISABLE_MATPLOTLIB=ON # build without viz
@endcode
@section gs-install-opencv OpenCV Dependency (from source)
We leverage [OpenCV](https://opencv.org/) for this project which you can typically use the install from ROS.
If the ROS version of [cv_bridge](http://wiki.ros.org/cv_bridge) does not work (or are using non-ROS building), then you can try building OpenCV from source ensuring you include the contrib modules.
One should make sure you can see some of the "contrib" (e.g. aruco) when you cmake to ensure you have linked to the contrib modules.
@m_class{m-block m-warning}
@par OpenCV Source Installation
Try to first build with your system / ROS OpenCV.
Only fall back onto this if it does not allow you to compile, or want a newer version!
@code{.shell-session}
git clone https://github.com/opencv/opencv/
git clone https://github.com/opencv/opencv_contrib/
mkdir opencv/build/
cd opencv/build/
cmake -DOPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules ..
make -j8
sudo make install
@endcode
If you do not want to build the modules, you should also be able to do this (while it is not as well tested).
The ArucoTag tracker depends on a non-free module in the contrib repository, thus this will need to be disabled.
You can disable this with `catkin build -DENABLE_ARUCO_TAGS=OFF` or `cmake -DENABLE_ARUCO_TAGS=OFF ..` in your build folder.
@subsection gs-install-ceres Ceres Solver
Ceres solver @cite ceres-solver is required for dynamic initialization and backend optimization.
Please refer to their [documentation](http://ceres-solver.org/installation.html#linux) for specifics to your platform.
It should be able to build on most platforms (including ARM android devices).
To install we can perform the following:
@m_class{m-block m-warning}
@par Ceres Source Installation
Try to first build with your system with `sudo apt-get install libceres-dev`.
Only fall back onto this if it does not allow you to compile, or want a newer version!
@code{.shell-session}
sudo apt-get install -y cmake libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev libsuitesparse-dev
CERES_VERSION="2.0.0"
git clone https://ceres-solver.googlesource.com/ceres-solver
cd ceres-solver
git checkout tags/${CERES_VERSION}
mkdir build && cd build
cmake ..
make
sudo make install
@endcode
*/

161
docs/gs-tutorial.dox Normal file
View File

@@ -0,0 +1,161 @@
/**
@page gs-tutorial Simple Tutorial
@tableofcontents
This guide assumes that you have already built the project successfully and are now ready to run the program on some datasets.
If you have not compiled the program yet please follow the @ref gs-installing guide.
The first thing we will do is download a dataset to run the program on.
In this tutorial we will run on the [EuRoC MAV Dataset](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) @cite Burri2016IJRR which provides monochrome stereo images at 20Hz with a MEMS ADIS16448 IMU at 200Hz.
@m_div{m-col-m-6 m-button m-primary}
<a href="http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/vicon_room1/V1_01_easy/V1_01_easy.bag">
@m_div{m-big}Download ROS 1 Bag@m_enddiv
@m_div{m-small}Vicon Room 1 01 Easy @m_enddiv
</a>
@m_enddiv
@m_div{m-col-m-6 m-button m-primary}
<a href="https://drive.google.com/drive/folders/1xQ1KcZhZ5pioPXTyrZBN6Mjxkfpcd_B3?usp=sharing">
@m_div{m-big}Download ROS 2 Bag@m_enddiv
@m_div{m-small}Vicon Room 1 01 Easy @m_enddiv
</a>
@m_enddiv
All configuration information for the system is exposed to the user in the configuration file, and can be overridden in the launch file.
We will create a launch file that will launch our MSCKF estimation node and feed the ROS bag into the system.
One can take a look in the [launch](https://github.com/rpng/open_vins/tree/master/ov_msckf/launch) folder for more examples.
For OpenVINS we need to define a series of files:
- `estimator_config.yaml` - Contains OpenVINS specific configuration files. Each of these can be overridden in the launch file.
- `kalibr_imu_chain.yaml` - IMU noise parameters and topic information based on the sensor in the dataset. This should be the same as Kalibr's (see @ref gs-calib-imu-static).
- `kalibr_imucam_chain.yaml` - Camera to IMU transformation and camera intrinsics. This should be the same as Kalibr's (see @ref gs-calib-cam-static).
@section gs-tutorial-ros1 ROS 1 Tutorial
The ROS1 system uses the [roslaunch](http://wiki.ros.org/roslaunch) system to manage and launch nodes.
These files can launch multiple nodes, and each node can have its own set of parameters set.
Consider the below launch file.
We can see the main parameter that is being passed into the estimator is the `config_path` file which has all configuration for this specific dataset.
Additionally, we can see that we are launching the `run_subscribe_msckf` ROS 1 node, and are going to be overriding the `use_stereo` and `max_cameras` with the specified values.
ROS parameters always have priority, and you should see in the console that they have been successfully overridden.
@m_div{m-container-inflate}
@code{.xml}
<launch>
<!-- what config we are going to run (should match folder name) -->
<arg name="verbosity" default="INFO" /> <!-- ALL, DEBUG, INFO, WARNING, ERROR, SILENT -->
<arg name="config" default="euroc_mav" /> <!-- euroc_mav, tum_vi, rpng_aruco -->
<arg name="config_path" default="$(find ov_msckf)/../config/$(arg config)/estimator_config.yaml" />
<!-- MASTER NODE! -->
<node name="run_subscribe_msckf" pkg="ov_msckf" type="run_subscribe_msckf" output="screen">
<param name="verbosity" type="string" value="$(arg verbosity)" />
<param name="config_path" type="string" value="$(arg config_path)" />
<param name="use_stereo" type="bool" value="true" />
<param name="max_cameras" type="int" value="2" />
</node>
</launch>
@endcode
@m_enddiv
Since the configuration file for the EurocMav dataset has already been created, we can simply do the following.
Note it is good practice to run a `roscore` that stays active so that you do not need to relaunch rviz or other packages.
@code{.shell-session}
roscore # term 0
source devel/setup.bash # term 1
roslaunch ov_msckf subscribe.launch config:=euroc_mav
@endcode
In another two terminals we can run the following.
For RVIZ, one can open the `ov_msckf/launch/display.rviz` configuration file.
You should see the system publishing features and a state estimate.
@code{.shell-session}
rviz # term 2
rosbag play V1_01_easy.bag # term 3
@endcode
@section gs-tutorial-ros2 ROS 2 Tutorial
For ROS 2, launch files and nodes have become a bit more cumbersome due to the removal of a centralized communication method.
This allows for more distributed systems, but places a bit more burden on the developer to perform integration.
The launch system is described in [this](https://design.ros2.org/articles/roslaunch.html) design article.
Consider the following launch file which does the same as the ROS 1 launch file above.
@m_div{m-container-inflate}
@code{.py}
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, LogInfo, OpaqueFunction
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration, TextSubstitution
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory, get_package_prefix
import os
import sys
# Launch arguments exposed to the user; each can be overridden on the command
# line, e.g. `ros2 launch ov_msckf subscribe.launch.py config:=tum_vi`.
launch_args = [
    DeclareLaunchArgument(name='namespace', default_value='', description='namespace'),
    DeclareLaunchArgument(name='config', default_value='euroc_mav', description='euroc_mav, tum_vi, rpng_aruco...'),
    DeclareLaunchArgument(name='verbosity', default_value='INFO', description='ALL, DEBUG, INFO, WARNING, ERROR, SILENT'),
    DeclareLaunchArgument(name='use_stereo', default_value='true', description=''),
    DeclareLaunchArgument(name='max_cameras', default_value='2', description='')
]
def launch_setup(context):
    """Resolve the selected config folder and build the estimator node.

    :param context: launch context used to resolve substitution values.
    :return: a list of launch actions -- either a single ``LogInfo`` error
             message (unknown config, node not started) or the configured
             ``run_subscribe_msckf`` ``Node``.
    """
    # Available configs are the folders installed into the ov_msckf share directory.
    configs_dir = os.path.join(get_package_share_directory('ov_msckf'), 'config')
    available_configs = os.listdir(configs_dir)
    config = LaunchConfiguration('config').perform(context)
    if config not in available_configs:
        # Abort with a readable error instead of letting the node crash on a bad path.
        return [LogInfo(msg='ERROR: unknown config: \'{}\' - Available configs are: {} - not starting OpenVINS'.format(config, ', '.join(available_configs)))]
    config_path = os.path.join(configs_dir, config, 'estimator_config.yaml')
    # ROS parameters passed here always override the values inside the yaml config file.
    node1 = Node(package='ov_msckf',
                 executable='run_subscribe_msckf',
                 namespace=LaunchConfiguration('namespace'),
                 parameters=[{'verbosity': LaunchConfiguration('verbosity')},
                             {'use_stereo': LaunchConfiguration('use_stereo')},
                             {'max_cameras': LaunchConfiguration('max_cameras')},
                             {'config_path': config_path}])
    return [node1]
def generate_launch_description():
    """Entry point invoked by ``ros2 launch``.

    Declares the launch arguments and defers node construction to
    ``launch_setup`` so argument values can be validated first.
    """
    description = LaunchDescription(launch_args)
    # OpaqueFunction runs launch_setup after argument substitution has happened.
    description.add_action(OpaqueFunction(function=launch_setup))
    return description
@endcode
@m_enddiv
We can see that first the `launch_setup` function defines the nodes that we will be launching from this file.
Then the `LaunchDescription` is created given the launch arguments and the node is added to it and returned to ROS.
We can then launch it using the following:
@code{.shell-session}
source install/setup.bash
ros2 launch ov_msckf subscribe.launch.py config:=euroc_mav
@endcode
We can then use the ROS2 rosbag file.
First make sure you have installed the rosbag2 and all its backends.
If you downloaded the bag above you should already have a valid bag format.
Otherwise, you will need to convert it following @ref dev-ros1-to-ros2 .
A "bag" is now defined by a db3 sqlite database and config yaml file in a folder.
In another terminal we can run the following:
@code{.shell-session}
ros2 bag play V1_01_easy
@endcode
*/

BIN
docs/img/allanvar_gyro.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

BIN
docs/img/clion_docker.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 82 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 142 KiB

BIN
docs/img/eval/dataset.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 130 KiB

BIN
docs/img/eval/plot_xy.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 96 KiB

BIN
docs/img/eval/plot_z.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

BIN
docs/img/eval/singlerun.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 224 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 78 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 147 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 186 KiB

BIN
docs/img/favicon-dark.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

BIN
docs/img/favicon-light.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.7 KiB

BIN
docs/img/rig_hydra_old.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 222 KiB

342
docs/img/splineframes.ipe Normal file
View File

@@ -0,0 +1,342 @@
<?xml version="1.0"?>
<!DOCTYPE ipe SYSTEM "ipe.dtd">
<ipe version="70107" creator="Ipe 7.1.10">
<info created="D:20190808093354" modified="D:20190812102717"/>
<ipestyle name="basic">
<symbol name="arrow/arc(spx)">
<path stroke="sym-stroke" fill="sym-stroke" pen="sym-pen">
0 0 m
-1 0.333 l
-1 -0.333 l
h
</path>
</symbol>
<symbol name="arrow/farc(spx)">
<path stroke="sym-stroke" fill="white" pen="sym-pen">
0 0 m
-1 0.333 l
-1 -0.333 l
h
</path>
</symbol>
<symbol name="arrow/ptarc(spx)">
<path stroke="sym-stroke" fill="sym-stroke" pen="sym-pen">
0 0 m
-1 0.333 l
-0.8 0 l
-1 -0.333 l
h
</path>
</symbol>
<symbol name="arrow/fptarc(spx)">
<path stroke="sym-stroke" fill="white" pen="sym-pen">
0 0 m
-1 0.333 l
-0.8 0 l
-1 -0.333 l
h
</path>
</symbol>
<symbol name="mark/circle(sx)" transformations="translations">
<path fill="sym-stroke">
0.6 0 0 0.6 0 0 e
0.4 0 0 0.4 0 0 e
</path>
</symbol>
<symbol name="mark/disk(sx)" transformations="translations">
<path fill="sym-stroke">
0.6 0 0 0.6 0 0 e
</path>
</symbol>
<symbol name="mark/fdisk(sfx)" transformations="translations">
<group>
<path fill="sym-fill">
0.5 0 0 0.5 0 0 e
</path>
<path fill="sym-stroke" fillrule="eofill">
0.6 0 0 0.6 0 0 e
0.4 0 0 0.4 0 0 e
</path>
</group>
</symbol>
<symbol name="mark/box(sx)" transformations="translations">
<path fill="sym-stroke" fillrule="eofill">
-0.6 -0.6 m
0.6 -0.6 l
0.6 0.6 l
-0.6 0.6 l
h
-0.4 -0.4 m
0.4 -0.4 l
0.4 0.4 l
-0.4 0.4 l
h
</path>
</symbol>
<symbol name="mark/square(sx)" transformations="translations">
<path fill="sym-stroke">
-0.6 -0.6 m
0.6 -0.6 l
0.6 0.6 l
-0.6 0.6 l
h
</path>
</symbol>
<symbol name="mark/fsquare(sfx)" transformations="translations">
<group>
<path fill="sym-fill">
-0.5 -0.5 m
0.5 -0.5 l
0.5 0.5 l
-0.5 0.5 l
h
</path>
<path fill="sym-stroke" fillrule="eofill">
-0.6 -0.6 m
0.6 -0.6 l
0.6 0.6 l
-0.6 0.6 l
h
-0.4 -0.4 m
0.4 -0.4 l
0.4 0.4 l
-0.4 0.4 l
h
</path>
</group>
</symbol>
<symbol name="mark/cross(sx)" transformations="translations">
<group>
<path fill="sym-stroke">
-0.43 -0.57 m
0.57 0.43 l
0.43 0.57 l
-0.57 -0.43 l
h
</path>
<path fill="sym-stroke">
-0.43 0.57 m
0.57 -0.43 l
0.43 -0.57 l
-0.57 0.43 l
h
</path>
</group>
</symbol>
<symbol name="arrow/fnormal(spx)">
<path stroke="sym-stroke" fill="white" pen="sym-pen">
0 0 m
-1 0.333 l
-1 -0.333 l
h
</path>
</symbol>
<symbol name="arrow/pointed(spx)">
<path stroke="sym-stroke" fill="sym-stroke" pen="sym-pen">
0 0 m
-1 0.333 l
-0.8 0 l
-1 -0.333 l
h
</path>
</symbol>
<symbol name="arrow/fpointed(spx)">
<path stroke="sym-stroke" fill="white" pen="sym-pen">
0 0 m
-1 0.333 l
-0.8 0 l
-1 -0.333 l
h
</path>
</symbol>
<symbol name="arrow/linear(spx)">
<path stroke="sym-stroke" pen="sym-pen">
-1 0.333 m
0 0 l
-1 -0.333 l
</path>
</symbol>
<symbol name="arrow/fdouble(spx)">
<path stroke="sym-stroke" fill="white" pen="sym-pen">
0 0 m
-1 0.333 l
-1 -0.333 l
h
-1 0 m
-2 0.333 l
-2 -0.333 l
h
</path>
</symbol>
<symbol name="arrow/double(spx)">
<path stroke="sym-stroke" fill="sym-stroke" pen="sym-pen">
0 0 m
-1 0.333 l
-1 -0.333 l
h
-1 0 m
-2 0.333 l
-2 -0.333 l
h
</path>
</symbol>
<pen name="heavier" value="0.8"/>
<pen name="fat" value="1.2"/>
<pen name="ultrafat" value="2"/>
<symbolsize name="large" value="5"/>
<symbolsize name="small" value="2"/>
<symbolsize name="tiny" value="1.1"/>
<arrowsize name="large" value="10"/>
<arrowsize name="small" value="5"/>
<arrowsize name="tiny" value="3"/>
<color name="red" value="1 0 0"/>
<color name="green" value="0 1 0"/>
<color name="blue" value="0 0 1"/>
<color name="yellow" value="1 1 0"/>
<color name="orange" value="1 0.647 0"/>
<color name="gold" value="1 0.843 0"/>
<color name="purple" value="0.627 0.125 0.941"/>
<color name="gray" value="0.745"/>
<color name="brown" value="0.647 0.165 0.165"/>
<color name="navy" value="0 0 0.502"/>
<color name="pink" value="1 0.753 0.796"/>
<color name="seagreen" value="0.18 0.545 0.341"/>
<color name="turquoise" value="0.251 0.878 0.816"/>
<color name="violet" value="0.933 0.51 0.933"/>
<color name="darkblue" value="0 0 0.545"/>
<color name="darkcyan" value="0 0.545 0.545"/>
<color name="darkgray" value="0.663"/>
<color name="darkgreen" value="0 0.392 0"/>
<color name="darkmagenta" value="0.545 0 0.545"/>
<color name="darkorange" value="1 0.549 0"/>
<color name="darkred" value="0.545 0 0"/>
<color name="lightblue" value="0.678 0.847 0.902"/>
<color name="lightcyan" value="0.878 1 1"/>
<color name="lightgray" value="0.827"/>
<color name="lightgreen" value="0.565 0.933 0.565"/>
<color name="lightyellow" value="1 1 0.878"/>
<dashstyle name="dashed" value="[4] 0"/>
<dashstyle name="dotted" value="[1 3] 0"/>
<dashstyle name="dash dotted" value="[4 2 1 2] 0"/>
<dashstyle name="dash dot dotted" value="[4 2 1 2 1 2] 0"/>
<textsize name="large" value="\large"/>
<textsize name="Large" value="\Large"/>
<textsize name="LARGE" value="\LARGE"/>
<textsize name="huge" value="\huge"/>
<textsize name="Huge" value="\Huge"/>
<textsize name="small" value="\small"/>
<textsize name="footnote" value="\footnotesize"/>
<textsize name="tiny" value="\tiny"/>
<textstyle name="center" begin="\begin{center}" end="\end{center}"/>
<textstyle name="itemize" begin="\begin{itemize}" end="\end{itemize}"/>
<textstyle name="item" begin="\begin{itemize}\item{}" end="\end{itemize}"/>
<gridsize name="4 pts" value="4"/>
<gridsize name="8 pts (~3 mm)" value="8"/>
<gridsize name="16 pts (~6 mm)" value="16"/>
<gridsize name="32 pts (~12 mm)" value="32"/>
<gridsize name="10 pts (~3.5 mm)" value="10"/>
<gridsize name="20 pts (~7 mm)" value="20"/>
<gridsize name="14 pts (~5 mm)" value="14"/>
<gridsize name="28 pts (~10 mm)" value="28"/>
<gridsize name="56 pts (~20 mm)" value="56"/>
<anglesize name="90 deg" value="90"/>
<anglesize name="60 deg" value="60"/>
<anglesize name="45 deg" value="45"/>
<anglesize name="30 deg" value="30"/>
<anglesize name="22.5 deg" value="22.5"/>
<opacity name="10%" value="0.1"/>
<opacity name="30%" value="0.3"/>
<opacity name="50%" value="0.5"/>
<opacity name="75%" value="0.75"/>
<tiling name="falling" angle="-60" step="4" width="1"/>
<tiling name="rising" angle="30" step="4" width="1"/>
</ipestyle>
<page>
<layer name="alpha"/>
<view layers="alpha" active="alpha"/>
<path layer="alpha" matrix="1 0 0 1 -16.5524 -17.8398" stroke="gray" arrow="normal/normal">
64 768 m
64 784 l
</path>
<path matrix="0.922378 0 0 0.505852 -11.734 361.801" stroke="gray" arrow="normal/normal">
64 768 m
80 784 l
</path>
<path matrix="1 0 0 1 -16.5524 -17.8398" stroke="gray" arrow="normal/normal">
64 768 m
80 760 l
</path>
<path matrix="0.999999 0.00140075 -0.00140075 0.999999 49.0937 -19.0546" stroke="gray" arrow="normal/normal">
64 768 m
64 784 l
</path>
<path matrix="1.11708 0.00000328 -0.00085814 0.00128482 41.0342 748.47" stroke="gray" arrow="normal/normal">
64 768 m
80 784 l
</path>
<path matrix="0.146724 0.00226364 -0.00020552 1.61602 102.785 -492.215" stroke="gray" arrow="normal/normal">
64 768 m
80 760 l
</path>
<path matrix="0.999999 0.00140075 -0.00140075 0.999999 114.332 -11.2235" stroke="gray" arrow="normal/normal">
64 768 m
64 784 l
</path>
<path matrix="0.830871 -0.00123443 -0.00063827 -0.483544 124.421 1128.73" stroke="gray" arrow="normal/normal">
64 768 m
80 784 l
</path>
<path matrix="-0.545606 0.00196513 0.00076424 1.40291 211.588 -320.698" stroke="gray" arrow="normal/normal">
64 768 m
80 760 l
</path>
<path matrix="0.999999 0.00140075 -0.00140075 0.999999 176.702 -28.9837" stroke="gray" arrow="normal/normal">
64 768 m
64 784 l
</path>
<path matrix="0.0203117 -0.00203309 -0.0000156 -0.796393 238.188 1351.28" stroke="gray" arrow="normal/normal">
64 768 m
80 784 l
</path>
<path matrix="-0.74062 0.0000036 0.0010374 0.00256872 286.229 737.13" stroke="gray" arrow="normal/normal">
64 768 m
80 760 l
</path>
<path stroke="black" dash="dashed" pen="heavier">
42.8981 743.859 m
419.549 0 0 -419.549 141.039 335.949 254.033 739.997 a
</path>
<path matrix="0.999999 0.00140075 -0.00140075 0.999999 80.8603 -13.2465" stroke="black" arrow="normal/normal">
64 768 m
64 784 l
</path>
<path matrix="0.903832 -0.00048257 -0.00069432 -0.18903 86.3228 900.466" stroke="black" arrow="normal/normal">
64 768 m
80 784 l
</path>
<path matrix="-0.101264 0.00233046 0.00014184 1.66372 150.156 -522.31" stroke="black" arrow="normal/normal">
64 768 m
80 760 l
</path>
<text matrix="1 0 0 1 -8.82792 37.5187" transformations="translations" pos="145.339 737.238" stroke="black" type="label" width="15.497" height="7.473" depth="2.49" valign="baseline">\{S\}</text>
<text matrix="1 0 0 1 -40.1593 32.5527" transformations="translations" pos="145.339 737.238" stroke="black" type="label" width="12.73" height="7.473" depth="2.49" valign="baseline">\{i\}</text>
<text matrix="1 0 0 1 -108.327 33.4728" transformations="translations" pos="145.339 737.238" stroke="black" type="label" width="21.032" height="7.473" depth="2.49" valign="baseline">\{i-1\}</text>
<text matrix="1 0 0 1 20.4538 39.3576" transformations="translations" pos="145.339 737.238" stroke="black" type="label" width="25.46" height="7.473" depth="2.49" valign="baseline">\{i+1\}</text>
<text matrix="1 0 0 1 81.8411 21.8859" transformations="translations" pos="145.339 737.238" stroke="black" type="label" width="25.46" height="7.473" depth="2.49" valign="baseline">\{i+2\}</text>
<path matrix="1 0 0 1 0.36783 -3.86222" stroke="black" arrow="pointed/small" rarrow="pointed/small">
48 728 m
112 728 l
</path>
<path matrix="1 0 0 1 64.3678 -3.86222" stroke="black" arrow="pointed/small" rarrow="pointed/small">
48 728 m
112 728 l
</path>
<path matrix="1 0 0 1 128.368 -3.86222" stroke="black" arrow="pointed/small" rarrow="pointed/small">
48 728 m
112 728 l
</path>
<text matrix="1 0 0 1 -67.3139 -19.1269" transformations="translations" pos="145.339 737.238" stroke="black" type="label" width="5.535" height="4.289" depth="0" valign="baseline">u</text>
<text matrix="1 0 0 1 -4.59884 -19.3104" transformations="translations" pos="145.339 737.238" stroke="black" type="label" width="5.535" height="4.289" depth="0" valign="baseline">u</text>
<text matrix="1 0 0 1 60.1389 -18.9426" transformations="translations" pos="145.339 737.238" stroke="black" type="label" width="5.535" height="4.289" depth="0" valign="baseline">u</text>
</page>
</ipe>

214
docs/img/splineframes.svg Normal file
View File

@@ -0,0 +1,214 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="218pt" height="67pt"
viewBox="0 0 218 67" version="1.1">
<defs>
<g>
<symbol overflow="visible" id="glyph0-0">
<path style="stroke:none;" d=""/>
</symbol>
<symbol overflow="visible" id="glyph0-1">
<path style="stroke:none;"
d="M 2.828125 -6.15625 C 2.828125 -6.546875 3.078125 -7.1875 4.171875 -7.25 C 4.21875 -7.265625 4.25 -7.3125 4.25 -7.359375 C 4.25 -7.484375 4.171875 -7.484375 4.0625 -7.484375 C 3.078125 -7.484375 2.15625 -6.96875 2.15625 -6.234375 L 2.15625 -3.953125 C 2.15625 -3.5625 2.15625 -3.25 1.75 -2.921875 C 1.40625 -2.625 1.03125 -2.609375 0.8125 -2.609375 C 0.75 -2.59375 0.71875 -2.546875 0.71875 -2.5 C 0.71875 -2.390625 0.78125 -2.390625 0.875 -2.375 C 1.53125 -2.34375 2.015625 -1.984375 2.125 -1.5 C 2.15625 -1.390625 2.15625 -1.359375 2.15625 -1 L 2.15625 0.96875 C 2.15625 1.390625 2.15625 1.703125 2.625 2.078125 C 3.015625 2.375 3.671875 2.5 4.0625 2.5 C 4.171875 2.5 4.25 2.5 4.25 2.375 C 4.25 2.28125 4.203125 2.28125 4.09375 2.265625 C 3.46875 2.234375 2.984375 1.90625 2.84375 1.40625 C 2.828125 1.3125 2.828125 1.296875 2.828125 0.9375 L 2.828125 -1.15625 C 2.828125 -1.609375 2.734375 -1.78125 2.421875 -2.109375 C 2.21875 -2.3125 1.921875 -2.40625 1.640625 -2.5 C 2.46875 -2.71875 2.828125 -3.1875 2.828125 -3.765625 Z M 2.828125 -6.15625 "/>
</symbol>
<symbol overflow="visible" id="glyph0-2">
<path style="stroke:none;"
d="M 2.15625 1.171875 C 2.15625 1.5625 1.890625 2.203125 0.8125 2.265625 C 0.75 2.28125 0.71875 2.328125 0.71875 2.375 C 0.71875 2.5 0.828125 2.5 0.921875 2.5 C 1.890625 2.5 2.8125 2 2.828125 1.25 L 2.828125 -1.03125 C 2.828125 -1.421875 2.828125 -1.734375 3.21875 -2.0625 C 3.5625 -2.359375 3.953125 -2.375 4.171875 -2.375 C 4.21875 -2.390625 4.25 -2.4375 4.25 -2.5 C 4.25 -2.59375 4.203125 -2.59375 4.09375 -2.609375 C 3.4375 -2.640625 2.953125 -3 2.84375 -3.484375 C 2.828125 -3.59375 2.828125 -3.625 2.828125 -3.984375 L 2.828125 -5.953125 C 2.828125 -6.375 2.828125 -6.6875 2.34375 -7.0625 C 1.9375 -7.375 1.25 -7.484375 0.921875 -7.484375 C 0.828125 -7.484375 0.71875 -7.484375 0.71875 -7.359375 C 0.71875 -7.265625 0.78125 -7.265625 0.875 -7.25 C 1.5 -7.21875 2 -6.890625 2.125 -6.390625 C 2.15625 -6.296875 2.15625 -6.28125 2.15625 -5.921875 L 2.15625 -3.828125 C 2.15625 -3.375 2.234375 -3.203125 2.546875 -2.875 C 2.765625 -2.671875 3.046875 -2.578125 3.328125 -2.5 C 2.515625 -2.265625 2.15625 -1.796875 2.15625 -1.21875 Z M 2.15625 1.171875 "/>
</symbol>
<symbol overflow="visible" id="glyph1-0">
<path style="stroke:none;" d=""/>
</symbol>
<symbol overflow="visible" id="glyph1-1">
<path style="stroke:none;"
d="M 3.484375 -3.875 L 2.203125 -4.171875 C 1.578125 -4.328125 1.203125 -4.859375 1.203125 -5.4375 C 1.203125 -6.140625 1.734375 -6.75 2.515625 -6.75 C 4.171875 -6.75 4.390625 -5.109375 4.453125 -4.671875 C 4.46875 -4.609375 4.46875 -4.546875 4.578125 -4.546875 C 4.703125 -4.546875 4.703125 -4.59375 4.703125 -4.78125 L 4.703125 -6.78125 C 4.703125 -6.953125 4.703125 -7.03125 4.59375 -7.03125 C 4.53125 -7.03125 4.515625 -7.015625 4.453125 -6.890625 L 4.09375 -6.328125 C 3.796875 -6.625 3.390625 -7.03125 2.5 -7.03125 C 1.390625 -7.03125 0.5625 -6.15625 0.5625 -5.09375 C 0.5625 -4.265625 1.09375 -3.53125 1.859375 -3.265625 C 1.96875 -3.234375 2.484375 -3.109375 3.1875 -2.9375 C 3.453125 -2.875 3.75 -2.796875 4.03125 -2.4375 C 4.234375 -2.171875 4.34375 -1.84375 4.34375 -1.515625 C 4.34375 -0.8125 3.84375 -0.09375 3 -0.09375 C 2.71875 -0.09375 1.953125 -0.140625 1.421875 -0.625 C 0.84375 -1.171875 0.8125 -1.796875 0.8125 -2.15625 C 0.796875 -2.265625 0.71875 -2.265625 0.6875 -2.265625 C 0.5625 -2.265625 0.5625 -2.1875 0.5625 -2.015625 L 0.5625 -0.015625 C 0.5625 0.15625 0.5625 0.21875 0.671875 0.21875 C 0.734375 0.21875 0.75 0.203125 0.8125 0.09375 C 0.8125 0.078125 0.84375 0.046875 1.171875 -0.484375 C 1.484375 -0.140625 2.125 0.21875 3.015625 0.21875 C 4.171875 0.21875 4.96875 -0.75 4.96875 -1.859375 C 4.96875 -2.84375 4.3125 -3.671875 3.484375 -3.875 Z M 3.484375 -3.875 "/>
</symbol>
<symbol overflow="visible" id="glyph1-2">
<path style="stroke:none;"
d="M 1.765625 -4.40625 L 0.375 -4.296875 L 0.375 -3.984375 C 1.015625 -3.984375 1.109375 -3.921875 1.109375 -3.4375 L 1.109375 -0.75 C 1.109375 -0.3125 1 -0.3125 0.328125 -0.3125 L 0.328125 0 C 0.640625 -0.015625 1.1875 -0.03125 1.421875 -0.03125 C 1.78125 -0.03125 2.125 -0.015625 2.46875 0 L 2.46875 -0.3125 C 1.796875 -0.3125 1.765625 -0.359375 1.765625 -0.75 Z M 1.796875 -6.140625 C 1.796875 -6.453125 1.5625 -6.671875 1.28125 -6.671875 C 0.96875 -6.671875 0.75 -6.40625 0.75 -6.140625 C 0.75 -5.875 0.96875 -5.609375 1.28125 -5.609375 C 1.5625 -5.609375 1.796875 -5.828125 1.796875 -6.140625 Z M 1.796875 -6.140625 "/>
</symbol>
<symbol overflow="visible" id="glyph1-3">
<path style="stroke:none;"
d="M 2.75 -1.859375 L 2.75 -2.4375 L 0.109375 -2.4375 L 0.109375 -1.859375 Z M 2.75 -1.859375 "/>
</symbol>
<symbol overflow="visible" id="glyph1-4">
<path style="stroke:none;"
d="M 2.9375 -6.375 C 2.9375 -6.625 2.9375 -6.640625 2.703125 -6.640625 C 2.078125 -6 1.203125 -6 0.890625 -6 L 0.890625 -5.6875 C 1.09375 -5.6875 1.671875 -5.6875 2.1875 -5.953125 L 2.1875 -0.78125 C 2.1875 -0.421875 2.15625 -0.3125 1.265625 -0.3125 L 0.953125 -0.3125 L 0.953125 0 C 1.296875 -0.03125 2.15625 -0.03125 2.5625 -0.03125 C 2.953125 -0.03125 3.828125 -0.03125 4.171875 0 L 4.171875 -0.3125 L 3.859375 -0.3125 C 2.953125 -0.3125 2.9375 -0.421875 2.9375 -0.78125 Z M 2.9375 -6.375 "/>
</symbol>
<symbol overflow="visible" id="glyph1-5">
<path style="stroke:none;"
d="M 4.078125 -2.296875 L 6.859375 -2.296875 C 7 -2.296875 7.1875 -2.296875 7.1875 -2.5 C 7.1875 -2.6875 7 -2.6875 6.859375 -2.6875 L 4.078125 -2.6875 L 4.078125 -5.484375 C 4.078125 -5.625 4.078125 -5.8125 3.875 -5.8125 C 3.671875 -5.8125 3.671875 -5.625 3.671875 -5.484375 L 3.671875 -2.6875 L 0.890625 -2.6875 C 0.75 -2.6875 0.5625 -2.6875 0.5625 -2.5 C 0.5625 -2.296875 0.75 -2.296875 0.890625 -2.296875 L 3.671875 -2.296875 L 3.671875 0.5 C 3.671875 0.640625 3.671875 0.828125 3.875 0.828125 C 4.078125 0.828125 4.078125 0.640625 4.078125 0.5 Z M 4.078125 -2.296875 "/>
</symbol>
<symbol overflow="visible" id="glyph1-6">
<path style="stroke:none;"
d="M 1.265625 -0.765625 L 2.328125 -1.796875 C 3.875 -3.171875 4.46875 -3.703125 4.46875 -4.703125 C 4.46875 -5.84375 3.578125 -6.640625 2.359375 -6.640625 C 1.234375 -6.640625 0.5 -5.71875 0.5 -4.828125 C 0.5 -4.28125 1 -4.28125 1.03125 -4.28125 C 1.203125 -4.28125 1.546875 -4.390625 1.546875 -4.8125 C 1.546875 -5.0625 1.359375 -5.328125 1.015625 -5.328125 C 0.9375 -5.328125 0.921875 -5.328125 0.890625 -5.3125 C 1.109375 -5.96875 1.65625 -6.328125 2.234375 -6.328125 C 3.140625 -6.328125 3.5625 -5.515625 3.5625 -4.703125 C 3.5625 -3.90625 3.078125 -3.125 2.515625 -2.5 L 0.609375 -0.375 C 0.5 -0.265625 0.5 -0.234375 0.5 0 L 4.203125 0 L 4.46875 -1.734375 L 4.234375 -1.734375 C 4.171875 -1.4375 4.109375 -1 4 -0.84375 C 3.9375 -0.765625 3.28125 -0.765625 3.0625 -0.765625 Z M 1.265625 -0.765625 "/>
</symbol>
<symbol overflow="visible" id="glyph1-7">
<path style="stroke:none;"
d="M 3.890625 -0.78125 L 3.890625 0.109375 L 5.328125 0 L 5.328125 -0.3125 C 4.640625 -0.3125 4.5625 -0.375 4.5625 -0.875 L 4.5625 -4.40625 L 3.09375 -4.296875 L 3.09375 -3.984375 C 3.78125 -3.984375 3.875 -3.921875 3.875 -3.421875 L 3.875 -1.65625 C 3.875 -0.78125 3.390625 -0.109375 2.65625 -0.109375 C 1.828125 -0.109375 1.78125 -0.578125 1.78125 -1.09375 L 1.78125 -4.40625 L 0.3125 -4.296875 L 0.3125 -3.984375 C 1.09375 -3.984375 1.09375 -3.953125 1.09375 -3.078125 L 1.09375 -1.578125 C 1.09375 -0.796875 1.09375 0.109375 2.609375 0.109375 C 3.171875 0.109375 3.609375 -0.171875 3.890625 -0.78125 Z M 3.890625 -0.78125 "/>
</symbol>
</g>
</defs>
<g id="surface1">
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 47.4495 750.158444 L 47.4495 766.158444 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 47.4495 766.158444 L 45.117469 759.158444 L 49.777625 759.158444 Z M 47.4495 766.158444 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 47.297156 750.295163 L 62.054969 758.388913 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 62.054969 758.388913 L 54.797156 757.0686 L 57.039344 752.978756 Z M 62.054969 758.388913 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 47.4495 750.158444 L 63.4495 742.158444 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 63.4495 742.158444 L 58.23075 747.377194 L 56.144813 743.205319 Z M 63.4495 742.158444 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 112.019813 749.033444 L 111.996375 765.033444 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 111.996375 765.033444 L 109.676063 758.029538 L 114.336219 758.03735 Z M 111.996375 765.033444 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 111.867469 749.455319 L 129.726844 749.478756 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 129.726844 749.478756 L 122.726844 751.799069 L 122.73075 747.138913 Z M 129.726844 749.478756 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 112.015906 749.033444 L 114.367469 736.142819 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 114.367469 736.142819 L 115.406531 743.447506 L 110.816688 742.611569 Z M 114.367469 736.142819 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 177.258094 756.865475 L 177.234656 772.865475 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 177.234656 772.865475 L 174.914344 765.861569 L 179.5745 765.869381 Z M 177.234656 772.865475 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 177.10575 757.28735 L 190.390906 749.533444 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 190.390906 749.533444 L 185.519813 755.076413 L 183.16825 751.049069 Z M 190.390906 749.533444 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 177.258094 756.861569 L 168.519813 745.670163 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 168.519813 745.670163 L 174.664344 749.7561 L 170.988563 752.623288 Z M 168.519813 745.670163 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 239.625281 739.103756 L 239.601844 755.103756 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 239.601844 755.103756 L 237.281531 748.103756 L 241.945594 748.107663 Z M 239.601844 755.103756 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 239.476844 739.521725 L 239.801063 726.744381 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 239.801063 726.744381 L 241.953406 733.802975 L 237.29325 733.681881 Z M 239.801063 726.744381 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 239.625281 739.103756 L 227.765906 739.084225 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(74.5%,74.5%,74.5%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(74.5%,74.5%,74.5%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 227.765906 739.084225 L 234.773719 736.763913 L 234.762 741.424069 Z M 227.765906 739.084225 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.8;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-dasharray:4;stroke-miterlimit:10;"
d="M 42.898719 743.857663 C 112.453406 760.592038 185.133094 759.263913 254.031531 739.994381 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 143.785438 754.842038 L 143.762 770.842038 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 143.762 770.842038 L 141.441688 763.838131 L 146.101844 763.845944 Z M 143.762 770.842038 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 143.633094 755.260006 L 158.086219 752.228756 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 158.086219 752.228756 L 151.711219 755.947506 L 150.754188 751.385006 Z M 158.086219 752.228756 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 143.785438 755.576413 L 142.164344 742.302975 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 142.164344 742.302975 L 145.3245 748.970944 L 140.6995 749.533444 Z M 142.164344 742.302975 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-1" x="99.49908" y="9.3109"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph1-1" x="104.48008" y="9.3109"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-2" x="110.01508" y="9.3109"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-1" x="68.1677" y="14.2769"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph1-2" x="73.1487" y="14.2769"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-2" x="75.9167" y="14.2769"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-1" x="0" y="13.3568"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph1-2" x="4.981" y="13.3568"/>
<use xlink:href="#glyph1-3" x="7.74064" y="13.3568"/>
<use xlink:href="#glyph1-4" x="11.058186" y="13.3568"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-2" x="16.051" y="13.3568"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-1" x="128.7808" y="7.472"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph1-2" x="133.7618" y="7.472"/>
<use xlink:href="#glyph1-5" x="136.52144" y="7.472"/>
<use xlink:href="#glyph1-4" x="144.26238" y="7.472"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-2" x="149.2598" y="7.472"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-1" x="190.1681" y="24.9437"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph1-2" x="195.1491" y="24.9437"/>
<use xlink:href="#glyph1-5" x="197.90874" y="24.9437"/>
<use xlink:href="#glyph1-6" x="205.64968" y="24.9437"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph0-2" x="210.6471" y="24.9437"/>
</g>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 48.367469 724.138913 L 112.367469 724.138913 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 112.367469 724.138913 L 107.367469 725.802975 L 108.367469 724.138913 L 107.367469 722.470944 Z M 112.367469 724.138913 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 48.367469 724.138913 L 53.367469 722.470944 L 52.367469 724.138913 L 53.367469 725.802975 Z M 48.367469 724.138913 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 112.367469 724.138913 L 176.367469 724.138913 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 176.367469 724.138913 L 171.367469 725.802975 L 172.367469 724.138913 L 171.367469 722.470944 Z M 176.367469 724.138913 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 112.367469 724.138913 L 117.367469 722.470944 L 116.367469 724.138913 L 117.367469 725.802975 Z M 112.367469 724.138913 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill:none;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 176.367469 724.138913 L 240.367469 724.138913 " transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 240.367469 724.138913 L 235.367469 725.802975 L 236.367469 724.138913 L 235.367469 722.470944 Z M 240.367469 724.138913 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.4;stroke-linecap:butt;stroke-linejoin:round;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;"
d="M 176.367469 724.138913 L 181.367469 722.470944 L 180.367469 724.138913 L 181.367469 725.802975 Z M 176.367469 724.138913 "
transform="matrix(1,0,0,-1,-37.012,784.0686)"/>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph1-7" x="41.0131" y="65.9575"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph1-7" x="103.72816" y="66.141"/>
</g>
<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
<use xlink:href="#glyph1-7" x="168.4659" y="65.7732"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 24 KiB

BIN
docs/img/visensor.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 222 KiB

View File

@@ -0,0 +1,160 @@
/*
This file is part of m.css.
Copyright © 2017, 2018, 2019 Vladimír Vondruš <mosra@centrum.cz>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
:root {
/* Text properties */
/*--font: 'Libre Baskerville', serif;*/
/*--code-font: 'Source Code Pro', monospace;*/
--font: 'Source Sans Pro', sans-serif;
--code-font: 'Source Code Pro', monospace;
--font-size: 1em;
--code-font-size: 0.8em; /* *not* rem, so it follows surrounding font size */
--line-height: normal;
--paragraph-indent: 1.5rem;
--paragraph-align: justify;
--link-decoration: underline;
--link-decoration-nav: none;
--link-decoration-heading: none;
--nav-brand-case: lowercase;
--nav-menu-case: lowercase;
--nav-heading-case: uppercase;
--nav-categories-case: lowercase;
--landing-header-case: lowercase;
--heading-font-weight: normal;
/* Shapes */
--border-radius: 0.2rem;
/* Basics */
--background-color: #ffffff;
--color: #000000;
--line-color: #c7cfd9;
--link-color: #205275;
--link-active-color: #2f73a3;
--mark-color: #4c93d3;
--mark-background-color: #f0c63e;
--code-color: #000000;
--code-inverted-color: rgba(91, 91, 91, 0.33);
--console-color: var(--code-color);
--console-inverted-color: var(--code-inverted-color);
/* This is simply color-picked --code-note-background-color on top of
--background-color */
--code-background-color: #f2f7fa;
--code-note-background-color: rgba(251, 240, 236, 0.5);
--console-background-color: #000000;
--button-background-color: #ffffff;
/* Header */
--header-border-width: 0.25rem 0 0 0;
--header-color: #000000;
--header-breadcrumb-color: #bdbdbd; /* same as --dim-color */
--header-background-color: #ffffff;
--header-background-color-landing: rgba(255, 255, 255, 0.75);
--header-background-color-jumbo: rgba(255, 255, 255, 0.25);
--header-link-color: #000000;
--header-link-active-color: #205275;
--header-link-current-color: #2f73a3;
--header-link-active-background-color: #ffffff;
--header-link-active-background-color: rgba(255, 255, 255, 0.5);
/* Footer */
--footer-font-size: 0.85rem;
--footer-color: #7c7c7c;
--footer-background-color: #eeeeee;
--footer-link-color: #191919;
--footer-link-active-color: #494949;
/* Cover image */
--cover-image-background-color: #666666;
/* Search (used only by m-documentation.css) */
--search-overlay-color: var(--header-background-color-landing);
--search-background-color: var(--header-background-color);
/* Article */
--article-header-color: #7a7a7a;
--article-footer-color: #969696;
--article-heading-color: #205275;
--article-heading-active-color: #2e73a3;
/* Right navigation panel */
--navpanel-link-color: #292929;
--navpanel-link-active-color: #205275;
/* Plots */
--plot-background-color: #f2f7fa;
--plot-error-color: #000000;
/* Colored components */
--default-color: #000000;
--default-link-active-color: #205275;
--default-filled-color: #000000;
--default-filled-background-color: #f2f7fa;
--default-filled-link-color: #2f73a3;
--default-filled-link-active-color: #205275;
--primary-color: #205275;
--primary-link-active-color: #000000;
--primary-filled-color: #fbe4d9;
--primary-filled-background-color: #ef9069;
--primary-filled-link-color: #782f0d;
--primary-filled-link-active-color: #2f1205;
--success-color: #31c25d;
--success-link-active-color: #dcf6e3;
--success-filled-color: #f4fcf6;
--success-filled-background-color: #4dd376;
--success-filled-link-color: #c5f2d1;
--success-filled-link-active-color: #dcf6e3;
--warning-color: #d19600;
--warning-link-active-color: #fff1cc;
--warning-filled-color: #fff7e3;
--warning-filled-background-color: #d19600;
--warning-filled-link-color: #fff1cc;
--warning-filled-link-active-color: #fff7e3;
--danger-color: #f60000;
--danger-link-active-color: #f6dddc;
--danger-filled-color: #fdf3f3;
--danger-filled-background-color: #e23e3e;
--danger-filled-link-color: #f2c7c6;
--danger-filled-link-active-color: #f6dddc;
--info-color: #2e7dc5;
--info-link-active-color: #c6ddf2;
--info-filled-color: #f4f8fc;
--info-filled-background-color: #4c93d3;
--info-filled-link-color: #c6ddf2;
--info-filled-link-active-color: #dbeaf7;
--dim-color: #bdbdbd;
--dim-link-color: #c0c0c0;
--dim-link-active-color: #949494;
--dim-filled-color: #7c7c7c;
--dim-filled-background-color: #f1f1f1;
--dim-filled-link-color: #c0c0c0;
--dim-filled-link-active-color: #949494;
--dim-button-active-color: #c0c0c0;
}

View File

@@ -0,0 +1,30 @@
/*
This file is part of m.css.
Copyright © 2017, 2018, 2019 Vladimír Vondruš <mosra@centrum.cz>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
@import url('m-theme-udel.css');
@import url('m-grid.css');
@import url('m-components.css');
@import url('m-layout.css');
@import url('pygments-tango.css');
@import url('pygments-console.css');

View File

@@ -0,0 +1,419 @@
.m-code .hll {
background-color: #ffffcc
}
.m-code {
background: #f8f8f8;
}
.m-code .c {
color: #8f5902;
font-style: italic
}
/* Comment */
.m-code .err {
color: #a40000;
border: 1px solid #ef2929
}
/* Error */
.m-code .g {
color: #000000
}
/* Generic */
.m-code .k {
color: #204a87;
font-weight: bold
}
/* Keyword */
.m-code .l {
color: #000000
}
/* Literal */
.m-code .n {
color: #000000
}
/* Name */
.m-code .o {
color: #ce5c00;
font-weight: bold
}
/* Operator */
.m-code .x {
color: #000000
}
/* Other */
.m-code .p {
color: #000000;
font-weight: bold
}
/* Punctuation */
.m-code .ch {
color: #8f5902;
font-style: italic
}
/* Comment.Hashbang */
.m-code .cm {
color: #8f5902;
font-style: italic
}
/* Comment.Multiline */
.m-code .cp {
color: #8f5902;
font-style: italic
}
/* Comment.Preproc */
.m-code .cpf {
color: #8f5902;
font-style: italic
}
/* Comment.PreprocFile */
.m-code .c1 {
color: #8f5902;
font-style: italic
}
/* Comment.Single */
/* Pygments token colors for highlighted code blocks (.m-code). */
/* One rule per line; the trailing comment names the Pygments token class. */
.m-code .cs { color: #8f5902; font-style: italic } /* Comment.Special */
.m-code .gd { color: #a40000 } /* Generic.Deleted */
.m-code .ge { color: #000000; font-style: italic } /* Generic.Emph */
.m-code .gr { color: #ef2929 } /* Generic.Error */
.m-code .gh { color: #000080; font-weight: bold } /* Generic.Heading */
.m-code .gi { color: #00A000 } /* Generic.Inserted */
.m-code .go { color: #000000; font-style: italic } /* Generic.Output */
.m-code .gp { color: #8f5902 } /* Generic.Prompt */
.m-code .gs { color: #000000; font-weight: bold } /* Generic.Strong */
.m-code .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
.m-code .gt { color: #a40000; font-weight: bold } /* Generic.Traceback */
.m-code .kc { color: #204a87; font-weight: bold } /* Keyword.Constant */
.m-code .kd { color: #204a87; font-weight: bold } /* Keyword.Declaration */
.m-code .kn { color: #204a87; font-weight: bold } /* Keyword.Namespace */
.m-code .kp { color: #204a87; font-weight: bold } /* Keyword.Pseudo */
.m-code .kr { color: #204a87; font-weight: bold } /* Keyword.Reserved */
.m-code .kt { color: #204a87; font-weight: bold } /* Keyword.Type */
.m-code .ld { color: #000000 } /* Literal.Date */
.m-code .m { color: #0000cf; font-weight: bold } /* Literal.Number */
.m-code .s { color: #4e9a06 } /* Literal.String */
.m-code .na { color: #c4a000 } /* Name.Attribute */
.m-code .nb { color: #204a87 } /* Name.Builtin */
.m-code .nc { color: #000000 } /* Name.Class */
.m-code .no { color: #000000 } /* Name.Constant */
.m-code .nd { color: #5c35cc; font-weight: bold } /* Name.Decorator */
.m-code .ni { color: #ce5c00 } /* Name.Entity */
.m-code .ne { color: #cc0000; font-weight: bold } /* Name.Exception */
.m-code .nf { color: #000000 } /* Name.Function */
.m-code .nl { color: #f57900 } /* Name.Label */
.m-code .nn { color: #000000 } /* Name.Namespace */
.m-code .nx { color: #000000 } /* Name.Other */
.m-code .py { color: #000000 } /* Name.Property */
.m-code .nt { color: #204a87; font-weight: bold } /* Name.Tag */
.m-code .nv { color: #000000 } /* Name.Variable */
.m-code .ow { color: #204a87; font-weight: bold } /* Operator.Word */
.m-code .w { color: #f8f8f8; text-decoration: underline } /* Text.Whitespace */
.m-code .mb { color: #0000cf; font-weight: bold } /* Literal.Number.Bin */
.m-code .mf { color: #0000cf; font-weight: bold } /* Literal.Number.Float */
.m-code .mh { color: #0000cf; font-weight: bold } /* Literal.Number.Hex */
.m-code .mi { color: #0000cf; font-weight: bold } /* Literal.Number.Integer */
.m-code .mo { color: #0000cf; font-weight: bold } /* Literal.Number.Oct */
.m-code .sa { color: #4e9a06 } /* Literal.String.Affix */
.m-code .sb { color: #4e9a06 } /* Literal.String.Backtick */
.m-code .sc { color: #4e9a06 } /* Literal.String.Char */
.m-code .dl { color: #4e9a06 } /* Literal.String.Delimiter */
.m-code .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */
.m-code .s2 { color: #4e9a06 } /* Literal.String.Double */
.m-code .se { color: #4e9a06 } /* Literal.String.Escape */
.m-code .sh { color: #4e9a06 } /* Literal.String.Heredoc */
.m-code .si { color: #4e9a06 } /* Literal.String.Interpol */
.m-code .sx { color: #4e9a06 } /* Literal.String.Other */
.m-code .sr { color: #4e9a06 } /* Literal.String.Regex */
.m-code .s1 { color: #4e9a06 } /* Literal.String.Single */
.m-code .ss { color: #4e9a06 } /* Literal.String.Symbol */
.m-code .bp { color: #3465a4 } /* Name.Builtin.Pseudo */
.m-code .fm { color: #000000 } /* Name.Function.Magic */
.m-code .vc { color: #000000 } /* Name.Variable.Class */
.m-code .vg { color: #000000 } /* Name.Variable.Global */
.m-code .vi { color: #000000 } /* Name.Variable.Instance */
.m-code .vm { color: #000000 } /* Name.Variable.Magic */
.m-code .il { color: #0000cf; font-weight: bold } /* Literal.Number.Integer.Long */

473
docs/propagation.dox Normal file
View File

@@ -0,0 +1,473 @@
/**
@page propagation Propagation
@tableofcontents
@section imu_measurements IMU Measurements
We use a 6-axis inertial measurement unit (IMU) to propagate the inertial navigation system (INS),
which provides measurements of the local rotational velocity (angular rate) \f$\boldsymbol{\omega}_m\f$ and
local translational acceleration \f$\mathbf{a}_m\f$:
\f{align*}{
\boldsymbol{\omega}_m(t) &= \boldsymbol{\omega}(t) + \mathbf{b}_{g}(t) + \mathbf{n}_{{g}}(t)\\
\mathbf{a}_m(t) &= \mathbf{a}(t) + {}^I_G\mathbf{R}(t) {^G\mathbf{g}} + \mathbf{b}_{a}(t) + \mathbf{n}_{{a}}(t)
\f}
where \f$\boldsymbol{\omega}\f$ and \f$\mathbf{a}\f$ are the true rotational velocity and translational
acceleration in the IMU local frame \f$\{I\}\f$,
\f$\mathbf{b}_{g}\f$ and \f$\mathbf{b}_{a}\f$ are the gyroscope and accelerometer biases, and \f$\mathbf{n}_{{g}}\f$
and \f$\mathbf{n}_{{a}}\f$ are white Gaussian noise,
\f${^G\mathbf{g}} = [ 0 ~~ 0 ~~ 9.81 ]^\top\f$ is the gravity expressed in the global frame \f$\{G\}\f$
(noting that gravity differs slightly at different locations on the globe),
and \f$^I_G\mathbf{R}\f$ is the rotation matrix from global to IMU local frame.
@section ins_state State Vector
We define our INS state vector \f$\mathbf{x}_I\f$ at time \f$t\f$ as:
\f{align*}{
\mathbf{x}_I(t) =
\begin{bmatrix}
^I_G\bar{q}(t) \\
^G\mathbf{p}_I(t) \\
^G\mathbf{v}_I(t)\\
\mathbf{b}_{\mathbf{g}}(t) \\
\mathbf{b}_{\mathbf{a}}(t)
\end{bmatrix}
\f}
where \f$^I_G\bar{q}\f$ is the unit quaternion representing the rotation from the global frame to the IMU frame,
\f$^G\mathbf{p}_I\f$ is the position of IMU in global frame,
and \f$^G\mathbf{v}_I\f$ is the velocity of IMU in global frame.
We will often write time as a subscript of \f$I\f$ describing the state of IMU at the time
for notation clarity (e.g., \f$^{I_t}_G\bar{q} = \text{}^I_G\bar{q}(t)\f$).
In order to define the IMU error state, the standard additive error definition is employed
for the position, velocity, and biases,
while we use the quaternion error state \f$\delta \bar{q}\f$ with a left quaternion multiplicative error
\f$\otimes\f$:
\f{align*}{
^I_G\bar{q} &= \delta \bar{q} \otimes \text{}^I_G \hat{\bar{q}}\\
\delta \bar{q} &= \begin{bmatrix}
\hat{\mathbf{k}}\sin(\frac{1}{2}\tilde{\theta})\\
\cos(\frac{1}{2}\tilde{\theta}) \end{bmatrix}
\simeq
\begin{bmatrix}
\frac{1}{2}\tilde{\boldsymbol{\theta}}\\ 1
\end{bmatrix}
\f}
where \f$\hat{\mathbf{k}}\f$ is the rotation axis and \f$\tilde{\theta}\f$ is the rotation angle.
For small rotation, the error angle vector is approximated by \f$\tilde{\boldsymbol{\theta}} = \tilde{\theta}~\hat{\mathbf{k}}\f$
as the error vector about the three orientation axes.
The total IMU error state thus is defined as the following 15x1 (not 16x1) vector:
\f{align*}{
\tilde{\mathbf{x}}_I(t) =
\begin{bmatrix}
^I_G\tilde{\boldsymbol{\theta}}(t) \\
^G\tilde{\mathbf{p}}_I(t) \\
^G\tilde{\mathbf{v}}_I(t)\\
\tilde{\mathbf{b}}_{{g}}(t) \\
\tilde{\mathbf{b}}_{{a}}(t)
\end{bmatrix}
\f}
@section imu_kinematic IMU Kinematics
The IMU state evolves over time as follows
(see [Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf) @cite Trawny2005TR).
\f{align*}{ \newcommand{\comm}[1]{}
^I_G\dot{\bar{q}}(t)
\comm{
&= \lim_{\delta t \to 0} \frac{1}{\delta t}
(^{I_{t + \delta t}}_G\bar{q} - \text{}^{I_{t}}_G\bar{q})\\
&= \lim_{\delta t \to 0} \frac{1}{\delta t}
(^{I_{t + \delta t}}_{L_{t}}\bar{q} \otimes ^{I_{t}}_{G}\bar{q}
- \bar{q}_0 \otimes \text{}^{I_{t}}_G\bar{q})\\
&= \lim_{\delta t \to 0} \frac{1}{\delta t}
(^{I_{t + \delta t}}_{L_{t}}\bar{q} - \bar{q}_0 ) \otimes \text{}^{I_{t}}_{G}\bar{q}\\
&\approx \lim_{\delta t \to 0} \frac{1}{\delta t}
\Bigg (\begin{bmatrix} \frac{1}{2}\delta \boldsymbol{\theta}\\ 1 \end{bmatrix}
-\begin{bmatrix} \boldsymbol{0}\\ 1 \end{bmatrix} \Bigg )
\otimes \text{}^{I_{t}}_{G}\bar{q} \\
&= \frac{1}{2} \begin{bmatrix} \boldsymbol{\omega}\\ 0 \end{bmatrix}
\otimes \text{}^{I_{t}}_{G}\bar{q}\\}
&= \frac{1}{2} \begin{bmatrix} -\lfloor \boldsymbol{\omega}(t) \times \rfloor
&& \boldsymbol{\omega}(t) \\
-\boldsymbol{\omega}^\top(t) && 0 \end{bmatrix} \text{}^{I_{t}}_{G}\bar{q}\\
&=: \frac{1}{2} \boldsymbol{\Omega}(\boldsymbol{\omega}(t)) \text{}^{I_{t}}_{G}\bar{q}\\
^G\dot{\mathbf{p}}_I(t) &=\text{} ^G\mathbf{v}_I(t)\\
^G\dot{\mathbf{v}}_I(t) &=\text{} ^{I_{t}}_G\mathbf{R}^\top\mathbf{a}(t) \\
\dot{\mathbf{b}}_{\mathbf{g}}(t) &= \mathbf{n}_{wg}\\
\dot{\mathbf{b}}_{\mathbf{a}}(t) &= \mathbf{n}_{wa}
\f}
where we have modeled the gyroscope and accelerometer biases as random walk
and thus their time derivatives are white Gaussian.
Note that the above kinematics have been defined in terms of the *true* acceleration and angular velocities.
@section conti_prop Continuous-time IMU Propagation
Given the continuous-time measurements \f$\boldsymbol{\omega}_m(t)\f$
and \f$\mathbf{a}_m(t)\f$ in the time interval \f$t \in [t_k,t_{k+1}]\f$,
and their estimates, i.e. after taking the expectation,
\f$\hat{\boldsymbol{\omega}}(t) = \boldsymbol{\omega}_m(t) - \hat{\boldsymbol{b}}_g(t)\f$ and
\f$\hat{\boldsymbol{a}}(t) = \boldsymbol{a}_m(t) - \hat{\boldsymbol{b}}_a(t) - ^I_G\hat{\mathbf{R}}(t){}^G\mathbf{g}\f$,
we can define the solutions to the above IMU kinematics differential equation.
The solution to the quaternion evolution has the following general form:
\f{align*}{
^{I_{t}}_{G}\bar{q} = \boldsymbol{\Theta}(t,t_k) \text{}^{I_{k}}_{G}\bar{q}
\f}
Differentiating and reordering the terms yields the governing equation for
\f$\boldsymbol{\Theta}(t,t_k)\f$ as
\f{align*}{ \newcommand{\comm}[1]{}
\boldsymbol{\Theta}(t,t_k) &= \text{}^{I_t}_G\bar{q} \text{ }^{I_k}_{G}\bar{q}^{-1}\\
\Rightarrow \dot{\boldsymbol{\Theta}}(t,t_k) &=
\text{}^{I_t}_G\dot{\bar{q}} \text{ }^{I_k}_{G}\bar{q}^{-1}\\
&= \frac{1}{2} \boldsymbol{\Omega}(\boldsymbol{\omega}(t)) \text{ }^{I_t}_{G}\bar{q}
\text{ }^{I_k}_{G}\bar{q}^{-1}\\
&= \frac{1}{2} \boldsymbol{\Omega}(\boldsymbol{\omega}(t)) \boldsymbol{\Theta}(t,t_k)
\f}
with \f$\boldsymbol{\Theta}(t_k,t_k) = \mathbf{I}_{4}\f$.
If we take \f$\boldsymbol{\omega}(t) = \boldsymbol{\omega}\f$ to be constant over the period
\f$\Delta t = t_{k+1} - t_k\f$,
then the above system is linear time-invariant (LTI), and
\f$\boldsymbol{\Theta}\f$ can be solved as (see [[Stochastic Models, Estimation, and Control]](https://books.google.com/books?id=L_YVMUJKNQUC) @cite Maybeck1982STOC):
\f{align*}{
\boldsymbol{\Theta}(t_{k+1},t_k)
&= \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}(\boldsymbol{\omega})\Delta t\bigg)\\
&= \cos\bigg(\frac{|\boldsymbol{\omega}|}{2} \Delta t \bigg) \cdot \mathbf{I}_4
+ \frac{1}{|\boldsymbol{\omega}|}\sin\bigg(\frac{|\boldsymbol{\omega}|}{2} \Delta t \bigg)
\cdot \boldsymbol{\Omega}(\boldsymbol{\omega})\\
&\simeq
\mathbf{I}_4 + \frac{\Delta t}{2}\boldsymbol{\Omega}(\boldsymbol{\omega})
\f}
where the approximation assumes small \f$|\boldsymbol{\omega}|\f$.
We can formulate the quaternion propagation from \f$t_k\f$ to \f$t_{k+1}\f$ using the estimated
rotational velocity \f$\hat{\boldsymbol{\omega}}(t) = \hat{\boldsymbol{\omega}}\f$ as:
\f{align*}{
\text{}^{I_{k+1}}_{G}\hat{\bar{q}}
= \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}(\hat{\boldsymbol{\omega}})\Delta t\bigg)
\text{}^{I_{k}}_{G}\hat{\bar{q}}
\f}
Having defined the integration of the orientation, we can integrate the velocity and position over the measurement interval:
\f{align*}{
^G\hat{\mathbf{v}}_{k+1} &= \text{}^G\hat{\mathbf{v}}_{I_k} + \int_{t_{k}}^{t_{k+1}}
{^G\hat{\mathbf{a}}(\tau)} d\tau \\
&= \text{}^G\hat{\mathbf{v}}_{I_k} - {}^G\mathbf{g}\Delta t+ \int_{t_{k}}^{t_{k+1}}
{^G_{I_{\tau}}\hat{\mathbf{R}}(\mathbf{a}_m(\tau) - \hat{\mathbf{b}}_{\mathbf{a}}(\tau)) d\tau}\\[1em]
^G\hat{\mathbf{p}}_{I_{k+1}} &=
\text{}^G\hat{\mathbf{p}}_{I_k} + \int_{t_{k}}^{t_{k+1}}
{^G\hat{\mathbf{v}}_I(\tau)} d\tau \\
&= \text{}^G\hat{\mathbf{p}}_{I_k} + \text{}^G\hat{\mathbf{v}}_{I_k} \Delta t
- \frac{1}{2}{}^G\mathbf{g}\Delta t^2 +
\int_{t_{k}}^{t_{k+1}} \int_{t_{k}}^{s}
{^G_{I_{\tau}}\hat{\mathbf{R}}(\mathbf{a}_m(\tau) - \hat{\mathbf{b}}_{\mathbf{a}}(\tau)) d\tau ds}
\f}
Propagation of each bias \f$\hat{\mathbf{b}}_{\mathbf{g}}\f$ and \f$\hat{\mathbf{b}}_{\mathbf{a}}\f$ is given by:
\f{align*}{
\hat{\mathbf{b}}_{\mathbf{g},k+1} &= \hat{\mathbf{b}}_{\mathbf{g},k} + \int_{t_{k}}^{t_{k+1}}
{\hat{\mathbf{n}}_{wg}(\tau)} d\tau \\
&= \hat{\mathbf{b}}_{\mathbf{g},k} \\
\hat{\mathbf{b}}_{\mathbf{a},k+1} &= \hat{\mathbf{b}}_{\mathbf{a},k} + \int_{t_{k}}^{t_{k+1}}
{\hat{\mathbf{n}}_{wa}(\tau)} d\tau \\
&= \hat{\mathbf{b}}_{\mathbf{a},k}
\f}
The biases will not evolve since our random walk noises \f$\hat{\mathbf{n}}_{wg}\f$ and \f$\hat{\mathbf{n}}_{wa}\f$ are zero-mean white Gaussian.
All of the above integrals could be analytically or numerically solved if one wishes to use the continuous-time measurement evolution model.
@section disc_prop Discrete-time IMU Propagation
A simpler method is to model the measurements as discrete-time over the integration period.
To do this, the measurements can be assumed to be constant during the sampling period.
We employ this assumption and approximate that the measurement at time \f$t_k\f$ remains
the same until we get the next measurement at \f$t_{k+1}\f$.
For the quaternion propagation, it is the same as continuous-time propagation
with constant measurement assumption \f${\boldsymbol{\omega}}_{m}(t_k) = {\boldsymbol{\omega}}_{m,k}\f$.
We use subscript \f$k\f$ to denote it is the measurement we get at time \f$t_k\f$.
Therefore the propagation of quaternion can be written as:
\f{align*}{
\text{}^{I_{k+1}}_{G}\hat{\bar{q}}
= \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}\big({\boldsymbol{\omega}}_{m,k}-\hat{\mathbf{b}}_{g,k}\big)\Delta t\bigg)
\text{}^{I_{k}}_{G}\hat{\bar{q}}
\f}
For the velocity and position propagation we have constant
\f$\mathbf{a}_{m}(t_k) = \mathbf{a}_{m,k}\f$ over \f$t \in [t_k, t_{k+1}]\f$.
We can therefore directly solve for the new states as:
\f{align*}{
^G\hat{\mathbf{v}}_{k+1} &= \text{}^G\hat{\mathbf{v}}_{I_k} - {}^G\mathbf{g}\Delta t
+\text{}^{I_k}_G\hat{\mathbf{R}}^\top(\mathbf{a}_{m,k} - \hat{\mathbf{b}}_{\mathbf{a},k})\Delta t\\
^G\hat{\mathbf{p}}_{I_{k+1}}
&= \text{}^G\hat{\mathbf{p}}_{I_k} + {}^G\hat{\mathbf{v}}_{I_k} \Delta t
- \frac{1}{2}{}^G\mathbf{g}\Delta t^2
+ \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top(\mathbf{a}_{m,k} - \hat{\mathbf{b}}_{\mathbf{a},k})\Delta t^2
\f}
The propagation of each bias is the same as in the continuous-time case:
\f{align*}{
\hat{\mathbf{b}}_{\mathbf{g},k+1} &= \hat{\mathbf{b}}_{\mathbf{g},k}\\
\hat{\mathbf{b}}_{\mathbf{a},k+1} &= \hat{\mathbf{b}}_{\mathbf{a},k}
\f}
@section error_prop Discrete-time Error-state Propagation
In order to propagate the covariance matrix, we should derive the error-state propagation,
i.e., computing the system Jacobian \f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and noise Jacobian \f$\mathbf{G}_{k}\f$.
In particular, when the covariance matrix of the continuous-time measurement noises is given by
\f$\mathbf{Q}_c\f$, then the discrete-time noise covariance \f$\mathbf{Q}_d\f$ can be computed as
(see [[Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf)] @cite Trawny2005TR Eq. (129) and (130)):
\f{align*}{
\sigma_{g} &= \frac{1}{\sqrt{\Delta t}}~ \sigma_{g_c} \\
\sigma_{bg} &= \sqrt{\Delta t}~ \sigma_{bg_c} \\[1em]
\mathbf{Q}_{meas} &=
\begin{bmatrix}
\frac{1}{\Delta t}~ \sigma_{g_c}^2~ \mathbf{I}_3 & \mathbf{0}_3 \\
\mathbf{0}_3 & \frac{1}{\Delta t}~ \sigma_{a_c}^2~ \mathbf{I}_3
\end{bmatrix} \\
\mathbf{Q}_{bias} &=
\begin{bmatrix}
\Delta t~ \sigma_{bg_c}^2~ \mathbf{I}_3 & \mathbf{0}_3 \\
\mathbf{0}_3 & \Delta t~ \sigma_{ba_c}^2~ \mathbf{I}_3
\end{bmatrix}
\f}
where \f$\mathbf{n} = [ \mathbf{n}_g ~ \mathbf{n}_a ~ \mathbf{n}_{bg} ~ \mathbf{n}_{ba} ]^\top\f$ are the discrete
IMU sensor noises which have been converted from their continuous representations.
We define the stacked discrete measurement noise as follows:
\f{align*}{
\mathbf{Q}_{d} &=
\begin{bmatrix}
\mathbf{Q}_{meas} & \mathbf{0}_3 \\
\mathbf{0}_3 & \mathbf{Q}_{bias}
\end{bmatrix}
\f}
The method of computing Jacobians is to "perturb" each variable in the system and see how the old error "perturbation" relates to the new error state.
That is,
\f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and \f$\mathbf{G}_{k}\f$ can be found by perturbing each variable as:
\f{align*}{
\tilde{\mathbf{x}}_I(t_{k+1}) = \boldsymbol{\Phi}(t_{k+1},t_k) \tilde{\mathbf{x}}_I(t_{k}) + \mathbf{G}_{k} \mathbf{n}
\f}
For the orientation error propagation, we start with the \f$\mathbf{SO}(3)\f$ perturbation using
\f${}^{I}_G \mathbf{R} \approx (\mathbf{I}_3 - \lfloor ^{I}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I}_{G} \hat{\mathbf{R}}\f$:
\f{align*}{
{}^{I_{k+1}}_G \mathbf{R} &= \text{}^{I_{k+1}}_{I_{k}} \mathbf{R} \text{}^{I_{k}}_G \mathbf{R} \\
(\mathbf{I}_3 - \lfloor ^{I_{k+1}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I_{k+1}}_{G}
\hat{\mathbf{R}}
&\approx \textrm{exp}(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t - {}^{I_{k}}\tilde{\boldsymbol{\omega}}\Delta t)
(\mathbf{I}_3 - \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I_{k}}_{G}
\hat{\mathbf{R}}\\
&=\textrm{exp}(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t)\textrm{exp}(-\mathbf{J}_r(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t){}^{I_{k}}\tilde{\boldsymbol{\omega}}\Delta t)
(\mathbf{I}_3 - \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I_{k}}_{G}
\hat{\mathbf{R}}\\
&=\text{}^{I_{k+1}}_{I_{k}} \hat{\mathbf{R}}
(\mathbf{I}_3 - \lfloor \mathbf J_r(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t)
\tilde{\boldsymbol{\omega}}_k\Delta t \times\rfloor)
(\mathbf{I}_3 - \lfloor ^{I_k}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)
\text{}^{I_{k}}_G \hat{\mathbf{R}}
\f}
where \f$\tilde{\boldsymbol{\omega}} = \boldsymbol{\omega} - \hat{\boldsymbol{\omega}}
= -(\tilde{\mathbf{b}}_{\mathbf{g}} + \mathbf{n}_g)\f$ handles both the perturbation to the bias and measurement noise.
\f$\mathbf {J}_r(\boldsymbol{\theta})\f$ is the right Jacobian of \f$\mathbf{SO}(3)\f$
that maps the variation of rotation angle in the parameter vector space into the variation in the tangent vector space to the manifold
[see @ref ov_core::Jr_so3()].
By neglecting the second order terms from above, we obtain the following orientation error propagation:
\f{align*}
\text{}^{I_{k+1}}_{G}\tilde{\boldsymbol{\theta}} \approx
\text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}} \text{}^{I_k}_{G}\tilde{\boldsymbol{\theta}}
- \text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}}\mathbf J_r(\text{}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}})
\Delta t (\tilde{\mathbf{b}}_{\mathbf{g},k} + \mathbf{n}_{\mathbf{g},k})
\f}
Now we can do error propagation of position and velocity using the same scheme:
\f{align*}{
^G\mathbf{p}_{I_{k+1}}
&= \text{}^G\mathbf{p}_{I_k} + \text{}^G\mathbf{v}_{I_k} \Delta t
- \frac{1}{2}{}^G\mathbf{g}\Delta t^2
+ \frac{1}{2}\text{}^{I_k}_G\mathbf{R}^\top \mathbf{a}_{k}\Delta t^2\\
^G\hat{\mathbf{p}}_{I_{k+1}} + \text{}^G\tilde{\mathbf{p}}_{I_{k+1}}
&\approx \text{}^G\hat{\mathbf{p}}_{I_k} + \text{}^G\tilde{\mathbf{p}}_{I_k}
+ \text{}^G\hat{\mathbf{v}}_{I_k} \Delta t
+ \text{}^G\tilde{\mathbf{v}}_{I_k} \Delta t
- \frac{1}{2}{}^G\mathbf{g}\Delta t^2\\
&\hspace{4cm} + \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top
(\mathbf{I}_3 + \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)
(\hat{\mathbf{a}}_{k} + \tilde{\mathbf{a}}_{k})\Delta t^2\\
\\
^G\mathbf{v}_{k+1} &= \text{}^G\mathbf{v}_{I_k} - {}^G\mathbf{g}\Delta t
+\text{}^{I_k}_G\mathbf{R}^\top\mathbf{a}_{k}\Delta t\\
^G\hat{\mathbf{v}}_{k+1} + ^G\tilde{\mathbf{v}}_{k+1} &\approx
{}^G\hat{\mathbf{v}}_{I_k} + {}^G\tilde{\mathbf{v}}_{I_k}
- {}^G\mathbf{g}\Delta t
+ \text{}^{I_k}_G\hat{\mathbf{R}}^\top
(\mathbf{I}_3 + \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)
(\hat{\mathbf{a}}_{k} + \tilde{\mathbf{a}}_{k})\Delta t
\f}
where \f$\tilde{\mathbf{a}} = \mathbf{a} - \hat{\mathbf{a}}
= - (\tilde{\mathbf{b}}_{\mathbf{a}} + \mathbf{n}_{\mathbf{a}})\f$.
By neglecting the second order error terms, we obtain the following position and velocity error propagation:
\f{align*}{
\text{}^G\tilde{\mathbf{p}}_{I_{k+1}} &=
\text{}^G\tilde{\mathbf{p}}_{I_k}
+ \Delta t \text{}^G\tilde{\mathbf{v}}_{I_k}
- \frac{1}{2}\text{}^{I_k}_{G}\hat{\mathbf{R}}^\top
\lfloor \hat{\mathbf{a}}_{k} \Delta t^2 \times\rfloor
^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}
- \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \Delta t^2
(\tilde{\mathbf{b}}_{\mathbf{a},k} + \mathbf{n}_{\mathbf{a},k})\\
^G\tilde{\mathbf{v}}_{k+1} &=
\text{}^G\tilde{\mathbf{v}}_{I_k}
- \text{}^{I_k}_G\hat{\mathbf{R}}^\top
\lfloor \hat{\mathbf{a}}_{k} \Delta t \times\rfloor
^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}
- \text{}^{I_k}_G\hat{\mathbf{R}}^\top \Delta t
(\tilde{\mathbf{b}}_{\mathbf{a},k} + \mathbf{n}_{\mathbf{a},k})
\f}
The propagation of the two random-walk biases are as follows:
\f{align*}{
\mathbf{b}_{\mathbf{g},k+1} &= \mathbf{b}_{\mathbf{g},k} + \mathbf{n}_{wg} \\
\hat{\mathbf{b}}_{\mathbf{g},k+1} + \tilde{\mathbf{b}}_{\mathbf{g},k+1} &=
\hat{\mathbf{b}}_{\mathbf{g},k} + \tilde{\mathbf{b}}_{\mathbf{g},k}
+ \mathbf{n}_{wg} \\
\tilde{\mathbf{b}}_{\mathbf{g},k+1} &=
\tilde{\mathbf{b}}_{\mathbf{g},k} + \mathbf{n}_{wg} \\[1em]
\mathbf{b}_{\mathbf{a},k+1} &= \mathbf{b}_{\mathbf{a},k} + \mathbf{n}_{wa} \\
\hat{\mathbf{b}}_{\mathbf{a},k+1} + \tilde{\mathbf{b}}_{\mathbf{a},k+1} &=
\hat{\mathbf{b}}_{\mathbf{a},k} + \tilde{\mathbf{b}}_{\mathbf{a},k}
+ \mathbf{n}_{wa} \\
\tilde{\mathbf{b}}_{\mathbf{a},k+1} &=
\tilde{\mathbf{b}}_{\mathbf{a},k} + \mathbf{n}_{wa}
\f}
By collecting all the perturbation results, we can build \f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and \f$\mathbf{G}_{k}\f$ matrices as:
\f{align*}{
\boldsymbol{\Phi}(t_{k+1},t_k) &=
\begin{bmatrix}
\text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}} & \mathbf{0}_3 & \mathbf{0}_3 &
- \text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}}\mathbf J_r(\text{}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}})
\Delta t & \mathbf{0}_3 \\
- \frac{1}{2}\text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \lfloor \hat{\mathbf{a}}_{k} \Delta t^2 \times\rfloor
& \mathbf{I}_3 & \Delta t \mathbf{I}_3 & \mathbf{0}_3 & - \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \Delta t^2 \\
- \text{}^{I_k}_G\hat{\mathbf{R}}^\top \lfloor \hat{\mathbf{a}}_{k} \Delta t \times\rfloor
& \mathbf{0}_3 & \mathbf{I}_3 & \mathbf{0}_3 & - \text{}^{I_k}_G\hat{\mathbf{R}}^\top \Delta t \\
\mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{I}_3 & \mathbf{0}_3 \\
\mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{I}_3
\end{bmatrix}
\f}
\f{align*}{
\mathbf{G}_{k} &=
\begin{bmatrix}
- \text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}}\mathbf J_r(\text{}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}})
\Delta t & \mathbf{0}_3
& \mathbf{0}_3 & \mathbf{0}_3 \\
\mathbf{0}_3 & - \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \Delta t^2
& \mathbf{0}_3 & \mathbf{0}_3 \\
\mathbf{0}_3 & - \text{}^{I_k}_G\hat{\mathbf{R}}^\top \Delta t
& \mathbf{0}_3 & \mathbf{0}_3 \\
\mathbf{0}_3 & \mathbf{0}_3
& \mathbf{I}_3 & \mathbf{0}_3 \\
\mathbf{0}_3 & \mathbf{0}_3
& \mathbf{0}_3 & \mathbf{I}_3
\end{bmatrix}
\f}
Now, with the computed \f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and \f$\mathbf{G}_{k}\f$ matrices,
we can propagate the covariance from \f$t_k\f$ to \f$t_{k+1}\f$:
\f{align*}{
\mathbf{P}_{k+1|k} = \boldsymbol{\Phi}(t_{k+1},t_k)\mathbf{P}_{k|k}\boldsymbol{\Phi}(t_{k+1},t_k)^\top + \mathbf{G}_k\mathbf{Q}_d\mathbf{G}_k^\top
\f}
*/

95
docs/simulation.dox Normal file
View File

@@ -0,0 +1,95 @@
/**
@page simulation Visual-Inertial Simulator
@section sim-bspline B-Spline Interpolation
@image html splineframes.svg width=60%
At the center of the simulator is an \f$\mathbb{SE}(3)\f$ b-spline which allows for the calculation of the pose, velocity, and accelerations at any given timestep along a given trajectory.
We follow the work of Mueggler et al. @cite Mueggler2018TRO and Patron et al. @cite Patron2015IJCV in which given a series of uniformly distributed "control points" poses the pose \f$\{S\}\f$ at a given timestep \f$t_s\f$ can be interpolated by:
\f{align*}{
{}^{G}_{S}\mathbf{T}(u(t_s)) &= {}^{G}_{i-1}\mathbf{T}~\mathbf{A}_0~\mathbf{A}_1~\mathbf{A}_2 \label{eq:poseinterp} \\[0.75em]
\mathbf{A}_j &= \mathrm{exp}\Big({B}_j(u(t))~{}^{i-1+j}_{i+j}\mathbf{\Omega} \Big) \\
{}^{i-1}_{i}\mathbf{\Omega} &= \mathrm{log}\Big( {}^{G}_{i-1}\mathbf{T}^{-1}~{}^{G}_{i}\mathbf{T} \Big) \\[0.75em]
{B}_0(u(t)) &= \frac{1}{3!}~(5+3u-3u^2+u^3) \\
{B}_1(u(t)) &= \frac{1}{3!}~(1+3u+3u^2-2u^3) \\
{B}_2(u(t)) &= \frac{1}{3!}~(u^3)
\f}
where \f$u(t_s)=(t_s-t_i)/(t_{i+1}-t_i)\f$, \f$\mathrm{exp}(\cdot)\f$, \f$\mathrm{log}(\cdot)\f$ are the \f$\mathbb{SE}(3)\f$ matrix exponential @ref ov_core::exp_se3 and logarithm @ref ov_core::log_se3.
The frame notations can be seen in the above figure and we refer the reader to the @ref ov_core::BsplineSE3 class for more details.
The above equation can be interpreted as compounding the fractional portions of the bounding poses onto the first pose \f${}^{G}_{i-1}\mathbf{T}\f$.
From this equation, it is simple to take the derivative with respect to time, thus allowing the computation of the velocity and acceleration at any point.
The only needed input into the simulator is a pose trajectory which we will then uniformly sample to construct control points for this spline.
This spline is then used to both generate the inertial measurements while also providing the pose information needed to generate visual-bearing measurements.
@section sim-inertial Inertial Measurements
To incorporate inertial measurements from an IMU sensor, we can leverage the continuous nature and \f$C^2\f$-continuity of our cubic B-spline.
We can define the sensor measurement from an IMU as follows:
\f{align*}{
{}^I\boldsymbol{\omega}_m(t) &= {}^I\boldsymbol{\omega}(t) + \mathbf{b}_\omega + \mathbf{n}_\omega \\
{}^I\mathbf{a}_m(t) &= {}^I\mathbf{a}(t) + {}^{I(t)}_G\mathbf{R}{}^G\mathbf{g} + \mathbf{b}_a + \mathbf{n}_a \\
\dot{\mathbf{b}}_\omega &= \mathbf{n}_{wg} \\
\dot{\mathbf{b}}_a &= \mathbf{n}_{wa}
\f}
where each measurement is corrupted with some white noise and random-walk bias.
To obtain the true measurements from our \f$\mathbb{SE}(3)\f$ b-spline we can do the following:
\f{align*}{
{}^I\boldsymbol{\omega}(t) &=
\mathrm{vee}\Big( {}^{G}_{I}\mathbf{R}(u(t))^\top {}^{G}_{I}\dot{\mathbf{R}}(u(t)) \Big) \\
{}^I\mathbf{a}(t) &= {}^{G}_{I}\mathbf{R}(u(t))^\top {}^{G}\ddot{\mathbf{p}}_{I}(u(t))
\f}
where \f$\mathrm{vee}(\cdot)\f$ returns the vector portion of the skew-symmetric matrix (see @ref ov_core::vee).
These are then corrupted using the random walk biases and corresponding white noises.
For example we have the following:
\f{align*}{
\omega_m(t) &= \omega(t) + b_\omega(t) + \sigma_w\frac{1}{\sqrt{\Delta t}}\textrm{gennoise}(0,1) \\
b_\omega(t+\Delta t) &= b_\omega(t) + \sigma_{wg}\sqrt{\Delta t}~\textrm{gennoise}(0,1) \\
t &= t+\Delta t
\f}
Note that this is repeated per-scalar value as compared to the vector and identically for the accelerometer readings.
The \f$\textrm{gennoise}(m,v)\f$ function generates a random scalar float with mean *m* and variance *v*.
The \f$\Delta t\f$ is our sensor sampling rate that we advance time forward with.
@section sim-visbearing Visual-Bearing Measurement
The first step that we perform after creating the b-spline trajectory is the generation of a map of point features.
To generate these features, we increment along the spline at a fixed interval and ensure that all cameras see enough features in the map.
If there are not enough features in the given frame, we generate new features by sending random rays from the camera out and assigning a random depth.
This feature is then added to the map so that it can be projected into future frames.
After the map generation phase, we generate feature measurements by projecting them into the current frame.
Projected features are limited to being within the field of view of the camera, in front of the camera, and close in distance.
Pixel noise can be directly added to the raw pixel values.
*/

75
docs/update-compress.dox Normal file
View File

@@ -0,0 +1,75 @@
/**
@page update-compress Measurement Compression
One of the most costly operations in the EKF update is matrix multiplication.
To mitigate this issue, we perform the thin QR decomposition of the measurement Jacobian after nullspace projection:
\f{align*}{
\mathbf{H}_{o,k} =
\begin{bmatrix} \mathbf{Q_1} & \mathbf{Q_2} \end{bmatrix}
\begin{bmatrix} \mathbf{R_1} \\ \mathbf{0} \end{bmatrix} = \mathbf Q_1 \mathbf R_1
\f}
This QR decomposition can be performed again using [Givens rotations](https://en.wikipedia.org/wiki/Givens_rotation) (note that this operation in general is not cheap though).
We apply this QR to the linearized measurement residuals to compress measurements:
\f{align*}{
\tilde{\mathbf{z}}_{o,k}
&\simeq
\mathbf{H}_{o,k}\tilde{\mathbf{x}}_{k} + \mathbf{n}_o \\[5px]
\empty
\tilde{\mathbf{z}}_{o,k}
&\simeq
\mathbf Q_1 \mathbf R_1 \tilde{\mathbf{x}}_{k} + \mathbf{n}_o \\[5px]
\empty
\mathbf{Q_1}^\top \tilde{\mathbf{z}}_{o,k}
&\simeq
\mathbf{Q_1}^\top \mathbf{Q_1} \mathbf{R_1}
\tilde{\mathbf{x}}_{k} +
\mathbf{Q_1}^\top
\mathbf{n}_o \\[5px]
\empty
\mathbf{Q_1}^\top\tilde{\mathbf{z}}_{o,k}
&\simeq
\mathbf{R_1} \tilde{\mathbf{x}}_{k} +
\mathbf{Q_1}^\top\mathbf{n}_o \\[5pt]
\empty
\Rightarrow~ \tilde{\mathbf{z}}_{n,k}
&\simeq
\mathbf{H}_{n,k} \tilde{\mathbf{x}}_{k} +
\mathbf{n}_n
\f}
As a result, the compressed measurement Jacobian will be of the size of the state,
which will significantly reduce the EKF update cost:
\f{align*}{
\hat{\mathbf{x}}_{k|k}
&= \hat{\mathbf{x}}_{k|k-1} + \mathbf{P}_{k|k-1} \mathbf{H}_{n,k}^\top (\mathbf{H}_{n,k} \mathbf{P}_{k|k-1} \mathbf{H}_{n,k}^\top + \mathbf{R}_n)^{-1}\tilde{\mathbf{z}}_{n,k} \\[5px]
\empty
\mathbf{P}_{k|k}
&= \mathbf{P}_{k|k-1} - \mathbf{P}_{k|k-1}\mathbf{H}_{n,k}^\top (\mathbf{H}_{n,k}\mathbf{P}_{k|k-1} \mathbf{H}_{n,k}^\top + \mathbf{R}_n)^{-1} \mathbf{H}_{n,k} \mathbf{P}_{k|k-1}^\top
\f}
*/

90
docs/update-delay.dox Normal file
View File

@@ -0,0 +1,90 @@
/**
@page update-delay Delayed Feature Initialization
We describe a method of delayed initialization of a 3D point feature as in [Visual-Inertial Odometry on Resource-Constrained Systems](https://escholarship.org/content/qt4nn0j264/qt4nn0j264.pdf) @cite Li2014THESIS.
Specifically, given a set of measurements involving the state \f$\mathbf{x}\f$ and a new feature \f$\mathbf{f}\f$, we want to optimally and efficiently initialize the feature.
\f{align*}{
\mathbf{z}_i = \mathbf{h}_i\left(\mathbf{x}, \mathbf{f}\right) + \mathbf{n}_i
\f}
In general, we collect more than the minimum number of measurements at different times needed for initialization (i.e. delayed).
For example, although in principle we need two monocular images to initialize a 3D point feature, we often collect more than two images in order to obtain better initialization.
To process all collected measurements, we stack them and perform linearization around some linearization points (estimates) denoted by \f$\hat{\mathbf x}\f$ and \f$\hat{\mathbf f}\f$:
\f{align*}{
\mathbf{z} &= \begin{bmatrix} \mathbf{z}_1 \\
\mathbf{z}_2 \\
\vdots \\
\mathbf{z}_m
\end{bmatrix} = \mathbf{h}\left(\mathbf{x}, \mathbf{f}\right)+ \mathbf{n}\\
\Rightarrow~~ \mathbf{r} &= \mathbf{z}-\mathbf{h}(\hat{\mathbf{x}}, \hat{\mathbf{f}}) = \mathbf{H}_x \tilde{\mathbf{x}} +\mathbf{H}_f
\tilde{\mathbf{f}} + \mathbf{n}
\f}
To efficiently compute the resulting augmented covariance matrix,
we perform [Givens rotations](https://en.wikipedia.org/wiki/Givens_rotation) to zero-out rows in \f$\mathbf{H}_f\f$ with indices larger than the dimension of \f$\tilde{\mathbf{f}}\f$,
and apply the same Givens rotations to \f$\mathbf{H}_x\f$ and \f$\mathbf{r}\f$.
As a result of this operation, we have the following linear system:
\f{align*}{
\begin{bmatrix} \mathbf{r}_1 \\ \mathbf{r}_2 \end{bmatrix} = \begin{bmatrix} \mathbf{H}_{x1} \\ \mathbf{H}_{x2} \end{bmatrix}
\tilde{\mathbf{x}} +\begin{bmatrix} \mathbf{H}_{f1} \\ \mathbf{0} \end{bmatrix}
\tilde{\mathbf{f}} + \begin{bmatrix} \mathbf{n}_1 \\ \mathbf{n}_2 \end{bmatrix}
\f}
Note that the bottom system essentially is corresponding to the nullspace projection as in the MSCKF update and \f$\mathbf{H}_{f1}\f$ is generally invertible.
Note also that we assume the measurement noise is isotropic; otherwise, we should first perform whitening to make it isotropic, which would save significant computations.
So, if the original measurement noise covariance \f$\mathbf{R} = \sigma^2\mathbf{I}_m\f$ and the
dimension of \f$\tilde{\mathbf{f}}\f$ is n, then the inferred measurement noise covariance will be
\f$\mathbf{R}_1 = \sigma^2\mathbf{I}_n\f$ and \f$\mathbf{R}_2 = \sigma^2\mathbf{I}_{m-n}\f$.
Now we can directly solve for the error of the new feature based on the first subsystem:
\f{align*}{
\tilde{\mathbf{f}} &= \mathbf{H}_{f1}^{-1}(\mathbf{r}_1-\mathbf{n}_1-\mathbf{H}_{x1}\tilde{\mathbf{x}}) \\
\Rightarrow~ \mathbb{E}[\tilde{\mathbf{f}}] &= \mathbf{H}_{f1}^{-1}(\mathbf{r}_1)
\f}
where we assumed noise and state error are zero mean.
We can update \f$\hat{\mathbf f}\f$ with this correction by \f$\hat{\mathbf f}+\mathbb{E}[\tilde{\mathbf{f}}]\f$.
Note that this is equivalent to a Gauss–Newton step for solving the corresponding maximum likelihood estimation (MLE) formed by fixing the estimate of \f$\mathbf{x}\f$ and optimizing over the value of \f$\hat{\mathbf{f}}\f$, and should
therefore be zero if we used such an optimization to come up with our initial estimate for the new variable.
We now can compute the covariance of the new feature as follows:
\f{align*}{
\mathbf{P}_{ff} &= \mathbb{E}\Big[(\tilde{\mathbf{f}}-\mathbb{E}[\tilde{\mathbf{f}}])
(\tilde{\mathbf{f}}-\mathbb{E}[\tilde{\mathbf{f}}])^{\top}\Big] \\
&= \mathbb{E}\Big[(\mathbf{H}_{f1}^{-1}(-\mathbf{n}_1-\mathbf{H}_{x1}\tilde{\mathbf{x}}))
(\mathbf{H}_{f1}^{-1}(-\mathbf{n}_1-\mathbf{H}_{x1}\tilde{\mathbf{x}}))^{\top}\Big] \\
&= \mathbf{H}_{f1}^{-1}(\mathbf{H}_{x1}\mathbf{P}_{xx}\mathbf{H}_{x1}^{\top} + \mathbf{R}_1)\mathbf{H}_{f1}^{-\top}
\f}
and the cross correlation can be computed as:
\f{align*}{
\mathbf{P}_{xf} &= \mathbb{E}\Big[(\tilde{\mathbf{x}})
(\tilde{\mathbf{f}}-\mathbb{E}[\tilde{\mathbf{f}}])^{\top}\Big] \\
&= \mathbb{E}\Big[(\tilde{\mathbf{x}})
(\mathbf{H}_{f1}^{-1}(-\mathbf{n}_1-\mathbf{H}_{x1}\tilde{\mathbf{x}}))^{\top}\Big] \\
&= -\mathbf{P}_{xx}\mathbf{H}_{x1}^{\top}\mathbf{H}_{f1}^{-\top}
\f}
These entries can then be placed in the correct location for the covariance.
For example when initializing a new feature to the end of the state, the augmented covariance would be:
\f{align*}{
\mathbf{P}_{aug} = \begin{bmatrix} \mathbf{P}_{xx} & \mathbf{P}_{xf} \\
\mathbf{P}_{xf}^{\top} & \mathbf{P}_{ff} \end{bmatrix}
\f}
Note that this process does not update the estimate for \f$\mathbf{x}\f$.
However, after initialization, we can then use the second system, \f$\mathbf{r}_2\f$, \f$\mathbf{H}_{x2}\f$, and \f$\mathbf{n}_2\f$ to update our new state through a standard EKF update (see @ref linear-meas section).
*/

502
docs/update-feat.dox Normal file
View File

@@ -0,0 +1,502 @@
/**
@page update-feat Camera Measurement Update
@tableofcontents
@section model Perspective Projection (Bearing) Measurement Model
Consider a 3D feature is detected from the camera image at time \f$k\f$, whose \f$uv\f$ measurement (i.e., the corresponding pixel coordinates) on the image plane is given by:
\f{align*}{
\mathbf{z}_{m,k}
&= \mathbf h(\mathbf x_k) + \mathbf n_k \\
&= \mathbf h_d(\mathbf{z}_{n,k}, ~\boldsymbol\zeta) + \mathbf{n}_k \\
&= \mathbf h_d(\mathbf h_p({}^{C_k}\mathbf{p}_f), ~\boldsymbol\zeta) + \mathbf{n}_k \\
&= \mathbf h_d(\mathbf h_p(\mathbf h_t({}^{G}\mathbf{p}_f,~{}^{C_k}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{C_k})), ~\boldsymbol\zeta) + \mathbf{n}_k \\
&= \mathbf h_d(\mathbf h_p(\mathbf h_t(\mathbf h_r(\boldsymbol\lambda,\cdots),~{}^{C_k}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{C_k})), ~\boldsymbol\zeta) + \mathbf{n}_k
\f}
where \f$\mathbf n_k\f$ is the measurement noise and typically assumed to be zero-mean white Gaussian;
\f$\mathbf z_{n,k}\f$ is the normalized undistorted uv measurement;
\f$\boldsymbol\zeta\f$ is the camera intrinsic parameters such as focal length and distortion parameters;
\f${}^{C_k}\mathbf{p}_f\f$ is the feature position in the current camera frame \f$\{C_k\}\f$;
\f${}^{G}\mathbf{p}_f\f$ is the feature position in the global frame \f$\{G\}\f$;
\f$\{ {}^{C_k}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{C_k} \}\f$ denotes the current camera pose (position and orientation) in the global frame (or camera extrinsics);
and \f$\boldsymbol\lambda\f$ is the feature's parameters of different representations (other than position) such as simply a xyz position or an inverse depth with bearing.
In the above expression, we decompose the measurement function into multiple concatenated functions corresponding to different operations,
which map the states into the raw uv measurement on the image plane.
It should be noted that
as we will perform intrinsic calibration along with extrinsic with different feature representations, the above camera measurement model is general.
The high-level description of each function is given in the next section.
@subsection model-table Measurement Function Overview
| Function | Description |
|---|---|
| \f$\mathbf{z}_k = \mathbf h_d (\mathbf{z}_{n,k}, ~\boldsymbol\zeta)\f$ | The distortion function that takes normalized coordinates and maps it into distorted uv coordinates |
|\f$\mathbf{z}_{n,k}= \mathbf h_p({}^{C_k}\mathbf{p}_f)\f$| The projection function that takes a 3D point in the image and converts it into the normalized uv coordinates |
|\f${}^{C_k}\mathbf{p}_f=\mathbf h_t({}^{G}\mathbf{p}_f,~{}^{C_k}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{C_k})\f$ | Transforming a feature's position in the global frame into the current camera frame |
| \f${}^{G}\mathbf{p}_f= \mathbf h_r(\boldsymbol\lambda,\cdots)\f$ | Converting from a feature representation to a 3D feature in the global frame |
@subsection model-deriv Jacobian Computation
Given the above nested functions, we can leverage the chain rule to find the total state Jacobian.
Since our feature representation function \f$\mathbf h_r(\cdots)\f$ might also depend on the state, i.e. an anchoring pose, we need to carefully consider its additional derivatives.
Consider the following example of our measurement in respect to a state \f$ \mathbf{x} \f$ Jacobian:
\f{align*}{
\frac{\partial \mathbf{z}_k}{\partial \mathbf{x}}
=
\frac{\partial \mathbf h_d (\cdot) }{\partial \mathbf{z}_{n,k}}
\frac{\partial \mathbf h_p (\cdot) }{\partial {}^{C_k}\mathbf{p}_f}
\frac{\partial \mathbf h_t (\cdot) }{\partial \mathbf{x}}
+
\frac{\partial \mathbf h_d (\cdot) }{\partial \mathbf{z}_{n,k}}
\frac{\partial \mathbf h_p (\cdot) }{\partial {}^{C_k}\mathbf{p}_f}
\frac{\partial \mathbf h_t (\cdot) }{\partial {}^{G}\mathbf{p}_f}
\frac{\partial \mathbf h_r (\cdot) }{\partial \mathbf{x}}
\f}
In the global feature representations, see @ref feat-rep section, the second term will be zero while for the anchored representations it will need to be computed.
@section distortion Distortion Function
@subsection distortion-radtan Radial model
To calibrate camera intrinsics, we need to know how to map our normalized coordinates into the raw pixel coordinates on the image plane.
We first employ the radial distortion as in [OpenCV model](https://docs.opencv.org/3.4/da/d54/group__imgproc__transform.html#details):
\f{align*}{
\begin{bmatrix} u \\ v \end{bmatrix}:= \mathbf{z}_k &= \mathbf h_d(\mathbf{z}_{n,k}, ~\boldsymbol\zeta)
= \begin{bmatrix} f_x * x + c_x \\
f_y * y + c_y \end{bmatrix}\\[1em]
\empty
{\rm where}~~
x &= x_n (1 + k_1 r^2 + k_2 r^4) + 2 p_1 x_n y_n + p_2(r^2 + 2 x_n^2) \\\
y &= y_n (1 + k_1 r^2 + k_2 r^4) + p_1 (r^2 + 2 y_n^2) + 2 p_2 x_n y_n \\[1em]
r^2 &= x_n^2 + y_n^2
\f}
where \f$ \mathbf{z}_{n,k} = [ x_n ~ y_n ]^\top\f$ are the normalized coordinates of the 3D feature and u and v are the distorted image coordinates on the image plane.
The following distortion and camera intrinsic (focal length and image center) parameters are involved in the above distortion model,
which can be estimated online:
\f{align*}{
\boldsymbol\zeta = \begin{bmatrix} f_x & f_y & c_x & c_y & k_1 & k_2 & p_1 & p_2 \end{bmatrix}^\top
\f}
Note that we do not estimate the higher order (i.e., higher than fourth order) terms as in most offline calibration methods such as [Kalibr](https://github.com/ethz-asl/kalibr).
To estimate these intrinsic parameters (including the distortion parameters), the following Jacobian for these parameters is needed:
\f{align*}{
\frac{\partial \mathbf h_d(\cdot)}{\partial \boldsymbol\zeta} =
\begin{bmatrix}
x & 0 & 1 & 0 & f_x*(x_nr^2) & f_x*(x_nr^4) & f_x*(2x_ny_n) & f_x*(r^2+2x_n^2) \\[5pt]
0 & y & 0 & 1 & f_y*(y_nr^2) & f_y*(y_nr^4) & f_y*(r^2+2y_n^2) & f_y*(2x_ny_n)
\end{bmatrix}
\f}
Similarly, the Jacobian with respect to the normalized coordinates can be obtained as follows:
\f{align*}{
\frac{\partial \mathbf h_d (\cdot)}{\partial \mathbf{z}_{n,k}} =
\begin{bmatrix}
f_x*((1+k_1r^2+k_2r^4)+(2k_1x_n^2+4k_2x_n^2(x_n^2+y_n^2))+2p_1y_n+(2p_2x_n+4p_2x_n)) & f_x*(2k_1x_ny_n+4k_2x_ny_n(x_n^2+y_n^2)+2p_1x_n+2p_2y_n) \\
f_y*(2k_1x_ny_n+4k_2x_ny_n(x_n^2+y_n^2)+2p_1x_n+2p_2y_n) & f_y*((1+k_1r^2+k_2r^4)+(2k_1y_n^2+4k_2y_n^2(x_n^2+y_n^2))+(2p_1y_n+4p_1y_n)+2p_2x_n)
\end{bmatrix}
\f}
@subsection distortion-equi Fisheye model
As fisheye or wide-angle lenses are widely used in practice, we here provide mathematical derivations of such distortion model as in [OpenCV fisheye](https://docs.opencv.org/3.4/db/d58/group__calib3d__fisheye.html#details).
\f{align*}{
\begin{bmatrix} u \\ v \end{bmatrix}:= \mathbf{z}_k &= \mathbf h_d(\mathbf{z}_{n,k}, ~\boldsymbol\zeta)
= \begin{bmatrix} f_x * x + c_x \\
f_y * y + c_y \end{bmatrix}\\[1em]
\empty
{\rm where}~~
x &= \frac{x_n}{r} * \theta_d \\
y &= \frac{y_n}{r} * \theta_d \\
\theta_d &= \theta (1 + k_1 \theta^2 + k_2 \theta^4 + k_3 \theta^6 + k_4 \theta^8) \\
\quad r^2 &= x_n^2 + y_n^2 \\
\theta &= atan(r)
\f}
where \f$ \mathbf{z}_{n,k} = [ x_n ~ y_n ]^\top\f$ are the normalized coordinates of the 3D feature
and u and v are the distorted image coordinates on the image plane.
Clearly, the following distortion intrinsic parameters are used in the above model:
\f{align*}{
\boldsymbol\zeta = \begin{bmatrix} f_x & f_y & c_x & c_y & k_1 & k_2 & k_3 & k_4 \end{bmatrix}^\top
\f}
In analogy to the previous radial distortion case, the following Jacobian for these parameters is needed for intrinsic calibration:
\f{align*}{
\frac{\partial \mathbf h_d (\cdot)}{\partial \boldsymbol\zeta} =
\begin{bmatrix}
x & 0 & 1 & 0 & f_x*(\frac{x_n}{r}\theta^3) & f_x*(\frac{x_n}{r}\theta^5) & f_x*(\frac{x_n}{r}\theta^7) & f_x*(\frac{x_n}{r}\theta^9) \\[5pt]
0 & y & 0 & 1 & f_y*(\frac{y_n}{r}\theta^3) & f_y*(\frac{y_n}{r}\theta^5) & f_y*(\frac{y_n}{r}\theta^7) & f_y*(\frac{y_n}{r}\theta^9)
\end{bmatrix}
\f}
Similarly, with the chain rule of differentiation,
we can compute the following Jacobian with respect to the normalized coordinates:
\f{align*}{
\frac{\partial \mathbf h_d(\cdot)}{\partial \mathbf{z}_{n,k}}
&=
\frac{\partial uv}{\partial xy}\frac{\partial xy}{\partial x_ny_n}+
\frac{\partial uv}{\partial xy}\frac{\partial xy}{\partial r}\frac{\partial r}{\partial x_ny_n}+
\frac{\partial uv}{\partial xy}\frac{\partial xy}{\partial \theta_d}\frac{\partial \theta_d}{\partial \theta}\frac{\partial \theta}{\partial r}\frac{\partial r}{\partial x_ny_n} \\[1em]
\empty
{\rm where}~~~~
\frac{\partial uv}{\partial xy} &= \begin{bmatrix} f_x & 0 \\ 0 & f_y \end{bmatrix} \\
\empty
\frac{\partial xy}{\partial x_ny_n} &= \begin{bmatrix} \theta_d/r & 0 \\ 0 & \theta_d/r \end{bmatrix} \\
\empty
\frac{\partial xy}{\partial r} &= \begin{bmatrix} -\frac{x_n}{r^2}\theta_d \\ -\frac{y_n}{r^2}\theta_d \end{bmatrix} \\
\empty
\frac{\partial r}{\partial x_ny_n} &= \begin{bmatrix} \frac{x_n}{r} & \frac{y_n}{r} \end{bmatrix} \\
\empty
\frac{\partial xy}{\partial \theta_d} &= \begin{bmatrix} \frac{x_n}{r} \\ \frac{y_n}{r} \end{bmatrix} \\
\empty
\frac{\partial \theta_d}{\partial \theta} &= \begin{bmatrix} 1 + 3k_1 \theta^2 + 5k_2 \theta^4 + 7k_3 \theta^6 + 9k_4 \theta^8\end{bmatrix} \\
\empty
\frac{\partial \theta}{\partial r} &= \begin{bmatrix} \frac{1}{r^2+1} \end{bmatrix}
\f}
@section projection Perspective Projection Function
The standard pinhole camera model is used to project a 3D point in the *camera* frame into the normalized image plane (with unit depth):
\f{align*}{
\mathbf{z}_{n,k} &= \mathbf h_p ({}^{C_k}\mathbf{p}_f) =
\begin{bmatrix}
{}^Cx/{}^Cz \\
{}^Cy/{}^Cz
\end{bmatrix} \\
\text{where} \quad {}^{C_k}\mathbf{p}_f &= \begin{bmatrix} {}^Cx \\ {}^Cy \\ {}^Cz \end{bmatrix}
\f}
whose Jacobian matrix is computed as follows:
\f{align*}{
\frac{\partial \mathbf h_p (\cdot)}{\partial {}^{C_k}\mathbf{p}_f} =
\begin{bmatrix}
\frac{1}{{}^Cz} & 0 & \frac{-{}^Cx}{({}^Cz)^2} \\
0 & \frac{1}{{}^Cz} & \frac{-{}^Cy}{({}^Cz)^2} \\
\end{bmatrix}
\f}
@section relative Euclidean Transformation
We employ the 6DOF rigid-body Euclidean transformation to transform the 3D feature position in the global frame \f$\{G\}\f$ to the current camera frame \f$\{C_k\}\f$ based on the current global camera pose:
\f{align*}{
{}^{C_k}\mathbf{p}_f
&= \mathbf h_t ({}^{G}\mathbf{p}_f,~{}^{C_k}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{C_k})
= {}^{C_k}_{G}\mathbf{R}({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{C_k})
\f}
Note that in visual-inertial navigation systems, we often keep the IMU, instead of camera, state in the state vector.
So, we need to further transform the above geometry using the time-invariant IMU-camera extrinsic parameters \f$\{ {}^{C}_{I}\mathbf{R}, ~{}^{C}\mathbf{p}_I \}\f$ as follows:
\f{align*}{
{}^{G}\mathbf{p}_{C_k}
&= {}^{G}\mathbf{p}_{I_k} + {}^{G}_{I}\mathbf{R} {}^{I}\mathbf{p}_{C_k}
= {}^{G}\mathbf{p}_{I_k} + {}^{G}_{I}\mathbf{R} {}^{I}\mathbf{p}_{C} \\
{}^{C_k}_{G}\mathbf{R} &= {}^{C_k}_{I}\mathbf{R} {}^{I_k}_{G}\mathbf{R} = {}^{C}_{I}\mathbf{R} {}^{I_k}_{G}\mathbf{R}
\f}
Substituting these quantities into the equation of \f$ {}^{C_k}\mathbf{p}_f\f$ yields:
\f{align*}{
{}^{C_k}\mathbf{p}_f = {}^{C}_{I}\mathbf{R}{}^{I_k}_{G}\mathbf{R}({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k})
+{}^{C}\mathbf{p}_I
\f}
We now can compute the following Jacobian with respect to the pertinent states:
\f{align*}{
\frac{\partial \mathbf h_t (\cdot)}{\partial {}^{G}\mathbf{p}_f} &= {}^{C}_{I}\mathbf{R}{}^{I_k}_{G}\mathbf{R} \\
\frac{\partial \mathbf h_t (\cdot)}{\partial {}^{I_k}_{G}\mathbf{R}} &= {}^{C}_{I}\mathbf{R} \left\lfloor {}^{I_k}_{G}\mathbf{R}({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k}) \times\right\rfloor \\
\frac{\partial \mathbf h_t (\cdot)}{\partial {}^{G}\mathbf{p}_{I_k}} &= -{}^{C}_{I}\mathbf{R}{}^{I_k}_{G}\mathbf{R}
\f}
where \f$\lfloor \mathbf a\times \rfloor \f$ denotes the skew symmetric matrix of a vector \f$\mathbf a\f$ (see [Quaternion TR](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf) @cite Trawny2005TR).
Note also that in above expression (as well as in ensuing derivations), there is a little abuse of notation;
that is, the Jacobian with respect to the rotation matrix is not the direct differentiation with respect to the 3x3 rotation matrix, instead with respect to the corresponding 3x1 rotation angle vector.
Moreover, if performing online extrinsic calibration, the Jacobian with respect to the IMU-camera extrinsics is needed:
\f{align*}{
\frac{\partial \mathbf h_t (\cdot)}{\partial {}^{C}_{I}\mathbf{R}} &= \left\lfloor {}^{C}_{I}\mathbf{R}{}^{I_k}_{G}\mathbf{R}({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k}) \times\right\rfloor \\
\frac{\partial \mathbf h_t (\cdot)}{\partial {}^{C}\mathbf{p}_I} &= \mathbf{I}_{3\times 3}
\f}
@section feat-rep Point Feature Representations
There are two main parameterizations of a 3D point feature: 3D position (xyz) and inverse depth with bearing.
Both of these can either be represented in the global frame or in an anchor frame of reference which adds a dependency on having an "anchor" pose where the feature is observed.
To allow for a unified treatment of different feature parameterizations \f$\boldsymbol \lambda\f$ in our codebase,
we derive in detail the generic function \f${}^{G}\mathbf{p}_f=\mathbf f (\cdot)\f$ that maps different representations into global position.
@subsection feat-rep-global-xyz Global XYZ
As the canonical parameterization, the global position of a 3D point feature is simply given by its xyz coordinates in the global frame of reference:
\f{align*}{
{}^{G}\mathbf{p}_f
&= \mathbf f(\boldsymbol\lambda) \\
&= \begin{bmatrix} {}^Gx \\ {}^Gy \\ {}^Gz \end{bmatrix} \\
\text{where} &\quad \boldsymbol\lambda = {}^{G}\mathbf{p}_f = \begin{bmatrix} {}^Gx & {}^Gy & {}^Gz \end{bmatrix}^\top
\f}
It is clear that the Jacobian with respect to the feature parameters is:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial \boldsymbol\lambda} &= \mathbf{I}_{3\times 3}
\f}
@subsection feat-rep-global-inv Global Inverse Depth
The global inverse-depth representation of a 3D point feature is given by (akin to spherical coordinates):
\f{align*}{
{}^{G}\mathbf{p}_f
&= \mathbf f(\boldsymbol\lambda) \\
&= \frac{1}{\rho}\begin{bmatrix} \cos(\theta)\sin(\phi) \\ \sin(\theta)\sin(\phi) \\ \cos(\phi) \end{bmatrix} \\
\text{where} &\quad \boldsymbol\lambda = \begin{bmatrix} \theta & \phi & \rho \end{bmatrix}^\top
\f}
The Jacobian with respect to the feature parameters can be computed as:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial \boldsymbol\lambda} &=
\begin{bmatrix}
-\frac{1}{\rho}\sin(\theta)\sin(\phi) & \frac{1}{\rho}\cos(\theta)\cos(\phi) & -\frac{1}{\rho^2}\cos(\theta)\sin(\phi) \\
\frac{1}{\rho}\cos(\theta)\sin(\phi) & \frac{1}{\rho}\sin(\theta)\cos(\phi) & -\frac{1}{\rho^2}\sin(\theta)\sin(\phi) \\
0 & -\frac{1}{\rho}\sin(\phi) & -\frac{1}{\rho^2}\cos(\phi)
\end{bmatrix}
\f}
@subsection feat-rep-global-inv2 Global Inverse Depth (MSCKF VERSION)
Note that as this representation has a singularity when the z-distance goes to zero,
it is not recommended to use in practice.
Instead, one should use the @ref feat-rep-anchor-inv2 representation.
The anchored version doesn't have this issue if features are represented in a camera frame that they were seen from (in which features should never have a non-positive z-direction).
@subsection feat-rep-anchor-xyz Anchored XYZ
We can represent a 3D point feature in some "anchor" frame (say some IMU local frame, \f$\{{}^{I_a}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{I_a}\}\f$),
which would normally be the IMU pose corresponding to the first camera frame where the feature was detected.
\f{align*}{
{}^{G}\mathbf{p}_f
&= \mathbf f(\boldsymbol\lambda,~{}^{I_a}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{I_a},~{}^{C}_{I}\mathbf{R},~{}^{C}\mathbf{p}_{I}) \\
&=
{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top( \boldsymbol\lambda -{}^{C}\mathbf{p}_{I}) + {}^{G}\mathbf{p}_{I_a} \\
\text{where} &\quad \boldsymbol\lambda = {}^{C_a}\mathbf{p}_f = \begin{bmatrix} {}^{C_a}x & {}^{C_a}y & {}^{C_a}z \end{bmatrix}^\top
\f}
The Jacobian with respect to the feature state is given by:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial \boldsymbol\lambda} &= {}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top
\f}
As the anchor pose is involved in this representation, its Jacobians are computed as:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial {}^{I_a}_{G}\mathbf{R}} &= -{}^{I_a}_{G}\mathbf{R}^\top \left\lfloor{}^{C}_{I}\mathbf{R}^\top({}^{C_a}\mathbf{p}_f-{}^{C}\mathbf{p}_{I}) \times\right\rfloor \\
\frac{\partial \mathbf f(\cdot)}{\partial {}^{G}\mathbf{p}_{I_a}} &= \mathbf{I}_{3\times 3}
\f}
Moreover, if performing extrinsic calibration, the following Jacobians with respect to the IMU-camera extrinsics are also needed:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial {}^{C}_{I}\mathbf{R}} &= -{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top \left\lfloor({}^{C_a}\mathbf{p}_f-{}^{C}\mathbf{p}_{I}) \times\right\rfloor \\
\frac{\partial \mathbf f(\cdot)}{\partial {}^{C}\mathbf{p}_{I}} &= -{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top
\f}
@subsection feat-rep-anchor-inv Anchored Inverse Depth
In analogy to the global inverse depth case, we can employ the inverse-depth with bearing (akin to spherical coordinates) in the anchor frame, \f$\{{}^{I_a}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{I_a}\}\f$, to represent a 3D point feature:
\f{align*}{
{}^{G}\mathbf{p}_f
&= \mathbf f(\boldsymbol\lambda,~{}^{I_a}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{I_a},~{}^{C}_{I}\mathbf{R},~{}^{C}\mathbf{p}_{I}) \\
&=
{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top({}^{C_a}\mathbf{p}_f-{}^{C}\mathbf{p}_{I}) + {}^{G}\mathbf{p}_{I_a} \\
&=
{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top\Bigg(\frac{1}{\rho}\begin{bmatrix} \cos(\theta)\sin(\phi) \\ \sin(\theta)\sin(\phi) \\ \cos(\phi) \end{bmatrix}-{}^{C}\mathbf{p}_{I}\Bigg) + {}^{G}\mathbf{p}_{I_a} \\
\text{where} &\quad \boldsymbol\lambda = \begin{bmatrix} \theta & \phi & \rho \end{bmatrix}^\top
\f}
The Jacobian with respect to the feature state is given by:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial \boldsymbol\lambda} &=
{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top
\begin{bmatrix}
-\frac{1}{\rho}\sin(\theta)\sin(\phi) & \frac{1}{\rho}\cos(\theta)\cos(\phi) & -\frac{1}{\rho^2}\cos(\theta)\sin(\phi) \\
\frac{1}{\rho}\cos(\theta)\sin(\phi) & \frac{1}{\rho}\sin(\theta)\cos(\phi) & -\frac{1}{\rho^2}\sin(\theta)\sin(\phi) \\
0 & -\frac{1}{\rho}\sin(\phi) & -\frac{1}{\rho^2}\cos(\phi)
\end{bmatrix}
\f}
The Jacobians with respect to the anchor pose are:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial {}^{I_a}_{G}\mathbf{R}} &= -{}^{I_a}_{G}\mathbf{R}^\top \left\lfloor{}^{C}_{I}\mathbf{R}^\top({}^{C_a}\mathbf{p}_f-{}^{C}\mathbf{p}_{I}) \times\right\rfloor \\
\frac{\partial \mathbf f(\cdot)}{\partial {}^{G}\mathbf{p}_{I_a}} &= \mathbf{I}_{3\times 3}
\f}
The Jacobians with respect to the IMU-camera extrinsics are:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial {}^{C}_{I}\mathbf{R}} &= -{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top \left\lfloor({}^{C_a}\mathbf{p}_f-{}^{C}\mathbf{p}_{I}) \times\right\rfloor \\
\frac{\partial \mathbf f(\cdot)}{\partial {}^{C}\mathbf{p}_{I}} &= -{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top
\f}
@subsection feat-rep-anchor-inv2 Anchored Inverse Depth (MSCKF Version)
Note that a simpler version of inverse depth was used in the original MSCKF paper @cite Mourikis2007ICRA.
This representation does not have the singularity if it is represented in a camera frame the feature was measured from.
\f{align*}{
{}^{G}\mathbf{p}_f
&= \mathbf f(\boldsymbol\lambda,~{}^{I_a}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{I_a},~{}^{C}_{I}\mathbf{R},~{}^{C}\mathbf{p}_{I}) \\
&=
{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top({}^{C_a}\mathbf{p}_f-{}^{C}\mathbf{p}_{I}) + {}^{G}\mathbf{p}_{I_a} \\
&=
{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top\Bigg(\frac{1}{\rho}\begin{bmatrix} \alpha \\ \beta \\ 1 \end{bmatrix}-{}^{C}\mathbf{p}_{I}\Bigg) + {}^{G}\mathbf{p}_{I_a} \\
\text{where} &\quad \boldsymbol\lambda = \begin{bmatrix} \alpha & \beta & \rho \end{bmatrix}^\top
\f}
The Jacobian with respect to the feature state is:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial \boldsymbol\lambda} &=
{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top
\begin{bmatrix}
\frac{1}{\rho} & 0 & -\frac{1}{\rho^2}\alpha \\
0 & \frac{1}{\rho} & -\frac{1}{\rho^2}\beta \\
0 & 0 & -\frac{1}{\rho^2}
\end{bmatrix}
\f}
The Jacobians with respect to the anchor state are:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial {}^{I_a}_{G}\mathbf{R}} &= -{}^{I_a}_{G}\mathbf{R}^\top \left\lfloor{}^{C}_{I}\mathbf{R}^\top({}^{C_a}\mathbf{p}_f-{}^{C}\mathbf{p}_{I}) \times\right\rfloor \\
\frac{\partial \mathbf f(\cdot)}{\partial {}^{G}\mathbf{p}_{I_a}} &= \mathbf{I}_{3\times 3}
\f}
The Jacobians with respect to the IMU-camera extrinsics are:
\f{align*}{
\frac{\partial \mathbf f(\cdot)}{\partial {}^{C}_{I}\mathbf{R}} &= -{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top \left\lfloor({}^{C_a}\mathbf{p}_f-{}^{C}\mathbf{p}_{I}) \times\right\rfloor \\
\frac{\partial \mathbf f(\cdot)}{\partial {}^{C}\mathbf{p}_{I}} &= -{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top
\f}
@subsection feat-rep-anchor-inv3 Anchored Inverse Depth (MSCKF Single Depth Version)
This feature representation is based on the MSCKF representation @cite Mourikis2007ICRA, and the single depth from VINS-Mono @cite Qin2018TRO.
As compared to the implementation in @cite Qin2018TRO, we are careful about how we handle treating of the bearing of the feature.
During initialization we initialize a full 3D feature and then follow that by marginalizing the bearing portion of it, leaving the depth in the state vector.
The marginalized bearing is then fixed for all future linearizations.
Then during update, we perform nullspace projection at every timestep to remove the feature dependence on this bearing.
To do so, we need at least *two* sets of UV measurements to perform this bearing nullspace operation since we lose two dimensions of the feature in the process.
We can define the feature measurement function as follows:
\f{align*}{
{}^{G}\mathbf{p}_f
&= \mathbf f(\boldsymbol\lambda,~{}^{I_a}_{G}\mathbf{R},~{}^{G}\mathbf{p}_{I_a},~{}^{C}_{I}\mathbf{R},~{}^{C}\mathbf{p}_{I}) \\
&=
{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top({}^{C_a}\mathbf{p}_f-{}^{C}\mathbf{p}_{I}) + {}^{G}\mathbf{p}_{I_a} \\
&=
{}^{I_a}_{G}\mathbf{R}^\top{}^{C}_{I}\mathbf{R}^\top\Big(\frac{1}{\rho}\hat{\mathbf{b}}-{}^{C}\mathbf{p}_{I}\Big) + {}^{G}\mathbf{p}_{I_a} \\
\text{where} &\quad \boldsymbol\lambda = \begin{bmatrix} \rho \end{bmatrix}
\f}
In the above case we have defined a bearing \f$\hat{\mathbf{b}}\f$ which is the marginalized bearing of the feature after initialization.
After collecting two measurements, we can nullspace project to remove the Jacobian with respect to this bearing variable.
*/

289
docs/update-featinit.dox Normal file
View File

@@ -0,0 +1,289 @@
/**
@page update-featinit Feature Triangulation
@tableofcontents
@section featinit-linear 3D Cartesian Triangulation
We wish to create a solvable linear system that can give us an initial guess for the 3D cartesian position of our feature.
To do this, we take all the poses that the feature is seen from to be of known quantity.
This feature will be triangulated in some anchor camera frame \f$\{A\}\f$ which we can arbitrary pick.
If the feature \f$\mathbf{p}_f\f$ is observed by pose \f$1\ldots m\f$, given the anchor pose \f$A\f$,
we can have the following transformation from any camera pose \f$C_i, i=1\ldots m\f$:
\f{align*}{
{}^{C_i}\mathbf{p}_{f} & = {}^{C_i}_A\mathbf{R} \left( {}^A\mathbf{p}_f - {}^A\mathbf{p}_{C_i}\right) \\
{}^A\mathbf{p}_f & = {}^{C_i}_A\mathbf{R}^\top {}^{C_i}\mathbf{p}_{f} + {}^A\mathbf{p}_{C_i}
\f}
In the absence of noise, the measurement in the current frame is the bearing \f${}^{C_i}\mathbf{b}\f$ and its depth \f${}^{C_i}z\f$.
Thus we have the following mapping to a feature seen from the current frame:
\f{align*}{
{}^{C_i}\mathbf{p}_f &
= {}^{C_i}z_{f} {}^{C_i}\mathbf{b}_{f}
= {}^{C_i}z_{f}
\begin{bmatrix}
u_n \\ v_n \\ 1
\end{bmatrix}
\f}
We note that \f$u_n\f$ and \f$v_n\f$ represent the undistorted normalized image coordinates.
This bearing can be warped into the anchor frame by substituting into the above equation:
\f{align*}{
{}^A\mathbf{p}_f
& = {}^{C_i}_A\mathbf{R}^\top {}^{C_i}z_{f} {}^{C_i}\mathbf{b}_{f} + {}^A\mathbf{p}_{C_i} \\
& = {}^{C_i}z_{f} {}^{A}\mathbf{b}_{C_i \rightarrow f} + {}^A\mathbf{p}_{C_i}
\f}
To remove the need to estimate the extra degree of freedom of depth \f${}^{C_i}z_{f}\f$, we define the following vectors
which are orthogonal to the bearing \f${}^{A}\mathbf{b}_{C_i \rightarrow f}\f$:
\f{align*}{
{}^{A}\mathbf{N}_i &=
\lfloor {}^{A}\mathbf{b}_{C_i \rightarrow f} \times\rfloor
=
\begin{bmatrix}
0 & -{}^{A}{b}_{C_i \rightarrow f}(3) & {}^{A}{b}_{C_i \rightarrow f}(2) \\
{}^{A}{b}_{C_i \rightarrow f}(3) & 0 & -{}^{A}{b}_{C_i \rightarrow f}(1) \\
-{}^{A}{b}_{C_i \rightarrow f}(2) & {}^{A}{b}_{C_i \rightarrow f}(1) & 0 \\
\end{bmatrix}
\f}
All three rows are perpendicular to the vector \f${}^{A}\mathbf{b}_{C_i \rightarrow f}\f$ and thus \f${}^{A}\mathbf{N}_i{}^{A}\mathbf{b}_{C_i \rightarrow f}=\mathbf{0}_3\f$.
We can then multiply the transform equation/constraint to form equations which only relate to the unknown 3 d.o.f. \f${}^A\mathbf{p}_f\f$:
\f{align*}{
{}^{A}\mathbf{N}_i
{}^A\mathbf{p}_f & =
{}^{A}\mathbf{N}_i {}^{C_i}z_{f} {}^{A}\mathbf{b}_{C_i \rightarrow f} +
{}^{A}\mathbf{N}_i {}^A\mathbf{p}_{C_i} \\
& = {}^{A}\mathbf{N}_i {}^A\mathbf{p}_{C_i}
\f}
By stacking all the measurements, we can have:
\f{align*}{
\underbrace{
\begin{bmatrix}
\vdots
\\
{}^{A}\mathbf{N}_i
\\
\vdots
\end{bmatrix}
}_{\mathbf{A}}
{}^A\mathbf{p}_f & =
\underbrace{
\begin{bmatrix}
\vdots
\\
{}^{A}\mathbf{N}_i
{}^A\mathbf{p}_{C_i}
\\
\vdots
\end{bmatrix}
}_{\mathbf{b}}
\f}
Since each pixel measurement provides two constraints, as long as \f$m>1\f$, we will have enough constraints to triangulate the feature.
In practice, the more views of the feature the better the triangulation, and thus we normally want to have a feature seen from at least five views.
We could select two rows of each \f${}^{A}\mathbf{N}_i\f$ to reduce the number of rows, but by having a square system we can perform the following "trick".
\f{align*}{
\mathbf{A}^\top\mathbf{A}~
{}^A\mathbf{p}_f &=
\mathbf{A}^\top\mathbf{b} \\
\Big( \sum_i {}^{A}\mathbf{N}_i^\top {}^{A}\mathbf{N}_i \Big)
{}^A\mathbf{p}_f &=
\Big( \sum_i {}^{A}\mathbf{N}_i^\top {}^{A}\mathbf{N}_i {}^A\mathbf{p}_{C_i} \Big)
\f}
This is a 3x3 system which can be quickly solved for as compared to the original 3mx3m or 2mx2m system.
We additionally check that the triangulated feature is "valid" and in front of the camera and not too far away.
The [condition number](https://en.wikipedia.org/wiki/Condition_number) of the above linear system is also checked, and we reject systems that are "sensitive" to errors and thus have a large value.
@section featinit-linear-1d 1D Depth Triangulation
We wish to create a solvable linear system that can give us an initial guess for the 1D depth position of our feature.
To do this, we take all the poses that the feature is seen from to be of known quantity along with the bearing in the anchor frame.
This feature will be triangulated in some anchor camera frame \f$\{A\}\f$ which we can arbitrary pick.
We define it as its normalized image coordinates \f$[u_n ~ v_n ~ 1]^\top\f$ in the anchor frame.
If the feature \f$\mathbf{p}_f\f$ is observed by pose \f$1\ldots m\f$, given the anchor pose \f$A\f$,
we can have the following transformation from any camera pose \f$C_i, i=1\ldots m\f$:
\f{align*}{
{}^{C_i}\mathbf{p}_{f} & = {}^{C_i}_A\mathbf{R} \left( {}^A\mathbf{p}_f - {}^A\mathbf{p}_{C_i}\right) \\
{}^A\mathbf{p}_f & = {}^{C_i}_A\mathbf{R}^\top {}^{C_i}\mathbf{p}_{f} + {}^A\mathbf{p}_{C_i} \\
{}^{A}z_{f} {}^A\mathbf{b}_f & = {}^{C_i}_A\mathbf{R}^\top {}^{C_i}\mathbf{p}_{f} + {}^A\mathbf{p}_{C_i} \\
\f}
In the absence of noise, the measurement in the current frame is the bearing \f${}^{C_i}\mathbf{b}\f$ and its depth \f${}^{C_i}z\f$.
\f{align*}{
{}^{C_i}\mathbf{p}_f &
= {}^{C_i}z_{f} {}^{C_i}\mathbf{b}_{f}
= {}^{C_i}z_{f}
\begin{bmatrix}
u_n \\ v_n \\ 1
\end{bmatrix}
\f}
We note that \f$u_n\f$ and \f$v_n\f$ represent the undistorted normalized image coordinates.
This bearing can be warped into the anchor frame by substituting into the above equation:
\f{align*}{
{}^{A}z_{f} {}^A\mathbf{b}_f
& = {}^{C_i}_A\mathbf{R}^\top {}^{C_i}z_{f} {}^{C_i}\mathbf{b}_{f} + {}^A\mathbf{p}_{C_i} \\
& = {}^{C_i}z_{f} {}^{A}\mathbf{b}_{C_i \rightarrow f} + {}^A\mathbf{p}_{C_i}
\f}
To remove the need to estimate the extra degree of freedom of depth \f${}^{C_i}z_{f}\f$, we define the following vectors
which are orthogonal to the bearing \f${}^{A}\mathbf{b}_{C_i \rightarrow f}\f$:
\f{align*}{
{}^{A}\mathbf{N}_i &=
\lfloor {}^{A}\mathbf{b}_{C_i \rightarrow f} \times\rfloor
\f}
All three rows are perpendicular to the vector \f${}^{A}\mathbf{b}_{C_i \rightarrow f}\f$ and thus \f${}^{A}\mathbf{N}_i{}^{A}\mathbf{b}_{C_i \rightarrow f}=\mathbf{0}_3\f$.
We can then multiply the transform equation/constraint to form an equation which only relates to the unknown \f${}^{A}z_{f}\f$:
\f{align*}{
({}^{A}\mathbf{N}_i {}^A\mathbf{b}_f)
{}^{A}z_{f} & =
{}^{A}\mathbf{N}_i {}^{C_i}z_{f} {}^{A}\mathbf{b}_{C_i \rightarrow f} +
{}^{A}\mathbf{N}_i {}^A\mathbf{p}_{C_i} \\
& = {}^{A}\mathbf{N}_i {}^A\mathbf{p}_{C_i}
\f}
We can then formulate the following system:
\f{align*}{
\Big( \sum_i ({}^{A}\mathbf{N}_i{}^A\mathbf{b}_f)^\top ({}^{A}\mathbf{N}_i{}^A\mathbf{b}_f) \Big)
{}^{A}z_{f} &=
\Big( \sum_i ({}^{A}\mathbf{N}_i{}^A\mathbf{b}_f)^\top {}^{A}\mathbf{N}_i{}^A\mathbf{p}_{C_i} \Big)
\f}
This is a 1x1 system which can be quickly solved for with a single scalar division.
We additionally check that the triangulated feature is "valid" and in front of the camera and not too far away.
The full feature can be reconstructed by \f$ {}^A\mathbf{p}_f = {}^{A}z_{f} {}^A\mathbf{b}_f\f$.
@section featinit-nonlinear 3D Inverse Non-linear Optimization
After we get the triangulated feature 3D position, a nonlinear least-squares will be performed to refine this estimate.
In order to achieve good numerical stability, we use the inverse depth representation for point feature which helps with convergence.
We find that in most cases this problem converges within 2-3 iterations in indoor environments.
The feature transformation can be written as:
\f{align*}{
{}^{C_i}\mathbf{p}_f & =
{}^{C_i}_A\mathbf{R}
\left(
{}^A\mathbf{p}_f - {}^A\mathbf{p}_{C_i}
\right) \\
&=
{}^Az_f
{}^{C_i}_A\mathbf{R}
\left(
\begin{bmatrix}
{}^Ax_f/{}^Az_f \\ {}^Ay_f/{}^Az_f \\ 1
\end{bmatrix}
-
\frac{1}{{}^Az_f}
{}^A\mathbf{p}_{C_i}
\right)
\\
\Rightarrow
\frac{1}{{}^Az_f}
{}^{C_i}\mathbf{p}_f
& =
{}^{C_i}_A\mathbf{R}
\left(
\begin{bmatrix}
{}^Ax_f/{}^Az_f \\ {}^Ay_f/{}^Az_f \\ 1
\end{bmatrix}
-
\frac{1}{{}^Az_f}
{}^A\mathbf{p}_{C_i}
\right)
\f}
We define \f$u_A = {}^Ax_f/{}^Az_f\f$, \f$v_A = {}^Ay_f/{}^Az_f\f$, and \f$\rho_A = {1}/{{}^Az_f}\f$ to get the following measurement equation:
\f{align*}{
h(u_A, v_A, \rho_A)
&
=
{}^{C_i}_A\mathbf{R}
\left(
\begin{bmatrix}
u_A \\ v_A \\ 1
\end{bmatrix}
-
\rho_A
{}^A\mathbf{p}_{C_i}
\right)
\f}
The feature measurement seen from the \f$\{C_i\}\f$ camera frame can be reformulated as:
\f{align*}{
\mathbf{z} &
=
\begin{bmatrix}
u_i \\ v_i
\end{bmatrix} \\
&=
\begin{bmatrix}
h(u_A, v_A, \rho_A)(1) / h(u_A, v_A, \rho_A)(3) \\
h(u_A, v_A, \rho_A)(2) / h(u_A, v_A, \rho_A)(3) \\
\end{bmatrix}
\\
& =
\mathbf{h}(u_A, v_A, \rho_A)
\f}
Therefore, we can have the least-squares formulated and Jacobians:
\f{align*}{
\operatorname*{argmin}_{u_A, v_A, \rho_A}||{\mathbf{z} - \mathbf{h}(u_A, v_A, \rho_A)}||^2
\f}
\f{align*}{
\frac{\partial \mathbf{h}(u_A, v_A, \rho_A)}{\partial {h}(u_A, v_A, \rho_A)}
& =
\begin{bmatrix}
1/h(\cdots)(3) & 0 & -h(\cdots)(1)/h(\cdots)(3)^2 \\
0 & 1/h(\cdots)(3) & -h(\cdots)(2)/h(\cdots)(3)^2
\end{bmatrix} \\
\frac{\partial {h}(u_A, v_A, \rho_A)}{\partial [u_A, v_A, \rho_A]}
& =
{}^{C_i}_A\mathbf{R}
\begin{bmatrix}
\begin{bmatrix}
1 & 0 \\
0 & 1 \\
0 & 0
\end{bmatrix} & -{}^A\mathbf{p}_{C_i}
\end{bmatrix}
\f}
The least-squares problem can be solved with [Gaussian-Newton](https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm) or [Levenberg-Marquart](https://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm) algorithm.
*/

165
docs/update-null.dox Normal file
View File

@@ -0,0 +1,165 @@
/**
@page update-null MSCKF Nullspace Projection
In the standard EKF update, given a linearized measurement error (or residual) equation:
\f{align*}{
\tilde{\mathbf{z}}_{m,k}
&\simeq
\mathbf{H}_{x} \tilde{\mathbf{x}}_{k}
+ \mathbf{H}_{f} {}^G\tilde{\mathbf{p}}_f + \mathbf{n}_k
\f}
we naively need to compute the residual covariance matrix \f$\mathbf{P}_{zz}\f$ as follows:
\f{align*}{
\mathbf{P}_{zz}
&= \mathbb{E}\left[ \tilde{\mathbf{z}}_{m,k} \tilde{\mathbf{z}}_{m,k}^\top \right] \\[5px]
&= \mathbb{E}\left[
(\mathbf{H}_{x}\tilde{\mathbf{x}}_{k}+\mathbf{H}_{f}{}^G\tilde{\mathbf{p}}_f+\mathbf{n}_k)
(\mathbf{H}_{x}\tilde{\mathbf{x}}_{k}+\mathbf{H}_{f}{}^G\tilde{\mathbf{p}}_f+\mathbf{n}_k)^\top
\right] \\[5px]
&= \mathbb{E}\Big[
\mathbf{H}_{x}\tilde{\mathbf{x}}_{k}\tilde{\mathbf{x}}_{k}^\top\mathbf{H}_{x}^\top
+\mathbf{H}_{x}\tilde{\mathbf{x}}_{k}{}^G\tilde{\mathbf{p}}_f^\top\mathbf{H}_{f}^\top
+\textcolor{red}{\mathbf{H}_{x}\tilde{\mathbf{x}}_{k}\mathbf{n}_k^\top} \nonumber\\[3px]
&\hspace{3cm}+
\mathbf{H}_{f}{}^G\tilde{\mathbf{p}}_f\tilde{\mathbf{x}}_{k}^\top\mathbf{H}_{x}^\top
+\mathbf{H}_{f}{}^G\tilde{\mathbf{p}}_f{}^G\tilde{\mathbf{p}}_f^\top\mathbf{H}_{f}^\top
+\mathbf{H}_{f}{}^G\tilde{\mathbf{p}}_f\mathbf{n}_k^\top \nonumber\\[3px]
&\hspace{3cm}+
\textcolor{red}{\mathbf{n}_k\tilde{\mathbf{x}}_{k}^\top\mathbf{H}_{x}^\top}
+\mathbf{n}_k{}^G\tilde{\mathbf{p}}_f^\top\mathbf{H}_{f}^\top
+\mathbf{n}_k\mathbf{n}_k^\top
\Big] \\[5px]
\empty
&=
\mathbf{H}_x\mathbb{E}\Big[\tilde{\mathbf{x}}_{k}\tilde{\mathbf{x}}_{k}^\top\Big]\mathbf{H}_x^\top
+\mathbf{H}_x\mathbb{E}\Big[\tilde{\mathbf{x}}_{k}{}^G\tilde{\mathbf{p}}_f^\top\Big]\mathbf{H}_f^\top
+\mathbf{H}_f\mathbb{E}\Big[{}^G\tilde{\mathbf{p}}_f\tilde{\mathbf{x}}_{k}^\top\Big]\mathbf{H}_x^\top
+ \mathbf{H}_f\mathbb{E}\Big[{}^G\tilde{\mathbf{p}}_f{}^G\tilde{\mathbf{p}}_f^\top\Big]\mathbf{H}_f^\top \nonumber\\[3px]
&\hspace{3.2cm}+
\mathbf{H}_f\mathbb{E}\Big[{}^G\tilde{\mathbf{p}}_f\mathbf{n}_k^\top\Big]
+\mathbb{E}\Big[\mathbf{n}_k{}^G\tilde{\mathbf{p}}_f^\top\Big]\mathbf{H}_f^\top
+\mathbb{E}\Big[\mathbf{n}_k\mathbf{n}_k^\top\Big] \\[5px]
\empty
&=
\mathbf{H}_x \mathbf{P}_{xx}\mathbf{H}_x^\top
+\mathbf{H}_x \mathbf{P}_{xf}\mathbf{H}_f^\top
+\mathbf{H}_f \mathbf{P}_{fx}\mathbf{H}_x^\top
+ \mathbf{H}_f \mathbf{P}_{ff}\mathbf{H}_f^\top \nonumber\\[3px]
&\hspace{2.3cm}+
\mathbf{H}_f \mathbf{P}_{fn}
+\mathbf{P}_{nf} \mathbf{H}_f^\top
+\mathbf{R}_d
\f}
However, there would be a big problem in visual-inertial odometry (VIO);
that is, we do not know what the prior feature covariance is, and the feature error is coupled with the state, itself, and the noise (i.e., \f$\mathbf{P}_{xf}\f$, \f$\mathbf{P}_{ff}\f$, and \f$\mathbf{P}_{nf}\f$ are unknown).
This motivates the need for a method to remove the feature \f${}^G\tilde{\mathbf{p}}_f\f$ from the linearized measurement equation (thus removing the correlation between the measurement and its error).
To this end, we start with the measurement residual function by removing the "sensitivity" to feature error we compute and apply the left nullspace of the Jacobian \f$\mathbf{H}_{f}\f$. We can compute it using QR decomposition as follows:
\f{align*}{
\mathbf{H}_{f} =
\begin{bmatrix} \mathbf{Q_1} & \mathbf{Q_2} \end{bmatrix}
\begin{bmatrix} \mathbf{R_1} \\ \mathbf{0} \end{bmatrix}
= \mathbf{Q_1}\mathbf{R_1}
\f}
Multiplying the linearized measurement equation by the nullspace of the feature Jacobian from the left yields:
\f{align*}{
\tilde{\mathbf{z}}_{m,k}
&\simeq
\mathbf{H}_{x} \tilde{\mathbf{x}}_{k}
+ \mathbf{Q_1}\mathbf{R_1}{}^G\tilde{\mathbf{p}}_f + \mathbf{n}_k \\[5px]
\empty
\Rightarrow~ \mathbf{Q_2}^\top\tilde{\mathbf{z}}_m
&\simeq
\mathbf{Q_2}^\top\mathbf{H}_{x} \tilde{\mathbf{x}}_{k}
+ \textcolor{red}{\mathbf{Q_2}^\top\mathbf{Q_1}\mathbf{R_1} {}^G\tilde{\mathbf{p}}_f} + \mathbf{Q_2}^\top\mathbf{n}_k \\[5px]
\empty
\Rightarrow~ \mathbf{Q_2}^\top\tilde{\mathbf{z}}_m
&\simeq
\mathbf{Q_2}^\top\mathbf{H}_{x} \tilde{\mathbf{x}}_{k} + \mathbf{Q_2}^\top\mathbf{n}_k \\[5px]
\empty
\Rightarrow~ \tilde{\mathbf{z}}_{o,k}
&\simeq
\mathbf{H}_{o,k}\tilde{\mathbf{x}}_{k} + \mathbf{n}_{o,k}
\f}
where we have employed the fact that \f$\mathbf Q_1\f$ and \f$\mathbf Q_2\f$ are orthonormal.
We now examine the dimensions of the involved matrices to appreciate the computation saving gained from this nullspace projection.
\f{align*}{
\textrm{size}(\mathbf{H}_{f}) &= 2n\times3 \textrm{~~where~}n\textrm{~is the number of uv measurements of this feature} \\[5px]
\textrm{size}({}^G\tilde{\mathbf{p}}_f) &= 3\times1 \\[5px]
\textrm{size}(\mathbf{H}_{x}) &= 2n\times(15+6c) \textrm{~~where~}c\textrm{~is the number of clones} \\[5px]
\textrm{size}(\tilde{\mathbf{x}}_{k}) &= (15+6c)\times1 \textrm{~~where~}c\textrm{~is the number of clones} \\[5px]
\textrm{rank}(\mathbf{H}_{f}) &\leq \textrm{min}(2n,3) = 3 \textrm{~~where equality holds in most cases} \\[5px]
\textrm{nullity}(\mathbf{H}_{f}) &= 2n - \textrm{rank}(\mathbf{H}_{f}) = 2n-3 \textrm{~~i.e., the left-nullspace dimension, assuming full rank}
\f}
With that, we can have the following conclusion about the sizes when the nullspace is applied:
\f{align*}{
\mathbf{Q_2}^\top\tilde{\mathbf{z}}_{m,k}
&\simeq
\mathbf{Q_2}^\top\mathbf{H}_{x} \tilde{\mathbf{x}}_{k} + \mathbf{Q_2}^\top\mathbf{n}_k \\[5px]
\empty
\Rightarrow~ ((2n-3)\times2n)(2n\times1)
&=
((2n-3)\times2n)(2n\times(15+6c))((15+6c)\times1) \\
&\hspace{3.5cm}+ ((2n-3)\times2n)(2n\times1) \nonumber\\[5px]
\empty
\tilde{\mathbf{z}}_{o,k}
&\simeq
\mathbf{H}_{o,k}\tilde{\mathbf{x}}_{k} + \mathbf{n}_o \\[5px]
\empty
\Rightarrow~ ((2n-3)\times1)
&=
((2n-3)\times(15+6c))((15+6c)\times1) + ((2n-3)\times1)
\f}
Finally, we perform the EKF update using the inferred measurement \f$\mathbf z_{o,k}\f$:
\f{align*}{
\hat{\mathbf{x}}_{k|k}
&= \hat{\mathbf{x}}_{k|k-1} + \mathbf{P}_{k|k-1} \mathbf{H}_{o,k}^\top (\mathbf{H}_{o,k} \mathbf{P}_{k|k-1} \mathbf{H}_{o,k}^\top + \mathbf{R}_o)^{-1}\tilde{\mathbf{z}}_{o,k} \\[5px]
\mathbf{P}_{k|k}
&= \mathbf{P}_{k|k-1} - \mathbf{P}_{k|k-1}\mathbf{H}_{o,k}^\top (\mathbf{H}_{o,k} \mathbf{P}_{k|k-1} \mathbf{H}_{o,k}^\top + \mathbf{R}_o)^{-1} \mathbf{H}_{o,k}\mathbf{P}_{k|k-1}^\top
\f}
where the time index (subscript) \f$k|k-1\f$ refers to the prior estimate which was denoted before by symbol \f$\ominus\f$
and \f$k|k\f$ corresponds to the posterior (or updated) estimate indicated before by \f$\oplus\f$.
@section implementation Implementation
Using Eigen 3 library, we perform QR decomposition to get the nullspace.
Here we know that the size of \f$\mathbf{Q}_1\f$ is \f$2n\times3\f$, which corresponds to the number of observations and size of the 3D point feature state.
@code{.cpp}
Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(H_f.rows(), H_f.cols());
qr.compute(H_f);
Eigen::MatrixXd Q = qr.householderQ();
Eigen::MatrixXd Q1 = Q.block(0,0,Q.rows(),3);
Eigen::MatrixXd Q2 = Q.block(0,3,Q.rows(),Q.cols()-3);
@endcode
*/

View File

@@ -0,0 +1,90 @@
/**
@page update-zerovelocity Zero Velocity Update
The key idea of the zero velocity update (ZUPT) is to allow for the system to reduce its uncertainty leveraging motion knowledge (i.e. leverage the fact that the system is stationary).
This is of particular importance in cases where we have a monocular system without any temporal SLAM features.
In this case, if we are stationary we will be unable to triangulate features and thus will be unable to update the system.
This can be avoided by either using a stereo system or temporal SLAM features.
One problem that both of these don't solve is the issue of dynamic environmental objects.
In a typical autonomous car scenario the sensor system will become stationary at stop lights in which dynamic objects, such as other cars crossing the intersection, can quickly corrupt the system.
A zero velocity update and skipping feature tracking can address these issues if we are able to classify the cases where the sensor system is at rest.
@section update-zerovelocity-meas Constant Velocity Synthetic Measurement
To perform the update, we create a synthetic "measurement" which says that the current **true** acceleration and angular velocity is zero.
As compared to saying the velocity is zero, we can model the uncertainty of these measurements based on the readings from our inertial measurement unit.
\f{align*}{
\mathbf{a} &= \mathbf{0} \\
\boldsymbol{\omega} &= \mathbf{0}
\f}
It is important to realize this is not strictly enforcing zero velocity, but really a constant velocity.
This means we can have a false detection at constant velocity times (zero acceleration), but this can be easily addressed by a velocity magnitude check.
We have the following measurement equation relating this above synthetic "measurement" to the currently recorded inertial readings:
\f{align*}{
\mathbf{a} &= \mathbf{a}_m - \mathbf{b}_a - {}^{I_k}_G\mathbf{R}{}^G\mathbf{g} - \mathbf{n}_a \\
\boldsymbol{\omega} &= \boldsymbol{\omega}_m - \mathbf{b}_g - \mathbf{n}_g
\f}
It is important to note that here our actual measurement is the true \f$\mathbf{a}\f$ and \f$\boldsymbol{\omega}\f$ and thus we will have the following residual where we will subtract the synthetic "measurement" and our measurement function:
\f{align*}{
\tilde{\mathbf{z}} &=
\begin{bmatrix}
\mathbf{a} - \Big(\mathbf{a}_m - \mathbf{b}_a - {}^{I_k}_G\mathbf{R}{}^G\mathbf{g} - \mathbf{n}_a \Big) \\
\boldsymbol{\omega} - \Big(\boldsymbol{\omega}_m - \mathbf{b}_g - \mathbf{n}_g \Big)
\end{bmatrix} &=
\begin{bmatrix}
- \Big(\mathbf{a}_m - \mathbf{b}_a - {}^{I_k}_G\mathbf{R}{}^G\mathbf{g} - \mathbf{n}_a \Big) \\
- \Big(\boldsymbol{\omega}_m - \mathbf{b}_g - \mathbf{n}_g \Big)
\end{bmatrix}
\f}
Where we have the following Jacobians in respect to our state:
\f{align*}{
\frac{\partial \tilde{\mathbf{z}}}{\partial {}^{I_k}_{G}\mathbf{R}} &= - \left\lfloor {}^{I_k}_G\mathbf{R}{}^G\mathbf{g} \times\right\rfloor \\
\frac{\partial \tilde{\mathbf{z}}}{\partial \mathbf{b}_a} &= \frac{\partial \tilde{\mathbf{z}}}{\partial \mathbf{b}_g} = - \mathbf{I}_{3\times 3}
\f}
@section update-zerovelocity-detect Zero Velocity Detection
Zero velocity detection in itself is a challenging problem which many different works have tried to address @cite Wagstaff2017IPIN, @cite Ramanandan2011TITS, @cite Davidson2009ENC.
Most works boil down to simple thresholding and the approach is to try to determine the optimal threshold which allows for the best classifications of zero velocity update (ZUPT) portion of the trajectories.
There have been other works, @cite Wagstaff2017IPIN and @cite Ramanandan2011TITS, which have looked at more complicated methods and try to address the issue that this threshold can be dependent on the type of different motions (such as running vs walking) and characteristics of the platform which the sensor is mounted on (we want to ignore vehicle engine vibrations and other non-essential observed vibrations).
@subsection update-zerovelocity-detect-imu Inertial-based Detection
We approach this detection problem based on tuning of a \f$\chi^2\f$, chi-squared, thresholding based on the measurement model above.
It is important to note that we also have a velocity magnitude check which is aimed at preventing constant velocity cases which have non-zero magnitude.
More specifically, we perform the following threshold check to see if we are currently at zero velocity:
\f{align*}{
\tilde{\mathbf{z}}^\top(\mathbf{H}\mathbf{P}\mathbf{H}^\top + \alpha\mathbf{R})^{-1}\tilde{\mathbf{z}} < \chi^2
\f}
We found that in the real world experiments, typically the inertial measurement noise \f$\mathbf{R}\f$ needs to be inflated by \f$\alpha\in[50,100]\f$ times to allow for proper detection.
This can hint that we are using overconfident inertial noises, or that there are additional frequencies (such as the vibration of motors) which inject additional noises.
@subsection update-zerovelocity-detect-disp Disparity-based Detection
We additionally have a detection method which leverages the visual feature tracks. Given two sequential images, the assumption is if there is very little disparity change between feature tracks then we will be stationary. Thus we calculate the average disparity and threshold on this value.
\f{align*}{
\frac{1}{N}\sum_{i=0}^N ||\mathbf{uv}_{k,i}-\mathbf{uv}_{k-1,i}|| < \Delta d
\f}
This seems to work reasonably well, but can fail if the environment is dynamic in nature, thus it can be useful to use both the inertial and disparity-based methods together in very dynamic environments.
*/

287
docs/update.dox Normal file
View File

@@ -0,0 +1,287 @@
/**
@page update Update Derivations
@tableofcontents
@section basics Minimum Mean Square Error (MMSE) Estimation
Consider the following static state estimation problem:
Given a prior distribution (probability density function or pdf) for a Gaussian random vector
\f$ \mathbf x\sim \mathcal N (\hat{\mathbf x}^\ominus, \mathbf P_{xx}^\ominus)\f$ with dimension of \f$n\f$ and a new \f$m\f$ dimensional measurement \f$\mathbf{z}_m = \mathbf z + \mathbf n = \mathbf h(\mathbf x) + \mathbf n \f$ corrupted by zero-mean white Gaussian noise independent of state, \f$ \mathbf n \sim \mathcal N(\mathbf 0, \mathbf R)\f$,
we want to compute the first two (central) moments of the posterior pdf \f$ p(\mathbf x|\mathbf z_m)\f$.
Generally (given a nonlinear measurement model), we approximate the posterior pdf as:
\f$ p(\mathbf x|\mathbf z_m) \simeq \mathcal N (\hat{\mathbf x}^\oplus, \mathbf P_{xx}^\oplus)\f$.
By design, this is the (approximate) solution to the MMSE estimation problem [[Kay 1993]](http://users.isr.ist.utl.pt/~pjcro/temp/Fundamentals%20Of%20Statistical%20Signal%20Processing--Estimation%20Theory-Kay.pdf) @cite Kay1993.
@section conditional-pdf Conditional Probability Distribution
To this end, we employ the Bayes Rule:
\f{align*}{
p(\mathbf{x} | \mathbf{z}_m) = \frac{p(\mathbf{x},\mathbf{z}_m)}{p(\mathbf{z}_m)}
\f}
In general, this conditional pdf cannot be computed analytically without imposing simplifying assumptions.
For the problem at hand, we first approximate (if indeed) \f$ p(\mathbf{z}_m) \simeq \mathcal N (\hat{\mathbf z}, \mathbf P_{zz}) \f$,
and then have the following joint Gaussian pdf (noting that joint of Gaussian pdfs is Gaussian):
\f{align*}{
p(\mathbf{x},\mathbf{z}_m) = \mathcal N \left( \begin{bmatrix} \hat{\mathbf x}^\ominus \\ \mathbf z \end{bmatrix}, \begin{bmatrix}\mathbf P_{xx} & \mathbf P_{xz} \\ \mathbf P_{zx} & \mathbf P_{zz} \end{bmatrix} \right) =: \mathcal N(\hat{\mathbf y}, \mathbf P_{yy})
\f}
Substitution of these two Gaussians into the first equation yields the following conditional Gaussian pdf:
\f{align*}{
p(\mathbf{x} | \mathbf{z}_m)
&\simeq \frac{\mathcal N(\hat{\mathbf y}, \mathbf P_{yy})}{\mathcal N (\hat{\mathbf z}, \mathbf P_{zz}) }\\
&= \frac{\frac{1}{\sqrt{(2\pi)^{n+m}|{\mathbf{P}_{yy}}|}}e^{-\frac{1}{2}(\mathbf{y}-\hat{\mathbf{y}})^\top\mathbf{P}_{yy}^{-1}(\mathbf{y}-\hat{\mathbf{y}})}}{ \frac{1}{\sqrt{(2\pi)^{m}|{\mathbf{P}_{zz}}|}}e^{-\frac{1}{2}(\mathbf{z}_m-\hat{\mathbf{z}})^\top\mathbf{P}_{zz}^{-1}(\mathbf{z}_m-\hat{\mathbf{z}})}}\\
&= \frac{1}{\sqrt{(2\pi)^{n}|{\mathbf{P}_{yy}}|/|{\mathbf{P}_{zz}}|}}
e^{ {-\frac{1}{2}\left[
(\mathbf{y}-\hat{\mathbf{y}})^\top\mathbf{P}_{yy}^{-1}(\mathbf{y}-\hat{\mathbf{y}})
-
(\mathbf{z}_m-\hat{\mathbf{z}})^\top\mathbf{P}_{zz}^{-1}(\mathbf{z}_m-\hat{\mathbf{z}})
\right]}
}\\
&=: \mathcal N (\hat{\mathbf x}^\oplus, \mathbf P_{xx}^\oplus)
\f}
We now show that the conditional mean and covariance can be computed as follows:
First we simplify the denominator term \f$|{\mathbf{P}_{yy}}|/|{\mathbf{P}_{zz}}|\f$ in order to find the conditional covariance.
\f{align*}{
|{\mathbf{P}_{yy}}| = \Big|{\begin{bmatrix}
\mathbf{P}_{xx} & \mathbf{P}_{xz} \\
\mathbf{P}_{zx} & \mathbf{P}_{zz}
\end{bmatrix}}\Big|
= \Big|{\mathbf{P}_{xx} - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx}}\Big|\ \Big|{\mathbf{P}_{zz}}\Big|
\f}
where we assumed \f$\mathbf{P}_{zz}\f$ is invertible
and employed the determinant property of [Schur complement](https://en.wikipedia.org/wiki/Schur_complement).
Thus, we have:
\f{align*}{
\frac{|{\mathbf{P}_{yy}}|}{|{\mathbf{P}_{zz}}|}
= \frac{\Big|{\mathbf{P}_{xx} - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx}}\Big|\Big|{\mathbf{P}_{zz}}\Big|}{|{\mathbf{P}_{zz}}|}
= \Big|{\mathbf{P}_{xx} - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx}}\Big|
\f}
Next, by defining the error states \f$\mathbf{r}_x=\mathbf{x}-\hat{\mathbf{x}}^\ominus\f$,\f$\mathbf{r}_z=\mathbf{z}_m-\hat{\mathbf{z}}\f$,\f$\mathbf{r}_y=\mathbf{y}-\hat{\mathbf{y}}\f$,
and using the [matrix inversion lemma](https://en.wikipedia.org/wiki/Woodbury_matrix_identity),
we rewrite the exponential term as follows:
\f{align*}{
&(\mathbf{y}-\hat{\mathbf{y}})^\top\mathbf{P}_{yy}^{-1}(\mathbf{y}-\hat{\mathbf{y}})
-
(\mathbf{z}_m-\hat{\mathbf{z}})^\top\mathbf{P}_{zz}^{-1}(\mathbf{z}_m-\hat{\mathbf{z}}) \\[5px]
&= \mathbf{r}_y^\top\mathbf{P}_{yy}^{-1}\mathbf{r}_y
-
\mathbf{r}_z^\top\mathbf{P}_{zz}^{-1}\mathbf{r}_z \\[5px]
&=
\begin{bmatrix}
\mathbf{r}_x \\
\mathbf{r}_z
\end{bmatrix}^\top
\begin{bmatrix}
\mathbf{P}_{xx} & \mathbf{P}_{xz} \\
\mathbf{P}_{zx} & \mathbf{P}_{zz}
\end{bmatrix}^{-1}
\begin{bmatrix}
\mathbf{r}_x \\
\mathbf{r}_z
\end{bmatrix}
-
\mathbf{r}_z^\top\mathbf{P}_{zz}^{-1}\mathbf{r}_z \\[5px]
&=
\begin{bmatrix}
\mathbf{r}_x \\
\mathbf{r}_z
\end{bmatrix}^\top
\begin{bmatrix}
\mathbf{Q} & -\mathbf{Q}\mathbf{P}_{xz}\mathbf{P}_{zz}^{-1} \\
-\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx}\mathbf{Q} & \mathbf{P}_{zz}^{-1}+\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx}\mathbf{Q}\mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}
\end{bmatrix}
\begin{bmatrix}
\mathbf{r}_x \\
\mathbf{r}_z
\end{bmatrix}
-
\mathbf{r}_z^\top\mathbf{P}_{zz}^{-1}\mathbf{r}_z \\[5px]
&\hspace{8cm} \mathrm{where} ~ \mathbf{Q}= (\mathbf{P}_{xx} - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx})^{-1} \nonumber\\[5px]
&=
\mathbf{r}_x^\top\mathbf{Q}\mathbf{r}_x
-\mathbf{r}_x^\top\mathbf{Q}\mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z
-\mathbf{r}_z^\top\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx}\mathbf{Q}\mathbf{r}_x \nonumber\\
&\hspace{4.6cm}+\mathbf{r}_z^\top(
\textcolor{red}{\mathbf{P}_{zz}^{-1}}+\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx}\mathbf{Q}\mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}
)\mathbf{r}_z
-\textcolor{red}{\mathbf{r}_z^\top\mathbf{P}_{zz}^{-1}\mathbf{r}_z} \\[5px]
&=
\mathbf{r}_x^\top\mathbf{Q}\mathbf{r}_x
-\mathbf{r}_x^\top\mathbf{Q}[\mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z]
-[\mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z]^\top\mathbf{Q}\mathbf{r}_x
+[\mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z]^\top\mathbf{Q}[\mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z] \\[5px]
&=
(\mathbf{r}_x - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z)^\top
\mathbf{Q}
(\mathbf{r}_x - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z) \\[5px]
&=
(\mathbf{r}_x - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z)^\top
(\mathbf{P}_{xx} - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx})^{-1}
(\mathbf{r}_x - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z)
\f}
where \f$(\mathbf{P}_{zz}^{-1})^\top = \mathbf{P}_{zz}^{-1}\f$ since covariance matrices are symmetric.
Up to this point, we can now construct the conditional Gaussian pdf as follows:
\f{align*}{
p(\mathbf{x}_k | \mathbf{z}_m)
&= \frac{1}{\sqrt{(2\pi)^{n}|{\mathbf{P}_{xx} - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx}}}|} \times \nonumber\\
&\hspace{0.5cm}\mathrm{exp}\left(
{-\frac{1}{2}\left[
(\mathbf{r}_x - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z)^\top
(\mathbf{P}_{xx} - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx})^{-1}
(\mathbf{r}_x - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{r}_z)
\right]}
\right)
\f}
which results in the following conditional mean and covariance we were seeking:
\f{align*}{
\hat{\mathbf{x}}^\oplus &= \hat{\mathbf{x}}^\ominus + \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}(\mathbf{z}_m - \hat{\mathbf{z}}) \\
\mathbf{P}_{xx}^\oplus &= \mathbf{P}_{xx}^\ominus - \mathbf{P}_{xz}\mathbf{P}_{zz}^{-1}\mathbf{P}_{zx}
\f}
These are the fundamental equations for (linear) state estimation.
@section linear-meas Linear Measurement Update
As a special case, we consider a simple linear measurement model to illustrate the linear MMSE estimator:
\f{align*}{
\mathbf{z}_{m,k} &= \mathbf{H}_k \mathbf{x}_k + \mathbf{n}_k \\
\hat{\mathbf{z}}_k &:= \mathbb{E}[\mathbf{z}_{m,k}] = \mathbb{E}[\mathbf{H}_k\mathbf{x}_k + \mathbf{n}_k] = \mathbf{H}_k {\hat{\mathbf{x}}_k^\ominus}
\f}
With this, we can derive the covariance and cross-correlation matrices as follows:
\f{align*}{
\mathbf{P}_{zz}
&= \mathbb{E}\left[
(\mathbf{z}_{m,k} - \hat{\mathbf{z}}_k)(\mathbf{z}_{m,k} - \hat{\mathbf{z}}_k)^\top
\right] \\[5px]
&= \mathbb{E}\left[
(\mathbf{H}_k\mathbf{x}_k + \mathbf{n}_k - \mathbf{H}_k{\hat{\mathbf{x}}_k^\ominus})
(\mathbf{H}_k\mathbf{x}_k + \mathbf{n}_k - \mathbf{H}_k{\hat{\mathbf{x}}_k^\ominus})^\top
\right] \\[5px]
&= \mathbb{E}\left[
(\mathbf{H}_k(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus}) + \mathbf{n}_k)
(\mathbf{H}_k(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus}) + \mathbf{n}_k)^\top
\right] \\[5px]
&= \mathbb{E}\left[
\mathbf{H}_k(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})
(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})^\top\mathbf{H}_k^\top
+
\textcolor{red}{\mathbf{H}_k(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})
\mathbf{n}_k^\top} \right.\nonumber\\
&\hspace{4cm}+
\textcolor{red}{\mathbf{n}_k
(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})^\top\mathbf{H}_k^\top}
+
\left.
\mathbf{n}_k
\mathbf{n}_k^\top
\right] \\[5px]
&= \mathbb{E}\left[
\mathbf{H}_k(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})
(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})^\top\mathbf{H}_k^\top
+
\mathbf{n}_k
\mathbf{n}_k^\top
\right] \\[5px]
&=
\mathbf{H}_k~\mathbb{E}\left[(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})
(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})^\top\right]\mathbf{H}_k^\top
+
~\mathbb{E}\left[\mathbf{n}_k\mathbf{n}_k^\top\right] \\[5px]
&=
\mathbf{H}_k\mathbf{P}_{xx}^\ominus\mathbf{H}_k^\top + \mathbf{R}_k
\f}
where \f$\mathbf{R}_k\f$ is the *discrete* measurement noise matrix, \f$\mathbf{H}_k\f$ is the measurement Jacobian mapping the state into the measurement domain, and \f$\mathbf{P}_{xx}^\ominus\f$ is the current state covariance.
\f{align*}{
\mathbf{P}_{xz}
&= \mathbb{E}\left[
(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})(\mathbf{z}_{m,k} - \hat{\mathbf{z}}_k)^\top
\right] \\[5px]
&= \mathbb{E}\left[
(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})
(\mathbf{H}_k\mathbf{x}_k + \mathbf{n}_k - \mathbf{H}_k{\hat{\mathbf{x}}_k^\ominus})^\top
\right] \\[5px]
&= \mathbb{E}\left[
(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})
(\mathbf{H}_k(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus}) + \mathbf{n}_k)^\top
\right] \\[5px]
&= \mathbb{E}\left[
(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})^\top\mathbf{H}_k^\top
+(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})\mathbf{n}_k^\top
\right] \\[5px]
&= \mathbb{E}\left[
(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})^\top\right]\mathbf{H}_k^\top
+
\textcolor{red}{\mathbb{E}\left[(\mathbf{x}_k - {\hat{\mathbf{x}}_k^\ominus})\mathbf{n}_k^\top
\right]} \\[5px]
&= \mathbf{P}_{xx}^\ominus\mathbf{H}_k^\top
\f}
where we have employed the fact that the noise is independent of the state.
Substitution of these quantities into the fundamental equation
leads to the following update equations:
\f{align*}{
\hat{\mathbf{x}}_k^\oplus
&= {\hat{\mathbf{x}}_k^\ominus} + \mathbf{P}_{k}^\ominus\mathbf{H}_k^\top (\mathbf{H}_k\mathbf{P}_{k}^\ominus\mathbf{H}_k^\top + \mathbf{R}_{k})^{-1}(\mathbf{z}_{m,k} - \hat{\mathbf{z}}_k) \\[5px]
&= {\hat{\mathbf{x}}_k^\ominus} + \mathbf{P}_{k}^\ominus\mathbf{H}_k^\top (\mathbf{H}_k\mathbf{P}_{k}^\ominus\mathbf{H}_k^\top + \mathbf{R}_{k})^{-1}(\mathbf{z}_{m,k} - \mathbf{H}_k{\hat{\mathbf{x}}_k^\ominus}) \\[5px]
&= {\hat{\mathbf{x}}_k^\ominus} + \mathbf{K}\mathbf{r}_z \\[5px]
\mathbf{P}_{xx}^\oplus
&= \mathbf{P}_{k}^\ominus - \mathbf{P}_{k}^\ominus\mathbf{H}_k^\top (\mathbf{H}_k\mathbf{P}_{k}^\ominus\mathbf{H}_k^\top + \mathbf{R}_{k})^{-1} (\mathbf{P}_{k}^\ominus\mathbf{H}_k^\top)^\top \\[5px]
&= \mathbf{P}_{k}^\ominus - \mathbf{P}_{k}^\ominus\mathbf{H}_k^\top (\mathbf{H}_k\mathbf{P}_{k}^\ominus\mathbf{H}_k^\top + \mathbf{R}_{k})^{-1} \mathbf{H}_k{\mathbf{P}_{k}^\ominus}
\f}
These are essentially the Kalman filter (or linear MMSE) update equations.
@section update-examples Update Equations and Derivations
- @subpage update-featinit --- 3D feature triangulation derivations for getting a feature linearization point
- @subpage update-feat --- Measurement equations and derivation for 3D feature point
- @subpage update-delay --- How to perform delayed initialization
- @subpage update-null --- MSCKF nullspace projection
- @subpage update-compress --- MSCKF measurement compression
- @subpage update-zerovelocity --- Zero velocity stationary update
*/

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

Some files were not shown because too many files have changed in this diff Show More