initial commit

This commit is contained in:
2022-08-05 08:23:25 +03:00
commit 5ecdc6abcf
387 changed files with 3010095 additions and 0 deletions

57
ov_init/CMakeLists.txt Normal file
View File

@@ -0,0 +1,57 @@
# Top-level build script for the ov_init package.
# Finds the shared third-party dependencies, then dispatches to the
# ROS1 (catkin) or ROS2 (ament) build flow depending on the environment,
# falling back to ROS1.cmake (which supports a ROS-free build) otherwise.
cmake_minimum_required(VERSION 3.3)
project(ov_init)

# Include libraries (if we don't have opencv 4, then fallback to opencv 3)
# The OpenCV version needs to match the one used by cv_bridge otherwise you will get a segmentation fault!
find_package(Eigen3 REQUIRED)
find_package(OpenCV 3 QUIET)
if (NOT OpenCV_FOUND)
    find_package(OpenCV 4 REQUIRED)
endif ()
find_package(Boost REQUIRED COMPONENTS system filesystem thread date_time)
find_package(Ceres REQUIRED)
message(STATUS "OPENCV: " ${OpenCV_VERSION} " | BOOST: " ${Boost_VERSION} " | CERES: " ${Ceres_VERSION})

# check if we have our python libs files (will search for python3 then python2 installs)
# sudo apt-get install python-matplotlib python-numpy python-dev
# https://cmake.org/cmake/help/v3.10/module/FindPythonLibs.html
# NOTE(review): FindPythonLibs is deprecated in newer CMake (FindPython3 replaces it) --
# kept as-is here since the minimum CMake version is 3.3
find_package(PythonLibs QUIET)
option(DISABLE_MATPLOTLIB "Disable or enable matplotlib plot scripts in ov_eval" OFF)
if (PYTHONLIBS_FOUND AND NOT DISABLE_MATPLOTLIB)
    # HAVE_PYTHONLIBS gates the matplotlib-cpp plotting code at compile time
    add_definitions(-DHAVE_PYTHONLIBS=1)
    message(STATUS "PYTHON VERSION: " ${PYTHONLIBS_VERSION_STRING})
    message(STATUS "PYTHON INCLUDE: " ${PYTHON_INCLUDE_DIRS})
    message(STATUS "PYTHON LIBRARIES: " ${PYTHON_LIBRARIES})
    include_directories(${PYTHON_INCLUDE_DIRS})
    # thirdparty_libraries is consumed later by the included ROS1/ROS2 cmake files
    list(APPEND thirdparty_libraries ${PYTHON_LIBRARIES})
endif ()

# We need c++14 for ROS2, thus just require it for everybody
# NOTE: To future self, hope this isn't an issue...
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

# Enable compile optimizations
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fsee -fomit-frame-pointer -fno-signed-zeros -fno-math-errno -funroll-loops")
# Enable debug flags (use if you want to debug in gdb)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g3 -Wall -Wuninitialized -Wmaybe-uninitialized")

# Find our ROS version!
# NOTE: Default to using the ROS1 package if both are in our environment
# NOTE: https://github.com/romainreignier/share_ros1_ros2_lib_demo
find_package(catkin QUIET COMPONENTS roscpp)
find_package(ament_cmake QUIET)
if (catkin_FOUND)
    message(STATUS "ROS *1* version found, building ROS1.cmake")
    include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/ROS1.cmake)
elseif (ament_cmake_FOUND)
    message(STATUS "ROS *2* version found, building ROS2.cmake")
    include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/ROS2.cmake)
else ()
    # No ROS at all: ROS1.cmake handles this case (ENABLE_ROS/catkin not found path)
    message(STATUS "No ROS versions found, building ROS1.cmake")
    include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/ROS1.cmake)
endif ()

111
ov_init/cmake/ROS1.cmake Normal file
View File

@@ -0,0 +1,111 @@
# ROS1 (catkin) build flow for ov_init; also used as the ROS-free fallback.
# Included from the top-level CMakeLists.txt, which has already found
# Eigen/Boost/Ceres/OpenCV and seeded the thirdparty_libraries list.
cmake_minimum_required(VERSION 3.3)

# Find ROS build system
find_package(catkin QUIET COMPONENTS roscpp ov_core)

# Describe ROS project
option(ENABLE_ROS "Enable or disable building with ROS (if it is found)" ON)
if (catkin_FOUND AND ENABLE_ROS)
    add_definitions(-DROS_AVAILABLE=1)
    catkin_package(
            CATKIN_DEPENDS roscpp cv_bridge ov_core
            INCLUDE_DIRS src/
            LIBRARIES ov_init_lib
    )
else ()
    add_definitions(-DROS_AVAILABLE=0)
    message(WARNING "BUILDING WITHOUT ROS!")
    # Provide the install destinations catkin_package() would have defined,
    # so the install() rules below work in the ROS-free build too
    include(GNUInstallDirs)
    set(CATKIN_PACKAGE_LIB_DESTINATION "${CMAKE_INSTALL_LIBDIR}")
    set(CATKIN_PACKAGE_BIN_DESTINATION "${CMAKE_INSTALL_BINDIR}")
    set(CATKIN_GLOBAL_INCLUDE_DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
endif ()

# Include our header files
include_directories(
        src
        ${EIGEN3_INCLUDE_DIR}
        ${Boost_INCLUDE_DIRS}
        ${CERES_INCLUDE_DIRS}
        ${catkin_INCLUDE_DIRS}
)

# Set link libraries used by all binaries
list(APPEND thirdparty_libraries
        ${Boost_LIBRARIES}
        ${OpenCV_LIBRARIES}
        ${CERES_LIBRARIES}
        ${catkin_LIBRARIES}
)

# If we are not building with ROS then we need to manually link to its headers
# This isn't that elegant of a way, but this at least allows for building without ROS
# See this stackoverflow answer: https://stackoverflow.com/a/11217008/7718197
if (NOT catkin_FOUND OR NOT ENABLE_ROS)
    message(STATUS "MANUALLY LINKING TO OV_CORE LIBRARY....")
    include_directories(${CMAKE_SOURCE_DIR}/../ov_core/src/)
    file(GLOB_RECURSE OVCORE_LIBRARY_SOURCES "${CMAKE_SOURCE_DIR}/../ov_core/src/*.cpp")
    # Exclude ov_core's standalone test binaries (each has its own main())
    list(FILTER OVCORE_LIBRARY_SOURCES EXCLUDE REGEX ".*test_webcam\\.cpp$")
    list(FILTER OVCORE_LIBRARY_SOURCES EXCLUDE REGEX ".*test_tracking\\.cpp$")
    list(APPEND LIBRARY_SOURCES ${OVCORE_LIBRARY_SOURCES})
    file(GLOB_RECURSE OVCORE_LIBRARY_HEADERS "${CMAKE_SOURCE_DIR}/../ov_core/src/*.h")
    list(APPEND LIBRARY_HEADERS ${OVCORE_LIBRARY_HEADERS})
endif ()

##################################################
# Make the shared library
##################################################

list(APPEND LIBRARY_SOURCES
        src/dummy.cpp
        src/init/InertialInitializer.cpp
        src/dynamic/DynamicInitializer.cpp
        src/static/StaticInitializer.cpp
        src/sim/Simulator.cpp
)
# BUGFIX: glob into a temporary and append. The previous
# file(GLOB_RECURSE LIBRARY_HEADERS "src/*.h") SET the variable, silently
# discarding the ov_core headers appended above in the ROS-free branch.
file(GLOB_RECURSE OVINIT_LIBRARY_HEADERS "src/*.h")
list(APPEND LIBRARY_HEADERS ${OVINIT_LIBRARY_HEADERS})
add_library(ov_init_lib SHARED ${LIBRARY_SOURCES} ${LIBRARY_HEADERS})
target_link_libraries(ov_init_lib ${thirdparty_libraries})
target_include_directories(ov_init_lib PUBLIC src/)
install(TARGETS ov_init_lib
        ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
install(DIRECTORY src/
        DESTINATION ${CATKIN_GLOBAL_INCLUDE_DESTINATION}
        FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp"
)

##################################################
# Make binary files!
##################################################

add_executable(test_simulation src/test_simulation.cpp)
target_link_libraries(test_simulation ov_init_lib ${thirdparty_libraries})
install(TARGETS test_simulation
        ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

add_executable(test_dynamic_mle src/test_dynamic_mle.cpp)
target_link_libraries(test_dynamic_mle ov_init_lib ${thirdparty_libraries})
install(TARGETS test_dynamic_mle
        ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

add_executable(test_dynamic_init src/test_dynamic_init.cpp)
target_link_libraries(test_dynamic_init ov_init_lib ${thirdparty_libraries})
install(TARGETS test_dynamic_init
        ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

83
ov_init/cmake/ROS2.cmake Normal file
View File

@@ -0,0 +1,83 @@
# ROS2 (ament) build flow for ov_init.
# Included from the top-level CMakeLists.txt, which has already found
# Eigen/Boost/Ceres/OpenCV and seeded the thirdparty_libraries list.
cmake_minimum_required(VERSION 3.3)

# Find ros dependencies
find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED)
find_package(cv_bridge REQUIRED)
find_package(ov_core REQUIRED)

# Describe ROS project
option(ENABLE_ROS "Enable or disable building with ROS (if it is found)" ON)
if (NOT ENABLE_ROS)
    message(FATAL_ERROR "Build with ROS1.cmake if you don't have ROS.")
endif ()
add_definitions(-DROS_AVAILABLE=2)

# Include our header files
include_directories(
        src
        ${EIGEN3_INCLUDE_DIR}
        ${Boost_INCLUDE_DIRS}
        ${CERES_INCLUDE_DIRS}
)

# Set link libraries used by all binaries
list(APPEND thirdparty_libraries
        ${Boost_LIBRARIES}
        ${CERES_LIBRARIES}
        ${OpenCV_LIBRARIES}
)

# Ament package dependencies shared by the library and every executable.
# BUGFIX: ament_libraries was referenced by the executables below but never
# defined, so the test binaries received no ament dependencies at all.
list(APPEND ament_libraries rclcpp ov_core cv_bridge)

##################################################
# Make the shared library
##################################################
list(APPEND LIBRARY_SOURCES
        src/dummy.cpp
        src/init/InertialInitializer.cpp
        src/dynamic/DynamicInitializer.cpp
        src/static/StaticInitializer.cpp
        src/sim/Simulator.cpp
)
file(GLOB_RECURSE LIBRARY_HEADERS "src/*.h")
add_library(ov_init_lib SHARED ${LIBRARY_SOURCES} ${LIBRARY_HEADERS})
ament_target_dependencies(ov_init_lib ${ament_libraries})
target_link_libraries(ov_init_lib ${thirdparty_libraries})
target_include_directories(ov_init_lib PUBLIC src/)
install(TARGETS ov_init_lib
        LIBRARY DESTINATION lib
        RUNTIME DESTINATION bin
        PUBLIC_HEADER DESTINATION include
)
install(DIRECTORY src/
        DESTINATION include
        FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp"
)
ament_export_include_directories(include)
ament_export_libraries(ov_init_lib)

##################################################
# Make binary files!
##################################################

add_executable(test_simulation src/test_simulation.cpp)
ament_target_dependencies(test_simulation ${ament_libraries})
target_link_libraries(test_simulation ov_init_lib ${thirdparty_libraries})
install(TARGETS test_simulation DESTINATION lib/${PROJECT_NAME})

add_executable(test_dynamic_mle src/test_dynamic_mle.cpp)
ament_target_dependencies(test_dynamic_mle ${ament_libraries})
target_link_libraries(test_dynamic_mle ov_init_lib ${thirdparty_libraries})
install(TARGETS test_dynamic_mle DESTINATION lib/${PROJECT_NAME})

add_executable(test_dynamic_init src/test_dynamic_init.cpp)
ament_target_dependencies(test_dynamic_init ${ament_libraries})
target_link_libraries(test_dynamic_init ov_init_lib ${thirdparty_libraries})
install(TARGETS test_dynamic_init DESTINATION lib/${PROJECT_NAME})

# Install launch and config directories
install(DIRECTORY launch/ DESTINATION share/${PROJECT_NAME}/launch/)

# finally define this as the package
ament_package()

351
ov_init/launch/display.rviz Normal file
View File

@@ -0,0 +1,351 @@
Panels:
- Class: rviz/Displays
Help Height: 0
Name: Displays
Property Tree Widget:
Expanded:
- /Path VIO1
- /Path GT1
Splitter Ratio: 0.6011396050453186
Tree Height: 730
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: ACTIVE Features
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 112; 112; 115
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 80
Reference Frame: <Fixed Frame>
Value: true
- Class: rviz/TF
Enabled: true
Frame Timeout: 120
Frames:
All Enabled: true
Marker Alpha: 1
Marker Scale: 1
Name: TF
Show Arrows: false
Show Axes: true
Show Names: true
Tree:
{}
Update Interval: 0
Value: true
- Class: rviz/Image
Enabled: false
Image Topic: /ov_msckf/trackhist
Max Value: 1
Median window: 5
Min Value: 0
Name: Image Tracks
Normalize Range: true
Queue Size: 2
Transport Hint: compressed
Unreliable: false
Value: false
- Class: rviz/Image
Enabled: false
Image Topic: /ov_msckf/loop_depth_colored
Max Value: 1
Median window: 5
Min Value: 0
Name: Current Depths
Normalize Range: true
Queue Size: 2
Transport Hint: compressed
Unreliable: false
Value: false
- Alpha: 1
Buffer Length: 1
Class: rviz/Path
Color: 0; 255; 0
Enabled: true
Head Diameter: 0.30000001192092896
Head Length: 0.20000000298023224
Length: 0.10000000149011612
Line Style: Billboards
Line Width: 0.029999999329447746
Name: Path VIO
Offset:
X: 0
Y: 0
Z: 0
Pose Color: 255; 85; 255
Pose Style: Axes
Queue Size: 10
Radius: 0.009999999776482582
Shaft Diameter: 0.10000000149011612
Shaft Length: 0.10000000149011612
Topic: /ov_msckf/pathimu
Unreliable: false
Value: true
- Alpha: 1
Buffer Length: 1
Class: rviz/Path
Color: 0; 255; 255
Enabled: true
Head Diameter: 0.30000001192092896
Head Length: 0.20000000298023224
Length: 0.10000000149011612
Line Style: Billboards
Line Width: 0.029999999329447746
Name: Path GT
Offset:
X: 0
Y: 0
Z: 0
Pose Color: 255; 85; 255
Pose Style: Axes
Queue Size: 10
Radius: 0.009999999776482582
Shaft Diameter: 0.10000000149011612
Shaft Length: 0.10000000149011612
Topic: /ov_msckf/pathgt
Unreliable: false
Value: true
- Alpha: 0.800000011920929
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 252; 175; 62
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: MSCKF Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 8
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_msckf
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 0; 0
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: SLAM Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 8
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_slam
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 117; 80; 123
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: ARUCO Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 15
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_aruco
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud
Color: 255; 255; 255
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: ACTIVE Features
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 6
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/loop_feats
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 127
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: SIM Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 6
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_sim
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 44; 44; 44
Default Light: true
Fixed Frame: global
Frame Rate: 60
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 10
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Field of View: 0.7853981852531433
Focal Point:
X: 0
Y: 0
Z: 0
Focal Shape Fixed Size: false
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.7853981852531433
Target Frame: <Fixed Frame>
Yaw: 0.7853981852531433
Saved: ~
Window Geometry:
Current Depths:
collapsed: false
Displays:
collapsed: false
Height: 842
Hide Left Dock: false
Hide Right Dock: true
Image Tracks:
collapsed: false
QMainWindow State: 000000ff00000000fd00000004000000000000015600000317fc020000000efb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000001600000317000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000180049006d00610067006500200054007200610063006b00730000000226000001070000001600fffffffb0000001a0054007200610063006b00200048006900730074006f0072007901000003d0000000160000000000000000fb0000000a0049006d00610067006500000001170000011c0000000000000000fb0000001c00430075007200720065006e0074002000440065007000740068007300000001c9000000b80000001600fffffffb0000000a0049006d0061006700650000000239000000480000000000000000fb000000140054007200610063006b0020004c0069006e006501000003ec000000160000000000000000000000010000010f000002a4fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000000000002a4000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b20000000000000000000000020000073f00000295fc0100000005fb0000000800430041004d003000000000000000020c0000000000000000fb0000000800430041004d00310000000000000002ef0000000000000000fb0000000800430041004d00320000000000000003850000000000000000fb0000000800430041004d003300000000000000073f0000000000000000fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005a300000158fc0100000002fb0000000800540069006d0065000000000000000570000002e
b00fffffffb0000000800540069006d00650100000000000004500000000000000000000004990000031700000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730000000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1525
X: 2314
Y: 1161

View File

@@ -0,0 +1,52 @@
<!-- roslaunch file: runs the test_dynamic_init simulation binary from ov_init -->
<launch>
    <!-- ================================================================ -->
    <!-- ================================================================ -->

    <!-- what config we are going to run (should match folder name) -->
    <arg name="verbosity"   default="DEBUG" /> <!-- ALL, DEBUG, INFO, WARNING, ERROR, SILENT -->
    <arg name="config"      default="rpng_sim" />
    <arg name="config_path" default="$(find ov_init)/../config/$(arg config)/estimator_config.yaml" />
    <arg name="seed"        default="3" />
    <arg name="freq_cam"    default="10" />
    <arg name="freq_imu"    default="400" />
    <arg name="dataset"     default="tum_corridor1_512_16_okvis.txt" /> <!-- udel_gore, udel_gore_zupt, tum_corridor1_512_16_okvis, udel_arl, udel_neighborhood, euroc_V1_01_easy -->

    <!-- if we should perturb the initial state values (i.e. calibration) -->
    <arg name="sim_do_perturbation" default="true" />
    <!-- if we should optimize the calibration -->
    <arg name="mle_opt_calib"       default="false" />

    <!-- ================================================================ -->
    <!-- ================================================================ -->

    <!-- MASTER NODE! -->
    <!-- (commented variant below runs the node under gdb for debugging) -->
<!--    <node name="test_dynamic_init" pkg="ov_init" type="test_dynamic_init" output="screen" clear_params="true" required="true" launch-prefix="gdb -ex run &#45;&#45;args">-->
    <node name="test_dynamic_init" pkg="ov_init" type="test_dynamic_init" output="screen" clear_params="true" required="true">

        <!-- simulation trajectory comes from the ov_data package -->
        <param name="sim_traj_path"          type="string" value="$(find ov_data)/sim/$(arg dataset)" />
        <param name="sim_seed_state_init"    type="int"    value="0" />
        <param name="sim_seed_measurements"  type="int"    value="$(arg seed)" />
        <!-- NOTE(review): "preturb" spelling presumably matches the parameter name
             the simulator reads - do not rename here without changing the C++ side -->
        <param name="sim_seed_preturb"       type="int"    value="$(arg seed)" />
        <param name="sim_freq_cam"           type="int"    value="$(arg freq_cam)" />
        <param name="sim_freq_imu"           type="int"    value="$(arg freq_imu)" />
        <param name="sim_do_perturbation"    type="bool"   value="$(arg sim_do_perturbation)" />

        <!-- master configuration object -->
        <param name="verbosity"              type="string" value="$(arg verbosity)" />
        <param name="config_path"            type="string" value="$(arg config_path)" />

        <!-- world/filter parameters -->
        <param name="max_cameras"            type="int"    value="1" />
        <param name="init_dyn_mle_opt_calib" type="bool"   value="$(arg mle_opt_calib)" />

    </node>

</launch>

53
ov_init/launch/mle.launch Normal file
View File

@@ -0,0 +1,53 @@
<!-- roslaunch file: runs the test_dynamic_mle simulation binary from ov_init -->
<launch>
    <!-- ================================================================ -->
    <!-- ================================================================ -->

    <!-- what config we are going to run (should match folder name) -->
    <arg name="verbosity"   default="DEBUG" /> <!-- ALL, DEBUG, INFO, WARNING, ERROR, SILENT -->
    <arg name="config"      default="rpng_sim" />
    <arg name="config_path" default="$(find ov_init)/../config/$(arg config)/estimator_config.yaml" />
    <arg name="seed"        default="3" />
    <arg name="freq_cam"    default="10" />
    <arg name="freq_imu"    default="400" />
    <arg name="dataset"     default="euroc_V1_01_easy.txt" /> <!-- udel_gore, udel_gore_zupt, tum_corridor1_512_16_okvis, udel_arl, udel_neighborhood, euroc_V1_01_easy -->

    <!-- if we should perturb the initial state values (i.e. calibration) -->
    <arg name="sim_do_perturbation" default="false" />
    <!-- if we should optimize the calibration -->
    <arg name="mle_opt_calib"       default="false" />

    <!-- ================================================================ -->
    <!-- ================================================================ -->

    <!-- MASTER NODE! -->
    <node name="test_dynamic_mle" pkg="ov_init" type="test_dynamic_mle" output="screen" clear_params="true" required="true">
    <!-- (commented variant below runs the node under gdb for debugging) -->
<!--    <node name="test_dynamic_mle" pkg="ov_init" type="test_dynamic_mle" output="screen" clear_params="true" required="true" launch-prefix="gdb -ex run &#45;&#45;args">-->

        <!-- simulation trajectory comes from the ov_data package -->
        <param name="sim_traj_path"          type="string" value="$(find ov_data)/sim/$(arg dataset)" />
        <param name="sim_seed_state_init"    type="int"    value="0" />
        <param name="sim_seed_measurements"  type="int"    value="$(arg seed)" />
        <!-- NOTE(review): "preturb" spelling presumably matches the parameter name
             the simulator reads - do not rename here without changing the C++ side -->
        <param name="sim_seed_preturb"       type="int"    value="$(arg seed)" />
        <param name="sim_freq_cam"           type="int"    value="$(arg freq_cam)" />
        <param name="sim_freq_imu"           type="int"    value="$(arg freq_imu)" />
        <param name="sim_do_perturbation"    type="bool"   value="$(arg sim_do_perturbation)" />

        <!-- master configuration object -->
        <param name="verbosity"              type="string" value="$(arg verbosity)" />
        <param name="config_path"            type="string" value="$(arg config_path)" />

        <!-- world/filter parameters -->
        <param name="max_cameras"            type="int"    value="1" />
        <param name="init_dyn_mle_opt_calib" type="bool"   value="$(arg mle_opt_calib)" />
        <param name="init_max_features"      type="int"    value="30" />

    </node>

</launch>

50
ov_init/package.xml Normal file
View File

@@ -0,0 +1,50 @@
<?xml version="1.0"?>
<package format="3">
<!-- Package Information -->
<name>ov_init</name>
<version>2.6.0</version>
<description>
Initialization package which handles static and dynamic initialization.
</description>
<url type="website">https://docs.openvins.com/</url>
<url type="bugtracker">https://github.com/rpng/open_vins/issues</url>
<url type="repository">https://github.com/rpng/open_vins</url>
<!-- Code Authors -->
<author email="pgeneva@udel.edu">Patrick Geneva</author>
<author email="keck@udel.edu">Kevin Eckenhoff</author>
<author email="ghuang@udel.edu">Guoquan Huang</author>
<maintainer email="pgeneva@udel.edu">Patrick Geneva</maintainer>
<!-- Licensing -->
<license>GNU General Public License v3.0</license>
<!-- ROS1: Dependencies needed to compile this package. -->
<buildtool_depend condition="$ROS_VERSION == 1">catkin</buildtool_depend>
<depend condition="$ROS_VERSION == 1">cmake_modules</depend>
<depend condition="$ROS_VERSION == 1">roscpp</depend>
<depend condition="$ROS_VERSION == 1">cv_bridge</depend>
<depend condition="$ROS_VERSION == 1">ov_core</depend>
<!-- ROS2: Dependencies needed to compile this package. -->
<buildtool_depend condition="$ROS_VERSION == 2">ament_cmake</buildtool_depend>
<depend condition="$ROS_VERSION == 2">rclcpp</depend>
<depend condition="$ROS_VERSION == 2">cv_bridge</depend>
<depend condition="$ROS_VERSION == 2">ov_core</depend>
<!-- System dependencies for both versions -->
<depend>eigen</depend>
<depend>libopencv-dev</depend>
<depend>libopencv-contrib-dev</depend>
<depend>boost</depend>
<depend>libceres-dev</depend>
<!-- Note the export is required to expose the executables -->
<export>
<build_type condition="$ROS_VERSION == 1">catkin</build_type>
<build_type condition="$ROS_VERSION == 2">ament_cmake</build_type>
<rosdoc config="rosdoc.yaml" />
</export>
</package>

8
ov_init/rosdoc.yaml Normal file
View File

@@ -0,0 +1,8 @@
# This uses a stupid hack to get all the documentation to be included in each project
# Right now the `use_mdfile_as_mainpage` is also appended to the doxygen "INPUT"
# Thus, we will append the whole OpenVINS documentation by going up one directory!
- builder: doxygen
file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox'
exclude_patterns: '*/ov_core/* */ov_data/* */ov_eval/* */ov_msckf/*'
use_mdfile_as_mainpage: '../'

View File

@@ -0,0 +1,236 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_CERES_GENERICPRIOR_H
#define OV_INIT_CERES_GENERICPRIOR_H
#include <ceres/ceres.h>
#include "utils/quat_ops.h"
namespace ov_init {
/**
* @brief Factor for generic state priors for specific types.
*
* This is a general factor which handles state priors which have non-zero linear errors.
* In general a unitary factor will have zero error when it is created, thus this extra term can be ignored.
* But if performing marginalization, this can be non-zero. See the following paper Section 3.2 Eq. 25-35
* https://journals.sagepub.com/doi/full/10.1177/0278364919835021
*
* We have the following minimization problem:
* @f[
* \textrm{argmin} ||A * (x - x_{lin}) + b||^2
* @f]
*
*
* In general we have the following after marginalization:
* - @f$(A^T*A) = Inf_{prior} @f$ (the prior information)
* - @f$A^T*b = grad_{prior} @f$ (the prior gradient)
*
* For example, consider we have the following system were we wish to remove the xm states.
* This is the problem of state marginalization.
* @f[
* [ Arr Arm ] [ xr ] = [ - gr ]
* @f]
* @f[
* [ Amr Amm ] [ xm ] = [ - gm ]
* @f]
*
* We wish to marginalize the xm states which are correlated with the other states @f$ xr @f$.
* The Jacobian (and thus information matrix A) is computed at the current best guess @f$ x_{lin} @f$.
* We can define the following optimal subcost form which only involves the @f$ xr @f$ states as:
* @f[
* cost^2 = (xr - xr_{lin})^T*(A^T*A)*(xr - xr_{lin}) + b^T*A*(xr - xr_{lin}) + b^b
* @f]
*
* where we have:
* @f[
* A = sqrt(Arr - Arm*Amm^{-1}*Amr)
* @f]
* @f[
* b = A^-1 * (gr - Arm*Amm^{-1}*gm)
* @f]
*
*/
class Factor_GenericPrior : public ceres::CostFunction {
public:
/// State estimates at the time of marginalization to linearize the problem
Eigen::MatrixXd x_lin;
/// State type for each variable in x_lin. Can be [quat, quat_yaw, vec3, vec8]
std::vector<std::string> x_type;
/// The square-root of the information s.t. sqrtI^T * sqrtI = marginal information
Eigen::MatrixXd sqrtI;
/// Constant term inside the cost s.t. sqrtI^T * b = marginal gradient (can be zero)
Eigen::MatrixXd b;
/**
 * @brief Default constructor
 *
 * Validates that the prior information/gradient dimensions are consistent with the
 * declared state types, pre-computes the square-root information matrix and the
 * constant residual offset b, and registers the residual / parameter block sizes
 * with Ceres.
 *
 * @param x_lin_ Linearization-point state estimates, stacked into a single column vector
 * @param x_type_ Type tag for each variable in x_lin_ (one of: quat, quat_yaw, vec3, vec8)
 * @param prior_Info Marginal prior information matrix (error-state dimension, must be positive definite)
 * @param prior_grad Marginal prior gradient vector (error-state dimension, column vector)
 */
Factor_GenericPrior(const Eigen::MatrixXd &x_lin_, const std::vector<std::string> &x_type_, const Eigen::MatrixXd &prior_Info,
                    const Eigen::MatrixXd &prior_grad)
    : x_lin(x_lin_), x_type(x_type_) {

  // First assert that our state and variables are of the correct size
  // Each type contributes a "global" state size (what is stored) and a
  // "local" error-state size (what the residual is expressed in)
  int state_size = 0;
  int state_error_size = 0;
  for (auto const &str : x_type_) {
    if (str == "quat") {
      state_size += 4;
      state_error_size += 3;
    } else if (str == "quat_yaw") {
      // full quaternion is stored, but only the yaw (1 dof) enters the error state
      state_size += 4;
      state_error_size += 1;
    } else if (str == "vec3") {
      state_size += 3;
      state_error_size += 3;
    } else if (str == "vec8") {
      state_size += 8;
      state_error_size += 8;
    } else {
      // unknown type tag: fail hard rather than build an inconsistent factor
      std::cerr << "type - " << str << " not implemented in prior" << std::endl;
      std::exit(EXIT_FAILURE);
    }
  }
  assert(x_lin.rows() == state_size);
  assert(x_lin.cols() == 1);
  assert(prior_Info.rows() == state_error_size);
  assert(prior_Info.cols() == state_error_size);
  assert(prior_grad.rows() == state_error_size);
  assert(prior_grad.cols() == 1);

  // Now lets base-compute the square-root information and constant term b
  // Comes from the form: cost = A * (x - x_lin) + b
  // Cholesky: prior_Info = L * L^T, so sqrtI = L^T (upper triangular)
  // satisfies sqrtI^T * sqrtI = prior_Info as documented on the member
  Eigen::LLT<Eigen::MatrixXd> lltOfI(prior_Info);
  sqrtI = lltOfI.matrixL().transpose();
  Eigen::MatrixXd I = Eigen::MatrixXd::Identity(prior_Info.rows(), prior_Info.rows());
  // NOTE(review): this computes b = sqrtI^{-1} * prior_grad (upper-triangular
  // solve against sqrtI). The member doc states sqrtI^T * b = marginal gradient,
  // which would instead require solving with sqrtI^T (lower triangular) --
  // confirm which convention the marginalization code expects.
  b = sqrtI.triangularView<Eigen::Upper>().solve(I) * prior_grad;

  // Check that we have a valid matrix that we can get the information of
  // (LLT produces NaNs rather than failing when prior_Info is not positive definite)
  if (std::isnan(prior_Info.norm()) || std::isnan(sqrtI.norm()) || std::isnan(b.norm())) {
    std::cerr << "prior_Info - " << std::endl << prior_Info << std::endl << std::endl;
    std::cerr << "prior_Info_inv - " << std::endl << prior_Info.inverse() << std::endl << std::endl;
    std::cerr << "b - " << std::endl << b << std::endl << std::endl;
    std::exit(EXIT_FAILURE);
  }

  // Set the number of measurements, and the block sized
  // Residual lives in the error state; parameter blocks use the stored (global) sizes
  set_num_residuals(state_error_size);
  for (auto const &str : x_type_) {
    if (str == "quat")
      mutable_parameter_block_sizes()->push_back(4);
    if (str == "quat_yaw")
      mutable_parameter_block_sizes()->push_back(4);
    if (str == "vec3")
      mutable_parameter_block_sizes()->push_back(3);
    if (str == "vec8")
      mutable_parameter_block_sizes()->push_back(8);
  }
}
virtual ~Factor_GenericPrior() {}

/**
 * @brief Error residual and Jacobian calculation
 *
 * Computes the prior cost res = sqrtI * (x (-) x_lin) + b over all sub-states
 * listed in x_type. Vector states ("vec3", "vec8") use a plain subtraction,
 * while quaternion states use an SO(3) log-based orientation error ("quat_yaw"
 * keeps only the z-axis component of that error). Jacobians returned to ceres
 * already have sqrtI applied to them (see the note near the end of the loop).
 */
bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const override {

  // Location in our state and output residual
  // local_it walks the error state (residual rows), global_it walks x_lin (parameter rows);
  // they differ because a quaternion is 4 parameters but only 3 (or 1) error dimensions
  int local_it = 0;
  int global_it = 0;
  Eigen::MatrixXd res = Eigen::MatrixXd::Zero(num_residuals(), 1);

  // Loop through each state and calculate its residual and Jacobian
  for (size_t i = 0; i < x_type.size(); i++) {
    if (x_type[i] == "quat") {

      // Full 3-dof orientation error: -Log(R_i^T * R_lin)
      Eigen::Vector4d q_i = Eigen::Map<const Eigen::Vector4d>(parameters[i]);
      Eigen::Matrix3d R_i = ov_core::quat_2_Rot(q_i);
      Eigen::Matrix3d R_lin = ov_core::quat_2_Rot(x_lin.block(global_it, 0, 4, 1));
      Eigen::Vector3d theta_err = ov_core::log_so3(R_i.transpose() * R_lin);
      res.block(local_it, 0, 3, 1) = -theta_err;
      if (jacobians && jacobians[i]) {
        // Jacobian is over the full 4-dof quaternion parameter block; the last
        // column is left zero (the 3-dof tangent is handled by the local parameterization)
        Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> jacobian(jacobians[i], num_residuals(), 4);
        jacobian.setZero();
        Eigen::Matrix3d Jr_inv = ov_core::Jr_so3(theta_err).inverse();
        Eigen::Matrix3d H_theta = -Jr_inv * R_lin.transpose();
        jacobian.block(0, 0, num_residuals(), 3) = sqrtI.block(0, local_it, num_residuals(), 3) * H_theta;
      }
      global_it += 4;
      local_it += 3;

    } else if (x_type[i] == "quat_yaw") {

      // Yaw-only orientation error: project the SO(3) error onto the z-axis (1 residual row)
      Eigen::Vector3d ez = Eigen::Vector3d(0.0, 0.0, 1.0);
      Eigen::Vector4d q_i = Eigen::Map<const Eigen::Vector4d>(parameters[i]);
      Eigen::Matrix3d R_i = ov_core::quat_2_Rot(q_i);
      Eigen::Matrix3d R_lin = ov_core::quat_2_Rot(x_lin.block(global_it, 0, 4, 1));
      Eigen::Vector3d theta_err = ov_core::log_so3(R_i.transpose() * R_lin);
      res(local_it, 0) = -(ez.transpose() * theta_err)(0, 0);
      if (jacobians && jacobians[i]) {
        Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> jacobian(jacobians[i], num_residuals(), 4);
        jacobian.setZero();
        Eigen::Matrix3d Jr_inv = ov_core::Jr_so3(theta_err).inverse();
        Eigen::Matrix<double, 1, 3> H_theta = -ez.transpose() * (Jr_inv * R_lin.transpose());
        // Single sqrtI column (this state contributes one error dimension) times 1x3 chain rule
        jacobian.block(0, 0, num_residuals(), 3) = sqrtI.block(0, local_it, num_residuals(), 1) * H_theta;
      }
      global_it += 4;
      local_it += 1;

    } else if (x_type[i] == "vec3") {

      // Plain additive 3d vector error
      Eigen::Matrix<double, 3, 1> p_i = Eigen::Map<const Eigen::Matrix<double, 3, 1>>(parameters[i]);
      res.block(local_it, 0, 3, 1) = p_i - x_lin.block(global_it, 0, 3, 1);
      if (jacobians && jacobians[i]) {
        // Identity chain rule, so the Jacobian is just the matching sqrtI columns
        Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> jacobian(jacobians[i], num_residuals(), 3);
        jacobian.block(0, 0, num_residuals(), 3) = sqrtI.block(0, local_it, num_residuals(), 3);
      }
      global_it += 3;
      local_it += 3;

    } else if (x_type[i] == "vec8") {

      // Plain additive 8d vector error (e.g. camera intrinsics)
      Eigen::Matrix<double, 8, 1> p_i = Eigen::Map<const Eigen::Matrix<double, 8, 1>>(parameters[i]);
      res.block(local_it, 0, 8, 1) = p_i - x_lin.block(global_it, 0, 8, 1);
      if (jacobians && jacobians[i]) {
        Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> jacobian(jacobians[i], num_residuals(), 8);
        jacobian.block(0, 0, num_residuals(), 8) = sqrtI.block(0, local_it, num_residuals(), 8);
      }
      global_it += 8;
      local_it += 8;

    } else {
      std::cerr << "type - " << x_type[i] << " not implemented in prior" << std::endl;
      std::exit(EXIT_FAILURE);
    }
  }

  // Now that we have done x - x_lin we need to multiply by sqrtI and add b to get full cost
  // Jacobians will already have sqrtI applied to them...
  res = sqrtI * res;
  res += b;

  // Store the residuals into ceres
  for (int i = 0; i < res.rows(); i++) {
    residuals[i] = res(i, 0);
  }
  return true;
}
};
} // namespace ov_init
#endif // OV_INIT_CERES_GENERICPRIOR_H

View File

@@ -0,0 +1,201 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_CERES_IMAGEREPROJCALIB_H
#define OV_INIT_CERES_IMAGEREPROJCALIB_H
#include <Eigen/Dense>
#include <ceres/ceres.h>
#include <ceres/rotation.h>
#include <deque>
#include <iostream>
#include <map>
#include "cam/CamEqui.h"
#include "cam/CamRadtan.h"
#include "utils/quat_ops.h"
namespace ov_init {
/**
* @brief Factor of feature bearing observation (raw) with calibration
*/
class Factor_ImageReprojCalib : public ceres::CostFunction {
public:
  // Measurement observation of the feature (raw pixel coordinates)
  Eigen::Vector2d uv_meas;

  // Measurement noise (raw pixel standard deviation)
  double pix_sigma = 1.0;
  // Square-root information, diag(1/pix_sigma, 1/pix_sigma)
  Eigen::Matrix<double, 2, 2> sqrtQ;

  // If distortion model is fisheye or radtan
  bool is_fisheye = false;

  // If value of 1 then this residual adds to the problem, otherwise if zero it is "gated" (disabled)
  double gate = 1.0;

  /**
   * @brief Default constructor
   * @param uv_meas_ Raw pixel uv measurement of a environmental feature
   * @param pix_sigma_ Raw pixel measurement uncertainty (typically 1)
   * @param is_fisheye_ If this raw pixel camera uses fisheye distortion
   */
  Factor_ImageReprojCalib(const Eigen::Vector2d &uv_meas_, double pix_sigma_, bool is_fisheye_)
      : uv_meas(uv_meas_), pix_sigma(pix_sigma_), is_fisheye(is_fisheye_) {

    // Square root information inverse (whitening by 1/sigma on each pixel axis)
    sqrtQ = Eigen::Matrix<double, 2, 2>::Identity();
    sqrtQ(0, 0) *= 1.0 / pix_sigma;
    sqrtQ(1, 1) *= 1.0 / pix_sigma;

    // Parameters we are a function of (order must match Evaluate's parameters[] indexing)
    set_num_residuals(2);
    mutable_parameter_block_sizes()->push_back(4); // q_GtoIi
    mutable_parameter_block_sizes()->push_back(3); // p_IiinG
    mutable_parameter_block_sizes()->push_back(3); // p_FinG
    mutable_parameter_block_sizes()->push_back(4); // q_ItoC
    mutable_parameter_block_sizes()->push_back(3); // p_IinC
    mutable_parameter_block_sizes()->push_back(8); // focal, center, distortion
  }

  virtual ~Factor_ImageReprojCalib() {}

  /**
   * @brief Error residual and Jacobian calculation
   *
   * This computes the Jacobians and residual of the feature projection model.
   * This is a function of the observing pose, feature in global, and calibration parameters.
   * The normalized pixel coordinates are found and then distorted using the camera distortion model.
   * See the @ref update-feat page for more details.
   */
  bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const override {

    // Recover the current state from our parameters
    Eigen::Vector4d q_GtoIi = Eigen::Map<const Eigen::Vector4d>(parameters[0]);
    Eigen::Matrix3d R_GtoIi = ov_core::quat_2_Rot(q_GtoIi);
    Eigen::Vector3d p_IiinG = Eigen::Map<const Eigen::Vector3d>(parameters[1]);
    Eigen::Vector3d p_FinG = Eigen::Map<const Eigen::Vector3d>(parameters[2]);
    Eigen::Vector4d q_ItoC = Eigen::Map<const Eigen::Vector4d>(parameters[3]);
    Eigen::Matrix3d R_ItoC = ov_core::quat_2_Rot(q_ItoC);
    Eigen::Vector3d p_IinC = Eigen::Map<const Eigen::Vector3d>(parameters[4]);
    // order: f_x & f_y & c_x & c_y & k_1 & k_2 & k_3 & k_4
    Eigen::Matrix<double, 8, 1> camera_vals = Eigen::Map<const Eigen::Matrix<double, 8, 1>>(parameters[5]);

    // Transform the feature into the current camera frame of reference
    Eigen::Vector3d p_FinIi = R_GtoIi * (p_FinG - p_IiinG);
    Eigen::Vector3d p_FinCi = R_ItoC * p_FinIi + p_IinC;

    // Normalized projected feature bearing (perspective divide by depth)
    Eigen::Vector2d uv_norm;
    uv_norm << p_FinCi(0) / p_FinCi(2), p_FinCi(1) / p_FinCi(2);

    // Square-root information and gate (gate == 0 zeros out the whole residual/Jacobian)
    Eigen::Matrix<double, 2, 2> sqrtQ_gate = gate * sqrtQ;

    // Get the distorted raw image coordinate using the camera model
    // Also if jacobians are requested, then compute derivatives
    // H_dz_dzn: distorted pixel wrt normalized coords; H_dz_dzeta: distorted pixel wrt intrinsics
    Eigen::Vector2d uv_dist;
    Eigen::MatrixXd H_dz_dzn, H_dz_dzeta;
    if (is_fisheye) {
      ov_core::CamEqui cam(0, 0);
      cam.set_value(camera_vals);
      uv_dist = cam.distort_d(uv_norm);
      if (jacobians) {
        cam.compute_distort_jacobian(uv_norm, H_dz_dzn, H_dz_dzeta);
        H_dz_dzn = sqrtQ_gate * H_dz_dzn;
        H_dz_dzeta = sqrtQ_gate * H_dz_dzeta;
      }
    } else {
      ov_core::CamRadtan cam(0, 0);
      cam.set_value(camera_vals);
      uv_dist = cam.distort_d(uv_norm);
      if (jacobians) {
        cam.compute_distort_jacobian(uv_norm, H_dz_dzn, H_dz_dzeta);
        H_dz_dzn = sqrtQ_gate * H_dz_dzn;
        H_dz_dzeta = sqrtQ_gate * H_dz_dzeta;
      }
    }

    // Compute residual
    // NOTE: the sign is flipped since the ceres cost function only minimizes ||f(x)||^2
    // NOTE: thus since we have found the derivative of uv_meas = f(x) + n
    // NOTE: we need to reformulate into a zero error constraint 0 = f(x) + n - uv_meas
    // NOTE: if it was the other way (0 = uv_meas - f(x) - n) all the Jacobians would need to be flipped
    Eigen::Vector2d res = uv_dist - uv_meas;
    res = sqrtQ_gate * res;
    residuals[0] = res(0);
    residuals[1] = res(1);

    // Compute jacobians if requested by ceres
    if (jacobians) {

      // Normalized coordinates in respect to projection function (perspective divide derivative)
      Eigen::MatrixXd H_dzn_dpfc = Eigen::MatrixXd::Zero(2, 3);
      H_dzn_dpfc << 1.0 / p_FinCi(2), 0, -p_FinCi(0) / std::pow(p_FinCi(2), 2), 0, 1.0 / p_FinCi(2), -p_FinCi(1) / std::pow(p_FinCi(2), 2);
      // Chain rule: distorted pixel wrt feature in camera frame (sqrtQ_gate already folded into H_dz_dzn)
      Eigen::MatrixXd H_dz_dpfc = H_dz_dzn * H_dzn_dpfc;

      // Jacobian wrt q_GtoIi (4th quaternion column left zero for the local parameterization)
      if (jacobians[0]) {
        Eigen::Map<Eigen::Matrix<double, 2, 4, Eigen::RowMajor>> jacobian(jacobians[0]);
        jacobian.block(0, 0, 2, 3) = H_dz_dpfc * R_ItoC * ov_core::skew_x(p_FinIi);
        jacobian.block(0, 3, 2, 1).setZero();
      }

      // Jacobian wrt p_IiinG
      if (jacobians[1]) {
        Eigen::Map<Eigen::Matrix<double, 2, 3, Eigen::RowMajor>> jacobian(jacobians[1]);
        jacobian.block(0, 0, 2, 3) = -H_dz_dpfc * R_ItoC * R_GtoIi;
      }

      // Jacobian wrt feature p_FinG
      if (jacobians[2]) {
        Eigen::Map<Eigen::Matrix<double, 2, 3, Eigen::RowMajor>> jacobian(jacobians[2]);
        jacobian.block(0, 0, 2, 3) = H_dz_dpfc * R_ItoC * R_GtoIi;
      }

      // Jacobian wrt IMU-camera transform q_ItoC
      if (jacobians[3]) {
        Eigen::Map<Eigen::Matrix<double, 2, 4, Eigen::RowMajor>> jacobian(jacobians[3]);
        jacobian.block(0, 0, 2, 3) = H_dz_dpfc * ov_core::skew_x(R_ItoC * p_FinIi);
        jacobian.block(0, 3, 2, 1).setZero();
      }

      // Jacobian wrt IMU-camera transform p_IinC
      if (jacobians[4]) {
        Eigen::Map<Eigen::Matrix<double, 2, 3, Eigen::RowMajor>> jacobian(jacobians[4]);
        jacobian.block(0, 0, 2, 3) = H_dz_dpfc;
      }

      // Jacobian wrt camera intrinsics (focal, center, distortion)
      if (jacobians[5]) {
        Eigen::Map<Eigen::Matrix<double, 2, 8, Eigen::RowMajor>> jacobian(jacobians[5]);
        jacobian.block(0, 0, 2, 8) = H_dz_dzeta;
      }
    }
    return true;
  }
};
} // namespace ov_init
#endif // OV_INIT_CERES_IMAGEREPROJCALIB_H

View File

@@ -0,0 +1,311 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_CERES_IMUCPIV1_H
#define OV_INIT_CERES_IMUCPIV1_H
#include <ceres/ceres.h>
#include "utils/quat_ops.h"
namespace ov_init {
/**
* @brief Factor for IMU continuous preintegration version 1
*/
class Factor_ImuCPIv1 : public ceres::CostFunction {
public:
  // Preintegrated measurements and time interval
  Eigen::Vector3d alpha;   // preintegrated position measurement
  Eigen::Vector3d beta;    // preintegrated velocity measurement
  Eigen::Vector4d q_breve; // preintegrated relative orientation
  double dt;               // time interval the preintegration spans

  // Preintegration linearization points (bias values used when preintegrating)
  Eigen::Vector3d b_w_lin_save;
  Eigen::Vector3d b_a_lin_save;

  // Preintegrated bias jacobians
  Eigen::Matrix3d J_q; // J_q - orientation wrt bias w
  Eigen::Matrix3d J_a; // J_a - position wrt bias w
  Eigen::Matrix3d J_b; // J_b - velocity wrt bias w
  Eigen::Matrix3d H_a; // H_a - position wrt bias a
  Eigen::Matrix3d H_b; // H_b - velocity wrt bias a

  // Sqrt of the preintegration information (upper-triangular factor from LLT)
  Eigen::Matrix<double, 15, 15> sqrtI_save;

  // Gravity in the global frame
  Eigen::Vector3d grav_save;

  /**
   * @brief Default constructor
   *
   * Stores the preintegrated measurement, its bias linearization points and bias
   * jacobians, and converts the measurement covariance into a square-root
   * information matrix. Exits the program if the covariance is not invertible.
   *
   * @param deltatime Time interval spanned by the preintegration
   * @param grav Gravity vector in the global frame
   * @param alpha Preintegrated position measurement
   * @param beta Preintegrated velocity measurement
   * @param q_KtoK1 Preintegrated relative orientation
   * @param ba_lin Accelerometer bias linearization point
   * @param bg_lin Gyroscope bias linearization point
   * @param J_q Orientation jacobian wrt gyro bias
   * @param J_beta Velocity jacobian wrt gyro bias
   * @param J_alpha Position jacobian wrt gyro bias
   * @param H_beta Velocity jacobian wrt accel bias
   * @param H_alpha Position jacobian wrt accel bias
   * @param covariance 15x15 covariance of the preintegrated measurement
   */
  Factor_ImuCPIv1(double deltatime, Eigen::Vector3d &grav, Eigen::Vector3d &alpha, Eigen::Vector3d &beta, Eigen::Vector4d &q_KtoK1,
                  Eigen::Vector3d &ba_lin, Eigen::Vector3d &bg_lin, Eigen::Matrix3d &J_q, Eigen::Matrix3d &J_beta, Eigen::Matrix3d &J_alpha,
                  Eigen::Matrix3d &H_beta, Eigen::Matrix3d &H_alpha, Eigen::Matrix<double, 15, 15> &covariance) {

    // Save measurements
    this->alpha = alpha;
    this->beta = beta;
    this->q_breve = q_KtoK1;
    this->dt = deltatime;
    this->grav_save = grav;

    // Linearization points
    this->b_a_lin_save = ba_lin;
    this->b_w_lin_save = bg_lin;

    // Save bias jacobians
    this->J_q = J_q;
    this->J_a = J_alpha;
    this->J_b = J_beta;
    this->H_a = H_alpha;
    this->H_b = H_beta;

    // Check that we have a valid covariance matrix that we can get the information of
    // (invert via LLT solve against identity rather than a direct inverse)
    Eigen::MatrixXd I = Eigen::MatrixXd::Identity(covariance.rows(), covariance.rows());
    Eigen::MatrixXd information = covariance.llt().solve(I);
    if (std::isnan(information.norm())) {
      std::cerr << "P - " << std::endl << covariance << std::endl << std::endl;
      std::cerr << "Pinv - " << std::endl << covariance.inverse() << std::endl << std::endl;
      std::exit(EXIT_FAILURE);
    }

    // Get square-root of our information matrix
    Eigen::LLT<Eigen::MatrixXd> lltOfI(information);
    sqrtI_save = lltOfI.matrixL().transpose();

    // Set the number of measurements, and the block sizes
    // (order must match Evaluate's parameters[] indexing below)
    set_num_residuals(15);
    mutable_parameter_block_sizes()->push_back(4); // q_GtoI1
    mutable_parameter_block_sizes()->push_back(3); // bg_1
    mutable_parameter_block_sizes()->push_back(3); // v_I1inG
    mutable_parameter_block_sizes()->push_back(3); // ba_1
    mutable_parameter_block_sizes()->push_back(3); // p_I1inG
    mutable_parameter_block_sizes()->push_back(4); // q_GtoI2
    mutable_parameter_block_sizes()->push_back(3); // bg_2
    mutable_parameter_block_sizes()->push_back(3); // v_I2inG
    mutable_parameter_block_sizes()->push_back(3); // ba_2
    mutable_parameter_block_sizes()->push_back(3); // p_I2inG
  }

  virtual ~Factor_ImuCPIv1() {}

  /**
   * @brief Error residual and Jacobian calculation
   *
   * This computes the error between the integrated preintegrated measurement
   * and the current state estimate. This also takes into account the
   * bias linearization point changes (first-order correction via J_* / H_*).
   * Residual row layout: [theta(3), bg(3), velocity(3), ba(3), position(3)].
   */
  bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const override {

    // Get the local variables (these would be different if we relinearized)
    Eigen::Vector3d gravity = grav_save;
    Eigen::Matrix<double, 15, 15> sqrtI = sqrtI_save;
    Eigen::Vector3d b_w_lin = b_w_lin_save;
    Eigen::Vector3d b_a_lin = b_a_lin_save;

    // Get the state estimates from the ceres parameters.
    // Each clone is stored in the canonical VINS format: q, bw, v, ba, p
    Eigen::Vector4d q_1 = Eigen::Map<const Eigen::Vector4d>(parameters[0]);
    Eigen::Matrix3d R_1 = ov_core::quat_2_Rot(q_1);
    Eigen::Vector4d q_2 = Eigen::Map<const Eigen::Vector4d>(parameters[5]);

    // Get bias_w
    Eigen::Vector3d b_w1 = Eigen::Map<const Eigen::Vector3d>(parameters[1]);
    Eigen::Vector3d b_w2 = Eigen::Map<const Eigen::Vector3d>(parameters[6]);

    // Get bias_a
    Eigen::Vector3d b_a1 = Eigen::Map<const Eigen::Vector3d>(parameters[3]);
    Eigen::Vector3d b_a2 = Eigen::Map<const Eigen::Vector3d>(parameters[8]);

    // Get velocity
    Eigen::Vector3d v_1 = Eigen::Map<const Eigen::Vector3d>(parameters[2]);
    Eigen::Vector3d v_2 = Eigen::Map<const Eigen::Vector3d>(parameters[7]);

    // Get positions
    Eigen::Vector3d p_1 = Eigen::Map<const Eigen::Vector3d>(parameters[4]);
    Eigen::Vector3d p_2 = Eigen::Map<const Eigen::Vector3d>(parameters[9]);

    // Get the change in clone 1's biases from the linearization points
    Eigen::Vector3d dbw = b_w1 - b_w_lin;
    Eigen::Vector3d dba = b_a1 - b_a_lin;

    // Quaternion associated with the bias w correction
    // (small-angle approximation of the full exponential map)
    // Eigen::Vector4d q_b= rot_2_quat(Exp(-J_q*dbw));
    Eigen::Vector4d q_b;
    q_b.block(0, 0, 3, 1) = 0.5 * J_q * dbw;
    q_b(3, 0) = 1.0;
    q_b = q_b / q_b.norm();

    // Relative orientation from state estimates
    Eigen::Vector4d q_1_to_2 = ov_core::quat_multiply(q_2, ov_core::Inv(q_1));

    // Intermediate quaternions for error/jacobian computation
    Eigen::Vector4d q_res_minus = ov_core::quat_multiply(q_1_to_2, ov_core::Inv(q_breve));
    Eigen::Vector4d q_res_plus = ov_core::quat_multiply(q_res_minus, q_b);

    //================================================================================
    //================================================================================
    //================================================================================

    // Compute residual
    // Orientation error uses the small-angle 2*vec(q) approximation;
    // velocity/position errors are expressed in frame 1 (rotated by R_1) and
    // corrected for bias changes with the preintegrated bias jacobians
    Eigen::Matrix<double, 15, 1> res;
    res.block(0, 0, 3, 1) = 2 * q_res_plus.block(0, 0, 3, 1);
    res.block(3, 0, 3, 1) = b_w2 - b_w1;
    res.block(6, 0, 3, 1) = R_1 * (v_2 - v_1 + gravity * dt) - J_b * dbw - H_b * dba - beta;
    res.block(9, 0, 3, 1) = b_a2 - b_a1;
    res.block(12, 0, 3, 1) = R_1 * (p_2 - p_1 - v_1 * dt + .5 * gravity * std::pow(dt, 2)) - J_a * dbw - H_a * dba - alpha;
    res = sqrtI * res;

    // Store residuals
    for (int i = 0; i < res.rows(); i++) {
      residuals[i] = res(i, 0);
    }

    //================================================================================
    //================================================================================
    //================================================================================

    // Compute jacobians if asked
    if (jacobians) {

      // Stores the total preintegrated jacobian into one spot
      // Column layout matches the parameter order: [th1 bw1 v1 ba1 p1 | th2 bw2 v2 ba2 p2]
      Eigen::Matrix<double, 15, 30> Jacobian = Eigen::Matrix<double, 15, 30>::Zero();

      // Quick identity
      Eigen::Matrix<double, 3, 3> eye = Eigen::MatrixXd::Identity(3, 3);
      Eigen::Matrix<double, 4, 1> q_meas_plus = ov_core::quat_multiply(ov_core::Inv(q_breve), q_b);

      // Dtheta wrt theta 1
      Jacobian.block(0, 0, 3, 3) = -((q_1_to_2(3, 0) * eye - ov_core::skew_x(q_1_to_2.block(0, 0, 3, 1))) *
                                         (q_meas_plus(3, 0) * eye + ov_core::skew_x(q_meas_plus.block(0, 0, 3, 1))) -
                                     q_1_to_2.block(0, 0, 3, 1) * q_meas_plus.block(0, 0, 3, 1).transpose());
      // Dtheta wrt theta 2
      Jacobian.block(0, 15, 3, 3) = q_res_plus(3, 0) * eye + ov_core::skew_x(q_res_plus.block(0, 0, 3, 1));
      // Dtheta wrt bw 1
      Jacobian.block(0, 3, 3, 3) = (q_res_minus(3, 0) * eye - ov_core::skew_x(q_res_minus.block(0, 0, 3, 1))) * J_q;

      // Dbw wrt bw1 and bw2
      Jacobian.block(3, 3, 3, 3) = -eye;
      Jacobian.block(3, 18, 3, 3) = eye;

      // Dvelocity wrt theta 1
      Jacobian.block(6, 0, 3, 3) = ov_core::skew_x(R_1 * (v_2 - v_1 + gravity * dt));
      // Dvelocity wrt v 1
      Jacobian.block(6, 6, 3, 3) = -R_1;
      // Dvelocity wrt v 2
      Jacobian.block(6, 21, 3, 3) = R_1;
      // Dvelocity wrt bw 1
      Jacobian.block(6, 3, 3, 3) = -J_b;
      // Dvelocity wrt ba 1
      Jacobian.block(6, 9, 3, 3) = -H_b;

      // Dba wrt ba1 and ba2
      Jacobian.block(9, 9, 3, 3) = -eye;
      Jacobian.block(9, 24, 3, 3) = eye;

      // Dposition wrt theta 1
      Jacobian.block(12, 0, 3, 3) = ov_core::skew_x(R_1 * (p_2 - p_1 - v_1 * dt + .5 * gravity * std::pow(dt, 2)));
      // Dposition wrt v 1
      Jacobian.block(12, 6, 3, 3) = -R_1 * dt;
      // Dposition wrt p 1
      Jacobian.block(12, 12, 3, 3) = -R_1;
      // Dposition wrt p 2
      Jacobian.block(12, 27, 3, 3) = R_1;
      // Dposition wrt bw 1
      Jacobian.block(12, 3, 3, 3) = -J_a;
      // Dposition wrt ba 1
      Jacobian.block(12, 9, 3, 3) = -H_a;

      // Apply sqrt info (so each per-parameter block below is already whitened)
      Jacobian = sqrtI * Jacobian;

      // Now store jacobians
      // Th1 (4th quaternion column left zero for the local parameterization)
      if (jacobians[0]) {
        Eigen::Map<Eigen::Matrix<double, 15, 4, Eigen::RowMajor>> J_th1(jacobians[0], 15, 4);
        J_th1.block(0, 0, 15, 3) = Jacobian.block(0, 0, 15, 3);
        J_th1.block(0, 3, 15, 1).setZero();
      }
      // Th2
      if (jacobians[5]) {
        Eigen::Map<Eigen::Matrix<double, 15, 4, Eigen::RowMajor>> J_th2(jacobians[5], 15, 4);
        J_th2.block(0, 0, 15, 3) = Jacobian.block(0, 15, 15, 3);
        J_th2.block(0, 3, 15, 1).setZero();
      }
      // bw1
      if (jacobians[1]) {
        Eigen::Map<Eigen::Matrix<double, 15, 3, Eigen::RowMajor>> J_bw1(jacobians[1], 15, 3);
        J_bw1.block(0, 0, 15, 3) = Jacobian.block(0, 3, 15, 3);
      }
      // bw2
      if (jacobians[6]) {
        Eigen::Map<Eigen::Matrix<double, 15, 3, Eigen::RowMajor>> J_bw2(jacobians[6], 15, 3);
        J_bw2.block(0, 0, 15, 3) = Jacobian.block(0, 18, 15, 3);
      }
      // v1
      if (jacobians[2]) {
        Eigen::Map<Eigen::Matrix<double, 15, 3, Eigen::RowMajor>> J_v1(jacobians[2], 15, 3);
        J_v1.block(0, 0, 15, 3) = Jacobian.block(0, 6, 15, 3);
      }
      // v2
      if (jacobians[7]) {
        Eigen::Map<Eigen::Matrix<double, 15, 3, Eigen::RowMajor>> J_v2(jacobians[7], 15, 3);
        J_v2.block(0, 0, 15, 3) = Jacobian.block(0, 21, 15, 3);
      }
      // ba1
      if (jacobians[3]) {
        Eigen::Map<Eigen::Matrix<double, 15, 3, Eigen::RowMajor>> J_ba1(jacobians[3], 15, 3);
        J_ba1.block(0, 0, 15, 3) = Jacobian.block(0, 9, 15, 3);
      }
      // ba2
      if (jacobians[8]) {
        Eigen::Map<Eigen::Matrix<double, 15, 3, Eigen::RowMajor>> J_ba2(jacobians[8], 15, 3);
        J_ba2.block(0, 0, 15, 3) = Jacobian.block(0, 24, 15, 3);
      }
      // p1
      if (jacobians[4]) {
        Eigen::Map<Eigen::Matrix<double, 15, 3, Eigen::RowMajor>> J_p1(jacobians[4], 15, 3);
        J_p1.block(0, 0, 15, 3) = Jacobian.block(0, 12, 15, 3);
      }
      // p2
      if (jacobians[9]) {
        Eigen::Map<Eigen::Matrix<double, 15, 3, Eigen::RowMajor>> J_p2(jacobians[9], 15, 3);
        J_p2.block(0, 0, 15, 3) = Jacobian.block(0, 27, 15, 3);
      }
    }
    return true;
  }
};
} // namespace ov_init
#endif // OV_INIT_CERES_IMUCPIV1_H

View File

@@ -0,0 +1,94 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_CERES_JPLQUATLOCAL_H
#define OV_INIT_CERES_JPLQUATLOCAL_H
#include <ceres/ceres.h>
#include "utils/quat_ops.h"
namespace ov_init {
/**
* @brief JPL quaternion CERES state parameterization
*/
class State_JPLQuatLocal : public ceres::LocalParameterization {
public:
/**
* @brief State update function for a JPL quaternion representation.
*
* Implements update operation by left-multiplying the current
* quaternion with a quaternion built from a small axis-angle perturbation.
*
* @f[
* \bar{q}=norm\Big(\begin{bmatrix} 0.5*\mathbf{\theta_{dx}} \\ 1 \end{bmatrix}\Big) \hat{\bar{q}}
* @f]
*/
bool Plus(const double *x, const double *delta, double *x_plus_delta) const override {
// Apply the standard JPL update: q <-- [d_th/2; 1] (x) q
Eigen::Map<const Eigen::Vector4d> q(x);
// Get delta into eigen
Eigen::Map<const Eigen::Vector3d> d_th(delta);
Eigen::Matrix<double, 4, 1> d_q;
double theta = d_th.norm();
if (theta < 1e-8) {
d_q << .5 * d_th, 1.0;
} else {
d_q.block(0, 0, 3, 1) = (d_th / theta) * std::sin(theta / 2);
d_q(3, 0) = std::cos(theta / 2);
}
d_q = ov_core::quatnorm(d_q);
// Do the update
Eigen::Map<Eigen::Vector4d> q_plus(x_plus_delta);
q_plus = ov_core::quat_multiply(d_q, q);
return true;
}
/**
* @brief Computes the jacobian in respect to the local parameterization
*
* This essentially "tricks" ceres.
* Instead of doing what ceres wants:
* dr/dlocal= dr/dglobal * dglobal/dlocal
*
* We instead directly do:
* dr/dlocal= [ dr/dlocal, 0] * [I; 0]= dr/dlocal.
* Therefore we here define dglobal/dlocal= [I; 0]
*/
bool ComputeJacobian(const double *x, double *jacobian) const override {
Eigen::Map<Eigen::Matrix<double, 4, 3, Eigen::RowMajor>> j(jacobian);
j.topRows<3>().setIdentity();
j.bottomRows<1>().setZero();
return true;
}
int GlobalSize() const override { return 4; };
int LocalSize() const override { return 3; };
};
} // namespace ov_init
#endif // OV_INIT_CERES_JPLQUATLOCAL_H

36
ov_init/src/dummy.cpp Normal file
View File

@@ -0,0 +1,36 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/**
* @namespace ov_init
* @brief State initialization code
*
* Right now this contains static and dynamic initialization code for a visual-inertial system.
 * It will wait for the platform to be stationary, and then initialize its orientation in the gravity frame.
*
* If the platform is not stationary then we leverage dynamic initialization to try to recover the initial state.
* This is an implementation of the work [Estimator initialization in vision-aided inertial navigation with unknown camera-IMU
* calibration](https://ieeexplore.ieee.org/document/6386235) @cite Dong2012IROS which solves the initialization problem by first creating a
 * linear system for recovering the velocity, gravity, and feature positions.
* After the initial recovery, a full optimization is performed to allow for covariance recovery.
*
*/
namespace ov_init {}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,106 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_DYNAMICINITIALIZER_H
#define OV_INIT_DYNAMICINITIALIZER_H
#include "ceres/Factor_GenericPrior.h"
#include "ceres/Factor_ImageReprojCalib.h"
#include "ceres/Factor_ImuCPIv1.h"
#include "ceres/State_JPLQuatLocal.h"
#include "init/InertialInitializerOptions.h"
#include "utils/helper.h"
#include "cpi/CpiV1.h"
#include "feat/FeatureHelper.h"
#include "types/IMU.h"
#include "types/Landmark.h"
#include "utils/colors.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
#include "utils/sensor_data.h"
namespace ov_init {
/**
* @brief Initializer for a dynamic visual-inertial system.
*
* This implementation that will try to recover the initial conditions of the system.
* Additionally, we will try to recover the covariance of the system.
* To initialize with arbitrary motion:
* 1. Preintegrate our system to get the relative rotation change (biases assumed known)
* 2. Construct linear system with features to recover velocity (solve with |g| constraint)
* 3. Perform a large MLE with all calibration and recover the covariance.
*
* Method is based on this work:
* > Dong-Si, Tue-Cuong, and Anastasios I. Mourikis.
* > "Estimator initialization in vision-aided inertial navigation with unknown camera-IMU calibration."
* > 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems. IEEE, 2012.
*
* - https://ieeexplore.ieee.org/abstract/document/6386235
* - https://tdongsi.github.io/download/pubs/2011_VIO_Init_TR.pdf
*
*/
class DynamicInitializer {

public:
  /**
   * @brief Default constructor
   *
   * Only stores references to the shared data; all work happens in initialize().
   *
   * @param params_ Parameters loaded from either ROS or CMDLINE
   * @param db Feature tracker database with all features in it
   * @param imu_data_ Shared pointer to our IMU vector of historical information
   */
  explicit DynamicInitializer(const InertialInitializerOptions &params_, std::shared_ptr<ov_core::FeatureDatabase> db,
                              std::shared_ptr<std::vector<ov_core::ImuData>> imu_data_)
      : params(params_), _db(db), imu_data(imu_data_) {}

  /**
   * @brief Try to get the initialized system
   *
   * @param[out] timestamp Timestamp we have initialized the state at (last imu state)
   * @param[out] covariance Calculated covariance of the returned state
   * @param[out] order Order of the covariance matrix
   * @param _imu Pointer to the "active" IMU state (q_GtoI, p_IinG, v_IinG, bg, ba)
   * @param _clones_IMU Map between imaging times and clone poses (q_GtoIi, p_IiinG)
   * @param _features_SLAM Our current set of SLAM features (3d positions)
   * @param _calib_IMUtoCAM Calibration poses for each camera (R_ItoC, p_IinC)
   * @param _cam_intrinsics Camera intrinsics
   * @return True if we have successfully initialized our system
   */
  bool initialize(double &timestamp, Eigen::MatrixXd &covariance, std::vector<std::shared_ptr<ov_type::Type>> &order,
                  std::shared_ptr<ov_type::IMU> &_imu, std::map<double, std::shared_ptr<ov_type::PoseJPL>> &_clones_IMU,
                  std::unordered_map<size_t, std::shared_ptr<ov_type::Landmark>> &_features_SLAM,
                  std::unordered_map<size_t, std::shared_ptr<ov_type::PoseJPL>> &_calib_IMUtoCAM,
                  std::unordered_map<size_t, std::shared_ptr<ov_type::Vec>> &_cam_intrinsics);

private:
  /// Initialization parameters
  InertialInitializerOptions params;

  /// Feature tracker database with all features in it
  std::shared_ptr<ov_core::FeatureDatabase> _db;

  /// Our history of IMU messages (time, angular, linear)
  std::shared_ptr<std::vector<ov_core::ImuData>> imu_data;
};
} // namespace ov_init
#endif // OV_INIT_DYNAMICINITIALIZER_H

View File

@@ -0,0 +1,98 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "InertialInitializer.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_init;
// Constructor: stores the options and feature database, allocates the shared IMU
// history, and constructs both the static and dynamic sub-initializers.
InertialInitializer::InertialInitializer(InertialInitializerOptions &params_, std::shared_ptr<ov_core::FeatureDatabase> db)
    : params(params_), _db(db) {

  // Vector of our IMU data (must be created before being handed to the
  // sub-initializers below; shared so all three see the same history)
  imu_data = std::make_shared<std::vector<ov_core::ImuData>>();

  // Create initializers
  init_static = std::make_shared<StaticInitializer>(params, _db, imu_data);
  init_dynamic = std::make_shared<DynamicInitializer>(params, _db, imu_data);
}
bool InertialInitializer::initialize(double &timestamp, Eigen::MatrixXd &covariance, std::vector<std::shared_ptr<ov_type::Type>> &order,
                                     std::shared_ptr<ov_type::IMU> t_imu, bool wait_for_jerk) {

  // Find the most recent camera measurement time present in the feature database
  double newest_cam_time = -1;
  for (auto const &feat : _db->get_internal_data()) {
    for (auto const &camtimepair : feat.second->timestamps) {
      for (auto const &time : camtimepair.second) {
        if (time > newest_cam_time)
          newest_cam_time = time;
      }
    }
  }

  // Bail out until we have accumulated a full initialization window of camera data
  double oldest_time = newest_cam_time - params.init_window_time - 0.01;
  if (newest_cam_time < 0 || oldest_time < 0)
    return false;

  // Drop visual measurements that have fallen out of the initialization window;
  // everything left in the database is fair game for the initializers
  _db->cleanup_measurements(oldest_time);

  // Likewise drop inertial readings from before the (time-offset corrected) window start
  double oldest_imu_time = oldest_time + params.calib_camimu_dt;
  auto it_imu = imu_data->begin();
  while (it_imu != imu_data->end() && it_imu->timestamp < oldest_imu_time) {
    it_imu = imu_data->erase(it_imu);
  }

  // Decide whether the platform is moving by thresholding image-to-image disparity.
  // A non-positive threshold disables the check and we fall through as "stationary".
  bool disparity_detected_moving = false;
  if (params.init_max_disparity > 0) {
    int num_features = 0;
    double average_disparity = 0.0;
    double variance_disparity = 0.0;
    FeatureHelper::compute_disparity(_db, average_disparity, variance_disparity, num_features);
    // Without enough tracked features the disparity statistic is meaningless
    if (num_features < 10) {
      PRINT_DEBUG(YELLOW "[init]: not enough features to compute disparity %d < 10\n" RESET, num_features);
      return false;
    }
    PRINT_DEBUG(YELLOW "[init]: disparity of the platform is %.4f (%.4f threshold)\n" RESET, average_disparity, params.init_max_disparity);
    disparity_detected_moving = (average_disparity > params.init_max_disparity);
  }

  // Stationary platform (and static init enabled via a positive IMU threshold) -> static initializer
  if (!disparity_detected_moving && params.init_imu_thresh > 0.0) {
    PRINT_DEBUG(GREEN "[init]: USING STATIC INITIALIZER METHOD!\n" RESET);
    return init_static->initialize(timestamp, covariance, order, t_imu, wait_for_jerk);
  }

  // Otherwise run the dynamic initializer (its extra recovered states are discarded here)
  PRINT_DEBUG(GREEN "[init]: USING DYNAMIC INITIALIZER METHOD!\n" RESET);
  std::map<double, std::shared_ptr<ov_type::PoseJPL>> _clones_IMU;
  std::unordered_map<size_t, std::shared_ptr<ov_type::Landmark>> _features_SLAM;
  std::unordered_map<size_t, std::shared_ptr<ov_type::PoseJPL>> _calib_IMUtoCAM;
  std::unordered_map<size_t, std::shared_ptr<ov_type::Vec>> _cam_intrinsics;
  return init_dynamic->initialize(timestamp, covariance, order, t_imu, _clones_IMU, _features_SLAM, _calib_IMUtoCAM, _cam_intrinsics);
}

View File

@@ -0,0 +1,136 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_INERTIALINITIALIZER_H
#define OV_INIT_INERTIALINITIALIZER_H
#include "dynamic/DynamicInitializer.h"
#include "init/InertialInitializerOptions.h"
#include "static/StaticInitializer.h"
#include "types/Type.h"
#include "utils/colors.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
#include "utils/sensor_data.h"
namespace ov_init {
/**
* @brief Initializer for visual-inertial system.
*
 * This will try to do both static and dynamic initialization of the state.
 * The user can request to wait for a jump in our IMU readings (i.e. device is picked up) or to initialize as soon as possible.
 * For static initialization, the user needs to specify the calibration beforehand, otherwise dynamic is always used.
* The logic is as follows:
* 1. Try to perform dynamic initialization of state elements.
* 2. If this fails and we have calibration then we can try to do static initialization
* 3. If the unit is stationary and we are waiting for a jerk, just return, otherwise initialize the state!
*
* The dynamic system is based on an implementation and extension of the work [Estimator initialization in vision-aided inertial navigation
* with unknown camera-IMU calibration](https://ieeexplore.ieee.org/document/6386235) @cite Dong2012IROS which solves the initialization
* problem by first creating a linear system for recovering the camera to IMU rotation, then for velocity, gravity, and feature positions,
* and finally a full optimization to allow for covariance recovery.
* Another paper which might be of interest to the reader is [An Analytical Solution to the IMU Initialization
* Problem for Visual-Inertial Systems](https://ieeexplore.ieee.org/abstract/document/9462400) which has some detailed
* experiments on scale recovery and the accelerometer bias.
*/
class InertialInitializer {

public:
  /**
   * @brief Default constructor
   * @param params_ Parameters loaded from either ROS or CMDLINE
   * @param db Feature tracker database with all features in it
   */
  explicit InertialInitializer(InertialInitializerOptions &params_, std::shared_ptr<ov_core::FeatureDatabase> db);

  /**
   * @brief Feed function for inertial data
   * @param message Contains our timestamp and inertial information
   * @param oldest_time Time that we can discard measurements before (-1 keeps all history)
   */
  void feed_imu(const ov_core::ImuData &message, double oldest_time = -1) {

    // Append it to our vector
    imu_data->emplace_back(message);

    // Sort our imu data (handles any out of order measurements)
    // std::sort(imu_data->begin(), imu_data->end(), [](const IMUDATA i, const IMUDATA j) {
    //  return i.timestamp < j.timestamp;
    //});

    // Loop through and delete imu messages that are older than our requested time
    // BUGFIX: we must compare each *stored* measurement's timestamp, not the newly
    // appended message's (which is constant over this loop and would either erase
    // the entire buffer -- including the new message -- or erase nothing at all)
    if (oldest_time != -1) {
      auto it0 = imu_data->begin();
      while (it0 != imu_data->end()) {
        if (it0->timestamp < oldest_time) {
          it0 = imu_data->erase(it0);
        } else {
          it0++;
        }
      }
    }
  }

  /**
   * @brief Try to get the initialized system
   *
   *
   * @m_class{m-note m-warning}
   *
   * @par Processing Cost
   * This is a serial process that can take on orders of seconds to complete.
   * If you are a real-time application then you will likely want to call this from
   * a async thread which allows for this to process in the background.
   * The features used are cloned from the feature database thus should be thread-safe
   * to continue to append new feature tracks to the database.
   *
   * @param[out] timestamp Timestamp we have initialized the state at
   * @param[out] covariance Calculated covariance of the returned state
   * @param[out] order Order of the covariance matrix
   * @param[out] t_imu Our imu type (need to have correct ids)
   * @param wait_for_jerk If true we will wait for a "jerk"
   * @return True if we have successfully initialized our system
   */
  bool initialize(double &timestamp, Eigen::MatrixXd &covariance, std::vector<std::shared_ptr<ov_type::Type>> &order,
                  std::shared_ptr<ov_type::IMU> t_imu, bool wait_for_jerk = true);

protected:
  /// Initialization parameters
  InertialInitializerOptions params;

  /// Feature tracker database with all features in it
  std::shared_ptr<ov_core::FeatureDatabase> _db;

  /// Our history of IMU messages (time, angular, linear)
  std::shared_ptr<std::vector<ov_core::ImuData>> imu_data;

  /// Static initialization helper class
  std::shared_ptr<StaticInitializer> init_static;

  /// Dynamic initialization helper class
  std::shared_ptr<DynamicInitializer> init_dynamic;
};
} // namespace ov_init
#endif // OV_INIT_INERTIALINITIALIZER_H

View File

@@ -0,0 +1,400 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_INERTIALINITIALIZEROPTIONS_H
#define OV_INIT_INERTIALINITIALIZEROPTIONS_H
#include <Eigen/Eigen>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include "cam/CamEqui.h"
#include "cam/CamRadtan.h"
#include "feat/FeatureInitializerOptions.h"
#include "track/TrackBase.h"
#include "utils/colors.h"
#include "utils/opencv_yaml_parse.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
namespace ov_init {
/**
* @brief Struct which stores all options needed for state estimation.
*
* This is broken into a few different parts: estimator, trackers, and simulation.
* If you are going to add a parameter here you will need to add it to the parsers.
* You will also need to add it to the print statement at the bottom of each.
*/
struct InertialInitializerOptions {
/**
* @brief This function will load the non-simulation parameters of the system and print.
* @param parser If not null, this parser will be used to load our parameters
*/
void print_and_load(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
print_and_load_initializer(parser);
print_and_load_noise(parser);
print_and_load_state(parser);
}
// INITIALIZATION ============================
/// Amount of time we will initialize over (seconds)
double init_window_time = 1.0;
/// Variance threshold on our acceleration to be classified as moving
double init_imu_thresh = 1.0;
/// Max disparity we will consider the unit to be stationary
double init_max_disparity = 1.0;
/// Number of features we should try to track
int init_max_features = 50;
/// If we should optimize and recover the calibration in our MLE
bool init_dyn_mle_opt_calib = false;
/// Max number of MLE iterations for dynamic initialization
int init_dyn_mle_max_iter = 20;
/// Max number of MLE threads for dynamic initialization
int init_dyn_mle_max_threads = 20;
/// Max time for MLE optimization (seconds)
double init_dyn_mle_max_time = 5.0;
/// Number of poses to use during initialization (max should be cam freq * window)
int init_dyn_num_pose = 5;
/// Minimum degrees we need to rotate before we try to init (sum of norm)
double init_dyn_min_deg = 45.0;
/// Magnitude we will inflate initial covariance of orientation
double init_dyn_inflation_orientation = 10.0;
/// Magnitude we will inflate initial covariance of velocity
double init_dyn_inflation_velocity = 10.0;
/// Magnitude we will inflate initial covariance of gyroscope bias
double init_dyn_inflation_bias_gyro = 100.0;
/// Magnitude we will inflate initial covariance of accelerometer bias
double init_dyn_inflation_bias_accel = 100.0;
/// Minimum reciprocal condition number acceptable for our covariance recovery (min_sigma / max_sigma <
/// sqrt(min_reciprocal_condition_number))
double init_dyn_min_rec_cond = 1e-15;
/// Initial IMU gyroscope bias values for dynamic initialization (will be optimized)
Eigen::Vector3d init_dyn_bias_g = Eigen::Vector3d::Zero();
/// Initial IMU accelerometer bias values for dynamic initialization (will be optimized)
Eigen::Vector3d init_dyn_bias_a = Eigen::Vector3d::Zero();
/**
* @brief This function will load print out all initializer settings loaded.
* This allows for visual checking that everything was loaded properly from ROS/CMD parsers.
*
* @param parser If not null, this parser will be used to load our parameters
*/
void print_and_load_initializer(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
PRINT_DEBUG("INITIALIZATION SETTINGS:\n");
if (parser != nullptr) {
parser->parse_config("init_window_time", init_window_time);
parser->parse_config("init_imu_thresh", init_imu_thresh);
parser->parse_config("init_max_disparity", init_max_disparity);
parser->parse_config("init_max_features", init_max_features);
parser->parse_config("init_dyn_mle_opt_calib", init_dyn_mle_opt_calib);
parser->parse_config("init_dyn_mle_max_iter", init_dyn_mle_max_iter);
parser->parse_config("init_dyn_mle_max_threads", init_dyn_mle_max_threads);
parser->parse_config("init_dyn_mle_max_time", init_dyn_mle_max_time);
parser->parse_config("init_dyn_num_pose", init_dyn_num_pose);
parser->parse_config("init_dyn_min_deg", init_dyn_min_deg);
parser->parse_config("init_dyn_inflation_ori", init_dyn_inflation_orientation);
parser->parse_config("init_dyn_inflation_vel", init_dyn_inflation_velocity);
parser->parse_config("init_dyn_inflation_bg", init_dyn_inflation_bias_gyro);
parser->parse_config("init_dyn_inflation_ba", init_dyn_inflation_bias_accel);
parser->parse_config("init_dyn_min_rec_cond", init_dyn_min_rec_cond);
std::vector<double> bias_g = {0, 0, 0};
std::vector<double> bias_a = {0, 0, 0};
parser->parse_config("init_dyn_bias_g", bias_g);
parser->parse_config("init_dyn_bias_a", bias_a);
init_dyn_bias_g << bias_g.at(0), bias_g.at(1), bias_g.at(2);
init_dyn_bias_a << bias_a.at(0), bias_a.at(1), bias_a.at(2);
}
PRINT_DEBUG(" - init_window_time: %.2f\n", init_window_time);
PRINT_DEBUG(" - init_imu_thresh: %.2f\n", init_imu_thresh);
PRINT_DEBUG(" - init_max_disparity: %.2f\n", init_max_disparity);
PRINT_DEBUG(" - init_max_features: %.2f\n", init_max_features);
if (init_max_features < 15) {
PRINT_ERROR(RED "number of requested feature tracks to init not enough!!\n" RESET);
PRINT_ERROR(RED " init_max_features = %d\n" RESET, init_max_features);
std::exit(EXIT_FAILURE);
}
PRINT_DEBUG(" - init_dyn_mle_opt_calib: %d\n", init_dyn_mle_opt_calib);
PRINT_DEBUG(" - init_dyn_mle_max_iter: %d\n", init_dyn_mle_max_iter);
PRINT_DEBUG(" - init_dyn_mle_max_threads: %d\n", init_dyn_mle_max_threads);
PRINT_DEBUG(" - init_dyn_mle_max_time: %.2f\n", init_dyn_mle_max_time);
PRINT_DEBUG(" - init_dyn_num_pose: %d\n", init_dyn_num_pose);
PRINT_DEBUG(" - init_dyn_min_deg: %.2f\n", init_dyn_min_deg);
PRINT_DEBUG(" - init_dyn_inflation_ori: %.2e\n", init_dyn_inflation_orientation);
PRINT_DEBUG(" - init_dyn_inflation_vel: %.2e\n", init_dyn_inflation_velocity);
PRINT_DEBUG(" - init_dyn_inflation_bg: %.2e\n", init_dyn_inflation_bias_gyro);
PRINT_DEBUG(" - init_dyn_inflation_ba: %.2e\n", init_dyn_inflation_bias_accel);
PRINT_DEBUG(" - init_dyn_min_rec_cond: %.2e\n", init_dyn_min_rec_cond);
if (init_dyn_num_pose < 4) {
PRINT_ERROR(RED "number of requested frames to init not enough!!\n" RESET);
PRINT_ERROR(RED " init_dyn_num_pose = %d (4 min)\n" RESET, init_dyn_num_pose);
std::exit(EXIT_FAILURE);
}
PRINT_DEBUG(" - init_dyn_bias_g: %.2f, %.2f, %.2f\n", init_dyn_bias_g(0), init_dyn_bias_g(1), init_dyn_bias_g(2));
PRINT_DEBUG(" - init_dyn_bias_a: %.2f, %.2f, %.2f\n", init_dyn_bias_a(0), init_dyn_bias_a(1), init_dyn_bias_a(2));
}
// NOISE / CHI2 ============================
/// Gyroscope white noise (rad/s/sqrt(hz))
double sigma_w = 1.6968e-04;
/// Gyroscope random walk (rad/s^2/sqrt(hz))
double sigma_wb = 1.9393e-05;
/// Accelerometer white noise (m/s^2/sqrt(hz))
double sigma_a = 2.0000e-3;
/// Accelerometer random walk (m/s^3/sqrt(hz))
double sigma_ab = 3.0000e-03;
/// Noise sigma for our raw pixel measurements
double sigma_pix = 1;
/**
* @brief This function will load print out all noise parameters loaded.
* This allows for visual checking that everything was loaded properly from ROS/CMD parsers.
*
* @param parser If not null, this parser will be used to load our parameters
*/
void print_and_load_noise(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
PRINT_DEBUG("NOISE PARAMETERS:\n");
if (parser != nullptr) {
parser->parse_external("relative_config_imu", "imu0", "gyroscope_noise_density", sigma_w);
parser->parse_external("relative_config_imu", "imu0", "gyroscope_random_walk", sigma_wb);
parser->parse_external("relative_config_imu", "imu0", "accelerometer_noise_density", sigma_a);
parser->parse_external("relative_config_imu", "imu0", "accelerometer_random_walk", sigma_ab);
parser->parse_config("up_slam_sigma_px", sigma_pix);
}
PRINT_DEBUG(" - gyroscope_noise_density: %.6f\n", sigma_w);
PRINT_DEBUG(" - accelerometer_noise_density: %.5f\n", sigma_a);
PRINT_DEBUG(" - gyroscope_random_walk: %.7f\n", sigma_wb);
PRINT_DEBUG(" - accelerometer_random_walk: %.6f\n", sigma_ab);
PRINT_DEBUG(" - sigma_pix: %.2f\n", sigma_pix);
}
// STATE DEFAULTS ==========================
/// Gravity magnitude in the global frame (i.e. should be 9.81 typically)
double gravity_mag = 9.81;
/// Number of distinct cameras that we will observe features in
int num_cameras = 1;
/// If we should process two cameras are being stereo or binocular. If binocular, we do monocular feature tracking on each image.
bool use_stereo = true;
/// Will half the resolution all tracking image (aruco will be 1/4 instead of halved if dowsize_aruoc also enabled)
bool downsample_cameras = false;
/// Time offset between camera and IMU (t_imu = t_cam + t_off)
double calib_camimu_dt = 0.0;
/// Map between camid and camera intrinsics (fx, fy, cx, cy, d1...d4, cam_w, cam_h)
std::unordered_map<size_t, std::shared_ptr<ov_core::CamBase>> camera_intrinsics;
/// Map between camid and camera extrinsics (q_ItoC, p_IinC).
std::map<size_t, Eigen::VectorXd> camera_extrinsics;
/**
* @brief This function will load and print all state parameters (e.g. sensor extrinsics)
* This allows for visual checking that everything was loaded properly from ROS/CMD parsers.
*
* @param parser If not null, this parser will be used to load our parameters
*/
void print_and_load_state(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
if (parser != nullptr) {
parser->parse_config("gravity_mag", gravity_mag);
parser->parse_config("max_cameras", num_cameras); // might be redundant
parser->parse_config("use_stereo", use_stereo);
parser->parse_config("downsample_cameras", downsample_cameras);
for (int i = 0; i < num_cameras; i++) {
// Time offset (use the first one)
// TODO: support multiple time offsets between cameras
if (i == 0) {
parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "timeshift_cam_imu", calib_camimu_dt, false);
}
// Distortion model
std::string dist_model = "radtan";
parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "distortion_model", dist_model);
// Distortion parameters
std::vector<double> cam_calib1 = {1, 1, 0, 0};
std::vector<double> cam_calib2 = {0, 0, 0, 0};
parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "intrinsics", cam_calib1);
parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "distortion_coeffs", cam_calib2);
Eigen::VectorXd cam_calib = Eigen::VectorXd::Zero(8);
cam_calib << cam_calib1.at(0), cam_calib1.at(1), cam_calib1.at(2), cam_calib1.at(3), cam_calib2.at(0), cam_calib2.at(1),
cam_calib2.at(2), cam_calib2.at(3);
cam_calib(0) /= (downsample_cameras) ? 2.0 : 1.0;
cam_calib(1) /= (downsample_cameras) ? 2.0 : 1.0;
cam_calib(2) /= (downsample_cameras) ? 2.0 : 1.0;
cam_calib(3) /= (downsample_cameras) ? 2.0 : 1.0;
// FOV / resolution
std::vector<int> matrix_wh = {1, 1};
parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "resolution", matrix_wh);
matrix_wh.at(0) /= (downsample_cameras) ? 2.0 : 1.0;
matrix_wh.at(1) /= (downsample_cameras) ? 2.0 : 1.0;
// Extrinsics
Eigen::Matrix4d T_CtoI = Eigen::Matrix4d::Identity();
parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "T_imu_cam", T_CtoI);
// Load these into our state
Eigen::Matrix<double, 7, 1> cam_eigen;
cam_eigen.block(0, 0, 4, 1) = ov_core::rot_2_quat(T_CtoI.block(0, 0, 3, 3).transpose());
cam_eigen.block(4, 0, 3, 1) = -T_CtoI.block(0, 0, 3, 3).transpose() * T_CtoI.block(0, 3, 3, 1);
// Create intrinsics model
if (dist_model == "equidistant") {
camera_intrinsics.insert({i, std::make_shared<ov_core::CamEqui>(matrix_wh.at(0), matrix_wh.at(1))});
camera_intrinsics.at(i)->set_value(cam_calib);
} else {
camera_intrinsics.insert({i, std::make_shared<ov_core::CamRadtan>(matrix_wh.at(0), matrix_wh.at(1))});
camera_intrinsics.at(i)->set_value(cam_calib);
}
camera_extrinsics.insert({i, cam_eigen});
}
}
PRINT_DEBUG("STATE PARAMETERS:\n");
PRINT_DEBUG(" - gravity_mag: %.4f\n", gravity_mag);
PRINT_DEBUG(" - gravity: %.3f, %.3f, %.3f\n", 0.0, 0.0, gravity_mag);
PRINT_DEBUG(" - num_cameras: %d\n", num_cameras);
PRINT_DEBUG(" - use_stereo: %d\n", use_stereo);
PRINT_DEBUG(" - downsize cameras: %d\n", downsample_cameras);
if (num_cameras != (int)camera_intrinsics.size() || num_cameras != (int)camera_extrinsics.size()) {
PRINT_ERROR(RED "[SIM]: camera calib size does not match max cameras...\n" RESET);
PRINT_ERROR(RED "[SIM]: got %d but expected %d max cameras (camera_intrinsics)\n" RESET, (int)camera_intrinsics.size(), num_cameras);
PRINT_ERROR(RED "[SIM]: got %d but expected %d max cameras (camera_extrinsics)\n" RESET, (int)camera_extrinsics.size(), num_cameras);
std::exit(EXIT_FAILURE);
}
PRINT_DEBUG(" - calib_camimu_dt: %.4f\n", calib_camimu_dt);
for (int n = 0; n < num_cameras; n++) {
std::stringstream ss;
ss << "cam_" << n << "_fisheye:" << (std::dynamic_pointer_cast<ov_core::CamEqui>(camera_intrinsics.at(n)) != nullptr) << std::endl;
ss << "cam_" << n << "_wh:" << std::endl << camera_intrinsics.at(n)->w() << " x " << camera_intrinsics.at(n)->h() << std::endl;
ss << "cam_" << n << "_intrinsic(0:3):" << std::endl
<< camera_intrinsics.at(n)->get_value().block(0, 0, 4, 1).transpose() << std::endl;
ss << "cam_" << n << "_intrinsic(4:7):" << std::endl
<< camera_intrinsics.at(n)->get_value().block(4, 0, 4, 1).transpose() << std::endl;
ss << "cam_" << n << "_extrinsic(0:3):" << std::endl << camera_extrinsics.at(n).block(0, 0, 4, 1).transpose() << std::endl;
ss << "cam_" << n << "_extrinsic(4:6):" << std::endl << camera_extrinsics.at(n).block(4, 0, 3, 1).transpose() << std::endl;
Eigen::Matrix4d T_CtoI = Eigen::Matrix4d::Identity();
T_CtoI.block(0, 0, 3, 3) = ov_core::quat_2_Rot(camera_extrinsics.at(n).block(0, 0, 4, 1)).transpose();
T_CtoI.block(0, 3, 3, 1) = -T_CtoI.block(0, 0, 3, 3) * camera_extrinsics.at(n).block(4, 0, 3, 1);
ss << "T_C" << n << "toI:" << std::endl << T_CtoI << std::endl << std::endl;
PRINT_DEBUG(ss.str().c_str());
}
}
// SIMULATOR ===============================
/// Seed for initial states (i.e. random feature 3d positions in the generated map)
int sim_seed_state_init = 0;
/// Seed for calibration perturbations. Change this to perturb by different random values if perturbations are enabled.
int sim_seed_preturb = 0;
/// Measurement noise seed. This should be incremented for each run in the Monte-Carlo simulation to generate the same true measurements,
/// but diffferent noise values.
int sim_seed_measurements = 0;
/// If we should perturb the calibration that the estimator starts with
bool sim_do_perturbation = false;
/// Path to the trajectory we will b-spline and simulate on. Should be time(s),pos(xyz),ori(xyzw) format.
std::string sim_traj_path = "../ov_data/sim/udel_gore.txt";
/// We will start simulating after we have moved this much along the b-spline. This prevents static starts as we init from groundtruth in
/// simulation.
double sim_distance_threshold = 1.2;
/// Frequency (Hz) that we will simulate our cameras
double sim_freq_cam = 10.0;
/// Frequency (Hz) that we will simulate our inertial measurement unit
double sim_freq_imu = 400.0;
/// Feature distance we generate features from (minimum)
double sim_min_feature_gen_distance = 5;
/// Feature distance we generate features from (maximum)
double sim_max_feature_gen_distance = 10;
/**
* @brief This function will load print out all simulated parameters.
* This allows for visual checking that everything was loaded properly from ROS/CMD parsers.
*
* @param parser If not null, this parser will be used to load our parameters
*/
void print_and_load_simulation(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
if (parser != nullptr) {
parser->parse_config("sim_seed_state_init", sim_seed_state_init);
parser->parse_config("sim_seed_preturb", sim_seed_preturb);
parser->parse_config("sim_seed_measurements", sim_seed_measurements);
parser->parse_config("sim_do_perturbation", sim_do_perturbation);
parser->parse_config("sim_traj_path", sim_traj_path);
parser->parse_config("sim_distance_threshold", sim_distance_threshold);
parser->parse_config("sim_freq_cam", sim_freq_cam);
parser->parse_config("sim_freq_imu", sim_freq_imu);
parser->parse_config("sim_min_feature_gen_dist", sim_min_feature_gen_distance);
parser->parse_config("sim_max_feature_gen_dist", sim_max_feature_gen_distance);
}
PRINT_DEBUG("SIMULATION PARAMETERS:\n");
PRINT_WARNING(BOLDRED " - state init seed: %d \n" RESET, sim_seed_state_init);
PRINT_WARNING(BOLDRED " - perturb seed: %d \n" RESET, sim_seed_preturb);
PRINT_WARNING(BOLDRED " - measurement seed: %d \n" RESET, sim_seed_measurements);
PRINT_WARNING(BOLDRED " - do perturb?: %d\n" RESET, sim_do_perturbation);
PRINT_DEBUG(" - traj path: %s\n", sim_traj_path.c_str());
PRINT_DEBUG(" - dist thresh: %.2f\n", sim_distance_threshold);
PRINT_DEBUG(" - cam feq: %.2f\n", sim_freq_cam);
PRINT_DEBUG(" - imu feq: %.2f\n", sim_freq_imu);
PRINT_DEBUG(" - min feat dist: %.2f\n", sim_min_feature_gen_distance);
PRINT_DEBUG(" - max feat dist: %.2f\n", sim_max_feature_gen_distance);
}
};
} // namespace ov_init
#endif // OV_INIT_INERTIALINITIALIZEROPTIONS_H

View File

@@ -0,0 +1,500 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Simulator.h"
using namespace ov_core;
using namespace ov_init;
Simulator::Simulator(InertialInitializerOptions &params_) {

  //===============================================================
  //===============================================================

  // Nice startup message
  PRINT_DEBUG("=======================================\n");
  PRINT_DEBUG("VISUAL-INERTIAL SIMULATOR STARTING\n");
  PRINT_DEBUG("=======================================\n");

  // Store a copy of our params
  // NOTE: We need to explicitly create a copy of our shared pointers to the camera objects
  // NOTE: Otherwise if we perturb it would also change our "true" parameters
  this->params = params_;
  params.camera_intrinsics.clear();
  for (auto const &tmp : params_.camera_intrinsics) {
    // Clone each camera model as its concrete type (equidistant vs radtan),
    // then copy over the calibration values from the caller's object
    auto tmp_cast = std::dynamic_pointer_cast<ov_core::CamEqui>(tmp.second);
    if (tmp_cast != nullptr) {
      params.camera_intrinsics.insert({tmp.first, std::make_shared<ov_core::CamEqui>(tmp.second->w(), tmp.second->h())});
      params.camera_intrinsics.at(tmp.first)->set_value(params_.camera_intrinsics.at(tmp.first)->get_value());
    } else {
      params.camera_intrinsics.insert({tmp.first, std::make_shared<ov_core::CamRadtan>(tmp.second->w(), tmp.second->h())});
      params.camera_intrinsics.at(tmp.first)->set_value(params_.camera_intrinsics.at(tmp.first)->get_value());
    }
  }

  // Load the groundtruth trajectory and its spline
  DatasetReader::load_simulated_trajectory(params.sim_traj_path, traj_data);
  spline.feed_trajectory(traj_data);

  // Set all our timestamps as starting from the minimum spline time
  timestamp = spline.get_start_time();
  timestamp_last_imu = spline.get_start_time();
  timestamp_last_cam = spline.get_start_time();

  // Get the pose at the current timestep
  Eigen::Matrix3d R_GtoI_init;
  Eigen::Vector3d p_IinG_init;
  bool success_pose_init = spline.get_pose(timestamp, R_GtoI_init, p_IinG_init);
  if (!success_pose_init) {
    PRINT_ERROR(RED "[SIM]: unable to find the first pose in the spline...\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Find the timestamp that we move enough to be considered "moved"
  // (accumulated path length along the spline exceeds the distance threshold)
  double distance = 0.0;
  double distancethreshold = params.sim_distance_threshold;
  while (true) {

    // Get the pose at the current timestep
    Eigen::Matrix3d R_GtoI;
    Eigen::Vector3d p_IinG;
    bool success_pose = spline.get_pose(timestamp, R_GtoI, p_IinG);

    // Check if it fails (we ran off the end of the trajectory without moving enough)
    if (!success_pose) {
      PRINT_ERROR(RED "[SIM]: unable to find jolt in the groundtruth data to initialize at\n" RESET);
      std::exit(EXIT_FAILURE);
    }

    // Append to our scalar distance
    distance += (p_IinG - p_IinG_init).norm();
    p_IinG_init = p_IinG;

    // Now check if we have an acceleration, else move forward in time
    // (all three clocks advance together by one camera period so the sensors stay in sync)
    if (distance > distancethreshold) {
      break;
    } else {
      timestamp += 1.0 / params.sim_freq_cam;
      timestamp_last_imu += 1.0 / params.sim_freq_cam;
      timestamp_last_cam += 1.0 / params.sim_freq_cam;
    }
  }
  PRINT_DEBUG("[SIM]: moved %.3f seconds into the dataset where it starts moving\n", timestamp - spline.get_start_time());

  // Append the current true bias to our history
  // NOTE(review): two entries (one IMU period apart) are pushed — presumably so later
  // bias lookups have a bracketing interval at the start time; confirm against consumers
  hist_true_bias_time.push_back(timestamp_last_imu - 1.0 / params.sim_freq_imu);
  hist_true_bias_accel.push_back(true_bias_accel);
  hist_true_bias_gyro.push_back(true_bias_gyro);
  hist_true_bias_time.push_back(timestamp_last_imu);
  hist_true_bias_accel.push_back(true_bias_accel);
  hist_true_bias_gyro.push_back(true_bias_gyro);

  // Our simulation is running
  is_running = true;

  //===============================================================
  //===============================================================

  // Load the seeds for the random number generators
  // (constructing std::mt19937 with the seed and then calling seed() again with the
  // same value is redundant but harmless — both produce the same generator state)
  gen_state_init = std::mt19937(params.sim_seed_state_init);
  gen_state_init.seed(params.sim_seed_state_init);
  gen_state_perturb = std::mt19937(params.sim_seed_preturb);
  gen_state_perturb.seed(params.sim_seed_preturb);
  gen_meas_imu = std::mt19937(params.sim_seed_measurements);
  gen_meas_imu.seed(params.sim_seed_measurements);

  // Create generator for our camera
  // (every camera gets the same measurement seed, so camera i's noise stream is
  // reproducible regardless of how many cameras are simulated)
  for (int i = 0; i < params.num_cameras; i++) {
    gen_meas_cams.push_back(std::mt19937(params.sim_seed_measurements));
    gen_meas_cams.at(i).seed(params.sim_seed_measurements);
  }

  //===============================================================
  //===============================================================

  // Perturb all calibration if we should
  if (params.sim_do_perturbation) {

    // Do the perturbation (note: perturbs the caller's params_, not our "true" copy)
    perturb_parameters(params_);

    // Debug print simulation parameters
    params.print_and_load_noise();
    params.print_and_load_state();
    params.print_and_load_simulation();
  }

  //===============================================================
  //===============================================================

  // We will create synthetic camera frames and ensure that each has enough features
  // double dt = 0.25/freq_cam;
  double dt = 0.25;
  size_t mapsize = featmap.size();
  PRINT_DEBUG("[SIM]: Generating map features at %d rate\n", (int)(1.0 / dt));

  // Loop through each camera
  // NOTE: we loop through cameras here so that the feature map for camera 1 will always be the same
  // NOTE: thus when we add more cameras the first camera should get the same measurements
  for (int i = 0; i < params.num_cameras; i++) {

    // Reset the start time
    double time_synth = spline.get_start_time();

    // Loop through each pose and generate our feature map in them!!!!
    while (true) {

      // Get the pose at the current timestep
      Eigen::Matrix3d R_GtoI;
      Eigen::Vector3d p_IinG;
      bool success_pose = spline.get_pose(time_synth, R_GtoI, p_IinG);

      // We have finished generating features (walked off the end of the spline)
      if (!success_pose)
        break;

      // Get the uv features for this frame
      std::vector<std::pair<size_t, Eigen::VectorXf>> uvs = project_pointcloud(R_GtoI, p_IinG, i, featmap);

      // If we do not have enough, generate more so each frame sees >= init_max_features
      if ((int)uvs.size() < params.init_max_features) {
        generate_points(R_GtoI, p_IinG, i, featmap, params.init_max_features - (int)uvs.size());
      }

      // Move forward in time
      time_synth += dt;
    }

    // Debug print
    PRINT_DEBUG("[SIM]: Generated %d map features in total over %d frames (camera %d)\n", (int)(featmap.size() - mapsize),
                (int)((time_synth - spline.get_start_time()) / dt), i);
    mapsize = featmap.size();
  }

  // Nice sleep so the user can look at the printout
  sleep(1);
}
void Simulator::perturb_parameters(InertialInitializerOptions &params_) {

  // Single standard-normal generator used for every perturbation below
  std::normal_distribution<double> w(0, 1);

  // Randomize the starting IMU biases.
  // NOTE: accel axes are drawn before gyro axes so the sequence of random
  // draws (and thus the result for a fixed seed) matches prior behavior.
  for (int j = 0; j < 3; j++) {
    true_bias_accel(j) = 0.08 * w(gen_state_perturb);
  }
  for (int j = 0; j < 3; j++) {
    true_bias_gyro(j) = 0.01 * w(gen_state_perturb);
  }

  // Camera IMU offset (perturbation currently disabled)
  // params_.calib_camimu_dt = 0.005 * w(gen_state_perturb) + params.calib_camimu_dt;

  // Camera intrinsics and extrinsics (perturbation currently disabled)
  for (int i = 0; i < params_.num_cameras; i++) {

    // Camera intrinsic properties (k1, k2, p1, p2, r1, r2, r3, r4)
    // Eigen::MatrixXd intrinsics = params_.camera_intrinsics.at(i)->get_value();
    // for (int r = 0; r < 4; r++) {
    //   intrinsics(r) += 1.0 * w(gen_state);
    // }
    // for (int r = 4; r < 8; r++) {
    //   intrinsics(r) += 0.005 * w(gen_state);
    // }
    // params_.camera_intrinsics.at(i)->set_value(intrinsics);

    // Our camera extrinsics transform (orientation)
    // Eigen::Vector3d w_vec;
    // w_vec << 0.001 * w(gen_state_perturb), 0.001 * w(gen_state_perturb), 0.001 * w(gen_state_perturb);
    // params_.camera_extrinsics.at(i).block(0, 0, 4, 1) =
    //     rot_2_quat(exp_so3(w_vec) * quat_2_Rot(params.camera_extrinsics.at(i).block(0, 0, 4, 1)));

    // Our camera extrinsics transform (position)
    // for (int r = 4; r < 7; r++) {
    //   params_.camera_extrinsics.at(i)(r) = 0.005 * w(gen_state_perturb) + params.camera_extrinsics.at(i)(r);
    // }
  }
}
bool Simulator::get_state(double desired_time, Eigen::Matrix<double, 17, 1> &imustate) {

  // Default state: all zeros with a unit quaternion (w-component at index 4)
  imustate.setZero();
  imustate(4) = 1;

  // Current state values
  Eigen::Matrix3d R_GtoI;
  Eigen::Vector3d p_IinG, w_IinI, v_IinG;

  // Get the pose and velocities from the spline at the desired time
  bool success_vel = spline.get_velocity(desired_time, R_GtoI, p_IinG, w_IinI, v_IinG);

  // Find the pair of recorded true-bias timestamps that bound the desired time
  // BUGFIX: loop with `i + 1 < size()` instead of `i < size() - 1` so an empty
  // history cannot underflow the unsigned subtraction and throw on .at(0);
  // we now cleanly return false in that case.
  bool success_bias = false;
  size_t id_loc = 0;
  for (size_t i = 0; i + 1 < hist_true_bias_time.size(); i++) {
    if (hist_true_bias_time.at(i) < desired_time && hist_true_bias_time.at(i + 1) >= desired_time) {
      id_loc = i;
      success_bias = true;
      break;
    }
  }

  // If failed, then that means we don't have any more spline or bias data
  if (!success_vel || !success_bias) {
    return false;
  }

  // Linearly interpolate our biases (they are recorded at every IMU timestep)
  double lambda = (desired_time - hist_true_bias_time.at(id_loc)) / (hist_true_bias_time.at(id_loc + 1) - hist_true_bias_time.at(id_loc));
  Eigen::Vector3d true_bg_interp = (1 - lambda) * hist_true_bias_gyro.at(id_loc) + lambda * hist_true_bias_gyro.at(id_loc + 1);
  Eigen::Vector3d true_ba_interp = (1 - lambda) * hist_true_bias_accel.at(id_loc) + lambda * hist_true_bias_accel.at(id_loc + 1);

  // Pack the state in the MSCKF ordering: [time, q_GtoI, p_IinG, v_IinG, b_gyro, b_accel]
  imustate(0, 0) = desired_time;
  imustate.block(1, 0, 4, 1) = rot_2_quat(R_GtoI);
  imustate.block(5, 0, 3, 1) = p_IinG;
  imustate.block(8, 0, 3, 1) = v_IinG;
  imustate.block(11, 0, 3, 1) = true_bg_interp;
  imustate.block(14, 0, 3, 1) = true_ba_interp;
  return true;
}
bool Simulator::get_next_imu(double &time_imu, Eigen::Vector3d &wm, Eigen::Vector3d &am) {
// Return if the camera measurement should go before us
if (timestamp_last_cam + 1.0 / params.sim_freq_cam < timestamp_last_imu + 1.0 / params.sim_freq_imu)
return false;
// Else lets do a new measurement!!!
timestamp_last_imu += 1.0 / params.sim_freq_imu;
timestamp = timestamp_last_imu;
time_imu = timestamp_last_imu;
// Current state values
Eigen::Matrix3d R_GtoI;
Eigen::Vector3d p_IinG, w_IinI, v_IinG, alpha_IinI, a_IinG;
// Get the pose, velocity, and acceleration
// NOTE: we get the acceleration between our two IMU
// NOTE: this is because we are using a constant measurement model for integration
// bool success_accel = spline.get_acceleration(timestamp+0.5/freq_imu, R_GtoI, p_IinG, w_IinI, v_IinG, alpha_IinI, a_IinG);
bool success_accel = spline.get_acceleration(timestamp, R_GtoI, p_IinG, w_IinI, v_IinG, alpha_IinI, a_IinG);
// If failed, then that means we don't have any more spline
// Thus we should stop the simulation
if (!success_accel) {
is_running = false;
return false;
}
// Transform omega and linear acceleration into imu frame
Eigen::Vector3d omega_inI = w_IinI;
Eigen::Vector3d gravity;
gravity << 0.0, 0.0, params.gravity_mag;
Eigen::Vector3d accel_inI = R_GtoI * (a_IinG + gravity);
// Now add noise to these measurements
double dt = 1.0 / params.sim_freq_imu;
std::normal_distribution<double> w(0, 1);
wm(0) = omega_inI(0) + true_bias_gyro(0) + params.sigma_w / std::sqrt(dt) * w(gen_meas_imu);
wm(1) = omega_inI(1) + true_bias_gyro(1) + params.sigma_w / std::sqrt(dt) * w(gen_meas_imu);
wm(2) = omega_inI(2) + true_bias_gyro(2) + params.sigma_w / std::sqrt(dt) * w(gen_meas_imu);
am(0) = accel_inI(0) + true_bias_accel(0) + params.sigma_a / std::sqrt(dt) * w(gen_meas_imu);
am(1) = accel_inI(1) + true_bias_accel(1) + params.sigma_a / std::sqrt(dt) * w(gen_meas_imu);
am(2) = accel_inI(2) + true_bias_accel(2) + params.sigma_a / std::sqrt(dt) * w(gen_meas_imu);
// Move the biases forward in time
true_bias_gyro(0) += params.sigma_wb * std::sqrt(dt) * w(gen_meas_imu);
true_bias_gyro(1) += params.sigma_wb * std::sqrt(dt) * w(gen_meas_imu);
true_bias_gyro(2) += params.sigma_wb * std::sqrt(dt) * w(gen_meas_imu);
true_bias_accel(0) += params.sigma_ab * std::sqrt(dt) * w(gen_meas_imu);
true_bias_accel(1) += params.sigma_ab * std::sqrt(dt) * w(gen_meas_imu);
true_bias_accel(2) += params.sigma_ab * std::sqrt(dt) * w(gen_meas_imu);
// Append the current true bias to our history
hist_true_bias_time.push_back(timestamp_last_imu);
hist_true_bias_gyro.push_back(true_bias_gyro);
hist_true_bias_accel.push_back(true_bias_accel);
// Return success
return true;
}
bool Simulator::get_next_cam(double &time_cam, std::vector<int> &camids,
                             std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats) {

  // Return if the imu measurement should go before us (keeps the two streams interleaved in time order)
  if (timestamp_last_imu + 1.0 / params.sim_freq_imu < timestamp_last_cam + 1.0 / params.sim_freq_cam)
    return false;

  // Else lets do a new measurement!!!
  timestamp_last_cam += 1.0 / params.sim_freq_cam;
  timestamp = timestamp_last_cam;
  // Report the measurement in the camera clock (true time minus the camera-IMU time offset)
  time_cam = timestamp_last_cam - params.calib_camimu_dt;

  // Get the IMU pose at the current timestep from the spline
  Eigen::Matrix3d R_GtoI;
  Eigen::Vector3d p_IinG;
  bool success_pose = spline.get_pose(timestamp, R_GtoI, p_IinG);

  // We have run off the end of the spline, so the simulation is finished
  if (!success_pose) {
    is_running = false;
    return false;
  }

  // Loop through each camera
  for (int i = 0; i < params.num_cameras; i++) {

    // Get the true (noise-free) uv projections of the map for this frame
    std::vector<std::pair<size_t, Eigen::VectorXf>> uvs = project_pointcloud(R_GtoI, p_IinG, i, featmap);

    // Warn if we could not project enough features
    // (unlike the constructor, no new map points are generated here)
    if ((int)uvs.size() < params.init_max_features) {
      PRINT_WARNING(YELLOW "[SIM]: cam %d was unable to generate enough features (%d < %d projections)\n" RESET, (int)i, (int)uvs.size(),
                    params.init_max_features);
    }

    // If we have more than the max, only keep the first init_max_features of them
    if ((int)uvs.size() > params.init_max_features) {
      uvs.erase(uvs.begin() + params.init_max_features, uvs.end());
    }

    // Offset feature ids by the map size so each camera reports unique feature ids (but the same underlying map)
    // Only do this if we are not enforcing stereo constraints between all our cameras
    for (size_t f = 0; f < uvs.size() && !params.use_stereo; f++) {
      uvs.at(f).first += i * featmap.size();
    }

    // Corrupt each uv measurement with white pixel noise
    // (a per-camera generator keeps each camera's noise sequence reproducible)
    std::normal_distribution<double> w(0, 1);
    for (size_t j = 0; j < uvs.size(); j++) {
      uvs.at(j).second(0) += params.sigma_pix * w(gen_meas_cams.at(i));
      uvs.at(j).second(1) += params.sigma_pix * w(gen_meas_cams.at(i));
    }

    // Push back for this camera
    feats.push_back(uvs);
    camids.push_back(i);
  }

  // Return success
  return true;
}
std::vector<std::pair<size_t, Eigen::VectorXf>> Simulator::project_pointcloud(const Eigen::Matrix3d &R_GtoI, const Eigen::Vector3d &p_IinG,
                                                                              int camid,
                                                                              const std::unordered_map<size_t, Eigen::Vector3d> &feats) {

  // Sanity-check the camera id and that the calibration sizes are consistent
  assert(camid < params.num_cameras);
  assert((int)params.camera_intrinsics.size() == params.num_cameras);
  assert((int)params.camera_extrinsics.size() == params.num_cameras);

  // Calibration for the requested camera: IMU-to-camera rotation, position, and intrinsics model
  Eigen::Matrix<double, 3, 3> R_ItoC = quat_2_Rot(params.camera_extrinsics.at(camid).block(0, 0, 4, 1));
  Eigen::Matrix<double, 3, 1> p_IinC = params.camera_extrinsics.at(camid).block(4, 0, 3, 1);
  std::shared_ptr<ov_core::CamBase> camera = params.camera_intrinsics.at(camid);

  // Collect the distorted pixel location of every feature visible in this camera
  std::vector<std::pair<size_t, Eigen::VectorXf>> uvs;
  for (const auto &feat : feats) {

    // Express the feature first in the IMU frame, then in the camera frame
    Eigen::Vector3d p_FinI = R_GtoI * (feat.second - p_IinG);
    Eigen::Vector3d p_FinC = R_ItoC * p_FinI + p_IinC;

    // Reject points too close to / behind the camera, or beyond the generation distance
    if (p_FinC(2) > params.sim_max_feature_gen_distance || p_FinC(2) < 0.1)
      continue;

    // Normalized image coordinates, then apply this camera's distortion model
    Eigen::Vector2f uv_norm((float)(p_FinC(0) / p_FinC(2)), (float)(p_FinC(1) / p_FinC(2)));
    Eigen::Vector2f uv_dist = camera->distort_f(uv_norm);

    // Keep only projections that land inside the image bounds
    if (uv_dist(0) < 0 || uv_dist(0) > camera->w() || uv_dist(1) < 0 || uv_dist(1) > camera->h())
      continue;
    uvs.push_back({feat.first, uv_dist});
  }

  // Return our projections
  return uvs;
}
void Simulator::generate_points(const Eigen::Matrix3d &R_GtoI, const Eigen::Vector3d &p_IinG, int camid,
                                std::unordered_map<size_t, Eigen::Vector3d> &feats, int numpts) {

  // Assert we have a valid camera id and consistent calibration sizes
  assert(camid < params.num_cameras);
  assert((int)params.camera_intrinsics.size() == params.num_cameras);
  assert((int)params.camera_extrinsics.size() == params.num_cameras);

  // Grab our extrinsic (IMU-to-camera) and intrinsic values for this camera
  Eigen::Matrix<double, 3, 3> R_ItoC = quat_2_Rot(params.camera_extrinsics.at(camid).block(0, 0, 4, 1));
  Eigen::Matrix<double, 3, 1> p_IinC = params.camera_extrinsics.at(camid).block(4, 0, 3, 1);
  std::shared_ptr<ov_core::CamBase> camera = params.camera_intrinsics.at(camid);

  // Distributions over the full image plane and the allowed depth range
  // (hoisted out of the loop since their parameters never change)
  std::uniform_real_distribution<double> gen_u(0, camera->w());
  std::uniform_real_distribution<double> gen_v(0, camera->h());
  std::uniform_real_distribution<double> gen_depth(params.sim_min_feature_gen_distance, params.sim_max_feature_gen_distance);

  // Generate the desired number of features
  for (int i = 0; i < numpts; i++) {

    // Uniformly randomly generate a raw (distorted) pixel within our fov
    double u_dist = gen_u(gen_state_init);
    double v_dist = gen_v(gen_state_init);

    // Convert to opencv format
    cv::Point2f uv_dist((float)u_dist, (float)v_dist);

    // Undistort this point to our normalized coordinates
    cv::Point2f uv_norm;
    uv_norm = camera->undistort_cv(uv_dist);

    // Generate a random depth and back-project to a 3d point in the camera frame
    double depth = gen_depth(gen_state_init);
    Eigen::Vector3d bearing;
    bearing << uv_norm.x, uv_norm.y, 1;
    Eigen::Vector3d p_FinC;
    p_FinC = depth * bearing;

    // Move to the global frame of reference
    Eigen::Vector3d p_FinI = R_ItoC.transpose() * (p_FinC - p_IinC);
    Eigen::Vector3d p_FinG = R_GtoI.transpose() * p_FinI + p_IinG;

    // Append this as a new feature
    // BUGFIX: insert into the passed-in output map `feats` (documented as
    // "[out] Map we will append new features to") instead of always writing to
    // the member `featmap`; existing callers pass featmap here, so behavior is unchanged.
    feats.insert({id_map, p_FinG});
    id_map++;
  }
}

208
ov_init/src/sim/Simulator.h Normal file
View File

@@ -0,0 +1,208 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_SIMULATOR_H
#define OV_INIT_SIMULATOR_H
#include <Eigen/Eigen>
#include <fstream>
#include <opencv2/core/core.hpp>
#include <random>
#include <sstream>
#include <string>
#include <unordered_map>
#include "cam/CamBase.h"
#include "cam/CamEqui.h"
#include "cam/CamRadtan.h"
#include "sim/BsplineSE3.h"
#include "utils/colors.h"
#include "utils/dataset_reader.h"
#include "init/InertialInitializerOptions.h"
namespace ov_init {
/**
 * @brief Master simulator class that generates visual-inertial measurements
 *
 * Given a trajectory this will generate a SE(3) @ref ov_core::BsplineSE3 for that trajectory.
 * This allows us to get the inertial measurement information at each timestep during this trajectory.
 * After creating the bspline we will generate an environmental feature map which will be used as our feature measurements.
 * This map will be projected into the frame at each timestep to get our "raw" uv measurements.
 * We inject bias and white noises into our inertial readings while adding our white noise to the uv measurements also.
 * The user should specify the sensor rates that they desire along with the seeds of the random number generators.
 *
 */
class Simulator {

public:
  /**
   * @brief Default constructor, will load all configuration variables
   * @param params_ InertialInitializer parameters. Should have already been loaded from cmd.
   */
  Simulator(InertialInitializerOptions &params_);

  /**
   * @brief Will get a set of perturbed parameters
   *
   * NOTE(review): the current implementation only randomizes the IMU biases;
   * the camera intrinsic/extrinsic perturbations are commented out in the source.
   *
   * @param params_ Parameters we will perturb
   */
  void perturb_parameters(InertialInitializerOptions &params_);

  /**
   * @brief Returns if we are actively simulating
   * @return True if we still have simulation data
   */
  bool ok() { return is_running; }

  /**
   * @brief Gets the timestamp we have simulated up to
   * @return Timestamp
   */
  double current_timestamp() { return timestamp; }

  /**
   * @brief Get the simulation state at a specified timestep
   * @param desired_time Timestamp we want to get the state at
   * @param imustate State in the MSCKF ordering: [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel]
   * @return True if we have a state
   */
  bool get_state(double desired_time, Eigen::Matrix<double, 17, 1> &imustate);

  /**
   * @brief Gets the next inertial reading if we have one.
   * @param time_imu Time that this measurement occurred at
   * @param wm Angular velocity measurement in the local IMU frame
   * @param am Linear acceleration measurement in the local IMU frame
   * @return True if we have a measurement
   */
  bool get_next_imu(double &time_imu, Eigen::Vector3d &wm, Eigen::Vector3d &am);

  /**
   * @brief Gets the next camera reading if we have one.
   * @param time_cam Time that this measurement occurred at
   * @param camids Camera ids that the corresponding vectors match
   * @param feats Noisy uv measurements and ids for the returned time
   * @return True if we have a measurement
   */
  bool get_next_cam(double &time_cam, std::vector<int> &camids, std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats);

  /// Returns the true 3d map of features (feature id -> 3d position in the global frame)
  std::unordered_map<size_t, Eigen::Vector3d> get_map() { return featmap; }

  /// Returns the true 3d map of features as a vector (feature ids dropped)
  std::vector<Eigen::Vector3d> get_map_vec() {
    std::vector<Eigen::Vector3d> feats;
    for (auto const &feat : featmap)
      feats.push_back(feat.second);
    return feats;
  }

  /// Access function to get the true parameters (i.e. calibration and settings)
  InertialInitializerOptions get_true_parameters() { return params; }

protected:
  /**
   * @brief Projects the passed map features into the desired camera frame.
   * @param R_GtoI Orientation of the IMU pose
   * @param p_IinG Position of the IMU pose
   * @param camid Camera id of the camera sensor we want to project into
   * @param feats Our set of 3d features
   * @return True distorted raw image measurements and their ids for the specified camera
   */
  std::vector<std::pair<size_t, Eigen::VectorXf>> project_pointcloud(const Eigen::Matrix3d &R_GtoI, const Eigen::Vector3d &p_IinG,
                                                                     int camid, const std::unordered_map<size_t, Eigen::Vector3d> &feats);

  /**
   * @brief Will generate points in the fov of the specified camera
   * @param R_GtoI Orientation of the IMU pose
   * @param p_IinG Position of the IMU pose
   * @param camid Camera id of the camera sensor we want to project into
   * @param[out] feats Map we will append new features to
   * @param numpts Number of points we should generate
   */
  void generate_points(const Eigen::Matrix3d &R_GtoI, const Eigen::Vector3d &p_IinG, int camid,
                       std::unordered_map<size_t, Eigen::Vector3d> &feats, int numpts);

  //===================================================================
  // Configuration variables
  //===================================================================

  /// True params (a copy of the parsed ones)
  InertialInitializerOptions params;

  //===================================================================
  // State related variables
  //===================================================================

  /// Our loaded trajectory data (timestamp(s), q_GtoI, p_IinG)
  std::vector<Eigen::VectorXd> traj_data;

  /// Our b-spline trajectory
  ov_core::BsplineSE3 spline;

  /// Next unique id to assign to a generated map feature
  size_t id_map = 0;

  /// Our map of 3d features (feature id -> 3d position in the global frame)
  std::unordered_map<size_t, Eigen::Vector3d> featmap;

  /// Mersenne twister PRNG for measurements (IMU)
  std::mt19937 gen_meas_imu;

  /// Mersenne twister PRNG for measurements (CAMERAS), one generator per camera
  std::vector<std::mt19937> gen_meas_cams;

  /// Mersenne twister PRNG for state initialization
  std::mt19937 gen_state_init;

  /// Mersenne twister PRNG for state perturbations
  std::mt19937 gen_state_perturb;

  /// If our simulation is running
  bool is_running;

  //===================================================================
  // Simulation specific variables
  //===================================================================

  /// Current timestamp of the system
  double timestamp;

  /// Last time we had an IMU reading
  double timestamp_last_imu;

  /// Last time we had a CAMERA reading
  double timestamp_last_cam;

  /// Our running acceleration bias
  Eigen::Vector3d true_bias_accel = Eigen::Vector3d::Zero();

  /// Our running gyroscope bias
  Eigen::Vector3d true_bias_gyro = Eigen::Vector3d::Zero();

  // Our history of true biases (recorded at every IMU sample, used by get_state to interpolate)
  std::vector<double> hist_true_bias_time;
  std::vector<Eigen::Vector3d> hist_true_bias_accel;
  std::vector<Eigen::Vector3d> hist_true_bias_gyro;
};
} // namespace ov_init
#endif // OV_INIT_SIMULATOR_H

View File

@@ -0,0 +1,157 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "StaticInitializer.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_init;
bool StaticInitializer::initialize(double &timestamp, Eigen::MatrixXd &covariance, std::vector<std::shared_ptr<Type>> &order,
                                   std::shared_ptr<IMU> t_imu, bool wait_for_jerk) {

  // Return if we don't have any measurements
  if (imu_data->size() < 2) {
    return false;
  }

  // Newest and oldest imu timestamp
  double newesttime = imu_data->at(imu_data->size() - 1).timestamp;
  double oldesttime = imu_data->at(0).timestamp;

  // Return if we don't have enough readings to span both half-windows
  if (newesttime - oldesttime < params.init_window_time) {
    PRINT_DEBUG(YELLOW "[init-s]: unable to select window of IMU readings, not enough readings\n" RESET);
    return false;
  }

  // Split the most recent init_window_time seconds of readings into two halves:
  // window_1to0 is the newest half, window_2to1 the half just before it
  std::vector<ImuData> window_1to0, window_2to1;
  for (const ImuData &data : *imu_data) {
    if (data.timestamp > newesttime - 0.5 * params.init_window_time && data.timestamp <= newesttime - 0.0 * params.init_window_time) {
      window_1to0.push_back(data);
    }
    if (data.timestamp > newesttime - 1.0 * params.init_window_time && data.timestamp <= newesttime - 0.5 * params.init_window_time) {
      window_2to1.push_back(data);
    }
  }

  // Return if both of these failed
  if (window_1to0.size() < 2 || window_2to1.size() < 2) {
    PRINT_DEBUG(YELLOW "[init-s]: unable to select window of IMU readings, not enough readings\n" RESET);
    return false;
  }

  // Calculate the sample variance for the newest window from 1 to 0
  // NOTE: despite the name, a_var_1to0 ends up holding the sample *standard deviation*
  // (the sqrt is applied below); it is compared against init_imu_thresh on that scale
  Eigen::Vector3d a_avg_1to0 = Eigen::Vector3d::Zero();
  for (const ImuData &data : window_1to0) {
    a_avg_1to0 += data.am;
  }
  a_avg_1to0 /= (int)window_1to0.size();
  double a_var_1to0 = 0;
  for (const ImuData &data : window_1to0) {
    a_var_1to0 += (data.am - a_avg_1to0).dot(data.am - a_avg_1to0);
  }
  a_var_1to0 = std::sqrt(a_var_1to0 / ((int)window_1to0.size() - 1));

  // Calculate the sample variance for the second newest window from 2 to 1
  // (also average the gyro here, used below for the gyro bias estimate)
  Eigen::Vector3d a_avg_2to1 = Eigen::Vector3d::Zero();
  Eigen::Vector3d w_avg_2to1 = Eigen::Vector3d::Zero();
  for (const ImuData &data : window_2to1) {
    a_avg_2to1 += data.am;
    w_avg_2to1 += data.wm;
  }
  a_avg_2to1 = a_avg_2to1 / window_2to1.size();
  w_avg_2to1 = w_avg_2to1 / window_2to1.size();
  double a_var_2to1 = 0;
  for (const ImuData &data : window_2to1) {
    a_var_2to1 += (data.am - a_avg_2to1).dot(data.am - a_avg_2to1);
  }
  a_var_2to1 = std::sqrt(a_var_2to1 / ((int)window_2to1.size() - 1));
  // PRINT_DEBUG(BOLDGREEN "[init-s]: IMU excitation, %.4f,%.4f\n" RESET, a_var_1to0, a_var_2to1);

  // If it is below the threshold and we want to wait till we detect a jerk
  if (a_var_1to0 < params.init_imu_thresh && wait_for_jerk) {
    PRINT_DEBUG(YELLOW "[init-s]: no IMU excitation, below threshold %.4f < %.4f\n" RESET, a_var_1to0, params.init_imu_thresh);
    return false;
  }

  // We should also check that the old state was below the threshold!
  // This is the case when we have started up moving, and thus we need to wait for a period of stationary motion
  if (a_var_2to1 > params.init_imu_thresh && wait_for_jerk) {
    // BUGFIX: "to much" -> "too much" in the log message
    PRINT_DEBUG(YELLOW "[init-s]: too much IMU excitation, above threshold %.4f > %.4f\n" RESET, a_var_2to1, params.init_imu_thresh);
    return false;
  }

  // If it is above the threshold and we are not waiting for a jerk
  // Then we are not stationary (i.e. moving) so we should wait till we are
  if ((a_var_1to0 > params.init_imu_thresh || a_var_2to1 > params.init_imu_thresh) && !wait_for_jerk) {
    // BUGFIX: "to much" -> "too much" in the log message
    PRINT_DEBUG(YELLOW "[init-s]: too much IMU excitation, above threshold %.4f,%.4f > %.4f\n" RESET, a_var_1to0, a_var_2to1,
                params.init_imu_thresh);
    return false;
  }

  // Get rotation with z axis aligned with -g (z_in_G=0,0,1)
  Eigen::Vector3d z_axis = a_avg_2to1 / a_avg_2to1.norm();
  Eigen::Matrix3d Ro;
  InitializerHelper::gram_schmidt(z_axis, Ro);
  Eigen::Vector4d q_GtoI = rot_2_quat(Ro);

  // Set our biases equal to our noise (subtract our gravity from accelerometer bias)
  Eigen::Vector3d gravity_inG;
  gravity_inG << 0.0, 0.0, params.gravity_mag;
  Eigen::Vector3d bg = w_avg_2to1;
  Eigen::Vector3d ba = a_avg_2to1 - quat_2_Rot(q_GtoI) * gravity_inG;

  // Set our state variables (16-dim [q, p, v, bg, ba]; p and v stay zero)
  timestamp = window_2to1.at(window_2to1.size() - 1).timestamp;
  Eigen::VectorXd imu_state = Eigen::VectorXd::Zero(16);
  imu_state.block(0, 0, 4, 1) = q_GtoI;
  imu_state.block(10, 0, 3, 1) = bg;
  imu_state.block(13, 0, 3, 1) = ba;
  assert(t_imu != nullptr);
  t_imu->set_value(imu_state);
  t_imu->set_fej(imu_state);

  // Create base covariance and its covariance ordering
  order.clear();
  order.push_back(t_imu);
  covariance = 1e-3 * Eigen::MatrixXd::Identity(t_imu->size(), t_imu->size());

  // Make velocity uncertainty a bit bigger
  covariance.block(t_imu->v()->id(), t_imu->v()->id(), 3, 3) *= 2;

  // A VIO system has 4dof unobservable directions which can be arbitrarily picked.
  // This means that on startup, we can fix the yaw and position to be 100 percent known.
  // Thus, after determining the global to current IMU orientation after initialization, we can propagate the global error
  // into the new IMU pose. In this case the position is directly equivalent, but the orientation needs to be propagated.
  covariance(t_imu->q()->id() + 2, t_imu->q()->id() + 2) = 0.0;
  covariance.block(t_imu->p()->id(), t_imu->p()->id(), 3, 3).setZero();

  // Propagate into the current local IMU frame
  // R_GtoI = R_GtoI*R_GtoG -> H = R_GtoI
  Eigen::Matrix3d R_GtoI = quat_2_Rot(q_GtoI);
  covariance.block(t_imu->q()->id(), t_imu->q()->id(), 3, 3) =
      R_GtoI * covariance.block(t_imu->q()->id(), t_imu->q()->id(), 3, 3) * R_GtoI.transpose();

  // Return :D
  return true;
}

View File

@@ -0,0 +1,96 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_STATICINITIALIZER_H
#define OV_INIT_STATICINITIALIZER_H
#include "init/InertialInitializerOptions.h"
#include "utils/helper.h"
#include "feat/FeatureHelper.h"
#include "types/IMU.h"
#include "utils/colors.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
#include "utils/sensor_data.h"
namespace ov_init {
/**
 * @brief Initializer for a static visual-inertial system.
 *
 * This implementation assumes that the imu starts from standing still.
 * To initialize from standstill:
 * 1. Collect all inertial measurements
 * 2. See if within the last window there was a jump in acceleration
 * 3. If the jump is past our threshold we should init (i.e. we have started moving)
 * 4. Use the *previous* window, which should have been stationary, to initialize orientation
 * 5. Return a roll and pitch aligned with gravity and biases.
 *
 */
class StaticInitializer {

public:
  /**
   * @brief Default constructor
   * @param params_ Parameters loaded from either ROS or CMDLINE
   * @param db Feature tracker database with all features in it
   * @param imu_data_ Shared pointer to our IMU vector of historical information
   */
  explicit StaticInitializer(InertialInitializerOptions &params_, std::shared_ptr<ov_core::FeatureDatabase> db,
                             std::shared_ptr<std::vector<ov_core::ImuData>> imu_data_)
      : params(params_), _db(db), imu_data(imu_data_) {}

  /**
   * @brief Try to get the initialized system using just the imu
   *
   * This will check if we have had a large enough jump in our acceleration.
   * If we have then we will use the period of time before this jump to initialize the state.
   * This assumes that our imu is sitting still and is not moving (so this would fail if we are experiencing constant acceleration).
   *
   * In the case that we do not wait for a jump (i.e. `wait_for_jerk` is false), then the system will try to initialize as soon as possible.
   * This is only recommended if you have zero velocity update enabled to handle the stationary cases.
   * To initialize in this case, we need to have the average angular variance be below the set threshold (i.e. we need to be stationary).
   *
   * @param[out] timestamp Timestamp we have initialized the state at
   * @param[out] covariance Calculated covariance of the returned state
   * @param[out] order Order of the covariance matrix
   * @param[out] t_imu Our imu type element
   * @param wait_for_jerk If true we will wait for a "jerk"
   * @return True if we have successfully initialized our system
   */
  bool initialize(double &timestamp, Eigen::MatrixXd &covariance, std::vector<std::shared_ptr<ov_type::Type>> &order,
                  std::shared_ptr<ov_type::IMU> t_imu, bool wait_for_jerk = true);

private:
  /// Initialization parameters
  InertialInitializerOptions params;

  /// Feature tracker database with all features in it (not read by initialize() itself)
  std::shared_ptr<ov_core::FeatureDatabase> _db;

  /// Our history of IMU messages (time, angular, linear)
  std::shared_ptr<std::vector<ov_core::ImuData>> imu_data;
};
} // namespace ov_init
#endif // OV_INIT_STATICINITIALIZER_H

View File

@@ -0,0 +1,355 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <cmath>
#include <csignal>
#include <deque>
#include <iomanip>
#include <sstream>
#include <unistd.h>
#include <vector>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/filesystem.hpp>
#if ROS_AVAILABLE == 1
#include <nav_msgs/Path.h>
#include <ros/ros.h>
#include <sensor_msgs/PointCloud.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>
#endif
#include "dynamic/DynamicInitializer.h"
#include "init/InertialInitializerOptions.h"
#include "sim/Simulator.h"
#include "track/TrackSIM.h"
#include "utils/colors.h"
#include "utils/sensor_data.h"
using namespace ov_init;
// Handler invoked when ctrl-c (SIGINT) is sent to the process
void signal_callback_handler(int signum) {
  // Terminate immediately, forwarding the signal number as the exit code
  std::exit(signum);
}
// taken from ov_eval/src/alignment/AlignUtils.h
// Closed-form best-fit yaw angle extracted from the rotation correlation matrix C
static inline double get_best_yaw(const Eigen::Matrix<double, 3, 3> &C) {
  double sin_term = C(0, 1) - C(1, 0);
  double cos_term = C(0, 0) + C(1, 1);
  // return M_PI_2 - atan2(cos_term, sin_term);
  return atan2(sin_term, cos_term);
}
// taken from ov_eval/src/alignment/AlignUtils.h
// Compute the yaw-and-position transform (R, t) that aligns the estimate frame to groundtruth
void align_posyaw_single(const Eigen::Vector4d &q_es_0, const Eigen::Vector3d &p_es_0, const Eigen::Vector4d &q_gt_0,
                         const Eigen::Vector3d &p_gt_0, Eigen::Matrix3d &R, Eigen::Vector3d &t) {
  // Local-to-global rotations for groundtruth and estimate
  Eigen::Matrix3d R_gt = ov_core::quat_2_Rot(q_gt_0).transpose();
  Eigen::Matrix3d R_es = ov_core::quat_2_Rot(q_es_0).transpose();
  // Best-fit yaw between the two orientations, then build the z-axis rotation
  Eigen::Matrix3d C_R = R_es * R_gt.transpose();
  double yaw = get_best_yaw(C_R);
  R = ov_core::rot_z(yaw);
  // Translation that maps the rotated estimate position onto the groundtruth position
  t.noalias() = p_gt_0 - R * p_es_0;
}
// Main function
int main(int argc, char **argv) {

  // Ensure we have a path, if the user passes it then we should use it
  std::string config_path = "unset_path_to_config.yaml";
  if (argc > 1) {
    config_path = argv[1];
  }

#if ROS_AVAILABLE == 1
  // Launch our ros node
  ros::init(argc, argv, "test_dynamic_init");
  auto nh = std::make_shared<ros::NodeHandle>("~");
  nh->param<std::string>("config_path", config_path, config_path);

  // Topics to publish
  auto pub_pathimu = nh->advertise<nav_msgs::Path>("/ov_msckf/pathimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_pathimu.getTopic().c_str());
  auto pub_pathgt = nh->advertise<nav_msgs::Path>("/ov_msckf/pathgt", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_pathgt.getTopic().c_str());
  auto pub_loop_point = nh->advertise<sensor_msgs::PointCloud>("/ov_msckf/loop_feats", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_loop_point.getTopic().c_str());
  auto pub_points_sim = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_sim", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_sim.getTopic().c_str());
#endif

  // Load the config
  auto parser = std::make_shared<ov_core::YamlParser>(config_path);
#if ROS_AVAILABLE == 1
  parser->set_node_handler(nh);
#endif

  // Verbosity
  std::string verbosity = "INFO";
  parser->parse_config("verbosity", verbosity);
  ov_core::Printer::setPrintLevel(verbosity);

  // Create the simulator
  InertialInitializerOptions params;
  params.print_and_load(parser);
  params.print_and_load_simulation(parser);
  if (!parser->successful()) {
    PRINT_ERROR(RED "unable to parse all parameters, please fix\n" RESET);
    std::exit(EXIT_FAILURE);
  }
  Simulator sim(params);

  // Our initialization class objects
  auto imu_readings = std::make_shared<std::vector<ov_core::ImuData>>();
  auto tracker = std::make_shared<ov_core::TrackSIM>(params.camera_intrinsics, 0);
  auto initializer = std::make_shared<DynamicInitializer>(params, tracker->get_feature_database(), imu_readings);

  //===================================================================================
  //===================================================================================
  //===================================================================================

  // Buffer our camera image
  double buffer_timecam = -1;
  std::vector<int> buffer_camids;
  std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> buffer_feats;

  // Continue to simulate until we have processed all the measurements
  signal(SIGINT, signal_callback_handler);
  while (sim.ok()) {

    // IMU: get the next simulated IMU measurement if we have it
    ov_core::ImuData message_imu;
    bool hasimu = sim.get_next_imu(message_imu.timestamp, message_imu.wm, message_imu.am);
    if (hasimu) {
      imu_readings->push_back(message_imu);
    }

    // CAM: get the next simulated camera uv measurements if we have them
    double time_cam;
    std::vector<int> camids;
    std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> feats;
    bool hascam = sim.get_next_cam(time_cam, camids, feats);
    if (hascam) {

      // Pass to our feature database / tracker
      // NOTE: we always feed the *previous* buffered image so the initializer lags one frame
      if (buffer_timecam != -1) {
        // Feed it
        tracker->feed_measurement_simulation(buffer_timecam, buffer_camids, buffer_feats);
        // Display the resulting tracks
        // cv::Mat img_history;
        // tracker->display_history(img_history, 255, 255, 0, 255, 255, 255);
        // cv::imshow("Track History", img_history);
        // cv::waitKey(1);
      }
      buffer_timecam = time_cam;
      buffer_camids = camids;
      buffer_feats = feats;

      // Return states of our initializer
      double timestamp = -1;
      Eigen::MatrixXd covariance;
      std::vector<std::shared_ptr<ov_type::Type>> order;
      std::shared_ptr<ov_type::IMU> _imu;
      std::map<double, std::shared_ptr<ov_type::PoseJPL>> _clones_IMU;
      std::unordered_map<size_t, std::shared_ptr<ov_type::Landmark>> _features_SLAM;
      std::unordered_map<size_t, std::shared_ptr<ov_type::PoseJPL>> _calib_IMUtoCAM;
      std::unordered_map<size_t, std::shared_ptr<ov_type::Vec>> _cam_intrinsics;

      // First we will try to make sure we have all the data required for our initialization
      boost::posix_time::ptime rT1 = boost::posix_time::microsec_clock::local_time();
      bool success =
          initializer->initialize(timestamp, covariance, order, _imu, _clones_IMU, _features_SLAM, _calib_IMUtoCAM, _cam_intrinsics);
      boost::posix_time::ptime rT2 = boost::posix_time::microsec_clock::local_time();
      double time = (rT2 - rT1).total_microseconds() * 1e-6;
      if (success) {

        // Debug that we finished!
        PRINT_INFO(GREEN "success! got initialized state information (%.4f seconds)\n" RESET, time);

        // First lets align the groundtruth state with the IMU state
        // NOTE: imu biases do not have to be corrected with the pos yaw alignment here...
        // NOTE: get_state() is called *outside* assert() so the lookup still runs in NDEBUG builds
        Eigen::Matrix<double, 17, 1> gt_imu;
        bool found_gt_imu = sim.get_state(timestamp + sim.get_true_parameters().calib_camimu_dt, gt_imu);
        assert(found_gt_imu);
        (void)found_gt_imu;
        Eigen::Matrix3d R_ESTtoGT_imu;
        Eigen::Vector3d t_ESTinGT_imu;
        align_posyaw_single(_imu->quat(), _imu->pos(), gt_imu.block(1, 0, 4, 1), gt_imu.block(5, 0, 3, 1), R_ESTtoGT_imu, t_ESTinGT_imu);
        gt_imu.block(1, 0, 4, 1) = ov_core::quat_multiply(gt_imu.block(1, 0, 4, 1), ov_core::rot_2_quat(R_ESTtoGT_imu));
        gt_imu.block(5, 0, 3, 1) = R_ESTtoGT_imu.transpose() * (gt_imu.block(5, 0, 3, 1) - t_ESTinGT_imu);
        gt_imu.block(8, 0, 3, 1) = R_ESTtoGT_imu.transpose() * gt_imu.block(8, 0, 3, 1);

        // Finally compute the error
        Eigen::Matrix<double, 15, 1> err = Eigen::Matrix<double, 15, 1>::Zero();
        Eigen::Matrix3d R_GtoI_gt = ov_core::quat_2_Rot(gt_imu.block(1, 0, 4, 1));
        Eigen::Matrix3d R_GtoI_hat = _imu->Rot();
        err.block(0, 0, 3, 1) = -ov_core::log_so3(R_GtoI_gt * R_GtoI_hat.transpose());
        err.block(3, 0, 3, 1) = gt_imu.block(5, 0, 3, 1) - _imu->pos();
        err.block(6, 0, 3, 1) = gt_imu.block(8, 0, 3, 1) - _imu->vel();
        err.block(9, 0, 3, 1) = gt_imu.block(11, 0, 3, 1) - _imu->bias_g();
        err.block(12, 0, 3, 1) = gt_imu.block(14, 0, 3, 1) - _imu->bias_a();

        // debug print the error of the recovered IMU state
        PRINT_INFO(REDPURPLE "e_ori = %.3f,%.3f,%.3f | %.3f,%.3f,%.3f,%.3f (true) | %.3f,%.3f,%.3f,%.3f (est)\n" RESET,
                   180.0 / M_PI * err(0 + 0), 180.0 / M_PI * err(0 + 1), 180.0 / M_PI * err(0 + 2), gt_imu(1), gt_imu(2), gt_imu(3),
                   gt_imu(4), _imu->quat()(0), _imu->quat()(1), _imu->quat()(2), _imu->quat()(3));
        PRINT_INFO(REDPURPLE "e_pos = %.3f,%.3f,%.3f | %.3f,%.3f,%.3f (true) | %.3f,%.3f,%.3f (est)\n" RESET, err(3 + 0), err(3 + 1),
                   err(3 + 2), gt_imu(5), gt_imu(6), gt_imu(7), _imu->pos()(0), _imu->pos()(1), _imu->pos()(2));
        PRINT_INFO(REDPURPLE "e_vel = %.3f,%.3f,%.3f | %.3f,%.3f,%.3f (true) | %.3f,%.3f,%.3f (est)\n" RESET, err(6 + 0), err(6 + 1),
                   err(6 + 2), gt_imu(8), gt_imu(9), gt_imu(10), _imu->vel()(0), _imu->vel()(1), _imu->vel()(2));
        PRINT_INFO(REDPURPLE "e_bias_g = %.3f,%.3f,%.3f | %.3f,%.3f,%.3f (true) | %.3f,%.3f,%.3f (est)\n" RESET, err(9 + 0), err(9 + 1),
                   err(9 + 2), gt_imu(11), gt_imu(12), gt_imu(13), _imu->bias_g()(0), _imu->bias_g()(1), _imu->bias_g()(2));
        PRINT_INFO(REDPURPLE "e_bias_a = %.3f,%.3f,%.3f | %.3f,%.3f,%.3f (true) | %.3f,%.3f,%.3f (est)\n" RESET, err(12 + 0), err(12 + 1),
                   err(12 + 2), gt_imu(14), gt_imu(15), gt_imu(16), _imu->bias_a()(0), _imu->bias_a()(1), _imu->bias_a()(2));

        // calculate normalized estimation error squared
        // the recovered error should be on the order of the state size (15 or 3 for marginals)
        Eigen::MatrixXd information = covariance.inverse();
        double nees_total = (err.transpose() * information * err)(0, 0);
        double nees_ori = (err.block(0, 0, 3, 1).transpose() * information.block(0, 0, 3, 3) * err.block(0, 0, 3, 1))(0, 0);
        double nees_pos = (err.block(3, 0, 3, 1).transpose() * information.block(3, 3, 3, 3) * err.block(3, 0, 3, 1))(0, 0);
        double nees_vel = (err.block(6, 0, 3, 1).transpose() * information.block(6, 6, 3, 3) * err.block(6, 0, 3, 1))(0, 0);
        double nees_bg = (err.block(9, 0, 3, 1).transpose() * information.block(9, 9, 3, 3) * err.block(9, 0, 3, 1))(0, 0);
        double nees_ba = (err.block(12, 0, 3, 1).transpose() * information.block(12, 12, 3, 3) * err.block(12, 0, 3, 1))(0, 0);
        PRINT_INFO(REDPURPLE "nees total = %.3f | ori = %.3f | pos = %.3f (ideal is 15 and 3)\n" RESET, nees_total, nees_ori, nees_pos);
        PRINT_INFO(REDPURPLE "nees vel = %.3f | bg = %.3f | ba = %.3f (ideal 3)\n" RESET, nees_vel, nees_bg, nees_ba);

#if ROS_AVAILABLE == 1
        // Align the groundtruth to the current estimate yaw
        // Only use the first frame so we can see the drift
        double oldestpose_time = -1;
        std::shared_ptr<ov_type::PoseJPL> oldestpose = nullptr;
        for (auto const &_pose : _clones_IMU) {
          if (oldestpose == nullptr || _pose.first < oldestpose_time) {
            oldestpose_time = _pose.first;
            oldestpose = _pose.second;
          }
        }
        assert(oldestpose != nullptr);
        Eigen::Vector4d q_es_0, q_gt_0;
        Eigen::Vector3d p_es_0, p_gt_0;
        q_es_0 = oldestpose->quat();
        p_es_0 = oldestpose->pos();
        Eigen::Matrix<double, 17, 1> gt_imustate_0;
        // NOTE: get_state() must run even when asserts are compiled out (NDEBUG)
        bool found_gt_0 = sim.get_state(oldestpose_time + sim.get_true_parameters().calib_camimu_dt, gt_imustate_0);
        assert(found_gt_0);
        (void)found_gt_0;
        q_gt_0 = gt_imustate_0.block(1, 0, 4, 1);
        p_gt_0 = gt_imustate_0.block(5, 0, 3, 1);
        Eigen::Matrix3d R_ESTtoGT;
        Eigen::Vector3d t_ESTinGT;
        align_posyaw_single(q_es_0, p_es_0, q_gt_0, p_gt_0, R_ESTtoGT, t_ESTinGT);

        // Pose states
        nav_msgs::Path arrEST, arrGT;
        arrEST.header.stamp = ros::Time::now();
        arrEST.header.frame_id = "global";
        arrGT.header.stamp = ros::Time::now();
        arrGT.header.frame_id = "global";
        for (auto const &_pose : _clones_IMU) {
          geometry_msgs::PoseStamped poseEST, poseGT;
          poseEST.header.stamp = ros::Time(_pose.first);
          poseEST.header.frame_id = "global";
          poseEST.pose.orientation.x = _pose.second->quat()(0, 0);
          poseEST.pose.orientation.y = _pose.second->quat()(1, 0);
          poseEST.pose.orientation.z = _pose.second->quat()(2, 0);
          poseEST.pose.orientation.w = _pose.second->quat()(3, 0);
          poseEST.pose.position.x = _pose.second->pos()(0, 0);
          poseEST.pose.position.y = _pose.second->pos()(1, 0);
          poseEST.pose.position.z = _pose.second->pos()(2, 0);
          Eigen::Matrix<double, 17, 1> gt_imustate;
          // NOTE: side-effecting lookup kept outside assert() (would be skipped in NDEBUG builds)
          bool found_gt_k = sim.get_state(_pose.first + sim.get_true_parameters().calib_camimu_dt, gt_imustate);
          assert(found_gt_k);
          (void)found_gt_k;
          gt_imustate.block(1, 0, 4, 1) = ov_core::quat_multiply(gt_imustate.block(1, 0, 4, 1), ov_core::rot_2_quat(R_ESTtoGT));
          gt_imustate.block(5, 0, 3, 1) = R_ESTtoGT.transpose() * (gt_imustate.block(5, 0, 3, 1) - t_ESTinGT);
          poseGT.header.stamp = ros::Time(_pose.first);
          poseGT.header.frame_id = "global";
          poseGT.pose.orientation.x = gt_imustate(1);
          poseGT.pose.orientation.y = gt_imustate(2);
          poseGT.pose.orientation.z = gt_imustate(3);
          poseGT.pose.orientation.w = gt_imustate(4);
          poseGT.pose.position.x = gt_imustate(5);
          poseGT.pose.position.y = gt_imustate(6);
          poseGT.pose.position.z = gt_imustate(7);
          arrEST.poses.push_back(poseEST);
          arrGT.poses.push_back(poseGT);
        }
        pub_pathimu.publish(arrEST);
        pub_pathgt.publish(arrGT);

        // Features ESTIMATES pointcloud
        sensor_msgs::PointCloud point_cloud;
        point_cloud.header.frame_id = "global";
        point_cloud.header.stamp = ros::Time::now();
        for (auto const &featpair : _features_SLAM) {
          geometry_msgs::Point32 p;
          p.x = (float)featpair.second->get_xyz(false)(0, 0);
          p.y = (float)featpair.second->get_xyz(false)(1, 0);
          p.z = (float)featpair.second->get_xyz(false)(2, 0);
          point_cloud.points.push_back(p);
        }
        pub_loop_point.publish(point_cloud);

        // Features GROUNDTRUTH pointcloud
        sensor_msgs::PointCloud2 cloud;
        cloud.header.frame_id = "global";
        cloud.header.stamp = ros::Time::now();
        cloud.width = 3 * _features_SLAM.size();
        cloud.height = 1;
        cloud.is_bigendian = false;
        cloud.is_dense = false; // there may be invalid points
        sensor_msgs::PointCloud2Modifier modifier(cloud);
        modifier.setPointCloud2FieldsByString(1, "xyz");
        modifier.resize(3 * _features_SLAM.size());
        sensor_msgs::PointCloud2Iterator<float> out_x(cloud, "x");
        sensor_msgs::PointCloud2Iterator<float> out_y(cloud, "y");
        sensor_msgs::PointCloud2Iterator<float> out_z(cloud, "z");
        for (auto &featpair : _features_SLAM) {
          // TrackSIM adds 1 to sim id if num_aruco is zero
          Eigen::Vector3d feat = sim.get_map().at(featpair.first - 1);
          feat = R_ESTtoGT.transpose() * (feat - t_ESTinGT);
          *out_x = (float)feat(0);
          ++out_x;
          *out_y = (float)feat(1);
          ++out_y;
          *out_z = (float)feat(2);
          ++out_z;
        }
        pub_points_sim.publish(cloud);
#endif

        // Wait for user approval
        do {
          std::cout << '\n' << "Press a key to continue...";
        } while (std::cin.get() != '\n');

        // Reset our tracker and simulator so we can try to init again
        if (params.sim_do_perturbation) {
          sim.perturb_parameters(params);
        }
        imu_readings = std::make_shared<std::vector<ov_core::ImuData>>();
        tracker = std::make_shared<ov_core::TrackSIM>(params.camera_intrinsics, 0);
        initializer = std::make_shared<DynamicInitializer>(params, tracker->get_feature_database(), imu_readings);
      } else if (timestamp != -1) {
        PRINT_INFO(RED "failed (%.4f seconds)\n\n" RESET, time);
      }
    }
  }

  // Done!
  return EXIT_SUCCESS;
}

View File

@@ -0,0 +1,642 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <cmath>
#include <csignal>
#include <deque>
#include <iomanip>
#include <sstream>
#include <unistd.h>
#include <vector>
#include <ceres/ceres.h>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/filesystem.hpp>
#if ROS_AVAILABLE == 1
#include <nav_msgs/Path.h>
#include <ros/ros.h>
#include <sensor_msgs/PointCloud.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>
#endif
#include "utils/colors.h"
#include "utils/sensor_data.h"
#include "ceres/Factor_GenericPrior.h"
#include "ceres/Factor_ImageReprojCalib.h"
#include "ceres/Factor_ImuCPIv1.h"
#include "ceres/State_JPLQuatLocal.h"
#include "init/InertialInitializerOptions.h"
#include "sim/Simulator.h"
#include "utils/helper.h"
using namespace ov_init;
// Define the function to be called when ctrl-c (SIGINT) is sent to process
void signal_callback_handler(int signum) { std::exit(signum); }
// Main function
int main(int argc, char **argv) {
// Ensure we have a path, if the user passes it then we should use it
std::string config_path = "unset_path_to_config.yaml";
if (argc > 1) {
config_path = argv[1];
}
#if ROS_AVAILABLE == 1
// Launch our ros node
ros::init(argc, argv, "test_dynamic_mle");
auto nh = std::make_shared<ros::NodeHandle>("~");
nh->param<std::string>("config_path", config_path, config_path);
// Topics to publish
auto pub_pathimu = nh->advertise<nav_msgs::Path>("/ov_msckf/pathimu", 2);
PRINT_DEBUG("Publishing: %s\n", pub_pathimu.getTopic().c_str());
auto pub_pathgt = nh->advertise<nav_msgs::Path>("/ov_msckf/pathgt", 2);
PRINT_DEBUG("Publishing: %s\n", pub_pathgt.getTopic().c_str());
auto pub_loop_point = nh->advertise<sensor_msgs::PointCloud>("/ov_msckf/loop_feats", 2);
PRINT_DEBUG("Publishing: %s\n", pub_loop_point.getTopic().c_str());
auto pub_points_sim = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_sim", 2);
PRINT_DEBUG("Publishing: %s\n", pub_points_sim.getTopic().c_str());
#endif
// Load the config
auto parser = std::make_shared<ov_core::YamlParser>(config_path);
#if ROS_AVAILABLE == 1
parser->set_node_handler(nh);
#endif
// Verbosity
std::string verbosity = "INFO";
parser->parse_config("verbosity", verbosity);
ov_core::Printer::setPrintLevel(verbosity);
// Create the simulator
InertialInitializerOptions params;
params.print_and_load(parser);
params.print_and_load_simulation(parser);
if (!parser->successful()) {
PRINT_ERROR(RED "unable to parse all parameters, please fix\n" RESET);
std::exit(EXIT_FAILURE);
}
Simulator sim(params);
//===================================================================================
//===================================================================================
//===================================================================================
// Ceres problem stuff
// NOTE: By default the problem takes ownership of the memory
ceres::Problem problem;
// Our system states (map from time to index)
std::map<double, int> map_states;
std::vector<double *> ceres_vars_ori;
std::vector<double *> ceres_vars_pos;
std::vector<double *> ceres_vars_vel;
std::vector<double *> ceres_vars_bias_g;
std::vector<double *> ceres_vars_bias_a;
// Feature states (3dof p_FinG)
std::map<size_t, int> map_features;
std::vector<double *> ceres_vars_feat;
typedef std::vector<std::pair<Factor_ImageReprojCalib *, std::vector<double *>>> factpair;
std::map<size_t, factpair> map_features_delayed; // only valid as long as problem is
// Setup extrinsic calibration q_ItoC, p_IinC (map from camera id to index)
std::map<size_t, int> map_calib_cam2imu;
std::vector<double *> ceres_vars_calib_cam2imu_ori;
std::vector<double *> ceres_vars_calib_cam2imu_pos;
// Setup intrinsic calibration focal, center, distortion (map from camera id to index)
std::map<size_t, int> map_calib_cam;
std::vector<double *> ceres_vars_calib_cam_intrinsics;
// Set the optimization settings
// NOTE: We use dense schur since after eliminating features we have a dense problem
// NOTE: http://ceres-solver.org/solving_faqs.html#solving
ceres::Solver::Options options;
options.linear_solver_type = ceres::DENSE_SCHUR;
options.trust_region_strategy_type = ceres::DOGLEG;
// options.linear_solver_type = ceres::SPARSE_SCHUR;
// options.trust_region_strategy_type = ceres::LEVENBERG_MARQUARDT;
options.num_threads = 6;
options.max_solver_time_in_seconds = 9999;
options.max_num_iterations = 20;
options.minimizer_progress_to_stdout = true;
// options.use_nonmonotonic_steps = true;
// options.function_tolerance = 1e-5;
// options.gradient_tolerance = 1e-4 * options.function_tolerance;
// DEBUG: check analytical jacobians using ceres finite differences
// options.check_gradients = true;
// options.gradient_check_relative_precision = 1e-4;
// options.gradient_check_numeric_derivative_relative_step_size = 1e-8;
//===================================================================================
//===================================================================================
//===================================================================================
// Random number generator used to perturb the groundtruth features
// NOTE: It seems that if this too large it can really prevent good optimization
// NOTE: Values greater 5cm seems to fail, not sure if this is reasonable or not...
std::mt19937 gen_state_perturb(params.sim_seed_preturb);
double feat_noise = 0.05; // meters
// Buffer our camera image
double buffer_timecam = -1;
double buffer_timecam_last = -1;
std::vector<int> buffer_camids;
std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> buffer_feats;
auto imu_readings = std::make_shared<std::vector<ov_core::ImuData>>();
// Simulate!
signal(SIGINT, signal_callback_handler);
while (sim.ok()) {
// IMU: get the next simulated IMU measurement if we have it
ov_core::ImuData message_imu;
bool hasimu = sim.get_next_imu(message_imu.timestamp, message_imu.wm, message_imu.am);
if (hasimu) {
imu_readings->push_back(message_imu);
}
// CAM: get the next simulated camera uv measurements if we have them
double time_cam;
std::vector<int> camids;
std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> feats;
bool hascam = sim.get_next_cam(time_cam, camids, feats);
if (hascam) {
if (buffer_timecam != -1) {
// Get our predicted state at the requested camera timestep
double timestamp_k = buffer_timecam_last;
double timestamp_k1 = buffer_timecam;
Eigen::Matrix<double, 16, 1> state_k1;
std::shared_ptr<ov_core::CpiV1> cpi = nullptr;
if (map_states.empty()) {
// Add the first ever pose to the problem
// TODO: do not initialize from the groundtruth pose
double time1 = timestamp_k1 + sim.get_true_parameters().calib_camimu_dt;
Eigen::Matrix<double, 17, 1> gt_imustate;
assert(sim.get_state(time1, gt_imustate));
state_k1 = gt_imustate.block(1, 0, 16, 1);
} else {
// Get our previous state timestamp (newest time) and biases to integrate with
assert(timestamp_k != -1);
// timestamp_k = map_states.rbegin()->first;
Eigen::Vector4d quat_k;
for (int i = 0; i < 4; i++) {
quat_k(i) = ceres_vars_ori.at(map_states.at(timestamp_k))[i];
}
Eigen::Vector3d pos_k, vel_k, bias_g_k, bias_a_k;
for (int i = 0; i < 3; i++) {
pos_k(i) = ceres_vars_pos.at(map_states.at(timestamp_k))[i];
vel_k(i) = ceres_vars_vel.at(map_states.at(timestamp_k))[i];
bias_g_k(i) = ceres_vars_bias_g.at(map_states.at(timestamp_k))[i];
bias_a_k(i) = ceres_vars_bias_a.at(map_states.at(timestamp_k))[i];
}
Eigen::Vector3d gravity;
gravity << 0.0, 0.0, params.gravity_mag;
// Preintegrate from previous state
// Then append a new state with preintegration factor
// TODO: estimate the timeoffset? don't use the true?
double time0 = timestamp_k + sim.get_true_parameters().calib_camimu_dt;
double time1 = timestamp_k1 + sim.get_true_parameters().calib_camimu_dt;
auto readings = InitializerHelper::select_imu_readings(*imu_readings, time0, time1);
cpi = std::make_shared<ov_core::CpiV1>(params.sigma_w, params.sigma_wb, params.sigma_a, params.sigma_ab, false);
cpi->setLinearizationPoints(bias_g_k, bias_a_k, quat_k, gravity);
for (size_t k = 0; k < readings.size() - 1; k++) {
auto imu0 = readings.at(k);
auto imu1 = readings.at(k + 1);
cpi->feed_IMU(imu0.timestamp, imu1.timestamp, imu0.wm, imu0.am, imu1.wm, imu1.am);
}
assert(timestamp_k1 > timestamp_k);
// Compute the predicted state
state_k1.block(0, 0, 4, 1) = ov_core::quat_multiply(cpi->q_k2tau, quat_k);
state_k1.block(4, 0, 3, 1) =
pos_k + vel_k * cpi->DT - 0.5 * cpi->grav * std::pow(cpi->DT, 2) + ov_core::quat_2_Rot(quat_k).transpose() * cpi->alpha_tau;
state_k1.block(7, 0, 3, 1) = vel_k - cpi->grav * cpi->DT + ov_core::quat_2_Rot(quat_k).transpose() * cpi->beta_tau;
state_k1.block(10, 0, 3, 1) = bias_g_k;
state_k1.block(13, 0, 3, 1) = bias_a_k;
}
// ================================================================
// ADDING GRAPH STATE / ESTIMATES!
// ================================================================
// Load our state variables into our allocated state pointers
auto *var_ori = new double[4];
for (int i = 0; i < 4; i++) {
var_ori[i] = state_k1(0 + i, 0);
}
auto *var_pos = new double[3];
auto *var_vel = new double[3];
auto *var_bias_g = new double[3];
auto *var_bias_a = new double[3];
for (int i = 0; i < 3; i++) {
var_pos[i] = state_k1(4 + i, 0);
var_vel[i] = state_k1(7 + i, 0);
var_bias_g[i] = state_k1(10 + i, 0);
var_bias_a[i] = state_k1(13 + i, 0);
}
// Now actually create the parameter block in the ceres problem
auto ceres_jplquat = new State_JPLQuatLocal();
problem.AddParameterBlock(var_ori, 4, ceres_jplquat);
problem.AddParameterBlock(var_pos, 3);
problem.AddParameterBlock(var_vel, 3);
problem.AddParameterBlock(var_bias_g, 3);
problem.AddParameterBlock(var_bias_a, 3);
// Fix this first ever pose to constrain the problem
// On the Comparison of Gauge Freedom Handling in Optimization-Based Visual-Inertial State Estimation
// Zichao Zhang; Guillermo Gallego; Davide Scaramuzza
// https://ieeexplore.ieee.org/document/8354808
if (map_states.empty()) {
// Construct state and prior
Eigen::MatrixXd x_lin = Eigen::MatrixXd::Zero(7, 1);
for (int i = 0; i < 4; i++) {
x_lin(0 + i) = var_ori[i];
}
for (int i = 0; i < 3; i++) {
x_lin(4 + i) = var_pos[i];
// x_lin(7 + i) = var_bias_g[i];
// x_lin(10 + i) = var_bias_a[i];
}
Eigen::MatrixXd prior_grad = Eigen::MatrixXd::Zero(4, 1);
Eigen::MatrixXd prior_Info = Eigen::MatrixXd::Identity(4, 4);
prior_Info.block(0, 0, 4, 4) *= 1.0 / std::pow(1e-8, 2); // 4dof unobservable yaw and position
// prior_Info.block(4, 4, 3, 3) *= 1.0 / std::pow(1e-1, 2); // bias_g prior
// prior_Info.block(7, 7, 3, 3) *= 1.0 / std::pow(1e-1, 2); // bias_a prior
// Construct state type and ceres parameter pointers
std::vector<std::string> x_types;
std::vector<double *> factor_params;
factor_params.push_back(var_ori);
x_types.emplace_back("quat_yaw");
factor_params.push_back(var_pos);
x_types.emplace_back("vec3");
// factor_params.push_back(var_bias_g);
// x_types.emplace_back("vec3");
// factor_params.push_back(var_bias_a);
// x_types.emplace_back("vec3");
// Append it to the problem
auto *factor_prior = new Factor_GenericPrior(x_lin, x_types, prior_Info, prior_grad);
problem.AddResidualBlock(factor_prior, nullptr, factor_params);
// problem.SetParameterBlockConstant(var_ori);
// problem.SetParameterBlockConstant(var_pos);
}
// Append to our historical vector of states
map_states.insert({timestamp_k1, (int)ceres_vars_ori.size()});
ceres_vars_ori.push_back(var_ori);
ceres_vars_pos.push_back(var_pos);
ceres_vars_vel.push_back(var_vel);
ceres_vars_bias_g.push_back(var_bias_g);
ceres_vars_bias_a.push_back(var_bias_a);
buffer_timecam_last = timestamp_k1;
// ================================================================
// ADDING GRAPH FACTORS!
// ================================================================
// Append the new IMU factor
if (cpi != nullptr) {
assert(timestamp_k != -1);
std::vector<double *> factor_params;
factor_params.push_back(ceres_vars_ori.at(map_states.at(timestamp_k)));
factor_params.push_back(ceres_vars_bias_g.at(map_states.at(timestamp_k)));
factor_params.push_back(ceres_vars_vel.at(map_states.at(timestamp_k)));
factor_params.push_back(ceres_vars_bias_a.at(map_states.at(timestamp_k)));
factor_params.push_back(ceres_vars_pos.at(map_states.at(timestamp_k)));
factor_params.push_back(ceres_vars_ori.at(map_states.at(timestamp_k1)));
factor_params.push_back(ceres_vars_bias_g.at(map_states.at(timestamp_k1)));
factor_params.push_back(ceres_vars_vel.at(map_states.at(timestamp_k1)));
factor_params.push_back(ceres_vars_bias_a.at(map_states.at(timestamp_k1)));
factor_params.push_back(ceres_vars_pos.at(map_states.at(timestamp_k1)));
auto *factor_imu = new Factor_ImuCPIv1(cpi->DT, cpi->grav, cpi->alpha_tau, cpi->beta_tau, cpi->q_k2tau, cpi->b_a_lin,
cpi->b_w_lin, cpi->J_q, cpi->J_b, cpi->J_a, cpi->H_b, cpi->H_a, cpi->P_meas);
problem.AddResidualBlock(factor_imu, nullptr, factor_params);
}
// Then, append new feature observations factors seen from this frame (initialize features as needed)
// We first loop through each camera and for each we append measurements as needed
assert(buffer_camids.size() == buffer_feats.size());
for (size_t n = 0; n < buffer_camids.size(); n++) {
// First make sure we have calibration states added
int cam_id = buffer_camids.at(n);
if (map_calib_cam2imu.find(cam_id) == map_calib_cam2imu.end()) {
auto *var_calib_ori = new double[4];
for (int i = 0; i < 4; i++) {
var_calib_ori[i] = params.camera_extrinsics.at(cam_id)(0 + i, 0);
}
auto *var_calib_pos = new double[3];
for (int i = 0; i < 3; i++) {
var_calib_pos[i] = params.camera_extrinsics.at(cam_id)(4 + i, 0);
}
auto ceres_calib_jplquat = new State_JPLQuatLocal();
problem.AddParameterBlock(var_calib_ori, 4, ceres_calib_jplquat);
problem.AddParameterBlock(var_calib_pos, 3);
map_calib_cam2imu.insert({cam_id, (int)ceres_vars_calib_cam2imu_ori.size()});
ceres_vars_calib_cam2imu_ori.push_back(var_calib_ori);
ceres_vars_calib_cam2imu_pos.push_back(var_calib_pos);
// Construct state and prior
Eigen::MatrixXd x_lin = Eigen::MatrixXd::Zero(7, 1);
for (int i = 0; i < 4; i++) {
x_lin(0 + i) = var_calib_ori[i];
}
for (int i = 0; i < 3; i++) {
x_lin(4 + i) = var_calib_pos[i];
}
Eigen::MatrixXd prior_grad = Eigen::MatrixXd::Zero(6, 1);
Eigen::MatrixXd prior_Info = Eigen::MatrixXd::Identity(6, 6);
prior_Info.block(0, 0, 3, 3) *= 1.0 / std::pow(0.001, 2);
prior_Info.block(3, 3, 3, 3) *= 1.0 / std::pow(0.01, 2);
// Construct state type and ceres parameter pointers
std::vector<std::string> x_types;
std::vector<double *> factor_params;
factor_params.push_back(var_calib_ori);
x_types.emplace_back("quat");
factor_params.push_back(var_calib_pos);
x_types.emplace_back("vec3");
if (!params.init_dyn_mle_opt_calib) {
problem.SetParameterBlockConstant(var_calib_ori);
problem.SetParameterBlockConstant(var_calib_pos);
} else {
auto *factor_prior = new Factor_GenericPrior(x_lin, x_types, prior_Info, prior_grad);
problem.AddResidualBlock(factor_prior, nullptr, factor_params);
}
}
bool is_fisheye = (std::dynamic_pointer_cast<ov_core::CamEqui>(params.camera_intrinsics.at(cam_id)) != nullptr);
if (map_calib_cam.find(cam_id) == map_calib_cam.end()) {
auto *var_calib_cam = new double[8];
for (int i = 0; i < 8; i++) {
var_calib_cam[i] = params.camera_intrinsics.at(cam_id)->get_value()(i, 0);
}
problem.AddParameterBlock(var_calib_cam, 8);
map_calib_cam.insert({cam_id, (int)ceres_vars_calib_cam_intrinsics.size()});
ceres_vars_calib_cam_intrinsics.push_back(var_calib_cam);
// Construct state and prior
Eigen::MatrixXd x_lin = Eigen::MatrixXd::Zero(8, 1);
for (int i = 0; i < 8; i++) {
x_lin(0 + i) = var_calib_cam[i];
}
Eigen::MatrixXd prior_grad = Eigen::MatrixXd::Zero(8, 1);
Eigen::MatrixXd prior_Info = Eigen::MatrixXd::Identity(8, 8);
prior_Info.block(0, 0, 4, 4) *= 1.0 / std::pow(1.0, 2);
prior_Info.block(4, 4, 4, 4) *= 1.0 / std::pow(0.005, 2);
// Construct state type and ceres parameter pointers
std::vector<std::string> x_types;
std::vector<double *> factor_params;
factor_params.push_back(var_calib_cam);
x_types.emplace_back("vec8");
if (!params.init_dyn_mle_opt_calib) {
problem.SetParameterBlockConstant(var_calib_cam);
} else {
auto *factor_prior = new Factor_GenericPrior(x_lin, x_types, prior_Info, prior_grad);
problem.AddResidualBlock(factor_prior, nullptr, factor_params);
}
}
// Now loop through each feature observed at this k+1 timestamp
for (auto const &feat : buffer_feats.at(n)) {
size_t featid = feat.first;
Eigen::Vector2d uv_raw;
uv_raw << feat.second(0), feat.second(1);
// Next make sure that we have the feature state
// Normally, we should need to triangulate this once we have enough measurements
// TODO: do not initialize features from the groundtruth map
if (map_features.find(featid) == map_features.end()) {
assert(params.use_stereo);
Eigen::Vector3d p_FinG = sim.get_map().at(featid);
auto *var_feat = new double[3];
for (int i = 0; i < 3; i++) {
std::normal_distribution<double> w(0, 1);
var_feat[i] = p_FinG(i) + feat_noise * w(gen_state_perturb);
}
map_features.insert({featid, (int)ceres_vars_feat.size()});
map_features_delayed.insert({featid, factpair()});
ceres_vars_feat.push_back(var_feat);
}
// Factor parameters it is a function of
std::vector<double *> factor_params;
factor_params.push_back(ceres_vars_ori.at(map_states.at(timestamp_k1)));
factor_params.push_back(ceres_vars_pos.at(map_states.at(timestamp_k1)));
factor_params.push_back(ceres_vars_feat.at(map_features.at(featid)));
factor_params.push_back(ceres_vars_calib_cam2imu_ori.at(map_calib_cam2imu.at(cam_id)));
factor_params.push_back(ceres_vars_calib_cam2imu_pos.at(map_calib_cam2imu.at(cam_id)));
factor_params.push_back(ceres_vars_calib_cam_intrinsics.at(map_calib_cam.at(cam_id)));
auto *factor_pinhole = new Factor_ImageReprojCalib(uv_raw, params.sigma_pix, is_fisheye);
if (map_features_delayed.find(featid) != map_features_delayed.end()) {
map_features_delayed.at(featid).push_back({factor_pinhole, factor_params});
} else {
ceres::LossFunction *loss_function = nullptr;
// ceres::LossFunction *loss_function = new ceres::CauchyLoss(1.0);
problem.AddResidualBlock(factor_pinhole, loss_function, factor_params);
}
}
}
}
buffer_timecam = time_cam;
buffer_camids = camids;
buffer_feats = feats;
// If a delayed feature has enough measurements we can add it to the problem!
// We will wait for enough observations before adding the parameter
// This should make it much more stable since the parameter / optimization is more constrained
size_t min_num_meas_to_optimize = 10;
auto it0 = map_features_delayed.begin();
while (it0 != map_features_delayed.end()) {
size_t featid = (*it0).first;
auto factors = (*it0).second;
if (factors.size() >= min_num_meas_to_optimize) {
problem.AddParameterBlock(ceres_vars_feat.at(map_features.at(featid)), 3);
for (auto const &factorpair : factors) {
auto factor_pinhole = factorpair.first;
auto factor_params = factorpair.second;
ceres::LossFunction *loss_function = nullptr;
// ceres::LossFunction *loss_function = new ceres::CauchyLoss(1.0);
problem.AddResidualBlock(factor_pinhole, loss_function, factor_params);
}
it0 = map_features_delayed.erase(it0);
} else {
it0++;
}
}
// ================================================================
// PERFORM ACTUAL OPTIMIZATION!
// ================================================================
// We can try to optimize every few frames, but this can cause the IMU to drift
// Thus this can't be too large, nor too small to reduce the computation
if (map_states.size() % 10 == 0 && map_states.size() > min_num_meas_to_optimize) {
// COMPUTE: error before optimization
double error_before = 0.0;
if (!map_features.empty()) {
for (auto &feat : map_features) {
Eigen::Vector3d pos1, pos2;
pos1 << ceres_vars_feat.at(feat.second)[0], ceres_vars_feat.at(feat.second)[1], ceres_vars_feat.at(feat.second)[2];
pos2 = sim.get_map().at(feat.first);
error_before += (pos2 - pos1).norm();
}
error_before /= (double)map_features.size();
}
// Optimize the ceres graph
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
PRINT_INFO("[CERES]: %s\n", summary.message.c_str());
PRINT_INFO("[CERES]: %d iterations | %zu states, %zu features | %d parameters and %d residuals | cost %.4e => %.4e\n",
(int)summary.iterations.size(), map_states.size(), map_features.size(), summary.num_parameters, summary.num_residuals,
summary.initial_cost, summary.final_cost);
// COMPUTE: error after optimization
double error_after = 0.0;
if (!map_features.empty()) {
for (auto &feat : map_features) {
Eigen::Vector3d pos1, pos2;
pos1 << ceres_vars_feat.at(feat.second)[0], ceres_vars_feat.at(feat.second)[1], ceres_vars_feat.at(feat.second)[2];
pos2 = sim.get_map().at(feat.first);
error_after += (pos2 - pos1).norm();
}
error_after /= (double)map_features.size();
}
PRINT_INFO("[CERES]: map error %.4f => %.4f (meters) for %zu features\n", error_before, error_after, map_features.size());
// Print feature positions to console
// https://gist.github.com/goldbattle/177a6b2cccb4b4208e687b0abae4bc9f
// for (auto &feat : map_features) {
// size_t featid = feat.first;
// std::cout << featid << ",";
// std::cout << ceres_vars_feat[map_features[featid]][0] << "," << ceres_vars_feat[map_features[featid]][1] << ","
// << ceres_vars_feat[map_features[featid]][2] << ",";
// std::cout << sim.get_map().at(featid)[0] << "," << sim.get_map().at(featid)[1] << "," << sim.get_map().at(featid)[2] <<
// ","; std::cout << map_features_num_meas[feat.first] << ";" << std::endl;
// }
}
#if ROS_AVAILABLE == 1
// Visualize in RVIZ our current estimates and
if (map_states.size() % 1 == 0) {
// Pose states
nav_msgs::Path arrEST, arrGT;
arrEST.header.stamp = ros::Time::now();
arrEST.header.frame_id = "global";
arrGT.header.stamp = ros::Time::now();
arrGT.header.frame_id = "global";
for (auto &statepair : map_states) {
geometry_msgs::PoseStamped poseEST, poseGT;
poseEST.header.stamp = ros::Time(statepair.first);
poseEST.header.frame_id = "global";
poseEST.pose.orientation.x = ceres_vars_ori[statepair.second][0];
poseEST.pose.orientation.y = ceres_vars_ori[statepair.second][1];
poseEST.pose.orientation.z = ceres_vars_ori[statepair.second][2];
poseEST.pose.orientation.w = ceres_vars_ori[statepair.second][3];
poseEST.pose.position.x = ceres_vars_pos[statepair.second][0];
poseEST.pose.position.y = ceres_vars_pos[statepair.second][1];
poseEST.pose.position.z = ceres_vars_pos[statepair.second][2];
Eigen::Matrix<double, 17, 1> gt_imustate;
assert(sim.get_state(statepair.first + sim.get_true_parameters().calib_camimu_dt, gt_imustate));
poseGT.header.stamp = ros::Time(statepair.first);
poseGT.header.frame_id = "global";
poseGT.pose.orientation.x = gt_imustate(1);
poseGT.pose.orientation.y = gt_imustate(2);
poseGT.pose.orientation.z = gt_imustate(3);
poseGT.pose.orientation.w = gt_imustate(4);
poseGT.pose.position.x = gt_imustate(5);
poseGT.pose.position.y = gt_imustate(6);
poseGT.pose.position.z = gt_imustate(7);
arrEST.poses.push_back(poseEST);
arrGT.poses.push_back(poseGT);
}
pub_pathimu.publish(arrEST);
pub_pathgt.publish(arrGT);
// Features ESTIMATES pointcloud
sensor_msgs::PointCloud point_cloud;
point_cloud.header.frame_id = "global";
point_cloud.header.stamp = ros::Time::now();
for (auto &featpair : map_features) {
if (map_features_delayed.find(featpair.first) != map_features_delayed.end())
continue;
geometry_msgs::Point32 p;
p.x = (float)ceres_vars_feat[map_features[featpair.first]][0];
p.y = (float)ceres_vars_feat[map_features[featpair.first]][1];
p.z = (float)ceres_vars_feat[map_features[featpair.first]][2];
point_cloud.points.push_back(p);
}
pub_loop_point.publish(point_cloud);
// Features GROUNDTRUTH pointcloud
sensor_msgs::PointCloud2 cloud;
cloud.header.frame_id = "global";
cloud.header.stamp = ros::Time::now();
cloud.width = 3 * map_features.size();
cloud.height = 1;
cloud.is_bigendian = false;
cloud.is_dense = false; // there may be invalid points
sensor_msgs::PointCloud2Modifier modifier(cloud);
modifier.setPointCloud2FieldsByString(1, "xyz");
modifier.resize(3 * map_features.size());
sensor_msgs::PointCloud2Iterator<float> out_x(cloud, "x");
sensor_msgs::PointCloud2Iterator<float> out_y(cloud, "y");
sensor_msgs::PointCloud2Iterator<float> out_z(cloud, "z");
for (auto &featpair : map_features) {
if (map_features_delayed.find(featpair.first) != map_features_delayed.end())
continue;
*out_x = (float)sim.get_map().at(featpair.first)(0); // no change in id since no tracker is used
++out_x;
*out_y = (float)sim.get_map().at(featpair.first)(1); // no change in id since no tracker is used
++out_y;
*out_z = (float)sim.get_map().at(featpair.first)(2); // no change in id since no tracker is used
++out_z;
}
pub_points_sim.publish(cloud);
}
#endif
}
}
// Done!
return EXIT_SUCCESS;
};

View File

@@ -0,0 +1,102 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <cmath>
#include <csignal>
#include <deque>
#include <iomanip>
#include <sstream>
#include <unistd.h>
#include <vector>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/filesystem.hpp>
#if ROS_AVAILABLE == 1
#include <ros/ros.h>
#endif
#include "init/InertialInitializerOptions.h"
#include "sim/Simulator.h"
using namespace ov_init;
// Define the function to be called when ctrl-c (SIGINT) is sent to process
// Handler installed for SIGINT (ctrl-c): terminate right away, reporting the
// signal number back to the shell as the process exit status.
// NOTE(review): std::exit() is not async-signal-safe (it runs atexit handlers);
// fine for a test utility, but std::_Exit would be the strictly-safe choice.
void signal_callback_handler(int signum) {
  std::exit(signum);
}
// Entry point: replays the simulated IMU + camera measurement stream and prints
// each measurement at DEBUG verbosity. Used as a sanity check of the Simulator
// (no estimation is performed here, we only pull and log measurements).
int main(int argc, char **argv) {

  // Ensure we have a path, if the user passes it then we should use it
  std::string config_path = "unset_path_to_config.yaml";
  if (argc > 1) {
    config_path = argv[1];
  }

#if ROS_AVAILABLE == 1
  // Launch our ros node (private namespace so roslaunch can override config_path)
  ros::init(argc, argv, "test_sim_meas");
  auto nh = std::make_shared<ros::NodeHandle>("~");
  nh->param<std::string>("config_path", config_path, config_path);
#endif

  // Load the config
  auto parser = std::make_shared<ov_core::YamlParser>(config_path);
#if ROS_AVAILABLE == 1
  parser->set_node_handler(nh);
#endif

  // Verbosity
  std::string verbosity = "INFO";
  parser->parse_config("verbosity", verbosity);
  ov_core::Printer::setPrintLevel(verbosity);

  // Create the simulator (loads trajectory, feature map, and sensor parameters)
  InertialInitializerOptions params;
  params.print_and_load(parser);
  params.print_and_load_simulation(parser);
  Simulator sim(params);

  // Continue to simulate until we have processed all the measurements
  signal(SIGINT, signal_callback_handler);
  while (sim.ok()) {

    // IMU: get the next simulated IMU measurement if we have it
    double time_imu;
    Eigen::Vector3d wm, am;
    bool hasimu = sim.get_next_imu(time_imu, wm, am);
    if (hasimu) {
      PRINT_DEBUG("new imu measurement = %0.15g | w = %0.3g | a = %0.3g\n", time_imu, wm.norm(), am.norm());
    }

    // CAM: get the next simulated camera uv measurements if we have them
    double time_cam;
    std::vector<int> camids;
    std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> feats;
    bool hascam = sim.get_next_cam(time_cam, camids, feats);
    if (hascam) {
      // BUGFIX: size_t must be printed with %zu (was %u, which is undefined
      // behavior on platforms where size_t is wider than unsigned int)
      PRINT_DEBUG("new cam measurement = %0.15g | %zu cameras | uvs(0) = %zu \n", time_cam, camids.size(), feats.at(0).size());
    }
  }

  // Done!
  return EXIT_SUCCESS;
}

324
ov_init/src/utils/helper.h Normal file
View File

@@ -0,0 +1,324 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_INIT_HELPER
#define OV_INIT_HELPER
#include "cpi/CpiV1.h"
#include "feat/FeatureHelper.h"
#include "types/IMU.h"
namespace ov_init {
/**
* @brief Has a bunch of helper functions for dynamic initialization (should all be static)
*/
class InitializerHelper {
public:
/**
 * @brief Linearly interpolates between two IMU readings at a requested time.
 *
 * Rather than simply "cutting" the IMU stream at the camera times, we blend the two
 * bounding messages so the integration window lines up exactly with the requested
 * timestamp. This also yields a better time-offset estimate; higher-order schemes
 * (splines) could be substituted if the IMU rate is slow.
 *
 * @param imu_1 Reading that bounds the requested time from below
 * @param imu_2 Reading that bounds the requested time from above
 * @param timestamp Time we wish to interpolate to
 */
static ov_core::ImuData interpolate_data(const ov_core::ImuData &imu_1, const ov_core::ImuData &imu_2, double timestamp) {
  // Fractional position of the requested time inside the [imu_1, imu_2] interval
  double frac = (timestamp - imu_1.timestamp) / (imu_2.timestamp - imu_1.timestamp);
  ov_core::ImuData interpolated;
  interpolated.timestamp = timestamp;
  // Convex combination of the bounding accelerometer and gyroscope readings
  interpolated.am = (1 - frac) * imu_1.am + frac * imu_2.am;
  interpolated.wm = (1 - frac) * imu_1.wm + frac * imu_2.wm;
  return interpolated;
}
/**
 * @brief Helper function that given current imu data, will select imu readings between the two times.
 *
 * This will create measurements that we will integrate with, and an extra measurement at the end.
 * We use the @ref interpolate_data() function to "cut" the imu readings at the beginning and end of the integration.
 * The timestamps passed should already take into account the time offset values.
 *
 * @param imu_data_tmp IMU data we will select measurements from (assumed sorted by timestamp -- TODO confirm with callers)
 * @param time0 Start timestamp
 * @param time1 End timestamp
 * @return Vector of measurements (empty if no usable readings span the window)
 */
static std::vector<ov_core::ImuData> select_imu_readings(const std::vector<ov_core::ImuData> &imu_data_tmp, double time0, double time1) {
  // Our vector imu readings
  std::vector<ov_core::ImuData> prop_data;
  // Ensure we have some measurements in the first place!
  if (imu_data_tmp.empty()) {
    return prop_data;
  }
  // Loop through and find all the needed measurements to propagate with
  // Note we split measurements based on the given state time, and the update timestamp
  for (size_t i = 0; i < imu_data_tmp.size() - 1; i++) {
    // START OF THE INTEGRATION PERIOD
    // The pair (i, i+1) straddles time0: synthesize a reading exactly at time0
    if (imu_data_tmp.at(i + 1).timestamp > time0 && imu_data_tmp.at(i).timestamp < time0) {
      ov_core::ImuData data = interpolate_data(imu_data_tmp.at(i), imu_data_tmp.at(i + 1), time0);
      prop_data.push_back(data);
      continue;
    }
    // MIDDLE OF INTEGRATION PERIOD
    // Both endpoints lie inside [time0, time1]: take reading i as-is
    if (imu_data_tmp.at(i).timestamp >= time0 && imu_data_tmp.at(i + 1).timestamp <= time1) {
      prop_data.push_back(imu_data_tmp.at(i));
      continue;
    }
    // END OF THE INTEGRATION PERIOD
    // Reading i+1 is past time1, so this is the last pair we need to consider
    if (imu_data_tmp.at(i + 1).timestamp > time1) {
      // Whole dataset starts after time1: nothing to select
      if (imu_data_tmp.at(i).timestamp > time1 && i == 0) {
        break;
      } else if (imu_data_tmp.at(i).timestamp > time1) {
        // Reading i already overshot time1: cut between (i-1, i) at time1
        ov_core::ImuData data = interpolate_data(imu_data_tmp.at(i - 1), imu_data_tmp.at(i), time1);
        prop_data.push_back(data);
      } else {
        // Reading i is still inside the window: keep it
        prop_data.push_back(imu_data_tmp.at(i));
      }
      // If the last selected reading does not land exactly on time1,
      // append one more interpolated reading so the window is closed at time1
      if (prop_data.at(prop_data.size() - 1).timestamp != time1) {
        ov_core::ImuData data = interpolate_data(imu_data_tmp.at(i), imu_data_tmp.at(i + 1), time1);
        prop_data.push_back(data);
      }
      break;
    }
  }
  // Check that we have at least one measurement to propagate with
  if (prop_data.empty()) {
    return prop_data;
  }
  // Loop through and ensure we do not have any zero dt values
  // This would cause the noise covariance to be Infinity
  // NOTE: i-- on an unsigned index is safe here since erase only happens when
  // the loop body runs (i < size-1), so i++ brings it back to the same slot
  for (size_t i = 0; i < prop_data.size() - 1; i++) {
    if (std::abs(prop_data.at(i + 1).timestamp - prop_data.at(i).timestamp) < 1e-12) {
      prop_data.erase(prop_data.begin() + i);
      i--;
    }
  }
  // Success :D
  return prop_data;
}
/**
 * @brief Given a gravity vector, compute the rotation from the inertial reference frame to this vector.
 *
 * The key assumption here is that gravity is along the vertical direction in the
 * inertial frame (z_in_G = [0, 0, 1]). We build an orthonormal basis around the
 * measured gravity direction via one Gram-Schmidt projection step.
 * https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
 *
 * @param gravity_inI Gravity in our sensor frame
 * @param R_GtoI Rotation from the arbitrary inertial reference frame to this gravity vector
 */
static void gram_schmidt(const Eigen::Vector3d &gravity_inI, Eigen::Matrix3d &R_GtoI) {
  // Unit vector along the measured gravity; this becomes the third column
  Eigen::Vector3d axis_z = gravity_inI / gravity_inI.norm();
  // Project e1 onto the plane orthogonal to gravity, then normalize (Gram-Schmidt step)
  // NOTE(review): degenerates if gravity is (nearly) parallel to e1 -- a branch on
  // the larger of e1/e2 projections would be more robust; confirm inputs avoid this
  Eigen::Vector3d basis_e1(1.0, 0.0, 0.0);
  Eigen::Vector3d axis_x = basis_e1 - axis_z * axis_z.transpose() * basis_e1;
  axis_x = axis_x / axis_x.norm();
  // Complete the right-handed frame with the cross product (via the skew operator)
  Eigen::Vector3d axis_y = ov_core::skew_x(axis_z) * axis_x;
  axis_y = axis_y / axis_y.norm();
  // Stack the columns: R_GtoI = [x | y | z]
  R_GtoI.block(0, 0, 3, 1) = axis_x;
  R_GtoI.block(0, 1, 3, 1) = axis_y;
  R_GtoI.block(0, 2, 3, 1) = axis_z;
}
/**
 * @brief Compute coefficients for the constrained optimization quadratic problem.
 *
 * https://gist.github.com/goldbattle/3791cbb11bbf4f5feb3f049dad72bfdd
 *
 * @param D 3x3 coefficient matrix of the linearized system (NOTE(review): the
 *          original doc said "gravity in our sensor frame", which looks copy-pasted
 *          from gram_schmidt -- the asserts below show D is 3x3 and d is 3x1;
 *          confirm exact semantics against the gist above)
 * @param d 3x1 vector of the same linearized system
 * @param gravity_mag Scalar size of gravity (normally is 9.81)
 * @return Coefficents from highest to the constant (degree-6 polynomial, coeff(0) is the leading term set to 1)
 */
static Eigen::VectorXd compute_dongsi_coeff(Eigen::MatrixXd &D, const Eigen::MatrixXd &d, double gravity_mag) {
  // matlab constants
  // The expressions below are machine-generated symbolic expansions exported
  // from MATLAB -- do not hand-edit; regenerate from the gist if D/d change.
  assert(D.rows() == 3);
  assert(D.cols() == 3);
  assert(d.rows() == 3);
  // Scalar aliases for the matrix/vector entries (matches the MATLAB naming)
  double D1_1 = D(0, 0), D1_2 = D(0, 1), D1_3 = D(0, 2);
  double D2_1 = D(1, 0), D2_2 = D(1, 1), D2_3 = D(1, 2);
  double D3_1 = D(2, 0), D3_2 = D(2, 1), D3_3 = D(2, 2);
  double d1 = d(0, 0), d2 = d(1, 0), d3 = d(2, 0);
  double g = gravity_mag;
  // squared version we subbed for x^2
  double D1_1_sq = D1_1 * D1_1, D1_2_sq = D1_2 * D1_2, D1_3_sq = D1_3 * D1_3;
  double D2_1_sq = D2_1 * D2_1, D2_2_sq = D2_2 * D2_2, D2_3_sq = D2_3 * D2_3;
  double D3_1_sq = D3_1 * D3_1, D3_2_sq = D3_2 * D3_2, D3_3_sq = D3_3 * D3_3;
  double d1_sq = d1 * d1, d2_sq = d2 * d2, d3_sq = d3 * d3;
  double g_sq = g * g;
  // Compute the coefficients (index 6 is the constant term, index 0 the leading one)
  Eigen::VectorXd coeff = Eigen::VectorXd::Zero(7);
  coeff(6) =
      -(-D1_1_sq * D2_2_sq * D3_3_sq * g_sq + D1_1_sq * D2_2_sq * d3_sq + 2 * D1_1_sq * D2_2 * D2_3 * D3_2 * D3_3 * g_sq -
        D1_1_sq * D2_2 * D2_3 * d2 * d3 - D1_1_sq * D2_2 * D3_2 * d2 * d3 - D1_1_sq * D2_3_sq * D3_2_sq * g_sq +
        D1_1_sq * D2_3 * D3_2 * d2_sq + D1_1_sq * D2_3 * D3_2 * d3_sq - D1_1_sq * D2_3 * D3_3 * d2 * d3 -
        D1_1_sq * D3_2 * D3_3 * d2 * d3 + D1_1_sq * D3_3_sq * d2_sq + 2 * D1_1 * D1_2 * D2_1 * D2_2 * D3_3_sq * g_sq -
        2 * D1_1 * D1_2 * D2_1 * D2_2 * d3_sq - 2 * D1_1 * D1_2 * D2_1 * D2_3 * D3_2 * D3_3 * g_sq + D1_1 * D1_2 * D2_1 * D2_3 * d2 * d3 +
        D1_1 * D1_2 * D2_1 * D3_2 * d2 * d3 - 2 * D1_1 * D1_2 * D2_2 * D2_3 * D3_1 * D3_3 * g_sq + D1_1 * D1_2 * D2_2 * D2_3 * d1 * d3 +
        D1_1 * D1_2 * D2_2 * D3_1 * d2 * d3 + 2 * D1_1 * D1_2 * D2_3_sq * D3_1 * D3_2 * g_sq - D1_1 * D1_2 * D2_3 * D3_1 * d2_sq -
        D1_1 * D1_2 * D2_3 * D3_1 * d3_sq - D1_1 * D1_2 * D2_3 * D3_2 * d1 * d2 + D1_1 * D1_2 * D2_3 * D3_3 * d1 * d3 +
        D1_1 * D1_2 * D3_1 * D3_3 * d2 * d3 - D1_1 * D1_2 * D3_3_sq * d1 * d2 - 2 * D1_1 * D1_3 * D2_1 * D2_2 * D3_2 * D3_3 * g_sq +
        D1_1 * D1_3 * D2_1 * D2_2 * d2 * d3 + 2 * D1_1 * D1_3 * D2_1 * D2_3 * D3_2_sq * g_sq - D1_1 * D1_3 * D2_1 * D3_2 * d2_sq -
        D1_1 * D1_3 * D2_1 * D3_2 * d3_sq + D1_1 * D1_3 * D2_1 * D3_3 * d2 * d3 + 2 * D1_1 * D1_3 * D2_2_sq * D3_1 * D3_3 * g_sq -
        D1_1 * D1_3 * D2_2_sq * d1 * d3 - 2 * D1_1 * D1_3 * D2_2 * D2_3 * D3_1 * D3_2 * g_sq + D1_1 * D1_3 * D2_2 * D3_2 * d1 * d2 +
        D1_1 * D1_3 * D2_3 * D3_1 * d2 * d3 - D1_1 * D1_3 * D2_3 * D3_2 * d1 * d3 + D1_1 * D1_3 * D3_1 * D3_2 * d2 * d3 -
        2 * D1_1 * D1_3 * D3_1 * D3_3 * d2_sq + D1_1 * D1_3 * D3_2 * D3_3 * d1 * d2 + D1_1 * D2_1 * D2_2 * D3_2 * d1 * d3 -
        D1_1 * D2_1 * D2_3 * D3_2 * d1 * d2 + D1_1 * D2_1 * D3_2 * D3_3 * d1 * d3 - D1_1 * D2_1 * D3_3_sq * d1 * d2 -
        D1_1 * D2_2_sq * D3_1 * d1 * d3 + D1_1 * D2_2 * D2_3 * D3_1 * d1 * d2 - D1_1 * D2_3 * D3_1 * D3_2 * d1 * d3 +
        D1_1 * D2_3 * D3_1 * D3_3 * d1 * d2 - D1_2_sq * D2_1_sq * D3_3_sq * g_sq + D1_2_sq * D2_1_sq * d3_sq +
        2 * D1_2_sq * D2_1 * D2_3 * D3_1 * D3_3 * g_sq - D1_2_sq * D2_1 * D2_3 * d1 * d3 - D1_2_sq * D2_1 * D3_1 * d2 * d3 -
        D1_2_sq * D2_3_sq * D3_1_sq * g_sq + D1_2_sq * D2_3 * D3_1 * d1 * d2 + 2 * D1_2 * D1_3 * D2_1_sq * D3_2 * D3_3 * g_sq -
        D1_2 * D1_3 * D2_1_sq * d2 * d3 - 2 * D1_2 * D1_3 * D2_1 * D2_2 * D3_1 * D3_3 * g_sq + D1_2 * D1_3 * D2_1 * D2_2 * d1 * d3 -
        2 * D1_2 * D1_3 * D2_1 * D2_3 * D3_1 * D3_2 * g_sq + D1_2 * D1_3 * D2_1 * D3_1 * d2_sq + D1_2 * D1_3 * D2_1 * D3_1 * d3_sq -
        D1_2 * D1_3 * D2_1 * D3_3 * d1 * d3 + 2 * D1_2 * D1_3 * D2_2 * D2_3 * D3_1_sq * g_sq - D1_2 * D1_3 * D2_2 * D3_1 * d1 * d2 -
        D1_2 * D1_3 * D3_1_sq * d2 * d3 + D1_2 * D1_3 * D3_1 * D3_3 * d1 * d2 - D1_2 * D2_1_sq * D3_2 * d1 * d3 +
        D1_2 * D2_1 * D2_2 * D3_1 * d1 * d3 + D1_2 * D2_1 * D2_3 * D3_2 * d1_sq + D1_2 * D2_1 * D2_3 * D3_2 * d3_sq -
        D1_2 * D2_1 * D2_3 * D3_3 * d2 * d3 - D1_2 * D2_1 * D3_1 * D3_3 * d1 * d3 - D1_2 * D2_1 * D3_2 * D3_3 * d2 * d3 +
        D1_2 * D2_1 * D3_3_sq * d1_sq + D1_2 * D2_1 * D3_3_sq * d2_sq - D1_2 * D2_2 * D2_3 * D3_1 * d1_sq -
        D1_2 * D2_2 * D2_3 * D3_1 * d3_sq + D1_2 * D2_2 * D2_3 * D3_3 * d1 * d3 + D1_2 * D2_2 * D3_1 * D3_3 * d2 * d3 -
        D1_2 * D2_2 * D3_3_sq * d1 * d2 + D1_2 * D2_3_sq * D3_1 * d2 * d3 - D1_2 * D2_3_sq * D3_2 * d1 * d3 +
        D1_2 * D2_3 * D3_1_sq * d1 * d3 - D1_2 * D2_3 * D3_1 * D3_3 * d1_sq - D1_2 * D2_3 * D3_1 * D3_3 * d2_sq +
        D1_2 * D2_3 * D3_2 * D3_3 * d1 * d2 - D1_3_sq * D2_1_sq * D3_2_sq * g_sq + 2 * D1_3_sq * D2_1 * D2_2 * D3_1 * D3_2 * g_sq -
        D1_3_sq * D2_1 * D3_1 * d2 * d3 + D1_3_sq * D2_1 * D3_2 * d1 * d3 - D1_3_sq * D2_2_sq * D3_1_sq * g_sq +
        D1_3_sq * D3_1_sq * d2_sq - D1_3_sq * D3_1 * D3_2 * d1 * d2 + D1_3 * D2_1_sq * D3_2 * d1 * d2 -
        D1_3 * D2_1 * D2_2 * D3_1 * d1 * d2 - D1_3 * D2_1 * D2_2 * D3_2 * d1_sq - D1_3 * D2_1 * D2_2 * D3_2 * d3_sq +
        D1_3 * D2_1 * D2_2 * D3_3 * d2 * d3 + D1_3 * D2_1 * D3_1 * D3_3 * d1 * d2 + D1_3 * D2_1 * D3_2_sq * d2 * d3 -
        D1_3 * D2_1 * D3_2 * D3_3 * d1_sq - D1_3 * D2_1 * D3_2 * D3_3 * d2_sq + D1_3 * D2_2_sq * D3_1 * d1_sq +
        D1_3 * D2_2_sq * D3_1 * d3_sq - D1_3 * D2_2_sq * D3_3 * d1 * d3 - D1_3 * D2_2 * D2_3 * D3_1 * d2 * d3 +
        D1_3 * D2_2 * D2_3 * D3_2 * d1 * d3 - D1_3 * D2_2 * D3_1 * D3_2 * d2 * d3 + D1_3 * D2_2 * D3_2 * D3_3 * d1 * d2 -
        D1_3 * D2_3 * D3_1_sq * d1 * d2 + D1_3 * D2_3 * D3_1 * D3_2 * d1_sq + D1_3 * D2_3 * D3_1 * D3_2 * d2_sq -
        D1_3 * D2_3 * D3_2_sq * d1 * d2 + D2_1 * D2_2 * D3_2 * D3_3 * d1 * d3 - D2_1 * D2_2 * D3_3_sq * d1 * d2 -
        D2_1 * D2_3 * D3_2_sq * d1 * d3 + D2_1 * D2_3 * D3_2 * D3_3 * d1 * d2 - D2_2_sq * D3_1 * D3_3 * d1 * d3 +
        D2_2_sq * D3_3_sq * d1_sq + D2_2 * D2_3 * D3_1 * D3_2 * d1 * d3 + D2_2 * D2_3 * D3_1 * D3_3 * d1 * d2 -
        2 * D2_2 * D2_3 * D3_2 * D3_3 * d1_sq - D2_3_sq * D3_1 * D3_2 * d1 * d2 + D2_3_sq * D3_2_sq * d1_sq) /
      g_sq;
  coeff(5) =
      (-(2 * D1_1_sq * D2_2_sq * D3_3 * g_sq - 2 * D1_1_sq * D2_2 * D2_3 * D3_2 * g_sq + 2 * D1_1_sq * D2_2 * D3_3_sq * g_sq -
         2 * D1_1_sq * D2_2 * d3_sq - 2 * D1_1_sq * D2_3 * D3_2 * D3_3 * g_sq + 2 * D1_1_sq * D2_3 * d2 * d3 +
         2 * D1_1_sq * D3_2 * d2 * d3 - 2 * D1_1_sq * D3_3 * d2_sq - 4 * D1_1 * D1_2 * D2_1 * D2_2 * D3_3 * g_sq +
         2 * D1_1 * D1_2 * D2_1 * D2_3 * D3_2 * g_sq - 2 * D1_1 * D1_2 * D2_1 * D3_3_sq * g_sq + 2 * D1_1 * D1_2 * D2_1 * d3_sq +
         2 * D1_1 * D1_2 * D2_2 * D2_3 * D3_1 * g_sq + 2 * D1_1 * D1_2 * D2_3 * D3_1 * D3_3 * g_sq - 2 * D1_1 * D1_2 * D2_3 * d1 * d3 -
         2 * D1_1 * D1_2 * D3_1 * d2 * d3 + 2 * D1_1 * D1_2 * D3_3 * d1 * d2 + 2 * D1_1 * D1_3 * D2_1 * D2_2 * D3_2 * g_sq +
         2 * D1_1 * D1_3 * D2_1 * D3_2 * D3_3 * g_sq - 2 * D1_1 * D1_3 * D2_1 * d2 * d3 - 2 * D1_1 * D1_3 * D2_2_sq * D3_1 * g_sq -
         4 * D1_1 * D1_3 * D2_2 * D3_1 * D3_3 * g_sq + 2 * D1_1 * D1_3 * D2_2 * d1 * d3 + 2 * D1_1 * D1_3 * D2_3 * D3_1 * D3_2 * g_sq +
         2 * D1_1 * D1_3 * D3_1 * d2_sq - 2 * D1_1 * D1_3 * D3_2 * d1 * d2 - 2 * D1_1 * D2_1 * D3_2 * d1 * d3 +
         2 * D1_1 * D2_1 * D3_3 * d1 * d2 + 2 * D1_1 * D2_2_sq * D3_3_sq * g_sq - 2 * D1_1 * D2_2_sq * d3_sq -
         4 * D1_1 * D2_2 * D2_3 * D3_2 * D3_3 * g_sq + 2 * D1_1 * D2_2 * D2_3 * d2 * d3 + 2 * D1_1 * D2_2 * D3_1 * d1 * d3 +
         2 * D1_1 * D2_2 * D3_2 * d2 * d3 + 2 * D1_1 * D2_3_sq * D3_2_sq * g_sq - 2 * D1_1 * D2_3 * D3_1 * d1 * d2 -
         2 * D1_1 * D2_3 * D3_2 * d2_sq - 2 * D1_1 * D2_3 * D3_2 * d3_sq + 2 * D1_1 * D2_3 * D3_3 * d2 * d3 +
         2 * D1_1 * D3_2 * D3_3 * d2 * d3 - 2 * D1_1 * D3_3_sq * d2_sq + 2 * D1_2_sq * D2_1_sq * D3_3 * g_sq -
         2 * D1_2_sq * D2_1 * D2_3 * D3_1 * g_sq - 2 * D1_2 * D1_3 * D2_1_sq * D3_2 * g_sq + 2 * D1_2 * D1_3 * D2_1 * D2_2 * D3_1 * g_sq +
         2 * D1_2 * D1_3 * D2_1 * D3_1 * D3_3 * g_sq - 2 * D1_2 * D1_3 * D2_3 * D3_1_sq * g_sq - 2 * D1_2 * D2_1 * D2_2 * D3_3_sq * g_sq +
         2 * D1_2 * D2_1 * D2_2 * d3_sq + 2 * D1_2 * D2_1 * D2_3 * D3_2 * D3_3 * g_sq - 2 * D1_2 * D2_1 * D3_3 * d1_sq -
         2 * D1_2 * D2_1 * D3_3 * d2_sq + 2 * D1_2 * D2_2 * D2_3 * D3_1 * D3_3 * g_sq - 2 * D1_2 * D2_2 * D2_3 * d1 * d3 -
         2 * D1_2 * D2_2 * D3_1 * d2 * d3 + 2 * D1_2 * D2_2 * D3_3 * d1 * d2 - 2 * D1_2 * D2_3_sq * D3_1 * D3_2 * g_sq +
         2 * D1_2 * D2_3 * D3_1 * d1_sq + 2 * D1_2 * D2_3 * D3_1 * d2_sq + 2 * D1_2 * D2_3 * D3_1 * d3_sq -
         2 * D1_2 * D2_3 * D3_3 * d1 * d3 - 2 * D1_2 * D3_1 * D3_3 * d2 * d3 + 2 * D1_2 * D3_3_sq * d1 * d2 -
         2 * D1_3_sq * D2_1 * D3_1 * D3_2 * g_sq + 2 * D1_3_sq * D2_2 * D3_1_sq * g_sq + 2 * D1_3 * D2_1 * D2_2 * D3_2 * D3_3 * g_sq -
         2 * D1_3 * D2_1 * D2_2 * d2 * d3 - 2 * D1_3 * D2_1 * D2_3 * D3_2_sq * g_sq + 2 * D1_3 * D2_1 * D3_2 * d1_sq +
         2 * D1_3 * D2_1 * D3_2 * d2_sq + 2 * D1_3 * D2_1 * D3_2 * d3_sq - 2 * D1_3 * D2_1 * D3_3 * d2 * d3 -
         2 * D1_3 * D2_2_sq * D3_1 * D3_3 * g_sq + 2 * D1_3 * D2_2_sq * d1 * d3 + 2 * D1_3 * D2_2 * D2_3 * D3_1 * D3_2 * g_sq -
         2 * D1_3 * D2_2 * D3_1 * d1_sq - 2 * D1_3 * D2_2 * D3_1 * d3_sq - 2 * D1_3 * D2_2 * D3_2 * d1 * d2 +
         2 * D1_3 * D2_2 * D3_3 * d1 * d3 + 2 * D1_3 * D3_1 * D3_3 * d2_sq - 2 * D1_3 * D3_2 * D3_3 * d1 * d2 -
         2 * D2_1 * D2_2 * D3_2 * d1 * d3 + 2 * D2_1 * D2_2 * D3_3 * d1 * d2 - 2 * D2_1 * D3_2 * D3_3 * d1 * d3 +
         2 * D2_1 * D3_3_sq * d1 * d2 + 2 * D2_2_sq * D3_1 * d1 * d3 - 2 * D2_2_sq * D3_3 * d1_sq - 2 * D2_2 * D2_3 * D3_1 * d1 * d2 +
         2 * D2_2 * D2_3 * D3_2 * d1_sq + 2 * D2_2 * D3_1 * D3_3 * d1 * d3 - 2 * D2_2 * D3_3_sq * d1_sq -
         2 * D2_3 * D3_1 * D3_3 * d1 * d2 + 2 * D2_3 * D3_2 * D3_3 * d1_sq) /
       g_sq);
  coeff(4) =
      ((D1_1_sq * D2_2_sq * g_sq + 4 * D1_1_sq * D2_2 * D3_3 * g_sq - 2 * D1_1_sq * D2_3 * D3_2 * g_sq + D1_1_sq * D3_3_sq * g_sq -
        D1_1_sq * d2_sq - D1_1_sq * d3_sq - 2 * D1_1 * D1_2 * D2_1 * D2_2 * g_sq - 4 * D1_1 * D1_2 * D2_1 * D3_3 * g_sq +
        2 * D1_1 * D1_2 * D2_3 * D3_1 * g_sq + D1_1 * D1_2 * d1 * d2 + 2 * D1_1 * D1_3 * D2_1 * D3_2 * g_sq -
        4 * D1_1 * D1_3 * D2_2 * D3_1 * g_sq - 2 * D1_1 * D1_3 * D3_1 * D3_3 * g_sq + D1_1 * D1_3 * d1 * d3 + D1_1 * D2_1 * d1 * d2 +
        4 * D1_1 * D2_2_sq * D3_3 * g_sq - 4 * D1_1 * D2_2 * D2_3 * D3_2 * g_sq + 4 * D1_1 * D2_2 * D3_3_sq * g_sq -
        4 * D1_1 * D2_2 * d3_sq - 4 * D1_1 * D2_3 * D3_2 * D3_3 * g_sq + 4 * D1_1 * D2_3 * d2 * d3 + D1_1 * D3_1 * d1 * d3 +
        4 * D1_1 * D3_2 * d2 * d3 - 4 * D1_1 * D3_3 * d2_sq + D1_2_sq * D2_1_sq * g_sq + 2 * D1_2 * D1_3 * D2_1 * D3_1 * g_sq -
        4 * D1_2 * D2_1 * D2_2 * D3_3 * g_sq + 2 * D1_2 * D2_1 * D2_3 * D3_2 * g_sq - 2 * D1_2 * D2_1 * D3_3_sq * g_sq -
        D1_2 * D2_1 * d1_sq - D1_2 * D2_1 * d2_sq + 2 * D1_2 * D2_1 * d3_sq + 2 * D1_2 * D2_2 * D2_3 * D3_1 * g_sq +
        D1_2 * D2_2 * d1 * d2 + 2 * D1_2 * D2_3 * D3_1 * D3_3 * g_sq - 3 * D1_2 * D2_3 * d1 * d3 - 3 * D1_2 * D3_1 * d2 * d3 +
        4 * D1_2 * D3_3 * d1 * d2 + D1_3_sq * D3_1_sq * g_sq + 2 * D1_3 * D2_1 * D2_2 * D3_2 * g_sq +
        2 * D1_3 * D2_1 * D3_2 * D3_3 * g_sq - 3 * D1_3 * D2_1 * d2 * d3 - 2 * D1_3 * D2_2_sq * D3_1 * g_sq -
        4 * D1_3 * D2_2 * D3_1 * D3_3 * g_sq + 4 * D1_3 * D2_2 * d1 * d3 + 2 * D1_3 * D2_3 * D3_1 * D3_2 * g_sq - D1_3 * D3_1 * d1_sq +
        2 * D1_3 * D3_1 * d2_sq - D1_3 * D3_1 * d3_sq - 3 * D1_3 * D3_2 * d1 * d2 + D1_3 * D3_3 * d1 * d3 + D2_1 * D2_2 * d1 * d2 -
        3 * D2_1 * D3_2 * d1 * d3 + 4 * D2_1 * D3_3 * d1 * d2 + D2_2_sq * D3_3_sq * g_sq - D2_2_sq * d1_sq - D2_2_sq * d3_sq -
        2 * D2_2 * D2_3 * D3_2 * D3_3 * g_sq + D2_2 * D2_3 * d2 * d3 + 4 * D2_2 * D3_1 * d1 * d3 + D2_2 * D3_2 * d2 * d3 -
        4 * D2_2 * D3_3 * d1_sq + D2_3_sq * D3_2_sq * g_sq - 3 * D2_3 * D3_1 * d1 * d2 + 2 * D2_3 * D3_2 * d1_sq - D2_3 * D3_2 * d2_sq -
        D2_3 * D3_2 * d3_sq + D2_3 * D3_3 * d2 * d3 + D3_1 * D3_3 * d1 * d3 + D3_2 * D3_3 * d2 * d3 - D3_3_sq * d1_sq - D3_3_sq * d2_sq) /
       g_sq);
  coeff(3) =
      ((2 * D1_1 * d2_sq + 2 * D1_1 * d3_sq + 2 * D2_2 * d1_sq + 2 * D2_2 * d3_sq + 2 * D3_3 * d1_sq + 2 * D3_3 * d2_sq -
        2 * D1_1 * D2_2_sq * g_sq - 2 * D1_1_sq * D2_2 * g_sq - 2 * D1_1 * D3_3_sq * g_sq - 2 * D1_1_sq * D3_3 * g_sq -
        2 * D2_2 * D3_3_sq * g_sq - 2 * D2_2_sq * D3_3 * g_sq - 2 * D1_2 * d1 * d2 - 2 * D1_3 * d1 * d3 - 2 * D2_1 * d1 * d2 -
        2 * D2_3 * d2 * d3 - 2 * D3_1 * d1 * d3 - 2 * D3_2 * d2 * d3 + 2 * D1_1 * D1_2 * D2_1 * g_sq + 2 * D1_1 * D1_3 * D3_1 * g_sq +
        2 * D1_2 * D2_1 * D2_2 * g_sq - 8 * D1_1 * D2_2 * D3_3 * g_sq + 4 * D1_1 * D2_3 * D3_2 * g_sq + 4 * D1_2 * D2_1 * D3_3 * g_sq -
        2 * D1_2 * D2_3 * D3_1 * g_sq - 2 * D1_3 * D2_1 * D3_2 * g_sq + 4 * D1_3 * D2_2 * D3_1 * g_sq + 2 * D1_3 * D3_1 * D3_3 * g_sq +
        2 * D2_2 * D2_3 * D3_2 * g_sq + 2 * D2_3 * D3_2 * D3_3 * g_sq) /
       g_sq);
  coeff(2) =
      (-(d1_sq + d2_sq + d3_sq - D1_1_sq * g_sq - D2_2_sq * g_sq - D3_3_sq * g_sq - 4 * D1_1 * D2_2 * g_sq + 2 * D1_2 * D2_1 * g_sq -
         4 * D1_1 * D3_3 * g_sq + 2 * D1_3 * D3_1 * g_sq - 4 * D2_2 * D3_3 * g_sq + 2 * D2_3 * D3_2 * g_sq) /
       g_sq);
  coeff(1) = (-(2 * D1_1 * g_sq + 2 * D2_2 * g_sq + 2 * D3_3 * g_sq) / g_sq);
  coeff(0) = 1;
  // finally return
  return coeff;
}
};
} // namespace ov_init
#endif /* OV_INIT_HELPER */