initial commit

This commit is contained in:
2022-08-05 08:23:25 +03:00
commit 5ecdc6abcf
387 changed files with 3010095 additions and 0 deletions

54
ov_msckf/CMakeLists.txt Normal file
View File

@@ -0,0 +1,54 @@
cmake_minimum_required(VERSION 3.3)
project(ov_msckf)

# Include libraries (prefer OpenCV 3, and fall back to OpenCV 4 if 3 is not found)
# The OpenCV version needs to match the one used by cv_bridge otherwise you will get a segmentation fault!
find_package(Eigen3 REQUIRED)
find_package(OpenCV 3 QUIET)
if (NOT OpenCV_FOUND)
    find_package(OpenCV 4 REQUIRED)
endif ()
find_package(Boost REQUIRED COMPONENTS system filesystem thread date_time)
find_package(Ceres REQUIRED)

# Location of the header-only Sophus library.
# NOTE(review): this used to be a hardcoded machine-specific path; keep it as the
# default for backward compatibility, but allow overriding at configure time:
#   cmake -DSOPHUS_INCLUDE_DIR=/path/to/Sophus ..
set(SOPHUS_INCLUDE_DIR "/home/pi/work_drivecast/slams/ORB-SLAM3_Linux/Thirdparty/Sophus"
        CACHE PATH "Path to the Sophus header-only library")
include_directories(${SOPHUS_INCLUDE_DIR})

message(STATUS "OPENCV: ${OpenCV_VERSION} | BOOST: ${Boost_VERSION} | CERES: ${Ceres_VERSION}")

# If we will compile with aruco support
option(ENABLE_ARUCO_TAGS "Enable or disable aruco tag (disable if no contrib modules)" ON)
if (NOT ENABLE_ARUCO_TAGS)
    add_definitions(-DENABLE_ARUCO_TAGS=0)
    message(WARNING "DISABLING ARUCOTAG TRACKING!")
else ()
    add_definitions(-DENABLE_ARUCO_TAGS=1)
endif ()

# We need c++14 for ROS2, thus just require it for everybody
# NOTE: To future self, hope this isn't an issue...
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

# Enable compile optimizations
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fsee -fomit-frame-pointer -fno-signed-zeros -fno-math-errno -funroll-loops")

# Enable debug flags (use if you want to debug in gdb)
# NOTE: -fno-omit-frame-pointer is appended after -fomit-frame-pointer above,
# so the later flag wins and frame pointers are kept for debugging
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g3 -Wall -Wuninitialized -fno-omit-frame-pointer")

# Find our ROS version!
# NOTE: Default to using the ROS1 package if both are in our environment
# NOTE: https://github.com/romainreignier/share_ros1_ros2_lib_demo
find_package(catkin QUIET COMPONENTS roscpp)
find_package(ament_cmake QUIET)
if (catkin_FOUND)
    message(STATUS "ROS *1* version found, building ROS1.cmake")
    include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/ROS1.cmake)
elseif (ament_cmake_FOUND)
    message(STATUS "ROS *2* version found, building ROS2.cmake")
    include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/ROS2.cmake)
else ()
    message(STATUS "No ROS versions found, building ROS1.cmake")
    include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/ROS1.cmake)
endif ()

167
ov_msckf/cmake/ROS1.cmake Normal file
View File

@@ -0,0 +1,167 @@
cmake_minimum_required(VERSION 3.3)

# The catkin components we need; the same list is handed to find_package()
# and re-exported through catkin_package(CATKIN_DEPENDS ...)
set(ros1_catkin_components
        roscpp rosbag tf std_msgs geometry_msgs sensor_msgs nav_msgs
        visualization_msgs image_transport cv_bridge ov_core ov_init)

# Find ROS build system
find_package(catkin QUIET COMPONENTS ${ros1_catkin_components})

# Describe ROS project
option(ENABLE_ROS "Enable or disable building with ROS (if it is found)" ON)
if (catkin_FOUND AND ENABLE_ROS)
    add_definitions(-DROS_AVAILABLE=1)
    catkin_package(
            CATKIN_DEPENDS ${ros1_catkin_components}
            INCLUDE_DIRS src/
            LIBRARIES ov_msckf_lib
    )
else ()
    # No ROS: fall back to plain GNU install locations, but keep the catkin
    # destination variable names so the install() rules below work unchanged
    add_definitions(-DROS_AVAILABLE=0)
    message(WARNING "BUILDING WITHOUT ROS!")
    include(GNUInstallDirs)
    set(CATKIN_PACKAGE_LIB_DESTINATION "${CMAKE_INSTALL_LIBDIR}")
    set(CATKIN_PACKAGE_BIN_DESTINATION "${CMAKE_INSTALL_BINDIR}")
    set(CATKIN_GLOBAL_INCLUDE_DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
endif ()
# Include our header files
include_directories(
        src
        ${EIGEN3_INCLUDE_DIR}
        ${Boost_INCLUDE_DIRS}
        ${CERES_INCLUDE_DIRS}
        ${catkin_INCLUDE_DIRS}
)

# Set link libraries used by all binaries
list(APPEND thirdparty_libraries
        ${Boost_LIBRARIES}
        ${OpenCV_LIBRARIES}
        ${CERES_LIBRARIES}
        ${catkin_LIBRARIES}
)

# If we are not building with ROS then we need to manually link to its headers
# This isn't that elegant of a way, but this at least allows for building without ROS
# If we had a root cmake we could do this: https://stackoverflow.com/a/11217008/7718197
# But since we don't we need to basically build all the cpp / h files explicitly :(
# NOTE: use PROJECT_SOURCE_DIR (this project's root) rather than CMAKE_SOURCE_DIR,
# which points at the top of the whole build tree and resolves to the wrong
# directory when ov_msckf is built as a subproject of a larger workspace.
if (NOT catkin_FOUND OR NOT ENABLE_ROS)
    message(STATUS "MANUALLY LINKING TO OV_CORE LIBRARY....")
    include_directories(${PROJECT_SOURCE_DIR}/../ov_core/src/)
    file(GLOB_RECURSE OVCORE_LIBRARY_SOURCES "${PROJECT_SOURCE_DIR}/../ov_core/src/*.cpp")
    # Exclude the standalone test binaries from the library sources
    list(FILTER OVCORE_LIBRARY_SOURCES EXCLUDE REGEX ".*test_webcam\\.cpp$")
    list(FILTER OVCORE_LIBRARY_SOURCES EXCLUDE REGEX ".*test_tracking\\.cpp$")
    list(APPEND LIBRARY_SOURCES ${OVCORE_LIBRARY_SOURCES})
    file(GLOB_RECURSE OVCORE_LIBRARY_HEADERS "${PROJECT_SOURCE_DIR}/../ov_core/src/*.h")
    list(APPEND LIBRARY_HEADERS ${OVCORE_LIBRARY_HEADERS})

    message(STATUS "MANUALLY LINKING TO OV_INIT LIBRARY....")
    include_directories(${PROJECT_SOURCE_DIR}/../ov_init/src/)
    file(GLOB_RECURSE OVINIT_LIBRARY_SOURCES "${PROJECT_SOURCE_DIR}/../ov_init/src/*.cpp")
    # Exclude the ov_init tests and its Simulator from the non-ROS build
    list(FILTER OVINIT_LIBRARY_SOURCES EXCLUDE REGEX ".*test_dynamic_init\\.cpp$")
    list(FILTER OVINIT_LIBRARY_SOURCES EXCLUDE REGEX ".*test_dynamic_mle\\.cpp$")
    list(FILTER OVINIT_LIBRARY_SOURCES EXCLUDE REGEX ".*test_simulation\\.cpp$")
    list(FILTER OVINIT_LIBRARY_SOURCES EXCLUDE REGEX ".*Simulator\\.cpp$")
    list(APPEND LIBRARY_SOURCES ${OVINIT_LIBRARY_SOURCES})
    file(GLOB_RECURSE OVINIT_LIBRARY_HEADERS "${PROJECT_SOURCE_DIR}/../ov_init/src/*.h")
    list(FILTER OVINIT_LIBRARY_HEADERS EXCLUDE REGEX ".*Simulator\\.h$")
    list(APPEND LIBRARY_HEADERS ${OVINIT_LIBRARY_HEADERS})
endif ()
##################################################
# Make the shared library
##################################################
list(APPEND LIBRARY_SOURCES
        src/dummy.cpp
        src/sim/Simulator.cpp
        src/state/State.cpp
        src/state/StateHelper.cpp
        src/state/Propagator.cpp
        src/core/VioManager.cpp
        src/update/UpdaterHelper.cpp
        src/update/UpdaterMSCKF.cpp
        src/update/UpdaterSLAM.cpp
        src/update/UpdaterZeroVelocity.cpp
)

# Location of the header-only Sophus library.
# NOTE(review): was a hardcoded machine-specific path; keep it as the default
# but make it overridable with -DSOPHUS_INCLUDE_DIR=... at configure time.
# (set(... CACHE ...) only defines the entry if it is not already in the cache.)
set(SOPHUS_INCLUDE_DIR "/home/pi/work_drivecast/slams/ORB-SLAM3_Linux/Thirdparty/Sophus"
        CACHE PATH "Path to the Sophus header-only library")
include_directories(${SOPHUS_INCLUDE_DIR})

# The ROS1 visualizer is only compiled when we actually build against ROS
if (catkin_FOUND AND ENABLE_ROS)
    list(APPEND LIBRARY_SOURCES src/ros/ROS1Visualizer.cpp)
endif ()
file(GLOB_RECURSE LIBRARY_HEADERS "src/*.h")
add_library(ov_msckf_lib SHARED ${LIBRARY_SOURCES} ${LIBRARY_HEADERS})
target_link_libraries(ov_msckf_lib ${thirdparty_libraries})
target_include_directories(ov_msckf_lib PUBLIC src/)

# Install the shared library and its public headers
install(TARGETS ov_msckf_lib
        ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
        RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
install(DIRECTORY src/
        DESTINATION ${CATKIN_GLOBAL_INCLUDE_DESTINATION}
        FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp"
)
##################################################
# Make binary files!
##################################################

# Every binary follows the same recipe: compile src/<name>.cpp, link it
# against the shared library, and install it to the catkin destinations.
set(msckf_binaries "")
if (catkin_FOUND AND ENABLE_ROS)
    # These two need ROS at runtime, so only build them with catkin present
    list(APPEND msckf_binaries ros1_serial_msckf run_subscribe_msckf)
endif ()
list(APPEND msckf_binaries run_simulation test_sim_meas test_sim_repeat)

foreach (binary IN LISTS msckf_binaries)
    add_executable(${binary} src/${binary}.cpp)
    target_link_libraries(${binary} ov_msckf_lib ${thirdparty_libraries})
    install(TARGETS ${binary}
            ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
            LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
            RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
    )
endforeach ()

##################################################
# Launch files!
##################################################
install(DIRECTORY launch/
        DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/launch
)

115
ov_msckf/cmake/ROS2.cmake Normal file
View File

@@ -0,0 +1,115 @@
cmake_minimum_required(VERSION 3.3)

# ROS2 ament packages this project depends on; the same list drives both the
# find_package() calls below and ament_target_dependencies() later on.
list(APPEND ament_libraries
        rclcpp
        tf2_ros
        tf2_geometry_msgs
        std_msgs
        geometry_msgs
        sensor_msgs
        nav_msgs
        cv_bridge
        image_transport
        ov_core
        ov_init
)

# Find ROS build system
find_package(ament_cmake REQUIRED)
foreach (ament_dep IN LISTS ament_libraries)
    find_package(${ament_dep} REQUIRED)
endforeach ()

# Describe ROS project
option(ENABLE_ROS "Enable or disable building with ROS (if it is found)" ON)
if (NOT ENABLE_ROS)
    message(FATAL_ERROR "Build with ROS1.cmake if you don't have ROS.")
endif ()
add_definitions(-DROS_AVAILABLE=2)

# Include our header files
include_directories(
        src
        ${EIGEN3_INCLUDE_DIR}
        ${Boost_INCLUDE_DIRS}
        ${CERES_INCLUDE_DIRS}
)

# Set link libraries used by all binaries
list(APPEND thirdparty_libraries
        ${Boost_LIBRARIES}
        ${CERES_LIBRARIES}
        ${OpenCV_LIBRARIES}
)
##################################################
# Make the shared library
##################################################

# All library sources; the ROS2 visualizer is included directly because this
# file is only ever used when building against ROS2.
list(APPEND LIBRARY_SOURCES
        src/dummy.cpp
        src/sim/Simulator.cpp
        src/state/State.cpp
        src/state/StateHelper.cpp
        src/state/Propagator.cpp
        src/core/VioManager.cpp
        src/update/UpdaterHelper.cpp
        src/update/UpdaterMSCKF.cpp
        src/update/UpdaterSLAM.cpp
        src/update/UpdaterZeroVelocity.cpp
        src/ros/ROS2Visualizer.cpp
)
file(GLOB_RECURSE LIBRARY_HEADERS "src/*.h")

# Build the library and wire up its dependencies
add_library(ov_msckf_lib SHARED ${LIBRARY_SOURCES} ${LIBRARY_HEADERS})
ament_target_dependencies(ov_msckf_lib ${ament_libraries})
target_link_libraries(ov_msckf_lib ${thirdparty_libraries})
target_include_directories(ov_msckf_lib PUBLIC src/)

# Install the library plus its headers, and export both to downstream packages
install(TARGETS ov_msckf_lib
        LIBRARY DESTINATION lib
        RUNTIME DESTINATION bin
        PUBLIC_HEADER DESTINATION include
)
install(DIRECTORY src/
        DESTINATION include
        FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp"
)
ament_export_include_directories(include)
ament_export_libraries(ov_msckf_lib)
##################################################
# Make binary files!
##################################################

# Every binary follows the same recipe: compile src/<name>.cpp, pull in the
# ament dependencies, link the shared library, and install into the package.
foreach (binary run_subscribe_msckf run_simulation test_sim_meas test_sim_repeat)
    add_executable(${binary} src/${binary}.cpp)
    ament_target_dependencies(${binary} ${ament_libraries})
    target_link_libraries(${binary} ov_msckf_lib ${thirdparty_libraries})
    install(TARGETS ${binary} DESTINATION lib/${PROJECT_NAME})
endforeach ()

# Install launch and config directories
install(DIRECTORY launch/ DESTINATION share/${PROJECT_NAME}/launch/)
install(DIRECTORY ../config/ DESTINATION share/${PROJECT_NAME}/config/)

# finally define this as the package
ament_package()

View File

@@ -0,0 +1,365 @@
Panels:
- Class: rviz/Displays
Help Height: 0
Name: Displays
Property Tree Widget:
Expanded:
- /Camera1
- /Camera1/Status1
Splitter Ratio: 0.6011396050453186
Tree Height: 345
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: ""
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 112; 112; 115
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 80
Reference Frame: <Fixed Frame>
Value: true
- Class: rviz/Image
Enabled: true
Image Topic: /ov_msckf/trackhist
Max Value: 1
Median window: 5
Min Value: 0
Name: Image Tracks
Normalize Range: true
Queue Size: 2
Transport Hint: compressed
Unreliable: false
Value: true
Camera Info Topic: /ov_msckf/camera_info
- Class: rviz/Image
Enabled: true
Image Topic: /ov_msckf/loop_depth_colored
Max Value: 1
Median window: 5
Min Value: 0
Name: Current Depths
Normalize Range: true
Queue Size: 2
Transport Hint: compressed
Unreliable: false
Value: true
Camera Info Topic: /ov_msckf/camera_info
- Alpha: 1
Buffer Length: 1
Class: rviz/Path
Color: 0; 255; 0
Enabled: true
Head Diameter: 0.30000001192092896
Head Length: 0.20000000298023224
Length: 0.30000001192092896
Line Style: Billboards
Line Width: 0.009999999776482582
Name: Path VIO
Offset:
X: 0
Y: 0
Z: 0
Pose Color: 255; 85; 255
Pose Style: None
Queue Size: 10
Radius: 0.029999999329447746
Shaft Diameter: 0.10000000149011612
Shaft Length: 0.10000000149011612
Topic: /ov_msckf/pathimu
Unreliable: false
Value: true
- Alpha: 1
Buffer Length: 1
Class: rviz/Path
Color: 0; 255; 255
Enabled: true
Head Diameter: 0.30000001192092896
Head Length: 0.20000000298023224
Length: 0.30000001192092896
Line Style: Billboards
Line Width: 0.029999999329447746
Name: Path GT
Offset:
X: 0
Y: 0
Z: 0
Pose Color: 255; 85; 255
Pose Style: None
Queue Size: 10
Radius: 0.029999999329447746
Shaft Diameter: 0.10000000149011612
Shaft Length: 0.10000000149011612
Topic: /ov_msckf/pathgt
Unreliable: false
Value: true
- Alpha: 0.800000011920929
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 252; 175; 62
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: MSCKF Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 8
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_msckf
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 0; 0
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: SLAM Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 8
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_slam
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 117; 80; 123
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: ARUCO Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 15
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_aruco
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud
Color: 255; 255; 255
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: ACTIVE Features
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/loop_feats
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 127
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: SIM Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_sim
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Class: rviz/Camera
Camera Info Topic: /ov_msckf/camera_info
Enabled: true
Image Rendering: background and overlay
Image Topic: /ov_msckf/trackhist
Name: Camera
Overlay Alpha: 0.5
Queue Size: 2
Transport Hint: raw
Unreliable: false
Value: true
Visibility:
ACTIVE Features: true
ARUCO Points: true
Current Depths: true
Grid: true
Image Tracks: true
MSCKF Points: true
Path GT: true
Path VIO: true
SIM Points: true
SLAM Points: true
TF: true
Value: true
Zoom Factor: 1
Enabled: true
Global Options:
Background Color: 44; 44; 44
Default Light: true
Fixed Frame: global
Frame Rate: 60
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 12.363367080688477
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Field of View: 0.7853981852531433
Focal Point:
X: 0
Y: 0
Z: 0
Focal Shape Fixed Size: false
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.8953980803489685
Target Frame: <Fixed Frame>
Yaw: 0.8253981471061707
Saved: ~
Window Geometry:
Camera:
collapsed: false
Current Depths:
collapsed: false
Displays:
collapsed: false
Height: 1016
Hide Left Dock: false
Hide Right Dock: true
Image Tracks:
collapsed: false
QMainWindow State: 000000ff00000000fd000000040000000000000438000003c5fc0200000012fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000001600000196000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000180049006d00610067006500200054007200610063006b007300000000e5000000cd0000000000000000fb0000001a0054007200610063006b00200048006900730074006f0072007901000003d0000000160000000000000000fb0000000a0049006d00610067006500000001170000011c0000000000000000fb0000001c00430075007200720065006e0074002000440065007000740068007300000001c9000000b80000000000000000fb0000000a0049006d0061006700650000000239000000480000000000000000fb000000140054007200610063006b0020004c0069006e006501000003ec000000160000000000000000fb0000000c00430061006d00650072006100000001b8000000160000000000000000fb000000180049006d00610067006500200054007200610063006b007301000001b2000000a90000001600fffffffb0000001c00430075007200720065006e007400200044006500700074006800730100000261000000a80000001600fffffffb0000000c00430061006d006500720061010000030f000000cc0000001600ffffff000000010000010f000002a4fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000000000002a4000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b20000000000000000000000020000073f00000295fc0100000005fb0000000800430041004d003000000000000000020c0000000000000000fb0
000000800430041004d00310000000000000002ef0000000000000000fb0000000800430041004d00320000000000000003850000000000000000fb0000000800430041004d003300000000000000073f0000000000000000fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005a300000158fc0100000002fb0000000800540069006d0065000000000000000570000002eb00fffffffb0000000800540069006d0065010000000000000450000000000000000000000342000003c500000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730000000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1920
X: 0
Y: 27

View File

@@ -0,0 +1,329 @@
Panels:
- Class: rviz/Displays
Help Height: 0
Name: Displays
Property Tree Widget:
Expanded: ~
Splitter Ratio: 0.6000000238418579
Tree Height: 467
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: ""
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 10
Class: rviz/Grid
Color: 112; 112; 115
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 500
Reference Frame: <Fixed Frame>
Value: true
- Class: rviz/TF
Enabled: true
Frame Timeout: 999
Frames:
All Enabled: true
Marker Alpha: 1
Marker Scale: 50
Name: TF
Show Arrows: false
Show Axes: true
Show Names: true
Tree:
{}
Update Interval: 0
Value: true
- Class: rviz/Image
Enabled: true
Image Topic: /ov_msckf/trackhist
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Queue Size: 2
Transport Hint: raw
Unreliable: false
Value: true
- Alpha: 1
Buffer Length: 1
Class: rviz/Path
Color: 0; 255; 0
Enabled: true
Head Diameter: 0.30000001192092896
Head Length: 0.20000000298023224
Length: 0.30000001192092896
Line Style: Billboards
Line Width: 1
Name: Path VIO
Offset:
X: 0
Y: 0
Z: 0
Pose Color: 255; 85; 255
Pose Style: None
Queue Size: 10
Radius: 0.029999999329447746
Shaft Diameter: 0.10000000149011612
Shaft Length: 0.10000000149011612
Topic: /ov_msckf/pathimu
Unreliable: false
Value: true
- Alpha: 1
Buffer Length: 1
Class: rviz/Path
Color: 0; 255; 255
Enabled: true
Head Diameter: 0.30000001192092896
Head Length: 0.20000000298023224
Length: 0.30000001192092896
Line Style: Billboards
Line Width: 1
Name: Path GT
Offset:
X: 0
Y: 0
Z: 0
Pose Color: 255; 85; 255
Pose Style: None
Queue Size: 10
Radius: 0.029999999329447746
Shaft Diameter: 0.10000000149011612
Shaft Length: 0.10000000149011612
Topic: /ov_msckf/pathgt
Unreliable: false
Value: true
- Alpha: 0.4000000059604645
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 252; 175; 62
Color Transformer: FlatColor
Decay Time: 5
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: MSCKF Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 2
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_msckf
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 0; 0
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: SLAM Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 5
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_slam
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 170; 0
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: ARUCO Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 15
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_aruco
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 127
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: SIM Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_sim
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud
Color: 255; 255; 255
Color Transformer: Intensity
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: Active Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/loop_feats
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 44; 44; 44
Default Light: true
Fixed Frame: global
Frame Rate: 60
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Angle: 0
Class: rviz/TopDownOrtho
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Scale: 2.45080828666687
Target Frame: imu
X: 0
Y: 0
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 972
Hide Left Dock: false
Hide Right Dock: true
Image:
collapsed: false
QMainWindow State: 000000ff00000000fd00000004000000000000017200000210fc020000000cfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000001600000210000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006500000001a4000000a10000000000000000fb0000001a0054007200610063006b00200048006900730074006f0072007901000003d0000000160000000000000000fb000000140054007200610063006b0020004c0069006e006501000003ec000000160000000000000000fb0000001c00430075007200720065006e00740020004400650070007400680073000000024b000000160000000000000000000000010000010f000002b7fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000016000002b7000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b20000000000000000000000020000073f00000295fc0100000005fb0000000800430041004d003000000000000000020c0000000000000000fb0000000800430041004d00310000000000000002ef0000000000000000fb0000000800430041004d00320000000000000003850000000000000000fb0000000800430041004d003300000000000000073f0000000000000000fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000006ab00000183fc0100000004fb0000000a0049006d0061006700650100000000000006ab0000005e00fffffffb0000000a0049006d00610067006501000000000000073f0000000000000000fb0000000800540069006d0065000000000000000570000002eb00fffffffb00000008005400690
06d00650100000000000004500000000000000000000005330000021000000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730000000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1707
X: 2243
Y: 1301

View File

@@ -0,0 +1,353 @@
Panels:
- Class: rviz_common/Displays
Help Height: 0
Name: Displays
Property Tree Widget:
Expanded: ~
Splitter Ratio: 0.4882352948188782
Tree Height: 272
- Class: rviz_common/Selection
Name: Selection
- Class: rviz_common/Tool Properties
Expanded:
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz_common/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz_default_plugins/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Class: rviz_default_plugins/TF
Enabled: true
Frame Timeout: 120
Frames:
All Enabled: true
cam0:
Value: true
global:
Value: true
imu:
Value: true
truth:
Value: true
Marker Scale: 1
Name: TF
Show Arrows: false
Show Axes: true
Show Names: true
Tree:
global:
imu:
cam0:
{}
truth:
{}
Update Interval: 0
Value: true
- Class: rviz_default_plugins/Image
Enabled: true
Max Value: 1
Median window: 5
Min Value: 0
Name: Image Tracks
Normalize Range: true
Queue Size: 10
Topic: /ov_msckf/trackhist
Unreliable: false
Value: true
- Class: rviz_default_plugins/Image
Enabled: false
Max Value: 1
Median window: 5
Min Value: 0
Name: Current Depths
Normalize Range: true
Queue Size: 10
Topic: /ov_msckf/loop_depth_colored
Unreliable: false
Value: false
- Alpha: 1
Buffer Length: 1
Class: rviz_default_plugins/Path
Color: 25; 255; 0
Enabled: true
Head Diameter: 0.30000001192092896
Head Length: 0.20000000298023224
Length: 0.30000001192092896
Line Style: Billboards
Line Width: 0.009999999776482582
Name: Path VIO
Offset:
X: 0
Y: 0
Z: 0
Pose Color: 255; 85; 255
Pose Style: None
Radius: 0.029999999329447746
Shaft Diameter: 0.10000000149011612
Shaft Length: 0.10000000149011612
Topic: /ov_msckf/pathimu
Unreliable: false
Value: true
- Alpha: 1
Buffer Length: 1
Class: rviz_default_plugins/Path
Color: 0; 255; 255
Enabled: true
Head Diameter: 0.30000001192092896
Head Length: 0.20000000298023224
Length: 0.30000001192092896
Line Style: Billboards
Line Width: 0.009999999776482582
Name: Path GT
Offset:
X: 0
Y: 0
Z: 0
Pose Color: 255; 85; 255
Pose Style: None
Radius: 0.029999999329447746
Shaft Diameter: 0.10000000149011612
Shaft Length: 0.10000000149011612
Topic: /ov_msckf/pathgt
Unreliable: false
Value: true
- Alpha: 0.800000011920929
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud2
Color: 252; 175; 62
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: MSCKF Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 8
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_msckf
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud2
Color: 255; 0; 0
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: SLAM Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 8
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_slam
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud2
Color: 117; 80; 123
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: ARUCO Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 15
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_aruco
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud
Color: 255; 255; 255
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: ACTIVE Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/loop_feats
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz_default_plugins/PointCloud2
Color: 255; 255; 127
Color Transformer: FlatColor
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: SIM Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /ov_msckf/points_sim
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Fixed Frame: global
Frame Rate: 60
Name: root
Tools:
- Class: rviz_default_plugins/MoveCamera
- Class: rviz_default_plugins/Select
- Class: rviz_default_plugins/FocusCamera
- Class: rviz_default_plugins/Measure
Line color: 128; 128; 0
- Class: rviz_default_plugins/SetInitialPose
Topic: /initialpose
- Class: rviz_default_plugins/SetGoal
Topic: /move_base_simple/goal
- Class: rviz_default_plugins/PublishPoint
Single click: true
Topic: /clicked_point
Transformation:
Current:
Class: rviz_default_plugins/TF
Value: true
Views:
Current:
Class: rviz_default_plugins/Orbit
Distance: 10
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: 0
Y: 0
Z: 0
Focal Shape Fixed Size: false
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.9453980326652527
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 0.7853981852531433
Saved: ~
Window Geometry:
Current Depths:
collapsed: false
Displays:
collapsed: false
Height: 690
Hide Left Dock: false
Hide Right Dock: true
Image Tracks:
collapsed: false
QMainWindow State: 000000ff00000000fd00000004000000000000016a00000258fc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000014d000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000180049006d00610067006500200054007200610063006b00730100000190000001050000002800fffffffb0000000a0049006d00610067006501000001c60000011c0000000000000000fb0000001c00430075007200720065006e0074002000440065007000740068007300000001eb000000b50000002800ffffff000000010000010f00000218fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d00000218000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d006501000000000000045000000000000000000000039b0000025800000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1291
X: 2051
Y: 110

View File

@@ -0,0 +1,69 @@
<launch>

    <!-- Serial (single-threaded) estimator run: the ros1_serial_msckf node
         reads the rosbag itself instead of subscribing to a live playback. -->

    <!-- what config we are going to run (should match folder name) -->
    <arg name="verbosity" default="INFO" /> <!-- ALL, DEBUG, INFO, WARNING, ERROR, SILENT -->
    <arg name="config" default="euroc_mav" /> <!-- euroc_mav, tum_vi, rpng_aruco -->
    <arg name="config_path" default="$(find ov_msckf)/../config/$(arg config)/estimator_config.yaml" />

    <!-- mono or stereo and what ros bag to play -->
    <arg name="max_cameras" default="2" />
    <arg name="use_stereo" default="true" />
    <arg name="bag_start" default="0" /> <!-- v1-2: 0, mh1: 40, mh2: 35, mh3: 17.5, mh4-5: 15 -->
    <arg name="dataset" default="V1_02_medium" /> <!-- V1_01_easy, V1_02_medium, V2_02_medium -->
    <!-- NOTE(review): machine-specific default; override `bag` on the command line -->
    <arg name="bag" default="/media/patrick/RPNG FLASH 3/$(arg config)/$(arg dataset).bag" />
    <!-- <arg name="bag" default="/datasets/$(arg config)/$(arg dataset).bag" />-->

    <!-- saving trajectory path and timing information -->
    <arg name="dosave" default="false" />
    <arg name="dotime" default="false" />
    <arg name="path_est" default="/tmp/traj_estimate.txt" />
    <arg name="path_time" default="/tmp/traj_timing.txt" />

    <!-- if we should viz the groundtruth -->
    <arg name="dolivetraj" default="false" />
    <arg name="path_gt" default="$(find ov_data)/$(arg config)/$(arg dataset).txt" />

    <!-- MASTER NODE! -->
    <!-- <node name="ros1_serial_msckf" pkg="ov_msckf" type="ros1_serial_msckf" output="screen" clear_params="true" required="true" launch-prefix="gdb -ex run &#45;&#45;args">-->
    <node name="ros1_serial_msckf" pkg="ov_msckf" type="ros1_serial_msckf" output="screen" clear_params="true" required="true">

        <!-- bag parameters -->
        <param name="path_bag" type="str" value="$(arg bag)" />
        <param name="bag_start" type="double" value="$(arg bag_start)" />
        <param name="bag_durr" type="int" value="-1" /> <!-- -1 = process the whole bag -->

        <!-- master configuration object -->
        <param name="verbosity" type="str" value="$(arg verbosity)" />
        <param name="config_path" type="str" value="$(arg config_path)" />
        <param name="multi_threading" type="bool" value="false" />

        <!-- world/filter parameters -->
        <param name="use_stereo" type="bool" value="$(arg use_stereo)" />
        <param name="max_cameras" type="int" value="$(arg max_cameras)" />

        <!-- timing statistics recording -->
        <param name="record_timing_information" type="bool" value="$(arg dotime)" />
        <param name="record_timing_filepath" type="str" value="$(arg path_time)" />

    </node>

    <!-- record the trajectory if enabled -->
    <group if="$(arg dosave)">
        <node name="recorder_estimate" pkg="ov_eval" type="pose_to_file" output="screen" required="true">
            <param name="topic" type="str" value="/ov_msckf/poseimu" />
            <param name="topic_type" type="str" value="PoseWithCovarianceStamped" />
            <param name="output" type="str" value="$(arg path_est)" />
        </node>
    </group>

    <!-- path viz of aligned gt -->
    <group if="$(arg dolivetraj)">
        <node name="live_align_trajectory" pkg="ov_eval" type="live_align_trajectory" output="log" clear_params="true">
            <param name="alignment_type" type="str" value="posyaw" />
            <param name="path_gt" type="str" value="$(arg path_gt)" />
        </node>
    </group>

</launch>

View File

@@ -0,0 +1,106 @@
<launch>

    <!-- Simulation run of the MSCKF estimator over a pre-recorded trajectory
         file; perturbation/calibration toggles exercise online calibration. -->

    <!-- ================================================================ -->
    <!-- ================================================================ -->

    <!-- what config we are going to run (should match folder name) -->
    <arg name="verbosity" default="INFO" /> <!-- ALL, DEBUG, INFO, WARNING, ERROR, SILENT -->
    <arg name="config" default="rpng_sim" />
    <arg name="config_path" default="$(find ov_msckf)/../config/$(arg config)/estimator_config.yaml" />

    <!-- simulation parameters we can vary with our scripts -->
    <arg name="seed" default="3" />
    <arg name="fej" default="true" />
    <arg name="feat_rep" default="GLOBAL_3D" />
    <arg name="num_clones" default="11" />
    <arg name="num_slam" default="50" />
    <arg name="num_pts" default="100" />
    <arg name="max_cameras" default="2" />
    <arg name="freq_cam" default="10" />
    <arg name="freq_imu" default="400" />
    <arg name="dataset" default="udel_gore.txt" /> <!-- udel_gore, udel_gore_zupt, tum_corridor1_512_16_okvis, udel_arl -->

    <!-- if we should perturb the initial state values (i.e. calibration) -->
    <arg name="sim_do_perturbation" default="true" />
    <arg name="sim_do_calibration" default="true" />

    <!-- saving trajectory paths -->
    <arg name="dosave_pose" default="false" />
    <arg name="path_est" default="$(find ov_eval)/data/sim/traj_estimate.txt" />
    <arg name="path_gt" default="$(find ov_eval)/data/sim/traj_groundtruth.txt" />
    <arg name="dosave_state" default="false" />
    <arg name="path_state_est" default="$(find ov_eval)/data/sim/state_estimate.txt" />
    <arg name="path_state_std" default="$(find ov_eval)/data/sim/state_deviation.txt" />
    <arg name="path_state_gt" default="$(find ov_eval)/data/sim/state_groundtruth.txt" />

    <!-- ================================================================ -->
    <!-- ================================================================ -->

    <!-- MASTER NODE! -->
    <node name="run_simulation" pkg="ov_msckf" type="run_simulation" output="screen" clear_params="true" required="true">
    <!-- <node name="run_simulation" pkg="ov_msckf" type="run_simulation" output="screen" clear_params="true" required="true" launch-prefix="gdb -ex run &#45;&#45;args">-->

        <!-- =============================================== -->
        <!-- SIMULATION PARAMETERS -->
        <!-- =============================================== -->
        <param name="sim_traj_path" type="str" value="$(find ov_data)/sim/$(arg dataset)" />
        <param name="sim_seed_state_init" type="int" value="0" />
        <param name="sim_seed_measurements" type="int" value="$(arg seed)" />
        <!-- NOTE(review): "preturb" is misspelled, but this must match the parameter
             name read by the run_simulation node - confirm in code before renaming -->
        <param name="sim_seed_preturb" type="int" value="$(arg seed)" />
        <param name="sim_freq_cam" type="int" value="$(arg freq_cam)" />
        <param name="sim_freq_imu" type="int" value="$(arg freq_imu)" />
        <param name="sim_do_perturbation" type="bool" value="$(arg sim_do_perturbation)" />
        <param name="save_total_state" type="bool" value="$(arg dosave_state)" />
        <param name="filepath_est" type="str" value="$(arg path_state_est)" />
        <param name="filepath_std" type="str" value="$(arg path_state_std)" />
        <param name="filepath_gt" type="str" value="$(arg path_state_gt)" />

        <!-- =============================================== -->
        <!-- =============================================== -->

        <!-- master configuration object -->
        <param name="verbosity" type="str" value="$(arg verbosity)" />
        <param name="config_path" type="str" value="$(arg config_path)" />
        <param name="multi_threading" type="bool" value="false" />

        <!-- world/filter parameters -->
        <param name="use_fej" type="bool" value="$(arg fej)" />
        <param name="calib_cam_extrinsics" type="bool" value="$(arg sim_do_calibration)" />
        <param name="calib_cam_intrinsics" type="bool" value="$(arg sim_do_calibration)" />
        <param name="calib_cam_timeoffset" type="bool" value="$(arg sim_do_calibration)" />
        <param name="max_clones" type="int" value="$(arg num_clones)" />
        <param name="max_slam" type="int" value="$(arg num_slam)" />
        <param name="max_cameras" type="int" value="$(arg max_cameras)" />
        <param name="feat_rep_msckf" type="str" value="$(arg feat_rep)" />
        <param name="feat_rep_slam" type="str" value="$(arg feat_rep)" />
        <param name="feat_rep_aruco" type="str" value="$(arg feat_rep)" />

        <!-- tracker/extractor properties -->
        <param name="num_pts" type="int" value="$(arg num_pts)" />

    </node>

    <!-- record the trajectory if enabled -->
    <group if="$(arg dosave_pose)">
        <node name="recorder_estimate" pkg="ov_eval" type="pose_to_file" output="screen">
            <param name="topic" type="str" value="/ov_msckf/poseimu" />
            <param name="topic_type" type="str" value="PoseWithCovarianceStamped" />
            <param name="output" type="str" value="$(arg path_est)" />
        </node>
        <node name="recorder_groundtruth" pkg="ov_eval" type="pose_to_file" output="screen">
            <param name="topic" type="str" value="/ov_msckf/posegt" />
            <param name="topic_type" type="str" value="PoseStamped" />
            <param name="output" type="str" value="$(arg path_gt)" />
        </node>
    </group>

</launch>

View File

@@ -0,0 +1,70 @@
<launch>

    <!-- Live/subscribe estimator run: the node subscribes to sensor topics,
         with optional rosbag playback started alongside it (`dobag`). -->

    <!-- what config we are going to run (should match folder name) -->
    <arg name="verbosity" default="INFO" /> <!-- ALL, DEBUG, INFO, WARNING, ERROR, SILENT -->
    <arg name="config" default="uzh-fpv" /> <!-- euroc_mav, tum_vi, rpng_aruco -->
    <arg name="config_path" default="$(find ov_msckf)/../config/$(arg config)/estimator_config.yaml" />

    <!-- mono or stereo and what ros bag to play -->
    <arg name="max_cameras" default="2" />
    <arg name="use_stereo" default="true" />
    <arg name="bag_start" default="0" /> <!-- v1-2: 0, mh1: 40, mh2: 35, mh3: 17.5, mh4-5: 15 -->
    <arg name="dataset" default="indoor_forward_3_snapdragon_with_gt" /> <!-- V1_01_easy, V1_02_medium, V2_02_medium -->
    <arg name="dobag" default="false" /> <!-- if we should play back the bag -->
    <!-- <arg name="bag" default="/media/patrick/RPNG\ FLASH\ 3/$(arg config)/$(arg dataset).bag" /> -->
    <arg name="bag" default="/home/pi/work_drivecast/datasets/$(arg config)/$(arg dataset).bag" />
    <!-- <arg name="bag" default="/datasets/$(arg config)/$(arg dataset).bag" />-->

    <!-- saving trajectory path and timing information -->
    <arg name="dosave" default="false" />
    <arg name="dotime" default="false" />
    <arg name="path_est" default="/tmp/traj_estimate.txt" />
    <arg name="path_time" default="/tmp/traj_timing.txt" />

    <!-- if we should viz the groundtruth -->
    <arg name="dolivetraj" default="false" />
    <arg name="path_gt" default="$(find ov_data)/$(arg config)/$(arg dataset).txt" />

    <!-- MASTER NODE! -->
    <!-- <node name="run_subscribe_msckf" pkg="ov_msckf" type="run_subscribe_msckf" output="screen" clear_params="true" required="true" launch-prefix="gdb -ex run &#45;&#45;args">-->
    <node name="run_subscribe_msckf" pkg="ov_msckf" type="run_subscribe_msckf" output="screen" clear_params="true" required="true">

        <!-- master configuration object -->
        <!-- NOTE(review): sibling launch files use type="str"; roslaunch appears to
             accept "string" as well, but confirm before unifying -->
        <param name="verbosity" type="string" value="$(arg verbosity)" />
        <param name="config_path" type="string" value="$(arg config_path)" />

        <!-- world/filter parameters -->
        <param name="use_stereo" type="bool" value="$(arg use_stereo)" />
        <param name="max_cameras" type="int" value="$(arg max_cameras)" />

        <!-- timing statistics recording -->
        <param name="record_timing_information" type="bool" value="$(arg dotime)" />
        <param name="record_timing_filepath" type="string" value="$(arg path_time)" />

    </node>

    <!-- play the dataset -->
    <group if="$(arg dobag)">
        <node pkg="rosbag" type="play" name="rosbag" args="-d 1 -s $(arg bag_start) $(arg bag)" required="true"/>
    </group>

    <!-- record the trajectory if enabled -->
    <group if="$(arg dosave)">
        <node name="recorder_estimate" pkg="ov_eval" type="pose_to_file" output="screen" required="true">
            <param name="topic" type="str" value="/ov_msckf/poseimu" />
            <param name="topic_type" type="str" value="PoseWithCovarianceStamped" />
            <param name="output" type="str" value="$(arg path_est)" />
        </node>
    </group>

    <!-- path viz of aligned gt -->
    <group if="$(arg dolivetraj)">
        <node name="live_align_trajectory" pkg="ov_eval" type="live_align_trajectory" output="log" clear_params="true">
            <param name="alignment_type" type="str" value="posyaw" />
            <param name="path_gt" type="str" value="$(arg path_gt)" />
        </node>
    </group>

</launch>

View File

@@ -0,0 +1,114 @@
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument, LogInfo, OpaqueFunction
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration, TextSubstitution
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory, get_package_prefix
import os
import sys
# Launch arguments exposed on the command line; each is read back as a
# LaunchConfiguration inside launch_setup() below.
launch_args = [
    DeclareLaunchArgument(name="namespace", default_value="", description="namespace"),
    DeclareLaunchArgument(
        name="ov_enable", default_value="true", description="enable OpenVINS node"
    ),
    DeclareLaunchArgument(
        name="rviz_enable", default_value="true", description="enable rviz node"
    ),
    DeclareLaunchArgument(
        name="config",
        default_value="euroc_mav",
        description="euroc_mav, tum_vi, rpng_aruco...",
    ),
    DeclareLaunchArgument(
        name="config_path",
        default_value="",
        description="path to estimator_config.yaml. If not given, determined based on provided 'config' above",
    ),
    DeclareLaunchArgument(
        name="verbosity",
        default_value="INFO",
        description="ALL, DEBUG, INFO, WARNING, ERROR, SILENT",
    ),
    DeclareLaunchArgument(
        name="use_stereo",
        default_value="true",
        description="if we have more than 1 camera, if we should try to track stereo constraints between pairs",
    ),
    DeclareLaunchArgument(
        name="max_cameras",
        default_value="2",
        description="how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking)",
    ),
    DeclareLaunchArgument(
        name="save_total_state",
        default_value="false",
        description="record the total state with calibration and features to a txt file",
    )
]
def launch_setup(context):
    """Resolve the estimator configuration file and build the launch actions.

    Returns either the estimator + rviz nodes, or a single LogInfo action
    explaining why OpenVINS was not started.
    """
    config_path = LaunchConfiguration("config_path").perform(context)
    if config_path:
        # An explicit path was supplied; it must point at an existing file.
        if not os.path.isfile(config_path):
            return [
                LogInfo(
                    msg="ERROR: config_path file: '{}' - does not exist. - not starting OpenVINS".format(
                        config_path)
                )
            ]
    else:
        # No explicit path: look the 'config' name up among the folders
        # installed under the package share directory.
        configs_dir = os.path.join(get_package_share_directory("ov_msckf"), "config")
        available_configs = os.listdir(configs_dir)
        config = LaunchConfiguration("config").perform(context)
        if config not in available_configs:
            return [
                LogInfo(
                    msg="ERROR: unknown config: '{}' - Available configs are: {} - not starting OpenVINS".format(
                        config, ", ".join(available_configs)
                    )
                )
            ]
        config_path = os.path.join(
            get_package_share_directory("ov_msckf"),
            "config", config, "estimator_config.yaml"
        )

    # Main estimator node (spawned only when 'ov_enable' is true).
    estimator_node = Node(
        package="ov_msckf",
        executable="run_subscribe_msckf",
        condition=IfCondition(LaunchConfiguration("ov_enable")),
        namespace=LaunchConfiguration("namespace"),
        output="screen",
        parameters=[
            {"verbosity": LaunchConfiguration("verbosity")},
            {"use_stereo": LaunchConfiguration("use_stereo")},
            {"max_cameras": LaunchConfiguration("max_cameras")},
            {"save_total_state": LaunchConfiguration("save_total_state")},
            {"config_path": config_path},
        ],
    )

    # rviz visualization node (spawned only when 'rviz_enable' is true),
    # loading the display config shipped with the package.
    rviz_config = os.path.join(
        get_package_share_directory("ov_msckf"), "launch", "display_ros2.rviz"
    )
    rviz_node = Node(
        package="rviz2",
        executable="rviz2",
        condition=IfCondition(LaunchConfiguration("rviz_enable")),
        arguments=["-d" + rviz_config, "--ros-args", "--log-level", "warn"],
    )
    return [estimator_node, rviz_node]
def generate_launch_description():
    """Entry point for the ROS 2 launch system.

    Declares all launch arguments, then defers node construction to
    launch_setup() via an OpaqueFunction so argument values can be resolved.
    """
    description = LaunchDescription(launch_args)
    description.add_action(OpaqueFunction(function=launch_setup))
    return description

73
ov_msckf/package.xml Normal file
View File

@@ -0,0 +1,73 @@
<?xml version="1.0"?>
<package format="3">
<!-- Package Information -->
<name>ov_msckf</name>
<version>2.6.0</version>
<description>
Implementation of a type-based error-state Kalman filter.
</description>
<url type="website">https://docs.openvins.com/</url>
<url type="bugtracker">https://github.com/rpng/open_vins/issues</url>
<url type="repository">https://github.com/rpng/open_vins</url>
<!-- Code Authors -->
<author email="pgeneva@udel.edu">Patrick Geneva</author>
<author email="keck@udel.edu">Kevin Eckenhoff</author>
<author email="ghuang@udel.edu">Guoquan Huang</author>
<maintainer email="pgeneva@udel.edu">Patrick Geneva</maintainer>
<!-- Licensing -->
<license>GNU General Public License v3.0</license>
<!-- ROS1: Dependencies needed to compile this package. -->
<buildtool_depend condition="$ROS_VERSION == 1">catkin</buildtool_depend>
<depend condition="$ROS_VERSION == 1">cmake_modules</depend>
<depend condition="$ROS_VERSION == 1">roscpp</depend>
<depend condition="$ROS_VERSION == 1">rosbag</depend>
<depend condition="$ROS_VERSION == 1">tf</depend>
<depend condition="$ROS_VERSION == 1">std_msgs</depend>
<depend condition="$ROS_VERSION == 1">sensor_msgs</depend>
<depend condition="$ROS_VERSION == 1">geometry_msgs</depend>
<depend condition="$ROS_VERSION == 1">nav_msgs</depend>
<depend condition="$ROS_VERSION == 1">visualization_msgs</depend>
<depend condition="$ROS_VERSION == 1">image_transport</depend>
<depend condition="$ROS_VERSION == 1">cv_bridge</depend>
<depend condition="$ROS_VERSION == 1">ov_core</depend>
<depend condition="$ROS_VERSION == 1">ov_init</depend>
<!-- ROS2: Dependencies needed to compile this package. -->
<buildtool_depend condition="$ROS_VERSION == 2">ament_cmake</buildtool_depend>
<depend condition="$ROS_VERSION == 2">rclcpp</depend>
<depend condition="$ROS_VERSION == 2">tf2_ros</depend>
<depend condition="$ROS_VERSION == 2">tf2_geometry_msgs</depend>
<depend condition="$ROS_VERSION == 2">std_msgs</depend>
<depend condition="$ROS_VERSION == 2">geometry_msgs</depend>
<depend condition="$ROS_VERSION == 2">sensor_msgs</depend>
<depend condition="$ROS_VERSION == 2">nav_msgs</depend>
<depend condition="$ROS_VERSION == 2">cv_bridge</depend>
<depend condition="$ROS_VERSION == 2">image_transport</depend>
<depend condition="$ROS_VERSION == 2">ov_core</depend>
<depend condition="$ROS_VERSION == 2">ov_init</depend>
<!-- System dependencies for both versions -->
<depend>eigen</depend>
<depend>libopencv-dev</depend>
<depend>libopencv-contrib-dev</depend>
<depend>boost</depend>
<depend>libceres-dev</depend>
<!-- Note the export is required to expose the executables -->
<export>
<build_type condition="$ROS_VERSION == 1">catkin</build_type>
<build_type condition="$ROS_VERSION == 2">ament_cmake</build_type>
<rosdoc config="rosdoc.yaml" />
</export>
<!-- These are really "soft" dependencies, and are just required if you need them for launch... -->
<!-- <depend condition="$ROS_VERSION == 1">ov_eval</depend>-->
<!-- <depend condition="$ROS_VERSION == 1">ov_data</depend>-->
<!-- <depend condition="$ROS_VERSION == 2">ov_eval</depend>-->
<!-- <depend condition="$ROS_VERSION == 2">ov_data</depend>-->
</package>

8
ov_msckf/rosdoc.yaml Normal file
View File

@@ -0,0 +1,8 @@
# This uses a workaround to get the full documentation included in each project:
# the `use_mdfile_as_mainpage` path is also appended to the doxygen "INPUT",
# so pointing it one directory up pulls in the whole OpenVINS documentation.
- builder: doxygen
file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox'
exclude_patterns: '*/ov_core/* */ov_data/* */ov_eval/* */ov_init/*'
use_mdfile_as_mainpage: '../'

115
ov_msckf/scripts/run_ros_eth.sh Executable file
View File

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
# Run the OpenVINS serial estimator over the EuRoC MAV datasets in every
# tracking mode, repeating each dataset several times (Monte Carlo runs),
# and record estimated-trajectory and timing logs for later evaluation.

# Source our workspace directory to load ENV variables.
# Paths are quoted so the script still works from a directory with spaces.
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source "${SCRIPT_DIR}/../../../../devel/setup.bash"

#=============================================================
#=============================================================
#=============================================================

# estimator configurations
modes=(
    "mono"
    "binocular"
    "stereo"
)

# dataset locations
bagnames=(
    "V1_01_easy"
    "V1_02_medium"
    "V1_03_difficult"
    "V2_01_easy"
    "V2_02_medium"
    "V2_03_difficult"
    "MH_01_easy"
    "MH_02_easy"
    "MH_03_medium"
    "MH_04_difficult"
    "MH_05_difficult"
)

# how far we should start into the dataset
# this can be used to skip the initial sections
bagstarttimes=(
    "0"
    "0"
    "0"
    "0"
    "0"
    "0"
    "40"
    "35"
    "10"
    "17"
    "18"
)

# location to save log files into
save_path1="/home/chuchu/test_ov/openvins_pra/exp_euroc/algorithms"
save_path2="/home/chuchu/test_ov/openvins_pra/exp_euroc/timings"
bag_path="/home/chuchu/datasets/euroc_mav/"

#=============================================================
#=============================================================
#=============================================================

# Loop through all modes
for h in "${!modes[@]}"; do
    # Loop through all datasets
    for i in "${!bagnames[@]}"; do
        # Monte Carlo runs for this dataset
        # If you want more runs, change the below loop
        for j in {00..04}; do

            # start timing
            start_time="$(date -u +%s)"
            filename_est="$save_path1/ov_2.4_${modes[h]}/${bagnames[i]}/${j}_estimate.txt"
            filename_time="$save_path2/ov_2.4_${modes[h]}/${bagnames[i]}/${j}_timing.txt"

            # Map the tracking mode onto camera count / stereo flag.
            # A case statement replaces the original chain of if-blocks and
            # fails fast on an unknown mode instead of silently reusing the
            # values left over from the previous iteration.
            case "${modes[h]}" in
                mono)      temp1="1"; temp2="true"  ;;
                binocular) temp1="2"; temp2="false" ;;
                stereo)    temp1="2"; temp2="true"  ;;
                *)
                    echo "BASH: unknown mode '${modes[h]}'" >&2
                    exit 1
                    ;;
            esac

            # run our ROS launch file (note we send console output to terminator)
            roslaunch ov_msckf serial.launch \
                max_cameras:="$temp1" \
                use_stereo:="$temp2" \
                config:="euroc_mav" \
                bag:="$bag_path/${bagnames[i]}.bag" \
                bag_start:="${bagstarttimes[i]}" \
                dosave:="true" \
                path_est:="$filename_est" \
                dotime:="true" \
                dolivetraj:="true" \
                path_time:="$filename_time" &> /dev/null

            # print out the time elapsed
            end_time="$(date -u +%s)"
            elapsed="$((end_time - start_time))"
            echo "BASH: ${modes[h]} - ${bagnames[i]} - run $j took $elapsed seconds";

        done
    done
done

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env bash
# Run the OpenVINS estimator over the KAIST VIO datasets in every tracking
# mode, repeating each dataset several times (Monte Carlo runs), and record
# estimated-trajectory and timing logs for later evaluation.

# Source our workspace directory to load ENV variables.
source /home/patrick/workspace/catkin_ws_ov/devel/setup.bash

#=============================================================
#=============================================================
#=============================================================

# estimator configurations
modes=(
    "mono"
    "binocular"
    "stereo"
)

# dataset locations
bagnames=(
    "circle"
    "circle_fast"
    "circle_head"
    "infinite"
    "infinite_fast"
    "infinite_head"
    "rotation"
    "rotation_fast"
    "square"
    "square_fast"
    "square_head"
)

# how far we should start into the dataset
# this can be used to skip the initial sections
bagstarttimes=(
    "0"
    "0"
    "0"
    "0"
    "0"
    "0"
    "0"
    "0"
    "0"
    "0"
    "0"
)

# location to save log files into
save_path1="/home/patrick/github/pubs_data/pgeneva/2020_openvins_2.3.1/exp_kaistvio/algorithms"
save_path2="/home/patrick/github/pubs_data/pgeneva/2020_openvins_2.3.1/exp_kaistvio/timings"
# BUGFIX: the path was written as "RPNG\ FLASH\ 3" inside double quotes, where
# backslash-before-space is kept literally, so every bag path contained stray
# backslashes and never matched the real directory. The space needs no
# escaping inside a quoted string.
bag_path="/media/patrick/RPNG FLASH 3/KAIST_VIO"

#=============================================================
#=============================================================
#=============================================================

# Loop through all modes
for h in "${!modes[@]}"; do
    # Loop through all datasets
    for i in "${!bagnames[@]}"; do
        # Monte Carlo runs for this dataset
        # If you want more runs, change the below loop
        for j in {00..04}; do

            # start timing
            start_time="$(date -u +%s)"
            filename_est="$save_path1/ov_2.3.3_${modes[h]}/${bagnames[i]}/${j}_estimate.txt"
            filename_time="$save_path2/ov_2.3.3_${modes[h]}/${bagnames[i]}/${j}_timing.txt"

            # Map the tracking mode onto camera count / stereo flag.
            # A case statement replaces the original chain of if-blocks and
            # fails fast on an unknown mode instead of silently reusing the
            # values left over from the previous iteration.
            case "${modes[h]}" in
                mono)      temp1="1"; temp2="true"  ;;
                binocular) temp1="2"; temp2="false" ;;
                stereo)    temp1="2"; temp2="true"  ;;
                *)
                    echo "BASH: unknown mode '${modes[h]}'" >&2
                    exit 1
                    ;;
            esac

            # run our ROS launch file (note we send console output to terminator)
            roslaunch ov_msckf pgeneva_ros_kaistvio.launch \
                max_cameras:="$temp1" \
                use_stereo:="$temp2" \
                bag:="$bag_path/${bagnames[i]}.bag" \
                bag_start:="${bagstarttimes[i]}" \
                dosave:="true" \
                path_est:="$filename_est" \
                dotime:="true" \
                path_time:="$filename_time" &> /dev/null

            # print out the time elapsed
            end_time="$(date -u +%s)"
            elapsed="$((end_time - start_time))"
            echo "BASH: ${modes[h]} - ${bagnames[i]} - run $j took $elapsed seconds";

        done
    done
done

104
ov_msckf/scripts/run_ros_tumvi.sh Executable file
View File

@@ -0,0 +1,104 @@
#!/usr/bin/env bash
# Evaluate OpenVINS on the TUM-VI "room" sequences: every estimator mode is
# run over every bag for several Monte-Carlo trials, saving per-run trajectory
# estimates and timing logs for later evaluation.

# Source our workspace directory to load ENV variables
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source ${SCRIPT_DIR}/../../../../devel/setup.bash

#=============================================================
#=============================================================
#=============================================================

# estimator configurations
modes=(
"mono"
"binocular"
"stereo"
)

# dataset locations
bagnames=(
"dataset-room1_512_16"
"dataset-room2_512_16"
"dataset-room3_512_16"
"dataset-room4_512_16"
"dataset-room5_512_16"
"dataset-room6_512_16"
)

# how far we should start into the dataset
# this can be used to skip the initial sections (one entry per bag)
bagstarttimes=(
"0"
"0"
"0"
"0"
"0"
"0"
)

# location to save log files into
save_path1="/home/chuchu/test_ov/openvins_pra/exp_tum/algorithms"
save_path2="/home/chuchu/test_ov/openvins_pra/exp_tum/timings"
bag_path="/home/chuchu/datasets/tum_vi"

#=============================================================
#=============================================================
#=============================================================

# TODO: Still need to test all, see how to get imu threshold in
for mode_idx in "${!modes[@]}"; do
  for bag_idx in "${!bagnames[@]}"; do
    # Monte-Carlo trials for this (mode, bag) pair; widen the brace range
    # below if more repetitions are wanted.
    for run in {00..04}; do
      start_time="$(date -u +%s)"
      filename_est="$save_path1/ov_2.4_${modes[mode_idx]}/${bagnames[bag_idx]}/${run}_estimate.txt"
      filename_time="$save_path2/ov_2.4_${modes[mode_idx]}/${bagnames[bag_idx]}/${run}_timing.txt"
      # map estimator mode -> (camera count, stereo flag)
      case "${modes[mode_idx]}" in
        "mono")      num_cams="1"; stereo_on="true"  ;;
        "binocular") num_cams="2"; stereo_on="false" ;;
        "stereo")    num_cams="2"; stereo_on="true"  ;;
      esac
      # launch the estimator (console output suppressed)
      roslaunch ov_msckf serial.launch \
        max_cameras:="$num_cams" \
        use_stereo:="$stereo_on" \
        config:="tum_vi" \
        bag:="$bag_path/${bagnames[bag_idx]}.bag" \
        bag_start:="${bagstarttimes[bag_idx]}" \
        dosave:="true" \
        path_est:="$filename_est" \
        dotime:="true" \
        dolivetraj:="true" \
        path_time:="$filename_time" &> /dev/null
      # report wall-clock duration of this run
      end_time="$(date -u +%s)"
      elapsed="$(($end_time-$start_time))"
      echo "BASH: ${modes[mode_idx]} - ${bagnames[bag_idx]} - run $run took $elapsed seconds";
    done
  done
done

View File

@@ -0,0 +1,180 @@
#!/usr/bin/env bash
# Evaluate OpenVINS on the UZH-FPV snapdragon sequences: every estimator mode
# over every enabled bag, several Monte-Carlo trials each, recording the
# trajectory estimate and timing log of every run.

# Source our workspace directory to load ENV variables
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source ${SCRIPT_DIR}/../../../../devel/setup.bash

#=============================================================
#=============================================================
#=============================================================

# estimator configurations
modes=(
"mono"
"binocular"
"stereo"
)

# dataset locations
bagnames=(
# "indoor_forward_3_snapdragon_with_gt" # bag needs to end early as there is a hard landing
"indoor_forward_5_snapdragon_with_gt"
"indoor_forward_6_snapdragon_with_gt"
"indoor_forward_7_snapdragon_with_gt"
"indoor_forward_9_snapdragon_with_gt"
"indoor_forward_10_snapdragon_with_gt"
"indoor_45_2_snapdragon_with_gt"
"indoor_45_4_snapdragon_with_gt"
# "indoor_45_9_snapdragon_with_gt" # problem one, seems to fail part way in due to freefalling
"indoor_45_12_snapdragon_with_gt"
"indoor_45_13_snapdragon_with_gt"
"indoor_45_14_snapdragon_with_gt"
# "outdoor_forward_1_snapdragon_with_gt"
# "outdoor_forward_3_snapdragon_with_gt"
# "outdoor_forward_5_snapdragon_with_gt"
# "outdoor_45_1_snapdragon_with_gt"
)

# what sensor configuration each dataset has (index-aligned with bagnames)
# 0: indoor forward facing
# 1: indoor 45 degree downward facing
# 2: outdoor forward facing
# 3: outdoor 45 degree downward facing
sensorconfig=(
# indoor forward
# "0" # bag needs to end early as there is a hard landing
"0"
"0"
"0"
"0"
"0"
# indoor 45 degree
"1"
"1"
# "1" # problem one, seems to fail part way in due to freefalling
"1"
"1"
"1"
# outdoor forward and 45
# "2"
# "2"
# "2"
# "3"
)

# how far we should start into the dataset (seconds, index-aligned)
# this can be used to skip the initial sections
bagstarttimes=(
# indoor forward
# "25"
"0"
"0"
"0"
"0"
"0"
# indoor 45 degree
"0"
"0"
# "17" # problem one, seems to fail part way in due to freefalling
"0"
"0"
"0"
# outdoor forward and 45
# "0"
# "0"
# "41"
# "21"
)

# threshold for variance to detect if the unit has moved yet (index-aligned)
imuthreshold=(
# indoor forward
# "5.0"
"0.5"
"0.5"
"0.5"
"0.5"
"0.5"
# indoor 45 degree
"0.5"
"0.5"
# "4.0" # problem one, seems to fail part way in due to freefalling
"0.5"
"0.5"
"0.5"
# outdoor forward and 45
# "0.5"
# "0.5"
# "4.0"
# "4.0"
)

# location to save log files into
save_path1="/home/patrick/github/pubs_data/pgeneva/2020_openvins_2.4/exp_uzhfpv/algorithms"
save_path2="/home/patrick/github/pubs_data/pgeneva/2020_openvins_2.4/exp_uzhfpv/timings"
# NOTE: inside double quotes a backslash before a space is kept *literally*,
# so the previous value "/media/patrick/RPNG\ FLASH\ 2/..." contained real
# backslashes and never matched the mount point. Spaces are safe here since
# every expansion of $bag_path below is quoted.
bag_path="/media/patrick/RPNG FLASH 2/uzhfpv_newer"

#=============================================================
#=============================================================
#=============================================================

# Loop through all modes
for h in "${!modes[@]}"; do
  # Loop through all datasets
  for i in "${!bagnames[@]}"; do
    # Monte Carlo runs for this dataset
    # If you want more runs, change the below loop
    for j in {00..04}; do
      # start timing
      start_time="$(date -u +%s)"
      filename_est="$save_path1/ov_2.4_${modes[h]}/${bagnames[i]}/${j}_estimate.txt"
      filename_time="$save_path2/ov_2.4_${modes[h]}/${bagnames[i]}/${j}_timing.txt"
      # map estimator mode -> (number of cameras, stereo flag)
      case "${modes[h]}" in
        "mono")      temp1="1"; temp2="true"  ;;
        "binocular") temp1="2"; temp2="false" ;;
        "stereo")    temp1="2"; temp2="true"  ;;
      esac
      # run our ROS launch file (note we send console output to terminator)
      roslaunch ov_msckf pgeneva_ros_uzhfpv.launch \
        max_cameras:="$temp1" \
        use_stereo:="$temp2" \
        bag:="$bag_path/${bagnames[i]}.bag" \
        bag_start:="${bagstarttimes[i]}" \
        sensor_config:="${sensorconfig[i]}" \
        init_imu_thresh:="${imuthreshold[i]}" \
        dosave:="true" \
        path_est:="$filename_est" \
        dotime:="true" \
        path_time:="$filename_time" &> /dev/null
      # print out the time elapsed
      end_time="$(date -u +%s)"
      elapsed="$(($end_time-$start_time))"
      echo "BASH: ${modes[h]} - ${bagnames[i]} - run $j took $elapsed seconds";
    done
  done
done

View File

@@ -0,0 +1,74 @@
#!/usr/bin/env bash
# Monte-Carlo simulation study: effect of online calibration and of perturbing
# the initial calibration values, over each enabled simulated dataset.

# Source our workspace directory to load ENV variables
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source ${SCRIPT_DIR}/../../../../devel/setup.bash

#=============================================================
#=============================================================
#=============================================================

# datasets
datasets=(
"udel_gore"
# "udel_arl"
# "udel_gore_zupt"
# "tum_corridor1_512_16_okvis"
)

# If we want to calibrate parameters
sim_do_calibration=(
"false"
"true"
)

# If we want to perturb the initial state values
sim_do_perturbation=(
"false"
"true"
)

# location to save log files into
save_path_est="/home/chuchu/test_ov/openvins_pra/sim_calib/algorithms"
save_path_gt="/home/chuchu/test_ov/openvins_pra/sim_calib/truths"

#=============================================================
# Start the Monte Carlo Simulations
#=============================================================
for d in "${!datasets[@]}"; do
  # groundtruth file depends only on the dataset, so compute it once here
  filename_gt="$save_path_gt/${datasets[d]}.txt"
  # sweep calibration on/off
  for c in "${!sim_do_calibration[@]}"; do
    # sweep perturbation on/off
    for p in "${!sim_do_perturbation[@]}"; do
      # Monte-Carlo trials for this configuration
      for run in {00..02}; do
        filename_est="$save_path_est/calib_${sim_do_calibration[c]}/perturb_${sim_do_perturbation[p]}/${datasets[d]}/estimate_$run.txt"
        start_time="$(date -u +%s)"
        # seed is run-index + 1 (force base-10 so "08"/"09" parse correctly)
        roslaunch ov_msckf simulation.launch \
          seed:="$((10#$run + 1))" \
          dataset:="${datasets[d]}.txt" \
          sim_do_calibration:="${sim_do_calibration[c]}" \
          sim_do_perturbation:="${sim_do_perturbation[p]}" \
          dosave_pose:="true" \
          path_est:="$filename_est" \
          path_gt:="$filename_gt" &> /dev/null
        end_time="$(date -u +%s)"
        elapsed="$(($end_time-$start_time))"
        echo "BASH: ${datasets[d]} - calib_${sim_do_calibration[c]} - perturb_${sim_do_perturbation[p]} - run $run took $elapsed seconds";
      done
    done
  done
done

View File

@@ -0,0 +1,66 @@
#!/usr/bin/env bash
# Monte-Carlo simulation study: estimator accuracy versus the number of
# simulated cameras, over each enabled dataset.

# Source our workspace directory to load ENV variables
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source ${SCRIPT_DIR}/../../../../devel/setup.bash

#=============================================================
#=============================================================
#=============================================================

# datasets
datasets=(
"udel_gore"
# "udel_arl"
# "udel_gore_zupt"
# "tum_corridor1_512_16_okvis"
)

# number of cameras
cameras=(
"1"
"2"
"3"
"4"
)

# location to save log files into
save_path_est="/home/cc/test/openvins_pra/sim_cam/algorithms"
save_path_gt="/home/cc/test/openvins_pra/sim_cam/truths"

#=============================================================
# Start Monte-Carlo Simulations
#=============================================================
for d in "${!datasets[@]}"; do
  # groundtruth file depends only on the dataset
  filename_gt="$save_path_gt/${datasets[d]}.txt"
  # iterate camera counts by value (we never need the index)
  for ncam in "${cameras[@]}"; do
    # Monte-Carlo trials for this configuration
    for run in {00..02}; do
      start_time="$(date -u +%s)"
      filename_est="$save_path_est/ov_v23_cam${ncam}/${datasets[d]}/estimate_$run.txt"
      # launch the simulator (console output suppressed);
      # seed is run-index + 1, parsed as base-10
      roslaunch ov_msckf simulation.launch \
        seed:="$((10#$run + 1))" \
        max_cameras:="$ncam" \
        dataset:="${datasets[d]}.txt" \
        dosave_pose:="true" \
        path_est:="$filename_est" \
        path_gt:="$filename_gt" &> /dev/null
      end_time="$(date -u +%s)"
      elapsed="$(($end_time-$start_time))"
      echo "BASH: ${datasets[d]} - ${ncam} - run $run took $elapsed seconds";
    done
  done
done

85
ov_msckf/scripts/run_sim_rep.sh Executable file
View File

@@ -0,0 +1,85 @@
#!/usr/bin/env bash
# Monte-Carlo simulation study: compare feature representations, each with and
# without First-Estimates-Jacobians (FEJ), over the enabled datasets.

# Source our workspace directory to load ENV variables
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source ${SCRIPT_DIR}/../../../../devel/setup.bash

#=============================================================
#=============================================================
#=============================================================

# datasets
datasets=(
# "udel_gore"
"udel_arl"
# "udel_gore_zupt"
# "tum_corridor1_512_16_okvis"
)

# estimator configurations
usefej=(
"false"
"true"
)

# feature representations
representations=(
"GLOBAL_3D"
"GLOBAL_FULL_INVERSE_DEPTH"
"ANCHORED_3D"
"ANCHORED_FULL_INVERSE_DEPTH"
"ANCHORED_MSCKF_INVERSE_DEPTH"
"ANCHORED_INVERSE_DEPTH_SINGLE"
)

# location to save log files into
save_path_est="/home/cc/test/openvins_pra/sim_representations/algorithms"
save_path_gt="/home/cc/test/openvins_pra/sim_representations/truths"

#=============================================================
# Start Monte-Carlo Simulations
#=============================================================
for d in "${!datasets[@]}"; do
  # groundtruth file depends only on the dataset
  filename_gt="$save_path_gt/${datasets[d]}.txt"
  for f in "${!usefej[@]}"; do
    # output folders for FEJ runs get a "_FEJ" suffix
    if [ "${usefej[f]}" == "true" ]; then
      fej_suffix="_FEJ"
    else
      fej_suffix=""
    fi
    for r in "${!representations[@]}"; do
      # Monte-Carlo trials for this configuration
      for run in {00..09}; do
        start_time="$(date -u +%s)"
        filename_est="$save_path_est/${representations[r]}${fej_suffix}/${datasets[d]}/estimate_$run.txt"
        # launch the simulator (console output suppressed);
        # seed is run-index + 1, parsed as base-10 so "08"/"09" are valid
        roslaunch ov_msckf simulation.launch \
          seed:="$((10#$run + 1))" \
          dataset:="${datasets[d]}.txt" \
          fej:="${usefej[f]}" \
          feat_rep:="${representations[r]}" \
          dosave_pose:="true" \
          path_est:="$filename_est" \
          path_gt:="$filename_gt" &> /dev/null
        end_time="$(date -u +%s)"
        elapsed="$(($end_time-$start_time))"
        echo "BASH: ${datasets[d]} - ${usefej[f]} - ${representations[r]} - run $run took $elapsed seconds";
      done
    done
  done
done

View File

@@ -0,0 +1,897 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "VioManager.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
// Constructor: copies the parameter struct, prints/loads all configuration
// sections, allocates the filter state (seeding camera-IMU time offset and
// per-camera intrinsic/extrinsic calibration), optionally opens the
// timing-statistics file, and constructs every sub-module: feature/aruco
// trackers, propagator, inertial initializer, MSCKF/SLAM/ZUPT updaters,
// and the active-track feature initializer.
VioManager::VioManager(VioManagerOptions &params_) : thread_init_running(false), thread_init_success(false) {
// Nice startup message
PRINT_DEBUG("=======================================\n");
PRINT_DEBUG("OPENVINS ON-MANIFOLD EKF IS STARTING\n");
PRINT_DEBUG("=======================================\n");
// Copy the parameters and print/load each configuration group
this->params = params_;
params.print_and_load_estimator();
params.print_and_load_noise();
params.print_and_load_state();
params.print_and_load_trackers();
// This will globally set the thread count we will use
// -1 will reset to the system default threading (usually the num of cores)
cv::setNumThreads(params.use_multi_threading ? -1 : 0);
// Fix OpenCV's RNG seed so feature extraction is repeatable across runs
cv::setRNGSeed(0);
// Create the state!!
state = std::make_shared<State>(params.state_options);
// Timeoffset from camera to IMU (1x1 state; both value and FEJ are seeded)
Eigen::VectorXd temp_camimu_dt;
temp_camimu_dt.resize(1);
temp_camimu_dt(0) = params.calib_camimu_dt;
state->_calib_dt_CAMtoIMU->set_value(temp_camimu_dt);
state->_calib_dt_CAMtoIMU->set_fej(temp_camimu_dt);
// Loop through and load each of the cameras
// (intrinsics and IMU-to-camera extrinsics, with FEJ set to the same prior)
state->_cam_intrinsics_cameras = params.camera_intrinsics;
for (int i = 0; i < state->_options.num_cameras; i++) {
state->_cam_intrinsics.at(i)->set_value(params.camera_intrinsics.at(i)->get_value());
state->_cam_intrinsics.at(i)->set_fej(params.camera_intrinsics.at(i)->get_value());
state->_calib_IMUtoCAM.at(i)->set_value(params.camera_extrinsics.at(i));
state->_calib_IMUtoCAM.at(i)->set_fej(params.camera_extrinsics.at(i));
}
//===================================================================================
//===================================================================================
//===================================================================================
// If we are recording statistics, then open our file
if (params.record_timing_information) {
// If the file exists, then delete it (we always start a fresh log)
if (boost::filesystem::exists(params.record_timing_filepath)) {
boost::filesystem::remove(params.record_timing_filepath);
PRINT_INFO(YELLOW "[STATS]: found old file found, deleted...\n" RESET);
}
// Create the directory that we will open the file in
boost::filesystem::path p(params.record_timing_filepath);
boost::filesystem::create_directories(p.parent_path());
// Open our statistics file!
of_statistics.open(params.record_timing_filepath, std::ofstream::out | std::ofstream::app);
// Write the CSV header; the slam columns only exist if slam features are enabled
of_statistics << "# timestamp (sec),tracking,propagation,msckf update,";
if (state->_options.max_slam_features > 0) {
of_statistics << "slam update,slam delayed,";
}
of_statistics << "re-tri & marg,total" << std::endl;
}
//===================================================================================
//===================================================================================
//===================================================================================
// Let's make a feature extractor (KLT or descriptor-based, per config)
// NOTE: after we initialize we will increase the total number of feature tracks
trackDATABASE = std::make_shared<FeatureDatabase>();
if (params.use_klt) {
trackFEATS = std::shared_ptr<TrackBase>(new TrackKLT(state->_cam_intrinsics_cameras, params.init_options.init_max_features,
state->_options.max_aruco_features, params.use_stereo, params.histogram_method,
params.fast_threshold, params.grid_x, params.grid_y, params.min_px_dist));
} else {
trackFEATS = std::shared_ptr<TrackBase>(new TrackDescriptor(
state->_cam_intrinsics_cameras, params.init_options.init_max_features, state->_options.max_aruco_features, params.use_stereo,
params.histogram_method, params.fast_threshold, params.grid_x, params.grid_y, params.min_px_dist, params.knn_ratio));
}
// Initialize our aruco tag extractor (only when aruco tracking is enabled)
if (params.use_aruco) {
trackARUCO = std::shared_ptr<TrackBase>(new TrackAruco(state->_cam_intrinsics_cameras, state->_options.max_aruco_features,
params.use_stereo, params.histogram_method, params.downsize_aruco));
}
// Initialize our state propagator (IMU noise model + gravity magnitude)
propagator = std::make_shared<Propagator>(params.imu_noises, params.gravity_mag);
// Our state initializer (shares the tracker's feature database)
initializer = std::make_shared<ov_init::InertialInitializer>(params.init_options, trackFEATS->get_feature_database());
// Make the updaters! (MSCKF and SLAM/aruco share the feature-init options)
updaterMSCKF = std::make_shared<UpdaterMSCKF>(params.msckf_options, params.featinit_options);
updaterSLAM = std::make_shared<UpdaterSLAM>(params.slam_options, params.aruco_options, params.featinit_options);
// If we are using zero velocity updates, then create the updater
if (params.try_zupt) {
updaterZUPT = std::make_shared<UpdaterZeroVelocity>(params.zupt_options, params.imu_noises, trackFEATS->get_feature_database(),
propagator, params.gravity_mag, params.zupt_max_velocity,
params.zupt_noise_multiplier, params.zupt_max_disparity);
}
// Feature initializer used to (re-)triangulate currently active tracks
active_tracks_initializer = std::make_shared<FeatureInitializer>(params.featinit_options);
}
// Feed simulated feature measurements (per-camera lists of (id, uv) pairs)
// into the pipeline. On first call the tracker is swapped for a TrackSIM;
// the measurements are appended to the database, a zero-velocity update is
// attempted if enabled, and the normal propagate+update step is run with
// zero-filled placeholder images. Requires the VIO to already be initialized,
// otherwise the program exits.
void VioManager::feed_measurement_simulation(double timestamp, const std::vector<int> &camids,
const std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats) {
// Start timing
rT1 = boost::posix_time::microsec_clock::local_time();
// Check if we actually have a simulated tracker
// If not, recreate and re-cast the tracker to our simulation tracker
std::shared_ptr<TrackSIM> trackSIM = std::dynamic_pointer_cast<TrackSIM>(trackFEATS);
if (trackSIM == nullptr) {
// Replace with the simulated tracker
trackSIM = std::make_shared<TrackSIM>(state->_cam_intrinsics_cameras, state->_options.max_aruco_features);
trackFEATS = trackSIM;
PRINT_WARNING(RED "[SIM]: casting our tracker to a TrackSIM object!\n" RESET);
}
// Feed our simulation tracker
trackSIM->feed_measurement_simulation(timestamp, camids, feats);
// Only accumulate into the long-term database once the filter is running
if (is_initialized_vio) {
trackDATABASE->append_new_measurements(trackSIM->get_feature_database());
}
rT2 = boost::posix_time::microsec_clock::local_time();
// Check if we should do zero-velocity, if so update the state with it
// Note that in the case that we only use in the beginning initialization phase
// If we have since moved, then we should never try to do a zero velocity update!
if (is_initialized_vio && updaterZUPT != nullptr && (!params.zupt_only_at_beginning || !has_moved_since_zupt)) {
// If the same state time, use the previous timestep decision
if (state->_timestamp != timestamp) {
did_zupt_update = updaterZUPT->try_update(state, timestamp);
}
// A successful zero-velocity update replaces the normal update this frame
if (did_zupt_update) {
return;
}
}
// If we do not have VIO initialization, then return an error
// (simulation assumes the caller initialized the state beforehand)
if (!is_initialized_vio) {
PRINT_ERROR(RED "[SIM]: your vio system should already be initialized before simulating features!!!\n" RESET);
PRINT_ERROR(RED "[SIM]: initialize your system first before calling feed_measurement_simulation()!!!!\n" RESET);
std::exit(EXIT_FAILURE);
}
// Call on our propagate and update function
// Simulation is either all sync, or single camera...
// Build a CameraData message with blank images sized to each camera model
ov_core::CameraData message;
message.timestamp = timestamp;
for (auto const &camid : camids) {
int width = state->_cam_intrinsics_cameras.at(camid)->w();
int height = state->_cam_intrinsics_cameras.at(camid)->h();
message.sensor_ids.push_back(camid);
message.images.push_back(cv::Mat::zeros(cv::Size(width, height), CV_8UC1));
message.masks.push_back(cv::Mat::zeros(cv::Size(width, height), CV_8UC1));
}
do_feature_propagate_update(message);
}
// Entry point for real camera images. Validates the message (non-empty,
// matching id/image counts, unique sensor ids), optionally downsamples
// images and masks by 2x, runs feature (and aruco) tracking, attempts a
// zero-velocity update, tries to initialize the VIO if needed, and finally
// hands off to do_feature_propagate_update().
void VioManager::track_image_and_update(const ov_core::CameraData &message_const) {
// Start timing
rT1 = boost::posix_time::microsec_clock::local_time();
// Assert we have valid measurement data and ids
// (adjacent-id check relies on sensor_ids being sorted; duplicates would be adjacent)
assert(!message_const.sensor_ids.empty());
assert(message_const.sensor_ids.size() == message_const.images.size());
for (size_t i = 0; i < message_const.sensor_ids.size() - 1; i++) {
assert(message_const.sensor_ids.at(i) != message_const.sensor_ids.at(i + 1));
}
// Downsample if we are downsampling (pyrDown halves each dimension)
ov_core::CameraData message = message_const;
for (size_t i = 0; i < message.sensor_ids.size() && params.downsample_cameras; i++) {
cv::Mat img = message.images.at(i);
cv::Mat mask = message.masks.at(i);
cv::Mat img_temp, mask_temp;
cv::pyrDown(img, img_temp, cv::Size(img.cols / 2.0, img.rows / 2.0));
message.images.at(i) = img_temp;
cv::pyrDown(mask, mask_temp, cv::Size(mask.cols / 2.0, mask.rows / 2.0));
message.masks.at(i) = mask_temp;
}
// Perform our feature tracking!
trackFEATS->feed_new_camera(message);
// Only accumulate into the long-term database once the filter is running
if (is_initialized_vio) {
trackDATABASE->append_new_measurements(trackFEATS->get_feature_database());
}
// If the aruco tracker is available, the also pass to it
// NOTE: binocular tracking for aruco doesn't make sense as we by default have the ids
// NOTE: thus we just call the stereo tracking if we are doing binocular!
if (is_initialized_vio && trackARUCO != nullptr) {
trackARUCO->feed_new_camera(message);
trackDATABASE->append_new_measurements(trackARUCO->get_feature_database());
}
rT2 = boost::posix_time::microsec_clock::local_time();
// Check if we should do zero-velocity, if so update the state with it
// Note that in the case that we only use in the beginning initialization phase
// If we have since moved, then we should never try to do a zero velocity update!
if (is_initialized_vio && updaterZUPT != nullptr && (!params.zupt_only_at_beginning || !has_moved_since_zupt)) {
// If the same state time, use the previous timestep decision
if (state->_timestamp != message.timestamp) {
did_zupt_update = updaterZUPT->try_update(state, message.timestamp);
}
// A successful zero-velocity update replaces the normal update this frame
if (did_zupt_update) {
return;
}
}
// If we do not have VIO initialization, then try to initialize
// TODO: Or if we are trying to reset the system, then do that here!
if (!is_initialized_vio) {
is_initialized_vio = try_to_initialize(message);
if (!is_initialized_vio) {
// Not initialized yet: report tracking time and wait for more data
double time_track = (rT2 - rT1).total_microseconds() * 1e-6;
PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for tracking\n" RESET, time_track);
return;
}
}
// Call on our propagate and update function
do_feature_propagate_update(message);
}
void VioManager::do_feature_propagate_update(const ov_core::CameraData &message) {
//===================================================================================
// State propagation, and clone augmentation
//===================================================================================
// Return if the camera measurement is out of order
if (state->_timestamp > message.timestamp) {
PRINT_WARNING(YELLOW "image received out of order, unable to do anything (prop dt = %3f)\n" RESET,
(message.timestamp - state->_timestamp));
return;
}
// Propagate the state forward to the current update time
// Also augment it with a new clone!
// NOTE: if the state is already at the given time (can happen in sim)
// NOTE: then no need to prop since we already are at the desired timestep
if (state->_timestamp != message.timestamp) {
propagator->propagate_and_clone(state, message.timestamp);
}
rT3 = boost::posix_time::microsec_clock::local_time();
// If we have not reached max clones, we should just return...
// This isn't super ideal, but it keeps the logic after this easier...
// We can start processing things when we have at least 5 clones since we can start triangulating things...
if ((int)state->_clones_IMU.size() < std::min(state->_options.max_clone_size, 5)) {
PRINT_DEBUG("waiting for enough clone states (%d of %d)....\n", (int)state->_clones_IMU.size(),
std::min(state->_options.max_clone_size, 5));
return;
}
// Return if we where unable to propagate
if (state->_timestamp != message.timestamp) {
PRINT_WARNING(RED "[PROP]: Propagator unable to propagate the state forward in time!\n" RESET);
PRINT_WARNING(RED "[PROP]: It has been %.3f since last time we propagated\n" RESET, message.timestamp - state->_timestamp);
return;
}
has_moved_since_zupt = true;
//===================================================================================
// MSCKF features and KLT tracks that are SLAM features
//===================================================================================
// Now, lets get all features that should be used for an update that are lost in the newest frame
// We explicitly request features that have not been deleted (used) in another update step
std::vector<std::shared_ptr<Feature>> feats_lost, feats_marg, feats_slam;
feats_lost = trackFEATS->get_feature_database()->features_not_containing_newer(state->_timestamp, false, true);
// Don't need to get the oldest features until we reach our max number of clones
if ((int)state->_clones_IMU.size() > state->_options.max_clone_size) {
feats_marg = trackFEATS->get_feature_database()->features_containing(state->margtimestep(), false, true);
if (trackARUCO != nullptr && message.timestamp - startup_time >= params.dt_slam_delay) {
feats_slam = trackARUCO->get_feature_database()->features_containing(state->margtimestep(), false, true);
}
}
// Remove any lost features that were from other image streams
// E.g: if we are cam1 and cam0 has not processed yet, we don't want to try to use those in the update yet
// E.g: thus we wait until cam0 process its newest image to remove features which were seen from that camera
auto it1 = feats_lost.begin();
while (it1 != feats_lost.end()) {
bool found_current_message_camid = false;
for (const auto &camuvpair : (*it1)->uvs) {
if (std::find(message.sensor_ids.begin(), message.sensor_ids.end(), camuvpair.first) != message.sensor_ids.end()) {
found_current_message_camid = true;
break;
}
}
if (found_current_message_camid) {
it1++;
} else {
it1 = feats_lost.erase(it1);
}
}
// We also need to make sure that the max tracks does not contain any lost features
// This could happen if the feature was lost in the last frame, but has a measurement at the marg timestep
it1 = feats_lost.begin();
while (it1 != feats_lost.end()) {
if (std::find(feats_marg.begin(), feats_marg.end(), (*it1)) != feats_marg.end()) {
// PRINT_WARNING(YELLOW "FOUND FEATURE THAT WAS IN BOTH feats_lost and feats_marg!!!!!!\n" RESET);
it1 = feats_lost.erase(it1);
} else {
it1++;
}
}
// Find tracks that have reached max length, these can be made into SLAM features
std::vector<std::shared_ptr<Feature>> feats_maxtracks;
auto it2 = feats_marg.begin();
while (it2 != feats_marg.end()) {
// See if any of our camera's reached max track
bool reached_max = false;
for (const auto &cams : (*it2)->timestamps) {
if ((int)cams.second.size() > state->_options.max_clone_size) {
reached_max = true;
break;
}
}
// If max track, then add it to our possible slam feature list
if (reached_max) {
feats_maxtracks.push_back(*it2);
it2 = feats_marg.erase(it2);
} else {
it2++;
}
}
// Count how many aruco tags we have in our state
int curr_aruco_tags = 0;
auto it0 = state->_features_SLAM.begin();
while (it0 != state->_features_SLAM.end()) {
if ((int)(*it0).second->_featid <= 4 * state->_options.max_aruco_features)
curr_aruco_tags++;
it0++;
}
// Append a new SLAM feature if we have the room to do so
// Also check that we have waited our delay amount (normally prevents bad first set of slam points)
if (state->_options.max_slam_features > 0 && message.timestamp - startup_time >= params.dt_slam_delay &&
(int)state->_features_SLAM.size() < state->_options.max_slam_features + curr_aruco_tags) {
// Get the total amount to add, then the max amount that we can add given our marginalize feature array
int amount_to_add = (state->_options.max_slam_features + curr_aruco_tags) - (int)state->_features_SLAM.size();
int valid_amount = (amount_to_add > (int)feats_maxtracks.size()) ? (int)feats_maxtracks.size() : amount_to_add;
// If we have at least 1 that we can add, lets add it!
// Note: we remove them from the feat_marg array since we don't want to reuse information...
if (valid_amount > 0) {
feats_slam.insert(feats_slam.end(), feats_maxtracks.end() - valid_amount, feats_maxtracks.end());
feats_maxtracks.erase(feats_maxtracks.end() - valid_amount, feats_maxtracks.end());
}
}
// Loop through current SLAM features, we have tracks of them, grab them for this update!
// Note: if we have a slam feature that has lost tracking, then we should marginalize it out
// Note: we only enforce this if the current camera message is where the feature was seen from
// Note: if you do not use FEJ, these types of slam features *degrade* the estimator performance....
for (std::pair<const size_t, std::shared_ptr<Landmark>> &landmark : state->_features_SLAM) {
if (trackARUCO != nullptr) {
std::shared_ptr<Feature> feat1 = trackARUCO->get_feature_database()->get_feature(landmark.second->_featid);
if (feat1 != nullptr)
feats_slam.push_back(feat1);
}
std::shared_ptr<Feature> feat2 = trackFEATS->get_feature_database()->get_feature(landmark.second->_featid);
if (feat2 != nullptr)
feats_slam.push_back(feat2);
assert(landmark.second->_unique_camera_id != -1);
bool current_unique_cam =
std::find(message.sensor_ids.begin(), message.sensor_ids.end(), landmark.second->_unique_camera_id) != message.sensor_ids.end();
if (feat2 == nullptr && current_unique_cam)
landmark.second->should_marg = true;
}
// Lets marginalize out all old SLAM features here
// These are ones that where not successfully tracked into the current frame
// We do *NOT* marginalize out our aruco tags landmarks
StateHelper::marginalize_slam(state);
// Separate our SLAM features into new ones, and old ones
std::vector<std::shared_ptr<Feature>> feats_slam_DELAYED, feats_slam_UPDATE;
for (size_t i = 0; i < feats_slam.size(); i++) {
if (state->_features_SLAM.find(feats_slam.at(i)->featid) != state->_features_SLAM.end()) {
feats_slam_UPDATE.push_back(feats_slam.at(i));
// PRINT_DEBUG("[UPDATE-SLAM]: found old feature %d (%d
// measurements)\n",(int)feats_slam.at(i)->featid,(int)feats_slam.at(i)->timestamps_left.size());
} else {
feats_slam_DELAYED.push_back(feats_slam.at(i));
// PRINT_DEBUG("[UPDATE-SLAM]: new feature ready %d (%d
// measurements)\n",(int)feats_slam.at(i)->featid,(int)feats_slam.at(i)->timestamps_left.size());
}
}
// Concatenate our MSCKF feature arrays (i.e., ones not being used for slam updates)
std::vector<std::shared_ptr<Feature>> featsup_MSCKF = feats_lost;
featsup_MSCKF.insert(featsup_MSCKF.end(), feats_marg.begin(), feats_marg.end());
featsup_MSCKF.insert(featsup_MSCKF.end(), feats_maxtracks.begin(), feats_maxtracks.end());
//===================================================================================
// Now that we have a list of features, lets do the EKF update for MSCKF and SLAM!
//===================================================================================
// Sort based on track length
// TODO: we should have better selection logic here (i.e. even feature distribution in the FOV etc..)
// TODO: right now features that are "lost" are at the front of this vector, while ones at the end are long-tracks
std::sort(featsup_MSCKF.begin(), featsup_MSCKF.end(), [](const std::shared_ptr<Feature> &a, const std::shared_ptr<Feature> &b) -> bool {
size_t asize = 0;
size_t bsize = 0;
for (const auto &pair : a->timestamps)
asize += pair.second.size();
for (const auto &pair : b->timestamps)
bsize += pair.second.size();
return asize < bsize;
});
// Pass them to our MSCKF updater
// NOTE: if we have more then the max, we select the "best" ones (i.e. max tracks) for this update
// NOTE: this should only really be used if you want to track a lot of features, or have limited computational resources
if ((int)featsup_MSCKF.size() > state->_options.max_msckf_in_update)
featsup_MSCKF.erase(featsup_MSCKF.begin(), featsup_MSCKF.end() - state->_options.max_msckf_in_update);
updaterMSCKF->update(state, featsup_MSCKF);
rT4 = boost::posix_time::microsec_clock::local_time();
// Perform SLAM delay init and update
// NOTE: that we provide the option here to do a *sequential* update
// NOTE: this will be a lot faster but won't be as accurate.
std::vector<std::shared_ptr<Feature>> feats_slam_UPDATE_TEMP;
while (!feats_slam_UPDATE.empty()) {
// Get sub vector of the features we will update with
std::vector<std::shared_ptr<Feature>> featsup_TEMP;
featsup_TEMP.insert(featsup_TEMP.begin(), feats_slam_UPDATE.begin(),
feats_slam_UPDATE.begin() + std::min(state->_options.max_slam_in_update, (int)feats_slam_UPDATE.size()));
feats_slam_UPDATE.erase(feats_slam_UPDATE.begin(),
feats_slam_UPDATE.begin() + std::min(state->_options.max_slam_in_update, (int)feats_slam_UPDATE.size()));
// Do the update
updaterSLAM->update(state, featsup_TEMP);
feats_slam_UPDATE_TEMP.insert(feats_slam_UPDATE_TEMP.end(), featsup_TEMP.begin(), featsup_TEMP.end());
}
feats_slam_UPDATE = feats_slam_UPDATE_TEMP;
rT5 = boost::posix_time::microsec_clock::local_time();
updaterSLAM->delayed_init(state, feats_slam_DELAYED);
rT6 = boost::posix_time::microsec_clock::local_time();
//===================================================================================
// Update our visualization feature set, and clean up the old features
//===================================================================================
// Re-triangulate all current tracks in the current frame
if (message.sensor_ids.at(0) == 0) {
// Re-triangulate features
retriangulate_active_tracks(message);
// Clear the MSCKF features only on the base camera
// Thus we should be able to visualize the other unique camera stream
// MSCKF features as they will also be appended to the vector
good_features_MSCKF.clear();
}
// Save all the MSCKF features used in the update
for (auto const &feat : featsup_MSCKF) {
good_features_MSCKF.push_back(feat->p_FinG);
feat->to_delete = true;
}
//===================================================================================
// Cleanup, marginalize out what we don't need any more...
//===================================================================================
// Remove features that where used for the update from our extractors at the last timestep
// This allows for measurements to be used in the future if they failed to be used this time
// Note we need to do this before we feed a new image, as we want all new measurements to NOT be deleted
trackFEATS->get_feature_database()->cleanup();
if (trackARUCO != nullptr) {
trackARUCO->get_feature_database()->cleanup();
}
// First do anchor change if we are about to lose an anchor pose
updaterSLAM->change_anchors(state);
// Cleanup any features older then the marginalization time
if ((int)state->_clones_IMU.size() > state->_options.max_clone_size) {
trackFEATS->get_feature_database()->cleanup_measurements(state->margtimestep());
trackDATABASE->cleanup_measurements(state->margtimestep());
if (trackARUCO != nullptr) {
trackARUCO->get_feature_database()->cleanup_measurements(state->margtimestep());
}
}
// Finally marginalize the oldest clone if needed
StateHelper::marginalize_old_clone(state);
rT7 = boost::posix_time::microsec_clock::local_time();
//===================================================================================
// Debug info, and stats tracking
//===================================================================================
// Get timing statitics information
double time_track = (rT2 - rT1).total_microseconds() * 1e-6;
double time_prop = (rT3 - rT2).total_microseconds() * 1e-6;
double time_msckf = (rT4 - rT3).total_microseconds() * 1e-6;
double time_slam_update = (rT5 - rT4).total_microseconds() * 1e-6;
double time_slam_delay = (rT6 - rT5).total_microseconds() * 1e-6;
double time_marg = (rT7 - rT6).total_microseconds() * 1e-6;
double time_total = (rT7 - rT1).total_microseconds() * 1e-6;
// Timing information
PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for tracking\n" RESET, time_track);
PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for propagation\n" RESET, time_prop);
PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for MSCKF update (%d feats)\n" RESET, time_msckf, (int)featsup_MSCKF.size());
if (state->_options.max_slam_features > 0) {
PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for SLAM update (%d feats)\n" RESET, time_slam_update, (int)state->_features_SLAM.size());
PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for SLAM delayed init (%d feats)\n" RESET, time_slam_delay, (int)feats_slam_DELAYED.size());
}
PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for re-tri & marg (%d clones in state)\n" RESET, time_marg, (int)state->_clones_IMU.size());
std::stringstream ss;
ss << "[TIME]: " << std::setprecision(4) << time_total << " seconds for total (camera";
for (const auto &id : message.sensor_ids) {
ss << " " << id;
}
ss << ")" << std::endl;
PRINT_DEBUG(BLUE "%s" RESET, ss.str().c_str());
// Finally if we are saving stats to file, lets save it to file
if (params.record_timing_information && of_statistics.is_open()) {
// We want to publish in the IMU clock frame
// The timestamp in the state will be the last camera time
double t_ItoC = state->_calib_dt_CAMtoIMU->value()(0);
double timestamp_inI = state->_timestamp + t_ItoC;
// Append to the file
of_statistics << std::fixed << std::setprecision(15) << timestamp_inI << "," << std::fixed << std::setprecision(5) << time_track << ","
<< time_prop << "," << time_msckf << ",";
if (state->_options.max_slam_features > 0) {
of_statistics << time_slam_update << "," << time_slam_delay << ",";
}
of_statistics << time_marg << "," << time_total << std::endl;
of_statistics.flush();
}
// Update our distance traveled
if (timelastupdate != -1 && state->_clones_IMU.find(timelastupdate) != state->_clones_IMU.end()) {
Eigen::Matrix<double, 3, 1> dx = state->_imu->pos() - state->_clones_IMU.at(timelastupdate)->pos();
distance += dx.norm();
}
timelastupdate = message.timestamp;
// Debug, print our current state
PRINT_INFO("q_GtoI = %.3f,%.3f,%.3f,%.3f | p_IinG = %.3f,%.3f,%.3f | dist = %.2f (meters)\n", state->_imu->quat()(0),
state->_imu->quat()(1), state->_imu->quat()(2), state->_imu->quat()(3), state->_imu->pos()(0), state->_imu->pos()(1),
state->_imu->pos()(2), distance);
PRINT_INFO("bg = %.4f,%.4f,%.4f | ba = %.4f,%.4f,%.4f\n", state->_imu->bias_g()(0), state->_imu->bias_g()(1), state->_imu->bias_g()(2),
state->_imu->bias_a()(0), state->_imu->bias_a()(1), state->_imu->bias_a()(2));
// Debug for camera imu offset
if (state->_options.do_calib_camera_timeoffset) {
PRINT_INFO("camera-imu timeoffset = %.5f\n", state->_calib_dt_CAMtoIMU->value()(0));
}
// Debug for camera intrinsics
if (state->_options.do_calib_camera_intrinsics) {
for (int i = 0; i < state->_options.num_cameras; i++) {
std::shared_ptr<Vec> calib = state->_cam_intrinsics.at(i);
PRINT_INFO("cam%d intrinsics = %.3f,%.3f,%.3f,%.3f | %.3f,%.3f,%.3f,%.3f\n", (int)i, calib->value()(0), calib->value()(1),
calib->value()(2), calib->value()(3), calib->value()(4), calib->value()(5), calib->value()(6), calib->value()(7));
}
}
// Debug for camera extrinsics
if (state->_options.do_calib_camera_pose) {
for (int i = 0; i < state->_options.num_cameras; i++) {
std::shared_ptr<PoseJPL> calib = state->_calib_IMUtoCAM.at(i);
PRINT_INFO("cam%d extrinsics = %.3f,%.3f,%.3f,%.3f | %.3f,%.3f,%.3f\n", (int)i, calib->quat()(0), calib->quat()(1), calib->quat()(2),
calib->quat()(3), calib->pos()(0), calib->pos()(1), calib->pos()(2));
}
}
}
bool VioManager::try_to_initialize(const ov_core::CameraData &message) {

  // Directly return if the initialization thread is already running.
  // Note that we lock on the queue since the thread could have just finished an update
  // and be using this queue to propagate the state forward (we should wait in this case).
  if (thread_init_running) {
    std::lock_guard<std::mutex> lck(camera_queue_init_mtx);
    camera_queue_init.push_back(message.timestamp);
    return false;
  }

  // If the background thread was a success, then return success!
  if (thread_init_success) {
    return true;
  }

  // Run the initialization in a second thread so it can go as slow as it desires.
  // NOTE(review): the lambda captures members by reference and the thread may be detached;
  // this assumes the VioManager outlives the thread -- confirm for shutdown paths.
  thread_init_running = true;
  std::thread thread([&] {
    // Outputs from our initializer
    double timestamp;
    Eigen::MatrixXd covariance;
    std::vector<std::shared_ptr<ov_type::Type>> order;
    auto init_rT1 = boost::posix_time::microsec_clock::local_time();

    // Try to initialize the system.
    // We will wait for a jerk (motion excitation) if we do not have the zero-velocity
    // update enabled, otherwise we can initialize right away since the zero-velocity
    // update will handle the stationary case.
    bool wait_for_jerk = (updaterZUPT == nullptr);
    bool success = initializer->initialize(timestamp, covariance, order, state->_imu, wait_for_jerk);

    // If we have initialized successfully we will set the covariance and state elements as needed
    // TODO: set the clones and SLAM features here so we can start updating right away...
    if (success) {

      // Set our covariance (the state itself should already be set inside the initializer)
      StateHelper::set_initial_covariance(state, covariance, order);

      // Set the state time
      state->_timestamp = timestamp;
      startup_time = timestamp;

      // Cleanup any feature measurements older than the initialization time.
      // Also increase the number of features to the desired amount used during estimation.
      trackFEATS->get_feature_database()->cleanup_measurements(state->_timestamp);
      trackFEATS->set_num_features(params.num_pts);
      if (trackARUCO != nullptr) {
        trackARUCO->get_feature_database()->cleanup_measurements(state->_timestamp);
      }

      // If we are already moving, then we should not do a zero velocity update
      if (state->_imu->vel().norm() > params.zupt_max_velocity) {
        has_moved_since_zupt = true;
      }

      // Else we are good to go, print out our stats
      auto init_rT2 = boost::posix_time::microsec_clock::local_time();
      PRINT_INFO(GREEN "[init]: successful initialization in %.4f seconds\n" RESET, (init_rT2 - init_rT1).total_microseconds() * 1e-6);
      PRINT_INFO(GREEN "[init]: orientation = %.4f, %.4f, %.4f, %.4f\n" RESET, state->_imu->quat()(0), state->_imu->quat()(1),
                 state->_imu->quat()(2), state->_imu->quat()(3));
      PRINT_INFO(GREEN "[init]: bias gyro = %.4f, %.4f, %.4f\n" RESET, state->_imu->bias_g()(0), state->_imu->bias_g()(1),
                 state->_imu->bias_g()(2));
      PRINT_INFO(GREEN "[init]: velocity = %.4f, %.4f, %.4f\n" RESET, state->_imu->vel()(0), state->_imu->vel()(1), state->_imu->vel()(2));
      PRINT_INFO(GREEN "[init]: bias accel = %.4f, %.4f, %.4f\n" RESET, state->_imu->bias_a()(0), state->_imu->bias_a()(1),
                 state->_imu->bias_a()(2));
      PRINT_INFO(GREEN "[init]: position = %.4f, %.4f, %.4f\n" RESET, state->_imu->pos()(0), state->_imu->pos()(1), state->_imu->pos()(2));

      // Keep only camera times newer than the initialized time.
      // Older ones can accumulate if the initialization took a while to perform.
      std::lock_guard<std::mutex> lck(camera_queue_init_mtx);
      std::vector<double> camera_timestamps_to_init;
      for (size_t i = 0; i < camera_queue_init.size(); i++) {
        if (camera_queue_init.at(i) > timestamp) {
          camera_timestamps_to_init.push_back(camera_queue_init.at(i));
        }
      }

      // Now that we have initialized, propagate the state up to the current timestep.
      // In general this should be ok as long as the initialization didn't take too long to perform.
      // Propagating over multiple seconds will become an issue if the initial biases are bad.
      // clone_rate subsamples the queued times so we never create more than max_clone_size clones here.
      size_t clone_rate = (size_t)((double)camera_timestamps_to_init.size() / (double)params.state_options.max_clone_size) + 1;
      for (size_t i = 0; i < camera_timestamps_to_init.size(); i += clone_rate) {
        propagator->propagate_and_clone(state, camera_timestamps_to_init.at(i));
        StateHelper::marginalize_old_clone(state);
      }
      PRINT_DEBUG(YELLOW "[init]: moved the state forward %.2f seconds\n" RESET, state->_timestamp - timestamp);
      thread_init_success = true;
      camera_queue_init.clear();

    } else {

      // Initialization failed: record timing and reset so a later call can retry
      auto init_rT2 = boost::posix_time::microsec_clock::local_time();
      PRINT_DEBUG(YELLOW "[init]: failed initialization in %.4f seconds\n" RESET, (init_rT2 - init_rT1).total_microseconds() * 1e-6);
      thread_init_success = false;
      std::lock_guard<std::mutex> lck(camera_queue_init_mtx);
      camera_queue_init.clear();
    }

    // Finally, mark that the thread has finished running
    thread_init_running = false;
  });

  // If we are single threaded, then run single threaded (block until done)
  // Otherwise detach this thread so it runs in the background!
  if (!params.use_multi_threading) {
    thread.join();
  } else {
    thread.detach();
  }
  return false;
}
void VioManager::retriangulate_active_tracks(const ov_core::CameraData &message) {

  // Start timing
  boost::posix_time::ptime retri_rT1, retri_rT2, retri_rT3, retri_rT4, retri_rT5;
  retri_rT1 = boost::posix_time::microsec_clock::local_time();

  // Clear old active track data and latch the state time / image we will project into
  active_tracks_time = state->_timestamp;
  active_image = message.images.at(0).clone();
  active_tracks_posinG.clear();
  active_tracks_uvd.clear();

  // Get all features which are tracked in the current frame
  // NOTE: This database should have all features from all trackers already in it
  // NOTE: it also has the complete history so we shouldn't see jumps from deleting measurements
  std::vector<std::shared_ptr<Feature>> active_features = trackDATABASE->features_containing_older(state->_timestamp);

  // 0. Get all timestamps our clones are at (and thus valid measurement times)
  std::vector<double> clonetimes;
  for (const auto &clone_imu : state->_clones_IMU) {
    clonetimes.emplace_back(clone_imu.first);
  }

  // 1. Clean all feature measurements and make sure they all have valid clone times.
  //    Also remove any that we are unable to triangulate (due to not having enough measurements).
  auto it0 = active_features.begin();
  while (it0 != active_features.end()) {

    // Skip if it is a SLAM feature: it is already in the state and is appended below
    if (state->_features_SLAM.find((*it0)->featid) != state->_features_SLAM.end()) {
      it0 = active_features.erase(it0);
      continue;
    }

    // Remove measurements that do not occur at a valid clone time
    (*it0)->clean_old_measurements(clonetimes);

    // Count how many measurements remain over all cameras
    // (use pair.second directly instead of re-looking-up timestamps[pair.first])
    int ct_meas = 0;
    for (const auto &pair : (*it0)->timestamps) {
      ct_meas += (int)pair.second.size();
    }

    // Remove if we don't have enough measurements to triangulate reliably
    if (ct_meas < (int)std::max(4.0, std::floor(state->_options.max_clone_size * 2.0 / 5.0))) {
      it0 = active_features.erase(it0);
    } else {
      it0++;
    }
  }
  retri_rT2 = boost::posix_time::microsec_clock::local_time();

  // Return if we have no features to triangulate and no SLAM features to append
  if (active_features.empty() && state->_features_SLAM.empty())
    return;

  // 2. Create vector of cloned *CAMERA* poses at each of our clone timesteps
  std::unordered_map<size_t, std::unordered_map<double, FeatureInitializer::ClonePose>> clones_cam;
  for (const auto &clone_calib : state->_calib_IMUtoCAM) {

    // For this camera, create the vector of camera poses
    std::unordered_map<double, FeatureInitializer::ClonePose> clones_cami;
    for (const auto &clone_imu : state->_clones_IMU) {

      // Compose the IMU clone pose with the IMU-to-camera extrinsics
      Eigen::Matrix3d R_GtoCi = clone_calib.second->Rot() * clone_imu.second->Rot();
      Eigen::Vector3d p_CioinG = clone_imu.second->pos() - R_GtoCi.transpose() * clone_calib.second->pos();

      // Append to our map
      clones_cami.insert({clone_imu.first, FeatureInitializer::ClonePose(R_GtoCi, p_CioinG)});
    }

    // Append to our map
    clones_cam.insert({clone_calib.first, clones_cami});
  }
  retri_rT3 = boost::posix_time::microsec_clock::local_time();

  // 3. Try to triangulate all features that have measurements
  auto it1 = active_features.begin();
  while (it1 != active_features.end()) {

    // Triangulate the feature (1d along-ray or full 3d depending on config)
    bool success_tri = true;
    if (active_tracks_initializer->config().triangulate_1d) {
      success_tri = active_tracks_initializer->single_triangulation_1d(it1->get(), clones_cam);
    } else {
      success_tri = active_tracks_initializer->single_triangulation(it1->get(), clones_cam);
    }

    // Remove the feature if triangulation was not a success
    if (!success_tri) {
      it1 = active_features.erase(it1);
      continue;
    }
    it1++;
  }
  retri_rT4 = boost::posix_time::microsec_clock::local_time();

  // Return if everything failed to triangulate and there are no SLAM features
  if (active_features.empty() && state->_features_SLAM.empty())
    return;

  // Record the global position of every successfully triangulated feature
  for (const auto &feat : active_features) {
    active_tracks_posinG[feat->featid] = feat->p_FinG;
  }

  // Also append the SLAM features, converting anchored representations into the global frame
  for (const auto &feat : state->_features_SLAM) {
    Eigen::Vector3d p_FinG = feat.second->get_xyz(false);
    if (LandmarkRepresentation::is_relative_representation(feat.second->_feat_representation)) {
      // Assert that we have an anchor pose for this feature
      assert(feat.second->_anchor_cam_id != -1);
      // Get calibration for our anchor camera
      Eigen::Matrix3d R_ItoC = state->_calib_IMUtoCAM.at(feat.second->_anchor_cam_id)->Rot();
      Eigen::Vector3d p_IinC = state->_calib_IMUtoCAM.at(feat.second->_anchor_cam_id)->pos();
      // Anchor pose orientation and position
      Eigen::Matrix3d R_GtoI = state->_clones_IMU.at(feat.second->_anchor_clone_timestamp)->Rot();
      Eigen::Vector3d p_IinG = state->_clones_IMU.at(feat.second->_anchor_clone_timestamp)->pos();
      // Feature in the global frame
      p_FinG = R_GtoI.transpose() * R_ItoC.transpose() * (feat.second->get_xyz(false) - p_IinC) + p_IinG;
    }
    active_tracks_posinG[feat.second->_featid] = p_FinG;
  }

  // Calibration of the first camera (cam0)
  std::shared_ptr<PoseJPL> calibration = state->_calib_IMUtoCAM.at(0);
  Eigen::Matrix<double, 3, 3> R_ItoC = calibration->Rot();
  Eigen::Matrix<double, 3, 1> p_IinC = calibration->pos();

  // Get current IMU clone state
  std::shared_ptr<PoseJPL> clone_Ii = state->_clones_IMU.at(active_tracks_time);
  Eigen::Matrix3d R_GtoIi = clone_Ii->Rot();
  Eigen::Vector3d p_IiinG = clone_Ii->pos();

  // 4. Project each global position into the current cam0 frame and store (u,v,depth)
  for (const auto &feat : active_tracks_posinG) {

    // Transform this feature into the current camera frame
    Eigen::Vector3d p_FinIi = R_GtoIi * (feat.second - p_IiinG);
    Eigen::Vector3d p_FinCi = R_ItoC * p_FinIi + p_IinC;
    double depth = p_FinCi(2);

    // Skip if the depth is invalid (behind or too close to the camera).
    // NOTE: this must happen *before* projection, otherwise we divide by a near-zero
    // depth and feed invalid normalized coordinates into the distortion model.
    if (depth < 0.1) {
      continue;
    }

    // Project into the normalized image plane and distort into raw pixel coordinates
    Eigen::Vector2d uv_norm, uv_dist;
    uv_norm << p_FinCi(0) / depth, p_FinCi(1) / depth;
    uv_dist = state->_cam_intrinsics_cameras.at(0)->distort_d(uv_norm);

    // Skip if the projection lands outside of the image bounds
    int width = state->_cam_intrinsics_cameras.at(0)->w();
    int height = state->_cam_intrinsics_cameras.at(0)->h();
    if (uv_dist(0) < 0 || (int)uv_dist(0) >= width || uv_dist(1) < 0 || (int)uv_dist(1) >= height) {
      // PRINT_DEBUG("feat %zu -> depth = %.2f | u_d = %.2f | v_d = %.2f\n",(*it2)->featid,depth,uv_dist(0),uv_dist(1));
      continue;
    }

    // Finally construct the uv and depth
    Eigen::Vector3d uvd;
    uvd << uv_dist, depth;
    active_tracks_uvd.insert({feat.first, uvd});
  }
  retri_rT5 = boost::posix_time::microsec_clock::local_time();

  // Timing information
  // PRINT_DEBUG(CYAN "[RETRI-TIME]: %.4f seconds for cleaning\n" RESET, (retri_rT2-retri_rT1).total_microseconds() * 1e-6);
  // PRINT_DEBUG(CYAN "[RETRI-TIME]: %.4f seconds for triangulate setup\n" RESET, (retri_rT3-retri_rT2).total_microseconds() * 1e-6);
  // PRINT_DEBUG(CYAN "[RETRI-TIME]: %.4f seconds for triangulation\n" RESET, (retri_rT4-retri_rT3).total_microseconds() * 1e-6);
  // PRINT_DEBUG(CYAN "[RETRI-TIME]: %.4f seconds for re-projection\n" RESET, (retri_rT5-retri_rT4).total_microseconds() * 1e-6);
  // PRINT_DEBUG(CYAN "[RETRI-TIME]: %.4f seconds total\n" RESET, (retri_rT5-retri_rT1).total_microseconds() * 1e-6);
}

View File

@@ -0,0 +1,374 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_VIOMANAGER_H
#define OV_MSCKF_VIOMANAGER_H
#include <Eigen/StdVector>
#include <algorithm>
#include <boost/filesystem.hpp>
#include <fstream>
#include <memory>
#include <string>
#include "cam/CamBase.h"
#include "cam/CamEqui.h"
#include "cam/CamRadtan.h"
#include "track/TrackAruco.h"
#include "track/TrackDescriptor.h"
#include "track/TrackKLT.h"
#include "track/TrackSIM.h"
#include "types/Landmark.h"
#include "types/LandmarkRepresentation.h"
#include "utils/opencv_lambda_body.h"
#include "utils/print.h"
#include "utils/sensor_data.h"
#include "init/InertialInitializer.h"
#include "state/Propagator.h"
#include "state/State.h"
#include "state/StateHelper.h"
#include "update/UpdaterMSCKF.h"
#include "update/UpdaterSLAM.h"
#include "update/UpdaterZeroVelocity.h"
#include "VioManagerOptions.h"
namespace ov_msckf {
/**
* @brief Core class that manages the entire system
*
* This class contains the state and other algorithms needed for the MSCKF to work.
* We feed in measurements into this class and send them to their respective algorithms.
* If we have measurements to propagate or update with, this class will call on our state to do that.
*/
class VioManager {
public:
/**
* @brief Default constructor, will load all configuration variables
* @param params_ Parameters loaded from either ROS or CMDLINE
*/
VioManager(VioManagerOptions &params_);
/**
 * @brief Feed function for inertial data
 *
 * Forwards the IMU reading to the propagator and either the initializer
 * (before we are initialized) or the zero-velocity updater (after).
 *
 * @param message Contains our timestamp and inertial information
 */
void feed_measurement_imu(const ov_core::ImuData &message) {

  // Oldest camera timestamp that still has feature measurements; IMU readings
  // before this time (shifted into the IMU clock) can be deleted downstream.
  double time_delete = trackFEATS->get_feature_database()->get_oldest_timestamp();
  if (time_delete != -1) {
    time_delete += params.calib_camimu_dt;
  }

  // The propagator always gets the measurement
  propagator->feed_imu(message, time_delete);

  // Before initialization the initializer consumes IMU data,
  // afterwards the zero-velocity updater does (if we have one)
  if (is_initialized_vio) {
    if (updaterZUPT != nullptr) {
      updaterZUPT->feed_imu(message, time_delete);
    }
  } else {
    initializer->feed_imu(message, time_delete);
  }
}
/**
 * @brief Feed function for camera measurements
 *
 * Simply forwards to track_image_and_update(), which performs feature tracking
 * and then (initialization /) propagation and the filter update.
 *
 * @param message Contains our timestamp, images, and camera ids
 */
void feed_measurement_camera(const ov_core::CameraData &message) { track_image_and_update(message); }
/**
* @brief Feed function for a synchronized simulated cameras
* @param timestamp Time that this image was collected
* @param camids Camera ids that we have simulated measurements for
* @param feats Raw uv simulated measurements
*/
void feed_measurement_simulation(double timestamp, const std::vector<int> &camids,
const std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats);
/**
 * @brief Given a groundtruth state, this will initialize our IMU state directly.
 * @param imustate State in the MSCKF ordering: [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel]
 */
void initialize_with_gt(Eigen::Matrix<double, 17, 1> imustate) {

  // Initialize the system (value and first-estimate are set identically)
  state->_imu->set_value(imustate.block(1, 0, 16, 1));
  state->_imu->set_fej(imustate.block(1, 0, 16, 1));

  // Fix the global yaw and position gauge freedoms
  // TODO: Why does this break out simulation consistency metrics?
  std::vector<std::shared_ptr<ov_type::Type>> order = {state->_imu};
  Eigen::MatrixXd Cov = 1e-4 * Eigen::MatrixXd::Identity(state->_imu->size(), state->_imu->size());
  // Cov.block(state->_imu->v()->id(), state->_imu->v()->id(), 3, 3) *= 10;
  // Cov(state->_imu->q()->id() + 2, state->_imu->q()->id() + 2) = 0.0;
  // Cov.block(state->_imu->p()->id(), state->_imu->p()->id(), 3, 3).setZero();
  // Cov.block(state->_imu->q()->id(), state->_imu->q()->id(), 3, 3) =
  //     state->_imu->Rot() * Cov.block(state->_imu->q()->id(), state->_imu->q()->id(), 3, 3) * state->_imu->Rot().transpose();
  StateHelper::set_initial_covariance(state, Cov, order);

  // Set the state time (first element of the groundtruth vector)
  state->_timestamp = imustate(0, 0);
  startup_time = imustate(0, 0);
  is_initialized_vio = true;

  // Cleanup any feature measurements older than the initialization time
  trackFEATS->get_feature_database()->cleanup_measurements(state->_timestamp);
  if (trackARUCO != nullptr) {
    trackARUCO->get_feature_database()->cleanup_measurements(state->_timestamp);
  }

  // Print what we initialized with
  PRINT_DEBUG(GREEN "[INIT]: INITIALIZED FROM GROUNDTRUTH FILE!!!!!\n" RESET);
  PRINT_DEBUG(GREEN "[INIT]: orientation = %.4f, %.4f, %.4f, %.4f\n" RESET, state->_imu->quat()(0), state->_imu->quat()(1),
              state->_imu->quat()(2), state->_imu->quat()(3));
  PRINT_DEBUG(GREEN "[INIT]: bias gyro = %.4f, %.4f, %.4f\n" RESET, state->_imu->bias_g()(0), state->_imu->bias_g()(1),
              state->_imu->bias_g()(2));
  PRINT_DEBUG(GREEN "[INIT]: velocity = %.4f, %.4f, %.4f\n" RESET, state->_imu->vel()(0), state->_imu->vel()(1), state->_imu->vel()(2));
  PRINT_DEBUG(GREEN "[INIT]: bias accel = %.4f, %.4f, %.4f\n" RESET, state->_imu->bias_a()(0), state->_imu->bias_a()(1),
              state->_imu->bias_a()(2));
  PRINT_DEBUG(GREEN "[INIT]: position = %.4f, %.4f, %.4f\n" RESET, state->_imu->pos()(0), state->_imu->pos()(1), state->_imu->pos()(2));
}
/// Returns true once the filter state has been initialized (either online or from groundtruth)
bool initialized() { return is_initialized_vio; }

/// Timestamp that the system was initialized at (-1 if not yet initialized)
double initialized_time() { return startup_time; }

/// Accessor for the current system parameters (returned by value)
VioManagerOptions get_params() { return params; }

/// Accessor to get the current state
std::shared_ptr<State> get_state() { return state; }

/// Accessor to get the current propagator
std::shared_ptr<Propagator> get_propagator() { return propagator; }
/// Get a nice visualization image of what tracks we have (SLAM features highlighted)
cv::Mat get_historical_viz_image() {

  // SLAM feature ids will be highlighted in the visualization
  std::vector<size_t> slam_ids;
  for (const auto &landmark : state->_features_SLAM) {
    slam_ids.push_back(landmark.first);
  }

  // Overlay text: "init" while initializing takes priority over "zvupt"
  std::string overlay;
  if (!is_initialized_vio) {
    overlay = "init";
  } else if (did_zupt_update) {
    overlay = "zvupt";
  }

  // Render the feature-tracker history, then aruco on top of the same image
  cv::Mat img_history;
  trackFEATS->display_history(img_history, 255, 255, 0, 255, 255, 255, slam_ids, overlay);
  if (trackARUCO != nullptr) {
    trackARUCO->display_history(img_history, 0, 255, 255, 255, 255, 255, slam_ids, overlay);
  }

  // Finally return the image
  return img_history;
}
/// Returns 3d positions (global frame) of features used in the last MSCKF update (for visualization)
std::vector<Eigen::Vector3d> get_good_features_MSCKF() { return good_features_MSCKF; }
/// Returns 3d SLAM features in the global frame (aruco tag landmarks are excluded)
std::vector<Eigen::Vector3d> get_features_SLAM() {
  std::vector<Eigen::Vector3d> slam_feats;
  for (auto &landmark : state->_features_SLAM) {

    // Skip aruco landmarks, whose ids are reserved at the front of the id space
    if ((int)landmark.first <= 4 * state->_options.max_aruco_features)
      continue;

    // Global representations already store the estimate in the global frame
    auto feat = landmark.second;
    if (!ov_type::LandmarkRepresentation::is_relative_representation(feat->_feat_representation)) {
      slam_feats.push_back(feat->get_xyz(false));
      continue;
    }

    // Anchored representation: transform from the anchor camera frame into global
    assert(feat->_anchor_cam_id != -1);
    const auto &calib = state->_calib_IMUtoCAM.at(feat->_anchor_cam_id);
    const auto &anchor = state->_clones_IMU.at(feat->_anchor_clone_timestamp);
    Eigen::Matrix<double, 3, 3> R_ItoC = calib->Rot();
    Eigen::Matrix<double, 3, 1> p_IinC = calib->pos();
    Eigen::Matrix<double, 3, 3> R_GtoI = anchor->Rot();
    Eigen::Matrix<double, 3, 1> p_IinG = anchor->pos();
    slam_feats.push_back(R_GtoI.transpose() * R_ItoC.transpose() * (feat->get_xyz(false) - p_IinC) + p_IinG);
  }
  return slam_feats;
}
/// Returns 3d ARUCO features in the global frame (non-aruco SLAM landmarks are excluded)
std::vector<Eigen::Vector3d> get_features_ARUCO() {
  std::vector<Eigen::Vector3d> aruco_feats;
  for (auto &landmark : state->_features_SLAM) {

    // Skip non-aruco landmarks, whose ids lie above the reserved aruco id range
    if ((int)landmark.first > 4 * state->_options.max_aruco_features)
      continue;

    // Global representations already store the estimate in the global frame
    auto feat = landmark.second;
    if (!ov_type::LandmarkRepresentation::is_relative_representation(feat->_feat_representation)) {
      aruco_feats.push_back(feat->get_xyz(false));
      continue;
    }

    // Anchored representation: transform from the anchor camera frame into global
    assert(feat->_anchor_cam_id != -1);
    const auto &calib = state->_calib_IMUtoCAM.at(feat->_anchor_cam_id);
    const auto &anchor = state->_clones_IMU.at(feat->_anchor_clone_timestamp);
    Eigen::Matrix<double, 3, 3> R_ItoC = calib->Rot();
    Eigen::Matrix<double, 3, 1> p_IinC = calib->pos();
    Eigen::Matrix<double, 3, 3> R_GtoI = anchor->Rot();
    Eigen::Matrix<double, 3, 1> p_IinG = anchor->pos();
    aruco_feats.push_back(R_GtoI.transpose() * R_ItoC.transpose() * (feat->get_xyz(false) - p_IinC) + p_IinG);
  }
  return aruco_feats;
}
/// Return the image (and the state timestamp it corresponds to) used when projecting the active tracks
void get_active_image(double &timestamp, cv::Mat &image) {
  timestamp = active_tracks_time;
  image = active_image;
}

/// Returns the re-triangulated active tracks: global 3d positions and their
/// (u,v,depth) projections into cam0, keyed by feature id, plus the state time they refer to
void get_active_tracks(double &timestamp, std::unordered_map<size_t, Eigen::Vector3d> &feat_posinG,
                       std::unordered_map<size_t, Eigen::Vector3d> &feat_tracks_uvd) {
  timestamp = active_tracks_time;
  feat_posinG = active_tracks_posinG;
  feat_tracks_uvd = active_tracks_uvd;
}
protected:
/**
* @brief Given a new set of camera images, this will track them.
*
* If we are having stereo tracking, we should call stereo tracking functions.
* Otherwise we will try to track on each of the images passed.
*
* @param message Contains our timestamp, images, and camera ids
*/
void track_image_and_update(const ov_core::CameraData &message);
/**
* @brief This will do the propagation and feature updates to the state
* @param message Contains our timestamp, images, and camera ids
*/
void do_feature_propagate_update(const ov_core::CameraData &message);
/**
* @brief This function will try to initialize the state.
*
* This should call on our initializer and try to init the state.
* In the future we should call the structure-from-motion code from here.
* This function could also be repurposed to re-initialize the system after failure.
*
* @param message Contains our timestamp, images, and camera ids
* @return True if we have successfully initialized
*/
bool try_to_initialize(const ov_core::CameraData &message);
/**
* @brief This function will re-triangulate all features in the current frame
*
* For all features that are currently being tracked by the system, this will re-triangulate them.
* This is useful for downstream applications which need the current pointcloud of points (e.g. loop closure).
* This will try to triangulate *all* points, not just ones that have been used in the update.
*
* @param message Contains our timestamp, images, and camera ids
*/
void retriangulate_active_tracks(const ov_core::CameraData &message);
/// Manager parameters
VioManagerOptions params;
/// Our master state object :D
std::shared_ptr<State> state;
/// Propagator of our state
std::shared_ptr<Propagator> propagator;
/// Complete history of our feature tracks
std::shared_ptr<ov_core::FeatureDatabase> trackDATABASE;
/// Our sparse feature tracker (klt or descriptor)
std::shared_ptr<ov_core::TrackBase> trackFEATS;
/// Our aruco tracker
std::shared_ptr<ov_core::TrackBase> trackARUCO;
/// State initializer
std::shared_ptr<ov_init::InertialInitializer> initializer;
/// Boolean if we are initialized or not
bool is_initialized_vio = false;
/// Our MSCKF feature updater
std::shared_ptr<UpdaterMSCKF> updaterMSCKF;
/// Our SLAM/ARUCO feature updater
std::shared_ptr<UpdaterSLAM> updaterSLAM;
/// Our zero velocity tracker
std::shared_ptr<UpdaterZeroVelocity> updaterZUPT;
/// This is the queue of measurement times that have come in since we started doing initialization
/// After we initialize, we will want to prop & update to the latest timestamp quickly
std::vector<double> camera_queue_init;
// Protects camera_queue_init (pushed from the camera callback, drained after init succeeds)
std::mutex camera_queue_init_mtx;
// Timing statistic file and variables
std::ofstream of_statistics;
boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7;
// Track how much distance we have traveled
double timelastupdate = -1;
double distance = 0;
// Startup time of the filter
double startup_time = -1;
// Threads and their atomics
std::atomic<bool> thread_init_running, thread_init_success;
// If we did a zero velocity update
bool did_zupt_update = false;
bool has_moved_since_zupt = false;
// Good features that were used in the last update (used in visualization)
std::vector<Eigen::Vector3d> good_features_MSCKF;
/// Feature initializer used to triangulate all active tracks
std::shared_ptr<ov_core::FeatureInitializer> active_tracks_initializer;
// Re-triangulated features 3d positions seen from the current frame (used in visualization)
// NOTE: active_tracks_time is -1 until the first retriangulation has happened
double active_tracks_time = -1;
std::unordered_map<size_t, Eigen::Vector3d> active_tracks_posinG;
std::unordered_map<size_t, Eigen::Vector3d> active_tracks_uvd;
cv::Mat active_image;
};
} // namespace ov_msckf
#endif // OV_MSCKF_VIOMANAGER_H

View File

@@ -0,0 +1,487 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_VIOMANAGEROPTIONS_H
#define OV_MSCKF_VIOMANAGEROPTIONS_H
#include <Eigen/Eigen>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include "state/Propagator.h"
#include "state/StateOptions.h"
#include "update/UpdaterOptions.h"
#include "init/InertialInitializerOptions.h"
#include "cam/CamEqui.h"
#include "cam/CamRadtan.h"
#include "feat/FeatureInitializerOptions.h"
#include "track/TrackBase.h"
#include "utils/colors.h"
#include "utils/opencv_yaml_parse.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
namespace ov_msckf {
/**
* @brief Struct which stores all options needed for state estimation.
*
* This is broken into a few different parts: estimator, trackers, and simulation.
* If you are going to add a parameter here you will need to add it to the parsers.
* You will also need to add it to the print statement at the bottom of each.
*/
struct VioManagerOptions {

  /**
   * @brief This function will load the non-simulation parameters of the system and print.
   * NOTE: simulation parameters are loaded separately via print_and_load_simulation().
   * @param parser If not null, this parser will be used to load our parameters
   */
  void print_and_load(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
    print_and_load_estimator(parser);
    print_and_load_noise(parser);
    print_and_load_state(parser);
    print_and_load_trackers(parser);
  }

  // ESTIMATOR ===============================

  /// Core state options (e.g. number of cameras, use fej, stereo, what calibration to enable etc)
  StateOptions state_options;

  /// Our state initialization options (e.g. window size, num features, if we should get the calibration)
  ov_init::InertialInitializerOptions init_options;

  /// Delay, in seconds, that we should wait from init before we start estimating SLAM features
  double dt_slam_delay = 2.0;

  /// If we should try to use zero velocity update
  bool try_zupt = false;

  /// Max velocity we will consider to try to do a zupt (i.e. if above this, don't do zupt)
  double zupt_max_velocity = 1.0;

  /// Multiplier of our zupt measurement IMU noise matrix (default should be 1.0)
  double zupt_noise_multiplier = 1.0;

  /// Max disparity we will consider to try to do a zupt (i.e. if above this, don't do zupt)
  double zupt_max_disparity = 1.0;

  /// If we should only use the zupt at the very beginning static initialization phase
  bool zupt_only_at_beginning = false;

  /// If we should record the timing performance to file
  bool record_timing_information = false;

  /// The path to the file we will record the timing information into
  std::string record_timing_filepath = "ov_msckf_timing.txt";

  /**
   * @brief This function will load and print out all estimator settings loaded.
   * This allows for visual checking that everything was loaded properly from ROS/CMD parsers.
   *
   * @param parser If not null, this parser will be used to load our parameters
   */
  void print_and_load_estimator(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
    PRINT_DEBUG("ESTIMATOR PARAMETERS:\n");
    state_options.print(parser);
    init_options.print_and_load(parser);
    if (parser != nullptr) {
      parser->parse_config("dt_slam_delay", dt_slam_delay);
      parser->parse_config("try_zupt", try_zupt);
      parser->parse_config("zupt_max_velocity", zupt_max_velocity);
      parser->parse_config("zupt_noise_multiplier", zupt_noise_multiplier);
      parser->parse_config("zupt_max_disparity", zupt_max_disparity);
      parser->parse_config("zupt_only_at_beginning", zupt_only_at_beginning);
      parser->parse_config("record_timing_information", record_timing_information);
      parser->parse_config("record_timing_filepath", record_timing_filepath);
    }
    PRINT_DEBUG("  - dt_slam_delay: %.1f\n", dt_slam_delay);
    PRINT_DEBUG("  - zero_velocity_update: %d\n", try_zupt);
    PRINT_DEBUG("  - zupt_max_velocity: %.2f\n", zupt_max_velocity);
    PRINT_DEBUG("  - zupt_noise_multiplier: %.2f\n", zupt_noise_multiplier);
    PRINT_DEBUG("  - zupt_max_disparity: %.4f\n", zupt_max_disparity);
    PRINT_DEBUG("  - zupt_only_at_beginning?: %d\n", zupt_only_at_beginning);
    PRINT_DEBUG("  - record timing?: %d\n", (int)record_timing_information);
    PRINT_DEBUG("  - record timing filepath: %s\n", record_timing_filepath.c_str());
  }

  // NOISE / CHI2 ============================

  /// IMU noise (gyroscope and accelerometer)
  Propagator::NoiseManager imu_noises;

  /// Update options for MSCKF features (pixel noise and chi2 multiplier)
  UpdaterOptions msckf_options;

  /// Update options for SLAM features (pixel noise and chi2 multiplier)
  UpdaterOptions slam_options;

  /// Update options for ARUCO features (pixel noise and chi2 multiplier)
  UpdaterOptions aruco_options;

  /// Update options for zero velocity (chi2 multiplier)
  UpdaterOptions zupt_options;

  /**
   * @brief This function will load and print out all noise parameters loaded.
   * This allows for visual checking that everything was loaded properly from ROS/CMD parsers.
   *
   * @param parser If not null, this parser will be used to load our parameters
   */
  void print_and_load_noise(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
    PRINT_DEBUG("NOISE PARAMETERS:\n");
    if (parser != nullptr) {
      parser->parse_external("relative_config_imu", "imu0", "gyroscope_noise_density", imu_noises.sigma_w);
      parser->parse_external("relative_config_imu", "imu0", "gyroscope_random_walk", imu_noises.sigma_wb);
      parser->parse_external("relative_config_imu", "imu0", "accelerometer_noise_density", imu_noises.sigma_a);
      parser->parse_external("relative_config_imu", "imu0", "accelerometer_random_walk", imu_noises.sigma_ab);
      // Cache the squared sigmas since the propagator consumes variances
      imu_noises.sigma_w_2 = std::pow(imu_noises.sigma_w, 2);
      imu_noises.sigma_wb_2 = std::pow(imu_noises.sigma_wb, 2);
      imu_noises.sigma_a_2 = std::pow(imu_noises.sigma_a, 2);
      imu_noises.sigma_ab_2 = std::pow(imu_noises.sigma_ab, 2);
    }
    imu_noises.print();
    if (parser != nullptr) {
      parser->parse_config("up_msckf_sigma_px", msckf_options.sigma_pix);
      parser->parse_config("up_msckf_chi2_multipler", msckf_options.chi2_multipler);
      parser->parse_config("up_slam_sigma_px", slam_options.sigma_pix);
      parser->parse_config("up_slam_chi2_multipler", slam_options.chi2_multipler);
      parser->parse_config("up_aruco_sigma_px", aruco_options.sigma_pix);
      parser->parse_config("up_aruco_chi2_multipler", aruco_options.chi2_multipler);
      msckf_options.sigma_pix_sq = std::pow(msckf_options.sigma_pix, 2);
      slam_options.sigma_pix_sq = std::pow(slam_options.sigma_pix, 2);
      aruco_options.sigma_pix_sq = std::pow(aruco_options.sigma_pix, 2);
      parser->parse_config("zupt_chi2_multipler", zupt_options.chi2_multipler);
    }
    PRINT_DEBUG("  Updater MSCKF Feats:\n");
    msckf_options.print();
    PRINT_DEBUG("  Updater SLAM Feats:\n");
    slam_options.print();
    PRINT_DEBUG("  Updater ARUCO Tags:\n");
    aruco_options.print();
    PRINT_DEBUG("  Updater ZUPT:\n");
    zupt_options.print();
  }

  // STATE DEFAULTS ==========================

  /// Gravity magnitude in the global frame (i.e. should be 9.81 typically)
  double gravity_mag = 9.81;

  /// Time offset between camera and IMU.
  double calib_camimu_dt = 0.0;

  /// Map between camid and camera intrinsics (fx, fy, cx, cy, d1...d4, cam_w, cam_h)
  std::unordered_map<size_t, std::shared_ptr<ov_core::CamBase>> camera_intrinsics;

  /// Map between camid and camera extrinsics (q_ItoC, p_IinC).
  std::map<size_t, Eigen::VectorXd> camera_extrinsics;

  /// If we should try to load a mask and use it to reject invalid features
  bool use_mask = false;

  /// Mask images for each camera
  std::map<size_t, cv::Mat> masks;

  /**
   * @brief This function will load and print all state parameters (e.g. sensor extrinsics)
   * This allows for visual checking that everything was loaded properly from ROS/CMD parsers.
   *
   * @param parser If not null, this parser will be used to load our parameters
   */
  void print_and_load_state(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
    if (parser != nullptr) {
      parser->parse_config("gravity_mag", gravity_mag);
      parser->parse_config("max_cameras", state_options.num_cameras); // might be redundant
      parser->parse_config("downsample_cameras", downsample_cameras); // might be redundant
      for (int i = 0; i < state_options.num_cameras; i++) {

        // Time offset (use the first one)
        // TODO: support multiple time offsets between cameras
        if (i == 0) {
          parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "timeshift_cam_imu", calib_camimu_dt, false);
        }

        // Distortion model
        std::string dist_model = "radtan";
        parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "distortion_model", dist_model);

        // Distortion parameters
        std::vector<double> cam_calib1 = {1, 1, 0, 0};
        std::vector<double> cam_calib2 = {0, 0, 0, 0};
        parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "intrinsics", cam_calib1);
        parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "distortion_coeffs", cam_calib2);
        Eigen::VectorXd cam_calib = Eigen::VectorXd::Zero(8);
        cam_calib << cam_calib1.at(0), cam_calib1.at(1), cam_calib1.at(2), cam_calib1.at(3), cam_calib2.at(0), cam_calib2.at(1),
            cam_calib2.at(2), cam_calib2.at(3);
        // Halve the intrinsics if we downsample the image streams
        cam_calib(0) /= (downsample_cameras) ? 2.0 : 1.0;
        cam_calib(1) /= (downsample_cameras) ? 2.0 : 1.0;
        cam_calib(2) /= (downsample_cameras) ? 2.0 : 1.0;
        cam_calib(3) /= (downsample_cameras) ? 2.0 : 1.0;

        // FOV / resolution
        std::vector<int> matrix_wh = {1, 1};
        parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "resolution", matrix_wh);
        matrix_wh.at(0) /= (downsample_cameras) ? 2.0 : 1.0;
        matrix_wh.at(1) /= (downsample_cameras) ? 2.0 : 1.0;
        std::pair<int, int> wh(matrix_wh.at(0), matrix_wh.at(1));

        // Extrinsics
        Eigen::Matrix4d T_CtoI = Eigen::Matrix4d::Identity();
        parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "T_imu_cam", T_CtoI);

        // Load these into our state (invert T_CtoI to get the IMU-to-camera transform convention)
        Eigen::Matrix<double, 7, 1> cam_eigen;
        cam_eigen.block(0, 0, 4, 1) = ov_core::rot_2_quat(T_CtoI.block(0, 0, 3, 3).transpose());
        cam_eigen.block(4, 0, 3, 1) = -T_CtoI.block(0, 0, 3, 3).transpose() * T_CtoI.block(0, 3, 3, 1);

        // Create intrinsics model (equidistant/fisheye vs. the default radtan)
        if (dist_model == "equidistant") {
          camera_intrinsics.insert({i, std::make_shared<ov_core::CamEqui>(matrix_wh.at(0), matrix_wh.at(1))});
          camera_intrinsics.at(i)->set_value(cam_calib);
        } else {
          camera_intrinsics.insert({i, std::make_shared<ov_core::CamRadtan>(matrix_wh.at(0), matrix_wh.at(1))});
          camera_intrinsics.at(i)->set_value(cam_calib);
        }
        camera_extrinsics.insert({i, cam_eigen});
      }
      parser->parse_config("use_mask", use_mask);
      if (use_mask) {
        for (int i = 0; i < state_options.num_cameras; i++) {
          std::string mask_path;
          std::string mask_node = "mask" + std::to_string(i);
          parser->parse_config(mask_node, mask_path);
          std::string total_mask_path = parser->get_config_folder() + mask_path;
          if (!boost::filesystem::exists(total_mask_path)) {
            PRINT_ERROR(RED "VioManager(): invalid mask path:\n" RESET);
            PRINT_ERROR(RED "\t- mask%d - %s\n" RESET, i, total_mask_path.c_str());
            std::exit(EXIT_FAILURE);
          }
          masks.insert({i, cv::imread(total_mask_path, cv::IMREAD_GRAYSCALE)});
        }
      }
    }
    PRINT_DEBUG("STATE PARAMETERS:\n");
    PRINT_DEBUG("  - gravity_mag: %.4f\n", gravity_mag);
    PRINT_DEBUG("  - gravity: %.3f, %.3f, %.3f\n", 0.0, 0.0, gravity_mag);
    PRINT_DEBUG("  - camera masks?: %d\n", use_mask);
    if (state_options.num_cameras != (int)camera_intrinsics.size() || state_options.num_cameras != (int)camera_extrinsics.size()) {
      PRINT_ERROR(RED "[SIM]: camera calib size does not match max cameras...\n" RESET);
      PRINT_ERROR(RED "[SIM]: got %d but expected %d max cameras (camera_intrinsics)\n" RESET, (int)camera_intrinsics.size(),
                  state_options.num_cameras);
      PRINT_ERROR(RED "[SIM]: got %d but expected %d max cameras (camera_extrinsics)\n" RESET, (int)camera_extrinsics.size(),
                  state_options.num_cameras);
      std::exit(EXIT_FAILURE);
    }
    PRINT_DEBUG("  - calib_camimu_dt: %.4f\n", calib_camimu_dt);
    for (int n = 0; n < state_options.num_cameras; n++) {
      std::stringstream ss;
      ss << "cam_" << n << "_fisheye:" << (std::dynamic_pointer_cast<ov_core::CamEqui>(camera_intrinsics.at(n)) != nullptr) << std::endl;
      ss << "cam_" << n << "_wh:" << std::endl << camera_intrinsics.at(n)->w() << " x " << camera_intrinsics.at(n)->h() << std::endl;
      ss << "cam_" << n << "_intrinsic(0:3):" << std::endl
         << camera_intrinsics.at(n)->get_value().block(0, 0, 4, 1).transpose() << std::endl;
      ss << "cam_" << n << "_intrinsic(4:7):" << std::endl
         << camera_intrinsics.at(n)->get_value().block(4, 0, 4, 1).transpose() << std::endl;
      ss << "cam_" << n << "_extrinsic(0:3):" << std::endl << camera_extrinsics.at(n).block(0, 0, 4, 1).transpose() << std::endl;
      ss << "cam_" << n << "_extrinsic(4:6):" << std::endl << camera_extrinsics.at(n).block(4, 0, 3, 1).transpose() << std::endl;
      Eigen::Matrix4d T_CtoI = Eigen::Matrix4d::Identity();
      T_CtoI.block(0, 0, 3, 3) = ov_core::quat_2_Rot(camera_extrinsics.at(n).block(0, 0, 4, 1)).transpose();
      T_CtoI.block(0, 3, 3, 1) = -T_CtoI.block(0, 0, 3, 3) * camera_extrinsics.at(n).block(4, 0, 3, 1);
      ss << "T_C" << n << "toI:" << std::endl << T_CtoI << std::endl << std::endl;
      // NOTE: pass the message through "%s" so any '%' inside the data is not
      // misinterpreted as a printf conversion specifier (format-string safety)
      PRINT_DEBUG("%s", ss.str().c_str());
    }
  }

  // TRACKERS ===============================

  /// If we should process two cameras as being stereo or binocular. If binocular, we do monocular feature tracking on each image.
  bool use_stereo = true;

  /// If we should use KLT tracking, or descriptor matcher
  bool use_klt = true;

  /// If should extract aruco tags and estimate them
  bool use_aruco = true;

  /// Will half the resolution of the aruco tag image (will be faster)
  bool downsize_aruco = true;

  /// Will half the resolution of all tracking images (aruco will be 1/4 instead of halved if downsize_aruco also enabled)
  bool downsample_cameras = false;

  /// If our front-end should try to use some multi-threading for stereo matching
  bool use_multi_threading = true;

  /// The number of points we should extract and track in *each* image frame. This highly affects the computation required for tracking.
  int num_pts = 150;

  /// Fast extraction threshold
  int fast_threshold = 20;

  /// Number of grids we should split column-wise to do feature extraction in
  int grid_x = 5;

  /// Number of grids we should split row-wise to do feature extraction in
  int grid_y = 5;

  /// Will check after doing KLT track and remove any features closer than this
  int min_px_dist = 10;

  /// What type of pre-processing histogram method should be applied to images
  ov_core::TrackBase::HistogramMethod histogram_method = ov_core::TrackBase::HistogramMethod::HISTOGRAM;

  /// KNN ratio between top two descriptor matches which is required to be a good match
  double knn_ratio = 0.85;

  /// Frequency we want to track images at (higher freq ones will be dropped)
  double track_frequency = 20.0;

  /// Parameters used by our feature initializer / triangulator
  ov_core::FeatureInitializerOptions featinit_options;

  /**
   * @brief This function will load and print out all parameters related to visual tracking
   * This allows for visual checking that everything was loaded properly from ROS/CMD parsers.
   *
   * @param parser If not null, this parser will be used to load our parameters
   */
  void print_and_load_trackers(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
    if (parser != nullptr) {
      parser->parse_config("use_stereo", use_stereo);
      parser->parse_config("use_klt", use_klt);
      parser->parse_config("use_aruco", use_aruco);
      parser->parse_config("downsize_aruco", downsize_aruco);
      parser->parse_config("downsample_cameras", downsample_cameras);
      parser->parse_config("multi_threading", use_multi_threading);
      parser->parse_config("num_pts", num_pts);
      parser->parse_config("fast_threshold", fast_threshold);
      parser->parse_config("grid_x", grid_x);
      parser->parse_config("grid_y", grid_y);
      parser->parse_config("min_px_dist", min_px_dist);
      std::string histogram_method_str = "HISTOGRAM";
      parser->parse_config("histogram_method", histogram_method_str);
      if (histogram_method_str == "NONE") {
        histogram_method = ov_core::TrackBase::NONE;
      } else if (histogram_method_str == "HISTOGRAM") {
        histogram_method = ov_core::TrackBase::HISTOGRAM;
      } else if (histogram_method_str == "CLAHE") {
        histogram_method = ov_core::TrackBase::CLAHE;
      } else {
        // Use PRINT_ERROR for consistency with the rest of the file (was raw printf)
        PRINT_ERROR(RED "VioManager(): invalid feature histogram specified:\n" RESET);
        PRINT_ERROR(RED "\t- NONE\n" RESET);
        PRINT_ERROR(RED "\t- HISTOGRAM\n" RESET);
        PRINT_ERROR(RED "\t- CLAHE\n" RESET);
        std::exit(EXIT_FAILURE);
      }
      parser->parse_config("knn_ratio", knn_ratio);
      parser->parse_config("track_frequency", track_frequency);
    }
    PRINT_DEBUG("FEATURE TRACKING PARAMETERS:\n");
    PRINT_DEBUG("  - use_stereo: %d\n", use_stereo);
    PRINT_DEBUG("  - use_klt: %d\n", use_klt);
    PRINT_DEBUG("  - use_aruco: %d\n", use_aruco);
    PRINT_DEBUG("  - downsize aruco: %d\n", downsize_aruco);
    PRINT_DEBUG("  - downsize cameras: %d\n", downsample_cameras);
    PRINT_DEBUG("  - use multi-threading: %d\n", use_multi_threading);
    PRINT_DEBUG("  - num_pts: %d\n", num_pts);
    PRINT_DEBUG("  - fast threshold: %d\n", fast_threshold);
    PRINT_DEBUG("  - grid X by Y: %d by %d\n", grid_x, grid_y);
    PRINT_DEBUG("  - min px dist: %d\n", min_px_dist);
    PRINT_DEBUG("  - hist method: %d\n", (int)histogram_method);
    PRINT_DEBUG("  - knn ratio: %.3f\n", knn_ratio);
    PRINT_DEBUG("  - track frequency: %.1f\n", track_frequency);
    featinit_options.print(parser);
  }

  // SIMULATOR ===============================

  /// Seed for initial states (i.e. random feature 3d positions in the generated map)
  int sim_seed_state_init = 0;

  /// Seed for calibration perturbations. Change this to perturb by different random values if perturbations are enabled.
  int sim_seed_preturb = 0;

  /// Measurement noise seed. This should be incremented for each run in the Monte-Carlo simulation to generate the same true measurements,
  /// but different noise values.
  int sim_seed_measurements = 0;

  /// If we should perturb the calibration that the estimator starts with
  bool sim_do_perturbation = false;

  /// Path to the trajectory we will b-spline and simulate on. Should be time(s),pos(xyz),ori(xyzw) format.
  std::string sim_traj_path = "src/open_vins/ov_data/sim/udel_gore.txt";

  /// We will start simulating after we have moved this much along the b-spline. This prevents static starts as we init from groundtruth in
  /// simulation.
  double sim_distance_threshold = 1.2;

  /// Frequency (Hz) that we will simulate our cameras
  double sim_freq_cam = 10.0;

  /// Frequency (Hz) that we will simulate our inertial measurement unit
  double sim_freq_imu = 400.0;

  /// Feature distance we generate features from (minimum)
  double sim_min_feature_gen_distance = 5;

  /// Feature distance we generate features from (maximum)
  double sim_max_feature_gen_distance = 10;

  /**
   * @brief This function will load and print out all simulated parameters.
   * This allows for visual checking that everything was loaded properly from ROS/CMD parsers.
   *
   * @param parser If not null, this parser will be used to load our parameters
   */
  void print_and_load_simulation(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
    if (parser != nullptr) {
      parser->parse_config("sim_seed_state_init", sim_seed_state_init);
      parser->parse_config("sim_seed_preturb", sim_seed_preturb);
      parser->parse_config("sim_seed_measurements", sim_seed_measurements);
      parser->parse_config("sim_do_perturbation", sim_do_perturbation);
      parser->parse_config("sim_traj_path", sim_traj_path);
      parser->parse_config("sim_distance_threshold", sim_distance_threshold);
      parser->parse_config("sim_freq_cam", sim_freq_cam);
      parser->parse_config("sim_freq_imu", sim_freq_imu);
      parser->parse_config("sim_min_feature_gen_dist", sim_min_feature_gen_distance);
      parser->parse_config("sim_max_feature_gen_dist", sim_max_feature_gen_distance);
    }
    PRINT_DEBUG("SIMULATION PARAMETERS:\n");
    PRINT_WARNING(BOLDRED "  - state init seed: %d \n" RESET, sim_seed_state_init);
    PRINT_WARNING(BOLDRED "  - perturb seed: %d \n" RESET, sim_seed_preturb);
    PRINT_WARNING(BOLDRED "  - measurement seed: %d \n" RESET, sim_seed_measurements);
    PRINT_WARNING(BOLDRED "  - do perturb?: %d\n" RESET, sim_do_perturbation);
    PRINT_DEBUG("  - traj path: %s\n", sim_traj_path.c_str());
    PRINT_DEBUG("  - dist thresh: %.2f\n", sim_distance_threshold);
    PRINT_DEBUG("  - cam feq: %.2f\n", sim_freq_cam);
    PRINT_DEBUG("  - imu feq: %.2f\n", sim_freq_imu);
    PRINT_DEBUG("  - min feat dist: %.2f\n", sim_min_feature_gen_distance);
    PRINT_DEBUG("  - max feat dist: %.2f\n", sim_max_feature_gen_distance);
  }
};
} // namespace ov_msckf
#endif // OV_MSCKF_VIOMANAGEROPTIONS_H

52
ov_msckf/src/dummy.cpp Normal file
View File

@@ -0,0 +1,52 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/**
* @namespace ov_msckf
* @brief Extended Kalman Filter estimator
*
* This is an implementation of a [Multi-State Constraint Kalman Filter (MSCKF)](https://ieeexplore.ieee.org/document/4209642) @cite
* Mourikis2007ICRA which leverages inertial and visual feature information. We want to stress that this is **not** a "vanilla"
* implementation of the filter and instead has many more features and improvements over the original. In addition, we have a modular type
* system which allows us to initialize and marginalize variables out of state with ease. Please see the following documentation pages for
* derivation details:
*
* - @ref propagation --- Inertial propagation derivations and details.
* - @ref update --- General state update for the different measurements.
* - @ref fej --- Background on First-Estimate Jacobians and how we use them.
* - @ref dev-index --- High level details on how the type system and covariance management works.
*
* The key features of the system are the following:
*
* - Sliding stochastic imu clones
* - First estimate Jacobians
* - Camera intrinsics and extrinsic online calibration
* - Time offset between camera and imu calibration
* - Standard MSCKF features with nullspace projection
* - 3d SLAM feature support (with six different representations)
* - Generic simulation of trajectories through an environment (see @ref ov_msckf::Simulator)
*
* We suggest those that are interested to first checkout the State and Propagator which should provide a nice introduction to the code.
* Both the slam and msckf features leverage the same Jacobian code, and thus we also recommend looking at the UpdaterHelper class for
* details on that.
*
*/
namespace ov_msckf {}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,231 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_ROS1VISUALIZER_H
#define OV_MSCKF_ROS1VISUALIZER_H
#include <fstream>
#include <sstream>
#include <string>
#include <geometry_msgs/PoseStamped.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <image_transport/image_transport.h>
#include <message_filters/subscriber.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/time_synchronizer.h>
#include <nav_msgs/Odometry.h>
#include <nav_msgs/Path.h>
#include <ros/ros.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/NavSatFix.h>
#include <sensor_msgs/PointCloud.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>
#include <std_msgs/Float64.h>
#include <tf/transform_broadcaster.h>
#include <visualization_msgs/Marker.h>
#include <boost/filesystem.hpp>
#include <cv_bridge/cv_bridge.h>
#include "core/VioManager.h"
#include "ros/RosVisualizerHelper.h"
#include "sim/Simulator.h"
#include "utils/dataset_reader.h"
#include "utils/print.h"
#include "utils/sensor_data.h"
#include "sophus/se3.hpp"
namespace Eigen {
// std::vector that uses Eigen's aligned allocator, required for fixed-size
// vectorizable Eigen types (e.g. Vector3d in some configurations) stored in
// STL containers. Template parameter renamed from `_Tp`: identifiers starting
// with an underscore followed by an uppercase letter are reserved for the
// implementation in C++.
template <typename T>
using aligned_vector = std::vector<T, Eigen::aligned_allocator<T>>;
}
namespace ov_msckf {
/**
* @brief Helper class that will publish results onto the ROS framework.
*
* Also save to file the current total state and covariance along with the groundtruth if we are simulating.
* We visualize the following things:
* - State of the system on TF, pose message, and path
* - Image of our tracker
* - Our different features (SLAM, MSCKF, ARUCO)
* - Groundtruth trajectory if we have it
*/
class ROS1Visualizer {
public:
/**
* @brief Default constructor
* @param nh ROS node handler
* @param app Core estimator manager
* @param sim Simulator if we are simulating
*/
ROS1Visualizer(std::shared_ptr<ros::NodeHandle> nh, std::shared_ptr<VioManager> app, std::shared_ptr<Simulator> sim = nullptr);
/**
* @brief Will setup ROS subscribers and callbacks
* @param parser Configuration file parser
*/
void setup_subscribers(std::shared_ptr<ov_core::YamlParser> parser);
/**
* @brief Will visualize the system if we have new things
*/
void visualize();
/**
* @brief Will publish our odometry message for the current timestep.
* This will take the current state estimate and get the propagated pose to the desired time.
* This can be used to get pose estimates on systems which require high frequency pose estimates.
* @param timestamp Time to which the pose should be propagated before publishing
*/
void visualize_odometry(double timestamp);
/**
* @brief After the run has ended, print results
*/
void visualize_final();
/// Callback for inertial information
void callback_inertial(const sensor_msgs::Imu::ConstPtr &msg);
/// Callback for monocular cameras information
void callback_monocular(const sensor_msgs::ImageConstPtr &msg0, int cam_id0);
/// Callback for synchronized stereo camera information
void callback_stereo(const sensor_msgs::ImageConstPtr &msg0, const sensor_msgs::ImageConstPtr &msg1, int cam_id0, int cam_id1);
protected:
/// Publish the current state
void publish_state();
/// Publish the active tracking image
void publish_images();
/// Publish current features
void publish_features();
/// Publish groundtruth (if we have it)
void publish_groundtruth();
/// Publish loop-closure information of current pose and active track information
void publish_loopclosure_information();
/// SVD-based alignment of the estimated trajectory to the groundtruth trajectory,
/// returning an alignment error metric.
/// NOTE(review): implementation not visible in this header -- confirm the exact
/// error metric (e.g. RMSE ATE) and units against the definition in the .cpp.
double alignSVD(const std::vector<int64_t>& filter_t_ns,
const Eigen::aligned_vector<Eigen::Vector3d>& filter_t_w_i,
const std::vector<int64_t>& gt_t_ns,
Eigen::aligned_vector<Eigen::Vector3d>& gt_t_w_i);
/// Global node handler
std::shared_ptr<ros::NodeHandle> _nh;
/// Core application of the filter system
std::shared_ptr<VioManager> _app;
/// Simulator (is nullptr if we are not sim'ing)
std::shared_ptr<Simulator> _sim;
// Our publishers
image_transport::Publisher it_pub_tracks, it_pub_loop_img_depth, it_pub_loop_img_depth_color;
ros::Publisher pub_poseimu, pub_odomimu, pub_pathimu;
ros::Publisher pub_points_msckf, pub_points_slam, pub_points_aruco, pub_points_sim;
ros::Publisher pub_loop_pose, pub_loop_point, pub_loop_extrinsic, pub_loop_intrinsics;
std::shared_ptr<tf::TransformBroadcaster> mTfBr;
// Added by PI on 27.07.2022
// image_transport::CameraPublisher pub_camera_info, pub_camera_info_trackhist;
image_transport::CameraPublisher camPub;
visualization_msgs::Marker Marker;
std::vector<std::vector<double>> cubeVisPoints;
image_transport::CameraSubscriber camSub;
sensor_msgs::CameraInfoPtr camInfoPtrGlob;
ros::Publisher pub_camera_info;
ros::Publisher vis_pub;
sensor_msgs::CameraInfo camInfoGlob;
sensor_msgs::ImagePtr exl_msg_global;
ros::Subscriber sub_camera_info;
ros::Publisher pub_poserec;
// Pre-loaded trajectory poses; presumably q(4) + p(3) stacked per entry -- TODO confirm ordering in the .cpp
std::vector<Eigen::Matrix<double, 7, 1>> loadedTrajectory;
// End of adding by PI
// Our subscribers and camera synchronizers
ros::Subscriber sub_imu;
std::vector<ros::Subscriber> subs_cam;
typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image> sync_pol;
std::vector<std::shared_ptr<message_filters::Synchronizer<sync_pol>>> sync_cam;
std::vector<std::shared_ptr<message_filters::Subscriber<sensor_msgs::Image>>> sync_subs_cam;
// For path viz
unsigned int poses_seq_imu = 0;
std::vector<geometry_msgs::PoseStamped> poses_imu;
// Groundtruth information
ros::Publisher pub_pathgt, pub_posegt;
double summed_mse_ori = 0.0;
double summed_mse_pos = 0.0;
double summed_nees_ori = 0.0;
double summed_nees_pos = 0.0;
size_t summed_number = 0;
// Start and end timestamps
bool start_time_set = false;
boost::posix_time::ptime rT1, rT2;
// Thread atomics
std::atomic<bool> thread_update_running;
/// Queue up camera measurements sorted by time and trigger once we have
/// exactly one IMU measurement with timestamp newer than the camera measurement
/// This also handles out-of-order camera measurements, which is rare, but
/// a nice feature to have for general robustness to bad camera drivers.
std::deque<ov_core::CameraData> camera_queue;
// Protects camera_queue (filled by ROS callbacks, drained by the update path)
std::mutex camera_queue_mtx;
// Last camera message timestamps we have received (mapped by cam id)
std::map<int, double> camera_last_timestamp;
// Last timestamp we visualized at
double last_visualization_timestamp = 0;
// Our groundtruth states
std::map<double, Eigen::Matrix<double, 17, 1>> gt_states;
// For path viz
unsigned int poses_seq_gt = 0;
std::vector<geometry_msgs::PoseStamped> poses_gt;
bool publish_global2imu_tf = true;
bool publish_calibration_tf = true;
// Files and if we should save total state
bool save_total_state;
std::ofstream of_state_est, of_state_std, of_state_gt;
};
} // namespace ov_msckf
#endif // OV_MSCKF_ROS1VISUALIZER_H

View File

@@ -0,0 +1,924 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "ROS1Visualizer.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
// Constructor: wires up all publishers, loads groundtruth (if configured),
// and opens the state-recording files when save_total_state is enabled.
ROS1Visualizer::ROS1Visualizer(std::shared_ptr<ros::NodeHandle> nh, std::shared_ptr<VioManager> app, std::shared_ptr<Simulator> sim)
    : _nh(nh), _app(app), _sim(sim), thread_update_running(false) {

  // Setup our transform broadcaster
  mTfBr = std::make_shared<tf::TransformBroadcaster>();

  // Create image transport
  image_transport::ImageTransport it(*_nh);

  // Setup pose and path publisher
  pub_poseimu = nh->advertise<geometry_msgs::PoseWithCovarianceStamped>("/ov_msckf/poseimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_poseimu.getTopic().c_str());
  pub_odomimu = nh->advertise<nav_msgs::Odometry>("/ov_msckf/odomimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_odomimu.getTopic().c_str());
  pub_pathimu = nh->advertise<nav_msgs::Path>("/ov_msckf/pathimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_pathimu.getTopic().c_str());

  // 3D points publishing
  pub_points_msckf = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_msckf", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_msckf.getTopic().c_str());
  pub_points_slam = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_slam", 2);
  // BUGFIX: previously this printed the msckf topic a second time
  PRINT_DEBUG("Publishing: %s\n", pub_points_slam.getTopic().c_str());
  pub_points_aruco = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_aruco", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_aruco.getTopic().c_str());
  pub_points_sim = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_sim", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_sim.getTopic().c_str());

  // Our tracking image
  it_pub_tracks = it.advertise("/ov_msckf/trackhist", 2);
  PRINT_DEBUG("Publishing: %s\n", it_pub_tracks.getTopic().c_str());

  // Groundtruth publishers
  pub_posegt = nh->advertise<geometry_msgs::PoseStamped>("/ov_msckf/posegt", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_posegt.getTopic().c_str());
  pub_pathgt = nh->advertise<nav_msgs::Path>("/ov_msckf/pathgt", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_pathgt.getTopic().c_str());

  // Loop closure publishers
  pub_loop_pose = nh->advertise<nav_msgs::Odometry>("/ov_msckf/loop_pose", 2);
  pub_loop_point = nh->advertise<sensor_msgs::PointCloud>("/ov_msckf/loop_feats", 2);
  pub_loop_extrinsic = nh->advertise<nav_msgs::Odometry>("/ov_msckf/loop_extrinsic", 2);
  pub_loop_intrinsics = nh->advertise<sensor_msgs::CameraInfo>("/ov_msckf/loop_intrinsics", 2);
  it_pub_loop_img_depth = it.advertise("/ov_msckf/loop_depth", 2);
  it_pub_loop_img_depth_color = it.advertise("/ov_msckf/loop_depth_colored", 2);

  // Added by PI on 27.07.2022: combined image + camera-info publisher
  camPub = it.advertiseCamera("/ov_msckf/camera_info", 2);

  // Options to enable publishing of the global-to-IMU and calibration transforms
  nh->param<bool>("publish_global_to_imu_tf", publish_global2imu_tf, true);
  nh->param<bool>("publish_calibration_tf", publish_calibration_tf, true);

  // Load groundtruth if we have it and are not doing simulation
  if (nh->hasParam("path_gt") && _sim == nullptr) {
    std::string path_to_gt;
    nh->param<std::string>("path_gt", path_to_gt, "");
    if (!path_to_gt.empty()) {
      DatasetReader::load_gt_file(path_to_gt, gt_states);
      PRINT_DEBUG("gt file path is: %s\n", path_to_gt.c_str());
    }
  }

  // Load if we should save the total state to file
  nh->param<bool>("save_total_state", save_total_state, false);

  // If requested, (re)create the output files and write their headers
  if (save_total_state) {

    // files we will open
    std::string filepath_est, filepath_std, filepath_gt;
    nh->param<std::string>("filepath_est", filepath_est, "state_estimate.txt");
    nh->param<std::string>("filepath_std", filepath_std, "state_deviation.txt");
    nh->param<std::string>("filepath_gt", filepath_gt, "state_groundtruth.txt");

    // If it exists, then delete it
    if (boost::filesystem::exists(filepath_est))
      boost::filesystem::remove(filepath_est);
    if (boost::filesystem::exists(filepath_std))
      boost::filesystem::remove(filepath_std);

    // Create folder path to this location if not exists
    boost::filesystem::create_directories(boost::filesystem::path(filepath_est.c_str()).parent_path());
    boost::filesystem::create_directories(boost::filesystem::path(filepath_std.c_str()).parent_path());
    boost::filesystem::create_directories(boost::filesystem::path(filepath_gt.c_str()).parent_path());

    // Open the files
    of_state_est.open(filepath_est.c_str());
    of_state_std.open(filepath_std.c_str());
    of_state_est << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl;
    of_state_std << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl;

    // Groundtruth file only makes sense when we are simulating
    if (_sim != nullptr) {
      if (boost::filesystem::exists(filepath_gt))
        boost::filesystem::remove(filepath_gt);
      of_state_gt.open(filepath_gt.c_str());
      of_state_gt << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl;
    }
  }
}
// Create the IMU and camera subscribers; camera topics come from ros params
// but can be overridden by the external yaml configuration.
void ROS1Visualizer::setup_subscribers(std::shared_ptr<ov_core::YamlParser> parser) {

  // A valid parser is required to resolve the configured topics
  assert(parser != nullptr);

  // IMU subscriber (handle legacy ros param info)
  std::string topic_imu;
  _nh->param<std::string>("topic_imu", topic_imu, "/imu0");
  parser->parse_external("relative_config_imu", "imu0", "rostopic", topic_imu);
  sub_imu = _nh->subscribe(topic_imu, 1000, &ROS1Visualizer::callback_inertial, this);

  // Stereo pair: join both image streams through an approximate-time synchronizer
  // https://answers.ros.org/question/96346/subscribe-to-two-image_raws-with-one-function/?answer=96491#post-id-96491
  if (_app->get_params().state_options.num_cameras == 2) {

    // Resolve both camera topics
    std::string topic_cam0, topic_cam1;
    _nh->param<std::string>("topic_camera" + std::to_string(0), topic_cam0, "/cam" + std::to_string(0) + "/image_raw");
    _nh->param<std::string>("topic_camera" + std::to_string(1), topic_cam1, "/cam" + std::to_string(1) + "/image_raw");
    parser->parse_external("relative_config_imucam", "cam" + std::to_string(0), "rostopic", topic_cam0);
    parser->parse_external("relative_config_imucam", "cam" + std::to_string(1), "rostopic", topic_cam1);

    // Build the two filter subscribers and the synchronizer joining them.
    // They hold unique pointers internally, so keep them alive in our member vectors.
    auto sub_image0 = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*_nh, topic_cam0, 1);
    auto sub_image1 = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*_nh, topic_cam1, 1);
    auto synchronizer = std::make_shared<message_filters::Synchronizer<sync_pol>>(sync_pol(10), *sub_image0, *sub_image1);
    synchronizer->registerCallback(boost::bind(&ROS1Visualizer::callback_stereo, this, _1, _2, 0, 1));
    sync_cam.push_back(synchronizer);
    sync_subs_cam.push_back(sub_image0);
    sync_subs_cam.push_back(sub_image1);
    PRINT_DEBUG("subscribing to cam (stereo): %s\n", topic_cam0.c_str());
    PRINT_DEBUG("subscribing to cam (stereo): %s\n", topic_cam1.c_str());

  } else {

    // Monocular: one plain subscriber per configured camera
    for (int cam_id = 0; cam_id < _app->get_params().state_options.num_cameras; cam_id++) {
      std::string topic_cam;
      _nh->param<std::string>("topic_camera" + std::to_string(cam_id), topic_cam, "/cam" + std::to_string(cam_id) + "/image_raw");
      parser->parse_external("relative_config_imucam", "cam" + std::to_string(cam_id), "rostopic", topic_cam);
      subs_cam.push_back(_nh->subscribe<sensor_msgs::Image>(topic_cam, 10, boost::bind(&ROS1Visualizer::callback_monocular, this, _1, cam_id)));
      PRINT_DEBUG("subscribing to cam (mono): %s\n", topic_cam.c_str());
    }
  }
}
// Publish everything for the current state: tracking image, state estimate,
// feature clouds, groundtruth comparison, loop-closure info, and state recording.
void ROS1Visualizer::visualize() {

  // Skip re-publishing if the state has not advanced since the last call
  if (last_visualization_timestamp == _app->get_state()->_timestamp && _app->initialized())
    return;
  last_visualization_timestamp = _app->get_state()->_timestamp;

  // Time how long the whole visualization pass takes
  auto time_viz_start = boost::posix_time::microsec_clock::local_time();

  // The tracking image is published even before initialization
  publish_images();

  // Everything below requires a valid filter estimate
  if (!_app->initialized())
    return;

  // Remember when this dataset started (first initialized visualization)
  if (!start_time_set) {
    rT1 = boost::posix_time::microsec_clock::local_time();
    start_time_set = true;
  }

  // State, features, groundtruth, and loop-closure outputs
  publish_state();
  publish_features();
  publish_groundtruth();
  publish_loopclosure_information();

  // Optionally append the full state to the recording files
  if (save_total_state) {
    RosVisualizerHelper::sim_save_total_state_to_file(_app->get_state(), _sim, of_state_est, of_state_std, of_state_gt);
  }

  // Report how long publishing / displaying took
  auto time_viz_end = boost::posix_time::microsec_clock::local_time();
  double time_total = (time_viz_end - time_viz_start).total_microseconds() * 1e-6;
  PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for visualization\n" RESET, time_total);
}
// Publish a fast-propagated odometry estimate at the given IMU timestamp,
// plus the global->imu and imu->cam transforms on TF.
void ROS1Visualizer::visualize_odometry(double timestamp) {

  // Return if we have not inited
  if (!_app->initialized())
    return;

  // Get fast propagate state at the desired timestamp
  // state_plus layout (from the indexing below): [quat(0:3), pos(4:6), lin vel(7:9), ang vel(10:12)]
  std::shared_ptr<State> state = _app->get_state();
  Eigen::Matrix<double, 13, 1> state_plus = Eigen::Matrix<double, 13, 1>::Zero();
  Eigen::Matrix<double, 12, 12> cov_plus = Eigen::Matrix<double, 12, 12>::Zero();
  if (!_app->get_propagator()->fast_state_propagate(state, timestamp, state_plus, cov_plus))
    return;

  // Publish our odometry message if requested
  if (pub_odomimu.getNumSubscribers() != 0) {
    nav_msgs::Odometry odomIinM;
    odomIinM.header.stamp = ros::Time(timestamp);
    odomIinM.header.frame_id = "global";

    // The POSE component (orientation and position)
    odomIinM.pose.pose.orientation.x = state_plus(0);
    odomIinM.pose.pose.orientation.y = state_plus(1);
    odomIinM.pose.pose.orientation.z = state_plus(2);
    odomIinM.pose.pose.orientation.w = state_plus(3);
    odomIinM.pose.pose.position.x = state_plus(4);
    odomIinM.pose.pose.position.y = state_plus(5);
    odomIinM.pose.pose.position.z = state_plus(6);

    // The TWIST component (angular and linear velocities)
    odomIinM.child_frame_id = "imu";
    odomIinM.twist.twist.linear.x = state_plus(7);   // vel in local frame
    odomIinM.twist.twist.linear.y = state_plus(8);   // vel in local frame
    odomIinM.twist.twist.linear.z = state_plus(9);   // vel in local frame
    odomIinM.twist.twist.angular.x = state_plus(10); // we do not estimate this...
    odomIinM.twist.twist.angular.y = state_plus(11); // we do not estimate this...
    odomIinM.twist.twist.angular.z = state_plus(12); // we do not estimate this...

    // Finally set the covariance in the message (in the order position then orientation as per ros convention)
    // Phi is a permutation that swaps the first two 3x3 blocks (orientation <-> position)
    // and keeps the velocity block, so cov_plus is reordered to match the ROS layout.
    Eigen::Matrix<double, 12, 12> Phi = Eigen::Matrix<double, 12, 12>::Zero();
    Phi.block(0, 3, 3, 3).setIdentity();
    Phi.block(3, 0, 3, 3).setIdentity();
    Phi.block(6, 6, 6, 6).setIdentity();
    cov_plus = Phi * cov_plus * Phi.transpose();
    // Top-left 6x6 -> pose covariance
    for (int r = 0; r < 6; r++) {
      for (int c = 0; c < 6; c++) {
        odomIinM.pose.covariance[6 * r + c] = cov_plus(r, c);
      }
    }
    // Bottom-right 6x6 -> twist covariance
    for (int r = 0; r < 6; r++) {
      for (int c = 0; c < 6; c++) {
        odomIinM.twist.covariance[6 * r + c] = cov_plus(r + 6, c + 6);
      }
    }
    pub_odomimu.publish(odomIinM);
  }

  // Publish our transform on TF
  // NOTE: since we use JPL we have an implicit conversion to Hamilton when we publish
  // NOTE: a rotation from GtoI in JPL has the same xyzw as a ItoG Hamilton rotation
  auto odom_pose = std::make_shared<ov_type::PoseJPL>();
  odom_pose->set_value(state_plus.block(0, 0, 7, 1));
  tf::StampedTransform trans = RosVisualizerHelper::get_stamped_transform_from_pose(odom_pose, false);
  trans.frame_id_ = "global";
  trans.child_frame_id_ = "imu";
  if (publish_global2imu_tf) {
    mTfBr->sendTransform(trans);
  }

  // Loop through each camera calibration and publish its imu->cam transform
  for (const auto &calib : state->_calib_IMUtoCAM) {
    tf::StampedTransform trans_calib = RosVisualizerHelper::get_stamped_transform_from_pose(calib.second, true);
    trans_calib.frame_id_ = "imu";
    trans_calib.child_frame_id_ = "cam" + std::to_string(calib.first);
    if (publish_calibration_tf) {
      mTfBr->sendTransform(trans_calib);
    }
  }
}
// Print the final calibration results and accuracy statistics at shutdown.
void ROS1Visualizer::visualize_final() {

  auto state = _app->get_state();

  // Final camera-imu time offset (if it was being calibrated)
  if (state->_options.do_calib_camera_timeoffset) {
    PRINT_INFO(REDPURPLE "camera-imu timeoffset = %.5f\n\n" RESET, state->_calib_dt_CAMtoIMU->value()(0));
  }

  // Final camera intrinsics (if they were being calibrated)
  if (state->_options.do_calib_camera_intrinsics) {
    for (int cam_id = 0; cam_id < state->_options.num_cameras; cam_id++) {
      std::shared_ptr<Vec> calib = state->_cam_intrinsics.at(cam_id);
      PRINT_INFO(REDPURPLE "cam%d intrinsics:\n" RESET, (int)cam_id);
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f\n" RESET, calib->value()(0), calib->value()(1), calib->value()(2), calib->value()(3));
      PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,%.5f\n\n" RESET, calib->value()(4), calib->value()(5), calib->value()(6), calib->value()(7));
    }
  }

  // Final camera extrinsics (if they were being calibrated)
  if (state->_options.do_calib_camera_pose) {
    for (int cam_id = 0; cam_id < state->_options.num_cameras; cam_id++) {
      std::shared_ptr<PoseJPL> calib = state->_calib_IMUtoCAM.at(cam_id);
      // Invert the stored IMU-to-camera pose so we report camera-to-IMU
      Eigen::Matrix4d T_CtoI = Eigen::Matrix4d::Identity();
      T_CtoI.block(0, 0, 3, 3) = quat_2_Rot(calib->quat()).transpose();
      T_CtoI.block(0, 3, 3, 1) = -T_CtoI.block(0, 0, 3, 3) * calib->pos();
      PRINT_INFO(REDPURPLE "T_C%dtoI:\n" RESET, cam_id);
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(0, 0), T_CtoI(0, 1), T_CtoI(0, 2), T_CtoI(0, 3));
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(1, 0), T_CtoI(1, 1), T_CtoI(1, 2), T_CtoI(1, 3));
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(2, 0), T_CtoI(2, 1), T_CtoI(2, 2), T_CtoI(2, 3));
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f\n\n" RESET, T_CtoI(3, 0), T_CtoI(3, 1), T_CtoI(3, 2), T_CtoI(3, 3));
    }
  }

  // RMSE against the groundtruth file, if we had one
  if (!gt_states.empty()) {
    PRINT_INFO(REDPURPLE "RMSE: %.3f (deg) orientation\n" RESET, std::sqrt(summed_mse_ori / summed_number));
    PRINT_INFO(REDPURPLE "RMSE: %.3f (m) position\n\n" RESET, std::sqrt(summed_mse_pos / summed_number));
  }

  // RMSE and NEES when running the simulator
  if (_sim != nullptr) {
    PRINT_INFO(REDPURPLE "RMSE: %.3f (deg) orientation\n" RESET, std::sqrt(summed_mse_ori / summed_number));
    PRINT_INFO(REDPURPLE "RMSE: %.3f (m) position\n\n" RESET, std::sqrt(summed_mse_pos / summed_number));
    PRINT_INFO(REDPURPLE "NEES: %.3f (deg) orientation\n" RESET, summed_nees_ori / summed_number);
    PRINT_INFO(REDPURPLE "NEES: %.3f (m) position\n\n" RESET, summed_nees_pos / summed_number);
  }

  // Total wall-clock runtime since the first visualized frame
  rT2 = boost::posix_time::microsec_clock::local_time();
  PRINT_INFO(REDPURPLE "TIME: %.3f seconds\n\n" RESET, (rT2 - rT1).total_microseconds() * 1e-6);
}
// IMU callback: feeds the measurement to the filter, publishes fast odometry,
// and (when enough camera data is queued) triggers the filter update, possibly
// on a background thread.
void ROS1Visualizer::callback_inertial(const sensor_msgs::Imu::ConstPtr &msg) {

  // convert into correct format
  ov_core::ImuData message;
  message.timestamp = msg->header.stamp.toSec();
  message.wm << msg->angular_velocity.x, msg->angular_velocity.y, msg->angular_velocity.z;
  message.am << msg->linear_acceleration.x, msg->linear_acceleration.y, msg->linear_acceleration.z;

  // send it to our VIO system
  _app->feed_measurement_imu(message);
  visualize_odometry(message.timestamp);

  // If the processing queue is currently active / running just return so we can keep getting measurements
  // Otherwise create a second thread to do our update in an async manor
  // The visualization of the state, images, and features will be synchronous with the update!
  // NOTE: thread_update_running is an atomic, so this check-and-set is the only
  // guard against spawning overlapping update threads.
  if (thread_update_running)
    return;
  thread_update_running = true;
  std::thread thread([&] {
    // Lock on the queue (prevents new images from appending)
    std::lock_guard<std::mutex> lck(camera_queue_mtx);

    // Count how many unique image streams are waiting in the queue.
    // Only sensor_ids.at(0) is inspected: a synchronized stereo pair is queued
    // as a single CameraData, so it counts as one stream.
    std::map<int, bool> unique_cam_ids;
    for (const auto &cam_msg : camera_queue) {
      unique_cam_ids[cam_msg.sensor_ids.at(0)] = true;
    }

    // If we do not have enough unique cameras then we need to wait
    // We should wait till we have one of each camera to ensure we propagate in the correct order
    auto params = _app->get_params();
    size_t num_unique_cameras = (params.state_options.num_cameras == 2) ? 1 : params.state_options.num_cameras;
    if (unique_cam_ids.size() == num_unique_cameras) {

      // Loop through our queue and see if we are able to process any of our camera measurements
      // We are able to process if we have at least one IMU measurement greater than the camera time
      // (camera timestamps are compared in the IMU clock frame using the current time offset)
      double timestamp_imu_inC = message.timestamp - _app->get_state()->_calib_dt_CAMtoIMU->value()(0);
      while (!camera_queue.empty() && camera_queue.at(0).timestamp < timestamp_imu_inC) {
        auto rT0_1 = boost::posix_time::microsec_clock::local_time();
        _app->feed_measurement_camera(camera_queue.at(0));
        visualize();
        camera_queue.pop_front();
        auto rT0_2 = boost::posix_time::microsec_clock::local_time();
        double time_total = (rT0_2 - rT0_1).total_microseconds() * 1e-6;
        PRINT_INFO(BLUE "[TIME]: %.4f seconds total (%.1f hz)\n" RESET, time_total, 1.0 / time_total);
      }
    }
    // Clear the flag last so a new thread can only start after we are done
    thread_update_running = false;
  });

  // If we are single threaded, then run single threaded
  // Otherwise detach this thread so it runs in the background!
  if (!_app->get_params().use_multi_threading) {
    thread.join();
  } else {
    thread.detach();
  }
}
// Monocular image callback: throttles to the tracking frequency, converts the
// image to mono8, and appends the measurement to the time-sorted camera queue.
void ROS1Visualizer::callback_monocular(const sensor_msgs::ImageConstPtr &msg0, int cam_id0) {

  // Drop this image if it arrives faster than the requested tracking frequency
  double timestamp = msg0->header.stamp.toSec();
  double time_delta = 1.0 / _app->get_params().track_frequency;
  if (camera_last_timestamp.find(cam_id0) != camera_last_timestamp.end() && timestamp < camera_last_timestamp.at(cam_id0) + time_delta) {
    return;
  }
  camera_last_timestamp[cam_id0] = timestamp;

  // Convert the ROS image into a mono8 OpenCV image
  cv_bridge::CvImageConstPtr cv_ptr;
  try {
    cv_ptr = cv_bridge::toCvShare(msg0, sensor_msgs::image_encodings::MONO8);
  } catch (cv_bridge::Exception &e) {
    // BUGFIX: added the missing trailing newline (now matches callback_stereo)
    PRINT_ERROR("cv_bridge exception: %s\n", e.what());
    return;
  }

  // Create the measurement
  ov_core::CameraData message;
  message.timestamp = cv_ptr->header.stamp.toSec();
  message.sensor_ids.push_back(cam_id0);
  message.images.push_back(cv_ptr->image.clone());

  // Load the mask if we are using it, else push an all-zero (nothing masked) one
  // TODO: in the future we should get this from external pixel segmentation
  if (_app->get_params().use_mask) {
    message.masks.push_back(_app->get_params().masks.at(cam_id0));
  } else {
    message.masks.push_back(cv::Mat::zeros(cv_ptr->image.rows, cv_ptr->image.cols, CV_8UC1));
  }

  // Append to the queue, kept sorted by timestamp so out-of-order camera
  // messages are still consumed in time order by the update thread
  std::lock_guard<std::mutex> lck(camera_queue_mtx);
  camera_queue.push_back(message);
  std::sort(camera_queue.begin(), camera_queue.end());
}
// Stereo image callback: throttles to the tracking frequency, converts both
// images to mono8, and queues them as a single synchronized measurement.
void ROS1Visualizer::callback_stereo(const sensor_msgs::ImageConstPtr &msg0, const sensor_msgs::ImageConstPtr &msg1, int cam_id0,
                                     int cam_id1) {

  // Throttle to the requested tracking frequency (keyed on the first camera)
  double timestamp = msg0->header.stamp.toSec();
  double time_delta = 1.0 / _app->get_params().track_frequency;
  auto it_last = camera_last_timestamp.find(cam_id0);
  if (it_last != camera_last_timestamp.end() && timestamp < it_last->second + time_delta) {
    return;
  }
  camera_last_timestamp[cam_id0] = timestamp;

  // Helper that converts a ROS image to a mono8 OpenCV image (nullptr on failure)
  auto to_mono8 = [](const sensor_msgs::ImageConstPtr &msg) -> cv_bridge::CvImageConstPtr {
    try {
      return cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::MONO8);
    } catch (cv_bridge::Exception &e) {
      PRINT_ERROR("cv_bridge exception: %s\n", e.what());
      return nullptr;
    }
  };

  // Convert the left image, then the right one (bail out on the first failure)
  cv_bridge::CvImageConstPtr cv_ptr0 = to_mono8(msg0);
  if (cv_ptr0 == nullptr)
    return;
  cv_bridge::CvImageConstPtr cv_ptr1 = to_mono8(msg1);
  if (cv_ptr1 == nullptr)
    return;

  // Build a single measurement containing both cameras
  ov_core::CameraData message;
  message.timestamp = cv_ptr0->header.stamp.toSec();
  message.sensor_ids.push_back(cam_id0);
  message.sensor_ids.push_back(cam_id1);
  message.images.push_back(cv_ptr0->image.clone());
  message.images.push_back(cv_ptr1->image.clone());

  // Load the masks if we are using them, else push all-zero (nothing masked) ones
  // TODO: in the future we should get this from external pixel segmentation
  if (_app->get_params().use_mask) {
    message.masks.push_back(_app->get_params().masks.at(cam_id0));
    message.masks.push_back(_app->get_params().masks.at(cam_id1));
  } else {
    message.masks.push_back(cv::Mat::zeros(cv_ptr0->image.rows, cv_ptr0->image.cols, CV_8UC1));
    message.masks.push_back(cv::Mat::zeros(cv_ptr1->image.rows, cv_ptr1->image.cols, CV_8UC1));
  }

  // Append to the queue, kept sorted by timestamp for out-of-order robustness
  std::lock_guard<std::mutex> lck(camera_queue_mtx);
  camera_queue.push_back(message);
  std::sort(camera_queue.begin(), camera_queue.end());
}
// Publish the current IMU pose (with covariance) and the accumulated IMU path.
void ROS1Visualizer::publish_state() {

  // Get the current state
  std::shared_ptr<State> state = _app->get_state();

  // We want to publish in the IMU clock frame
  // The timestamp in the state will be the last camera time
  double t_ItoC = state->_calib_dt_CAMtoIMU->value()(0);
  double timestamp_inI = state->_timestamp + t_ItoC;

  // Create pose of IMU (note we use the bag time)
  geometry_msgs::PoseWithCovarianceStamped poseIinM;
  poseIinM.header.stamp = ros::Time(timestamp_inI);
  poseIinM.header.seq = poses_seq_imu;
  poseIinM.header.frame_id = "global";
  poseIinM.pose.pose.orientation.x = state->_imu->quat()(0);
  poseIinM.pose.pose.orientation.y = state->_imu->quat()(1);
  poseIinM.pose.pose.orientation.z = state->_imu->quat()(2);
  poseIinM.pose.pose.orientation.w = state->_imu->quat()(3);
  poseIinM.pose.pose.position.x = state->_imu->pos()(0);
  poseIinM.pose.pose.position.y = state->_imu->pos()(1);
  poseIinM.pose.pose.position.z = state->_imu->pos()(2);

  // Finally set the covariance in the message (in the order position then orientation as per ros convention)
  // NOTE: p() is pushed before q() so the marginal covariance already comes out
  // in position-then-orientation order — no permutation needed here.
  std::vector<std::shared_ptr<Type>> statevars;
  statevars.push_back(state->_imu->pose()->p());
  statevars.push_back(state->_imu->pose()->q());
  Eigen::Matrix<double, 6, 6> covariance_posori = StateHelper::get_marginal_covariance(_app->get_state(), statevars);
  for (int r = 0; r < 6; r++) {
    for (int c = 0; c < 6; c++) {
      poseIinM.pose.covariance[6 * r + c] = covariance_posori(r, c);
    }
  }
  pub_poseimu.publish(poseIinM);

  //=========================================================
  //=========================================================

  // Append to our pose vector
  geometry_msgs::PoseStamped posetemp;
  posetemp.header = poseIinM.header;
  posetemp.pose = poseIinM.pose.pose;
  poses_imu.push_back(posetemp);

  // Create our path (imu)
  // NOTE: We downsample the number of poses as needed to prevent rviz crashes
  // NOTE: https://github.com/ros-visualization/rviz/issues/1107
  nav_msgs::Path arrIMU;
  arrIMU.header.stamp = ros::Time::now();
  arrIMU.header.seq = poses_seq_imu;
  arrIMU.header.frame_id = "global";
  for (size_t i = 0; i < poses_imu.size(); i += std::floor((double)poses_imu.size() / 16384.0) + 1) {
    arrIMU.poses.push_back(poses_imu.at(i));
  }
  pub_pathimu.publish(arrIMU);

  // Move them forward in time
  poses_seq_imu++;
}
void ROS1Visualizer::publish_images() {
// Check if we have subscribers
if (it_pub_tracks.getNumSubscribers() == 0)
return;
// Get our image of history tracks
cv::Mat img_history = _app->get_historical_viz_image();
// Create our message
std_msgs::Header header;
header.stamp = ros::Time::now();
header.frame_id = "cam0";
sensor_msgs::ImagePtr exl_msg = cv_bridge::CvImage(header, "bgr8", img_history).toImageMsg();
exl_msg_global = exl_msg;
// Publish
it_pub_tracks.publish(exl_msg);
}
void ROS1Visualizer::publish_features() {
// Check if we have subscribers
if (pub_points_msckf.getNumSubscribers() == 0 && pub_points_slam.getNumSubscribers() == 0 && pub_points_aruco.getNumSubscribers() == 0 &&
pub_points_sim.getNumSubscribers() == 0)
return;
// Get our good MSCKF features
std::vector<Eigen::Vector3d> feats_msckf = _app->get_good_features_MSCKF();
sensor_msgs::PointCloud2 cloud = RosVisualizerHelper::get_ros_pointcloud(feats_msckf);
pub_points_msckf.publish(cloud);
// Get our good SLAM features
std::vector<Eigen::Vector3d> feats_slam = _app->get_features_SLAM();
sensor_msgs::PointCloud2 cloud_SLAM = RosVisualizerHelper::get_ros_pointcloud(feats_slam);
pub_points_slam.publish(cloud_SLAM);
// Get our good ARUCO features
std::vector<Eigen::Vector3d> feats_aruco = _app->get_features_ARUCO();
sensor_msgs::PointCloud2 cloud_ARUCO = RosVisualizerHelper::get_ros_pointcloud(feats_aruco);
pub_points_aruco.publish(cloud_ARUCO);
// Skip the rest of we are not doing simulation
if (_sim == nullptr)
return;
// Get our good SIMULATION features
std::vector<Eigen::Vector3d> feats_sim = _sim->get_map_vec();
sensor_msgs::PointCloud2 cloud_SIM = RosVisualizerHelper::get_ros_pointcloud(feats_sim);
pub_points_sim.publish(cloud_SIM);
}
// Publish the groundtruth pose/path (from file or simulator) and accumulate
// RMSE / NEES statistics against the current filter estimate.
void ROS1Visualizer::publish_groundtruth() {

  // Our groundtruth state
  Eigen::Matrix<double, 17, 1> state_gt;

  // We want to publish in the IMU clock frame
  // The timestamp in the state will be the last camera time
  double t_ItoC = _app->get_state()->_calib_dt_CAMtoIMU->value()(0);
  double timestamp_inI = _app->get_state()->_timestamp + t_ItoC;

  // Check that we have the timestamp in our GT file [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel]
  if (_sim == nullptr && (gt_states.empty() || !DatasetReader::get_gt_state(timestamp_inI, state_gt, gt_states))) {
    return;
  }

  // Get the simulated groundtruth
  // NOTE: we get the true time in the IMU clock frame
  if (_sim != nullptr) {
    timestamp_inI = _app->get_state()->_timestamp + _sim->get_true_parameters().calib_camimu_dt;
    if (!_sim->get_state(timestamp_inI, state_gt))
      return;
  }

  // Get the GT and system state state
  // state_ekf layout (from the indexing below): [quat(0:3), pos(4:6), ...]
  Eigen::Matrix<double, 16, 1> state_ekf = _app->get_state()->_imu->value();

  // Create pose of IMU (state_gt indices: 1-4 quaternion, 5-7 position)
  geometry_msgs::PoseStamped poseIinM;
  poseIinM.header.stamp = ros::Time(timestamp_inI);
  poseIinM.header.seq = poses_seq_gt;
  poseIinM.header.frame_id = "global";
  poseIinM.pose.orientation.x = state_gt(1, 0);
  poseIinM.pose.orientation.y = state_gt(2, 0);
  poseIinM.pose.orientation.z = state_gt(3, 0);
  poseIinM.pose.orientation.w = state_gt(4, 0);
  poseIinM.pose.position.x = state_gt(5, 0);
  poseIinM.pose.position.y = state_gt(6, 0);
  poseIinM.pose.position.z = state_gt(7, 0);
  pub_posegt.publish(poseIinM);

  // Append to our pose vector
  poses_gt.push_back(poseIinM);

  // Create our path (imu)
  // NOTE: We downsample the number of poses as needed to prevent rviz crashes
  // NOTE: https://github.com/ros-visualization/rviz/issues/1107
  nav_msgs::Path arrIMU;
  arrIMU.header.stamp = ros::Time::now();
  arrIMU.header.seq = poses_seq_gt;
  arrIMU.header.frame_id = "global";
  for (size_t i = 0; i < poses_gt.size(); i += std::floor((double)poses_gt.size() / 16384.0) + 1) {
    arrIMU.poses.push_back(poses_gt.at(i));
  }
  pub_pathgt.publish(arrIMU);

  // Move them forward in time
  poses_seq_gt++;

  // Publish our transform on TF (groundtruth frame "truth")
  tf::StampedTransform trans;
  trans.stamp_ = ros::Time::now();
  trans.frame_id_ = "global";
  trans.child_frame_id_ = "truth";
  tf::Quaternion quat(state_gt(1, 0), state_gt(2, 0), state_gt(3, 0), state_gt(4, 0));
  trans.setRotation(quat);
  tf::Vector3 orig(state_gt(5, 0), state_gt(6, 0), state_gt(7, 0));
  trans.setOrigin(orig);
  if (publish_global2imu_tf) {
    mTfBr->sendTransform(trans);
  }

  //==========================================================================
  //==========================================================================

  // Difference between positions (estimate minus groundtruth)
  double dx = state_ekf(4, 0) - state_gt(5, 0);
  double dy = state_ekf(5, 0) - state_gt(6, 0);
  double dz = state_ekf(6, 0) - state_gt(7, 0);
  double err_pos = std::sqrt(dx * dx + dy * dy + dz * dz);

  // Quaternion error: relative rotation estimate * inverse(gt), then the
  // small-angle conversion (angle ~ 2 * |vector part|) gives degrees of error
  Eigen::Matrix<double, 4, 1> quat_gt, quat_st, quat_diff;
  quat_gt << state_gt(1, 0), state_gt(2, 0), state_gt(3, 0), state_gt(4, 0);
  quat_st << state_ekf(0, 0), state_ekf(1, 0), state_ekf(2, 0), state_ekf(3, 0);
  quat_diff = quat_multiply(quat_st, Inv(quat_gt));
  double err_ori = (180 / M_PI) * 2 * quat_diff.block(0, 0, 3, 1).norm();

  //==========================================================================
  //==========================================================================

  // Get covariance of pose (orientation block first, then position block)
  std::vector<std::shared_ptr<Type>> statevars;
  statevars.push_back(_app->get_state()->_imu->q());
  statevars.push_back(_app->get_state()->_imu->p());
  Eigen::Matrix<double, 6, 6> covariance = StateHelper::get_marginal_covariance(_app->get_state(), statevars);

  // Calculate NEES values: ori uses (2*dq_vec)^T P^-1 (2*dq_vec) — the factor
  // of 2 appears once in cov_vec and once in the dot product below
  // NOTE: need to manually multiply things out to make static asserts work
  // NOTE: https://github.com/rpng/open_vins/pull/226
  // NOTE: https://github.com/rpng/open_vins/issues/236
  // NOTE: https://gitlab.com/libeigen/eigen/-/issues/1664
  Eigen::Vector3d quat_diff_vec = quat_diff.block(0, 0, 3, 1);
  Eigen::Vector3d cov_vec = covariance.block(0, 0, 3, 3).inverse() * 2 * quat_diff.block(0, 0, 3, 1);
  double ori_nees = 2 * quat_diff_vec.dot(cov_vec);
  Eigen::Vector3d errpos = state_ekf.block(4, 0, 3, 1) - state_gt.block(5, 0, 3, 1);
  double pos_nees = errpos.transpose() * covariance.block(3, 3, 3, 3).inverse() * errpos;

  //==========================================================================
  //==========================================================================

  // Update our average variables (skip NaNs, e.g. from singular covariance)
  if (!std::isnan(ori_nees) && !std::isnan(pos_nees)) {
    summed_mse_ori += err_ori * err_ori;
    summed_mse_pos += err_pos * err_pos;
    summed_nees_ori += ori_nees;
    summed_nees_pos += pos_nees;
    summed_number++;
  }

  // Nice display for the user
  PRINT_INFO(REDPURPLE "error to gt => %.3f, %.3f (deg,m) | rmse => %.3f, %.3f (deg,m) | called %d times\n" RESET, err_ori, err_pos,
             std::sqrt(summed_mse_ori / summed_number), std::sqrt(summed_mse_pos / summed_number), (int)summed_number);
  PRINT_INFO(REDPURPLE "nees => %.1f, %.1f (ori,pos) | avg nees = %.1f, %.1f (ori,pos)\n" RESET, ori_nees, pos_nees,
             summed_nees_ori / summed_number, summed_nees_pos / summed_number);

  //==========================================================================
  //==========================================================================
}
void ROS1Visualizer::publish_loopclosure_information() {

  // Publish the historical pose, camera calibration, active feature tracks, and
  // sparse depth images that a downstream loop-closure module would consume.

  // Get the current tracks in this frame (3d positions, uv+depth, and the image they were tracked in)
  double active_tracks_time1 = -1;
  double active_tracks_time2 = -1;
  std::unordered_map<size_t, Eigen::Vector3d> active_tracks_posinG;
  std::unordered_map<size_t, Eigen::Vector3d> active_tracks_uvd;
  cv::Mat active_cam0_image;
  _app->get_active_tracks(active_tracks_time1, active_tracks_posinG, active_tracks_uvd);
  _app->get_active_image(active_tracks_time2, active_cam0_image);

  // Skip if no active track information has been produced yet
  if (active_tracks_time1 == -1)
    return;

  // Skip if the IMU clone for this timestamp is no longer in the state
  if (_app->get_state()->_clones_IMU.find(active_tracks_time1) == _app->get_state()->_clones_IMU.end())
    return;
  Eigen::Vector4d quat = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat();
  Eigen::Vector3d pos = _app->get_state()->_clones_IMU.at(active_tracks_time1)->pos();

  // Tracks and image must correspond to the exact same timestep
  if (active_tracks_time1 != active_tracks_time2)
    return;

  // Default header stamped with the historical pose time
  std_msgs::Header header;
  header.stamp = ros::Time(active_tracks_time1);

  //======================================================
  // Check if we have subscribers for the pose odometry, camera intrinsics, or extrinsics
  if (pub_loop_pose.getNumSubscribers() != 0 || pub_loop_extrinsic.getNumSubscribers() != 0 ||
      pub_loop_intrinsics.getNumSubscribers() != 0) {

    // PUBLISH HISTORICAL POSE ESTIMATE
    nav_msgs::Odometry odometry_pose;
    odometry_pose.header = header;
    odometry_pose.header.frame_id = "global";
    odometry_pose.pose.pose.position.x = pos(0);
    odometry_pose.pose.pose.position.y = pos(1);
    odometry_pose.pose.pose.position.z = pos(2);
    odometry_pose.pose.pose.orientation.x = quat(0);
    odometry_pose.pose.pose.orientation.y = quat(1);
    odometry_pose.pose.pose.orientation.z = quat(2);
    odometry_pose.pose.pose.orientation.w = quat(3);
    pub_loop_pose.publish(odometry_pose);

    // PUBLISH IMU TO CAMERA0 EXTRINSIC
    // need to flip the transform to the IMU frame
    Eigen::Vector4d q_ItoC = _app->get_state()->_calib_IMUtoCAM.at(0)->quat();
    Eigen::Vector3d p_CinI = -_app->get_state()->_calib_IMUtoCAM.at(0)->Rot().transpose() * _app->get_state()->_calib_IMUtoCAM.at(0)->pos();
    nav_msgs::Odometry odometry_calib;
    odometry_calib.header = header;
    odometry_calib.header.frame_id = "imu";
    odometry_calib.pose.pose.position.x = p_CinI(0);
    odometry_calib.pose.pose.position.y = p_CinI(1);
    odometry_calib.pose.pose.position.z = p_CinI(2);
    odometry_calib.pose.pose.orientation.x = q_ItoC(0);
    odometry_calib.pose.pose.orientation.y = q_ItoC(1);
    odometry_calib.pose.pose.orientation.z = q_ItoC(2);
    odometry_calib.pose.pose.orientation.w = q_ItoC(3);
    pub_loop_extrinsic.publish(odometry_calib);

    // PUBLISH CAMERA0 INTRINSICS
    bool is_fisheye = (std::dynamic_pointer_cast<ov_core::CamEqui>(_app->get_params().camera_intrinsics.at(0)) != nullptr);
    sensor_msgs::CameraInfo cameraparams;
    cameraparams.header = header;
    cameraparams.header.frame_id = "cam0";
    cameraparams.distortion_model = is_fisheye ? "equidistant" : "plumb_bob";
    Eigen::VectorXd cparams = _app->get_state()->_cam_intrinsics.at(0)->value();
    cameraparams.D = {cparams(4), cparams(5), cparams(6), cparams(7)};
    cameraparams.K = {cparams(0), 0, cparams(2), 0, cparams(1), cparams(3), 0, 0, 1};
    pub_loop_intrinsics.publish(cameraparams);

    // Added by PI: also republish the intrinsics paired with the stored global image
    // BUGFIX: removed unused locals ("yaw", "cameraParamsPtr") and dead commented-out
    // publishes; guard the image pointer since exl_msg_global is only assigned
    // elsewhere and can still be null here (e.g. before the first image arrives),
    // which would make the unconditional dereference crash.
    if (exl_msg_global != nullptr) {
      camPub.publish(*exl_msg_global, cameraparams, exl_msg_global->header.stamp);
    }
  }

  //======================================================
  // PUBLISH FEATURE TRACKS IN THE GLOBAL FRAME OF REFERENCE
  if (pub_loop_point.getNumSubscribers() != 0) {

    // Construct the message
    sensor_msgs::PointCloud point_cloud;
    point_cloud.header = header;
    point_cloud.header.frame_id = "global";
    for (const auto &feattimes : active_tracks_posinG) {

      // Get this feature information
      size_t featid = feattimes.first;
      Eigen::Vector3d uvd = Eigen::Vector3d::Zero();
      if (active_tracks_uvd.find(featid) != active_tracks_uvd.end()) {
        uvd = active_tracks_uvd.at(featid);
      }
      Eigen::Vector3d pFinG = active_tracks_posinG.at(featid);

      // Push back 3d point
      geometry_msgs::Point32 p;
      p.x = pFinG(0);
      p.y = pFinG(1);
      p.z = pFinG(2);
      point_cloud.points.push_back(p);

      // Push back the uv_norm, uv_raw, and feature id
      // NOTE: we don't use the normalized coordinates to save time here
      // NOTE: they will have to be re-normalized in the loop closure code
      sensor_msgs::ChannelFloat32 p_2d;
      p_2d.values.push_back(0);
      p_2d.values.push_back(0);
      p_2d.values.push_back(uvd(0));
      p_2d.values.push_back(uvd(1));
      p_2d.values.push_back(featid);
      point_cloud.channels.push_back(p_2d);
    }
    pub_loop_point.publish(point_cloud);
  }

  //======================================================
  // Depth images of sparse points and its colorized version
  if (it_pub_loop_img_depth.getNumSubscribers() != 0 || it_pub_loop_img_depth_color.getNumSubscribers() != 0) {

    // Create the images we will populate with the depths
    std::pair<int, int> wh_pair = {active_cam0_image.cols, active_cam0_image.rows};
    cv::Mat depthmap_viz;
    cv::cvtColor(active_cam0_image, depthmap_viz, cv::COLOR_GRAY2RGB);
    cv::Mat depthmap = cv::Mat::zeros(wh_pair.second, wh_pair.first, CV_16UC1);

    // Loop through all points and append
    for (const auto &feattimes : active_tracks_uvd) {

      // Get this feature information
      size_t featid = feattimes.first;
      Eigen::Vector3d uvd = active_tracks_uvd.at(featid);

      // Skip points too close to the image border (dw is also the half-width
      // of the rectangle drawn below, so this bound keeps the drawing in-frame)
      double dw = 3;
      if (uvd(0) < dw || uvd(0) > wh_pair.first - dw || uvd(1) < dw || uvd(1) > wh_pair.second - dw) {
        continue;
      }

      // Append the depth
      // NOTE: scaled by 1000 to fit the 16U
      // NOTE: access order is y,x (stupid opencv convention stuff)
      depthmap.at<uint16_t>((int)uvd(1), (int)uvd(0)) = (uint16_t)(1000 * uvd(2));

      // Taken from LSD-SLAM codebase segment into 0-4 meter segments:
      // https://github.com/tum-vision/lsd_slam/blob/d1e6f0e1a027889985d2e6b4c0fe7a90b0c75067/lsd_slam_core/src/util/globalFuncs.cpp#L87-L96
      float id = 1.0f / (float)uvd(2);
      float r = (0.0f - id) * 255 / 1.0f;
      if (r < 0)
        r = -r;
      float g = (1.0f - id) * 255 / 1.0f;
      if (g < 0)
        g = -g;
      float b = (2.0f - id) * 255 / 1.0f;
      if (b < 0)
        b = -b;
      uchar rc = r < 0 ? 0 : (r > 255 ? 255 : r);
      uchar gc = g < 0 ? 0 : (g > 255 ? 255 : g);
      uchar bc = b < 0 ? 0 : (b > 255 ? 255 : b);
      cv::Scalar color(255 - rc, 255 - gc, 255 - bc);

      // Small square around the point (note the above bound check needs to take into account this width)
      cv::Point p0(uvd(0) - dw, uvd(1) - dw);
      cv::Point p1(uvd(0) + dw, uvd(1) + dw);
      cv::rectangle(depthmap_viz, p0, p1, color, -1);
    }

    // Create our messages
    header.frame_id = "cam0";
    sensor_msgs::ImagePtr exl_msg1 = cv_bridge::CvImage(header, sensor_msgs::image_encodings::TYPE_16UC1, depthmap).toImageMsg();
    it_pub_loop_img_depth.publish(exl_msg1);
    header.stamp = ros::Time::now();
    header.frame_id = "cam0";
    sensor_msgs::ImagePtr exl_msg2 = cv_bridge::CvImage(header, "bgr8", depthmap_viz).toImageMsg();
    it_pub_loop_img_depth_color.publish(exl_msg2);
  }
}

View File

@@ -0,0 +1,204 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_ROS1VISUALIZER_H
#define OV_MSCKF_ROS1VISUALIZER_H
#include <geometry_msgs/PoseStamped.h>
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#include <image_transport/image_transport.h>
#include <message_filters/subscriber.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/time_synchronizer.h>
#include <nav_msgs/Odometry.h>
#include <nav_msgs/Path.h>
#include <ros/ros.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/NavSatFix.h>
#include <sensor_msgs/PointCloud.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>
#include <std_msgs/Float64.h>
#include <tf/transform_broadcaster.h>
#include <boost/filesystem.hpp>
#include <cv_bridge/cv_bridge.h>
#include "core/VioManager.h"
#include "ros/RosVisualizerHelper.h"
#include "sim/Simulator.h"
#include "utils/dataset_reader.h"
#include "utils/print.h"
#include "utils/sensor_data.h"
namespace ov_msckf {
/**
* @brief Helper class that will publish results onto the ROS framework.
*
* Also save to file the current total state and covariance along with the groundtruth if we are simulating.
* We visualize the following things:
* - State of the system on TF, pose message, and path
* - Image of our tracker
* - Our different features (SLAM, MSCKF, ARUCO)
* - Groundtruth trajectory if we have it
*/
class ROS1Visualizer {
public:
/**
 * @brief Default constructor
 * @param nh ROS node handler used to advertise all topics
 * @param app Core estimator manager whose state is visualized
 * @param sim Simulator if we are simulating (nullptr otherwise)
 */
ROS1Visualizer(std::shared_ptr<ros::NodeHandle> nh, std::shared_ptr<VioManager> app, std::shared_ptr<Simulator> sim = nullptr);
/**
 * @brief Will setup ROS subscribers and callbacks
 * @param parser Configuration file parser (topics can be overridden from config)
 */
void setup_subscribers(std::shared_ptr<ov_core::YamlParser> parser);
/**
 * @brief Will visualize the system if we have new things
 */
void visualize();
/**
 * @brief Will publish our odometry message for the current timestep.
 * This will take the current state estimate and get the propagated pose to the desired time.
 * This can be used to get pose estimates on systems which require high frequency pose estimates.
 */
void visualize_odometry(double timestamp);
/**
 * @brief After the run has ended, print results (final calibration, RMSE/NEES, runtime)
 */
void visualize_final();
/// Callback for inertial information
void callback_inertial(const sensor_msgs::Imu::ConstPtr &msg);
/// Callback for monocular cameras information
void callback_monocular(const sensor_msgs::ImageConstPtr &msg0, int cam_id0);
/// Callback for synchronized stereo camera information
void callback_stereo(const sensor_msgs::ImageConstPtr &msg0, const sensor_msgs::ImageConstPtr &msg1, int cam_id0, int cam_id1);
protected:
/// Publish the current state
void publish_state();
/// Publish the active tracking image
void publish_images();
/// Publish current features
void publish_features();
/// Publish groundtruth (if we have it)
void publish_groundtruth();
/// Publish loop-closure information of current pose and active track information
void publish_loopclosure_information();
/// Global node handler
std::shared_ptr<ros::NodeHandle> _nh;
/// Core application of the filter system
std::shared_ptr<VioManager> _app;
/// Simulator (is nullptr if we are not sim'ing)
std::shared_ptr<Simulator> _sim;
// Our publishers (tracking/depth images, pose, paths, feature clouds, loop-closure info)
image_transport::Publisher it_pub_tracks, it_pub_loop_img_depth, it_pub_loop_img_depth_color;
ros::Publisher pub_poseimu, pub_odomimu, pub_pathimu;
ros::Publisher pub_points_msckf, pub_points_slam, pub_points_aruco, pub_points_sim;
ros::Publisher pub_loop_pose, pub_loop_point, pub_loop_extrinsic, pub_loop_intrinsics;
std::shared_ptr<tf::TransformBroadcaster> mTfBr;
// Added by PI on 27.07.2022
// image_transport::CameraPublisher pub_camera_info, pub_camera_info_trackhist;
// NOTE(review): exl_msg_global is not assigned in the visible code; it may be
// null until the first image is stored -- check before dereferencing it
image_transport::CameraPublisher camPub;
image_transport::CameraSubscriber camSub;
sensor_msgs::ImagePtr exl_msg_global;
ros::Subscriber sub_camera_info;
// End of adding by PI
// Our subscribers and camera synchronizers (approximate-time sync for stereo pairs)
ros::Subscriber sub_imu;
std::vector<ros::Subscriber> subs_cam;
typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image> sync_pol;
std::vector<std::shared_ptr<message_filters::Synchronizer<sync_pol>>> sync_cam;
std::vector<std::shared_ptr<message_filters::Subscriber<sensor_msgs::Image>>> sync_subs_cam;
// For path viz (sequence counter + accumulated poses)
unsigned int poses_seq_imu = 0;
std::vector<geometry_msgs::PoseStamped> poses_imu;
// Groundtruth information and accumulated error statistics (MSE/NEES sums)
ros::Publisher pub_pathgt, pub_posegt;
double summed_mse_ori = 0.0;
double summed_mse_pos = 0.0;
double summed_nees_ori = 0.0;
double summed_nees_pos = 0.0;
size_t summed_number = 0;
// Start and end timestamps
bool start_time_set = false;
boost::posix_time::ptime rT1, rT2;
// Thread atomics (true while the async filter-update thread is running)
std::atomic<bool> thread_update_running;
/// Queue up camera measurements sorted by time and trigger once we have
/// exactly one IMU measurement with timestamp newer than the camera measurement
/// This also handles out-of-order camera measurements, which is rare, but
/// a nice feature to have for general robustness to bad camera drivers.
std::deque<ov_core::CameraData> camera_queue;
std::mutex camera_queue_mtx;
// Last camera message timestamps we have received (mapped by cam id)
std::map<int, double> camera_last_timestamp;
// Last timestamp we visualized at
double last_visualization_timestamp = 0;
// Our groundtruth states
std::map<double, Eigen::Matrix<double, 17, 1>> gt_states;
// For path viz
unsigned int poses_seq_gt = 0;
std::vector<geometry_msgs::PoseStamped> poses_gt;
// Whether to broadcast the global->imu and imu->camera transforms on TF
bool publish_global2imu_tf = true;
bool publish_calibration_tf = true;
// Files and if we should save total state
bool save_total_state;
std::ofstream of_state_est, of_state_std, of_state_gt;
};
} // namespace ov_msckf
#endif // OV_MSCKF_ROS1VISUALIZER_H

View File

@@ -0,0 +1,915 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "ROS2Visualizer.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
ROS2Visualizer::ROS2Visualizer(std::shared_ptr<rclcpp::Node> node, std::shared_ptr<VioManager> app, std::shared_ptr<Simulator> sim)
    : _node(node), _app(app), _sim(sim), thread_update_running(false) {

  // Advertise every publisher, read visualization parameters, and (optionally)
  // open the groundtruth file and the total-state output files.

  // Setup our transform broadcaster
  mTfBr = std::make_shared<tf2_ros::TransformBroadcaster>(node);

  // Create image transport
  image_transport::ImageTransport it(node);

  // Setup pose and path publisher
  pub_poseimu = node->create_publisher<geometry_msgs::msg::PoseWithCovarianceStamped>("/ov_msckf/poseimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_poseimu->get_topic_name());
  pub_odomimu = node->create_publisher<nav_msgs::msg::Odometry>("/ov_msckf/odomimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_odomimu->get_topic_name());
  pub_pathimu = node->create_publisher<nav_msgs::msg::Path>("/ov_msckf/pathimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_pathimu->get_topic_name());

  // 3D points publishing
  pub_points_msckf = node->create_publisher<sensor_msgs::msg::PointCloud2>("/ov_msckf/points_msckf", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_msckf->get_topic_name());
  pub_points_slam = node->create_publisher<sensor_msgs::msg::PointCloud2>("/ov_msckf/points_slam", 2);
  // BUGFIX: this debug line previously printed pub_points_msckf's topic again
  PRINT_DEBUG("Publishing: %s\n", pub_points_slam->get_topic_name());
  pub_points_aruco = node->create_publisher<sensor_msgs::msg::PointCloud2>("/ov_msckf/points_aruco", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_aruco->get_topic_name());
  pub_points_sim = node->create_publisher<sensor_msgs::msg::PointCloud2>("/ov_msckf/points_sim", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_sim->get_topic_name());

  // Our tracking image
  it_pub_tracks = it.advertise("/ov_msckf/trackhist", 2);
  PRINT_DEBUG("Publishing: %s\n", it_pub_tracks.getTopic().c_str());

  // Groundtruth publishers
  pub_posegt = node->create_publisher<geometry_msgs::msg::PoseStamped>("/ov_msckf/posegt", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_posegt->get_topic_name());
  pub_pathgt = node->create_publisher<nav_msgs::msg::Path>("/ov_msckf/pathgt", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_pathgt->get_topic_name());

  // Loop closure publishers
  pub_loop_pose = node->create_publisher<nav_msgs::msg::Odometry>("/ov_msckf/loop_pose", 2);
  pub_loop_point = node->create_publisher<sensor_msgs::msg::PointCloud>("/ov_msckf/loop_feats", 2);
  pub_loop_extrinsic = node->create_publisher<nav_msgs::msg::Odometry>("/ov_msckf/loop_extrinsic", 2);
  pub_loop_intrinsics = node->create_publisher<sensor_msgs::msg::CameraInfo>("/ov_msckf/loop_intrinsics", 2);
  it_pub_loop_img_depth = it.advertise("/ov_msckf/loop_depth", 2);
  it_pub_loop_img_depth_color = it.advertise("/ov_msckf/loop_depth_colored", 2);

  // option to enable publishing of global to IMU transformation
  node->declare_parameter<bool>("publish_global_to_imu_tf", true);
  node->get_parameter("publish_global_to_imu_tf", publish_global2imu_tf);
  node->declare_parameter<bool>("publish_calibration_tf", true);
  node->get_parameter("publish_calibration_tf", publish_calibration_tf);

  // Load groundtruth if we have it and are not doing simulation
  std::string path_to_gt;
  bool has_gt = node->get_parameter("path_gt", path_to_gt);
  if (has_gt && _sim == nullptr && !path_to_gt.empty()) {
    DatasetReader::load_gt_file(path_to_gt, gt_states);
    PRINT_DEBUG("gt file path is: %s\n", path_to_gt.c_str());
  }

  // Load if we should save the total state to file
  node->declare_parameter<bool>("save_total_state", false);
  node->get_parameter("save_total_state", save_total_state);

  // If enabled, (re)create the estimate/deviation output files
  if (save_total_state) {

    // files we will open
    std::string filepath_est, filepath_std, filepath_gt;
    node->declare_parameter<std::string>("filepath_est", "state_estimate.txt");
    node->declare_parameter<std::string>("filepath_std", "state_deviation.txt");
    node->declare_parameter<std::string>("filepath_gt", "state_groundtruth.txt");
    node->get_parameter<std::string>("filepath_est", filepath_est);
    node->get_parameter<std::string>("filepath_std", filepath_std);
    node->get_parameter<std::string>("filepath_gt", filepath_gt);

    // If it exists, then delete it
    if (boost::filesystem::exists(filepath_est))
      boost::filesystem::remove(filepath_est);
    if (boost::filesystem::exists(filepath_std))
      boost::filesystem::remove(filepath_std);

    // Open the files
    of_state_est.open(filepath_est.c_str());
    of_state_std.open(filepath_std.c_str());
    of_state_est << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl;
    of_state_std << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl;

    // Groundtruth if we are simulating
    if (_sim != nullptr) {
      if (boost::filesystem::exists(filepath_gt))
        boost::filesystem::remove(filepath_gt);
      of_state_gt.open(filepath_gt.c_str());
      of_state_gt << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl;
    }
  }
}
void ROS2Visualizer::setup_subscribers(std::shared_ptr<ov_core::YamlParser> parser) {

  // Create the IMU subscriber and either one synchronized stereo pair or
  // N independent monocular camera subscribers, depending on configuration.

  // We need a valid parser
  assert(parser != nullptr);

  // Create imu subscriber (handle legacy ros param info)
  std::string topic_imu;
  _node->declare_parameter<std::string>("topic_imu", "/imu0");
  _node->get_parameter("topic_imu", topic_imu);
  parser->parse_external("relative_config_imu", "imu0", "rostopic", topic_imu);
  sub_imu = _node->create_subscription<sensor_msgs::msg::Imu>(topic_imu, 1000,
                                                              std::bind(&ROS2Visualizer::callback_inertial, this, std::placeholders::_1));

  // Logic for sync stereo subscriber
  // https://answers.ros.org/question/96346/subscribe-to-two-image_raws-with-one-function/?answer=96491#post-id-96491
  if (_app->get_params().state_options.num_cameras == 2) {

    // Read in the topics
    std::string cam_topic0, cam_topic1;
    _node->declare_parameter<std::string>("topic_camera" + std::to_string(0), "/cam" + std::to_string(0) + "/image_raw");
    _node->get_parameter("topic_camera" + std::to_string(0), cam_topic0);
    _node->declare_parameter<std::string>("topic_camera" + std::to_string(1), "/cam" + std::to_string(1) + "/image_raw");
    // BUGFIX: this previously read the camera-1 parameter into cam_topic0,
    // clobbering camera 0's topic and leaving cam_topic1 unset
    _node->get_parameter("topic_camera" + std::to_string(1), cam_topic1);
    parser->parse_external("relative_config_imucam", "cam" + std::to_string(0), "rostopic", cam_topic0);
    parser->parse_external("relative_config_imucam", "cam" + std::to_string(1), "rostopic", cam_topic1);

    // Create sync filter (they have unique pointers internally, so we have to use move logic here...)
    auto image_sub0 = std::make_shared<message_filters::Subscriber<sensor_msgs::msg::Image>>(_node, cam_topic0);
    auto image_sub1 = std::make_shared<message_filters::Subscriber<sensor_msgs::msg::Image>>(_node, cam_topic1);
    auto sync = std::make_shared<message_filters::Synchronizer<sync_pol>>(sync_pol(10), *image_sub0, *image_sub1);
    sync->registerCallback(std::bind(&ROS2Visualizer::callback_stereo, this, std::placeholders::_1, std::placeholders::_2, 0, 1));
    // sync->registerCallback([](const sensor_msgs::msg::Image::SharedPtr msg0, const sensor_msgs::msg::Image::SharedPtr msg1)
    // {callback_stereo(msg0, msg1, 0, 1);});
    // sync->registerCallback(&callback_stereo2); // since the above two alternatives fail to compile for some reason

    // Append to our vector of subscribers (keeps the filters/subscribers alive)
    sync_cam.push_back(sync);
    sync_subs_cam.push_back(image_sub0);
    sync_subs_cam.push_back(image_sub1);
    PRINT_DEBUG("subscribing to cam (stereo): %s", cam_topic0.c_str());
    PRINT_DEBUG("subscribing to cam (stereo): %s", cam_topic1.c_str());

  } else {

    // Now we should add any non-stereo callbacks here
    for (int i = 0; i < _app->get_params().state_options.num_cameras; i++) {

      // read in the topic
      std::string cam_topic;
      _node->declare_parameter<std::string>("topic_camera" + std::to_string(i), "/cam" + std::to_string(i) + "/image_raw");
      _node->get_parameter("topic_camera" + std::to_string(i), cam_topic);
      parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "rostopic", cam_topic);

      // create subscriber
      // auto sub = _node->create_subscription<sensor_msgs::msg::Image>(
      //     cam_topic, rclcpp::SensorDataQoS(), std::bind(&ROS2Visualizer::callback_monocular, this, std::placeholders::_1, i));
      auto sub = _node->create_subscription<sensor_msgs::msg::Image>(
          cam_topic, 10, [this, i](const sensor_msgs::msg::Image::SharedPtr msg0) { callback_monocular(msg0, i); });
      subs_cam.push_back(sub);
      PRINT_DEBUG("subscribing to cam (mono): %s", cam_topic.c_str());
    }
  }
}
void ROS2Visualizer::visualize() {
// Return if we have already visualized
if (last_visualization_timestamp == _app->get_state()->_timestamp && _app->initialized())
return;
last_visualization_timestamp = _app->get_state()->_timestamp;
// Start timing
boost::posix_time::ptime rT0_1, rT0_2;
rT0_1 = boost::posix_time::microsec_clock::local_time();
// publish current image
publish_images();
// Return if we have not inited
if (!_app->initialized())
return;
// Save the start time of this dataset
if (!start_time_set) {
rT1 = boost::posix_time::microsec_clock::local_time();
start_time_set = true;
}
// publish state
publish_state();
// publish points
publish_features();
// Publish gt if we have it
publish_groundtruth();
// Publish keyframe information
publish_loopclosure_information();
// Save total state
if (save_total_state) {
RosVisualizerHelper::sim_save_total_state_to_file(_app->get_state(), _sim, of_state_est, of_state_std, of_state_gt);
}
// Print how much time it took to publish / displaying things
rT0_2 = boost::posix_time::microsec_clock::local_time();
double time_total = (rT0_2 - rT0_1).total_microseconds() * 1e-6;
PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for visualization\n" RESET, time_total);
}
void ROS2Visualizer::visualize_odometry(double timestamp) {
// Publish a fast-propagated odometry message at the requested timestamp, plus
// the global->imu and imu->camera transforms on TF.
// Return if we have not initialized, or if less than one second has passed since initialization
if (!_app->initialized() || (timestamp - _app->initialized_time()) < 1)
return;
// Get fast propagate state at the desired timestamp
// state_plus layout (from the assignments below): [0:3]=orientation xyzw, [4:6]=position,
// [7:9]=linear velocity (local frame), [10:12]=angular velocity
std::shared_ptr<State> state = _app->get_state();
Eigen::Matrix<double, 13, 1> state_plus = Eigen::Matrix<double, 13, 1>::Zero();
Eigen::Matrix<double, 12, 12> cov_plus = Eigen::Matrix<double, 12, 12>::Zero();
if (!_app->get_propagator()->fast_state_propagate(state, timestamp, state_plus, cov_plus))
return;
// Publish our odometry message if requested
if (pub_odomimu->get_subscription_count() != 0) {
// Our odometry message
nav_msgs::msg::Odometry odomIinM;
odomIinM.header.stamp = RosVisualizerHelper::get_time_from_seconds(timestamp);
odomIinM.header.frame_id = "global";
// The POSE component (orientation and position)
odomIinM.pose.pose.orientation.x = state_plus(0);
odomIinM.pose.pose.orientation.y = state_plus(1);
odomIinM.pose.pose.orientation.z = state_plus(2);
odomIinM.pose.pose.orientation.w = state_plus(3);
odomIinM.pose.pose.position.x = state_plus(4);
odomIinM.pose.pose.position.y = state_plus(5);
odomIinM.pose.pose.position.z = state_plus(6);
// The TWIST component (angular and linear velocities)
odomIinM.child_frame_id = "imu";
odomIinM.twist.twist.linear.x = state_plus(7); // vel in local frame
odomIinM.twist.twist.linear.y = state_plus(8); // vel in local frame
odomIinM.twist.twist.linear.z = state_plus(9); // vel in local frame
odomIinM.twist.twist.angular.x = state_plus(10); // we do not estimate this...
odomIinM.twist.twist.angular.y = state_plus(11); // we do not estimate this...
odomIinM.twist.twist.angular.z = state_plus(12); // we do not estimate this...
// Finally set the covariance in the message (in the order position then orientation as per ros convention)
// Phi is a permutation that swaps the first two 3x3 blocks (orientation <-> position)
// and leaves the trailing 6x6 velocity block in place
Eigen::Matrix<double, 12, 12> Phi = Eigen::Matrix<double, 12, 12>::Zero();
Phi.block(0, 3, 3, 3).setIdentity();
Phi.block(3, 0, 3, 3).setIdentity();
Phi.block(6, 6, 6, 6).setIdentity();
cov_plus = Phi * cov_plus * Phi.transpose();
// Top-left 6x6 of the permuted covariance -> pose covariance (row-major flatten)
for (int r = 0; r < 6; r++) {
for (int c = 0; c < 6; c++) {
odomIinM.pose.covariance[6 * r + c] = cov_plus(r, c);
}
}
// Bottom-right 6x6 -> twist covariance
for (int r = 0; r < 6; r++) {
for (int c = 0; c < 6; c++) {
odomIinM.twist.covariance[6 * r + c] = cov_plus(r + 6, c + 6);
}
}
pub_odomimu->publish(odomIinM);
}
// Publish our transform on TF
// NOTE: since we use JPL we have an implicit conversion to Hamilton when we publish
// NOTE: a rotation from GtoI in JPL has the same xyzw as a ItoG Hamilton rotation
auto odom_pose = std::make_shared<ov_type::PoseJPL>();
odom_pose->set_value(state_plus.block(0, 0, 7, 1));
geometry_msgs::msg::TransformStamped trans = RosVisualizerHelper::get_stamped_transform_from_pose(_node, odom_pose, false);
trans.header.stamp = _node->now();
trans.header.frame_id = "global";
trans.child_frame_id = "imu";
if (publish_global2imu_tf) {
mTfBr->sendTransform(trans);
}
// Loop through each camera calibration and publish it
for (const auto &calib : state->_calib_IMUtoCAM) {
geometry_msgs::msg::TransformStamped trans_calib = RosVisualizerHelper::get_stamped_transform_from_pose(_node, calib.second, true);
trans_calib.header.stamp = _node->now();
trans_calib.header.frame_id = "imu";
trans_calib.child_frame_id = "cam" + std::to_string(calib.first);
if (publish_calibration_tf) {
mTfBr->sendTransform(trans_calib);
}
}
}
void ROS2Visualizer::visualize_final() {
// Final time offset value
if (_app->get_state()->_options.do_calib_camera_timeoffset) {
PRINT_INFO(REDPURPLE "camera-imu timeoffset = %.5f\n\n" RESET, _app->get_state()->_calib_dt_CAMtoIMU->value()(0));
}
// Final camera intrinsics
if (_app->get_state()->_options.do_calib_camera_intrinsics) {
for (int i = 0; i < _app->get_state()->_options.num_cameras; i++) {
std::shared_ptr<Vec> calib = _app->get_state()->_cam_intrinsics.at(i);
PRINT_INFO(REDPURPLE "cam%d intrinsics:\n" RESET, (int)i);
PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f\n" RESET, calib->value()(0), calib->value()(1), calib->value()(2), calib->value()(3));
PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,%.5f\n\n" RESET, calib->value()(4), calib->value()(5), calib->value()(6), calib->value()(7));
}
}
// Final camera extrinsics
if (_app->get_state()->_options.do_calib_camera_pose) {
for (int i = 0; i < _app->get_state()->_options.num_cameras; i++) {
std::shared_ptr<PoseJPL> calib = _app->get_state()->_calib_IMUtoCAM.at(i);
Eigen::Matrix4d T_CtoI = Eigen::Matrix4d::Identity();
T_CtoI.block(0, 0, 3, 3) = quat_2_Rot(calib->quat()).transpose();
T_CtoI.block(0, 3, 3, 1) = -T_CtoI.block(0, 0, 3, 3) * calib->pos();
PRINT_INFO(REDPURPLE "T_C%dtoI:\n" RESET, i);
PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(0, 0), T_CtoI(0, 1), T_CtoI(0, 2), T_CtoI(0, 3));
PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(1, 0), T_CtoI(1, 1), T_CtoI(1, 2), T_CtoI(1, 3));
PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(2, 0), T_CtoI(2, 1), T_CtoI(2, 2), T_CtoI(2, 3));
PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f\n\n" RESET, T_CtoI(3, 0), T_CtoI(3, 1), T_CtoI(3, 2), T_CtoI(3, 3));
}
}
// Publish RMSE if we have it
if (!gt_states.empty()) {
PRINT_INFO(REDPURPLE "RMSE: %.3f (deg) orientation\n" RESET, std::sqrt(summed_mse_ori / summed_number));
PRINT_INFO(REDPURPLE "RMSE: %.3f (m) position\n\n" RESET, std::sqrt(summed_mse_pos / summed_number));
}
// Publish RMSE and NEES if doing simulation
if (_sim != nullptr) {
PRINT_INFO(REDPURPLE "RMSE: %.3f (deg) orientation\n" RESET, std::sqrt(summed_mse_ori / summed_number));
PRINT_INFO(REDPURPLE "RMSE: %.3f (m) position\n\n" RESET, std::sqrt(summed_mse_pos / summed_number));
PRINT_INFO(REDPURPLE "NEES: %.3f (deg) orientation\n" RESET, summed_nees_ori / summed_number);
PRINT_INFO(REDPURPLE "NEES: %.3f (m) position\n\n" RESET, summed_nees_pos / summed_number);
}
// Print the total time
rT2 = boost::posix_time::microsec_clock::local_time();
PRINT_INFO(REDPURPLE "TIME: %.3f seconds\n\n" RESET, (rT2 - rT1).total_microseconds() * 1e-6);
}
void ROS2Visualizer::callback_inertial(const sensor_msgs::msg::Imu::SharedPtr msg) {

  // Feed the IMU measurement to the estimator, publish fast odometry, and (if not
  // already running) spawn the async thread that processes queued camera images.

  // convert into correct format
  ov_core::ImuData message;
  message.timestamp = msg->header.stamp.sec + msg->header.stamp.nanosec * 1e-9;
  message.wm << msg->angular_velocity.x, msg->angular_velocity.y, msg->angular_velocity.z;
  message.am << msg->linear_acceleration.x, msg->linear_acceleration.y, msg->linear_acceleration.z;

  // send it to our VIO system
  _app->feed_measurement_imu(message);
  visualize_odometry(message.timestamp);

  // If the processing queue is currently active / running just return so we can keep getting measurements
  // Otherwise create a second thread to do our update in an async manor
  // The visualization of the state, images, and features will be synchronous with the update!
  if (thread_update_running)
    return;
  thread_update_running = true;

  // BUGFIX: capture "message" BY VALUE (not [&]). The original reference capture
  // let a detached thread read this stack-local after the callback returned,
  // which is a use-after-scope. "this" is still captured for member access.
  std::thread thread([this, message] {
    // Lock on the queue (prevents new images from appending)
    std::lock_guard<std::mutex> lck(camera_queue_mtx);

    // Count how many unique image streams are currently queued
    std::map<int, bool> unique_cam_ids;
    for (const auto &cam_msg : camera_queue) {
      unique_cam_ids[cam_msg.sensor_ids.at(0)] = true;
    }

    // If we do not have enough unique cameras then we need to wait
    // We should wait till we have one of each camera to ensure we propagate in the correct order
    auto params = _app->get_params();
    size_t num_unique_cameras = (params.state_options.num_cameras == 2) ? 1 : params.state_options.num_cameras;
    if (unique_cam_ids.size() == num_unique_cameras) {

      // Loop through our queue and see if we are able to process any of our camera measurements
      // We are able to process if we have at least one IMU measurement greater than the camera time
      double timestamp_imu_inC = message.timestamp - _app->get_state()->_calib_dt_CAMtoIMU->value()(0);
      while (!camera_queue.empty() && camera_queue.at(0).timestamp < timestamp_imu_inC) {
        auto rT0_1 = boost::posix_time::microsec_clock::local_time();
        _app->feed_measurement_camera(camera_queue.at(0));
        visualize();
        camera_queue.pop_front();
        auto rT0_2 = boost::posix_time::microsec_clock::local_time();
        double time_total = (rT0_2 - rT0_1).total_microseconds() * 1e-6;
        PRINT_INFO(BLUE "[TIME]: %.4f seconds total (%.1f hz)\n" RESET, time_total, 1.0 / time_total);
      }
    }
    thread_update_running = false;
  });

  // If we are single threaded, then run single threaded
  // Otherwise detach this thread so it runs in the background!
  if (!_app->get_params().use_multi_threading) {
    thread.join();
  } else {
    thread.detach();
  }
}
void ROS2Visualizer::callback_monocular(const sensor_msgs::msg::Image::SharedPtr msg0, int cam_id0) {

  // Throttle: drop this frame if it arrives faster than the configured track frequency
  double time_delta = 1.0 / _app->get_params().track_frequency;
  double timestamp = msg0->header.stamp.sec + msg0->header.stamp.nanosec * 1e-9;
  auto it_last = camera_last_timestamp.find(cam_id0);
  if (it_last != camera_last_timestamp.end() && timestamp < it_last->second + time_delta) {
    return;
  }
  camera_last_timestamp[cam_id0] = timestamp;

  // Convert the ROS image into an OpenCV grayscale image
  cv_bridge::CvImageConstPtr image_ptr;
  try {
    image_ptr = cv_bridge::toCvShare(msg0, sensor_msgs::image_encodings::MONO8);
  } catch (cv_bridge::Exception &e) {
    PRINT_ERROR("cv_bridge exception: %s", e.what());
    return;
  }

  // Build the camera measurement for this single camera
  ov_core::CameraData message;
  message.timestamp = image_ptr->header.stamp.sec + image_ptr->header.stamp.nanosec * 1e-9;
  message.sensor_ids.push_back(cam_id0);
  message.images.push_back(image_ptr->image.clone());

  // Attach the configured mask, or an all-zero (nothing masked) one
  // TODO: in the future we should get this from external pixel segmentation
  if (_app->get_params().use_mask) {
    message.masks.push_back(_app->get_params().masks.at(cam_id0));
  } else {
    message.masks.push_back(cv::Mat::zeros(image_ptr->image.rows, image_ptr->image.cols, CV_8UC1));
  }

  // Queue it up (kept sorted by time) for processing in the IMU callback
  std::lock_guard<std::mutex> lck(camera_queue_mtx);
  camera_queue.push_back(message);
  std::sort(camera_queue.begin(), camera_queue.end());
}
void ROS2Visualizer::callback_stereo(const sensor_msgs::msg::Image::ConstSharedPtr msg0, const sensor_msgs::msg::Image::ConstSharedPtr msg1,
                                     int cam_id0, int cam_id1) {

  // Throttle: drop this pair if it arrives faster than the configured track frequency
  double time_delta = 1.0 / _app->get_params().track_frequency;
  double timestamp = msg0->header.stamp.sec + msg0->header.stamp.nanosec * 1e-9;
  auto it_last = camera_last_timestamp.find(cam_id0);
  if (it_last != camera_last_timestamp.end() && timestamp < it_last->second + time_delta) {
    return;
  }
  camera_last_timestamp[cam_id0] = timestamp;

  // Convert the first (left) image
  cv_bridge::CvImageConstPtr image_ptr0;
  try {
    image_ptr0 = cv_bridge::toCvShare(msg0, sensor_msgs::image_encodings::MONO8);
  } catch (cv_bridge::Exception &e) {
    PRINT_ERROR("cv_bridge exception: %s", e.what());
    return;
  }

  // Convert the second (right) image
  cv_bridge::CvImageConstPtr image_ptr1;
  try {
    image_ptr1 = cv_bridge::toCvShare(msg1, sensor_msgs::image_encodings::MONO8);
  } catch (cv_bridge::Exception &e) {
    PRINT_ERROR("cv_bridge exception: %s", e.what());
    return;
  }

  // Build a single stereo measurement (cam_id0 first, then cam_id1)
  ov_core::CameraData message;
  message.timestamp = image_ptr0->header.stamp.sec + image_ptr0->header.stamp.nanosec * 1e-9;
  message.sensor_ids.push_back(cam_id0);
  message.sensor_ids.push_back(cam_id1);
  message.images.push_back(image_ptr0->image.clone());
  message.images.push_back(image_ptr1->image.clone());

  // Attach the configured masks, or all-zero (nothing masked) ones
  // TODO: in the future we should get this from external pixel segmentation
  if (_app->get_params().use_mask) {
    message.masks.push_back(_app->get_params().masks.at(cam_id0));
    message.masks.push_back(_app->get_params().masks.at(cam_id1));
  } else {
    message.masks.push_back(cv::Mat::zeros(image_ptr0->image.rows, image_ptr0->image.cols, CV_8UC1));
    message.masks.push_back(cv::Mat::zeros(image_ptr1->image.rows, image_ptr1->image.cols, CV_8UC1));
  }

  // Queue it up (kept sorted by time) for processing in the IMU callback
  std::lock_guard<std::mutex> lck(camera_queue_mtx);
  camera_queue.push_back(message);
  std::sort(camera_queue.begin(), camera_queue.end());
}
void ROS2Visualizer::publish_state() {

  // Current filter estimate
  std::shared_ptr<State> state = _app->get_state();

  // Publish in the IMU clock frame: the state timestamp is the last camera
  // time, so shift it by the estimated camera-to-IMU time offset
  double t_ItoC = state->_calib_dt_CAMtoIMU->value()(0);
  double timestamp_inI = state->_timestamp + t_ItoC;

  // Pose of the IMU in the global frame (note we use the bag time)
  geometry_msgs::msg::PoseWithCovarianceStamped poseIinM;
  poseIinM.header.stamp = RosVisualizerHelper::get_time_from_seconds(timestamp_inI);
  poseIinM.header.frame_id = "global";
  poseIinM.pose.pose.orientation.x = state->_imu->quat()(0);
  poseIinM.pose.pose.orientation.y = state->_imu->quat()(1);
  poseIinM.pose.pose.orientation.z = state->_imu->quat()(2);
  poseIinM.pose.pose.orientation.w = state->_imu->quat()(3);
  poseIinM.pose.pose.position.x = state->_imu->pos()(0);
  poseIinM.pose.pose.position.y = state->_imu->pos()(1);
  poseIinM.pose.pose.position.z = state->_imu->pos()(2);

  // Marginal covariance of the pose (position then orientation, as per ros convention)
  std::vector<std::shared_ptr<Type>> statevars;
  statevars.push_back(state->_imu->pose()->p());
  statevars.push_back(state->_imu->pose()->q());
  Eigen::Matrix<double, 6, 6> covariance_posori = StateHelper::get_marginal_covariance(_app->get_state(), statevars);
  for (int idx = 0; idx < 36; idx++) {
    poseIinM.pose.covariance[idx] = covariance_posori(idx / 6, idx % 6);
  }
  pub_poseimu->publish(poseIinM);

  //=========================================================
  //=========================================================

  // Record this pose for the path visualization
  geometry_msgs::msg::PoseStamped posetemp;
  posetemp.header = poseIinM.header;
  posetemp.pose = poseIinM.pose.pose;
  poses_imu.push_back(posetemp);

  // Publish the IMU path, downsampled as needed to prevent rviz crashes
  // NOTE: https://github.com/ros-visualization/rviz/issues/1107
  nav_msgs::msg::Path arrIMU;
  arrIMU.header.stamp = _node->now();
  arrIMU.header.frame_id = "global";
  size_t step = (size_t)std::floor((double)poses_imu.size() / 16384.0) + 1;
  for (size_t i = 0; i < poses_imu.size(); i += step) {
    arrIMU.poses.push_back(poses_imu.at(i));
  }
  pub_pathimu->publish(arrIMU);
}
void ROS2Visualizer::publish_images() {

  // Nothing to do if nobody is listening
  if (it_pub_tracks.getNumSubscribers() == 0)
    return;

  // Package the historical feature-track visualization into a ROS image
  std_msgs::msg::Header header;
  header.stamp = _node->now();
  header.frame_id = "cam0";
  cv::Mat img_history = _app->get_historical_viz_image();
  sensor_msgs::msg::Image::SharedPtr exl_msg = cv_bridge::CvImage(header, "bgr8", img_history).toImageMsg();

  // Publish
  it_pub_tracks.publish(exl_msg);
}
void ROS2Visualizer::publish_features() {
// Check if we have subscribers
if (pub_points_msckf->get_subscription_count() == 0 && pub_points_slam->get_subscription_count() == 0 &&
pub_points_aruco->get_subscription_count() == 0 && pub_points_sim->get_subscription_count() == 0)
return;
// Get our good MSCKF features
std::vector<Eigen::Vector3d> feats_msckf = _app->get_good_features_MSCKF();
sensor_msgs::msg::PointCloud2 cloud = RosVisualizerHelper::get_ros_pointcloud(_node, feats_msckf);
pub_points_msckf->publish(cloud);
// Get our good SLAM features
std::vector<Eigen::Vector3d> feats_slam = _app->get_features_SLAM();
sensor_msgs::msg::PointCloud2 cloud_SLAM = RosVisualizerHelper::get_ros_pointcloud(_node, feats_slam);
pub_points_slam->publish(cloud_SLAM);
// Get our good ARUCO features
std::vector<Eigen::Vector3d> feats_aruco = _app->get_features_ARUCO();
sensor_msgs::msg::PointCloud2 cloud_ARUCO = RosVisualizerHelper::get_ros_pointcloud(_node, feats_aruco);
pub_points_aruco->publish(cloud_ARUCO);
// Skip the rest of we are not doing simulation
if (_sim == nullptr)
return;
// Get our good SIMULATION features
std::vector<Eigen::Vector3d> feats_sim = _sim->get_map_vec();
sensor_msgs::msg::PointCloud2 cloud_SIM = RosVisualizerHelper::get_ros_pointcloud(_node, feats_sim);
pub_points_sim->publish(cloud_SIM);
}
void ROS2Visualizer::publish_groundtruth() {
  // Publishes the groundtruth pose (loaded from file, or queried from the
  // simulator), appends it to the groundtruth path, broadcasts the
  // global->truth TF, and prints running RMSE / NEES statistics comparing the
  // current filter estimate against the groundtruth.

  // Our groundtruth state
  Eigen::Matrix<double, 17, 1> state_gt;

  // We want to publish in the IMU clock frame
  // The timestamp in the state will be the last camera time
  double t_ItoC = _app->get_state()->_calib_dt_CAMtoIMU->value()(0);
  double timestamp_inI = _app->get_state()->_timestamp + t_ItoC;

  // Check that we have the timestamp in our GT file [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel]
  if (_sim == nullptr && (gt_states.empty() || !DatasetReader::get_gt_state(timestamp_inI, state_gt, gt_states))) {
    return;
  }

  // Get the simulated groundtruth
  // NOTE: we get the true time in the IMU clock frame
  if (_sim != nullptr) {
    timestamp_inI = _app->get_state()->_timestamp + _sim->get_true_parameters().calib_camimu_dt;
    if (!_sim->get_state(timestamp_inI, state_gt))
      return;
  }

  // Get the GT and system state state
  // NOTE(review): the indexing below implies state_ekf is [q(0:3), p(4:6), ...]
  // while state_gt is [t(0), q(1:4), p(5:7), ...] -- confirm against ov_type::IMU
  Eigen::Matrix<double, 16, 1> state_ekf = _app->get_state()->_imu->value();

  // Create pose of IMU (orientation/position pulled from the GT vector)
  geometry_msgs::msg::PoseStamped poseIinM;
  poseIinM.header.stamp = RosVisualizerHelper::get_time_from_seconds(timestamp_inI);
  poseIinM.header.frame_id = "global";
  poseIinM.pose.orientation.x = state_gt(1, 0);
  poseIinM.pose.orientation.y = state_gt(2, 0);
  poseIinM.pose.orientation.z = state_gt(3, 0);
  poseIinM.pose.orientation.w = state_gt(4, 0);
  poseIinM.pose.position.x = state_gt(5, 0);
  poseIinM.pose.position.y = state_gt(6, 0);
  poseIinM.pose.position.z = state_gt(7, 0);
  pub_posegt->publish(poseIinM);

  // Append to our pose vector
  poses_gt.push_back(poseIinM);

  // Create our path (imu)
  // NOTE: We downsample the number of poses as needed to prevent rviz crashes
  // NOTE: https://github.com/ros-visualization/rviz/issues/1107
  nav_msgs::msg::Path arrIMU;
  arrIMU.header.stamp = _node->now();
  arrIMU.header.frame_id = "global";
  for (size_t i = 0; i < poses_gt.size(); i += std::floor((double)poses_gt.size() / 16384.0) + 1) {
    arrIMU.poses.push_back(poses_gt.at(i));
  }
  pub_pathgt->publish(arrIMU);

  // Publish our transform on TF (global -> truth), if enabled
  geometry_msgs::msg::TransformStamped trans;
  trans.header.stamp = _node->now();
  trans.header.frame_id = "global";
  trans.child_frame_id = "truth";
  trans.transform.rotation.x = state_gt(1, 0);
  trans.transform.rotation.y = state_gt(2, 0);
  trans.transform.rotation.z = state_gt(3, 0);
  trans.transform.rotation.w = state_gt(4, 0);
  trans.transform.translation.x = state_gt(5, 0);
  trans.transform.translation.y = state_gt(6, 0);
  trans.transform.translation.z = state_gt(7, 0);
  if (publish_global2imu_tf) {
    mTfBr->sendTransform(trans);
  }

  //==========================================================================
  //==========================================================================

  // Difference between positions (estimate vs groundtruth)
  double dx = state_ekf(4, 0) - state_gt(5, 0);
  double dy = state_ekf(5, 0) - state_gt(6, 0);
  double dz = state_ekf(6, 0) - state_gt(7, 0);
  double err_pos = std::sqrt(dx * dx + dy * dy + dz * dz);

  // Quaternion error: angle of the relative rotation between estimate and GT
  // (small-angle: rotation angle ~= 2 * ||vector part of error quaternion||)
  Eigen::Matrix<double, 4, 1> quat_gt, quat_st, quat_diff;
  quat_gt << state_gt(1, 0), state_gt(2, 0), state_gt(3, 0), state_gt(4, 0);
  quat_st << state_ekf(0, 0), state_ekf(1, 0), state_ekf(2, 0), state_ekf(3, 0);
  quat_diff = quat_multiply(quat_st, Inv(quat_gt));
  double err_ori = (180 / M_PI) * 2 * quat_diff.block(0, 0, 3, 1).norm();

  //==========================================================================
  //==========================================================================

  // Get covariance of pose (orientation block first, then position)
  std::vector<std::shared_ptr<Type>> statevars;
  statevars.push_back(_app->get_state()->_imu->q());
  statevars.push_back(_app->get_state()->_imu->p());
  Eigen::Matrix<double, 6, 6> covariance = StateHelper::get_marginal_covariance(_app->get_state(), statevars);

  // Calculate NEES values: e^T * P^{-1} * e, with orientation error 2*quat_diff_vec
  // NOTE: need to manually multiply things out to make static asserts work
  // NOTE: https://github.com/rpng/open_vins/pull/226
  // NOTE: https://github.com/rpng/open_vins/issues/236
  // NOTE: https://gitlab.com/libeigen/eigen/-/issues/1664
  Eigen::Vector3d quat_diff_vec = quat_diff.block(0, 0, 3, 1);
  Eigen::Vector3d cov_vec = covariance.block(0, 0, 3, 3).inverse() * 2 * quat_diff.block(0, 0, 3, 1);
  double ori_nees = 2 * quat_diff_vec.dot(cov_vec);
  Eigen::Vector3d errpos = state_ekf.block(4, 0, 3, 1) - state_gt.block(5, 0, 3, 1);
  double pos_nees = errpos.transpose() * covariance.block(3, 3, 3, 3).inverse() * errpos;

  //==========================================================================
  //==========================================================================

  // Update our running averages (skip NaNs so one bad solve doesn't poison them)
  if (!std::isnan(ori_nees) && !std::isnan(pos_nees)) {
    summed_mse_ori += err_ori * err_ori;
    summed_mse_pos += err_pos * err_pos;
    summed_nees_ori += ori_nees;
    summed_nees_pos += pos_nees;
    summed_number++;
  }

  // Nice display for the user
  PRINT_INFO(REDPURPLE "error to gt => %.3f, %.3f (deg,m) | rmse => %.3f, %.3f (deg,m) | called %d times\n" RESET, err_ori, err_pos,
             std::sqrt(summed_mse_ori / summed_number), std::sqrt(summed_mse_pos / summed_number), (int)summed_number);
  PRINT_INFO(REDPURPLE "nees => %.1f, %.1f (ori,pos) | avg nees = %.1f, %.1f (ori,pos)\n" RESET, ori_nees, pos_nees,
             summed_nees_ori / summed_number, summed_nees_pos / summed_number);

  //==========================================================================
  //==========================================================================
}
void ROS2Visualizer::publish_loopclosure_information() {
  // Publishes everything an external loop-closure module needs for the active
  // frame: historical clone pose, IMU-to-cam0 extrinsics, cam0 intrinsics, the
  // tracked features in the global frame, and sparse depth images.

  // Get the current tracks in this frame
  double active_tracks_time1 = -1;
  double active_tracks_time2 = -1;
  std::unordered_map<size_t, Eigen::Vector3d> active_tracks_posinG;
  std::unordered_map<size_t, Eigen::Vector3d> active_tracks_uvd;
  cv::Mat active_cam0_image;
  _app->get_active_tracks(active_tracks_time1, active_tracks_posinG, active_tracks_uvd);
  _app->get_active_image(active_tracks_time2, active_cam0_image);
  // Bail out unless we have valid tracks, a clone at that time, and the image
  // timestamp matches the track timestamp (they must describe the same frame)
  if (active_tracks_time1 == -1)
    return;
  if (_app->get_state()->_clones_IMU.find(active_tracks_time1) == _app->get_state()->_clones_IMU.end())
    return;
  if (active_tracks_time1 != active_tracks_time2)
    return;

  // Default header (shared by all the messages below)
  std_msgs::msg::Header header;
  header.stamp = RosVisualizerHelper::get_time_from_seconds(active_tracks_time1);

  //======================================================
  // Check if we have subscribers for the pose odometry, camera intrinsics, or extrinsics
  if (pub_loop_pose->get_subscription_count() != 0 || pub_loop_extrinsic->get_subscription_count() != 0 ||
      pub_loop_intrinsics->get_subscription_count() != 0) {

    // PUBLISH HISTORICAL POSE ESTIMATE (the clone at the active-track time)
    nav_msgs::msg::Odometry odometry_pose;
    odometry_pose.header = header;
    odometry_pose.header.frame_id = "global";
    odometry_pose.pose.pose.position.x = _app->get_state()->_clones_IMU.at(active_tracks_time1)->pos()(0);
    odometry_pose.pose.pose.position.y = _app->get_state()->_clones_IMU.at(active_tracks_time1)->pos()(1);
    odometry_pose.pose.pose.position.z = _app->get_state()->_clones_IMU.at(active_tracks_time1)->pos()(2);
    odometry_pose.pose.pose.orientation.x = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat()(0);
    odometry_pose.pose.pose.orientation.y = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat()(1);
    odometry_pose.pose.pose.orientation.z = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat()(2);
    odometry_pose.pose.pose.orientation.w = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat()(3);
    pub_loop_pose->publish(odometry_pose);

    // PUBLISH IMU TO CAMERA0 EXTRINSIC
    // need to flip the transform to the IMU frame
    Eigen::Vector4d q_ItoC = _app->get_state()->_calib_IMUtoCAM.at(0)->quat();
    Eigen::Vector3d p_CinI = -_app->get_state()->_calib_IMUtoCAM.at(0)->Rot().transpose() * _app->get_state()->_calib_IMUtoCAM.at(0)->pos();
    nav_msgs::msg::Odometry odometry_calib;
    odometry_calib.header = header;
    odometry_calib.header.frame_id = "imu";
    odometry_calib.pose.pose.position.x = p_CinI(0);
    odometry_calib.pose.pose.position.y = p_CinI(1);
    odometry_calib.pose.pose.position.z = p_CinI(2);
    odometry_calib.pose.pose.orientation.x = q_ItoC(0);
    odometry_calib.pose.pose.orientation.y = q_ItoC(1);
    odometry_calib.pose.pose.orientation.z = q_ItoC(2);
    odometry_calib.pose.pose.orientation.w = q_ItoC(3);
    pub_loop_extrinsic->publish(odometry_calib);

    // PUBLISH CAMERA0 INTRINSICS
    // NOTE(review): assumes the intrinsic vector has 8 entries [fx,fy,cx,cy,d0..d3] -- confirm for all camera models
    bool is_fisheye = (std::dynamic_pointer_cast<ov_core::CamEqui>(_app->get_params().camera_intrinsics.at(0)) != nullptr);
    sensor_msgs::msg::CameraInfo cameraparams;
    cameraparams.header = header;
    cameraparams.header.frame_id = "cam0";
    cameraparams.distortion_model = is_fisheye ? "equidistant" : "plumb_bob";
    Eigen::VectorXd cparams = _app->get_state()->_cam_intrinsics.at(0)->value();
    cameraparams.d = {cparams(4), cparams(5), cparams(6), cparams(7)};
    cameraparams.k = {cparams(0), 0, cparams(2), 0, cparams(1), cparams(3), 0, 0, 1};
    pub_loop_intrinsics->publish(cameraparams);
  }

  //======================================================
  // PUBLISH FEATURE TRACKS IN THE GLOBAL FRAME OF REFERENCE
  if (pub_loop_point->get_subscription_count() != 0) {

    // Construct the message
    sensor_msgs::msg::PointCloud point_cloud;
    point_cloud.header = header;
    point_cloud.header.frame_id = "global";
    for (const auto &feattimes : active_tracks_posinG) {

      // Get this feature information
      size_t featid = feattimes.first;
      Eigen::Vector3d uvd = Eigen::Vector3d::Zero();
      if (active_tracks_uvd.find(featid) != active_tracks_uvd.end()) {
        uvd = active_tracks_uvd.at(featid);
      }
      Eigen::Vector3d pFinG = active_tracks_posinG.at(featid);

      // Push back 3d point
      geometry_msgs::msg::Point32 p;
      p.x = pFinG(0);
      p.y = pFinG(1);
      p.z = pFinG(2);
      point_cloud.points.push_back(p);

      // Push back the uv_norm, uv_raw, and feature id
      // NOTE: we don't use the normalized coordinates to save time here
      // NOTE: they will have to be re-normalized in the loop closure code
      sensor_msgs::msg::ChannelFloat32 p_2d;
      p_2d.values.push_back(0);
      p_2d.values.push_back(0);
      p_2d.values.push_back(uvd(0));
      p_2d.values.push_back(uvd(1));
      p_2d.values.push_back(featid);
      point_cloud.channels.push_back(p_2d);
    }
    pub_loop_point->publish(point_cloud);
  }

  //======================================================
  // Depth images of sparse points and its colorized version
  if (it_pub_loop_img_depth.getNumSubscribers() != 0 || it_pub_loop_img_depth_color.getNumSubscribers() != 0) {

    // Create the images we will populate with the depths
    std::pair<int, int> wh_pair = {active_cam0_image.cols, active_cam0_image.rows};
    cv::Mat depthmap_viz;
    cv::cvtColor(active_cam0_image, depthmap_viz, cv::COLOR_GRAY2RGB);
    cv::Mat depthmap = cv::Mat::zeros(wh_pair.second, wh_pair.first, CV_16UC1);

    // Loop through all points and append
    for (const auto &feattimes : active_tracks_uvd) {

      // Get this feature information
      size_t featid = feattimes.first;
      Eigen::Vector3d uvd = active_tracks_uvd.at(featid);

      // Skip points too close to the border (the drawn square below is dw wide)
      double dw = 3;
      if (uvd(0) < dw || uvd(0) > wh_pair.first - dw || uvd(1) < dw || uvd(1) > wh_pair.second - dw) {
        continue;
      }

      // Append the depth
      // NOTE: scaled by 1000 to fit the 16U
      // NOTE: access order is y,x (stupid opencv convention stuff)
      depthmap.at<uint16_t>((int)uvd(1), (int)uvd(0)) = (uint16_t)(1000 * uvd(2));

      // Taken from LSD-SLAM codebase segment into 0-4 meter segments:
      // https://github.com/tum-vision/lsd_slam/blob/d1e6f0e1a027889985d2e6b4c0fe7a90b0c75067/lsd_slam_core/src/util/globalFuncs.cpp#L87-L96
      float id = 1.0f / (float)uvd(2);
      float r = (0.0f - id) * 255 / 1.0f;
      if (r < 0)
        r = -r;
      float g = (1.0f - id) * 255 / 1.0f;
      if (g < 0)
        g = -g;
      float b = (2.0f - id) * 255 / 1.0f;
      if (b < 0)
        b = -b;
      uchar rc = r < 0 ? 0 : (r > 255 ? 255 : r);
      uchar gc = g < 0 ? 0 : (g > 255 ? 255 : g);
      uchar bc = b < 0 ? 0 : (b > 255 ? 255 : b);
      cv::Scalar color(255 - rc, 255 - gc, 255 - bc);

      // Small square around the point (note the above bound check needs to take into account this width)
      cv::Point p0(uvd(0) - dw, uvd(1) - dw);
      cv::Point p1(uvd(0) + dw, uvd(1) + dw);
      cv::rectangle(depthmap_viz, p0, p1, color, -1);
    }

    // Create our messages (raw 16-bit depth, then the colorized overlay)
    header.frame_id = "cam0";
    sensor_msgs::msg::Image::SharedPtr exl_msg1 =
        cv_bridge::CvImage(header, sensor_msgs::image_encodings::TYPE_16UC1, depthmap).toImageMsg();
    it_pub_loop_img_depth.publish(exl_msg1);
    header.stamp = _node->now();
    header.frame_id = "cam0";
    sensor_msgs::msg::Image::SharedPtr exl_msg2 = cv_bridge::CvImage(header, "bgr8", depthmap_viz).toImageMsg();
    it_pub_loop_img_depth_color.publish(exl_msg2);
  }
}

View File

@@ -0,0 +1,204 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_ROS2VISUALIZER_H
#define OV_MSCKF_ROS2VISUALIZER_H
#include <geometry_msgs/msg/pose_stamped.hpp>
#include <geometry_msgs/msg/pose_with_covariance_stamped.hpp>
#include <geometry_msgs/msg/transform_stamped.hpp>
#include <image_transport/image_transport.h>
#include <message_filters/subscriber.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/time_synchronizer.h>
#include <nav_msgs/msg/odometry.hpp>
#include <nav_msgs/msg/path.hpp>
#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/camera_info.hpp>
#include <sensor_msgs/msg/image.hpp>
#include <sensor_msgs/msg/imu.hpp>
#include <sensor_msgs/msg/nav_sat_fix.hpp>
#include <sensor_msgs/msg/point_cloud.hpp>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <sensor_msgs/point_cloud2_iterator.hpp>
#include <std_msgs/msg/float64.hpp>
#include <tf2/LinearMath/Quaternion.h>
#include <tf2/transform_datatypes.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>
#include <tf2_ros/transform_broadcaster.h>
#include <boost/filesystem.hpp>
#include <cv_bridge/cv_bridge.h>
#include "core/VioManager.h"
#include "ros/RosVisualizerHelper.h"
#include "sim/Simulator.h"
#include "utils/dataset_reader.h"
#include "utils/print.h"
#include "utils/sensor_data.h"
namespace ov_msckf {
/**
 * @brief Helper class that will publish results onto the ROS framework.
 *
 * Also save to file the current total state and covariance along with the groundtruth if we are simulating.
 * We visualize the following things:
 * - State of the system on TF, pose message, and path
 * - Image of our tracker
 * - Our different features (SLAM, MSCKF, ARUCO)
 * - Groundtruth trajectory if we have it
 */
class ROS2Visualizer {

public:
  /**
   * @brief Default constructor
   * @param node ROS node pointer
   * @param app Core estimator manager
   * @param sim Simulator if we are simulating
   */
  ROS2Visualizer(std::shared_ptr<rclcpp::Node> node, std::shared_ptr<VioManager> app, std::shared_ptr<Simulator> sim = nullptr);

  /**
   * @brief Will setup ROS subscribers and callbacks
   * @param parser Configuration file parser
   */
  void setup_subscribers(std::shared_ptr<ov_core::YamlParser> parser);

  /**
   * @brief Will visualize the system if we have new things
   */
  void visualize();

  /**
   * @brief Will publish our odometry message for the current timestep.
   * This will take the current state estimate and get the propagated pose to the desired time.
   * This can be used to get pose estimates on systems which require high frequency pose estimates.
   */
  void visualize_odometry(double timestamp);

  /**
   * @brief After the run has ended, print results
   */
  void visualize_final();

  /// Callback for inertial information
  void callback_inertial(const sensor_msgs::msg::Imu::SharedPtr msg);

  /// Callback for monocular cameras information
  void callback_monocular(const sensor_msgs::msg::Image::SharedPtr msg0, int cam_id0);

  /// Callback for synchronized stereo camera information
  void callback_stereo(const sensor_msgs::msg::Image::ConstSharedPtr msg0, const sensor_msgs::msg::Image::ConstSharedPtr msg1, int cam_id0,
                       int cam_id1);

protected:
  /// Publish the current state
  void publish_state();

  /// Publish the active tracking image
  void publish_images();

  /// Publish current features
  void publish_features();

  /// Publish groundtruth (if we have it)
  void publish_groundtruth();

  /// Publish loop-closure information of current pose and active track information
  void publish_loopclosure_information();

  /// Global node handler
  std::shared_ptr<rclcpp::Node> _node;

  /// Core application of the filter system
  std::shared_ptr<VioManager> _app;

  /// Simulator (is nullptr if we are not sim'ing)
  std::shared_ptr<Simulator> _sim;

  // Our publishers (image_transport for images, rclcpp publishers for the rest)
  image_transport::Publisher it_pub_tracks, it_pub_loop_img_depth, it_pub_loop_img_depth_color;
  rclcpp::Publisher<geometry_msgs::msg::PoseWithCovarianceStamped>::SharedPtr pub_poseimu;
  rclcpp::Publisher<nav_msgs::msg::Odometry>::SharedPtr pub_odomimu;
  rclcpp::Publisher<nav_msgs::msg::Path>::SharedPtr pub_pathimu;
  rclcpp::Publisher<sensor_msgs::msg::PointCloud2>::SharedPtr pub_points_msckf, pub_points_slam, pub_points_aruco, pub_points_sim;
  rclcpp::Publisher<nav_msgs::msg::Odometry>::SharedPtr pub_loop_pose, pub_loop_extrinsic;
  rclcpp::Publisher<sensor_msgs::msg::PointCloud>::SharedPtr pub_loop_point;
  rclcpp::Publisher<sensor_msgs::msg::CameraInfo>::SharedPtr pub_loop_intrinsics;
  std::shared_ptr<tf2_ros::TransformBroadcaster> mTfBr;

  // Our subscribers and camera synchronizers (one approximate-time sync per stereo pair)
  rclcpp::Subscription<sensor_msgs::msg::Imu>::SharedPtr sub_imu;
  std::vector<rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr> subs_cam;
  typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::msg::Image, sensor_msgs::msg::Image> sync_pol;
  std::vector<std::shared_ptr<message_filters::Synchronizer<sync_pol>>> sync_cam;
  std::vector<std::shared_ptr<message_filters::Subscriber<sensor_msgs::msg::Image>>> sync_subs_cam;

  // For path viz (history of estimated IMU poses, downsampled when published)
  std::vector<geometry_msgs::msg::PoseStamped> poses_imu;

  // Groundtruth infomation
  rclcpp::Publisher<nav_msgs::msg::Path>::SharedPtr pub_pathgt;
  rclcpp::Publisher<geometry_msgs::msg::PoseStamped>::SharedPtr pub_posegt;
  // Running accumulators for RMSE / NEES statistics against groundtruth
  double summed_mse_ori = 0.0;
  double summed_mse_pos = 0.0;
  double summed_nees_ori = 0.0;
  double summed_nees_pos = 0.0;
  size_t summed_number = 0;

  // Start and end timestamps
  bool start_time_set = false;
  boost::posix_time::ptime rT1, rT2;

  // Thread atomics (true while the async camera-processing thread is active)
  std::atomic<bool> thread_update_running;

  /// Queue up camera measurements sorted by time and trigger once we have
  /// exactly one IMU measurement with timestamp newer than the camera measurement
  /// This also handles out-of-order camera measurements, which is rare, but
  /// a nice feature to have for general robustness to bad camera drivers.
  std::deque<ov_core::CameraData> camera_queue;
  std::mutex camera_queue_mtx;

  // Last camera message timestamps we have received (mapped by cam id), used for throttling
  std::map<int, double> camera_last_timestamp;

  // Last timestamp we visualized at
  double last_visualization_timestamp = 0;

  // Our groundtruth states (keyed by timestamp, loaded from file)
  std::map<double, Eigen::Matrix<double, 17, 1>> gt_states;

  // For path viz (history of groundtruth poses)
  std::vector<geometry_msgs::msg::PoseStamped> poses_gt;
  bool publish_global2imu_tf = true;
  bool publish_calibration_tf = true;

  // Files and if we should save total state
  bool save_total_state;
  std::ofstream of_state_est, of_state_std, of_state_gt;
};
} // namespace ov_msckf
#endif // OV_MSCKF_ROS2VISUALIZER_H

View File

@@ -0,0 +1,388 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_ROSVISUALIZER_HELPER_H
#define OV_MSCKF_ROSVISUALIZER_HELPER_H
#if ROS_AVAILABLE == 1
#include <sensor_msgs/PointCloud.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>
#include <tf/transform_broadcaster.h>
#elif ROS_AVAILABLE == 2
#include <sensor_msgs/msg/point_cloud.hpp>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <sensor_msgs/point_cloud2_iterator.hpp>
#include <tf2/transform_datatypes.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>
#endif
#include "core/VioManager.h"
#include "sim/Simulator.h"
namespace ov_msckf {
/**
* @brief Helper class that handles some common versions into and out of ROS formats
*/
class RosVisualizerHelper {
public:
#if ROS_AVAILABLE == 1
/**
 * @brief Converts a vector of 3d features into a ROS pointcloud message
 * @param feats Vector of features we will convert into ros format
 * @return ROS pointcloud
 */
static sensor_msgs::PointCloud2 get_ros_pointcloud(const std::vector<Eigen::Vector3d> &feats) {

  // Declare message and sizes
  sensor_msgs::PointCloud2 cloud;
  cloud.header.frame_id = "global";
  cloud.header.stamp = ros::Time::now();
  // BUGFIX: one point per feature (was 3 * feats.size(), which padded the
  // cloud with two spurious zero points at the origin for every real feature)
  cloud.width = feats.size();
  cloud.height = 1;
  cloud.is_bigendian = false;
  cloud.is_dense = false; // there may be invalid points

  // Setup pointcloud fields
  sensor_msgs::PointCloud2Modifier modifier(cloud);
  modifier.setPointCloud2FieldsByString(1, "xyz");
  modifier.resize(feats.size());

  // Iterators over the x/y/z float fields
  sensor_msgs::PointCloud2Iterator<float> out_x(cloud, "x");
  sensor_msgs::PointCloud2Iterator<float> out_y(cloud, "y");
  sensor_msgs::PointCloud2Iterator<float> out_z(cloud, "z");

  // Fill our iterators
  for (const auto &pt : feats) {
    *out_x = (float)pt(0);
    ++out_x;
    *out_y = (float)pt(1);
    ++out_y;
    *out_z = (float)pt(2);
    ++out_z;
  }

  return cloud;
}
/**
 * @brief Given a ov_type::PoseJPL this will convert into the ros format.
 *
 * NOTE: frame ids need to be handled externally!
 *
 * @param pose Pose with JPL quaternion (e.g. q_GtoI, p_IinG)
 * @param flip_trans If we should flip / inverse the translation
 * @return TF of our pose in global (e.g. q_ItoG, p_IinG)
 */
static tf::StampedTransform get_stamped_transform_from_pose(const std::shared_ptr<ov_type::PoseJPL> &pose, bool flip_trans) {

  // Pull out the rotation and translation, optionally flipping
  // the translation into the other frame
  Eigen::Vector4d q_ItoC = pose->quat();
  Eigen::Vector3d p_CinI = flip_trans ? Eigen::Vector3d(-pose->Rot().transpose() * pose->pos()) : pose->pos();

  // Build the TF transform
  // NOTE: since we use JPL we have an implicit conversion to Hamilton when we publish
  // NOTE: a rotation from ItoC in JPL has the same xyzw as a CtoI Hamilton rotation
  tf::StampedTransform trans;
  trans.stamp_ = ros::Time::now();
  trans.setRotation(tf::Quaternion(q_ItoC(0), q_ItoC(1), q_ItoC(2), q_ItoC(3)));
  trans.setOrigin(tf::Vector3(p_CinI(0), p_CinI(1), p_CinI(2)));
  return trans;
}
#endif
#if ROS_AVAILABLE == 2
/**
 * @brief Converts a vector of 3d features into a ROS pointcloud message
 * @param node ROS2 node pointer
 * @param feats Vector of features we will convert into ros format
 * @return ROS pointcloud
 */
static sensor_msgs::msg::PointCloud2 get_ros_pointcloud(std::shared_ptr<rclcpp::Node> node, const std::vector<Eigen::Vector3d> &feats) {

  // Declare message and sizes
  sensor_msgs::msg::PointCloud2 cloud;
  cloud.header.frame_id = "global";
  cloud.header.stamp = node->now();
  // BUGFIX: one point per feature (was 3 * feats.size(), which padded the
  // cloud with two spurious zero points at the origin for every real feature)
  cloud.width = feats.size();
  cloud.height = 1;
  cloud.is_bigendian = false;
  cloud.is_dense = false; // there may be invalid points

  // Setup pointcloud fields
  sensor_msgs::PointCloud2Modifier modifier(cloud);
  modifier.setPointCloud2FieldsByString(1, "xyz");
  modifier.resize(feats.size());

  // Iterators over the x/y/z float fields
  sensor_msgs::PointCloud2Iterator<float> out_x(cloud, "x");
  sensor_msgs::PointCloud2Iterator<float> out_y(cloud, "y");
  sensor_msgs::PointCloud2Iterator<float> out_z(cloud, "z");

  // Fill our iterators
  for (const auto &pt : feats) {
    *out_x = (float)pt(0);
    ++out_x;
    *out_y = (float)pt(1);
    ++out_y;
    *out_z = (float)pt(2);
    ++out_z;
  }

  return cloud;
}
/**
 * @brief Given a ov_type::PoseJPL this will convert into the ros format.
 *
 * NOTE: frame ids need to be handled externally!
 *
 * @param node ROS2 node pointer
 * @param pose Pose with JPL quaternion (e.g. q_GtoI, p_IinG)
 * @param flip_trans If we should flip / inverse the translation
 * @return TF of our pose in global (e.g. q_ItoG, p_IinG)
 */
static geometry_msgs::msg::TransformStamped
get_stamped_transform_from_pose(std::shared_ptr<rclcpp::Node> node, const std::shared_ptr<ov_type::PoseJPL> &pose, bool flip_trans) {

  // Grab the orientation (JPL xyzw) and position from the pose
  Eigen::Vector4d quat_jpl = pose->quat();
  Eigen::Vector3d trans_vec = pose->pos();

  // Optionally invert the translation into the other frame of reference
  if (flip_trans) {
    trans_vec = -pose->Rot().transpose() * pose->pos();
  }

  // Build the stamped transform we will publish on TF
  // NOTE: since we use JPL we have an implicit conversion to Hamilton when we publish
  // NOTE: a rotation from ItoC in JPL has the same xyzw as a CtoI Hamilton rotation
  geometry_msgs::msg::TransformStamped trans;
  trans.header.stamp = node->now();
  auto &rot = trans.transform.rotation;
  rot.x = quat_jpl(0);
  rot.y = quat_jpl(1);
  rot.z = quat_jpl(2);
  rot.w = quat_jpl(3);
  auto &pos = trans.transform.translation;
  pos.x = trans_vec(0);
  pos.y = trans_vec(1);
  pos.z = trans_vec(2);
  return trans;
}
/**
 * @brief Helper function that converts time in seconds to our rclcpp timestamp
 * @param seconds Time in seconds
 * @return Return ROS2 header timestamp
 */
static rclcpp::Time get_time_from_seconds(double seconds) {

  // ROS2 time class has no double constructor, so we split into
  // 32-bit seconds + nanoseconds (mirrors the ros1 implementation)
  int64_t whole_sec = static_cast<int64_t>(floor(seconds));
  if (whole_sec < 0 || whole_sec > std::numeric_limits<uint32_t>::max())
    throw std::runtime_error("Time is out of dual 32-bit range");
  uint32_t sec = static_cast<uint32_t>(whole_sec);
  uint32_t nsec = static_cast<uint32_t>(std::round((seconds - sec) * 1e9));

  // Rounding can push nsec up to a full second, so carry the overflow into sec
  sec += (nsec / 1000000000ul);
  nsec %= 1000000000ul;
  return rclcpp::Time(sec, nsec);
}
#endif
/**
 * @brief Save current estimate state and groundtruth including calibration
 *
 * Each call appends one line per file. Column order per line: timestamp,
 * orientation quaternion, position, velocity, gyro bias, accel bias, camera-IMU
 * time offset, number of cameras, then per-camera intrinsics (8 values) and
 * extrinsics (quaternion + position). The std file mirrors this layout with
 * standard deviations (zeros for states that are not being calibrated).
 *
 * @param state Pointer to the state
 * @param sim Pointer to the simulator (or null if running on real data)
 * @param of_state_est Output file for state estimate
 * @param of_state_std Output file for covariance (standard deviations)
 * @param of_state_gt Output file for groundtruth (if we have it from sim)
 */
static void sim_save_total_state_to_file(std::shared_ptr<State> state, std::shared_ptr<Simulator> sim, std::ofstream &of_state_est,
                                         std::ofstream &of_state_std, std::ofstream &of_state_gt) {

  // We want to publish in the IMU clock frame
  // The timestamp in the state will be the last camera time
  double t_ItoC = state->_calib_dt_CAMtoIMU->value()(0);
  double timestamp_inI = state->_timestamp + t_ItoC;

  // If we have our simulator, then save it to our groundtruth file
  if (sim != nullptr) {

    // Note that we get the true time in the IMU clock frame
    // NOTE: we record both the estimate and groundtruth with the same "true" timestamp if we are doing simulation
    Eigen::Matrix<double, 17, 1> state_gt;
    timestamp_inI = state->_timestamp + sim->get_true_parameters().calib_camimu_dt;
    if (sim->get_state(timestamp_inI, state_gt)) {
      // STATE: write current true state
      // (index 0 is time; 1-4 quaternion; 5-7 position; 8-10 velocity; 11-13 gyro bias; 14-16 accel bias)
      of_state_gt.precision(5);
      of_state_gt.setf(std::ios::fixed, std::ios::floatfield);
      of_state_gt << state_gt(0) << " ";
      of_state_gt.precision(6);
      of_state_gt << state_gt(1) << " " << state_gt(2) << " " << state_gt(3) << " " << state_gt(4) << " ";
      of_state_gt << state_gt(5) << " " << state_gt(6) << " " << state_gt(7) << " ";
      of_state_gt << state_gt(8) << " " << state_gt(9) << " " << state_gt(10) << " ";
      of_state_gt << state_gt(11) << " " << state_gt(12) << " " << state_gt(13) << " ";
      of_state_gt << state_gt(14) << " " << state_gt(15) << " " << state_gt(16) << " ";

      // TIMEOFF: Get the current true time offset
      of_state_gt.precision(7);
      of_state_gt << sim->get_true_parameters().calib_camimu_dt << " ";
      of_state_gt.precision(0);
      of_state_gt << state->_options.num_cameras << " ";
      of_state_gt.precision(6);

      // CALIBRATION: Write the camera values to file
      assert(state->_options.num_cameras == sim->get_true_parameters().state_options.num_cameras);
      for (int i = 0; i < state->_options.num_cameras; i++) {
        // Intrinsics values (fx, fy, cx, cy + 4 distortion parameters)
        of_state_gt << sim->get_true_parameters().camera_intrinsics.at(i)->get_value()(0) << " "
                    << sim->get_true_parameters().camera_intrinsics.at(i)->get_value()(1) << " "
                    << sim->get_true_parameters().camera_intrinsics.at(i)->get_value()(2) << " "
                    << sim->get_true_parameters().camera_intrinsics.at(i)->get_value()(3) << " ";
        of_state_gt << sim->get_true_parameters().camera_intrinsics.at(i)->get_value()(4) << " "
                    << sim->get_true_parameters().camera_intrinsics.at(i)->get_value()(5) << " "
                    << sim->get_true_parameters().camera_intrinsics.at(i)->get_value()(6) << " "
                    << sim->get_true_parameters().camera_intrinsics.at(i)->get_value()(7) << " ";
        // Rotation and position (extrinsics: quaternion then translation)
        of_state_gt << sim->get_true_parameters().camera_extrinsics.at(i)(0) << " "
                    << sim->get_true_parameters().camera_extrinsics.at(i)(1) << " "
                    << sim->get_true_parameters().camera_extrinsics.at(i)(2) << " "
                    << sim->get_true_parameters().camera_extrinsics.at(i)(3) << " ";
        of_state_gt << sim->get_true_parameters().camera_extrinsics.at(i)(4) << " "
                    << sim->get_true_parameters().camera_extrinsics.at(i)(5) << " "
                    << sim->get_true_parameters().camera_extrinsics.at(i)(6) << " ";
      }

      // New line
      of_state_gt << endl;
    }
  }

  //==========================================================================
  //==========================================================================
  //==========================================================================

  // Get the covariance of the whole system
  Eigen::MatrixXd cov = StateHelper::get_full_covariance(state);

  // STATE: Write the current state to file
  of_state_est.precision(5);
  of_state_est.setf(std::ios::fixed, std::ios::floatfield);
  of_state_est << timestamp_inI << " ";
  of_state_est.precision(6);
  of_state_est << state->_imu->quat()(0) << " " << state->_imu->quat()(1) << " " << state->_imu->quat()(2) << " "
               << state->_imu->quat()(3) << " ";
  of_state_est << state->_imu->pos()(0) << " " << state->_imu->pos()(1) << " " << state->_imu->pos()(2) << " ";
  of_state_est << state->_imu->vel()(0) << " " << state->_imu->vel()(1) << " " << state->_imu->vel()(2) << " ";
  of_state_est << state->_imu->bias_g()(0) << " " << state->_imu->bias_g()(1) << " " << state->_imu->bias_g()(2) << " ";
  of_state_est << state->_imu->bias_a()(0) << " " << state->_imu->bias_a()(1) << " " << state->_imu->bias_a()(2) << " ";

  // STATE: Write current uncertainty to file
  // (square root of the marginal covariance diagonal, 3 values per state block)
  of_state_std.precision(5);
  of_state_std.setf(std::ios::fixed, std::ios::floatfield);
  of_state_std << timestamp_inI << " ";
  of_state_std.precision(6);
  int id = state->_imu->q()->id();
  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";
  id = state->_imu->p()->id();
  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";
  id = state->_imu->v()->id();
  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";
  id = state->_imu->bg()->id();
  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";
  id = state->_imu->ba()->id();
  of_state_std << std::sqrt(cov(id + 0, id + 0)) << " " << std::sqrt(cov(id + 1, id + 1)) << " " << std::sqrt(cov(id + 2, id + 2)) << " ";

  // TIMEOFF: Get the current estimate time offset
  of_state_est.precision(7);
  of_state_est << state->_calib_dt_CAMtoIMU->value()(0) << " ";
  of_state_est.precision(0);
  of_state_est << state->_options.num_cameras << " ";
  of_state_est.precision(6);

  // TIMEOFF: Get the current std values
  // (zero if the time offset is not being calibrated online)
  if (state->_options.do_calib_camera_timeoffset) {
    of_state_std << std::sqrt(cov(state->_calib_dt_CAMtoIMU->id(), state->_calib_dt_CAMtoIMU->id())) << " ";
  } else {
    of_state_std << 0.0 << " ";
  }
  of_state_std.precision(0);
  of_state_std << state->_options.num_cameras << " ";
  of_state_std.precision(6);

  // CALIBRATION: Write the camera values to file
  for (int i = 0; i < state->_options.num_cameras; i++) {
    // Intrinsics values
    of_state_est << state->_cam_intrinsics.at(i)->value()(0) << " " << state->_cam_intrinsics.at(i)->value()(1) << " "
                 << state->_cam_intrinsics.at(i)->value()(2) << " " << state->_cam_intrinsics.at(i)->value()(3) << " ";
    of_state_est << state->_cam_intrinsics.at(i)->value()(4) << " " << state->_cam_intrinsics.at(i)->value()(5) << " "
                 << state->_cam_intrinsics.at(i)->value()(6) << " " << state->_cam_intrinsics.at(i)->value()(7) << " ";
    // Rotation and position
    of_state_est << state->_calib_IMUtoCAM.at(i)->value()(0) << " " << state->_calib_IMUtoCAM.at(i)->value()(1) << " "
                 << state->_calib_IMUtoCAM.at(i)->value()(2) << " " << state->_calib_IMUtoCAM.at(i)->value()(3) << " ";
    of_state_est << state->_calib_IMUtoCAM.at(i)->value()(4) << " " << state->_calib_IMUtoCAM.at(i)->value()(5) << " "
                 << state->_calib_IMUtoCAM.at(i)->value()(6) << " ";
    // Covariance (zeros keep the column layout fixed when calibration is disabled)
    if (state->_options.do_calib_camera_intrinsics) {
      int index_in = state->_cam_intrinsics.at(i)->id();
      of_state_std << std::sqrt(cov(index_in + 0, index_in + 0)) << " " << std::sqrt(cov(index_in + 1, index_in + 1)) << " "
                   << std::sqrt(cov(index_in + 2, index_in + 2)) << " " << std::sqrt(cov(index_in + 3, index_in + 3)) << " ";
      of_state_std << std::sqrt(cov(index_in + 4, index_in + 4)) << " " << std::sqrt(cov(index_in + 5, index_in + 5)) << " "
                   << std::sqrt(cov(index_in + 6, index_in + 6)) << " " << std::sqrt(cov(index_in + 7, index_in + 7)) << " ";
    } else {
      of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " " << 0.0 << " ";
      of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " " << 0.0 << " ";
    }
    if (state->_options.do_calib_camera_pose) {
      int index_ex = state->_calib_IMUtoCAM.at(i)->id();
      of_state_std << std::sqrt(cov(index_ex + 0, index_ex + 0)) << " " << std::sqrt(cov(index_ex + 1, index_ex + 1)) << " "
                   << std::sqrt(cov(index_ex + 2, index_ex + 2)) << " ";
      of_state_std << std::sqrt(cov(index_ex + 3, index_ex + 3)) << " " << std::sqrt(cov(index_ex + 4, index_ex + 4)) << " "
                   << std::sqrt(cov(index_ex + 5, index_ex + 5)) << " ";
    } else {
      of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " ";
      of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " ";
    }
  }

  // Done with the estimates!
  of_state_est << endl;
  of_state_std << endl;
}
private:
// Cannot create this class
RosVisualizerHelper() = default;
};
} // namespace ov_msckf
#endif // OV_MSCKF_ROSVISUALIZER_HELPER_H

View File

@@ -0,0 +1,353 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <ros/ros.h>
#include <rosbag/bag.h>
#include <rosbag/view.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/Imu.h>
#include <memory>
#include "core/VioManager.h"
#include "core/VioManagerOptions.h"
#include "ros/ROS1Visualizer.h"
#include "utils/dataset_reader.h"
using namespace ov_msckf;

// Global VIO system and its ROS1 visualizer (globals so they outlive main's local scopes)
std::shared_ptr<VioManager> sys;
std::shared_ptr<ROS1Visualizer> viz;
// Main function
int main(int argc, char **argv) {
// Ensure we have a path, if the user passes it then we should use it
std::string config_path = "unset_path_to_config.yaml";
if (argc > 1) {
config_path = argv[1];
}
// Launch our ros node
ros::init(argc, argv, "ros1_serial_msckf");
auto nh = std::make_shared<ros::NodeHandle>("~");
nh->param<std::string>("config_path", config_path, config_path);
// Load the config
auto parser = std::make_shared<ov_core::YamlParser>(config_path);
parser->set_node_handler(nh);
// Verbosity
std::string verbosity = "INFO";
parser->parse_config("verbosity", verbosity);
ov_core::Printer::setPrintLevel(verbosity);
// Create our VIO system
VioManagerOptions params;
params.print_and_load(parser);
params.use_multi_threading = false;
sys = std::make_shared<VioManager>(params);
viz = std::make_shared<ROS1Visualizer>(nh, sys);
// Ensure we read in all parameters required
if (!parser->successful()) {
PRINT_ERROR(RED "unable to parse all parameters, please fix\n" RESET);
std::exit(EXIT_FAILURE);
}
//===================================================================================
//===================================================================================
//===================================================================================
// Our imu topic
std::string topic_imu;
nh->param<std::string>("topic_imu", topic_imu, "/imu0");
parser->parse_external("relative_config_imu", "imu0", "rostopic", topic_imu);
// Our camera topics (stereo pairs and non-stereo mono)
std::vector<std::pair<size_t, std::string>> topic_cameras;
if (params.state_options.num_cameras == 2) {
// Read in the topics
std::string cam_topic0, cam_topic1;
nh->param<std::string>("topic_camera" + std::to_string(0), cam_topic0, "/cam" + std::to_string(0) + "/image_raw");
nh->param<std::string>("topic_camera" + std::to_string(1), cam_topic1, "/cam" + std::to_string(1) + "/image_raw");
parser->parse_external("relative_config_imucam", "cam" + std::to_string(0), "rostopic", cam_topic0);
parser->parse_external("relative_config_imucam", "cam" + std::to_string(1), "rostopic", cam_topic1);
topic_cameras.emplace_back(0, cam_topic0);
topic_cameras.emplace_back(1, cam_topic1);
PRINT_DEBUG("serial cam (stereo): %s\n", cam_topic0.c_str());
PRINT_DEBUG("serial cam (stereo): %s\n", cam_topic1.c_str());
} else {
for (int i = 0; i < params.state_options.num_cameras; i++) {
// read in the topic
std::string cam_topic;
nh->param<std::string>("topic_camera" + std::to_string(i), cam_topic, "/cam" + std::to_string(i) + "/image_raw");
parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "rostopic", cam_topic);
topic_cameras.emplace_back(i, cam_topic);
PRINT_DEBUG("serial cam (mono): %s\n", cam_topic.c_str());
}
}
// Location of the ROS bag we want to read in
std::string path_to_bag;
nh->param<std::string>("path_bag", path_to_bag, "/home/patrick/datasets/eth/V1_01_easy.bag");
PRINT_DEBUG("ros bag path is: %s\n", path_to_bag.c_str());
// Load groundtruth if we have it
std::map<double, Eigen::Matrix<double, 17, 1>> gt_states;
if (nh->hasParam("path_gt")) {
std::string path_to_gt;
nh->param<std::string>("path_gt", path_to_gt, "");
if (!path_to_gt.empty()) {
ov_core::DatasetReader::load_gt_file(path_to_gt, gt_states);
PRINT_DEBUG("gt file path is: %s\n", path_to_gt.c_str());
}
}
// Get our start location and how much of the bag we want to play
// Make the bag duration < 0 to just process to the end of the bag
double bag_start, bag_durr;
nh->param<double>("bag_start", bag_start, 0);
nh->param<double>("bag_durr", bag_durr, -1);
PRINT_DEBUG("bag start: %.1f\n", bag_start);
PRINT_DEBUG("bag duration: %.1f\n", bag_durr);
//===================================================================================
//===================================================================================
//===================================================================================
// Load rosbag here, and find messages we can play
rosbag::Bag bag;
bag.open(path_to_bag, rosbag::bagmode::Read);
// We should load the bag as a view
// Here we go from beginning of the bag to the end of the bag
rosbag::View view_full;
rosbag::View view;
// Start a few seconds in from the full view time
// If we have a negative duration then use the full bag length
view_full.addQuery(bag);
ros::Time time_init = view_full.getBeginTime();
time_init += ros::Duration(bag_start);
ros::Time time_finish = (bag_durr < 0) ? view_full.getEndTime() : time_init + ros::Duration(bag_durr);
PRINT_DEBUG("time start = %.6f\n", time_init.toSec());
PRINT_DEBUG("time end = %.6f\n", time_finish.toSec());
view.addQuery(bag, time_init, time_finish);
// Check to make sure we have data to play
if (view.size() == 0) {
PRINT_ERROR(RED "No messages to play on specified topics. Exiting.\n" RESET);
ros::shutdown();
return EXIT_FAILURE;
}
// Open our iterators
auto view_imu = std::make_shared<rosbag::View>(bag, rosbag::TopicQuery(topic_imu), time_init, time_finish);
auto view_imu_iter = view_imu->begin();
std::vector<std::shared_ptr<rosbag::View>> view_cameras;
std::vector<rosbag::View::iterator> view_cameras_iterators;
for (const auto &topic : topic_cameras) {
auto view_tmp = std::make_shared<rosbag::View>(bag, rosbag::TopicQuery(topic.second), time_init, time_finish);
view_cameras.push_back(view_tmp);
view_cameras_iterators.push_back(view_tmp->begin());
}
// Record the current measurement timestamps
sensor_msgs::Imu::ConstPtr msg_imu_current;
sensor_msgs::Imu::ConstPtr msg_imu_next;
std::vector<sensor_msgs::Image::ConstPtr> msg_images_current;
std::vector<sensor_msgs::Image::ConstPtr> msg_images_next;
msg_imu_current = view_imu_iter->instantiate<sensor_msgs::Imu>();
view_imu_iter++;
msg_imu_next = view_imu_iter->instantiate<sensor_msgs::Imu>();
for (int i = 0; i < params.state_options.num_cameras; i++) {
msg_images_current.emplace_back(view_cameras_iterators.at(i)->instantiate<sensor_msgs::Image>());
view_cameras_iterators.at(i)++;
msg_images_next.emplace_back(view_cameras_iterators.at(i)->instantiate<sensor_msgs::Image>());
}
// Last camera message timestamps we have received (mapped by cam id)
std::map<int, double> camera_last_timestamp;
//===================================================================================
//===================================================================================
//===================================================================================
while (ros::ok()) {
// Check if we should end since we have run out of measurements
bool should_stop = false;
if (view_imu_iter == view_imu->end()) {
should_stop = true;
}
for (int i = 0; i < params.state_options.num_cameras; i++) {
if (view_cameras_iterators.at(i) == view_cameras.at(i)->end()) {
should_stop = true;
}
}
if (should_stop) {
break;
}
// We should process the IMU if all the current cameras are greater then its time
bool should_process_imu = false;
for (int i = 0; i < params.state_options.num_cameras; i++) {
double time_imu = msg_imu_current->header.stamp.toSec();
double time_cam = msg_images_current.at(i)->header.stamp.toSec();
if (time_imu <= time_cam) {
should_process_imu = true;
}
}
if (should_process_imu) {
viz->callback_inertial(msg_imu_current);
msg_imu_current = msg_imu_next;
view_imu_iter++;
msg_imu_next = view_imu_iter->instantiate<sensor_msgs::Imu>();
continue;
}
// If we are stereo, then we should collect both the left and right
if (params.state_options.num_cameras == 2) {
// Now lets do some logic to find two images which are next to each other
// We want to ensure that our stereo pair are very close to occurring at the same time
bool have_found_pair = false;
while (!have_found_pair && view_cameras_iterators.at(0) != view_cameras.at(0)->end() &&
view_cameras_iterators.at(1) != view_cameras.at(1)->end()) {
// Get left and right cameras
auto msg_cam0 = msg_images_current.at(0);
auto msg_cam1 = msg_images_current.at(1);
auto msg_cam0_next = msg_images_next.at(0);
auto msg_cam1_next = msg_images_next.at(1);
// timestamps
double time0 = msg_cam0->header.stamp.toSec();
double time1 = msg_cam1->header.stamp.toSec();
double time0_next = msg_cam0_next->header.stamp.toSec();
double time1_next = msg_cam1_next->header.stamp.toSec();
// We will have a match if the current left and right images are closer then the next one
// Consider the case that we drop an image:
// (L1) L2 (R2) R3 <- current pointers are at L1 and R2
// In this case, we dropped the R1 image, thus we should skip the L1 image
// We can check to see that L1 is further away compared to L2 from R2
// Thus we should skip the L1 frame (advance the bag forward) and check this logic again!
if (std::abs(time1 - time0) < std::abs(time1_next - time0) && std::abs(time0 - time1) < std::abs(time0_next - time1)) {
have_found_pair = true;
} else if (std::abs(time1 - time0) >= std::abs(time1_next - time0)) {
// PRINT_WARNING("skipping cam1 (%.4f >= %.4f)",std::abs(time1-time0), std::abs(time1_next-time0));
msg_images_current.at(1) = msg_images_next.at(1);
view_cameras_iterators.at(1)++;
if (view_cameras_iterators.at(1) != view_cameras.at(1)->end()) {
msg_images_next.at(1) = view_cameras_iterators.at(1)->instantiate<sensor_msgs::Image>();
}
} else {
// PRINT_WARNING("skipping cam0 (%.4f >= %.4f)",std::abs(time0-time1), std::abs(time0_next-time1));
msg_images_current.at(0) = msg_images_next.at(0);
view_cameras_iterators.at(0)++;
if (view_cameras_iterators.at(0) != view_cameras.at(0)->end()) {
msg_images_next.at(0) = view_cameras_iterators.at(0)->instantiate<sensor_msgs::Image>();
}
}
}
// Break out if we have ended
if (view_cameras_iterators.at(0) == view_cameras.at(0)->end() || view_cameras_iterators.at(1) == view_cameras.at(1)->end()) {
break;
}
// Check if we should initialize using the groundtruth (always use left)
Eigen::Matrix<double, 17, 1> imustate;
if (!gt_states.empty() && !sys->initialized() &&
ov_core::DatasetReader::get_gt_state(msg_images_current.at(0)->header.stamp.toSec(), imustate, gt_states)) {
// biases are pretty bad normally, so zero them
// imustate.block(11,0,6,1).setZero();
sys->initialize_with_gt(imustate);
}
// Check if we should feed this into the system at the specified frequency
double timestamp = msg_images_current.at(0)->header.stamp.toSec();
double time_delta = 1.0 / params.track_frequency;
if (camera_last_timestamp.find(0) == camera_last_timestamp.end() || timestamp >= camera_last_timestamp.at(0) + time_delta) {
camera_last_timestamp[0] = timestamp;
viz->callback_stereo(msg_images_current.at(0), msg_images_current.at(1), 0, 1);
}
// Move forward in time
msg_images_current.at(0) = msg_images_next.at(0);
view_cameras_iterators.at(0)++;
if (view_cameras_iterators.at(0) != view_cameras.at(0)->end()) {
msg_images_next.at(0) = view_cameras_iterators.at(0)->instantiate<sensor_msgs::Image>();
}
msg_images_current.at(1) = msg_images_next.at(1);
view_cameras_iterators.at(1)++;
if (view_cameras_iterators.at(1) != view_cameras.at(1)->end()) {
msg_images_next.at(1) = view_cameras_iterators.at(1)->instantiate<sensor_msgs::Image>();
}
} else {
// Find the camera which should be processed (smallest time)
int smallest_cam = 0;
for (int i = 0; i < params.state_options.num_cameras; i++) {
double time_cam0 = msg_images_current.at(smallest_cam)->header.stamp.toSec();
double time_cam1 = msg_images_current.at(i)->header.stamp.toSec();
if (time_cam1 < time_cam0) {
smallest_cam = i;
}
}
// Check if we should initialize using the groundtruth
auto msg_camera = msg_images_current.at(smallest_cam);
Eigen::Matrix<double, 17, 1> imustate;
if (!gt_states.empty() && !sys->initialized() &&
ov_core::DatasetReader::get_gt_state(msg_camera->header.stamp.toSec(), imustate, gt_states)) {
// biases are pretty bad normally, so zero them
// imustate.block(11,0,6,1).setZero();
sys->initialize_with_gt(imustate);
}
// Check if we should feed this into the system at the specified frequency
double timestamp = msg_camera->header.stamp.toSec();
double time_delta = 1.0 / params.track_frequency;
if (camera_last_timestamp.find(smallest_cam) == camera_last_timestamp.end() ||
timestamp >= camera_last_timestamp.at(smallest_cam) + time_delta) {
camera_last_timestamp[smallest_cam] = timestamp;
viz->callback_monocular(msg_camera, smallest_cam);
}
// move forward
msg_images_current.at(smallest_cam) = msg_images_next.at(smallest_cam);
view_cameras_iterators.at(smallest_cam)++;
if (view_cameras_iterators.at(smallest_cam) != view_cameras.at(smallest_cam)->end()) {
msg_images_next.at(smallest_cam) = view_cameras_iterators.at(smallest_cam)->instantiate<sensor_msgs::Image>();
}
}
}
// Final visualization
viz->visualize_final();
// Done!
return EXIT_SUCCESS;
}

View File

@@ -0,0 +1,191 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <csignal>
#include <memory>
#include "core/VioManager.h"
#include "sim/Simulator.h"
#include "utils/colors.h"
#include "utils/dataset_reader.h"
#include "utils/print.h"
#include "utils/sensor_data.h"
#if ROS_AVAILABLE == 1
#include "ros/ROS1Visualizer.h"
#include <ros/ros.h>
#elif ROS_AVAILABLE == 2
#include "ros/ROS2Visualizer.h"
#include <rclcpp/rclcpp.hpp>
#endif
using namespace ov_msckf;

// Global simulator, VIO system, and visualizer (globals so the SIGINT handler stays trivial)
std::shared_ptr<Simulator> sim;
std::shared_ptr<VioManager> sys;
#if ROS_AVAILABLE == 1
std::shared_ptr<ROS1Visualizer> viz;
#elif ROS_AVAILABLE == 2
std::shared_ptr<ROS2Visualizer> viz;
#endif

// Define the function to be called when ctrl-c (SIGINT) is sent to process
// (only installed in the non-ROS build; ROS installs its own handlers)
void signal_callback_handler(int signum) { std::exit(signum); }
// Main function: runs the VIO system on fully simulated IMU + camera feature
// measurements, initializing the filter from the simulator's groundtruth.
int main(int argc, char **argv) {

  // Ensure we have a path, if the user passes it then we should use it
  std::string config_path = "unset_path_to_config.yaml";
  if (argc > 1) {
    config_path = argv[1];
  }

#if ROS_AVAILABLE == 1
  // Launch our ros node
  ros::init(argc, argv, "run_simulation");
  auto nh = std::make_shared<ros::NodeHandle>("~");
  nh->param<std::string>("config_path", config_path, config_path);
#elif ROS_AVAILABLE == 2
  // Launch our ros node
  rclcpp::init(argc, argv);
  rclcpp::NodeOptions options;
  options.allow_undeclared_parameters(true);
  options.automatically_declare_parameters_from_overrides(true);
  auto node = std::make_shared<rclcpp::Node>("run_simulation", options);
  node->get_parameter<std::string>("config_path", config_path);
#endif

  // Load the config
  auto parser = std::make_shared<ov_core::YamlParser>(config_path);
#if ROS_AVAILABLE == 1
  parser->set_node_handler(nh);
#elif ROS_AVAILABLE == 2
  parser->set_node(node);
#endif

  // Verbosity
  std::string verbosity = "INFO";
  parser->parse_config("verbosity", verbosity);
  ov_core::Printer::setPrintLevel(verbosity);

  // Create our VIO system (single threaded so the simulation is deterministic)
  VioManagerOptions params;
  params.print_and_load(parser);
  params.print_and_load_simulation(parser);
  params.use_multi_threading = false;
  sim = std::make_shared<Simulator>(params);
  sys = std::make_shared<VioManager>(params);
#if ROS_AVAILABLE == 1
  viz = std::make_shared<ROS1Visualizer>(nh, sys, sim);
#elif ROS_AVAILABLE == 2
  viz = std::make_shared<ROS2Visualizer>(node, sys, sim);
#endif

  // Ensure we read in all parameters required
  if (!parser->successful()) {
    PRINT_ERROR(RED "unable to parse all parameters, please fix\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  //===================================================================================
  //===================================================================================
  //===================================================================================

  // Get initial state
  Eigen::Matrix<double, 17, 1> imustate;
  bool success = sim->get_state(sim->current_timestamp(), imustate);
  if (!success) {
    PRINT_ERROR(RED "[SIM]: Could not initialize the filter to the first state\n" RESET);
    PRINT_ERROR(RED "[SIM]: Did the simulator load properly???\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Since the state time is in the camera frame of reference
  // Subtract out the imu to camera time offset
  imustate(0, 0) -= sim->get_true_parameters().calib_camimu_dt;

  // Initialize our filter with the groundtruth
  sys->initialize_with_gt(imustate);

  //===================================================================================
  //===================================================================================
  //===================================================================================

  // Buffer our camera image
  // NOTE: measurements are delayed by one frame so IMU readings up to (and past)
  // the camera time have been fed to the filter before the update is performed
  double buffer_timecam = -1;
  std::vector<int> buffer_camids;
  std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> buffer_feats;

  // Step through the rosbag
  // NOTE: the while-loop header is spliced together by the preprocessor so that
  // the loop also terminates when ROS (if present) is shut down
#if ROS_AVAILABLE == 1
  while (sim->ok() && ros::ok()) {
#elif ROS_AVAILABLE == 2
  while (sim->ok() && rclcpp::ok()) {
#else
  signal(SIGINT, signal_callback_handler);
  while (sim->ok()) {
#endif

    // IMU: get the next simulated IMU measurement if we have it
    ov_core::ImuData message_imu;
    bool hasimu = sim->get_next_imu(message_imu.timestamp, message_imu.wm, message_imu.am);
    if (hasimu) {
      sys->feed_measurement_imu(message_imu);
#if ROS_AVAILABLE == 1 || ROS_AVAILABLE == 2
      // TODO: fix this, can be slow at high frequency...
      // viz->visualize_odometry(message_imu.timestamp - sim->get_true_parameters().calib_camimu_dt);
#endif
    }

    // CAM: get the next simulated camera uv measurements if we have them
    double time_cam;
    std::vector<int> camids;
    std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> feats;
    bool hascam = sim->get_next_cam(time_cam, camids, feats);
    if (hascam) {
      // Process the *previous* buffered frame now that IMU has caught up
      if (buffer_timecam != -1) {
#if ROS_AVAILABLE == 1 || ROS_AVAILABLE == 2
        viz->visualize_odometry(buffer_timecam);
#endif
        sys->feed_measurement_simulation(buffer_timecam, buffer_camids, buffer_feats);
#if ROS_AVAILABLE == 1 || ROS_AVAILABLE == 2
        viz->visualize();
#endif
      }
      buffer_timecam = time_cam;
      buffer_camids = camids;
      buffer_feats = feats;
    }
  }

  // Final visualization
#if ROS_AVAILABLE == 1
  viz->visualize_final();
  ros::shutdown();
#elif ROS_AVAILABLE == 2
  viz->visualize_final();
  rclcpp::shutdown();
#endif

  // Done!
  return EXIT_SUCCESS;
}

View File

@@ -0,0 +1,124 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory>
#include "core/VioManager.h"
#include "core/VioManagerOptions.h"
#include "utils/dataset_reader.h"
#if ROS_AVAILABLE == 1
#include "ros/ROS1Visualizer.h"
#include <ros/ros.h>
#elif ROS_AVAILABLE == 2
#include "ros/ROS2Visualizer.h"
#include <rclcpp/rclcpp.hpp>
#endif
using namespace ov_msckf;
std::shared_ptr<VioManager> sys;
#if ROS_AVAILABLE == 1
std::shared_ptr<ROS1Visualizer> viz;
#elif ROS_AVAILABLE == 2
std::shared_ptr<ROS2Visualizer> viz;
#endif
// Main function
// Entry point for the live "subscribe" frontend.
// Builds the VIO system from a YAML config, wires it to ROS1 or ROS2 topic
// subscribers (selected at compile time via ROS_AVAILABLE), spins until
// shutdown, then publishes the final visualization.
int main(int argc, char **argv) {

  // Ensure we have a path, if the user passes it then we should use it
  std::string config_path = "unset_path_to_config.yaml";
  if (argc > 1) {
    config_path = argv[1];
  }

#if ROS_AVAILABLE == 1
  // Launch our ros node
  // NOTE: the "config_path" ROS parameter, if set, overrides the CLI argument
  ros::init(argc, argv, "run_subscribe_msckf");
  auto nh = std::make_shared<ros::NodeHandle>("~");
  nh->param<std::string>("config_path", config_path, config_path);
#elif ROS_AVAILABLE == 2
  // Launch our ros node
  // Undeclared parameters are allowed/auto-declared so YAML overrides work
  rclcpp::init(argc, argv);
  rclcpp::NodeOptions options;
  options.allow_undeclared_parameters(true);
  options.automatically_declare_parameters_from_overrides(true);
  auto node = std::make_shared<rclcpp::Node>("run_subscribe_msckf", options);
  node->get_parameter<std::string>("config_path", config_path);
#endif

  // Load the config
  auto parser = std::make_shared<ov_core::YamlParser>(config_path);
#if ROS_AVAILABLE == 1
  parser->set_node_handler(nh);
#elif ROS_AVAILABLE == 2
  parser->set_node(node);
#endif

  // Verbosity (global print level for the whole system)
  std::string verbosity = "DEBUG";
  parser->parse_config("verbosity", verbosity);
  ov_core::Printer::setPrintLevel(verbosity);

  // Create our VIO system
  VioManagerOptions params;
  params.print_and_load(parser);
  sys = std::make_shared<VioManager>(params);
#if ROS_AVAILABLE == 1
  viz = std::make_shared<ROS1Visualizer>(nh, sys);
  viz->setup_subscribers(parser);
#elif ROS_AVAILABLE == 2
  viz = std::make_shared<ROS2Visualizer>(node, sys);
  viz->setup_subscribers(parser);
#endif

  // Ensure we read in all parameters required
  // (fail fast on a bad config rather than running with defaults)
  if (!parser->successful()) {
    PRINT_ERROR(RED "unable to parse all parameters, please fix\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Spin off to ROS
  // NOTE: multi-threaded spinners are used so callbacks can run concurrently
  PRINT_DEBUG("done...spinning to ros\n");
#if ROS_AVAILABLE == 1
  // ros::spin();
  ros::AsyncSpinner spinner(0);
  spinner.start();
  ros::waitForShutdown();
#elif ROS_AVAILABLE == 2
  // rclcpp::spin(node);
  rclcpp::executors::MultiThreadedExecutor executor;
  executor.add_node(node);
  executor.spin();
#endif

  // Final visualization (trajectory/statistics dump after shutdown request)
  viz->visualize_final();
#if ROS_AVAILABLE == 1
  ros::shutdown();
#elif ROS_AVAILABLE == 2
  rclcpp::shutdown();
#endif

  // Done!
  return EXIT_SUCCESS;
}

View File

@@ -0,0 +1,494 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Simulator.h"
using namespace ov_core;
using namespace ov_msckf;
// Constructor: loads the groundtruth trajectory, fits the SE(3) b-spline,
// advances to where the platform starts moving, seeds all RNGs, optionally
// perturbs the caller's calibration, and pre-generates the 3d feature map.
Simulator::Simulator(VioManagerOptions &params_) {

  //===============================================================
  //===============================================================

  // Nice startup message
  PRINT_DEBUG("=======================================\n");
  PRINT_DEBUG("VISUAL-INERTIAL SIMULATOR STARTING\n");
  PRINT_DEBUG("=======================================\n");

  // Store a copy of our params
  // NOTE: We need to explicitly create a copy of our shared pointers to the camera objects
  // NOTE: Otherwise if we perturb it would also change our "true" parameters
  this->params = params_;
  params.camera_intrinsics.clear();
  for (auto const &tmp : params_.camera_intrinsics) {
    // Recreate each camera with the same model type (equidistant vs radtan)
    // and copy its intrinsic values, so the deep copy is truly independent
    auto tmp_cast = std::dynamic_pointer_cast<ov_core::CamEqui>(tmp.second);
    if (tmp_cast != nullptr) {
      params.camera_intrinsics.insert({tmp.first, std::make_shared<ov_core::CamEqui>(tmp.second->w(), tmp.second->h())});
      params.camera_intrinsics.at(tmp.first)->set_value(params_.camera_intrinsics.at(tmp.first)->get_value());
    } else {
      params.camera_intrinsics.insert({tmp.first, std::make_shared<ov_core::CamRadtan>(tmp.second->w(), tmp.second->h())});
      params.camera_intrinsics.at(tmp.first)->set_value(params_.camera_intrinsics.at(tmp.first)->get_value());
    }
  }

  // Load the groundtruth trajectory and its spline
  DatasetReader::load_simulated_trajectory(params.sim_traj_path, traj_data);
  spline.feed_trajectory(traj_data);

  // Set all our timestamps as starting from the minimum spline time
  timestamp = spline.get_start_time();
  timestamp_last_imu = spline.get_start_time();
  timestamp_last_cam = spline.get_start_time();

  // Get the pose at the current timestep
  Eigen::Matrix3d R_GtoI_init;
  Eigen::Vector3d p_IinG_init;
  bool success_pose_init = spline.get_pose(timestamp, R_GtoI_init, p_IinG_init);
  if (!success_pose_init) {
    PRINT_ERROR(RED "[SIM]: unable to find the first pose in the spline...\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Find the timestamp that we move enough to be considered "moved"
  // (skip forward until sim_distance_threshold meters have been travelled,
  // so the estimator initializes where there is real motion)
  double distance = 0.0;
  double distancethreshold = params.sim_distance_threshold;
  while (true) {

    // Get the pose at the current timestep
    Eigen::Matrix3d R_GtoI;
    Eigen::Vector3d p_IinG;
    bool success_pose = spline.get_pose(timestamp, R_GtoI, p_IinG);

    // Check if it fails
    if (!success_pose) {
      PRINT_ERROR(RED "[SIM]: unable to find jolt in the groundtruth data to initialize at\n" RESET);
      std::exit(EXIT_FAILURE);
    }

    // Append to our scalar distance
    distance += (p_IinG - p_IinG_init).norm();
    p_IinG_init = p_IinG;

    // Now check if we have an acceleration, else move forward in time
    if (distance > distancethreshold) {
      break;
    } else {
      // Advance all clocks by one camera period
      timestamp += 1.0 / params.sim_freq_cam;
      timestamp_last_imu += 1.0 / params.sim_freq_cam;
      timestamp_last_cam += 1.0 / params.sim_freq_cam;
    }
  }
  PRINT_DEBUG("[SIM]: moved %.3f seconds into the dataset where it starts moving\n", timestamp - spline.get_start_time());

  // Append the current true bias to our history
  // NOTE: two entries are pushed so get_state() can interpolate biases even
  // before the first IMU measurement has been generated
  hist_true_bias_time.push_back(timestamp_last_imu - 1.0 / params.sim_freq_imu);
  hist_true_bias_accel.push_back(true_bias_accel);
  hist_true_bias_gyro.push_back(true_bias_gyro);
  hist_true_bias_time.push_back(timestamp_last_imu);
  hist_true_bias_accel.push_back(true_bias_accel);
  hist_true_bias_gyro.push_back(true_bias_gyro);

  // Our simulation is running
  is_running = true;

  //===============================================================
  //===============================================================

  // Load the seeds for the random number generators
  gen_state_init = std::mt19937(params.sim_seed_state_init);
  gen_state_init.seed(params.sim_seed_state_init);
  gen_state_perturb = std::mt19937(params.sim_seed_preturb);
  gen_state_perturb.seed(params.sim_seed_preturb);
  gen_meas_imu = std::mt19937(params.sim_seed_measurements);
  gen_meas_imu.seed(params.sim_seed_measurements);

  // Create generator for our camera
  // NOTE: every camera is seeded identically from sim_seed_measurements
  for (int i = 0; i < params.state_options.num_cameras; i++) {
    gen_meas_cams.push_back(std::mt19937(params.sim_seed_measurements));
    gen_meas_cams.at(i).seed(params.sim_seed_measurements);
  }

  //===============================================================
  //===============================================================

  // Perturb all calibration if we should
  if (params.sim_do_perturbation) {

    // Do the perturbation
    // NOTE(review): this perturbs the caller's params_ (used by the estimator)
    // while this->params keeps the unperturbed "true" values — confirm that the
    // print below intentionally prints the TRUE (unperturbed) parameters
    perturb_parameters(gen_state_perturb, params_);

    // Debug print simulation parameters
    params.print_and_load_estimator();
    params.print_and_load_noise();
    params.print_and_load_state();
    params.print_and_load_trackers();
    params.print_and_load_simulation();
  }

  //===============================================================
  //===============================================================

  // We will create synthetic camera frames and ensure that each has enough features
  // double dt = 0.25/freq_cam;
  double dt = 0.25;
  size_t mapsize = featmap.size();
  PRINT_DEBUG("[SIM]: Generating map features at %d rate\n", (int)(1.0 / dt));

  // Loop through each camera
  // NOTE: we loop through cameras here so that the feature map for camera 1 will always be the same
  // NOTE: thus when we add more cameras the first camera should get the same measurements
  for (int i = 0; i < params.state_options.num_cameras; i++) {

    // Reset the start time
    double time_synth = spline.get_start_time();

    // Loop through each pose and generate our feature map in them!!!!
    while (true) {

      // Get the pose at the current timestep
      Eigen::Matrix3d R_GtoI;
      Eigen::Vector3d p_IinG;
      bool success_pose = spline.get_pose(time_synth, R_GtoI, p_IinG);

      // We have finished generating features (walked off the end of the spline)
      if (!success_pose)
        break;

      // Get the uv features for this frame
      std::vector<std::pair<size_t, Eigen::VectorXf>> uvs = project_pointcloud(R_GtoI, p_IinG, i, featmap);

      // If we do not have enough, generate more
      if ((int)uvs.size() < params.num_pts) {
        generate_points(R_GtoI, p_IinG, i, featmap, params.num_pts - (int)uvs.size());
      }

      // Move forward in time
      time_synth += dt;
    }

    // Debug print
    PRINT_DEBUG("[SIM]: Generated %d map features in total over %d frames (camera %d)\n", (int)(featmap.size() - mapsize),
                (int)((time_synth - spline.get_start_time()) / dt), i);
    mapsize = featmap.size();
  }

  // Nice sleep so the user can look at the printout
  sleep(1);
}
// Applies Gaussian perturbations to the calibration parameters in params_:
// camera-IMU time offset, camera intrinsics, and camera extrinsics.
// NOTE(review): gen_state is taken BY VALUE, so the caller's generator state is
// not advanced by this call; two calls with the same generator copy would yield
// identical perturbations — confirm this is intentional.
void Simulator::perturb_parameters(std::mt19937 gen_state, VioManagerOptions &params_) {

  // One std generator
  std::normal_distribution<double> w(0, 1);

  // Camera IMU offset (1-sigma of 0.01 seconds)
  params_.calib_camimu_dt += 0.01 * w(gen_state);

  // Camera intrinsics and extrinsics
  for (int i = 0; i < params_.state_options.num_cameras; i++) {

    // Camera intrinsic properties (k1, k2, p1, p2, r1, r2, r3, r4)
    // First four entries (focal/center, 1-sigma 1px) then distortion (1-sigma 0.005)
    Eigen::MatrixXd intrinsics = params_.camera_intrinsics.at(i)->get_value();
    for (int r = 0; r < 4; r++) {
      intrinsics(r) += 1.0 * w(gen_state);
    }
    for (int r = 4; r < 8; r++) {
      intrinsics(r) += 0.005 * w(gen_state);
    }
    params_.camera_intrinsics.at(i)->set_value(intrinsics);

    // Our camera extrinsics transform (orientation)
    // Perturb on the SO(3) manifold: left-multiply by exp of a small axis-angle
    Eigen::Vector3d w_vec;
    w_vec << 0.001 * w(gen_state), 0.001 * w(gen_state), 0.001 * w(gen_state);
    params_.camera_extrinsics.at(i).block(0, 0, 4, 1) =
        rot_2_quat(exp_so3(w_vec) * quat_2_Rot(params_.camera_extrinsics.at(i).block(0, 0, 4, 1)));

    // Our camera extrinsics transform (position, 1-sigma of 0.01m per axis)
    for (int r = 4; r < 7; r++) {
      params_.camera_extrinsics.at(i)(r) += 0.01 * w(gen_state);
    }
  }
}
bool Simulator::get_state(double desired_time, Eigen::Matrix<double, 17, 1> &imustate) {
// Set to default state
imustate.setZero();
imustate(4) = 1;
// Current state values
Eigen::Matrix3d R_GtoI;
Eigen::Vector3d p_IinG, w_IinI, v_IinG;
// Get the pose, velocity, and acceleration
bool success_vel = spline.get_velocity(desired_time, R_GtoI, p_IinG, w_IinI, v_IinG);
// Find the bounding bias values
bool success_bias = false;
size_t id_loc = 0;
for (size_t i = 0; i < hist_true_bias_time.size() - 1; i++) {
if (hist_true_bias_time.at(i) < desired_time && hist_true_bias_time.at(i + 1) >= desired_time) {
id_loc = i;
success_bias = true;
break;
}
}
// If failed, then that means we don't have any more spline or bias
if (!success_vel || !success_bias) {
return false;
}
// Interpolate our biases (they will be at every IMU timestep)
double lambda = (desired_time - hist_true_bias_time.at(id_loc)) / (hist_true_bias_time.at(id_loc + 1) - hist_true_bias_time.at(id_loc));
Eigen::Vector3d true_bg_interp = (1 - lambda) * hist_true_bias_gyro.at(id_loc) + lambda * hist_true_bias_gyro.at(id_loc + 1);
Eigen::Vector3d true_ba_interp = (1 - lambda) * hist_true_bias_accel.at(id_loc) + lambda * hist_true_bias_accel.at(id_loc + 1);
// Finally lets create the current state
imustate(0, 0) = desired_time;
imustate.block(1, 0, 4, 1) = rot_2_quat(R_GtoI);
imustate.block(5, 0, 3, 1) = p_IinG;
imustate.block(8, 0, 3, 1) = v_IinG;
imustate.block(11, 0, 3, 1) = true_bg_interp;
imustate.block(14, 0, 3, 1) = true_ba_interp;
return true;
}
bool Simulator::get_next_imu(double &time_imu, Eigen::Vector3d &wm, Eigen::Vector3d &am) {
// Return if the camera measurement should go before us
if (timestamp_last_cam + 1.0 / params.sim_freq_cam < timestamp_last_imu + 1.0 / params.sim_freq_imu)
return false;
// Else lets do a new measurement!!!
timestamp_last_imu += 1.0 / params.sim_freq_imu;
timestamp = timestamp_last_imu;
time_imu = timestamp_last_imu;
// Current state values
Eigen::Matrix3d R_GtoI;
Eigen::Vector3d p_IinG, w_IinI, v_IinG, alpha_IinI, a_IinG;
// Get the pose, velocity, and acceleration
// NOTE: we get the acceleration between our two IMU
// NOTE: this is because we are using a constant measurement model for integration
// bool success_accel = spline.get_acceleration(timestamp+0.5/freq_imu, R_GtoI, p_IinG, w_IinI, v_IinG, alpha_IinI, a_IinG);
bool success_accel = spline.get_acceleration(timestamp, R_GtoI, p_IinG, w_IinI, v_IinG, alpha_IinI, a_IinG);
// If failed, then that means we don't have any more spline
// Thus we should stop the simulation
if (!success_accel) {
is_running = false;
return false;
}
// Transform omega and linear acceleration into imu frame
Eigen::Vector3d omega_inI = w_IinI;
Eigen::Vector3d gravity;
gravity << 0.0, 0.0, params.gravity_mag;
Eigen::Vector3d accel_inI = R_GtoI * (a_IinG + gravity);
// Now add noise to these measurements
double dt = 1.0 / params.sim_freq_imu;
std::normal_distribution<double> w(0, 1);
wm(0) = omega_inI(0) + true_bias_gyro(0) + params.imu_noises.sigma_w / std::sqrt(dt) * w(gen_meas_imu);
wm(1) = omega_inI(1) + true_bias_gyro(1) + params.imu_noises.sigma_w / std::sqrt(dt) * w(gen_meas_imu);
wm(2) = omega_inI(2) + true_bias_gyro(2) + params.imu_noises.sigma_w / std::sqrt(dt) * w(gen_meas_imu);
am(0) = accel_inI(0) + true_bias_accel(0) + params.imu_noises.sigma_a / std::sqrt(dt) * w(gen_meas_imu);
am(1) = accel_inI(1) + true_bias_accel(1) + params.imu_noises.sigma_a / std::sqrt(dt) * w(gen_meas_imu);
am(2) = accel_inI(2) + true_bias_accel(2) + params.imu_noises.sigma_a / std::sqrt(dt) * w(gen_meas_imu);
// Move the biases forward in time
true_bias_gyro(0) += params.imu_noises.sigma_wb * std::sqrt(dt) * w(gen_meas_imu);
true_bias_gyro(1) += params.imu_noises.sigma_wb * std::sqrt(dt) * w(gen_meas_imu);
true_bias_gyro(2) += params.imu_noises.sigma_wb * std::sqrt(dt) * w(gen_meas_imu);
true_bias_accel(0) += params.imu_noises.sigma_ab * std::sqrt(dt) * w(gen_meas_imu);
true_bias_accel(1) += params.imu_noises.sigma_ab * std::sqrt(dt) * w(gen_meas_imu);
true_bias_accel(2) += params.imu_noises.sigma_ab * std::sqrt(dt) * w(gen_meas_imu);
// Append the current true bias to our history
hist_true_bias_time.push_back(timestamp_last_imu);
hist_true_bias_gyro.push_back(true_bias_gyro);
hist_true_bias_accel.push_back(true_bias_accel);
// Return success
return true;
}
// Produces the next set of noisy uv measurements for every camera if a camera
// frame is due before the next IMU sample. The reported time is in the camera
// clock (IMU time minus the camera-IMU offset).
bool Simulator::get_next_cam(double &time_cam, std::vector<int> &camids,
                             std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats) {

  // An IMU sample is due first, so no camera frame yet
  if (timestamp_last_imu + 1.0 / params.sim_freq_imu < timestamp_last_cam + 1.0 / params.sim_freq_cam)
    return false;

  // Advance the camera clock and report the offset-corrected stamp
  timestamp_last_cam += 1.0 / params.sim_freq_cam;
  timestamp = timestamp_last_cam;
  time_cam = timestamp_last_cam - params.calib_camimu_dt;

  // True IMU pose at this instant
  Eigen::Matrix3d R_GtoI;
  Eigen::Vector3d p_IinG;
  if (!spline.get_pose(timestamp, R_GtoI, p_IinG)) {
    // Past the end of the trajectory: the simulation is over
    is_running = false;
    return false;
  }

  // Generate a measurement set for each camera in turn
  for (int camid = 0; camid < params.state_options.num_cameras; camid++) {

    // Project the true feature map into this camera
    std::vector<std::pair<size_t, Eigen::VectorXf>> uvs = project_pointcloud(R_GtoI, p_IinG, camid, featmap);

    // Warn if the map could not supply the requested number of projections
    if ((int)uvs.size() < params.num_pts) {
      PRINT_WARNING(YELLOW "[SIM]: cam %d was unable to generate enough features (%d < %d projections)\n" RESET, (int)camid,
                    (int)uvs.size(), params.num_pts);
    }

    // Keep at most num_pts measurements
    if ((int)uvs.size() > params.num_pts) {
      uvs.erase(uvs.begin() + params.num_pts, uvs.end());
    }

    // Offset feature ids per-camera so they are unique across cameras (same
    // underlying map), unless stereo constraints between cameras are enforced
    for (size_t f = 0; f < uvs.size() && !params.use_stereo; f++) {
      uvs.at(f).first += camid * featmap.size();
    }

    // Corrupt each uv with white pixel noise from this camera's generator
    std::normal_distribution<double> gauss(0, 1);
    for (size_t j = 0; j < uvs.size(); j++) {
      uvs.at(j).second(0) += params.msckf_options.sigma_pix * gauss(gen_meas_cams.at(camid));
      uvs.at(j).second(1) += params.msckf_options.sigma_pix * gauss(gen_meas_cams.at(camid));
    }

    // Record this camera's output
    feats.push_back(uvs);
    camids.push_back(camid);
  }

  return true;
}
// Projects the 3d feature map into the given camera, returning (id, distorted
// uv) pairs for every feature that lands inside the image and passes the
// depth gate [0.1, sim_max_feature_gen_distance].
std::vector<std::pair<size_t, Eigen::VectorXf>> Simulator::project_pointcloud(const Eigen::Matrix3d &R_GtoI, const Eigen::Vector3d &p_IinG,
                                                                              int camid,
                                                                              const std::unordered_map<size_t, Eigen::Vector3d> &feats) {

  // Sanity-check the camera id against the loaded calibration
  assert(camid < params.state_options.num_cameras);
  assert((int)params.camera_intrinsics.size() == params.state_options.num_cameras);
  assert((int)params.camera_extrinsics.size() == params.state_options.num_cameras);

  // Extrinsics (IMU->camera) and the intrinsic model for this camera
  Eigen::Matrix<double, 3, 3> R_ItoC = quat_2_Rot(params.camera_extrinsics.at(camid).block(0, 0, 4, 1));
  Eigen::Matrix<double, 3, 1> p_IinC = params.camera_extrinsics.at(camid).block(4, 0, 3, 1);
  std::shared_ptr<ov_core::CamBase> camera = params.camera_intrinsics.at(camid);

  // Collected true (distorted) projections with their feature ids
  std::vector<std::pair<size_t, Eigen::VectorXf>> uvs;
  for (const auto &feat : feats) {

    // Feature position expressed in the camera frame
    Eigen::Vector3d p_FinI = R_GtoI * (feat.second - p_IinG);
    Eigen::Vector3d p_FinC = R_ItoC * p_FinI + p_IinC;

    // Reject points too close/behind the camera or beyond the max distance
    if (p_FinC(2) > params.sim_max_feature_gen_distance || p_FinC(2) < 0.1)
      continue;

    // Normalized image coordinates, then apply this camera's distortion model
    Eigen::Vector2f uv_norm;
    uv_norm << (float)(p_FinC(0) / p_FinC(2)), (float)(p_FinC(1) / p_FinC(2));
    Eigen::Vector2f uv_dist = camera->distort_f(uv_norm);

    // Keep only projections that land inside the image bounds
    if (uv_dist(0) < 0 || uv_dist(0) > camera->w() || uv_dist(1) < 0 || uv_dist(1) > camera->h())
      continue;

    uvs.push_back({feat.first, uv_dist});
  }

  // Return our projections
  return uvs;
}
// Generates numpts new map features visible from the given camera pose: sample
// a uniform pixel, undistort it to a bearing, assign a uniform depth, and
// transform the resulting 3d point into the global frame. New features are
// appended to the class-wide featmap with fresh ids.
void Simulator::generate_points(const Eigen::Matrix3d &R_GtoI, const Eigen::Vector3d &p_IinG, int camid,
                                std::unordered_map<size_t, Eigen::Vector3d> &feats, int numpts) {

  // Sanity-check the camera id against the loaded calibration
  assert(camid < params.state_options.num_cameras);
  assert((int)params.camera_intrinsics.size() == params.state_options.num_cameras);
  assert((int)params.camera_extrinsics.size() == params.state_options.num_cameras);

  // Extrinsics (IMU->camera) and the intrinsic model for this camera
  Eigen::Matrix<double, 3, 3> R_ItoC = quat_2_Rot(params.camera_extrinsics.at(camid).block(0, 0, 4, 1));
  Eigen::Matrix<double, 3, 1> p_IinC = params.camera_extrinsics.at(camid).block(4, 0, 3, 1);
  std::shared_ptr<ov_core::CamBase> camera = params.camera_intrinsics.at(camid);

  // Create each requested feature
  for (int i = 0; i < numpts; i++) {

    // Sample a raw (distorted) pixel uniformly over the image
    std::uniform_real_distribution<double> gen_u(0, camera->w());
    std::uniform_real_distribution<double> gen_v(0, camera->h());
    double u_dist = gen_u(gen_state_init);
    double v_dist = gen_v(gen_state_init);

    // Undistort it into normalized coordinates (via the opencv point type)
    cv::Point2f uv_dist((float)u_dist, (float)v_dist);
    cv::Point2f uv_norm = camera->undistort_cv(uv_dist);

    // Sample a depth uniformly within the configured generation range
    std::uniform_real_distribution<double> gen_depth(params.sim_min_feature_gen_distance, params.sim_max_feature_gen_distance);
    double depth = gen_depth(gen_state_init);

    // Back-project: scale the unit-plane bearing by the sampled depth
    Eigen::Vector3d bearing;
    bearing << uv_norm.x, uv_norm.y, 1;
    Eigen::Vector3d p_FinC = depth * bearing;

    // Transform camera-frame point back into the global frame of reference
    Eigen::Vector3d p_FinI = R_ItoC.transpose() * (p_FinC - p_IinC);
    Eigen::Vector3d p_FinG = R_GtoI.transpose() * p_FinI + p_IinG;

    // Register the new feature with the next unique id
    featmap.insert({id_map, p_FinG});
    id_map++;
  }
}

View File

@@ -0,0 +1,209 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_SIMULATOR_H
#define OV_MSCKF_SIMULATOR_H
#include <Eigen/Eigen>
#include <fstream>
#include <opencv2/core/core.hpp>
#include <random>
#include <sstream>
#include <string>
#include <unordered_map>
#include "cam/CamBase.h"
#include "cam/CamEqui.h"
#include "cam/CamRadtan.h"
#include "sim/BsplineSE3.h"
#include "utils/colors.h"
#include "utils/dataset_reader.h"
#include "core/VioManagerOptions.h"
namespace ov_msckf {
/**
* @brief Master simulator class that generated visual-inertial measurements
*
* Given a trajectory this will generate a SE(3) @ref ov_core::BsplineSE3 for that trajectory.
* This allows us to get the inertial measurement information at each timestep during this trajectory.
* After creating the bspline we will generate an environmental feature map which will be used as our feature measurements.
* This map will be projected into the frame at each timestep to get our "raw" uv measurements.
* We inject bias and white noises into our inertial readings while adding our white noise to the uv measurements also.
* The user should specify the sensor rates that they desire along with the seeds of the random number generators.
*
*/
class Simulator {

public:
  /**
   * @brief Default constructor, will load all configuration variables
   * @param params_ VioManager parameters. Should have already been loaded from cmd.
   */
  Simulator(VioManagerOptions &params_);

  /**
   * @brief Will get a set of perturbed parameters
   * @param gen_state Random number gen to use (passed by value, so the caller's generator state is not advanced)
   * @param params_ Parameters we will perturb
   */
  static void perturb_parameters(std::mt19937 gen_state, VioManagerOptions &params_);

  /**
   * @brief Returns if we are actively simulating
   * @return True if we still have simulation data
   */
  bool ok() { return is_running; }

  /**
   * @brief Gets the timestamp we have simulated up too
   * @return Timestamp
   */
  double current_timestamp() { return timestamp; }

  /**
   * @brief Get the simulation state at a specified timestep
   * @param desired_time Timestamp we want to get the state at
   * @param imustate State in the MSCKF ordering: [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel]
   * @return True if we have a state
   */
  bool get_state(double desired_time, Eigen::Matrix<double, 17, 1> &imustate);

  /**
   * @brief Gets the next inertial reading if we have one.
   * @param time_imu Time that this measurement occured at
   * @param wm Angular velocity measurement in the inertial frame
   * @param am Linear acceleration measurement in the inertial frame
   * @return True if we have a measurement
   */
  bool get_next_imu(double &time_imu, Eigen::Vector3d &wm, Eigen::Vector3d &am);

  /**
   * @brief Gets the next camera reading if we have one.
   * @param time_cam Time that this measurement occured at
   * @param camids Camera ids that the corresponding vectors match
   * @param feats Noisy uv measurements and ids for the returned time
   * @return True if we have a measurement
   */
  bool get_next_cam(double &time_cam, std::vector<int> &camids, std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats);

  /// Returns the true 3d map of features (id -> position in global frame)
  std::unordered_map<size_t, Eigen::Vector3d> get_map() { return featmap; }

  /// Returns the true 3d map of features as a plain vector of positions (ids dropped)
  std::vector<Eigen::Vector3d> get_map_vec() {
    std::vector<Eigen::Vector3d> feats;
    for (auto const &feat : featmap)
      feats.push_back(feat.second);
    return feats;
  }

  /// Access function to get the true parameters (i.e. calibration and settings)
  VioManagerOptions get_true_parameters() { return params; }

protected:
  /**
   * @brief Projects the passed map features into the desired camera frame.
   * @param R_GtoI Orientation of the IMU pose
   * @param p_IinG Position of the IMU pose
   * @param camid Camera id of the camera sensor we want to project into
   * @param feats Our set of 3d features
   * @return True distorted raw image measurements and their ids for the specified camera
   */
  std::vector<std::pair<size_t, Eigen::VectorXf>> project_pointcloud(const Eigen::Matrix3d &R_GtoI, const Eigen::Vector3d &p_IinG,
                                                                     int camid, const std::unordered_map<size_t, Eigen::Vector3d> &feats);

  /**
   * @brief Will generate points in the fov of the specified camera
   * @param R_GtoI Orientation of the IMU pose
   * @param p_IinG Position of the IMU pose
   * @param camid Camera id of the camera sensor we want to project into
   * @param[out] feats Map we will append new features to
   * @param numpts Number of points we should generate
   */
  void generate_points(const Eigen::Matrix3d &R_GtoI, const Eigen::Vector3d &p_IinG, int camid,
                       std::unordered_map<size_t, Eigen::Vector3d> &feats, int numpts);

  //===================================================================
  // Configuration variables
  //===================================================================

  /// True vio manager params (a copy of the parsed ones)
  VioManagerOptions params;

  //===================================================================
  // State related variables
  //===================================================================

  /// Our loaded trajectory data (timestamp(s), q_GtoI, p_IinG)
  std::vector<Eigen::VectorXd> traj_data;

  /// Our b-spline trajectory
  ov_core::BsplineSE3 spline;

  /// Next unique id to assign when a new map feature is generated
  size_t id_map = 0;
  /// Our map of 3d features (id -> position in global frame)
  std::unordered_map<size_t, Eigen::Vector3d> featmap;

  /// Mersenne twister PRNG for measurements (IMU)
  std::mt19937 gen_meas_imu;

  /// Mersenne twister PRNG for measurements (CAMERAS), one generator per camera
  std::vector<std::mt19937> gen_meas_cams;

  /// Mersenne twister PRNG for state initialization
  std::mt19937 gen_state_init;

  /// Mersenne twister PRNG for state perturbations
  std::mt19937 gen_state_perturb;

  /// If our simulation is running (false once the spline runs out of data)
  bool is_running;

  //===================================================================
  // Simulation specific variables
  //===================================================================

  /// Current timestamp of the system
  double timestamp;

  /// Last time we had an IMU reading
  double timestamp_last_imu;

  /// Last time we had an CAMERA reading
  double timestamp_last_cam;

  /// Our running acceleration bias
  Eigen::Vector3d true_bias_accel = Eigen::Vector3d::Zero();

  /// Our running gyroscope bias
  Eigen::Vector3d true_bias_gyro = Eigen::Vector3d::Zero();

  // Our history of true biases (one entry per IMU timestep, parallel vectors,
  // used by get_state() to interpolate the bias at an arbitrary time)
  std::vector<double> hist_true_bias_time;
  std::vector<Eigen::Vector3d> hist_true_bias_accel;
  std::vector<Eigen::Vector3d> hist_true_bias_gyro;
};
} // namespace ov_msckf
#endif // OV_MSCKF_SIMULATOR_H

View File

@@ -0,0 +1,559 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Propagator.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
// Propagates the filter state from its current time to the given camera
// timestamp using buffered IMU readings, updates the covariance with the
// accumulated state transition, then stochastically clones the new pose.
// Crashes (by design) on zero or negative time deltas.
void Propagator::propagate_and_clone(std::shared_ptr<State> state, double timestamp) {

  // If the difference between the current update time and state is zero
  // We should crash, as this means we would have two clones at the same time!!!!
  if (state->_timestamp == timestamp) {
    PRINT_ERROR(RED "Propagator::propagate_and_clone(): Propagation called again at same timestep at last update timestep!!!!\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // We should crash if we are trying to propagate backwards
  if (state->_timestamp > timestamp) {
    PRINT_ERROR(RED "Propagator::propagate_and_clone(): Propagation called trying to propagate backwards in time!!!!\n" RESET);
    PRINT_ERROR(RED "Propagator::propagate_and_clone(): desired propagation = %.4f\n" RESET, (timestamp - state->_timestamp));
    std::exit(EXIT_FAILURE);
  }

  //===================================================================================
  //===================================================================================
  //===================================================================================

  // Set the last time offset value if we have just started the system up
  if (!have_last_prop_time_offset) {
    last_prop_time_offset = state->_calib_dt_CAMtoIMU->value()(0);
    have_last_prop_time_offset = true;
  }

  // Get what our IMU-camera offset should be (t_imu = t_cam + calib_dt)
  double t_off_new = state->_calib_dt_CAMtoIMU->value()(0);

  // First lets construct an IMU vector of measurements we need
  // NOTE: the previous offset is used for time0 and the current estimate for
  // time1, since the offset estimate may have changed since the last call
  double time0 = state->_timestamp + last_prop_time_offset;
  double time1 = timestamp + t_off_new;
  std::vector<ov_core::ImuData> prop_data;
  {
    // Lock while reading the shared IMU buffer (fed from another thread)
    std::lock_guard<std::mutex> lck(imu_data_mtx);
    prop_data = Propagator::select_imu_readings(imu_data, time0, time1);
  }

  // We are going to sum up all the state transition matrices, so we can do a single large multiplication at the end
  // Phi_summed = Phi_i*Phi_summed
  // Q_summed = Phi_i*Q_summed*Phi_i^T + Q_i
  // After summing we can multiple the total phi to get the updated covariance
  // We will then add the noise to the IMU portion of the state
  Eigen::Matrix<double, 15, 15> Phi_summed = Eigen::Matrix<double, 15, 15>::Identity();
  Eigen::Matrix<double, 15, 15> Qd_summed = Eigen::Matrix<double, 15, 15>::Zero();
  double dt_summed = 0;

  // Loop through all IMU messages, and use them to move the state forward in time
  // This uses the zero'th order quat, and then constant acceleration discrete
  if (prop_data.size() > 1) {
    for (size_t i = 0; i < prop_data.size() - 1; i++) {

      // Get the next state Jacobian and noise Jacobian for this IMU reading
      Eigen::Matrix<double, 15, 15> F = Eigen::Matrix<double, 15, 15>::Zero();
      Eigen::Matrix<double, 15, 15> Qdi = Eigen::Matrix<double, 15, 15>::Zero();
      predict_and_compute(state, prop_data.at(i), prop_data.at(i + 1), F, Qdi);

      // Next we should propagate our IMU covariance
      // Pii' = F*Pii*F.transpose() + G*Q*G.transpose()
      // Pci' = F*Pci and Pic' = Pic*F.transpose()
      // NOTE: Here we are summing the state transition F so we can do a single mutiplication later
      // NOTE: Phi_summed = Phi_i*Phi_summed
      // NOTE: Q_summed = Phi_i*Q_summed*Phi_i^T + G*Q_i*G^T
      Phi_summed = F * Phi_summed;
      Qd_summed = F * Qd_summed * F.transpose() + Qdi;
      // Force symmetry to guard against numerical drift
      Qd_summed = 0.5 * (Qd_summed + Qd_summed.transpose());
      dt_summed += prop_data.at(i + 1).timestamp - prop_data.at(i).timestamp;
    }
  }

  // Last angular velocity (used for cloning when estimating time offset)
  // Take the second-to-last reading when available, else the last one
  Eigen::Matrix<double, 3, 1> last_w = Eigen::Matrix<double, 3, 1>::Zero();
  if (prop_data.size() > 1)
    last_w = prop_data.at(prop_data.size() - 2).wm - state->_imu->bias_g();
  else if (!prop_data.empty())
    last_w = prop_data.at(prop_data.size() - 1).wm - state->_imu->bias_g();

  // Do the update to the covariance with our "summed" state transition and IMU noise addition...
  std::vector<std::shared_ptr<Type>> Phi_order;
  Phi_order.push_back(state->_imu);
  StateHelper::EKFPropagation(state, Phi_order, Phi_order, Phi_summed, Qd_summed);

  // Set timestamp data
  state->_timestamp = timestamp;
  last_prop_time_offset = t_off_new;

  // Now perform stochastic cloning
  StateHelper::augment_clone(state, last_w);
}
bool Propagator::fast_state_propagate(std::shared_ptr<State> state, double timestamp, Eigen::Matrix<double, 13, 1> &state_plus,
                                      Eigen::Matrix<double, 12, 12> &covariance) {

  // Propagates a *copy* of the current imu state/covariance to the requested timestamp.
  // The filter state itself is not modified; this is used for high-rate pose prediction.
  // Returns false if we do not have enough imu readings to span the interval.

  // First we will store the current calibration / estimates of the state
  double state_time = state->_timestamp;
  Eigen::MatrixXd state_est = state->_imu->value();
  Eigen::MatrixXd state_covariance = StateHelper::get_marginal_covariance(state, {state->_imu});
  double t_off = state->_calib_dt_CAMtoIMU->value()(0);

  // First lets construct an IMU vector of measurements we need (t_imu = t_cam + calib_dt)
  double time0 = state_time + t_off;
  double time1 = timestamp + t_off;
  std::vector<ov_core::ImuData> prop_data;
  {
    std::lock_guard<std::mutex> lck(imu_data_mtx);
    prop_data = Propagator::select_imu_readings(imu_data, time0, time1, false);
  }
  if (prop_data.size() < 2)
    return false;

  // Biases from the current estimate (imu value layout: q(0:3), p(4:6), v(7:9), bg(10:12), ba(13:15))
  Eigen::Vector3d bias_g = state_est.block(10, 0, 3, 1);
  Eigen::Vector3d bias_a = state_est.block(13, 0, 3, 1);

  // Loop through all IMU messages, and use them to move the state forward in time
  // This uses the zero'th order quat, and then constant acceleration discrete
  for (size_t i = 0; i < prop_data.size() - 1; i++) {

    // Corrected imu measurements (average of the interval endpoints, bias removed)
    double dt = prop_data.at(i + 1).timestamp - prop_data.at(i).timestamp;
    Eigen::Vector3d w_hat = 0.5 * (prop_data.at(i + 1).wm + prop_data.at(i).wm) - bias_g;
    Eigen::Vector3d a_hat = 0.5 * (prop_data.at(i + 1).am + prop_data.at(i).am) - bias_a;
    Eigen::Matrix3d R_Gtoi = quat_2_Rot(state_est.block(0, 0, 4, 1));
    Eigen::Vector3d v_iinG = state_est.block(7, 0, 3, 1);
    Eigen::Vector3d p_iinG = state_est.block(4, 0, 3, 1);

    // State transition and noise matrix over the error state (th, p, v, bg, ba)
    Eigen::Matrix<double, 15, 15> F = Eigen::Matrix<double, 15, 15>::Zero();
    Eigen::Matrix<double, 15, 15> Qd = Eigen::Matrix<double, 15, 15>::Zero();
    F.block(0, 0, 3, 3) = exp_so3(-w_hat * dt);
    F.block(0, 9, 3, 3).noalias() = -exp_so3(-w_hat * dt) * Jr_so3(-w_hat * dt) * dt;
    F.block(9, 9, 3, 3).setIdentity();
    F.block(6, 0, 3, 3).noalias() = -R_Gtoi.transpose() * skew_x(a_hat * dt);
    F.block(6, 6, 3, 3).setIdentity();
    F.block(6, 12, 3, 3) = -R_Gtoi.transpose() * dt;
    F.block(12, 12, 3, 3).setIdentity();
    F.block(3, 0, 3, 3).noalias() = -0.5 * R_Gtoi.transpose() * skew_x(a_hat * dt * dt);
    F.block(3, 6, 3, 3) = Eigen::Matrix3d::Identity() * dt;
    F.block(3, 12, 3, 3) = -0.5 * R_Gtoi.transpose() * dt * dt;
    F.block(3, 3, 3, 3).setIdentity();
    Eigen::Matrix<double, 15, 12> G = Eigen::Matrix<double, 15, 12>::Zero();
    G.block(0, 0, 3, 3) = -exp_so3(-w_hat * dt) * Jr_so3(-w_hat * dt) * dt;
    G.block(6, 3, 3, 3) = -R_Gtoi.transpose() * dt;
    G.block(3, 3, 3, 3) = -0.5 * R_Gtoi.transpose() * dt * dt;
    G.block(9, 6, 3, 3).setIdentity();
    G.block(12, 9, 3, 3).setIdentity();

    // Construct our discrete noise covariance matrix
    // Note that we need to convert our continuous time noises to discrete
    // Equations (129) and (130) of Trawny tech report
    Eigen::Matrix<double, 12, 12> Qc = Eigen::Matrix<double, 12, 12>::Zero();
    Qc.block(0, 0, 3, 3) = _noises.sigma_w_2 / dt * Eigen::Matrix3d::Identity();
    Qc.block(3, 3, 3, 3) = _noises.sigma_a_2 / dt * Eigen::Matrix3d::Identity();
    Qc.block(6, 6, 3, 3) = _noises.sigma_wb_2 * dt * Eigen::Matrix3d::Identity();
    Qc.block(9, 9, 3, 3) = _noises.sigma_ab_2 * dt * Eigen::Matrix3d::Identity();
    Qd = G * Qc * G.transpose();
    Qd = 0.5 * (Qd + Qd.transpose());
    state_covariance = F * state_covariance * F.transpose() + Qd;

    // Propagate the mean forward (zeroth-order quaternion, constant acceleration)
    state_est.block(0, 0, 4, 1) = rot_2_quat(exp_so3(-w_hat * dt) * R_Gtoi);
    state_est.block(4, 0, 3, 1) = p_iinG + v_iinG * dt + 0.5 * R_Gtoi.transpose() * a_hat * dt * dt - 0.5 * _gravity * dt * dt;
    state_est.block(7, 0, 3, 1) = v_iinG + R_Gtoi.transpose() * a_hat * dt - _gravity * dt;
  }

  // Now record what the predicted state should be
  // NOTE: velocity and angular velocity are reported in the local imu frame
  Eigen::Vector4d q_Gtoi = state_est.block(0, 0, 4, 1);
  Eigen::Vector3d v_iinG = state_est.block(7, 0, 3, 1);
  Eigen::Vector3d p_iinG = state_est.block(4, 0, 3, 1);
  state_plus.setZero();
  state_plus.block(0, 0, 4, 1) = q_Gtoi;
  state_plus.block(4, 0, 3, 1) = p_iinG;
  state_plus.block(7, 0, 3, 1) = quat_2_Rot(q_Gtoi) * v_iinG;
  state_plus.block(10, 0, 3, 1) = 0.5 * (prop_data.at(prop_data.size() - 1).wm + prop_data.at(prop_data.size() - 2).wm) - bias_g;

  // Do a covariance propagation for our velocity (rotate the global velocity block into the local frame)
  // TODO: more properly do the covariance of the angular velocity here...
  // TODO: it should be dependent on the state bias, thus correlated with the pose
  covariance.setZero();
  Eigen::Matrix<double, 15, 15> Phi = Eigen::Matrix<double, 15, 15>::Identity();
  Phi.block(6, 6, 3, 3) = quat_2_Rot(q_Gtoi);
  state_covariance = Phi * state_covariance * Phi.transpose();
  covariance.block(0, 0, 9, 9) = state_covariance.block(0, 0, 9, 9);

  // BUGFIX: dt is the duration of the last imu interval. The original code *added* the two
  // absolute timestamps, producing a huge dt and a vanishingly small angular velocity covariance.
  double dt = prop_data.at(prop_data.size() - 1).timestamp - prop_data.at(prop_data.size() - 2).timestamp;
  covariance.block(9, 9, 3, 3) = _noises.sigma_w_2 / dt * Eigen::Matrix3d::Identity();
  return true;
}
std::vector<ov_core::ImuData> Propagator::select_imu_readings(const std::vector<ov_core::ImuData> &imu_data, double time0, double time1,
bool warn) {
// Selects the imu readings that span [time0, time1], interpolating ("cutting") the
// boundary measurements so the first and last returned entries land exactly on the
// requested times. Returns fewer than two entries if the interval cannot be covered.
// Our vector imu readings
std::vector<ov_core::ImuData> prop_data;
// Ensure we have some measurements in the first place!
if (imu_data.empty()) {
if (warn)
PRINT_WARNING(YELLOW "Propagator::select_imu_readings(): No IMU measurements. IMU-CAMERA are likely messed up!!!\n" RESET);
return prop_data;
}
// Loop through and find all the needed measurements to propagate with
// Note we split measurements based on the given state time, and the update timestamp
for (size_t i = 0; i < imu_data.size() - 1; i++) {
// START OF THE INTEGRATION PERIOD
// If the next timestamp is greater than our current state time
// And the current is not greater than it yet...
// Then we should "split" our current IMU measurement
if (imu_data.at(i + 1).timestamp > time0 && imu_data.at(i).timestamp < time0) {
ov_core::ImuData data = Propagator::interpolate_data(imu_data.at(i), imu_data.at(i + 1), time0);
prop_data.push_back(data);
// PRINT_DEBUG("propagation #%d = CASE 1 = %.3f => %.3f\n",
// (int)i,data.timestamp-prop_data.at(0).timestamp,time0-prop_data.at(0).timestamp);
continue;
}
// MIDDLE OF INTEGRATION PERIOD
// If our imu measurement is right in the middle of our propagation period
// Then we should just append the whole measurement time to our propagation vector
if (imu_data.at(i).timestamp >= time0 && imu_data.at(i + 1).timestamp <= time1) {
prop_data.push_back(imu_data.at(i));
// PRINT_DEBUG("propagation #%d = CASE 2 = %.3f\n",(int)i,imu_data.at(i).timestamp-prop_data.at(0).timestamp);
continue;
}
// END OF THE INTEGRATION PERIOD
// If the current timestamp is greater than our update time
// We should just "split" the NEXT IMU measurement to the update time,
// NOTE: we add the current time, and then the time at the end of the interval (so we can get a dt)
// NOTE: we also break out of this loop, as this is the last IMU measurement we need!
if (imu_data.at(i + 1).timestamp > time1) {
// If we have a very low frequency IMU then, we could have only recorded the first integration (i.e. case 1) and nothing else
// In this case, both the current IMU measurement and the next is greater than the desired interpolation, thus we should just cut the
// current at the desired time. Else, we have hit CASE2 and this IMU measurement is not past the desired propagation time, thus add the
// whole IMU reading
if (imu_data.at(i).timestamp > time1 && i == 0) {
// This case can happen if we don't have any imu data that has occurred before the startup time
// This means that either we have dropped IMU data, or we have not gotten enough.
// In this case we can't propagate forward in time, so there is not that much we can do.
break;
} else if (imu_data.at(i).timestamp > time1) {
// NOTE: i > 0 here (the i == 0 case broke out above), so i - 1 is safe
ov_core::ImuData data = interpolate_data(imu_data.at(i - 1), imu_data.at(i), time1);
prop_data.push_back(data);
// PRINT_DEBUG("propagation #%d = CASE 3.1 = %.3f => %.3f\n",
// (int)i,imu_data.at(i).timestamp-prop_data.at(0).timestamp,imu_data.at(i).timestamp-time0);
} else {
prop_data.push_back(imu_data.at(i));
// PRINT_DEBUG("propagation #%d = CASE 3.2 = %.3f => %.3f\n",
// (int)i,imu_data.at(i).timestamp-prop_data.at(0).timestamp,imu_data.at(i).timestamp-time0);
}
// If the added IMU message doesn't end exactly at the camera time
// Then we need to add another one that is right at the ending time
if (prop_data.at(prop_data.size() - 1).timestamp != time1) {
ov_core::ImuData data = interpolate_data(imu_data.at(i), imu_data.at(i + 1), time1);
prop_data.push_back(data);
// PRINT_DEBUG("propagation #%d = CASE 3.3 = %.3f => %.3f\n", (int)i,data.timestamp-prop_data.at(0).timestamp,data.timestamp-time0);
}
break;
}
}
// Check that we have at least one measurement to propagate with
if (prop_data.empty()) {
if (warn)
PRINT_WARNING(
YELLOW
"Propagator::select_imu_readings(): No IMU measurements to propagate with (%d of 2). IMU-CAMERA are likely messed up!!!\n" RESET,
(int)prop_data.size());
return prop_data;
}
// If we did not reach the whole integration period (i.e., the last inertial measurement we have is smaller than the time we want to
// reach) Then we should just "stretch" the last measurement to be the whole period (case 3 in the above loop)
// if(time1-imu_data.at(imu_data.size()-1).timestamp > 1e-3) {
// PRINT_DEBUG(YELLOW "Propagator::select_imu_readings(): Missing inertial measurements to propagate with (%.6f sec missing).
// IMU-CAMERA are likely messed up!!!\n" RESET, (time1-imu_data.at(imu_data.size()-1).timestamp)); return prop_data;
//}
// Loop through and ensure we do not have any zero dt values
// This would cause the noise covariance to be Infinity
// NOTE: erase shifts later elements left, so i is decremented to re-check the same index
for (size_t i = 0; i < prop_data.size() - 1; i++) {
if (std::abs(prop_data.at(i + 1).timestamp - prop_data.at(i).timestamp) < 1e-12) {
if (warn)
PRINT_WARNING(YELLOW "Propagator::select_imu_readings(): Zero DT between IMU reading %d and %d, removing it!\n" RESET, (int)i,
(int)(i + 1));
prop_data.erase(prop_data.begin() + i);
i--;
}
}
// Check that we have at least two measurements so every caller can form at least one interval
if (prop_data.size() < 2) {
if (warn)
PRINT_WARNING(
YELLOW
"Propagator::select_imu_readings(): No IMU measurements to propagate with (%d of 2). IMU-CAMERA are likely messed up!!!\n" RESET,
(int)prop_data.size());
return prop_data;
}
// Success :D
return prop_data;
}
void Propagator::predict_and_compute(std::shared_ptr<State> state, const ov_core::ImuData &data_minus, const ov_core::ImuData &data_plus,
Eigen::Matrix<double, 15, 15> &F, Eigen::Matrix<double, 15, 15> &Qd) {
// Propagates the imu mean over [data_minus, data_plus] and computes the 15x15
// state-transition matrix F and discrete-time noise covariance Qd over the
// error state (th, p, v, bg, ba). The state's imu estimate and fej values are
// replaced with the propagated values at the end.
// Set them to zero
F.setZero();
Qd.setZero();
// Time elapsed over interval
double dt = data_plus.timestamp - data_minus.timestamp;
// assert(data_plus.timestamp>data_minus.timestamp);
// Corrected imu measurements (bias-compensated with the current estimates)
Eigen::Matrix<double, 3, 1> w_hat = data_minus.wm - state->_imu->bias_g();
Eigen::Matrix<double, 3, 1> a_hat = data_minus.am - state->_imu->bias_a();
Eigen::Matrix<double, 3, 1> w_hat2 = data_plus.wm - state->_imu->bias_g();
Eigen::Matrix<double, 3, 1> a_hat2 = data_plus.am - state->_imu->bias_a();
// Compute the new state mean value (RK4 or discrete integrator, per options)
Eigen::Vector4d new_q;
Eigen::Vector3d new_v, new_p;
if (state->_options.use_rk4_integration)
predict_mean_rk4(state, dt, w_hat, a_hat, w_hat2, a_hat2, new_q, new_v, new_p);
else
predict_mean_discrete(state, dt, w_hat, a_hat, w_hat2, a_hat2, new_q, new_v, new_p);
// Get the locations of each entry of the imu state (offsets local to the imu block)
int th_id = state->_imu->q()->id() - state->_imu->id();
int p_id = state->_imu->p()->id() - state->_imu->id();
int v_id = state->_imu->v()->id() - state->_imu->id();
int bg_id = state->_imu->bg()->id() - state->_imu->id();
int ba_id = state->_imu->ba()->id() - state->_imu->id();
// Allocate noise Jacobian
Eigen::Matrix<double, 15, 12> G = Eigen::Matrix<double, 15, 12>::Zero();
// Now compute Jacobian of new state wrt old state and noise
if (state->_options.do_fej) {
// First-estimate-Jacobian branch: linearize about the fej values so the
// observability properties of the filter are preserved.
// This is the change in the orientation from the end of the last prop to the current prop
// This is needed since we need to include the "k-th" updated orientation information
Eigen::Matrix<double, 3, 3> Rfej = state->_imu->Rot_fej();
Eigen::Matrix<double, 3, 3> dR = quat_2_Rot(new_q) * Rfej.transpose();
Eigen::Matrix<double, 3, 1> v_fej = state->_imu->vel_fej();
Eigen::Matrix<double, 3, 1> p_fej = state->_imu->pos_fej();
F.block(th_id, th_id, 3, 3) = dR;
F.block(th_id, bg_id, 3, 3).noalias() = -dR * Jr_so3(-w_hat * dt) * dt;
// F.block(th_id, bg_id, 3, 3).noalias() = -dR * Jr_so3(-log_so3(dR)) * dt;
F.block(bg_id, bg_id, 3, 3).setIdentity();
F.block(v_id, th_id, 3, 3).noalias() = -skew_x(new_v - v_fej + _gravity * dt) * Rfej.transpose();
// F.block(v_id, th_id, 3, 3).noalias() = -Rfej.transpose() * skew_x(Rfej*(new_v-v_fej+_gravity*dt));
F.block(v_id, v_id, 3, 3).setIdentity();
F.block(v_id, ba_id, 3, 3) = -Rfej.transpose() * dt;
F.block(ba_id, ba_id, 3, 3).setIdentity();
F.block(p_id, th_id, 3, 3).noalias() = -skew_x(new_p - p_fej - v_fej * dt + 0.5 * _gravity * dt * dt) * Rfej.transpose();
// F.block(p_id, th_id, 3, 3).noalias() = -0.5 * Rfej.transpose() * skew_x(2*Rfej*(new_p-p_fej-v_fej*dt+0.5*_gravity*dt*dt));
F.block(p_id, v_id, 3, 3) = Eigen::Matrix<double, 3, 3>::Identity() * dt;
F.block(p_id, ba_id, 3, 3) = -0.5 * Rfej.transpose() * dt * dt;
F.block(p_id, p_id, 3, 3).setIdentity();
G.block(th_id, 0, 3, 3) = -dR * Jr_so3(-w_hat * dt) * dt;
// G.block(th_id, 0, 3, 3) = -dR * Jr_so3(-log_so3(dR)) * dt;
G.block(v_id, 3, 3, 3) = -Rfej.transpose() * dt;
G.block(p_id, 3, 3, 3) = -0.5 * Rfej.transpose() * dt * dt;
G.block(bg_id, 6, 3, 3) = Eigen::Matrix<double, 3, 3>::Identity();
G.block(ba_id, 9, 3, 3) = Eigen::Matrix<double, 3, 3>::Identity();
} else {
// Standard EKF branch: linearize about the current best estimate
Eigen::Matrix<double, 3, 3> R_Gtoi = state->_imu->Rot();
F.block(th_id, th_id, 3, 3) = exp_so3(-w_hat * dt);
F.block(th_id, bg_id, 3, 3).noalias() = -exp_so3(-w_hat * dt) * Jr_so3(-w_hat * dt) * dt;
F.block(bg_id, bg_id, 3, 3).setIdentity();
F.block(v_id, th_id, 3, 3).noalias() = -R_Gtoi.transpose() * skew_x(a_hat * dt);
F.block(v_id, v_id, 3, 3).setIdentity();
F.block(v_id, ba_id, 3, 3) = -R_Gtoi.transpose() * dt;
F.block(ba_id, ba_id, 3, 3).setIdentity();
F.block(p_id, th_id, 3, 3).noalias() = -0.5 * R_Gtoi.transpose() * skew_x(a_hat * dt * dt);
F.block(p_id, v_id, 3, 3) = Eigen::Matrix<double, 3, 3>::Identity() * dt;
F.block(p_id, ba_id, 3, 3) = -0.5 * R_Gtoi.transpose() * dt * dt;
F.block(p_id, p_id, 3, 3).setIdentity();
G.block(th_id, 0, 3, 3) = -exp_so3(-w_hat * dt) * Jr_so3(-w_hat * dt) * dt;
G.block(v_id, 3, 3, 3) = -R_Gtoi.transpose() * dt;
G.block(p_id, 3, 3, 3) = -0.5 * R_Gtoi.transpose() * dt * dt;
G.block(bg_id, 6, 3, 3) = Eigen::Matrix<double, 3, 3>::Identity();
G.block(ba_id, 9, 3, 3) = Eigen::Matrix<double, 3, 3>::Identity();
}
// Construct our discrete noise covariance matrix
// Note that we need to convert our continuous time noises to discrete
// Equations (129) and (130) of Trawny tech report
Eigen::Matrix<double, 12, 12> Qc = Eigen::Matrix<double, 12, 12>::Zero();
Qc.block(0, 0, 3, 3) = _noises.sigma_w_2 / dt * Eigen::Matrix<double, 3, 3>::Identity();
Qc.block(3, 3, 3, 3) = _noises.sigma_a_2 / dt * Eigen::Matrix<double, 3, 3>::Identity();
Qc.block(6, 6, 3, 3) = _noises.sigma_wb_2 * dt * Eigen::Matrix<double, 3, 3>::Identity();
Qc.block(9, 9, 3, 3) = _noises.sigma_ab_2 * dt * Eigen::Matrix<double, 3, 3>::Identity();
// Compute the noise injected into the state over the interval
Qd = G * Qc * G.transpose();
// Symmetrize to guard against numerical asymmetry
Qd = 0.5 * (Qd + Qd.transpose());
// Now replace imu estimate and fej with propagated values
// NOTE: the fej Jacobians above were computed with the *previous* fej values,
// which are overwritten here for the next interval
Eigen::Matrix<double, 16, 1> imu_x = state->_imu->value();
imu_x.block(0, 0, 4, 1) = new_q;
imu_x.block(4, 0, 3, 1) = new_p;
imu_x.block(7, 0, 3, 1) = new_v;
state->_imu->set_value(imu_x);
state->_imu->set_fej(imu_x);
}
void Propagator::predict_mean_discrete(std::shared_ptr<State> state, double dt, const Eigen::Vector3d &w_hat1,
                                       const Eigen::Vector3d &a_hat1, const Eigen::Vector3d &w_hat2, const Eigen::Vector3d &a_hat2,
                                       Eigen::Vector4d &new_q, Eigen::Vector3d &new_v, Eigen::Vector3d &new_p) {

  // Discrete mean propagation: zeroth-order quaternion integration for the
  // orientation, then constant-acceleration integration for velocity/position.
  // Optionally average the interval endpoints before integrating.
  Eigen::Vector3d w_hat = w_hat1;
  Eigen::Vector3d a_hat = a_hat1;
  if (state->_options.imu_avg) {
    w_hat = 0.5 * (w_hat1 + w_hat2);
    a_hat = 0.5 * (a_hat1 + a_hat2);
  }

  // Current orientation and the angular rate magnitude
  const double w_norm = w_hat.norm();
  const Eigen::Matrix<double, 3, 3> R_Gtoi = state->_imu->Rot();

  // Orientation: Equation (101) and (103) of the Trawny indirect TR.
  // Use the closed-form integrator when the rate is non-degenerate, otherwise
  // fall back to the first-order expansion to avoid dividing by a tiny norm.
  Eigen::Matrix<double, 4, 4> bigO;
  if (w_norm > 1e-20) {
    const double half_angle = 0.5 * w_norm * dt;
    bigO = cos(half_angle) * Eigen::Matrix<double, 4, 4>::Identity() + 1 / w_norm * sin(half_angle) * Omega(w_hat);
  } else {
    bigO = Eigen::Matrix<double, 4, 4>::Identity() + 0.5 * dt * Omega(w_hat);
  }
  new_q = quatnorm(bigO * state->_imu->quat());
  // new_q = rot_2_quat(exp_so3(-w_hat*dt)*R_Gtoi);

  // Velocity: local acceleration rotated to global, minus gravity
  new_v = state->_imu->vel() + R_Gtoi.transpose() * a_hat * dt - _gravity * dt;

  // Position: velocity times dt, with the acceleration integrated twice
  new_p = state->_imu->pos() + state->_imu->vel() * dt + 0.5 * R_Gtoi.transpose() * a_hat * dt * dt - 0.5 * _gravity * dt * dt;
}
void Propagator::predict_mean_rk4(std::shared_ptr<State> state, double dt, const Eigen::Vector3d &w_hat1, const Eigen::Vector3d &a_hat1,
const Eigen::Vector3d &w_hat2, const Eigen::Vector3d &a_hat2, Eigen::Vector4d &new_q,
Eigen::Vector3d &new_v, Eigen::Vector3d &new_p) {
// Fourth-order Runge-Kutta integration of the imu kinematics over dt.
// The imu inputs are linearly interpolated over the interval:
//   w(t) = w_hat1 + w_alpha*t,  a(t) = a_hat1 + a_jerk*t
// The orientation state is integrated as a *delta* quaternion dq (starting from
// identity) which is composed with the starting orientation q_0 at the end.
// Pre-compute things
Eigen::Vector3d w_hat = w_hat1;
Eigen::Vector3d a_hat = a_hat1;
Eigen::Vector3d w_alpha = (w_hat2 - w_hat1) / dt;
Eigen::Vector3d a_jerk = (a_hat2 - a_hat1) / dt;
// y0 ================
Eigen::Vector4d q_0 = state->_imu->quat();
Eigen::Vector3d p_0 = state->_imu->pos();
Eigen::Vector3d v_0 = state->_imu->vel();
// k1 ================
// slope at the start of the interval (identity delta quaternion)
Eigen::Vector4d dq_0 = {0, 0, 0, 1};
Eigen::Vector4d q0_dot = 0.5 * Omega(w_hat) * dq_0;
Eigen::Vector3d p0_dot = v_0;
Eigen::Matrix3d R_Gto0 = quat_2_Rot(quat_multiply(dq_0, q_0));
Eigen::Vector3d v0_dot = R_Gto0.transpose() * a_hat - _gravity;
Eigen::Vector4d k1_q = q0_dot * dt;
Eigen::Vector3d k1_p = p0_dot * dt;
Eigen::Vector3d k1_v = v0_dot * dt;
// k2 ================
// advance the inputs to the interval midpoint (also used for k3, per standard RK4)
w_hat += 0.5 * w_alpha * dt;
a_hat += 0.5 * a_jerk * dt;
Eigen::Vector4d dq_1 = quatnorm(dq_0 + 0.5 * k1_q);
// Eigen::Vector3d p_1 = p_0+0.5*k1_p;
Eigen::Vector3d v_1 = v_0 + 0.5 * k1_v;
Eigen::Vector4d q1_dot = 0.5 * Omega(w_hat) * dq_1;
Eigen::Vector3d p1_dot = v_1;
Eigen::Matrix3d R_Gto1 = quat_2_Rot(quat_multiply(dq_1, q_0));
Eigen::Vector3d v1_dot = R_Gto1.transpose() * a_hat - _gravity;
Eigen::Vector4d k2_q = q1_dot * dt;
Eigen::Vector3d k2_p = p1_dot * dt;
Eigen::Vector3d k2_v = v1_dot * dt;
// k3 ================
// still at the midpoint inputs, but stepping from the k2 slope
Eigen::Vector4d dq_2 = quatnorm(dq_0 + 0.5 * k2_q);
// Eigen::Vector3d p_2 = p_0+0.5*k2_p;
Eigen::Vector3d v_2 = v_0 + 0.5 * k2_v;
Eigen::Vector4d q2_dot = 0.5 * Omega(w_hat) * dq_2;
Eigen::Vector3d p2_dot = v_2;
Eigen::Matrix3d R_Gto2 = quat_2_Rot(quat_multiply(dq_2, q_0));
Eigen::Vector3d v2_dot = R_Gto2.transpose() * a_hat - _gravity;
Eigen::Vector4d k3_q = q2_dot * dt;
Eigen::Vector3d k3_p = p2_dot * dt;
Eigen::Vector3d k3_v = v2_dot * dt;
// k4 ================
// advance the inputs to the end of the interval
w_hat += 0.5 * w_alpha * dt;
a_hat += 0.5 * a_jerk * dt;
Eigen::Vector4d dq_3 = quatnorm(dq_0 + k3_q);
// Eigen::Vector3d p_3 = p_0+k3_p;
Eigen::Vector3d v_3 = v_0 + k3_v;
Eigen::Vector4d q3_dot = 0.5 * Omega(w_hat) * dq_3;
Eigen::Vector3d p3_dot = v_3;
Eigen::Matrix3d R_Gto3 = quat_2_Rot(quat_multiply(dq_3, q_0));
Eigen::Vector3d v3_dot = R_Gto3.transpose() * a_hat - _gravity;
Eigen::Vector4d k4_q = q3_dot * dt;
Eigen::Vector3d k4_p = p3_dot * dt;
Eigen::Vector3d k4_v = v3_dot * dt;
// y+dt ================
// weighted RK4 combination; compose the integrated delta with the start orientation
Eigen::Vector4d dq = quatnorm(dq_0 + (1.0 / 6.0) * k1_q + (1.0 / 3.0) * k2_q + (1.0 / 3.0) * k3_q + (1.0 / 6.0) * k4_q);
new_q = quat_multiply(dq, q_0);
new_p = p_0 + (1.0 / 6.0) * k1_p + (1.0 / 3.0) * k2_p + (1.0 / 3.0) * k3_p + (1.0 / 6.0) * k4_p;
new_v = v_0 + (1.0 / 6.0) * k1_v + (1.0 / 3.0) * k2_v + (1.0 / 3.0) * k3_v + (1.0 / 6.0) * k4_v;
}

View File

@@ -0,0 +1,284 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_STATE_PROPAGATOR_H
#define OV_MSCKF_STATE_PROPAGATOR_H
#include <mutex>
#include "state/StateHelper.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
#include "utils/sensor_data.h"
namespace ov_msckf {
/**
* @brief Performs the state covariance and mean propagation using imu measurements
*
* We will first select what measurements we need to propagate with.
* We then compute the state transition matrix at each step and update the state and covariance.
* For derivations look at @ref propagation page which has detailed equations.
*/
class Propagator {
public:
/**
* @brief Struct of our imu noise parameters
*/
struct NoiseManager {
/// Gyroscope white noise (rad/s/sqrt(hz))
double sigma_w = 1.6968e-04;
/// Gyroscope white noise covariance
double sigma_w_2 = pow(1.6968e-04, 2);
/// Gyroscope random walk (rad/s^2/sqrt(hz))
double sigma_wb = 1.9393e-05;
/// Gyroscope random walk covariance
double sigma_wb_2 = pow(1.9393e-05, 2);
/// Accelerometer white noise (m/s^2/sqrt(hz))
double sigma_a = 2.0000e-3;
/// Accelerometer white noise covariance
double sigma_a_2 = pow(2.0000e-3, 2);
/// Accelerometer random walk (m/s^3/sqrt(hz))
double sigma_ab = 3.0000e-03;
/// Accelerometer random walk covariance
double sigma_ab_2 = pow(3.0000e-03, 2);
/// Nice print function of what parameters we have loaded
void print() {
PRINT_DEBUG(" - gyroscope_noise_density: %.6f\n", sigma_w);
PRINT_DEBUG(" - accelerometer_noise_density: %.5f\n", sigma_a);
PRINT_DEBUG(" - gyroscope_random_walk: %.7f\n", sigma_wb);
PRINT_DEBUG(" - accelerometer_random_walk: %.6f\n", sigma_ab);
}
};
/**
* @brief Default constructor
* @param noises imu noise characteristics (continuous time)
* @param gravity_mag Global gravity magnitude of the system (normally 9.81)
*/
Propagator(NoiseManager noises, double gravity_mag) : _noises(noises) {
_noises.sigma_w_2 = std::pow(_noises.sigma_w, 2);
_noises.sigma_a_2 = std::pow(_noises.sigma_a, 2);
_noises.sigma_wb_2 = std::pow(_noises.sigma_wb, 2);
_noises.sigma_ab_2 = std::pow(_noises.sigma_ab, 2);
last_prop_time_offset = 0.0;
_gravity << 0.0, 0.0, gravity_mag;
}
/**
* @brief Stores incoming inertial readings
* @param message Contains our timestamp and inertial information
* @param oldest_time Time that we can discard measurements before
*/
void feed_imu(const ov_core::ImuData &message, double oldest_time = -1) {
// Append it to our vector
std::lock_guard<std::mutex> lck(imu_data_mtx);
imu_data.emplace_back(message);
// Loop through and delete imu messages that are older than our requested time
if (oldest_time != -1) {
auto it0 = imu_data.begin();
while (it0 != imu_data.end()) {
if (message.timestamp < oldest_time) {
it0 = imu_data.erase(it0);
} else {
it0++;
}
}
}
}
/**
* @brief Propagate state up to given timestamp and then clone
*
* This will first collect all imu readings that occured between the
* *current* state time and the new time we want the state to be at.
* If we don't have any imu readings we will try to extrapolate into the future.
* After propagating the mean and covariance using our dynamics,
* We clone the current imu pose as a new clone in our state.
*
* @param state Pointer to state
* @param timestamp Time to propagate to and clone at (CAM clock frame)
*/
void propagate_and_clone(std::shared_ptr<State> state, double timestamp);
/**
* @brief Gets what the state and its covariance will be at a given timestamp
*
* This can be used to find what the state will be in the "future" without propagating it.
* This will propagate a clone of the current IMU state and its covariance matrix.
* This is typically used to provide high frequency pose estimates between updates.
*
* @param state Pointer to state
* @param timestamp Time to propagate to (IMU clock frame)
* @param state_plus The propagated state (q_GtoI, p_IinG, v_IinI, w_IinI)
* @param covariance The propagated covariance (q_GtoI, p_IinG, v_IinI, w_IinI)
* @return True if we were able to propagate the state to the current timestep
*/
bool fast_state_propagate(std::shared_ptr<State> state, double timestamp, Eigen::Matrix<double, 13, 1> &state_plus,
Eigen::Matrix<double, 12, 12> &covariance);
/**
* @brief Helper function that given current imu data, will select imu readings between the two times.
*
* This will create measurements that we will integrate with, and an extra measurement at the end.
* We use the @ref interpolate_data() function to "cut" the imu readings at the begining and end of the integration.
* The timestamps passed should already take into account the time offset values.
*
* @param imu_data IMU data we will select measurements from
* @param time0 Start timestamp
* @param time1 End timestamp
* @param warn If we should warn if we don't have enough IMU to propagate with (e.g. fast prop will get warnings otherwise)
* @return Vector of measurements (if we could compute them)
*/
static std::vector<ov_core::ImuData> select_imu_readings(const std::vector<ov_core::ImuData> &imu_data, double time0, double time1,
bool warn = true);
/**
* @brief Nice helper function that will linearly interpolate between two imu messages.
*
* This should be used instead of just "cutting" imu messages that bound the camera times
* Give better time offset if we use this function, could try other orders/splines if the imu is slow.
*
* @param imu_1 imu at begining of interpolation interval
* @param imu_2 imu at end of interpolation interval
* @param timestamp Timestamp being interpolated to
*/
static ov_core::ImuData interpolate_data(const ov_core::ImuData &imu_1, const ov_core::ImuData &imu_2, double timestamp) {
// time-distance lambda
double lambda = (timestamp - imu_1.timestamp) / (imu_2.timestamp - imu_1.timestamp);
// PRINT_DEBUG("lambda - %d\n", lambda);
// interpolate between the two times
ov_core::ImuData data;
data.timestamp = timestamp;
data.am = (1 - lambda) * imu_1.am + lambda * imu_2.am;
data.wm = (1 - lambda) * imu_1.wm + lambda * imu_2.wm;
return data;
}
protected:
/// Estimate for time offset at last propagation time
double last_prop_time_offset = 0.0;
bool have_last_prop_time_offset = false;
/**
* @brief Propagates the state forward using the imu data and computes the noise covariance and state-transition
* matrix of this interval.
*
* This function can be replaced with analytical/numerical integration or when using a different state representation.
* This contains our state transition matrix along with how our noise evolves in time.
* If you have other state variables besides the IMU that evolve you would add them here.
* See the @ref error_prop page for details on how this was derived.
*
* @param state Pointer to state
* @param data_minus imu readings at beginning of interval
* @param data_plus imu readings at end of interval
* @param F State-transition matrix over the interval
* @param Qd Discrete-time noise covariance over the interval
*/
void predict_and_compute(std::shared_ptr<State> state, const ov_core::ImuData &data_minus, const ov_core::ImuData &data_plus,
Eigen::Matrix<double, 15, 15> &F, Eigen::Matrix<double, 15, 15> &Qd);
/**
* @brief Discrete imu mean propagation.
*
* See @ref propagation for details on these equations.
* \f{align*}{
* \text{}^{I_{k+1}}_{G}\hat{\bar{q}}
* &= \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}\big({\boldsymbol{\omega}}_{m,k}-\hat{\mathbf{b}}_{g,k}\big)\Delta t\bigg)
* \text{}^{I_{k}}_{G}\hat{\bar{q}} \\
* ^G\hat{\mathbf{v}}_{k+1} &= \text{}^G\hat{\mathbf{v}}_{I_k} - {}^G\mathbf{g}\Delta t
* +\text{}^{I_k}_G\hat{\mathbf{R}}^\top(\mathbf{a}_{m,k} - \hat{\mathbf{b}}_{\mathbf{a},k})\Delta t\\
* ^G\hat{\mathbf{p}}_{I_{k+1}}
* &= \text{}^G\hat{\mathbf{p}}_{I_k} + {}^G\hat{\mathbf{v}}_{I_k} \Delta t
* - \frac{1}{2}{}^G\mathbf{g}\Delta t^2
* + \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top(\mathbf{a}_{m,k} - \hat{\mathbf{b}}_{\mathbf{a},k})\Delta t^2
* \f}
*
* @param state Pointer to state
* @param dt Time we should integrate over
* @param w_hat1 Angular velocity with bias removed
* @param a_hat1 Linear acceleration with bias removed
* @param w_hat2 Next angular velocity with bias removed
* @param a_hat2 Next linear acceleration with bias removed
* @param new_q The resulting new orientation after integration
* @param new_v The resulting new velocity after integration
* @param new_p The resulting new position after integration
*/
void predict_mean_discrete(std::shared_ptr<State> state, double dt, const Eigen::Vector3d &w_hat1, const Eigen::Vector3d &a_hat1,
const Eigen::Vector3d &w_hat2, const Eigen::Vector3d &a_hat2, Eigen::Vector4d &new_q, Eigen::Vector3d &new_v,
Eigen::Vector3d &new_p);
/**
* @brief RK4 imu mean propagation.
*
* See this wikipedia page on [Runge-Kutta Methods](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods).
* We are doing a RK4 method, [this wolframe page](http://mathworld.wolfram.com/Runge-KuttaMethod.html) has the forth order equation
* defined below. We define function \f$ f(t,y) \f$ where y is a function of time t, see @ref imu_kinematic for the definition of the
* continous-time functions.
*
* \f{align*}{
* {k_1} &= f({t_0}, y_0) \Delta t \\
* {k_2} &= f( {t_0}+{\Delta t \over 2}, y_0 + {1 \over 2}{k_1} ) \Delta t \\
* {k_3} &= f( {t_0}+{\Delta t \over 2}, y_0 + {1 \over 2}{k_2} ) \Delta t \\
* {k_4} &= f( {t_0} + {\Delta t}, y_0 + {k_3} ) \Delta t \\
* y_{0+\Delta t} &= y_0 + \left( {{1 \over 6}{k_1} + {1 \over 3}{k_2} + {1 \over 3}{k_3} + {1 \over 6}{k_4}} \right)
* \f}
*
* @param state Pointer to state
* @param dt Time we should integrate over
* @param w_hat1 Angular velocity with bias removed
* @param a_hat1 Linear acceleration with bias removed
* @param w_hat2 Next angular velocity with bias removed
* @param a_hat2 Next linear acceleration with bias removed
* @param new_q The resulting new orientation after integration
* @param new_v The resulting new velocity after integration
* @param new_p The resulting new position after integration
*/
void predict_mean_rk4(std::shared_ptr<State> state, double dt, const Eigen::Vector3d &w_hat1, const Eigen::Vector3d &a_hat1,
const Eigen::Vector3d &w_hat2, const Eigen::Vector3d &a_hat2, Eigen::Vector4d &new_q, Eigen::Vector3d &new_v,
Eigen::Vector3d &new_p);
/// Container for the noise values
NoiseManager _noises;
/// Our history of IMU messages (time, angular, linear)
std::vector<ov_core::ImuData> imu_data;
/// Mutex named after imu_data — presumably serializes access to it between the
/// feeding and propagation paths; TODO confirm locking sites against callers
std::mutex imu_data_mtx;
/// Gravity vector
Eigen::Vector3d _gravity;
};
} // namespace ov_msckf
#endif // OV_MSCKF_STATE_PROPAGATOR_H

View File

@@ -0,0 +1,97 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "State.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
State::State(StateOptions &options) {

  // Keep a copy of the configuration
  _options = options;

  // Running index of where the next variable begins inside the covariance
  int state_index = 0;

  // Helper: assign a variable its location in the state, record it in the
  // ordered variable list, and advance the running index by its size
  auto insert_variable = [&](const std::shared_ptr<ov_type::Type> &var) {
    var->set_local_id(state_index);
    _variables.push_back(var);
    state_index += var->size();
  };

  // The IMU state always comes first in the state vector / covariance
  _imu = std::make_shared<IMU>();
  insert_variable(_imu);

  // Camera-to-IMU time offset (a single scalar); only enters the state
  // when online time-offset calibration is enabled
  _calib_dt_CAMtoIMU = std::make_shared<Vec>(1);
  if (_options.do_calib_camera_timeoffset) {
    insert_variable(_calib_dt_CAMtoIMU);
  }

  // Per-camera extrinsic pose and 8-dof intrinsics vector. Both are always
  // allocated and stored in the maps, but only appended to the estimated
  // state when the corresponding calibration option is turned on.
  for (int cam = 0; cam < _options.num_cameras; cam++) {
    auto extrinsic_pose = std::make_shared<PoseJPL>();
    auto intrinsics = std::make_shared<Vec>(8);
    _calib_IMUtoCAM.insert({cam, extrinsic_pose});
    _cam_intrinsics.insert({cam, intrinsics});
    if (_options.do_calib_camera_pose) {
      insert_variable(extrinsic_pose);
    }
    if (_options.do_calib_camera_intrinsics) {
      insert_variable(intrinsics);
    }
  }

  // Seed the whole covariance with a small isotropic value
  _Cov = 1e-3 * Eigen::MatrixXd::Identity(state_index, state_index);

  // Then overwrite the calibration entries with more meaningful priors
  if (_options.do_calib_camera_timeoffset) {
    _Cov(_calib_dt_CAMtoIMU->id(), _calib_dt_CAMtoIMU->id()) = std::pow(0.01, 2);
  }
  if (_options.do_calib_camera_pose) {
    for (int cam = 0; cam < _options.num_cameras; cam++) {
      int id = _calib_IMUtoCAM.at(cam)->id();
      // First three dof get a tight prior, last three a looser one
      _Cov.block(id, id, 3, 3) = std::pow(0.001, 2) * Eigen::MatrixXd::Identity(3, 3);
      _Cov.block(id + 3, id + 3, 3, 3) = std::pow(0.01, 2) * Eigen::MatrixXd::Identity(3, 3);
    }
  }
  if (_options.do_calib_camera_intrinsics) {
    for (int cam = 0; cam < _options.num_cameras; cam++) {
      int id = _cam_intrinsics.at(cam)->id();
      // First four entries get a unit prior, last four a very tight one
      // (likely projection vs. distortion parameters — judged from the magnitudes; TODO confirm)
      _Cov.block(id, id, 4, 4) = std::pow(1.0, 2) * Eigen::MatrixXd::Identity(4, 4);
      _Cov.block(id + 4, id + 4, 4, 4) = std::pow(0.005, 2) * Eigen::MatrixXd::Identity(4, 4);
    }
  }
}

123
ov_msckf/src/state/State.h Normal file
View File

@@ -0,0 +1,123 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_STATE_H
#define OV_MSCKF_STATE_H
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
#include "StateOptions.h"
#include "cam/CamBase.h"
#include "types/IMU.h"
#include "types/Landmark.h"
#include "types/PoseJPL.h"
#include "types/Type.h"
#include "types/Vec.h"
namespace ov_msckf {
/**
* @brief State of our filter
*
* This state has all the current estimates for the filter.
* This system is modeled after the MSCKF filter, thus we have a sliding window of clones.
* We additionally have more parameters for online estimation of calibration and SLAM features.
* We also have the covariance of the system, which should be managed using the StateHelper class.
*/
class State {
public:
/**
* @brief Default Constructor (will initialize variables to defaults)
* @param options_ Options structure containing filter options
*/
State(StateOptions &options_);
~State() {}
/**
* @brief Will return the timestep that we will marginalize next.
* As of right now, since we are using a sliding window, this is the oldest clone.
* But if you wanted to do a keyframe system, you could selectively marginalize clones.
* @return timestep of clone we will marginalize
*/
double margtimestep() {
double time = INFINITY;
for (const auto &clone_imu : _clones_IMU) {
if (clone_imu.first < time) {
time = clone_imu.first;
}
}
return time;
}
/**
* @brief Calculates the current max size of the covariance
* @return Size of the current covariance matrix
*/
int max_covariance_size() { return (int)_Cov.rows(); }
/// Current timestamp (should be the last update time!)
double _timestamp = -1;
/// Struct containing filter options
StateOptions _options;
/// Pointer to the "active" IMU state (q_GtoI, p_IinG, v_IinG, bg, ba)
std::shared_ptr<ov_type::IMU> _imu;
/// Map between imaging times and clone poses (q_GtoIi, p_IiinG)
std::map<double, std::shared_ptr<ov_type::PoseJPL>> _clones_IMU;
/// Our current set of SLAM features (3d positions)
std::unordered_map<size_t, std::shared_ptr<ov_type::Landmark>> _features_SLAM;
/// Time offset base IMU to camera (t_imu = t_cam + t_off)
std::shared_ptr<ov_type::Vec> _calib_dt_CAMtoIMU;
/// Calibration poses for each camera (R_ItoC, p_IinC)
std::unordered_map<size_t, std::shared_ptr<ov_type::PoseJPL>> _calib_IMUtoCAM;
/// Camera intrinsics
std::unordered_map<size_t, std::shared_ptr<ov_type::Vec>> _cam_intrinsics;
/// Camera intrinsics camera objects
std::unordered_map<size_t, std::shared_ptr<ov_core::CamBase>> _cam_intrinsics_cameras;
private:
// Define that the state helper is a friend class of this class
// This will allow it to access the below functions which should normally not be called
// This prevents a developer from thinking that the "insert clone" will actually correctly add it to the covariance
friend class StateHelper;
/// Covariance of all active variables
Eigen::MatrixXd _Cov;
/// Vector of variables
std::vector<std::shared_ptr<ov_type::Type>> _variables;
};
} // namespace ov_msckf
#endif // OV_MSCKF_STATE_H

View File

@@ -0,0 +1,603 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "StateHelper.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
// Propagates the covariance with a state-transition matrix Phi and additive
// noise Q: the rows/cols of the (contiguous) order_NEW variables are replaced
// by Phi*P*Phi^T + Q, and their cross terms with the rest by P*Phi^T.
void StateHelper::EKFPropagation(std::shared_ptr<State> state, const std::vector<std::shared_ptr<Type>> &order_NEW,
                                 const std::vector<std::shared_ptr<Type>> &order_OLD, const Eigen::MatrixXd &Phi,
                                 const Eigen::MatrixXd &Q) {

  // We need at least one old and new variable
  if (order_NEW.empty() || order_OLD.empty()) {
    PRINT_ERROR(RED "StateHelper::EKFPropagation() - Called with empty variable arrays!\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Loop through our Phi order and ensure that they are continuous in memory
  // (the propagated variables must occupy one contiguous span of the covariance
  //  so the three block writes at the bottom are well defined)
  int size_order_NEW = order_NEW.at(0)->size();
  for (size_t i = 0; i < order_NEW.size() - 1; i++) {
    if (order_NEW.at(i)->id() + order_NEW.at(i)->size() != order_NEW.at(i + 1)->id()) {
      PRINT_ERROR(RED "StateHelper::EKFPropagation() - Called with non-contiguous state elements!\n" RESET);
      PRINT_ERROR(
          RED "StateHelper::EKFPropagation() - This code only support a state transition which is in the same order as the state\n" RESET);
      std::exit(EXIT_FAILURE);
    }
    size_order_NEW += order_NEW.at(i + 1)->size();
  }

  // Size of the old phi matrix
  int size_order_OLD = order_OLD.at(0)->size();
  for (size_t i = 0; i < order_OLD.size() - 1; i++) {
    size_order_OLD += order_OLD.at(i + 1)->size();
  }

  // Assert that we have correct sizes
  assert(size_order_NEW == Phi.rows());
  assert(size_order_OLD == Phi.cols());
  assert(size_order_NEW == Q.cols());
  assert(size_order_NEW == Q.rows());

  // Get the location in small phi for each measuring variable
  // (column offset of each order_OLD variable inside Phi)
  int current_it = 0;
  std::vector<int> Phi_id;
  for (const auto &var : order_OLD) {
    Phi_id.push_back(current_it);
    current_it += var->size();
  }

  // Loop through all our old states and get the state transition times it
  // Cov_PhiT = [ Pxx ] [ Phi' ]'
  // i.e. the full-state cross term P * Phi^T, accumulated column-group by column-group
  Eigen::MatrixXd Cov_PhiT = Eigen::MatrixXd::Zero(state->_Cov.rows(), Phi.rows());
  for (size_t i = 0; i < order_OLD.size(); i++) {
    std::shared_ptr<Type> var = order_OLD.at(i);
    Cov_PhiT.noalias() +=
        state->_Cov.block(0, var->id(), state->_Cov.rows(), var->size()) * Phi.block(0, Phi_id[i], Phi.rows(), var->size()).transpose();
  }

  // Get Phi_NEW*Covariance*Phi_NEW^t + Q
  // NOTE: only the upper triangle of Q is read here (selfadjointView mirrors it)
  Eigen::MatrixXd Phi_Cov_PhiT = Q.selfadjointView<Eigen::Upper>();
  for (size_t i = 0; i < order_OLD.size(); i++) {
    std::shared_ptr<Type> var = order_OLD.at(i);
    Phi_Cov_PhiT.noalias() += Phi.block(0, Phi_id[i], Phi.rows(), var->size()) * Cov_PhiT.block(var->id(), 0, var->size(), Phi.rows());
  }

  // We are good to go!
  // Write back the three affected regions: both cross-term strips and the
  // propagated diagonal block itself
  int start_id = order_NEW.at(0)->id();
  int phi_size = Phi.rows();
  int total_size = state->_Cov.rows();
  state->_Cov.block(start_id, 0, phi_size, total_size) = Cov_PhiT.transpose();
  state->_Cov.block(0, start_id, total_size, phi_size) = Cov_PhiT;
  state->_Cov.block(start_id, start_id, phi_size, phi_size) = Phi_Cov_PhiT;

  // We should check if we are not positive semi-definitate (i.e. negative diagionals is not s.p.d)
  Eigen::VectorXd diags = state->_Cov.diagonal();
  bool found_neg = false;
  for (int i = 0; i < diags.rows(); i++) {
    if (diags(i) < 0.0) {
      PRINT_WARNING(RED "StateHelper::EKFPropagation() - diagonal at %d is %.2f\n" RESET, i, diags(i));
      found_neg = true;
    }
  }
  assert(!found_neg);
}
// Standard EKF measurement update: given the Jacobian H (compressed over the
// variables in H_order), residual res and measurement noise R, computes the
// Kalman gain, updates the covariance, and applies the correction to every
// active state variable.
void StateHelper::EKFUpdate(std::shared_ptr<State> state, const std::vector<std::shared_ptr<Type>> &H_order, const Eigen::MatrixXd &H,
                            const Eigen::VectorXd &res, const Eigen::MatrixXd &R) {

  //==========================================================
  //==========================================================
  // Part of the Kalman Gain K = (P*H^T)*S^{-1} = M*S^{-1}
  assert(res.rows() == R.rows());
  assert(H.rows() == res.rows());
  Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->_Cov.rows(), res.rows());

  // Get the location in small jacobian for each measuring variable
  // (column offset of each H_order variable inside the compressed H)
  int current_it = 0;
  std::vector<int> H_id;
  for (const auto &meas_var : H_order) {
    H_id.push_back(current_it);
    current_it += meas_var->size();
  }

  //==========================================================
  //==========================================================
  // For each active variable find its M = P*H^T
  for (const auto &var : state->_variables) {
    // Sum up effect of each subjacobian = K_i= \sum_m (P_im Hm^T)
    Eigen::MatrixXd M_i = Eigen::MatrixXd::Zero(var->size(), res.rows());
    for (size_t i = 0; i < H_order.size(); i++) {
      std::shared_ptr<Type> meas_var = H_order[i];
      M_i.noalias() += state->_Cov.block(var->id(), meas_var->id(), var->size(), meas_var->size()) *
                       H.block(0, H_id[i], H.rows(), meas_var->size()).transpose();
    }
    M_a.block(var->id(), 0, var->size(), res.rows()) = M_i;
  }

  //==========================================================
  //==========================================================
  // Get covariance of the involved terms
  Eigen::MatrixXd P_small = StateHelper::get_marginal_covariance(state, H_order);

  // Residual covariance S = H*Cov*H' + R
  // (only the upper triangle of S is formed; S is treated as self-adjoint below)
  Eigen::MatrixXd S(R.rows(), R.rows());
  S.triangularView<Eigen::Upper>() = H * P_small * H.transpose();
  S.triangularView<Eigen::Upper>() += R;
  // Eigen::MatrixXd S = H * P_small * H.transpose() + R;

  // Invert our S (should we use a more stable method here??)
  // Cholesky (LLT) solve of S * Sinv = I, exploiting symmetry of S
  Eigen::MatrixXd Sinv = Eigen::MatrixXd::Identity(R.rows(), R.rows());
  S.selfadjointView<Eigen::Upper>().llt().solveInPlace(Sinv);
  Eigen::MatrixXd K = M_a * Sinv.selfadjointView<Eigen::Upper>();
  // Eigen::MatrixXd K = M_a * S.inverse();

  // Update Covariance
  // P -= K * (P*H^T)^T computed on the upper triangle only, then mirrored to
  // keep the stored covariance exactly symmetric
  state->_Cov.triangularView<Eigen::Upper>() -= K * M_a.transpose();
  state->_Cov = state->_Cov.selfadjointView<Eigen::Upper>();
  // Cov -= K * M_a.transpose();
  // Cov = 0.5*(Cov+Cov.transpose());

  // We should check if we are not positive semi-definitate (i.e. negative diagionals is not s.p.d)
  Eigen::VectorXd diags = state->_Cov.diagonal();
  bool found_neg = false;
  for (int i = 0; i < diags.rows(); i++) {
    if (diags(i) < 0.0) {
      PRINT_WARNING(RED "StateHelper::EKFUpdate() - diagonal at %d is %.2f\n" RESET, i, diags(i));
      found_neg = true;
    }
  }
  assert(!found_neg);

  // Calculate our delta and update all our active states
  Eigen::VectorXd dx = K * res;
  for (size_t i = 0; i < state->_variables.size(); i++) {
    state->_variables.at(i)->update(dx.block(state->_variables.at(i)->id(), 0, state->_variables.at(i)->size(), 1));
  }

  // If we are doing online intrinsic calibration we should update our camera objects
  // NOTE: is this the best place to put this update logic??? probably..
  if (state->_options.do_calib_camera_intrinsics) {
    for (auto const &calib : state->_cam_intrinsics) {
      state->_cam_intrinsics_cameras.at(calib.first)->set_value(calib.second->value());
    }
  }
}
void StateHelper::set_initial_covariance(std::shared_ptr<State> state, const Eigen::MatrixXd &covariance,
                                         const std::vector<std::shared_ptr<ov_type::Type>> &order) {

  // Copy every pairwise block of the supplied covariance (ordered according to
  // `order`) into the state's covariance at the locations those variables
  // occupy (given by their local ids). Diagonal blocks are handled by the
  // r == c iteration. Cross terms between the listed variables and the rest
  // of the state are left untouched — the key assumption (as at startup) is
  // that those are zero, i.e. the overall covariance is block diagonal.
  int row_offset = 0;
  for (size_t r = 0; r < order.size(); r++) {
    int col_offset = 0;
    for (size_t c = 0; c < order.size(); c++) {
      state->_Cov.block(order[r]->id(), order[c]->id(), order[r]->size(), order[c]->size()) =
          covariance.block(row_offset, col_offset, order[r]->size(), order[c]->size());
      col_offset += order[c]->size();
    }
    row_offset += order[r]->size();
  }

  // Mirror the upper triangle so the stored covariance stays exactly symmetric
  state->_Cov = state->_Cov.selfadjointView<Eigen::Upper>();
}
Eigen::MatrixXd StateHelper::get_marginal_covariance(std::shared_ptr<State> state,
                                                     const std::vector<std::shared_ptr<Type>> &small_variables) {

  // Total dimension of the requested marginal covariance
  int total_dim = 0;
  for (const auto &var : small_variables) {
    total_dim += var->size();
  }

  // Gather every pairwise block (diagonal blocks included, when the row and
  // column variable coincide) out of the big covariance
  Eigen::MatrixXd Small_cov = Eigen::MatrixXd::Zero(total_dim, total_dim);
  int row = 0;
  for (const auto &var_row : small_variables) {
    int col = 0;
    for (const auto &var_col : small_variables) {
      Small_cov.block(row, col, var_row->size(), var_col->size()) =
          state->_Cov.block(var_row->id(), var_col->id(), var_row->size(), var_col->size());
      col += var_col->size();
    }
    row += var_row->size();
  }

  // Return the assembled marginal covariance
  return Small_cov;
}
Eigen::MatrixXd StateHelper::get_full_covariance(std::shared_ptr<State> state) {
  // The stored covariance already spans all active variables,
  // so returning a copy of it is the full covariance
  Eigen::MatrixXd full_cov = state->_Cov;
  return full_cov;
}
// Removes a top-level variable from the state: deletes its rows/columns from
// the covariance, shifts the local ids of all variables ordered after it, and
// drops it from the variable list (its local id is set to -1).
void StateHelper::marginalize(std::shared_ptr<State> state, std::shared_ptr<Type> marg) {

  // Check if the current state has the element we want to marginalize
  if (std::find(state->_variables.begin(), state->_variables.end(), marg) == state->_variables.end()) {
    PRINT_ERROR(RED "StateHelper::marginalize() - Called on variable that is not in the state\n" RESET);
    PRINT_ERROR(RED "StateHelper::marginalize() - Marginalization, does NOT work on sub-variables yet...\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Generic covariance has this form for x_1, x_m, x_2. If we want to remove x_m:
  //
  //  P_(x_1,x_1) P(x_1,x_m) P(x_1,x_2)
  //  P_(x_m,x_1) P(x_m,x_m) P(x_m,x_2)
  //  P_(x_2,x_1) P(x_2,x_m) P(x_2,x_2)
  //
  //  to
  //
  //  P_(x_1,x_1) P(x_1,x_2)
  //  P_(x_2,x_1) P(x_2,x_2)
  //
  // i.e. x_1 goes from 0 to marg_id, x_2 goes from marg_id+marg_size to Cov.rows() in the original covariance
  int marg_size = marg->size();
  int marg_id = marg->id();
  int x2_size = (int)state->_Cov.rows() - marg_id - marg_size;

  Eigen::MatrixXd Cov_new(state->_Cov.rows() - marg_size, state->_Cov.rows() - marg_size);

  // P_(x_1,x_1)
  Cov_new.block(0, 0, marg_id, marg_id) = state->_Cov.block(0, 0, marg_id, marg_id);

  // P_(x_1,x_2)
  Cov_new.block(0, marg_id, marg_id, x2_size) = state->_Cov.block(0, marg_id + marg_size, marg_id, x2_size);

  // P_(x_2,x_1)
  // (mirrored from the block just written, which keeps symmetry exact)
  Cov_new.block(marg_id, 0, x2_size, marg_id) = Cov_new.block(0, marg_id, marg_id, x2_size).transpose();

  // P(x_2,x_2)
  Cov_new.block(marg_id, marg_id, x2_size, x2_size) = state->_Cov.block(marg_id + marg_size, marg_id + marg_size, x2_size, x2_size);

  // Now set new covariance
  // state->_Cov.resize(Cov_new.rows(),Cov_new.cols());
  state->_Cov = Cov_new;
  // state->Cov() = 0.5*(Cov_new+Cov_new.transpose());
  assert(state->_Cov.rows() == Cov_new.rows());

  // Now we keep the remaining variables and update their ordering
  // Note: DOES NOT SUPPORT MARGINALIZING SUBVARIABLES YET!!!!!!!
  std::vector<std::shared_ptr<Type>> remaining_variables;
  for (size_t i = 0; i < state->_variables.size(); i++) {
    // Only keep non-marginal states
    if (state->_variables.at(i) != marg) {
      if (state->_variables.at(i)->id() > marg_id) {
        // If the variable is "beyond" the marginal one in ordering, need to "move it forward"
        state->_variables.at(i)->set_local_id(state->_variables.at(i)->id() - marg_size);
      }
      remaining_variables.push_back(state->_variables.at(i));
    }
  }

  // Delete the old state variable to free up its memory
  // NOTE: we don't need to do this any more since our variable is a shared ptr
  // NOTE: thus this is automatically managed, but this allows outside references to keep the old variable
  // delete marg;
  // Mark the marginalized variable as no longer living inside the covariance
  marg->set_local_id(-1);

  // Now set variables as the remaining ones
  state->_variables = remaining_variables;
}
// Duplicates a variable (or sub-variable) of the state: appends a copy of it
// to the end of the covariance, copying its auto-covariance block and all its
// cross terms with the rest of the state, and returns the new clone.
std::shared_ptr<Type> StateHelper::clone(std::shared_ptr<State> state, std::shared_ptr<Type> variable_to_clone) {

  // Get total size of new cloned variables, and the old covariance size
  int total_size = variable_to_clone->size();
  int old_size = (int)state->_Cov.rows();
  int new_loc = (int)state->_Cov.rows();

  // Resize both our covariance to the new size
  // (the freshly appended rows/columns start zeroed and are filled below)
  state->_Cov.conservativeResizeLike(Eigen::MatrixXd::Zero(old_size + total_size, old_size + total_size));

  // What is the new state, and variable we inserted
  // NOTE(review): this local copy of the variable list appears unused — candidate for removal
  const std::vector<std::shared_ptr<Type>> new_variables = state->_variables;
  std::shared_ptr<Type> new_clone = nullptr;

  // Loop through all variables, and find the variable that we are going to clone
  for (size_t k = 0; k < state->_variables.size(); k++) {

    // Skip this if it is not the same
    // First check if the top level variable is the same, then check the sub-variables
    std::shared_ptr<Type> type_check = state->_variables.at(k)->check_if_subvariable(variable_to_clone);
    if (state->_variables.at(k) == variable_to_clone) {
      type_check = state->_variables.at(k);
    } else if (type_check != variable_to_clone) {
      continue;
    }

    // So we will clone this one
    int old_loc = type_check->id();

    // Copy the covariance elements
    // (self block, then the cross terms against the whole existing state, both sides)
    state->_Cov.block(new_loc, new_loc, total_size, total_size) = state->_Cov.block(old_loc, old_loc, total_size, total_size);
    state->_Cov.block(0, new_loc, old_size, total_size) = state->_Cov.block(0, old_loc, old_size, total_size);
    state->_Cov.block(new_loc, 0, total_size, old_size) = state->_Cov.block(old_loc, 0, total_size, old_size);

    // Create clone from the type being cloned
    new_clone = type_check->clone();
    new_clone->set_local_id(new_loc);
    break;
  }

  // Check if the current state has this variable
  if (new_clone == nullptr) {
    PRINT_ERROR(RED "StateHelper::clone() - Called on variable is not in the state\n" RESET);
    PRINT_ERROR(RED "StateHelper::clone() - Ensure that the variable specified is a variable, or sub-variable..\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Add to variable list and return
  state->_variables.push_back(new_clone);
  return new_clone;
}
// Initializes a new variable from a linear system [H_R H_L] * [x; f] = res:
// Givens rotations split it into an invertible part that determines the new
// variable and a nullspace-projected part used as a regular EKF update.
// Returns false (without touching the state) if the chi2 gating test fails.
// NOTE: H_R, H_L and res are modified in place by the Givens rotations.
bool StateHelper::initialize(std::shared_ptr<State> state, std::shared_ptr<Type> new_variable,
                             const std::vector<std::shared_ptr<Type>> &H_order, Eigen::MatrixXd &H_R, Eigen::MatrixXd &H_L,
                             Eigen::MatrixXd &R, Eigen::VectorXd &res, double chi_2_mult) {

  // Check that this new variable is not already initialized
  if (std::find(state->_variables.begin(), state->_variables.end(), new_variable) != state->_variables.end()) {
    PRINT_ERROR("StateHelper::initialize_invertible() - Called on variable that is already in the state\n");
    PRINT_ERROR("StateHelper::initialize_invertible() - Found this variable at %d in covariance\n", new_variable->id());
    std::exit(EXIT_FAILURE);
  }

  // Check that we have isotropic noise (i.e. is diagonal and all the same value)
  // TODO: can we simplify this so it doesn't take as much time?
  assert(R.rows() == R.cols());
  assert(R.rows() > 0);
  for (int r = 0; r < R.rows(); r++) {
    for (int c = 0; c < R.cols(); c++) {
      if (r == c && R(0, 0) != R(r, c)) {
        PRINT_ERROR(RED "StateHelper::initialize() - Your noise is not isotropic!\n" RESET);
        PRINT_ERROR(RED "StateHelper::initialize() - Found a value of %.2f verses value of %.2f\n" RESET, R(r, c), R(0, 0));
        std::exit(EXIT_FAILURE);
      } else if (r != c && R(r, c) != 0.0) {
        PRINT_ERROR(RED "StateHelper::initialize() - Your noise is not diagonal!\n" RESET);
        PRINT_ERROR(RED "StateHelper::initialize() - Found a value of %.2f at row %d and column %d\n" RESET, R(r, c), r, c);
        std::exit(EXIT_FAILURE);
      }
    }
  }

  //==========================================================
  //==========================================================
  // First we perform QR givens to seperate the system
  // The top will be a system that depends on the new state, while the bottom does not
  // (the same rotations are applied jointly to H_L, H_R and res, so the system
  //  is only re-expressed in a rotated basis; the isotropic R is invariant to this)
  size_t new_var_size = new_variable->size();
  assert((int)new_var_size == H_L.cols());
  Eigen::JacobiRotation<double> tempHo_GR;
  for (int n = 0; n < H_L.cols(); ++n) {
    for (int m = (int)H_L.rows() - 1; m > n; m--) {
      // Givens matrix G
      tempHo_GR.makeGivens(H_L(m - 1, n), H_L(m, n));
      // Multiply G to the corresponding lines (m-1,m) in each matrix
      // Note: we only apply G to the nonzero cols [n:Ho.cols()-n-1], while
      // it is equivalent to applying G to the entire cols [0:Ho.cols()-1].
      (H_L.block(m - 1, n, 2, H_L.cols() - n)).applyOnTheLeft(0, 1, tempHo_GR.adjoint());
      (res.block(m - 1, 0, 2, 1)).applyOnTheLeft(0, 1, tempHo_GR.adjoint());
      (H_R.block(m - 1, 0, 2, H_R.cols())).applyOnTheLeft(0, 1, tempHo_GR.adjoint());
    }
  }

  // Separate into initializing and updating portions
  // 1. Invertible initializing system
  // (the top new_var_size rows, where H_L is now upper triangular and invertible)
  Eigen::MatrixXd Hxinit = H_R.block(0, 0, new_var_size, H_R.cols());
  Eigen::MatrixXd H_finit = H_L.block(0, 0, new_var_size, new_var_size);
  Eigen::VectorXd resinit = res.block(0, 0, new_var_size, 1);
  Eigen::MatrixXd Rinit = R.block(0, 0, new_var_size, new_var_size);

  // 2. Nullspace projected updating system
  // (the remaining rows no longer depend on the new variable)
  Eigen::MatrixXd Hup = H_R.block(new_var_size, 0, H_R.rows() - new_var_size, H_R.cols());
  Eigen::VectorXd resup = res.block(new_var_size, 0, res.rows() - new_var_size, 1);
  Eigen::MatrixXd Rup = R.block(new_var_size, new_var_size, R.rows() - new_var_size, R.rows() - new_var_size);

  //==========================================================
  //==========================================================
  // Do mahalanobis distance testing
  Eigen::MatrixXd P_up = get_marginal_covariance(state, H_order);
  assert(Rup.rows() == Hup.rows());
  assert(Hup.cols() == P_up.cols());
  Eigen::MatrixXd S = Hup * P_up * Hup.transpose() + Rup;
  double chi2 = resup.dot(S.llt().solve(resup));

  // Get what our threshold should be
  // NOTE(review): the degrees-of-freedom here is the FULL residual size res.rows(),
  // while chi2 above is computed from the projected residual resup
  // (resup.rows() == res.rows() - new_var_size) — confirm this mismatch is intentional
  boost::math::chi_squared chi_squared_dist(res.rows());
  double chi2_check = boost::math::quantile(chi_squared_dist, 0.95);
  if (chi2 > chi_2_mult * chi2_check) {
    return false;
  }

  //==========================================================
  //==========================================================
  // Finally, initialize it in our state
  StateHelper::initialize_invertible(state, new_variable, H_order, Hxinit, H_finit, Rinit, resinit);

  // Update with updating portion
  if (Hup.rows() > 0) {
    StateHelper::EKFUpdate(state, H_order, Hup, resup, Rup);
  }
  return true;
}
// Initializes a new variable from a system whose Jacobian H_L w.r.t. the new
// variable is square and invertible: computes the new variable's covariance
// and cross terms in closed form, augments the covariance, applies the
// (normally near-zero) correction H_L^{-1}*res, and appends the variable to
// the end of the state.
void StateHelper::initialize_invertible(std::shared_ptr<State> state, std::shared_ptr<Type> new_variable,
                                        const std::vector<std::shared_ptr<Type>> &H_order, const Eigen::MatrixXd &H_R,
                                        const Eigen::MatrixXd &H_L, const Eigen::MatrixXd &R, const Eigen::VectorXd &res) {

  // Check that this new variable is not already initialized
  if (std::find(state->_variables.begin(), state->_variables.end(), new_variable) != state->_variables.end()) {
    PRINT_ERROR("StateHelper::initialize_invertible() - Called on variable that is already in the state\n");
    PRINT_ERROR("StateHelper::initialize_invertible() - Found this variable at %d in covariance\n", new_variable->id());
    std::exit(EXIT_FAILURE);
  }

  // Check that we have isotropic noise (i.e. is diagonal and all the same value)
  // TODO: can we simplify this so it doesn't take as much time?
  assert(R.rows() == R.cols());
  assert(R.rows() > 0);
  for (int r = 0; r < R.rows(); r++) {
    for (int c = 0; c < R.cols(); c++) {
      if (r == c && R(0, 0) != R(r, c)) {
        PRINT_ERROR(RED "StateHelper::initialize_invertible() - Your noise is not isotropic!\n" RESET);
        PRINT_ERROR(RED "StateHelper::initialize_invertible() - Found a value of %.2f verses value of %.2f\n" RESET, R(r, c), R(0, 0));
        std::exit(EXIT_FAILURE);
      } else if (r != c && R(r, c) != 0.0) {
        PRINT_ERROR(RED "StateHelper::initialize_invertible() - Your noise is not diagonal!\n" RESET);
        PRINT_ERROR(RED "StateHelper::initialize_invertible() - Found a value of %.2f at row %d and column %d\n" RESET, R(r, c), r, c);
        std::exit(EXIT_FAILURE);
      }
    }
  }

  //==========================================================
  //==========================================================
  // Part of the Kalman Gain K = (P*H^T)*S^{-1} = M*S^{-1}
  assert(res.rows() == R.rows());
  assert(H_L.rows() == res.rows());
  assert(H_L.rows() == H_R.rows());
  Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->_Cov.rows(), res.rows());

  // Get the location in small jacobian for each measuring variable
  // (column offset of each H_order variable inside the compressed H_R)
  int current_it = 0;
  std::vector<int> H_id;
  for (const auto &meas_var : H_order) {
    H_id.push_back(current_it);
    current_it += meas_var->size();
  }

  //==========================================================
  //==========================================================
  // For each active variable find its M = P*H^T
  for (const auto &var : state->_variables) {
    // Sum up effect of each subjacobian= K_i= \sum_m (P_im Hm^T)
    Eigen::MatrixXd M_i = Eigen::MatrixXd::Zero(var->size(), res.rows());
    for (size_t i = 0; i < H_order.size(); i++) {
      std::shared_ptr<Type> meas_var = H_order.at(i);
      M_i += state->_Cov.block(var->id(), meas_var->id(), var->size(), meas_var->size()) *
             H_R.block(0, H_id[i], H_R.rows(), meas_var->size()).transpose();
    }
    M_a.block(var->id(), 0, var->size(), res.rows()) = M_i;
  }

  //==========================================================
  //==========================================================
  // Get covariance of this small jacobian
  Eigen::MatrixXd P_small = StateHelper::get_marginal_covariance(state, H_order);

  // M = H_R*Cov*H_R' + R
  // (only the upper triangle is formed; M is treated as self-adjoint below)
  Eigen::MatrixXd M(H_R.rows(), H_R.rows());
  M.triangularView<Eigen::Upper>() = H_R * P_small * H_R.transpose();
  M.triangularView<Eigen::Upper>() += R;

  // Covariance of the variable/landmark that will be initialized
  // P_LL = H_L^{-1} * (H_R*P*H_R' + R) * H_L^{-T}
  assert(H_L.rows() == H_L.cols());
  assert(H_L.rows() == new_variable->size());
  Eigen::MatrixXd H_Linv = H_L.inverse();
  Eigen::MatrixXd P_LL = H_Linv * M.selfadjointView<Eigen::Upper>() * H_Linv.transpose();

  // Augment the covariance matrix
  // (cross terms between existing state and the new variable, both sides, plus its own block)
  size_t oldSize = state->_Cov.rows();
  state->_Cov.conservativeResizeLike(Eigen::MatrixXd::Zero(oldSize + new_variable->size(), oldSize + new_variable->size()));
  state->_Cov.block(0, oldSize, oldSize, new_variable->size()).noalias() = -M_a * H_Linv.transpose();
  state->_Cov.block(oldSize, 0, new_variable->size(), oldSize) = state->_Cov.block(0, oldSize, oldSize, new_variable->size()).transpose();
  state->_Cov.block(oldSize, oldSize, new_variable->size(), new_variable->size()) = P_LL;

  // Update the variable that will be initialized (invertible systems can only update the new variable).
  // However this update should be almost zero if we already used a conditional Gauss-Newton to solve for the initial estimate
  new_variable->update(H_Linv * res);

  // Now collect results, and add it to the state variables
  new_variable->set_local_id(oldSize);
  state->_variables.push_back(new_variable);
  // std::stringstream ss;
  // ss << new_variable->id() << " init dx = " << (H_Linv * res).transpose() << std::endl;
  // PRINT_DEBUG(ss.str().c_str());
}
// Stochastically clones the current IMU pose into the sliding window at the
// current state timestamp. If camera-IMU time-offset calibration is enabled,
// the clone's cross-covariance is additionally augmented with the time-offset
// Jacobian (built from the given angular velocity and the IMU velocity).
void StateHelper::augment_clone(std::shared_ptr<State> state, Eigen::Matrix<double, 3, 1> last_w) {

  // We can't insert a clone that occured at the same timestamp!
  if (state->_clones_IMU.find(state->_timestamp) != state->_clones_IMU.end()) {
    PRINT_ERROR(RED "TRIED TO INSERT A CLONE AT THE SAME TIME AS AN EXISTING CLONE, EXITING!#!@#!@#\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Call on our cloner and add it to our vector of types
  // NOTE: this will clone the clone pose to the END of the covariance...
  std::shared_ptr<Type> posetemp = StateHelper::clone(state, state->_imu->pose());

  // Cast to a JPL pose type, check if valid
  std::shared_ptr<PoseJPL> pose = std::dynamic_pointer_cast<PoseJPL>(posetemp);
  if (pose == nullptr) {
    PRINT_ERROR(RED "INVALID OBJECT RETURNED FROM STATEHELPER CLONE, EXITING!#!@#!@#\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Append the new clone to our clone vector
  state->_clones_IMU[state->_timestamp] = pose;

  // If we are doing time calibration, then our clones are a function of the time offset
  // Logic is based on Mingyang Li and Anastasios I. Mourikis paper:
  // http://journals.sagepub.com/doi/pdf/10.1177/0278364913515286
  if (state->_options.do_calib_camera_timeoffset) {
    // Jacobian to augment by
    // (orientation part from the angular velocity, position part from the IMU velocity)
    Eigen::Matrix<double, 6, 1> dnc_dt = Eigen::MatrixXd::Zero(6, 1);
    dnc_dt.block(0, 0, 3, 1) = last_w;
    dnc_dt.block(3, 0, 3, 1) = state->_imu->vel();
    // Augment covariance with time offset Jacobian
    // (adds the time-offset cross terms into the clone's rows and columns)
    state->_Cov.block(0, pose->id(), state->_Cov.rows(), 6) +=
        state->_Cov.block(0, state->_calib_dt_CAMtoIMU->id(), state->_Cov.rows(), 1) * dnc_dt.transpose();
    state->_Cov.block(pose->id(), 0, 6, state->_Cov.rows()) +=
        dnc_dt * state->_Cov.block(state->_calib_dt_CAMtoIMU->id(), 0, 1, state->_Cov.rows());
  }
}

View File

@@ -0,0 +1,260 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_STATE_HELPER_H
#define OV_MSCKF_STATE_HELPER_H
#include "State.h"
#include "types/Landmark.h"
#include "utils/colors.h"
#include "utils/print.h"
#include <boost/math/distributions/chi_squared.hpp>
namespace ov_msckf {
/**
* @brief Helper which manipulates the State and its covariance.
*
* In general, this class has all the core logic for an Extended Kalman Filter (EKF)-based system.
* This has all functions that change the covariance along with addition and removing elements from the state.
* All functions here are static, and thus are self-contained so that in the future multiple states could be tracked and updated.
* We recommend you look directly at the code for this class for clarity on what exactly we are doing in each and the matching documentation
* pages.
*/
class StateHelper {

public:
  /**
   * @brief Performs EKF propagation of the state covariance.
   *
   * The mean of the state should already have been propagated, thus this just moves the covariance forward in time.
   * The new states that we are propagating the old covariance into should be **contiguous** in memory.
   * The user only needs to specify the sub-variables that this block is a function of.
   * \f[
   * \tilde{\mathbf{x}}' =
   * \begin{bmatrix}
   * \boldsymbol\Phi_1 &
   * \boldsymbol\Phi_2 &
   * \boldsymbol\Phi_3
   * \end{bmatrix}
   * \begin{bmatrix}
   * \tilde{\mathbf{x}}_1 \\
   * \tilde{\mathbf{x}}_2 \\
   * \tilde{\mathbf{x}}_3
   * \end{bmatrix}
   * +
   * \mathbf{n}
   * \f]
   *
   * @param state Pointer to state
   * @param order_NEW Contiguous variables that have evolved according to this state transition
   * @param order_OLD Variable ordering used in the state transition
   * @param Phi State transition matrix (size order_NEW by size order_OLD)
   * @param Q Additive state propagation noise matrix (size order_NEW by size order_NEW)
   */
  static void EKFPropagation(std::shared_ptr<State> state, const std::vector<std::shared_ptr<ov_type::Type>> &order_NEW,
                             const std::vector<std::shared_ptr<ov_type::Type>> &order_OLD, const Eigen::MatrixXd &Phi,
                             const Eigen::MatrixXd &Q);

  /**
   * @brief Performs EKF update of the state (see @ref linear-meas page)
   * @param state Pointer to state
   * @param H_order Variable ordering used in the compressed Jacobian
   * @param H Condensed Jacobian of updating measurement
   * @param res Residual of updating measurement
   * @param R Updating measurement covariance
   */
  static void EKFUpdate(std::shared_ptr<State> state, const std::vector<std::shared_ptr<ov_type::Type>> &H_order, const Eigen::MatrixXd &H,
                        const Eigen::VectorXd &res, const Eigen::MatrixXd &R);

  /**
   * @brief This will set the initial covariance of the specified state elements.
   * Will also ensure that proper cross-covariances are inserted.
   * @param state Pointer to state
   * @param covariance The covariance of the system state
   * @param order Order of the covariance matrix
   */
  static void set_initial_covariance(std::shared_ptr<State> state, const Eigen::MatrixXd &covariance,
                                     const std::vector<std::shared_ptr<ov_type::Type>> &order);

  /**
   * @brief For a given set of variables, this will calculate a smaller covariance.
   *
   * The result only includes the variables specified, with all cross-terms between them.
   * Thus the size of the return will be the summed dimension of all the passed variables.
   * Normal use for this is a chi-squared check before update (where you don't need the full covariance).
   *
   * @param state Pointer to state
   * @param small_variables Vector of variables whose marginal covariance is desired
   * @return marginal covariance of the passed variables
   */
  static Eigen::MatrixXd get_marginal_covariance(std::shared_ptr<State> state,
                                                 const std::vector<std::shared_ptr<ov_type::Type>> &small_variables);

  /**
   * @brief This gets the full covariance matrix.
   *
   * Should only be used during simulation as operations on this covariance will be slow.
   * This will return a copy, so this cannot be used to change the covariance by design.
   * Please use the other interface functions in the StateHelper to programmatically change the covariance.
   *
   * @param state Pointer to state
   * @return covariance of current state
   */
  static Eigen::MatrixXd get_full_covariance(std::shared_ptr<State> state);

  /**
   * @brief Marginalizes a variable, properly modifying the ordering/covariances in the state
   *
   * This function can support any Type variable out of the box.
   * Right now the marginalization of a sub-variable/type is not supported.
   * For example if you wanted to just marginalize the orientation of a PoseJPL, that isn't supported.
   * We will first remove the rows and columns corresponding to the type (i.e. do the marginalization).
   * After, we update all the type ids so that they take into account that the covariance has shrunk in parts of it.
   *
   * @param state Pointer to state
   * @param marg Pointer to variable to marginalize
   */
  static void marginalize(std::shared_ptr<State> state, std::shared_ptr<ov_type::Type> marg);

  /**
   * @brief Clones "variable to clone" and places it at end of covariance
   * @param state Pointer to state
   * @param variable_to_clone Pointer to variable that will be cloned
   */
  static std::shared_ptr<ov_type::Type> clone(std::shared_ptr<State> state, std::shared_ptr<ov_type::Type> variable_to_clone);

  /**
   * @brief Initializes new variable into covariance.
   *
   * Uses Givens to separate into updating and initializing systems (therefore system must be fed as isotropic).
   * If you are not isotropic first whiten your system (TODO: we should add a helper function to do this).
   * If your H_L Jacobian is already directly invertible, then just call initialize_invertible() instead of this function.
   * Please refer to @ref update-delay page for detailed derivation.
   *
   * @param state Pointer to state
   * @param new_variable Pointer to variable to be initialized
   * @param H_order Vector of pointers in order they are contained in the condensed state Jacobian
   * @param H_R Jacobian of initializing measurements wrt variables in H_order
   * @param H_L Jacobian of initializing measurements wrt new variable
   * @param R Covariance of initializing measurements (isotropic)
   * @param res Residual of initializing measurements
   * @param chi_2_mult Value we should multiply the chi2 threshold by (larger means more measurements will be accepted)
   */
  static bool initialize(std::shared_ptr<State> state, std::shared_ptr<ov_type::Type> new_variable,
                         const std::vector<std::shared_ptr<ov_type::Type>> &H_order, Eigen::MatrixXd &H_R, Eigen::MatrixXd &H_L,
                         Eigen::MatrixXd &R, Eigen::VectorXd &res, double chi_2_mult);

  /**
   * @brief Initializes new variable into covariance (H_L must be invertible)
   *
   * Please refer to @ref update-delay page for detailed derivation.
   * This is just the update assuming that H_L is invertible (and thus square) and isotropic noise.
   *
   * @param state Pointer to state
   * @param new_variable Pointer to variable to be initialized
   * @param H_order Vector of pointers in order they are contained in the condensed state Jacobian
   * @param H_R Jacobian of initializing measurements wrt variables in H_order
   * @param H_L Jacobian of initializing measurements wrt new variable (needs to be invertible)
   * @param R Covariance of initializing measurements
   * @param res Residual of initializing measurements
   */
  static void initialize_invertible(std::shared_ptr<State> state, std::shared_ptr<ov_type::Type> new_variable,
                                    const std::vector<std::shared_ptr<ov_type::Type>> &H_order, const Eigen::MatrixXd &H_R,
                                    const Eigen::MatrixXd &H_L, const Eigen::MatrixXd &R, const Eigen::VectorXd &res);

  /**
   * @brief Augment the state with a stochastic copy of the current IMU pose
   *
   * After propagation, normally we augment the state with a new clone that is at the new update timestep.
   * This augmentation clones the IMU pose and adds it to our state's clone map.
   * If we are doing time offset calibration we also make our cloning a function of the time offset.
   * Time offset logic is based on Mingyang Li and Anastasios I. Mourikis paper:
   * http://journals.sagepub.com/doi/pdf/10.1177/0278364913515286 We can write the current clone at the true imu base clock time as
   * follows: \f{align*}{
   * {}^{I_{t+t_d}}_G\bar{q} &= \begin{bmatrix}\frac{1}{2} {}^{I_{t+\hat{t}_d}}\boldsymbol\omega \tilde{t}_d \\
   * 1\end{bmatrix}\otimes{}^{I_{t+\hat{t}_d}}_G\bar{q} \\
   * {}^G\mathbf{p}_{I_{t+t_d}} &= {}^G\mathbf{p}_{I_{t+\hat{t}_d}} + {}^G\mathbf{v}_{I_{t+\hat{t}_d}}\tilde{t}_d
   * \f}
   * where we say that we have propagated our state up to the current estimated true imaging time for the current image,
   * \f${}^{I_{t+\hat{t}_d}}\boldsymbol\omega\f$ is the angular velocity at the end of propagation with biases removed.
   * This is off by some smaller error, so to get to the true imaging time in the imu base clock, we can append some small timeoffset
   * error. Thus the Jacobian in respect to our time offset during our cloning procedure is the following:
   * \f{align*}{
   * \frac{\partial {}^{I_{t+t_d}}_G\tilde{\boldsymbol\theta}}{\partial \tilde{t}_d} &= {}^{I_{t+\hat{t}_d}}\boldsymbol\omega \\
   * \frac{\partial {}^G\tilde{\mathbf{p}}_{I_{t+t_d}}}{\partial \tilde{t}_d} &= {}^G\mathbf{v}_{I_{t+\hat{t}_d}}
   * \f}
   *
   * @param state Pointer to state
   * @param last_w The estimated angular velocity at cloning time (used to estimate imu-cam time offset)
   */
  static void augment_clone(std::shared_ptr<State> state, Eigen::Matrix<double, 3, 1> last_w);

  /**
   * @brief Remove the oldest clone, if we have more than the max clone count!!
   *
   * This will marginalize the clone from our covariance, and remove it from our state.
   * This is mainly a helper function that we can call after each update.
   * It will marginalize the clone specified by State::margtimestep() which should return a clone timestamp.
   *
   * @param state Pointer to state
   */
  static void marginalize_old_clone(std::shared_ptr<State> state) {
    if ((int)state->_clones_IMU.size() > state->_options.max_clone_size) {
      double marginal_time = state->margtimestep();
      // margtimestep() should always return a valid (finite) clone timestamp here
      assert(marginal_time != INFINITY);
      StateHelper::marginalize(state, state->_clones_IMU.at(marginal_time));
      // Note that the marginalizer should have already deleted the clone
      // Thus we just need to remove the pointer to it from our state
      state->_clones_IMU.erase(marginal_time);
    }
  }

  /**
   * @brief Marginalize bad SLAM features
   * @param state Pointer to state
   */
  static void marginalize_slam(std::shared_ptr<State> state) {
    // Remove SLAM features that have their marginalization flag set
    // We also check that we do not remove any aruco tag landmarks
    // NOTE(review): ids <= 4 * max_aruco_features appear to be reserved for
    // aruco landmarks (presumably 4 corners per tag) — confirm against the
    // code that assigns landmark ids
    auto it0 = state->_features_SLAM.begin();
    while (it0 != state->_features_SLAM.end()) {
      if ((*it0).second->should_marg && (int)(*it0).first > 4 * state->_options.max_aruco_features) {
        StateHelper::marginalize(state, (*it0).second);
        it0 = state->_features_SLAM.erase(it0);
      } else {
        it0++;
      }
    }
  }

private:
  /**
   * All functions in this class are static.
   * Thus an instance of this class cannot be created.
   */
  StateHelper() {}
};
} // namespace ov_msckf
#endif // OV_MSCKF_STATE_HELPER_H

View File

@@ -0,0 +1,128 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_STATE_OPTIONS_H
#define OV_MSCKF_STATE_OPTIONS_H
#include "types/LandmarkRepresentation.h"
#include "utils/opencv_yaml_parse.h"
#include "utils/print.h"
#include <climits>
namespace ov_msckf {
/**
* @brief Struct which stores all our filter options
*/
struct StateOptions {

  /// Bool to determine whether or not to do first estimate Jacobians
  bool do_fej = true;

  /// Bool to determine whether or not to use imu message averaging
  bool imu_avg = false;

  /// Bool to determine if we should use Rk4 imu integration
  bool use_rk4_integration = true;

  /// Bool to determine whether or not to calibrate imu-to-camera pose
  bool do_calib_camera_pose = false;

  /// Bool to determine whether or not to calibrate camera intrinsics
  bool do_calib_camera_intrinsics = false;

  /// Bool to determine whether or not to calibrate camera to IMU time offset
  bool do_calib_camera_timeoffset = false;

  /// Max clone size of sliding window
  int max_clone_size = 11;

  /// Max number of estimated SLAM features
  int max_slam_features = 25;

  /// Max number of SLAM features we allow to be included in a single EKF update.
  int max_slam_in_update = 1000;

  /// Max number of MSCKF features we will use at a given image timestep.
  int max_msckf_in_update = 1000;

  /// Max number of estimated ARUCO features
  int max_aruco_features = 1024;

  /// Number of distinct cameras that we will observe features in
  int num_cameras = 1;

  /// What representation our features are in (msckf features)
  ov_type::LandmarkRepresentation::Representation feat_rep_msckf = ov_type::LandmarkRepresentation::Representation::GLOBAL_3D;

  /// What representation our features are in (slam features)
  ov_type::LandmarkRepresentation::Representation feat_rep_slam = ov_type::LandmarkRepresentation::Representation::GLOBAL_3D;

  /// What representation our features are in (aruco tag features)
  ov_type::LandmarkRepresentation::Representation feat_rep_aruco = ov_type::LandmarkRepresentation::Representation::GLOBAL_3D;

  /// Nice print function of what parameters we have loaded.
  /// NOTE: despite the name, when a parser is supplied this function first
  /// *loads* every option from the config (load-then-print), so callers rely
  /// on it for parsing as well as diagnostics.
  void print(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
    if (parser != nullptr) {
      parser->parse_config("use_fej", do_fej);
      parser->parse_config("use_imuavg", imu_avg);
      parser->parse_config("use_rk4int", use_rk4_integration);
      parser->parse_config("calib_cam_extrinsics", do_calib_camera_pose);
      parser->parse_config("calib_cam_intrinsics", do_calib_camera_intrinsics);
      parser->parse_config("calib_cam_timeoffset", do_calib_camera_timeoffset);
      parser->parse_config("max_clones", max_clone_size);
      parser->parse_config("max_slam", max_slam_features);
      parser->parse_config("max_slam_in_update", max_slam_in_update);
      parser->parse_config("max_msckf_in_update", max_msckf_in_update);
      parser->parse_config("num_aruco", max_aruco_features);
      parser->parse_config("max_cameras", num_cameras);

      // Landmark representations are stored as enums but configured as strings,
      // so round-trip through the string form (default -> parse -> enum)
      std::string rep1 = ov_type::LandmarkRepresentation::as_string(feat_rep_msckf);
      parser->parse_config("feat_rep_msckf", rep1);
      feat_rep_msckf = ov_type::LandmarkRepresentation::from_string(rep1);
      std::string rep2 = ov_type::LandmarkRepresentation::as_string(feat_rep_slam);
      parser->parse_config("feat_rep_slam", rep2);
      feat_rep_slam = ov_type::LandmarkRepresentation::from_string(rep2);
      std::string rep3 = ov_type::LandmarkRepresentation::as_string(feat_rep_aruco);
      parser->parse_config("feat_rep_aruco", rep3);
      feat_rep_aruco = ov_type::LandmarkRepresentation::from_string(rep3);
    }
    PRINT_DEBUG(" - use_fej: %d\n", do_fej);
    PRINT_DEBUG(" - use_imuavg: %d\n", imu_avg);
    PRINT_DEBUG(" - use_rk4int: %d\n", use_rk4_integration);
    PRINT_DEBUG(" - calib_cam_extrinsics: %d\n", do_calib_camera_pose);
    PRINT_DEBUG(" - calib_cam_intrinsics: %d\n", do_calib_camera_intrinsics);
    PRINT_DEBUG(" - calib_cam_timeoffset: %d\n", do_calib_camera_timeoffset);
    PRINT_DEBUG(" - max_clones: %d\n", max_clone_size);
    PRINT_DEBUG(" - max_slam: %d\n", max_slam_features);
    PRINT_DEBUG(" - max_slam_in_update: %d\n", max_slam_in_update);
    PRINT_DEBUG(" - max_msckf_in_update: %d\n", max_msckf_in_update);
    PRINT_DEBUG(" - max_aruco: %d\n", max_aruco_features);
    PRINT_DEBUG(" - max_cameras: %d\n", num_cameras);
    PRINT_DEBUG(" - feat_rep_msckf: %s\n", ov_type::LandmarkRepresentation::as_string(feat_rep_msckf).c_str());
    PRINT_DEBUG(" - feat_rep_slam: %s\n", ov_type::LandmarkRepresentation::as_string(feat_rep_slam).c_str());
    PRINT_DEBUG(" - feat_rep_aruco: %s\n", ov_type::LandmarkRepresentation::as_string(feat_rep_aruco).c_str());
  }
};
} // namespace ov_msckf
#endif // OV_MSCKF_STATE_OPTIONS_H

View File

@@ -0,0 +1,102 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <cmath>
#include <csignal>
#include <deque>
#include <iomanip>
#include <sstream>
#include <unistd.h>
#include <vector>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/filesystem.hpp>
#if ROS_AVAILABLE == 1
#include <ros/ros.h>
#endif
#include "core/VioManagerOptions.h"
#include "sim/Simulator.h"
using namespace ov_msckf;
// Define the function to be called when ctrl-c (SIGINT) is sent to process
void signal_callback_handler(int signum) {
  // Terminate immediately, reporting the received signal number as the exit status.
  std::exit(signum);
}
// Main function
// Main function
// Loads the YAML config, builds the simulator, then drains every simulated
// IMU and camera measurement, printing each one at DEBUG verbosity.
int main(int argc, char **argv) {

  // Ensure we have a path, if the user passes it then we should use it
  std::string config_path = "unset_path_to_config.yaml";
  if (argc > 1) {
    config_path = argv[1];
  }

#if ROS_AVAILABLE == 1
  // Launch our ros node (config path may also come from the parameter server)
  ros::init(argc, argv, "test_sim_meas");
  auto nh = std::make_shared<ros::NodeHandle>("~");
  nh->param<std::string>("config_path", config_path, config_path);
#endif

  // Load the config
  auto parser = std::make_shared<ov_core::YamlParser>(config_path);
#if ROS_AVAILABLE == 1
  parser->set_node_handler(nh);
#endif

  // Verbosity
  std::string verbosity = "INFO";
  parser->parse_config("verbosity", verbosity);
  ov_core::Printer::setPrintLevel(verbosity);

  // Create the simulator
  VioManagerOptions params;
  params.print_and_load(parser);
  params.print_and_load_simulation(parser);
  Simulator sim(params);

  // Continue to simulate until we have processed all the measurements
  signal(SIGINT, signal_callback_handler);
  while (sim.ok()) {

    // IMU: get the next simulated IMU measurement if we have it
    double time_imu;
    Eigen::Vector3d wm, am;
    bool hasimu = sim.get_next_imu(time_imu, wm, am);
    if (hasimu) {
      PRINT_DEBUG("new imu measurement = %0.15g | w = %0.3g | a = %0.3g\n", time_imu, wm.norm(), am.norm());
    }

    // CAM: get the next simulated camera uv measurements if we have them
    double time_cam;
    std::vector<int> camids;
    std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> feats;
    bool hascam = sim.get_next_cam(time_cam, camids, feats);
    if (hascam) {
      // BUGFIX: size() returns size_t, which must be printed with %zu.
      // Passing size_t for %u is undefined behavior on LP64 platforms.
      PRINT_DEBUG("new cam measurement = %0.15g | %zu cameras | uvs(0) = %zu \n", time_cam, camids.size(), feats.at(0).size());
    }
  }

  // Done!
  return EXIT_SUCCESS;
}
// NOTE: removed the stray ';' that followed the function body (empty declaration)

View File

@@ -0,0 +1,165 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <cmath>
#include <csignal>
#include <deque>
#include <sstream>
#include <unistd.h>
#include <vector>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/filesystem.hpp>
#if ROS_AVAILABLE == 1
#include <ros/ros.h>
#endif
#include "core/VioManagerOptions.h"
#include "sim/Simulator.h"
#include "utils/print.h"
using namespace ov_msckf;
// Define the function to be called when ctrl-c (SIGINT) is sent to process
void signal_callback_handler(int signum) {
  // Terminate immediately, reporting the received signal number as the exit status.
  std::exit(signum);
}
// Main function
int main(int argc, char **argv) {
// Register failure handler
signal(SIGINT, signal_callback_handler);
// Ensure we have a path, if the user passes it then we should use it
std::string config_path = "unset_path_to_config.yaml";
if (argc > 1) {
config_path = argv[1];
}
#if ROS_AVAILABLE == 1
// Launch our ros node
ros::init(argc, argv, "test_sim_repeat");
auto nh = std::make_shared<ros::NodeHandle>("~");
nh->param<std::string>("config_path", config_path, config_path);
#endif
// Load the config
auto parser = std::make_shared<ov_core::YamlParser>(config_path);
#if ROS_AVAILABLE == 1
parser->set_node_handler(nh);
#endif
// Verbosity
std::string verbosity = "INFO";
parser->parse_config("verbosity", verbosity);
ov_core::Printer::setPrintLevel(verbosity);
//===================================================
//===================================================
// Create the simulator
VioManagerOptions params1;
params1.print_and_load(parser);
params1.print_and_load_simulation(parser);
Simulator sim1(params1);
// Vector of stored measurements
std::vector<double> vec_imutime;
std::vector<double> vec_camtime;
std::vector<Eigen::Vector3d> vec_am;
std::vector<Eigen::Vector3d> vec_wm;
std::vector<std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>>> vec_feats;
// Continue to simulate until we have processed all the measurements
while (sim1.ok()) {
// IMU: get the next simulated IMU measurement if we have it
double time_imu;
Eigen::Vector3d wm, am;
bool hasimu = sim1.get_next_imu(time_imu, wm, am);
if (hasimu) {
vec_imutime.push_back(time_imu);
vec_am.push_back(am);
vec_wm.push_back(wm);
}
// CAM: get the next simulated camera uv measurements if we have them
double time_cam;
std::vector<int> camids;
std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> feats;
bool hascam = sim1.get_next_cam(time_cam, camids, feats);
if (hascam) {
vec_camtime.push_back(time_cam);
vec_feats.push_back(feats);
}
}
//===================================================
//===================================================
// Create the simulator
VioManagerOptions params2;
params2.print_and_load(parser);
params2.print_and_load_simulation(parser);
Simulator sim2(params2);
size_t ct_imu = 0;
size_t ct_cam = 0;
// Continue to simulate until we have processed all the measurements
while (sim2.ok()) {
// IMU: get the next simulated IMU measurement if we have it
double time_imu;
Eigen::Vector3d wm, am;
bool hasimu = sim2.get_next_imu(time_imu, wm, am);
if (hasimu) {
assert(time_imu == vec_imutime.at(ct_imu));
assert(wm(0) == vec_wm.at(ct_imu)(0));
assert(wm(1) == vec_wm.at(ct_imu)(1));
assert(wm(2) == vec_wm.at(ct_imu)(2));
assert(am(0) == vec_am.at(ct_imu)(0));
assert(am(1) == vec_am.at(ct_imu)(1));
assert(am(2) == vec_am.at(ct_imu)(2));
ct_imu++;
}
// CAM: get the next simulated camera uv measurements if we have them
double time_cam;
std::vector<int> camids;
std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> feats;
bool hascam = sim2.get_next_cam(time_cam, camids, feats);
if (hascam) {
assert(time_cam == vec_camtime.at(ct_cam));
for (size_t camid = 0; camid < feats.size(); camid++) {
for (size_t i = 0; i < feats.at(camid).size(); i++) {
assert(feats.at(camid).at(i).first == vec_feats.at(ct_cam).at(camid).at(i).first);
assert(feats.at(camid).at(i).second(0) == vec_feats.at(ct_cam).at(camid).at(i).second(0));
assert(feats.at(camid).at(i).second(1) == vec_feats.at(ct_cam).at(camid).at(i).second(1));
}
}
ct_cam++;
}
}
// Done!
PRINT_INFO("success! they all are the same!\n");
return EXIT_SUCCESS;
}

View File

@@ -0,0 +1,481 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "UpdaterHelper.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
// Computes the Jacobian of the feature's global 3D position with respect to its
// chosen state representation (returned in H_f). For anchored representations
// this additionally appends the Jacobians wrt the anchor clone pose (and, if
// calibrating, the IMU-to-camera extrinsics) to H_x, with the corresponding
// state variables pushed onto x_order.
void UpdaterHelper::get_feature_jacobian_representation(std::shared_ptr<State> state, UpdaterHelperFeature &feature, Eigen::MatrixXd &H_f,
                                                        std::vector<Eigen::MatrixXd> &H_x, std::vector<std::shared_ptr<Type>> &x_order) {

  // Global XYZ representation: the state IS the global position, so d(p_FinG)/d(state) = I
  if (feature.feat_representation == LandmarkRepresentation::Representation::GLOBAL_3D) {
    H_f.resize(3, 3);
    H_f.setIdentity();
    return;
  }

  // Global inverse depth representation (theta, phi, rho)
  if (feature.feat_representation == LandmarkRepresentation::Representation::GLOBAL_FULL_INVERSE_DEPTH) {

    // Get the feature linearization point (FEJ estimate if first-estimate Jacobians enabled)
    Eigen::Matrix<double, 3, 1> p_FinG = (state->_options.do_fej) ? feature.p_FinG_fej : feature.p_FinG;

    // Get inverse depth representation (should match what is in Landmark.cpp)
    double g_rho = 1 / p_FinG.norm();
    double g_phi = std::acos(g_rho * p_FinG(2));
    // double g_theta = std::asin(g_rho*p_FinG(1)/std::sin(g_phi));
    double g_theta = std::atan2(p_FinG(1), p_FinG(0));
    Eigen::Matrix<double, 3, 1> p_invFinG;
    p_invFinG(0) = g_theta;
    p_invFinG(1) = g_phi;
    p_invFinG(2) = g_rho;

    // Get inverse depth bearings
    double sin_th = std::sin(p_invFinG(0, 0));
    double cos_th = std::cos(p_invFinG(0, 0));
    double sin_phi = std::sin(p_invFinG(1, 0));
    double cos_phi = std::cos(p_invFinG(1, 0));
    double rho = p_invFinG(2, 0);

    // Construct the Jacobian d(p_FinG)/d(theta, phi, rho)
    H_f.resize(3, 3);
    H_f << -(1.0 / rho) * sin_th * sin_phi, (1.0 / rho) * cos_th * cos_phi, -(1.0 / (rho * rho)) * cos_th * sin_phi,
        (1.0 / rho) * cos_th * sin_phi, (1.0 / rho) * sin_th * cos_phi, -(1.0 / (rho * rho)) * sin_th * sin_phi, 0.0,
        -(1.0 / rho) * sin_phi, -(1.0 / (rho * rho)) * cos_phi;
    return;
  }

  //======================================================================
  //======================================================================
  //======================================================================
  // Everything below is an ANCHORED representation and needs a valid anchor frame

  // Assert that we have an anchor pose for this feature
  assert(feature.anchor_cam_id != -1);

  // Anchor pose orientation and position, and camera calibration for our anchor camera
  Eigen::Matrix3d R_ItoC = state->_calib_IMUtoCAM.at(feature.anchor_cam_id)->Rot();
  Eigen::Vector3d p_IinC = state->_calib_IMUtoCAM.at(feature.anchor_cam_id)->pos();
  Eigen::Matrix3d R_GtoI = state->_clones_IMU.at(feature.anchor_clone_timestamp)->Rot();
  Eigen::Vector3d p_IinG = state->_clones_IMU.at(feature.anchor_clone_timestamp)->pos();
  Eigen::Vector3d p_FinA = feature.p_FinA;

  // If I am doing FEJ, I should FEJ the anchor states (should we fej calibration???)
  // Also get the FEJ position of the feature if we are
  if (state->_options.do_fej) {
    // "Best" feature in the global frame (using current estimates)
    Eigen::Vector3d p_FinG_best = R_GtoI.transpose() * R_ItoC.transpose() * (feature.p_FinA - p_IinC) + p_IinG;
    // Transform the best into our anchor frame using FEJ anchor states
    R_GtoI = state->_clones_IMU.at(feature.anchor_clone_timestamp)->Rot_fej();
    p_IinG = state->_clones_IMU.at(feature.anchor_clone_timestamp)->pos_fej();
    p_FinA = (R_GtoI.transpose() * R_ItoC.transpose()).transpose() * (p_FinG_best - p_IinG) + p_IinC;
  }
  // Rotation from the anchor camera frame into the global frame
  Eigen::Matrix3d R_CtoG = R_GtoI.transpose() * R_ItoC.transpose();

  // Jacobian for our anchor pose (orientation then position blocks)
  Eigen::Matrix<double, 3, 6> H_anc;
  H_anc.block(0, 0, 3, 3).noalias() = -R_GtoI.transpose() * skew_x(R_ItoC.transpose() * (p_FinA - p_IinC));
  H_anc.block(0, 3, 3, 3).setIdentity();

  // Add anchor Jacobians to our return vector
  x_order.push_back(state->_clones_IMU.at(feature.anchor_clone_timestamp));
  H_x.push_back(H_anc);

  // Get calibration Jacobians (for anchor clone), only when estimating extrinsics
  if (state->_options.do_calib_camera_pose) {
    Eigen::Matrix<double, 3, 6> H_calib;
    H_calib.block(0, 0, 3, 3).noalias() = -R_CtoG * skew_x(p_FinA - p_IinC);
    H_calib.block(0, 3, 3, 3) = -R_CtoG;
    x_order.push_back(state->_calib_IMUtoCAM.at(feature.anchor_cam_id));
    H_x.push_back(H_calib);
  }

  // If we are doing anchored XYZ feature: global position is just a rotation of the anchored one
  if (feature.feat_representation == LandmarkRepresentation::Representation::ANCHORED_3D) {
    H_f = R_CtoG;
    return;
  }

  // If we are doing full inverse depth (theta, phi, rho) in the anchor frame
  if (feature.feat_representation == LandmarkRepresentation::Representation::ANCHORED_FULL_INVERSE_DEPTH) {

    // Get inverse depth representation (should match what is in Landmark.cpp)
    double a_rho = 1 / p_FinA.norm();
    double a_phi = std::acos(a_rho * p_FinA(2));
    double a_theta = std::atan2(p_FinA(1), p_FinA(0));
    Eigen::Matrix<double, 3, 1> p_invFinA;
    p_invFinA(0) = a_theta;
    p_invFinA(1) = a_phi;
    p_invFinA(2) = a_rho;

    // Using anchored inverse depth
    double sin_th = std::sin(p_invFinA(0, 0));
    double cos_th = std::cos(p_invFinA(0, 0));
    double sin_phi = std::sin(p_invFinA(1, 0));
    double cos_phi = std::cos(p_invFinA(1, 0));
    double rho = p_invFinA(2, 0);
    // assert(p_invFinA(2,0)>=0.0);

    // Jacobian of anchored 3D position wrt inverse depth parameters
    Eigen::Matrix<double, 3, 3> d_pfinA_dpinv;
    d_pfinA_dpinv << -(1.0 / rho) * sin_th * sin_phi, (1.0 / rho) * cos_th * cos_phi, -(1.0 / (rho * rho)) * cos_th * sin_phi,
        (1.0 / rho) * cos_th * sin_phi, (1.0 / rho) * sin_th * cos_phi, -(1.0 / (rho * rho)) * sin_th * sin_phi, 0.0,
        -(1.0 / rho) * sin_phi, -(1.0 / (rho * rho)) * cos_phi;
    // Chain rule: rotate the anchor-frame Jacobian into the global frame
    H_f = R_CtoG * d_pfinA_dpinv;
    return;
  }

  // If we are doing the MSCKF version of inverse depth (alpha, beta, rho)
  if (feature.feat_representation == LandmarkRepresentation::Representation::ANCHORED_MSCKF_INVERSE_DEPTH) {

    // Get inverse depth representation (should match what is in Landmark.cpp)
    Eigen::Matrix<double, 3, 1> p_invFinA_MSCKF;
    p_invFinA_MSCKF(0) = p_FinA(0) / p_FinA(2);
    p_invFinA_MSCKF(1) = p_FinA(1) / p_FinA(2);
    p_invFinA_MSCKF(2) = 1 / p_FinA(2);

    // Using the MSCKF version of inverse depth
    double alpha = p_invFinA_MSCKF(0, 0);
    double beta = p_invFinA_MSCKF(1, 0);
    double rho = p_invFinA_MSCKF(2, 0);

    // Jacobian of anchored 3D position wrt inverse depth parameters
    Eigen::Matrix<double, 3, 3> d_pfinA_dpinv;
    d_pfinA_dpinv << (1.0 / rho), 0.0, -(1.0 / (rho * rho)) * alpha, 0.0, (1.0 / rho), -(1.0 / (rho * rho)) * beta, 0.0, 0.0,
        -(1.0 / (rho * rho));
    // Chain rule: rotate the anchor-frame Jacobian into the global frame
    H_f = R_CtoG * d_pfinA_dpinv;
    return;
  }

  /// CASE: Estimate single depth of the feature using the initial bearing
  if (feature.feat_representation == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {

    // Get inverse depth representation (should match what is in Landmark.cpp)
    double rho = 1.0 / p_FinA(2);
    Eigen::Vector3d bearing = rho * p_FinA;

    // Jacobian of anchored 3D position wrt the single inverse depth parameter
    Eigen::Vector3d d_pfinA_drho;
    d_pfinA_drho << -(1.0 / (rho * rho)) * bearing;
    H_f = R_CtoG * d_pfinA_drho;
    return;
  }

  // Failure, invalid representation that is not programmed
  assert(false);
}
void UpdaterHelper::get_feature_jacobian_full(std::shared_ptr<State> state, UpdaterHelperFeature &feature, Eigen::MatrixXd &H_f,
                                              Eigen::MatrixXd &H_x, Eigen::VectorXd &res, std::vector<std::shared_ptr<Type>> &x_order) {
  // Stacks the Jacobians and residuals of every UV measurement of this feature
  // (across all cameras it was seen from) into one linear system:
  //   res = H_x * (x - xhat) + H_f * (f - fhat) + noise
  // x_order records which state variables the columns of H_x correspond to.
  // Total number of measurements for this feature
  int total_meas = 0;
  for (auto const &pair : feature.timestamps) {
    total_meas += (int)pair.second.size();
  }
  // Compute the size of the states involved with this feature
  // (clone poses, plus camera extrinsics/intrinsics if those are being calibrated)
  int total_hx = 0;
  std::unordered_map<std::shared_ptr<Type>, size_t> map_hx;
  for (auto const &pair : feature.timestamps) {
    // Our extrinsics and intrinsics
    std::shared_ptr<PoseJPL> calibration = state->_calib_IMUtoCAM.at(pair.first);
    std::shared_ptr<Vec> distortion = state->_cam_intrinsics.at(pair.first);
    // If doing calibration extrinsics
    if (state->_options.do_calib_camera_pose) {
      map_hx.insert({calibration, total_hx});
      x_order.push_back(calibration);
      total_hx += calibration->size();
    }
    // If doing calibration intrinsics
    if (state->_options.do_calib_camera_intrinsics) {
      map_hx.insert({distortion, total_hx});
      x_order.push_back(distortion);
      total_hx += distortion->size();
    }
    // Loop through all measurements for this specific camera
    for (size_t m = 0; m < feature.timestamps[pair.first].size(); m++) {
      // Add this clone if it is not added already
      std::shared_ptr<PoseJPL> clone_Ci = state->_clones_IMU.at(feature.timestamps[pair.first].at(m));
      if (map_hx.find(clone_Ci) == map_hx.end()) {
        map_hx.insert({clone_Ci, total_hx});
        x_order.push_back(clone_Ci);
        total_hx += clone_Ci->size();
      }
    }
  }
  // If we are using an anchored representation, make sure that the anchor is also added
  if (LandmarkRepresentation::is_relative_representation(feature.feat_representation)) {
    // Assert we have a clone
    assert(feature.anchor_cam_id != -1);
    // Add this anchor if it is not added already
    std::shared_ptr<PoseJPL> clone_Ai = state->_clones_IMU.at(feature.anchor_clone_timestamp);
    if (map_hx.find(clone_Ai) == map_hx.end()) {
      map_hx.insert({clone_Ai, total_hx});
      x_order.push_back(clone_Ai);
      total_hx += clone_Ai->size();
    }
    // Also add its calibration if we are doing calibration
    if (state->_options.do_calib_camera_pose) {
      // Add this anchor if it is not added already
      std::shared_ptr<PoseJPL> clone_calib = state->_calib_IMUtoCAM.at(feature.anchor_cam_id);
      if (map_hx.find(clone_calib) == map_hx.end()) {
        map_hx.insert({clone_calib, total_hx});
        x_order.push_back(clone_calib);
        total_hx += clone_calib->size();
      }
    }
  }
  //=========================================================================
  //=========================================================================
  // Calculate the position of this feature in the global frame
  // If anchored, then we need to calculate the position of the feature in the global
  // frame from the anchor clone pose and the anchor-camera extrinsics
  Eigen::Vector3d p_FinG = feature.p_FinG;
  if (LandmarkRepresentation::is_relative_representation(feature.feat_representation)) {
    // Assert that we have an anchor pose for this feature
    assert(feature.anchor_cam_id != -1);
    // Get calibration for our anchor camera
    Eigen::Matrix3d R_ItoC = state->_calib_IMUtoCAM.at(feature.anchor_cam_id)->Rot();
    Eigen::Vector3d p_IinC = state->_calib_IMUtoCAM.at(feature.anchor_cam_id)->pos();
    // Anchor pose orientation and position
    Eigen::Matrix3d R_GtoI = state->_clones_IMU.at(feature.anchor_clone_timestamp)->Rot();
    Eigen::Vector3d p_IinG = state->_clones_IMU.at(feature.anchor_clone_timestamp)->pos();
    // Feature in the global frame
    p_FinG = R_GtoI.transpose() * R_ItoC.transpose() * (feature.p_FinA - p_IinC) + p_IinG;
  }
  // Calculate the position of this feature in the global frame FEJ
  // If anchored, then we can use the "best" p_FinG since the value of p_FinA does not matter
  Eigen::Vector3d p_FinG_fej = feature.p_FinG_fej;
  if (LandmarkRepresentation::is_relative_representation(feature.feat_representation)) {
    p_FinG_fej = p_FinG;
  }
  //=========================================================================
  //=========================================================================
  // Allocate our residual and Jacobians
  // Each UV measurement contributes 2 rows; the single-inverse-depth representation
  // has a scalar (1 dof) feature error state, all other representations are 3 dof
  int c = 0;
  int jacobsize = (feature.feat_representation != LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) ? 3 : 1;
  res = Eigen::VectorXd::Zero(2 * total_meas);
  H_f = Eigen::MatrixXd::Zero(2 * total_meas, jacobsize);
  H_x = Eigen::MatrixXd::Zero(2 * total_meas, total_hx);
  // Derivative of p_FinG in respect to feature representation.
  // This only needs to be computed once and thus we pull it out of the loop
  Eigen::MatrixXd dpfg_dlambda;
  std::vector<Eigen::MatrixXd> dpfg_dx;
  std::vector<std::shared_ptr<Type>> dpfg_dx_order;
  UpdaterHelper::get_feature_jacobian_representation(state, feature, dpfg_dlambda, dpfg_dx, dpfg_dx_order);
  // Assert that all the ones in our order are already in our local jacobian mapping
  for (auto &type : dpfg_dx_order) {
    assert(map_hx.find(type) != map_hx.end());
  }
  // Loop through each camera for this feature
  for (auto const &pair : feature.timestamps) {
    // Our calibration between the IMU and CAMi frames
    std::shared_ptr<Vec> distortion = state->_cam_intrinsics.at(pair.first);
    std::shared_ptr<PoseJPL> calibration = state->_calib_IMUtoCAM.at(pair.first);
    Eigen::Matrix3d R_ItoC = calibration->Rot();
    Eigen::Vector3d p_IinC = calibration->pos();
    // Loop through all measurements for this specific camera
    for (size_t m = 0; m < feature.timestamps[pair.first].size(); m++) {
      //=========================================================================
      //=========================================================================
      // Get current IMU clone state
      std::shared_ptr<PoseJPL> clone_Ii = state->_clones_IMU.at(feature.timestamps[pair.first].at(m));
      Eigen::Matrix3d R_GtoIi = clone_Ii->Rot();
      Eigen::Vector3d p_IiinG = clone_Ii->pos();
      // Get current feature in the IMU
      Eigen::Vector3d p_FinIi = R_GtoIi * (p_FinG - p_IiinG);
      // Project the current feature into the current frame of reference
      Eigen::Vector3d p_FinCi = R_ItoC * p_FinIi + p_IinC;
      Eigen::Vector2d uv_norm;
      uv_norm << p_FinCi(0) / p_FinCi(2), p_FinCi(1) / p_FinCi(2);
      // Distort the normalized coordinates (radtan or fisheye)
      Eigen::Vector2d uv_dist;
      uv_dist = state->_cam_intrinsics_cameras.at(pair.first)->distort_d(uv_norm);
      // Our residual: measured raw pixel minus predicted (distorted) projection
      Eigen::Vector2d uv_m;
      uv_m << (double)feature.uvs[pair.first].at(m)(0), (double)feature.uvs[pair.first].at(m)(1);
      res.block(2 * c, 0, 2, 1) = uv_m - uv_dist;
      //=========================================================================
      //=========================================================================
      // If we are doing first estimate Jacobians, then overwrite with the first estimates
      // (the residual above still uses the current best estimates)
      if (state->_options.do_fej) {
        R_GtoIi = clone_Ii->Rot_fej();
        p_IiinG = clone_Ii->pos_fej();
        // R_ItoC = calibration->Rot_fej();
        // p_IinC = calibration->pos_fej();
        p_FinIi = R_GtoIi * (p_FinG_fej - p_IiinG);
        p_FinCi = R_ItoC * p_FinIi + p_IinC;
        // uv_norm << p_FinCi(0)/p_FinCi(2),p_FinCi(1)/p_FinCi(2);
        // cam_d = state->get_intrinsics_CAM(pair.first)->fej();
      }
      // Compute Jacobians in respect to normalized image coordinates and possibly the camera intrinsics
      Eigen::MatrixXd dz_dzn, dz_dzeta;
      state->_cam_intrinsics_cameras.at(pair.first)->compute_distort_jacobian(uv_norm, dz_dzn, dz_dzeta);
      // Normalized coordinates in respect to projection function (pinhole projection Jacobian)
      Eigen::MatrixXd dzn_dpfc = Eigen::MatrixXd::Zero(2, 3);
      dzn_dpfc << 1 / p_FinCi(2), 0, -p_FinCi(0) / (p_FinCi(2) * p_FinCi(2)), 0, 1 / p_FinCi(2), -p_FinCi(1) / (p_FinCi(2) * p_FinCi(2));
      // Derivative of p_FinCi in respect to p_FinIi
      Eigen::MatrixXd dpfc_dpfg = R_ItoC * R_GtoIi;
      // Derivative of p_FinCi in respect to camera clone state
      Eigen::MatrixXd dpfc_dclone = Eigen::MatrixXd::Zero(3, 6);
      dpfc_dclone.block(0, 0, 3, 3).noalias() = R_ItoC * skew_x(p_FinIi);
      dpfc_dclone.block(0, 3, 3, 3) = -dpfc_dpfg;
      //=========================================================================
      //=========================================================================
      // Precompute some matrices
      Eigen::MatrixXd dz_dpfc = dz_dzn * dzn_dpfc;
      Eigen::MatrixXd dz_dpfg = dz_dpfc * dpfc_dpfg;
      // CHAINRULE: get the total feature Jacobian
      H_f.block(2 * c, 0, 2, H_f.cols()).noalias() = dz_dpfg * dpfg_dlambda;
      // CHAINRULE: get state clone Jacobian
      H_x.block(2 * c, map_hx[clone_Ii], 2, clone_Ii->size()).noalias() = dz_dpfc * dpfc_dclone;
      // CHAINRULE: loop through all extra states and add their contribution
      // NOTE: we add (+=) the Jacobian here as we might be in the anchoring pose for this measurement
      for (size_t i = 0; i < dpfg_dx_order.size(); i++) {
        H_x.block(2 * c, map_hx[dpfg_dx_order.at(i)], 2, dpfg_dx_order.at(i)->size()).noalias() += dz_dpfg * dpfg_dx.at(i);
      }
      //=========================================================================
      //=========================================================================
      // Derivative of p_FinCi in respect to camera calibration (R_ItoC, p_IinC)
      if (state->_options.do_calib_camera_pose) {
        // Calculate the Jacobian
        Eigen::MatrixXd dpfc_dcalib = Eigen::MatrixXd::Zero(3, 6);
        dpfc_dcalib.block(0, 0, 3, 3) = skew_x(p_FinCi - p_IinC);
        dpfc_dcalib.block(0, 3, 3, 3) = Eigen::Matrix<double, 3, 3>::Identity();
        // Chainrule it and add it to the big jacobian
        H_x.block(2 * c, map_hx[calibration], 2, calibration->size()).noalias() += dz_dpfc * dpfc_dcalib;
      }
      // Derivative of measurement in respect to distortion parameters
      if (state->_options.do_calib_camera_intrinsics) {
        H_x.block(2 * c, map_hx[distortion], 2, distortion->size()) = dz_dzeta;
      }
      // Move the Jacobian and residual index forward
      c++;
    }
  }
}
void UpdaterHelper::nullspace_project_inplace(Eigen::MatrixXd &H_f, Eigen::MatrixXd &H_x, Eigen::VectorXd &res) {
  // Annihilate H_f column-by-column with Givens rotations, applying the identical
  // rotations to H_x and res so the system [res = Hx*dx + Hf*df + n] stays consistent.
  // Based on "Matrix Computations 4th Edition by Golub and Van Loan",
  // page 252, Algorithm 5.2.4 (their "matlab" indices are shifted down by one here).
  Eigen::JacobiRotation<double> givens;
  for (int col = 0; col < H_f.cols(); ++col) {
    for (int row = (int)H_f.rows() - 1; row > col; row--) {
      // Rotation G chosen so that H_f(row, col) becomes zero
      givens.makeGivens(H_f(row - 1, col), H_f(row, col));
      // Rotate the row pair (row-1, row) of every matrix in the system.
      // Only columns [col:end] of H_f can be nonzero, so rotating just that
      // sub-block is equivalent to rotating the full rows.
      (H_f.block(row - 1, col, 2, H_f.cols() - col)).applyOnTheLeft(0, 1, givens.adjoint());
      (H_x.block(row - 1, 0, 2, H_x.cols())).applyOnTheLeft(0, 1, givens.adjoint());
      (res.block(row - 1, 0, 2, 1)).applyOnTheLeft(0, 1, givens.adjoint());
    }
  }
  // The first H_f.cols() rows now carry all dependence on the feature; the rows
  // below them span the left nullspace of H_f (max rank of H_f is its column count).
  // NOTE: eval() is required since the source block aliases its destination!
  H_x = H_x.block(H_f.cols(), 0, H_x.rows() - H_f.cols(), H_x.cols()).eval();
  res = res.block(H_f.cols(), 0, res.rows() - H_f.cols(), res.cols()).eval();
  // Sanity check
  assert(H_x.rows() == res.rows());
}
void UpdaterHelper::measurement_compress_inplace(Eigen::MatrixXd &H_x, Eigen::VectorXd &res) {
  // A "fat" system (rows <= cols) already has no redundancy to remove
  if (H_x.rows() <= H_x.cols())
    return;
  // Upper-triangularize H_x with Givens rotations (thin QR), applying the same
  // rotations to res so information is preserved.
  // Based on "Matrix Computations 4th Edition by Golub and Van Loan",
  // page 252, Algorithm 5.2.4 (their "matlab" indices are shifted down by one here).
  Eigen::JacobiRotation<double> givens;
  for (int col = 0; col < H_x.cols(); col++) {
    for (int row = (int)H_x.rows() - 1; row > col; row--) {
      // Rotation G chosen so that H_x(row, col) becomes zero
      givens.makeGivens(H_x(row - 1, col), H_x(row, col));
      // Rotate the row pair (row-1, row); only columns [col:end] can be nonzero,
      // which is equivalent to rotating the entire rows.
      (H_x.block(row - 1, col, 2, H_x.cols() - col)).applyOnTheLeft(0, 1, givens.adjoint());
      (res.block(row - 1, 0, 2, 1)).applyOnTheLeft(0, 1, givens.adjoint());
    }
  }
  // All information is now packed into the top min(rows, cols) rows
  // (with the early return above this is always the column count)
  int keep = std::min(H_x.rows(), H_x.cols());
  // Shrink the Jacobian and residual down to the compressed system
  assert(keep <= H_x.rows());
  H_x.conservativeResize(keep, H_x.cols());
  res.conservativeResize(keep, res.cols());
}

View File

@@ -0,0 +1,139 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_UPDATER_HELPER_H
#define OV_MSCKF_UPDATER_HELPER_H
#include <Eigen/Eigen>
#include "feat/Feature.h"
#include "state/State.h"
#include "state/StateOptions.h"
#include "types/LandmarkRepresentation.h"
#include "utils/colors.h"
#include "utils/quat_ops.h"
namespace ov_msckf {
/**
* @brief Class that has helper functions for our updaters.
*
* Can compute the Jacobian for a single feature representation.
* This will create the Jacobian based on what representation our state is in.
* If we are using the anchor representation then we also have additional Jacobians in respect to the anchor state.
* Also has functions such as nullspace projection and full jacobian construction.
* For derivations look at @ref update-feat page which has detailed equations.
*
*/
class UpdaterHelper {
public:
  /**
   * @brief Feature object that our UpdaterHelper leverages, has all measurements and means
   *
   * Plain-data container filled in by the updaters (e.g. from an ov_core::Feature)
   * before the Jacobian construction functions below are called.
   */
  struct UpdaterHelperFeature {
    /// Unique ID of this feature
    size_t featid;
    /// UV coordinates that this feature has been seen from (mapped by camera ID)
    std::unordered_map<size_t, std::vector<Eigen::VectorXf>> uvs;
    /// UV normalized coordinates that this feature has been seen from (mapped by camera ID)
    std::unordered_map<size_t, std::vector<Eigen::VectorXf>> uvs_norm;
    /// Timestamps of each UV measurement (mapped by camera ID)
    std::unordered_map<size_t, std::vector<double>> timestamps;
    /// What representation our feature is in
    ov_type::LandmarkRepresentation::Representation feat_representation;
    /// What camera ID our pose is anchored in!! By default the first measurement is the anchor.
    /// Remains -1 if the representation is not anchored.
    int anchor_cam_id = -1;
    /// Timestamp of anchor clone (-1 if not anchored)
    double anchor_clone_timestamp = -1;
    /// Triangulated position of this feature, in the anchor frame
    Eigen::Vector3d p_FinA;
    /// Triangulated position of this feature, in the anchor frame first estimate
    Eigen::Vector3d p_FinA_fej;
    /// Triangulated position of this feature, in the global frame
    Eigen::Vector3d p_FinG;
    /// Triangulated position of this feature, in the global frame first estimate
    Eigen::Vector3d p_FinG_fej;
  };
  /**
   * @brief This gets the feature and state Jacobian in respect to the feature representation
   *
   * @param[in] state State of the filter system
   * @param[in] feature Feature we want to get Jacobians of (must have feature means)
   * @param[out] H_f Jacobians in respect to the feature error state (will be either 3x3 or 3x1 for single depth)
   * @param[out] H_x Extra Jacobians in respect to the state (for example anchored pose)
   * @param[out] x_order Extra variables our extra Jacobian has (for example anchored pose)
   */
  static void get_feature_jacobian_representation(std::shared_ptr<State> state, UpdaterHelperFeature &feature, Eigen::MatrixXd &H_f,
                                                  std::vector<Eigen::MatrixXd> &H_x, std::vector<std::shared_ptr<ov_type::Type>> &x_order);
  /**
   * @brief Will construct the "stacked" Jacobians for a single feature from all its measurements
   *
   * Builds the full linear system res = H_x*(x-xhat) + H_f*(f-fhat) + n with two
   * rows per UV measurement across all cameras this feature was seen from.
   *
   * @param[in] state State of the filter system
   * @param[in] feature Feature we want to get Jacobians of (must have feature means)
   * @param[out] H_f Jacobians in respect to the feature error state
   * @param[out] H_x Extra Jacobians in respect to the state (for example anchored pose)
   * @param[out] res Measurement residual for this feature
   * @param[out] x_order Extra variables our extra Jacobian has (for example anchored pose)
   */
  static void get_feature_jacobian_full(std::shared_ptr<State> state, UpdaterHelperFeature &feature, Eigen::MatrixXd &H_f,
                                        Eigen::MatrixXd &H_x, Eigen::VectorXd &res, std::vector<std::shared_ptr<ov_type::Type>> &x_order);
  /**
   * @brief This will project the left nullspace of H_f onto the linear system.
   *
   * Please see the @ref update-null for details on how this works.
   * This is the MSCKF nullspace projection which removes the dependency on the feature state.
   * Note that this is done **in place** so all matrices will be different after a function call.
   *
   * @param H_f Jacobian with nullspace we want to project onto the system [res = Hx*(x-xhat)+Hf(f-fhat)+n]
   * @param H_x State jacobian
   * @param res Measurement residual
   */
  static void nullspace_project_inplace(Eigen::MatrixXd &H_f, Eigen::MatrixXd &H_x, Eigen::VectorXd &res);
  /**
   * @brief This will perform measurement compression
   *
   * Please see the @ref update-compress for details on how this works.
   * Note that this is done **in place** so all matrices will be different after a function call.
   * No-op if the system has no more rows than columns.
   *
   * @param H_x State jacobian
   * @param res Measurement residual
   */
  static void measurement_compress_inplace(Eigen::MatrixXd &H_x, Eigen::VectorXd &res);
};
} // namespace ov_msckf
#endif // OV_MSCKF_UPDATER_HELPER_H

View File

@@ -0,0 +1,265 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "UpdaterMSCKF.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
void UpdaterMSCKF::update(std::shared_ptr<State> state, std::vector<std::shared_ptr<Feature>> &feature_vec) {
  // Given tracked features, this (1) cleans measurements to valid clone times,
  // (2) triangulates each feature, (3) builds the stacked linear system with the
  // feature nullspace projected out while chi2-gating each feature, (4) compresses
  // the measurements, and (5) performs a single EKF update.
  // Features that fail any stage are flagged to_delete and erased from feature_vec;
  // all remaining features are also flagged to_delete after being consumed.
  // Return if no features
  if (feature_vec.empty())
    return;
  // Start timing
  boost::posix_time::ptime rT0, rT1, rT2, rT3, rT4, rT5;
  rT0 = boost::posix_time::microsec_clock::local_time();
  // 0. Get all timestamps our clones are at (and thus valid measurement times)
  std::vector<double> clonetimes;
  for (const auto &clone_imu : state->_clones_IMU) {
    clonetimes.emplace_back(clone_imu.first);
  }
  // 1. Clean all feature measurements and make sure they all have valid clone times
  auto it0 = feature_vec.begin();
  while (it0 != feature_vec.end()) {
    // Clean the feature
    (*it0)->clean_old_measurements(clonetimes);
    // Count how many measurements
    int ct_meas = 0;
    for (const auto &pair : (*it0)->timestamps) {
      ct_meas += (*it0)->timestamps[pair.first].size();
    }
    // Remove if we don't have enough (need at least two views to triangulate)
    if (ct_meas < 2) {
      (*it0)->to_delete = true;
      it0 = feature_vec.erase(it0);
    } else {
      it0++;
    }
  }
  rT1 = boost::posix_time::microsec_clock::local_time();
  // 2. Create vector of cloned *CAMERA* poses at each of our clone timesteps
  std::unordered_map<size_t, std::unordered_map<double, FeatureInitializer::ClonePose>> clones_cam;
  for (const auto &clone_calib : state->_calib_IMUtoCAM) {
    // For this camera, create the vector of camera poses
    std::unordered_map<double, FeatureInitializer::ClonePose> clones_cami;
    for (const auto &clone_imu : state->_clones_IMU) {
      // Get current camera pose
      Eigen::Matrix<double, 3, 3> R_GtoCi = clone_calib.second->Rot() * clone_imu.second->Rot();
      Eigen::Matrix<double, 3, 1> p_CioinG = clone_imu.second->pos() - R_GtoCi.transpose() * clone_calib.second->pos();
      // Append to our map
      clones_cami.insert({clone_imu.first, FeatureInitializer::ClonePose(R_GtoCi, p_CioinG)});
    }
    // Append to our map
    clones_cam.insert({clone_calib.first, clones_cami});
  }
  // 3. Try to triangulate all MSCKF or new SLAM features that have measurements
  auto it1 = feature_vec.begin();
  while (it1 != feature_vec.end()) {
    // Triangulate the feature and remove if it fails
    bool success_tri = true;
    if (initializer_feat->config().triangulate_1d) {
      success_tri = initializer_feat->single_triangulation_1d(it1->get(), clones_cam);
    } else {
      success_tri = initializer_feat->single_triangulation(it1->get(), clones_cam);
    }
    // Gauss-newton refine the feature
    bool success_refine = true;
    if (initializer_feat->config().refine_features) {
      success_refine = initializer_feat->single_gaussnewton(it1->get(), clones_cam);
    }
    // Remove the feature if not a success
    if (!success_tri || !success_refine) {
      (*it1)->to_delete = true;
      it1 = feature_vec.erase(it1);
      continue;
    }
    it1++;
  }
  rT2 = boost::posix_time::microsec_clock::local_time();
  // Calculate the max possible measurement size (2 rows per UV observation)
  size_t max_meas_size = 0;
  for (size_t i = 0; i < feature_vec.size(); i++) {
    for (const auto &pair : feature_vec.at(i)->timestamps) {
      max_meas_size += 2 * feature_vec.at(i)->timestamps[pair.first].size();
    }
  }
  // Calculate max possible state size (i.e. the size of our covariance)
  // NOTE: that when we have the single inverse depth representations, those are only 1dof in size
  size_t max_hx_size = state->max_covariance_size();
  for (auto &landmark : state->_features_SLAM) {
    max_hx_size -= landmark.second->size();
  }
  // Large Jacobian and residual of *all* features for this update
  Eigen::VectorXd res_big = Eigen::VectorXd::Zero(max_meas_size);
  Eigen::MatrixXd Hx_big = Eigen::MatrixXd::Zero(max_meas_size, max_hx_size);
  std::unordered_map<std::shared_ptr<Type>, size_t> Hx_mapping;
  std::vector<std::shared_ptr<Type>> Hx_order_big;
  size_t ct_jacob = 0;
  size_t ct_meas = 0;
  // 4. Compute linear system for each feature, nullspace project, and reject
  auto it2 = feature_vec.begin();
  while (it2 != feature_vec.end()) {
    // Convert our feature into our current format
    UpdaterHelper::UpdaterHelperFeature feat;
    feat.featid = (*it2)->featid;
    feat.uvs = (*it2)->uvs;
    feat.uvs_norm = (*it2)->uvs_norm;
    feat.timestamps = (*it2)->timestamps;
    // If we are using single inverse depth, then it is equivalent to using the msckf inverse depth
    feat.feat_representation = state->_options.feat_rep_msckf;
    if (state->_options.feat_rep_msckf == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {
      feat.feat_representation = LandmarkRepresentation::Representation::ANCHORED_MSCKF_INVERSE_DEPTH;
    }
    // Save the position and its fej value
    if (LandmarkRepresentation::is_relative_representation(feat.feat_representation)) {
      feat.anchor_cam_id = (*it2)->anchor_cam_id;
      feat.anchor_clone_timestamp = (*it2)->anchor_clone_timestamp;
      feat.p_FinA = (*it2)->p_FinA;
      feat.p_FinA_fej = (*it2)->p_FinA;
    } else {
      feat.p_FinG = (*it2)->p_FinG;
      feat.p_FinG_fej = (*it2)->p_FinG;
    }
    // Our return values (feature jacobian, state jacobian, residual, and order of state jacobian)
    Eigen::MatrixXd H_f;
    Eigen::MatrixXd H_x;
    Eigen::VectorXd res;
    std::vector<std::shared_ptr<Type>> Hx_order;
    // Get the Jacobian for this feature
    UpdaterHelper::get_feature_jacobian_full(state, feat, H_f, H_x, res, Hx_order);
    // Nullspace project (removes the feature state from the system)
    UpdaterHelper::nullspace_project_inplace(H_f, H_x, res);
    /// Chi2 distance check: res' * (H*P*H' + R)^-1 * res against the 95% quantile
    Eigen::MatrixXd P_marg = StateHelper::get_marginal_covariance(state, Hx_order);
    Eigen::MatrixXd S = H_x * P_marg * H_x.transpose();
    S.diagonal() += _options.sigma_pix_sq * Eigen::VectorXd::Ones(S.rows());
    double chi2 = res.dot(S.llt().solve(res));
    // Get our threshold (we precompute up to 500 but handle the case that it is more)
    double chi2_check;
    if (res.rows() < 500) {
      chi2_check = chi_squared_table[res.rows()];
    } else {
      boost::math::chi_squared chi_squared_dist(res.rows());
      chi2_check = boost::math::quantile(chi_squared_dist, 0.95);
      PRINT_WARNING(YELLOW "chi2_check over the residual limit - %d\n" RESET, (int)res.rows());
    }
    // Check if we should delete or not
    if (chi2 > _options.chi2_multipler * chi2_check) {
      (*it2)->to_delete = true;
      it2 = feature_vec.erase(it2);
      // PRINT_DEBUG("featid = %d\n", feat.featid);
      // PRINT_DEBUG("chi2 = %f > %f\n", chi2, _options.chi2_multipler*chi2_check);
      // std::stringstream ss;
      // ss << "res = " << std::endl << res.transpose() << std::endl;
      // PRINT_DEBUG(ss.str().c_str());
      continue;
    }
    // We are good!!! Append to our large H vector
    size_t ct_hx = 0;
    for (const auto &var : Hx_order) {
      // Ensure that this variable is in our Jacobian
      if (Hx_mapping.find(var) == Hx_mapping.end()) {
        Hx_mapping.insert({var, ct_jacob});
        Hx_order_big.push_back(var);
        ct_jacob += var->size();
      }
      // Append to our large Jacobian
      Hx_big.block(ct_meas, Hx_mapping[var], H_x.rows(), var->size()) = H_x.block(0, ct_hx, H_x.rows(), var->size());
      ct_hx += var->size();
    }
    // Append our residual and move forward
    res_big.block(ct_meas, 0, res.rows(), 1) = res;
    ct_meas += res.rows();
    it2++;
  }
  rT3 = boost::posix_time::microsec_clock::local_time();
  // We have appended all features to our Hx_big, res_big
  // Delete it so we do not reuse information
  for (size_t f = 0; f < feature_vec.size(); f++) {
    feature_vec[f]->to_delete = true;
  }
  // Return if we don't have anything and resize our matrices
  if (ct_meas < 1) {
    return;
  }
  assert(ct_meas <= max_meas_size);
  assert(ct_jacob <= max_hx_size);
  res_big.conservativeResize(ct_meas, 1);
  Hx_big.conservativeResize(ct_meas, ct_jacob);
  // 5. Perform measurement compression
  UpdaterHelper::measurement_compress_inplace(Hx_big, res_big);
  if (Hx_big.rows() < 1) {
    return;
  }
  rT4 = boost::posix_time::microsec_clock::local_time();
  // Our noise is isotropic, so make it here after our compression
  Eigen::MatrixXd R_big = _options.sigma_pix_sq * Eigen::MatrixXd::Identity(res_big.rows(), res_big.rows());
  // 6. With all good features update the state
  StateHelper::EKFUpdate(state, Hx_order_big, Hx_big, res_big, R_big);
  rT5 = boost::posix_time::microsec_clock::local_time();
  // Debug print timing information
  PRINT_DEBUG("[MSCKF-UP]: %.4f seconds to clean\n", (rT1 - rT0).total_microseconds() * 1e-6);
  PRINT_DEBUG("[MSCKF-UP]: %.4f seconds to triangulate\n", (rT2 - rT1).total_microseconds() * 1e-6);
  PRINT_DEBUG("[MSCKF-UP]: %.4f seconds create system (%d features)\n", (rT3 - rT2).total_microseconds() * 1e-6, (int)feature_vec.size());
  PRINT_DEBUG("[MSCKF-UP]: %.4f seconds compress system\n", (rT4 - rT3).total_microseconds() * 1e-6);
  PRINT_DEBUG("[MSCKF-UP]: %.4f seconds update state (%d size)\n", (rT5 - rT4).total_microseconds() * 1e-6, (int)res_big.rows());
  // BUGFIX: the total previously spanned (rT5 - rT1), silently excluding the
  // cleaning stage; (rT5 - rT0) makes it equal the sum of the stage timings above.
  PRINT_DEBUG("[MSCKF-UP]: %.4f seconds total\n", (rT5 - rT0).total_microseconds() * 1e-6);
}

View File

@@ -0,0 +1,103 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_UPDATER_MSCKF_H
#define OV_MSCKF_UPDATER_MSCKF_H
#include <Eigen/Eigen>
#include <iostream>
#include <sstream>
#include "feat/Feature.h"
#include "feat/FeatureInitializer.h"
#include "feat/FeatureInitializerOptions.h"
#include "state/State.h"
#include "state/StateHelper.h"
#include "types/LandmarkRepresentation.h"
#include "utils/colors.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
#include "UpdaterHelper.h"
#include "UpdaterOptions.h"
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/math/distributions/chi_squared.hpp>
namespace ov_msckf {
/**
* @brief Will compute the system for our sparse features and update the filter.
*
* This class is responsible for computing the entire linear system for all features that are going to be used in an update.
* This follows the original MSCKF, where we first triangulate features, we then nullspace project the feature Jacobian.
* After this we compress all the measurements to have an efficient update and update the state.
*/
class UpdaterMSCKF {
public:
  /**
   * @brief Default constructor for our MSCKF updater
   *
   * Our updater has a feature initializer which we use to initialize features as needed.
   * Also the options allow for one to tune the different parameters for update.
   *
   * @param options Updater options (include measurement noise value)
   * @param feat_init_options Feature initializer options
   */
  UpdaterMSCKF(UpdaterOptions &options, ov_core::FeatureInitializerOptions &feat_init_options) : _options(options) {
    // Save our raw pixel noise squared (derived once so update() does not recompute it)
    _options.sigma_pix_sq = std::pow(_options.sigma_pix, 2);
    // Save our feature initializer
    initializer_feat = std::unique_ptr<ov_core::FeatureInitializer>(new ov_core::FeatureInitializer(feat_init_options));
    // Initialize the chi squared test table with confidence level 0.95
    // Precomputed for residual sizes 1..499; update() falls back to boost for larger ones
    // https://github.com/KumarRobotics/msckf_vio/blob/050c50defa5a7fd9a04c1eed5687b405f02919b5/src/msckf_vio.cpp#L215-L221
    for (int i = 1; i < 500; i++) {
      boost::math::chi_squared chi_squared_dist(i);
      chi_squared_table[i] = boost::math::quantile(chi_squared_dist, 0.95);
    }
  }
  /**
   * @brief Given tracked features, this will try to use them to update the state.
   *
   * Features that cannot be used (too few measurements, triangulation failure,
   * chi2 rejection) are flagged for deletion and removed from the passed vector.
   *
   * @param state State of the filter
   * @param feature_vec Features that can be used for update
   */
  void update(std::shared_ptr<State> state, std::vector<std::shared_ptr<ov_core::Feature>> &feature_vec);
protected:
  /// Options used during update
  UpdaterOptions _options;
  /// Feature initializer class object
  std::unique_ptr<ov_core::FeatureInitializer> initializer_feat;
  /// Chi squared 95th percentile table (lookup would be size of residual)
  std::map<int, double> chi_squared_table;
};
} // namespace ov_msckf
#endif // OV_MSCKF_UPDATER_MSCKF_H

View File

@@ -0,0 +1,52 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_UPDATER_OPTIONS_H
#define OV_MSCKF_UPDATER_OPTIONS_H
#include "utils/print.h"
namespace ov_msckf {
/**
* @brief Struct which stores general updater options
*/
struct UpdaterOptions {

  /// What chi-squared multiplier we should apply when gating measurements
  /// (member is spelled "multipler" for compatibility with existing callers/configs)
  double chi2_multipler = 5;

  /// Noise sigma for our raw pixel measurements (pixels)
  double sigma_pix = 1;

  /// Covariance for our raw pixel measurements (sigma_pix squared; overwritten by the updater constructors)
  double sigma_pix_sq = 1;

  /// Nice print function of what parameters we have loaded
  /// (sigma_pix_sq is derived, so it is intentionally not printed)
  void print() {
    PRINT_DEBUG("    - chi2_multipler: %.1f\n", chi2_multipler);
    PRINT_DEBUG("    - sigma_pix: %.2f\n", sigma_pix);
  }
};
} // namespace ov_msckf
#endif // OV_MSCKF_UPDATER_OPTIONS_H

View File

@@ -0,0 +1,611 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "UpdaterSLAM.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
// Delayed initialization: triangulate candidate features and, if their linear
// system passes the initialization chi2 gate, insert them into the state as
// SLAM landmarks. Features that fail any stage are marked to_delete and erased
// from feature_vec; features successfully initialized are also marked to_delete
// (their raw measurements are consumed) but stay in feature_vec.
void UpdaterSLAM::delayed_init(std::shared_ptr<State> state, std::vector<std::shared_ptr<Feature>> &feature_vec) {

  // Return if no features
  if (feature_vec.empty())
    return;

  // Start timing
  boost::posix_time::ptime rT0, rT1, rT2, rT3;
  rT0 = boost::posix_time::microsec_clock::local_time();

  // 0. Get all timestamps our clones are at (and thus valid measurement times)
  std::vector<double> clonetimes;
  for (const auto &clone_imu : state->_clones_IMU) {
    clonetimes.emplace_back(clone_imu.first);
  }

  // 1. Clean all feature measurements and make sure they all have valid clone times
  auto it0 = feature_vec.begin();
  while (it0 != feature_vec.end()) {

    // Clean the feature (drops measurements taken at times we no longer have clones for)
    (*it0)->clean_old_measurements(clonetimes);

    // Count how many measurements remain across all cameras
    int ct_meas = 0;
    for (const auto &pair : (*it0)->timestamps) {
      ct_meas += (*it0)->timestamps[pair.first].size();
    }

    // Remove if we don't have enough (need at least two views to triangulate)
    if (ct_meas < 2) {
      (*it0)->to_delete = true;
      it0 = feature_vec.erase(it0);
    } else {
      it0++;
    }
  }
  rT1 = boost::posix_time::microsec_clock::local_time();

  // 2. Create vector of cloned *CAMERA* poses at each of our clone timesteps
  std::unordered_map<size_t, std::unordered_map<double, FeatureInitializer::ClonePose>> clones_cam;
  for (const auto &clone_calib : state->_calib_IMUtoCAM) {

    // For this camera, create the vector of camera poses
    std::unordered_map<double, FeatureInitializer::ClonePose> clones_cami;
    for (const auto &clone_imu : state->_clones_IMU) {

      // Get current camera pose by composing the IMU clone pose with the IMU-to-camera extrinsics
      Eigen::Matrix<double, 3, 3> R_GtoCi = clone_calib.second->Rot() * clone_imu.second->Rot();
      Eigen::Matrix<double, 3, 1> p_CioinG = clone_imu.second->pos() - R_GtoCi.transpose() * clone_calib.second->pos();

      // Append to our map
      clones_cami.insert({clone_imu.first, FeatureInitializer::ClonePose(R_GtoCi, p_CioinG)});
    }

    // Append to our map
    clones_cam.insert({clone_calib.first, clones_cami});
  }

  // 3. Try to triangulate all MSCKF or new SLAM features that have measurements
  auto it1 = feature_vec.begin();
  while (it1 != feature_vec.end()) {

    // Triangulate the feature and remove if it fails
    bool success_tri = true;
    if (initializer_feat->config().triangulate_1d) {
      success_tri = initializer_feat->single_triangulation_1d(it1->get(), clones_cam);
    } else {
      success_tri = initializer_feat->single_triangulation(it1->get(), clones_cam);
    }

    // Gauss-newton refine the feature (optional, controlled by initializer config)
    bool success_refine = true;
    if (initializer_feat->config().refine_features) {
      success_refine = initializer_feat->single_gaussnewton(it1->get(), clones_cam);
    }

    // Remove the feature if not a success
    if (!success_tri || !success_refine) {
      (*it1)->to_delete = true;
      it1 = feature_vec.erase(it1);
      continue;
    }
    it1++;
  }
  rT2 = boost::posix_time::microsec_clock::local_time();

  // 4. Compute linear system for each feature, nullspace project, and reject
  auto it2 = feature_vec.begin();
  while (it2 != feature_vec.end()) {

    // Convert our feature into our current format
    UpdaterHelper::UpdaterHelperFeature feat;
    feat.featid = (*it2)->featid;
    feat.uvs = (*it2)->uvs;
    feat.uvs_norm = (*it2)->uvs_norm;
    feat.timestamps = (*it2)->timestamps;

    // If we are using single inverse depth, then it is equivalent to using the msckf inverse depth
    // (aruco ids are allocated below max_aruco_features, so this selects the representation per feature class)
    auto feat_rep =
        ((int)feat.featid < state->_options.max_aruco_features) ? state->_options.feat_rep_aruco : state->_options.feat_rep_slam;
    feat.feat_representation = feat_rep;
    if (feat_rep == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {
      feat.feat_representation = LandmarkRepresentation::Representation::ANCHORED_MSCKF_INVERSE_DEPTH;
    }

    // Save the position and its fej value
    // (fej == current estimate here since the feature has just been triangulated)
    if (LandmarkRepresentation::is_relative_representation(feat.feat_representation)) {
      feat.anchor_cam_id = (*it2)->anchor_cam_id;
      feat.anchor_clone_timestamp = (*it2)->anchor_clone_timestamp;
      feat.p_FinA = (*it2)->p_FinA;
      feat.p_FinA_fej = (*it2)->p_FinA;
    } else {
      feat.p_FinG = (*it2)->p_FinG;
      feat.p_FinG_fej = (*it2)->p_FinG;
    }

    // Our return values (feature jacobian, state jacobian, residual, and order of state jacobian)
    Eigen::MatrixXd H_f;
    Eigen::MatrixXd H_x;
    Eigen::VectorXd res;
    std::vector<std::shared_ptr<Type>> Hx_order;

    // Get the Jacobian for this feature
    UpdaterHelper::get_feature_jacobian_full(state, feat, H_f, H_x, res, Hx_order);

    // If we are doing the single feature representation, then we need to remove the bearing portion
    // To do so, we project the bearing portion onto the state and depth Jacobians and the residual.
    // This allows us to directly initialize the feature as a depth-only feature
    if (feat_rep == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {

      // Append the Jacobian in respect to the depth of the feature
      // (last column of H_f is the inverse-depth component; it is moved onto the end of H_x)
      Eigen::MatrixXd H_xf = H_x;
      H_xf.conservativeResize(H_x.rows(), H_x.cols() + 1);
      H_xf.block(0, H_x.cols(), H_x.rows(), 1) = H_f.block(0, H_f.cols() - 1, H_f.rows(), 1);
      H_f.conservativeResize(H_f.rows(), H_f.cols() - 1);

      // Nullspace project the bearing portion
      // This takes into account that we have marginalized the bearing already
      // Thus this is crucial to ensuring estimator consistency as we are not taking the bearing to be true
      UpdaterHelper::nullspace_project_inplace(H_f, H_xf, res);

      // Split out the state portion and feature portion again after projection
      H_x = H_xf.block(0, 0, H_xf.rows(), H_xf.cols() - 1);
      H_f = H_xf.block(0, H_xf.cols() - 1, H_xf.rows(), 1);
    }

    // Create feature pointer (single inverse depth is size 1; all other representations are size 3
    // since we initialize the single inverse depth as a msckf anchored representation)
    int landmark_size = (feat_rep == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) ? 1 : 3;
    auto landmark = std::make_shared<Landmark>(landmark_size);
    landmark->_featid = feat.featid;
    landmark->_feat_representation = feat_rep;
    landmark->_unique_camera_id = (*it2)->anchor_cam_id;
    if (LandmarkRepresentation::is_relative_representation(feat.feat_representation)) {
      landmark->_anchor_cam_id = feat.anchor_cam_id;
      landmark->_anchor_clone_timestamp = feat.anchor_clone_timestamp;
      landmark->set_from_xyz(feat.p_FinA, false);
      landmark->set_from_xyz(feat.p_FinA_fej, true);
    } else {
      landmark->set_from_xyz(feat.p_FinG, false);
      landmark->set_from_xyz(feat.p_FinG_fej, true);
    }

    // Measurement noise matrix (isotropic pixel noise, aruco vs slam options)
    double sigma_pix_sq =
        ((int)feat.featid < state->_options.max_aruco_features) ? _options_aruco.sigma_pix_sq : _options_slam.sigma_pix_sq;
    Eigen::MatrixXd R = sigma_pix_sq * Eigen::MatrixXd::Identity(res.rows(), res.rows());

    // Try to initialize, delete new pointer if we failed
    // On success the raw feature is still marked to_delete: its measurements have been
    // consumed by the initialization and must not be reused in a later update.
    double chi2_multipler =
        ((int)feat.featid < state->_options.max_aruco_features) ? _options_aruco.chi2_multipler : _options_slam.chi2_multipler;
    if (StateHelper::initialize(state, landmark, Hx_order, H_x, H_f, R, res, chi2_multipler)) {
      state->_features_SLAM.insert({(*it2)->featid, landmark});
      (*it2)->to_delete = true;
      it2++;
    } else {
      (*it2)->to_delete = true;
      it2 = feature_vec.erase(it2);
    }
  }
  rT3 = boost::posix_time::microsec_clock::local_time();

  // Debug print timing information
  // NOTE(review): "total" is measured from rT1, so it excludes the cleaning stage — confirm intended
  if (!feature_vec.empty()) {
    PRINT_DEBUG("[SLAM-DELAY]: %.4f seconds to clean\n", (rT1 - rT0).total_microseconds() * 1e-6);
    PRINT_DEBUG("[SLAM-DELAY]: %.4f seconds to triangulate\n", (rT2 - rT1).total_microseconds() * 1e-6);
    PRINT_DEBUG("[SLAM-DELAY]: %.4f seconds initialize (%d features)\n", (rT3 - rT2).total_microseconds() * 1e-6, (int)feature_vec.size());
    PRINT_DEBUG("[SLAM-DELAY]: %.4f seconds total\n", (rT3 - rT1).total_microseconds() * 1e-6);
  }
}
// SLAM update: for each tracked feature already in the state, build its measurement
// Jacobian (including the landmark column), chi2-gate it, stack all accepted
// measurements into one big linear system, and perform a single EKF update.
// All processed features are marked to_delete so their measurements are not reused.
void UpdaterSLAM::update(std::shared_ptr<State> state, std::vector<std::shared_ptr<Feature>> &feature_vec) {

  // Return if no features
  if (feature_vec.empty())
    return;

  // Start timing
  boost::posix_time::ptime rT0, rT1, rT2, rT3;
  rT0 = boost::posix_time::microsec_clock::local_time();

  // 0. Get all timestamps our clones are at (and thus valid measurement times)
  std::vector<double> clonetimes;
  for (const auto &clone_imu : state->_clones_IMU) {
    clonetimes.emplace_back(clone_imu.first);
  }

  // 1. Clean all feature measurements and make sure they all have valid clone times
  auto it0 = feature_vec.begin();
  while (it0 != feature_vec.end()) {

    // Clean the feature (drops measurements at times we no longer have clones for)
    (*it0)->clean_old_measurements(clonetimes);

    // Count how many measurements remain across all cameras
    int ct_meas = 0;
    for (const auto &pair : (*it0)->timestamps) {
      ct_meas += (*it0)->timestamps[pair.first].size();
    }

    // Get the landmark and its representation
    // For single depth representation we need at least two measurement
    // This is because we do nullspace projection
    std::shared_ptr<Landmark> landmark = state->_features_SLAM.at((*it0)->featid);
    int required_meas = (landmark->_feat_representation == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) ? 2 : 1;

    // Remove if we don't have enough
    // (zero measurements: the feature is dead, mark to_delete; 1 measurement for single-depth:
    //  keep the feature alive for a future update, so it is erased here WITHOUT to_delete)
    if (ct_meas < 1) {
      (*it0)->to_delete = true;
      it0 = feature_vec.erase(it0);
    } else if (ct_meas < required_meas) {
      it0 = feature_vec.erase(it0);
    } else {
      it0++;
    }
  }
  rT1 = boost::posix_time::microsec_clock::local_time();

  // Calculate the max possible measurement size (2 residual rows per uv observation)
  size_t max_meas_size = 0;
  for (size_t i = 0; i < feature_vec.size(); i++) {
    for (const auto &pair : feature_vec.at(i)->timestamps) {
      max_meas_size += 2 * feature_vec.at(i)->timestamps[pair.first].size();
    }
  }

  // Calculate max possible state size (i.e. the size of our covariance)
  size_t max_hx_size = state->max_covariance_size();

  // Large Jacobian, residual, and measurement noise of *all* features for this update
  // (allocated at max size up-front, trimmed with conservativeResize at the end)
  Eigen::VectorXd res_big = Eigen::VectorXd::Zero(max_meas_size);
  Eigen::MatrixXd Hx_big = Eigen::MatrixXd::Zero(max_meas_size, max_hx_size);
  Eigen::MatrixXd R_big = Eigen::MatrixXd::Identity(max_meas_size, max_meas_size);
  std::unordered_map<std::shared_ptr<Type>, size_t> Hx_mapping;
  std::vector<std::shared_ptr<Type>> Hx_order_big;
  size_t ct_jacob = 0;
  size_t ct_meas = 0;

  // 4. Compute linear system for each feature, nullspace project, and reject
  auto it2 = feature_vec.begin();
  while (it2 != feature_vec.end()) {

    // Ensure we have the landmark and it is the same
    assert(state->_features_SLAM.find((*it2)->featid) != state->_features_SLAM.end());
    assert(state->_features_SLAM.at((*it2)->featid)->_featid == (*it2)->featid);

    // Get our landmark from the state
    std::shared_ptr<Landmark> landmark = state->_features_SLAM.at((*it2)->featid);

    // Convert the state landmark into our current format
    UpdaterHelper::UpdaterHelperFeature feat;
    feat.featid = (*it2)->featid;
    feat.uvs = (*it2)->uvs;
    feat.uvs_norm = (*it2)->uvs_norm;
    feat.timestamps = (*it2)->timestamps;

    // If we are using single inverse depth, then it is equivalent to using the msckf inverse depth
    feat.feat_representation = landmark->_feat_representation;
    if (landmark->_feat_representation == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {
      feat.feat_representation = LandmarkRepresentation::Representation::ANCHORED_MSCKF_INVERSE_DEPTH;
    }

    // Save the position and its fej value (fej kept separately for estimator consistency)
    if (LandmarkRepresentation::is_relative_representation(feat.feat_representation)) {
      feat.anchor_cam_id = landmark->_anchor_cam_id;
      feat.anchor_clone_timestamp = landmark->_anchor_clone_timestamp;
      feat.p_FinA = landmark->get_xyz(false);
      feat.p_FinA_fej = landmark->get_xyz(true);
    } else {
      feat.p_FinG = landmark->get_xyz(false);
      feat.p_FinG_fej = landmark->get_xyz(true);
    }

    // Our return values (feature jacobian, state jacobian, residual, and order of state jacobian)
    Eigen::MatrixXd H_f;
    Eigen::MatrixXd H_x;
    Eigen::VectorXd res;
    std::vector<std::shared_ptr<Type>> Hx_order;

    // Get the Jacobian for this feature
    UpdaterHelper::get_feature_jacobian_full(state, feat, H_f, H_x, res, Hx_order);

    // Place Jacobians in one big Jacobian, since the landmark is already in our state vector
    Eigen::MatrixXd H_xf = H_x;
    if (landmark->_feat_representation == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {

      // Append the Jacobian in respect to the depth of the feature
      // (only the last column of H_f — the inverse-depth component — is kept in the state)
      H_xf.conservativeResize(H_x.rows(), H_x.cols() + 1);
      H_xf.block(0, H_x.cols(), H_x.rows(), 1) = H_f.block(0, H_f.cols() - 1, H_f.rows(), 1);
      H_f.conservativeResize(H_f.rows(), H_f.cols() - 1);

      // Nullspace project the bearing portion
      // This takes into account that we have marginalized the bearing already
      // Thus this is crucial to ensuring estimator consistency as we are not taking the bearing to be true
      UpdaterHelper::nullspace_project_inplace(H_f, H_xf, res);

    } else {

      // Else we have the full feature in our state, so just append it
      H_xf.conservativeResize(H_x.rows(), H_x.cols() + H_f.cols());
      H_xf.block(0, H_x.cols(), H_x.rows(), H_f.cols()) = H_f;
    }

    // Append to our Jacobian order vector (landmark columns come last in H_xf)
    std::vector<std::shared_ptr<Type>> Hxf_order = Hx_order;
    Hxf_order.push_back(landmark);

    // Chi2 distance check: chi2 = r^T (H P H^T + R)^{-1} r
    Eigen::MatrixXd P_marg = StateHelper::get_marginal_covariance(state, Hxf_order);
    Eigen::MatrixXd S = H_xf * P_marg * H_xf.transpose();
    double sigma_pix_sq =
        ((int)feat.featid < state->_options.max_aruco_features) ? _options_aruco.sigma_pix_sq : _options_slam.sigma_pix_sq;
    S.diagonal() += sigma_pix_sq * Eigen::VectorXd::Ones(S.rows());
    double chi2 = res.dot(S.llt().solve(res));

    // Get our threshold (we precompute up to 500 but handle the case that it is more)
    double chi2_check;
    if (res.rows() < 500) {
      chi2_check = chi_squared_table[res.rows()];
    } else {
      boost::math::chi_squared chi_squared_dist(res.rows());
      chi2_check = boost::math::quantile(chi_squared_dist, 0.95);
      PRINT_WARNING(YELLOW "chi2_check over the residual limit - %d\n" RESET, (int)res.rows());
    }

    // Check if we should delete or not (reject outlier measurements)
    double chi2_multipler =
        ((int)feat.featid < state->_options.max_aruco_features) ? _options_aruco.chi2_multipler : _options_slam.chi2_multipler;
    if (chi2 > chi2_multipler * chi2_check) {
      if ((int)feat.featid < state->_options.max_aruco_features)
        PRINT_WARNING(YELLOW "[SLAM-UP]: rejecting aruco tag %d for chi2 thresh (%.3f > %.3f)\n" RESET, (int)feat.featid, chi2,
                      chi2_multipler * chi2_check);
      (*it2)->to_delete = true;
      it2 = feature_vec.erase(it2);
      continue;
    }

    // Debug print when we are going to update the aruco tags
    if ((int)feat.featid < state->_options.max_aruco_features) {
      PRINT_DEBUG("[SLAM-UP]: accepted aruco tag %d for chi2 thresh (%.3f < %.3f)\n", (int)feat.featid, chi2, chi2_multipler * chi2_check);
    }

    // We are good!!! Append to our large H vector
    // Each state variable gets a fixed column offset the first time it appears (Hx_mapping)
    size_t ct_hx = 0;
    for (const auto &var : Hxf_order) {

      // Ensure that this variable is in our Jacobian
      if (Hx_mapping.find(var) == Hx_mapping.end()) {
        Hx_mapping.insert({var, ct_jacob});
        Hx_order_big.push_back(var);
        ct_jacob += var->size();
      }

      // Append to our large Jacobian
      Hx_big.block(ct_meas, Hx_mapping[var], H_xf.rows(), var->size()) = H_xf.block(0, ct_hx, H_xf.rows(), var->size());
      ct_hx += var->size();
    }

    // Our isotropic measurement noise (per-feature sigma scales this feature's identity block)
    R_big.block(ct_meas, ct_meas, res.rows(), res.rows()) *= sigma_pix_sq;

    // Append our residual and move forward
    res_big.block(ct_meas, 0, res.rows(), 1) = res;
    ct_meas += res.rows();
    it2++;
  }
  rT2 = boost::posix_time::microsec_clock::local_time();

  // We have appended all features to our Hx_big, res_big
  // Delete it so we do not reuse information
  for (size_t f = 0; f < feature_vec.size(); f++) {
    feature_vec[f]->to_delete = true;
  }

  // Return if we don't have anything and resize our matrices
  if (ct_meas < 1) {
    return;
  }
  assert(ct_meas <= max_meas_size);
  assert(ct_jacob <= max_hx_size);
  res_big.conservativeResize(ct_meas, 1);
  Hx_big.conservativeResize(ct_meas, ct_jacob);
  R_big.conservativeResize(ct_meas, ct_meas);

  // 5. With all good SLAM features update the state
  StateHelper::EKFUpdate(state, Hx_order_big, Hx_big, res_big, R_big);
  rT3 = boost::posix_time::microsec_clock::local_time();

  // Debug print timing information
  // NOTE(review): "total" is measured from rT1, so it excludes the cleaning stage — confirm intended
  PRINT_DEBUG("[SLAM-UP]: %.4f seconds to clean\n", (rT1 - rT0).total_microseconds() * 1e-6);
  PRINT_DEBUG("[SLAM-UP]: %.4f seconds creating linear system\n", (rT2 - rT1).total_microseconds() * 1e-6);
  PRINT_DEBUG("[SLAM-UP]: %.4f seconds to update (%d feats of %d size)\n", (rT3 - rT2).total_microseconds() * 1e-6, (int)feature_vec.size(),
              (int)Hx_big.rows());
  PRINT_DEBUG("[SLAM-UP]: %.4f seconds total\n", (rT3 - rT1).total_microseconds() * 1e-6);
}
// Re-anchor any SLAM landmark whose anchor clone is about to be marginalized.
// The landmark keeps the same camera id and the same representation type;
// only the anchor clone is shifted to the newest state timestamp.
void UpdaterSLAM::change_anchors(std::shared_ptr<State> state) {

  // Nothing can be marginalized until the clone window is full
  if ((int)state->_clones_IMU.size() <= state->_options.max_clone_size) {
    return;
  }

  // Timestamp of the clone that will be marginalized next
  double marg_time = state->margtimestep();

  for (auto &landmark_pair : state->_features_SLAM) {
    auto &landmark = landmark_pair.second;

    // Global-frame representations carry no anchor clone, so they never need a change
    bool is_global = (landmark->_feat_representation == LandmarkRepresentation::Representation::GLOBAL_3D) ||
                     (landmark->_feat_representation == LandmarkRepresentation::Representation::GLOBAL_FULL_INVERSE_DEPTH);
    if (is_global)
      continue;

    // An anchor should never be older than the marginalization time
    assert(marg_time <= landmark->_anchor_clone_timestamp);

    // If anchored in the dying clone, move the anchor to the newest clone (same camera)
    if (landmark->_anchor_clone_timestamp == marg_time) {
      perform_anchor_change(state, landmark, state->_timestamp, landmark->_anchor_cam_id);
    }
  }
}
// Moves a landmark's anchor from its current clone/camera to a new one.
// Computes the feature position in the new anchor frame (for both the current
// estimate and its first-estimate/FEJ value), then propagates the covariance
// through the linearized change-of-variables Phi so the filter stays consistent.
void UpdaterSLAM::perform_anchor_change(std::shared_ptr<State> state, std::shared_ptr<Landmark> landmark, double new_anchor_timestamp,
                                        size_t new_cam_id) {

  // Assert that this is an anchored representation (global features have no anchor to change)
  assert(LandmarkRepresentation::is_relative_representation(landmark->_feat_representation));
  assert(landmark->_anchor_cam_id != -1);

  // Create current feature representation
  UpdaterHelper::UpdaterHelperFeature old_feat;
  old_feat.featid = landmark->_featid;
  old_feat.feat_representation = landmark->_feat_representation;
  old_feat.anchor_cam_id = landmark->_anchor_cam_id;
  old_feat.anchor_clone_timestamp = landmark->_anchor_clone_timestamp;
  old_feat.p_FinA = landmark->get_xyz(false);
  old_feat.p_FinA_fej = landmark->get_xyz(true);

  // Get Jacobians of p_FinG wrt old representation
  Eigen::MatrixXd H_f_old;
  std::vector<Eigen::MatrixXd> H_x_old;
  std::vector<std::shared_ptr<Type>> x_order_old;
  UpdaterHelper::get_feature_jacobian_representation(state, old_feat, H_f_old, H_x_old, x_order_old);

  // Create future feature representation (same representation type, new anchor frame)
  UpdaterHelper::UpdaterHelperFeature new_feat;
  new_feat.featid = landmark->_featid;
  new_feat.feat_representation = landmark->_feat_representation;
  new_feat.anchor_cam_id = new_cam_id;
  new_feat.anchor_clone_timestamp = new_anchor_timestamp;

  //==========================================================================
  // Transform the current estimate from the OLD anchor camera frame to the NEW one
  //==========================================================================
  // OLD: anchor camera position and orientation
  Eigen::Matrix<double, 3, 3> R_GtoIOLD = state->_clones_IMU.at(old_feat.anchor_clone_timestamp)->Rot();
  Eigen::Matrix<double, 3, 3> R_GtoOLD = state->_calib_IMUtoCAM.at(old_feat.anchor_cam_id)->Rot() * R_GtoIOLD;
  Eigen::Matrix<double, 3, 1> p_OLDinG = state->_clones_IMU.at(old_feat.anchor_clone_timestamp)->pos() -
                                         R_GtoOLD.transpose() * state->_calib_IMUtoCAM.at(old_feat.anchor_cam_id)->pos();

  // NEW: anchor camera position and orientation
  Eigen::Matrix<double, 3, 3> R_GtoINEW = state->_clones_IMU.at(new_feat.anchor_clone_timestamp)->Rot();
  Eigen::Matrix<double, 3, 3> R_GtoNEW = state->_calib_IMUtoCAM.at(new_feat.anchor_cam_id)->Rot() * R_GtoINEW;
  Eigen::Matrix<double, 3, 1> p_NEWinG = state->_clones_IMU.at(new_feat.anchor_clone_timestamp)->pos() -
                                         R_GtoNEW.transpose() * state->_calib_IMUtoCAM.at(new_feat.anchor_cam_id)->pos();

  // Calculate transform between the old anchor and new one
  Eigen::Matrix<double, 3, 3> R_OLDtoNEW = R_GtoNEW * R_GtoOLD.transpose();
  Eigen::Matrix<double, 3, 1> p_OLDinNEW = R_GtoNEW * (p_OLDinG - p_NEWinG);
  new_feat.p_FinA = R_OLDtoNEW * landmark->get_xyz(false) + p_OLDinNEW;

  //==========================================================================
  // Repeat the same transform with first-estimate (FEJ) values for consistency
  // NOTE(review): extrinsic calib Rot()/pos() are used directly here (no _fej
  // accessor on the calibration) — presumably calib is treated as FEJ-fixed; confirm
  //==========================================================================
  // OLD: anchor camera position and orientation
  Eigen::Matrix<double, 3, 3> R_GtoIOLD_fej = state->_clones_IMU.at(old_feat.anchor_clone_timestamp)->Rot_fej();
  Eigen::Matrix<double, 3, 3> R_GtoOLD_fej = state->_calib_IMUtoCAM.at(old_feat.anchor_cam_id)->Rot() * R_GtoIOLD_fej;
  Eigen::Matrix<double, 3, 1> p_OLDinG_fej = state->_clones_IMU.at(old_feat.anchor_clone_timestamp)->pos_fej() -
                                             R_GtoOLD_fej.transpose() * state->_calib_IMUtoCAM.at(old_feat.anchor_cam_id)->pos();

  // NEW: anchor camera position and orientation
  Eigen::Matrix<double, 3, 3> R_GtoINEW_fej = state->_clones_IMU.at(new_feat.anchor_clone_timestamp)->Rot_fej();
  Eigen::Matrix<double, 3, 3> R_GtoNEW_fej = state->_calib_IMUtoCAM.at(new_feat.anchor_cam_id)->Rot() * R_GtoINEW_fej;
  Eigen::Matrix<double, 3, 1> p_NEWinG_fej = state->_clones_IMU.at(new_feat.anchor_clone_timestamp)->pos_fej() -
                                             R_GtoNEW_fej.transpose() * state->_calib_IMUtoCAM.at(new_feat.anchor_cam_id)->pos();

  // Calculate transform between the old anchor and new one
  Eigen::Matrix<double, 3, 3> R_OLDtoNEW_fej = R_GtoNEW_fej * R_GtoOLD_fej.transpose();
  Eigen::Matrix<double, 3, 1> p_OLDinNEW_fej = R_GtoNEW_fej * (p_OLDinG_fej - p_NEWinG_fej);
  new_feat.p_FinA_fej = R_OLDtoNEW_fej * landmark->get_xyz(true) + p_OLDinNEW_fej;

  // Get Jacobians of p_FinG wrt new representation
  Eigen::MatrixXd H_f_new;
  std::vector<Eigen::MatrixXd> H_x_new;
  std::vector<std::shared_ptr<Type>> x_order_new;
  UpdaterHelper::get_feature_jacobian_representation(state, new_feat, H_f_new, H_x_new, x_order_new);

  //==========================================================================
  // Build the combined variable ordering for the covariance propagation
  //==========================================================================
  // New phi order is just the landmark
  std::vector<std::shared_ptr<Type>> phi_order_NEW;
  phi_order_NEW.push_back(landmark);

  // Loop through all our orders and append them (deduplicated; landmark columns go last)
  std::vector<std::shared_ptr<Type>> phi_order_OLD;
  int current_it = 0;
  std::map<std::shared_ptr<Type>, int> Phi_id_map;
  for (const auto &var : x_order_old) {
    if (Phi_id_map.find(var) == Phi_id_map.end()) {
      Phi_id_map.insert({var, current_it});
      phi_order_OLD.push_back(var);
      current_it += var->size();
    }
  }
  for (const auto &var : x_order_new) {
    if (Phi_id_map.find(var) == Phi_id_map.end()) {
      Phi_id_map.insert({var, current_it});
      phi_order_OLD.push_back(var);
      current_it += var->size();
    }
  }
  Phi_id_map.insert({landmark, current_it});
  phi_order_OLD.push_back(landmark);
  current_it += landmark->size();

  // Anchor change Jacobian (1x for single inverse depth, 3x otherwise; no added noise Q)
  int phisize = (new_feat.feat_representation != LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) ? 3 : 1;
  Eigen::MatrixXd Phi = Eigen::MatrixXd::Zero(phisize, current_it);
  Eigen::MatrixXd Q = Eigen::MatrixXd::Zero(phisize, phisize);

  // Inverse of our new representation
  // pf_new_error = Hfnew^{-1}*(Hfold*pf_olderror+Hxold*x_olderror-Hxnew*x_newerror)
  // (1x1 case uses the pseudo-inverse of the column; 3x3 case a QR solve)
  Eigen::MatrixXd H_f_new_inv;
  if (phisize == 1) {
    H_f_new_inv = 1.0 / H_f_new.squaredNorm() * H_f_new.transpose();
  } else {
    H_f_new_inv = H_f_new.colPivHouseholderQr().solve(Eigen::Matrix<double, 3, 3>::Identity());
  }

  // Place Jacobians for old anchor
  for (size_t i = 0; i < H_x_old.size(); i++) {
    Phi.block(0, Phi_id_map.at(x_order_old[i]), phisize, x_order_old[i]->size()).noalias() += H_f_new_inv * H_x_old[i];
  }

  // Place Jacobians for old feat
  Phi.block(0, Phi_id_map.at(landmark), phisize, phisize) = H_f_new_inv * H_f_old;

  // Place Jacobians for new anchor (note the minus sign from the error equation above)
  for (size_t i = 0; i < H_x_new.size(); i++) {
    Phi.block(0, Phi_id_map.at(x_order_new[i]), phisize, x_order_new[i]->size()).noalias() -= H_f_new_inv * H_x_new[i];
  }

  // Perform covariance propagation
  StateHelper::EKFPropagation(state, phi_order_NEW, phi_order_OLD, Phi, Q);

  // Set state from new feature
  landmark->_featid = new_feat.featid;
  landmark->_feat_representation = new_feat.feat_representation;
  landmark->_anchor_cam_id = new_feat.anchor_cam_id;
  landmark->_anchor_clone_timestamp = new_feat.anchor_clone_timestamp;
  landmark->set_from_xyz(new_feat.p_FinA, false);
  landmark->set_from_xyz(new_feat.p_FinA_fej, true);
  landmark->has_had_anchor_change = true;
}

View File

@@ -0,0 +1,134 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_UPDATER_SLAM_H
#define OV_MSCKF_UPDATER_SLAM_H
#include <Eigen/Eigen>
#include <memory>
#include "feat/Feature.h"
#include "feat/FeatureInitializer.h"
#include "feat/FeatureInitializerOptions.h"
#include "state/State.h"
#include "state/StateHelper.h"
#include "types/Landmark.h"
#include "types/LandmarkRepresentation.h"
#include "utils/colors.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
#include "UpdaterHelper.h"
#include "UpdaterOptions.h"
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/math/distributions/chi_squared.hpp>
namespace ov_msckf {
/**
* @brief Will compute the system for our sparse SLAM features and update the filter.
*
* This class is responsible for performing delayed feature initialization, SLAM update, and
* SLAM anchor change for anchored feature representations.
*/
class UpdaterSLAM {

public:
  /**
   * @brief Default constructor for our SLAM updater
   *
   * Our updater has a feature initializer which we use to initialize features as needed.
   * Also the options allow for one to tune the different parameters for update.
   *
   * @param options_slam Updater options (include measurement noise value) for SLAM features
   * @param options_aruco Updater options (include measurement noise value) for ARUCO features
   * @param feat_init_options Feature initializer options
   */
  UpdaterSLAM(UpdaterOptions &options_slam, UpdaterOptions &options_aruco, ov_core::FeatureInitializerOptions &feat_init_options)
      : _options_slam(options_slam), _options_aruco(options_aruco) {

    // Save our raw pixel noise squared
    // (precomputed once here so the update loops never have to call std::pow)
    _options_slam.sigma_pix_sq = std::pow(_options_slam.sigma_pix, 2);
    _options_aruco.sigma_pix_sq = std::pow(_options_aruco.sigma_pix, 2);

    // Save our feature initializer
    initializer_feat = std::unique_ptr<ov_core::FeatureInitializer>(new ov_core::FeatureInitializer(feat_init_options));

    // Initialize the chi squared test table with confidence level 0.95
    // Indexed by degrees of freedom (residual size), precomputed for 1..499
    // https://github.com/KumarRobotics/msckf_vio/blob/050c50defa5a7fd9a04c1eed5687b405f02919b5/src/msckf_vio.cpp#L215-L221
    for (int i = 1; i < 500; i++) {
      boost::math::chi_squared chi_squared_dist(i);
      chi_squared_table[i] = boost::math::quantile(chi_squared_dist, 0.95);
    }
  }

  /**
   * @brief Given tracked SLAM features, this will try to use them to update the state.
   * @param state State of the filter
   * @param feature_vec Features that can be used for update
   */
  void update(std::shared_ptr<State> state, std::vector<std::shared_ptr<ov_core::Feature>> &feature_vec);

  /**
   * @brief Given max track features, this will try to use them to initialize them in the state.
   * @param state State of the filter
   * @param feature_vec Features that can be used for update
   */
  void delayed_init(std::shared_ptr<State> state, std::vector<std::shared_ptr<ov_core::Feature>> &feature_vec);

  /**
   * @brief Will change SLAM feature anchors if it will be marginalized
   *
   * Makes sure that if any clone is about to be marginalized, it changes anchor representation.
   * By default, this will shift the anchor into the newest IMU clone and keep the camera calibration anchor the same.
   *
   * @param state State of the filter
   */
  void change_anchors(std::shared_ptr<State> state);

protected:
  /**
   * @brief Shifts landmark anchor to new clone
   * @param state State of filter
   * @param landmark landmark whose anchor is being shifted
   * @param new_anchor_timestamp Clone timestamp we want to move to
   * @param new_cam_id Which camera frame we want to move to
   */
  void perform_anchor_change(std::shared_ptr<State> state, std::shared_ptr<ov_type::Landmark> landmark, double new_anchor_timestamp,
                             size_t new_cam_id);

  /// Options used during update for slam features (sigma_pix_sq filled in by the constructor)
  UpdaterOptions _options_slam;

  /// Options used during update for aruco features (sigma_pix_sq filled in by the constructor)
  UpdaterOptions _options_aruco;

  /// Feature initializer class object
  std::unique_ptr<ov_core::FeatureInitializer> initializer_feat;

  /// Chi squared 95th percentile table (lookup would be size of residual)
  std::map<int, double> chi_squared_table;
};
} // namespace ov_msckf
#endif // OV_MSCKF_UPDATER_SLAM_H

View File

@@ -0,0 +1,278 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "UpdaterZeroVelocity.h"
using namespace ov_core;
using namespace ov_type;
using namespace ov_msckf;
bool UpdaterZeroVelocity::try_update(std::shared_ptr<State> state, double timestamp) {
// Return if we don't have any imu data yet
if (imu_data.empty()) {
last_zupt_state_timestamp = 0.0;
return false;
}
// Return if the state is already at the desired time
if (state->_timestamp == timestamp) {
last_zupt_state_timestamp = 0.0;
return false;
}
// Set the last time offset value if we have just started the system up
if (!have_last_prop_time_offset) {
last_prop_time_offset = state->_calib_dt_CAMtoIMU->value()(0);
have_last_prop_time_offset = true;
}
// assert that the time we are requesting is in the future
// assert(timestamp > state->_timestamp);
// Get what our IMU-camera offset should be (t_imu = t_cam + calib_dt)
double t_off_new = state->_calib_dt_CAMtoIMU->value()(0);
// First lets construct an IMU vector of measurements we need
// double time0 = state->_timestamp+t_off_new;
double time0 = state->_timestamp + last_prop_time_offset;
double time1 = timestamp + t_off_new;
// Select bounding inertial measurements
std::vector<ov_core::ImuData> imu_recent = Propagator::select_imu_readings(imu_data, time0, time1);
// Move forward in time
last_prop_time_offset = t_off_new;
// Check that we have at least one measurement to propagate with
if (imu_recent.size() < 2) {
PRINT_WARNING(RED "[ZUPT]: There are no IMU data to check for zero velocity with!!\n" RESET);
last_zupt_state_timestamp = 0.0;
return false;
}
// If we should integrate the acceleration and say the velocity should be zero
// Also if we should still inflate the bias based on their random walk noises
bool integrated_accel_constraint = false;
bool model_time_varying_bias = true;
bool override_with_disparity_check = true;
bool explicitly_enforce_zero_motion = false;
// Order of our Jacobian
std::vector<std::shared_ptr<Type>> Hx_order;
Hx_order.push_back(state->_imu->q());
Hx_order.push_back(state->_imu->bg());
Hx_order.push_back(state->_imu->ba());
if (integrated_accel_constraint) {
Hx_order.push_back(state->_imu->v());
}
// Large final matrices used for update
int h_size = (integrated_accel_constraint) ? 12 : 9;
int m_size = 6 * ((int)imu_recent.size() - 1);
Eigen::MatrixXd H = Eigen::MatrixXd::Zero(m_size, h_size);
Eigen::VectorXd res = Eigen::VectorXd::Zero(m_size);
Eigen::MatrixXd R = Eigen::MatrixXd::Identity(m_size, m_size);
// Loop through all our IMU and construct the residual and Jacobian
// State order is: [q_GtoI, bg, ba, v_IinG]
// Measurement order is: [w_true = 0, a_true = 0 or v_k+1 = 0]
// w_true = w_m - bw - nw
// a_true = a_m - ba - R*g - na
// v_true = v_k - g*dt + R^T*(a_m - ba - na)*dt
double dt_summed = 0;
for (size_t i = 0; i < imu_recent.size() - 1; i++) {
// Precomputed values
double dt = imu_recent.at(i + 1).timestamp - imu_recent.at(i).timestamp;
Eigen::Vector3d a_hat = imu_recent.at(i).am - state->_imu->bias_a();
// Measurement residual (true value is zero)
res.block(6 * i + 0, 0, 3, 1) = -(imu_recent.at(i).wm - state->_imu->bias_g());
if (!integrated_accel_constraint) {
res.block(6 * i + 3, 0, 3, 1) = -(a_hat - state->_imu->Rot() * _gravity);
} else {
res.block(6 * i + 3, 0, 3, 1) = -(state->_imu->vel() - _gravity * dt + state->_imu->Rot().transpose() * a_hat * dt);
}
// Measurement Jacobian
Eigen::Matrix3d R_GtoI_jacob = (state->_options.do_fej) ? state->_imu->Rot_fej() : state->_imu->Rot();
H.block(6 * i + 0, 3, 3, 3) = -Eigen::Matrix3d::Identity();
if (!integrated_accel_constraint) {
H.block(6 * i + 3, 0, 3, 3) = -skew_x(R_GtoI_jacob * _gravity);
H.block(6 * i + 3, 6, 3, 3) = -Eigen::Matrix3d::Identity();
} else {
H.block(6 * i + 3, 0, 3, 3) = -R_GtoI_jacob.transpose() * skew_x(a_hat) * dt;
H.block(6 * i + 3, 6, 3, 3) = -R_GtoI_jacob.transpose() * dt;
H.block(6 * i + 3, 9, 3, 3) = Eigen::Matrix3d::Identity();
}
// Measurement noise (convert from continuous to discrete)
// Note the dt time might be different if we have "cut" any imu measurements
R.block(6 * i + 0, 6 * i + 0, 3, 3) *= _noises.sigma_w_2 / dt;
if (!integrated_accel_constraint) {
R.block(6 * i + 3, 6 * i + 3, 3, 3) *= _noises.sigma_a_2 / dt;
} else {
R.block(6 * i + 3, 6 * i + 3, 3, 3) *= _noises.sigma_a_2 * dt;
}
dt_summed += dt;
}
// Multiply our noise matrix by a fixed amount
// We typically need to treat the IMU as being "worst" to detect / not become over confident
R *= _zupt_noise_multiplier;
// Next propagate the biases forward in time
// NOTE: G*Qd*G^t = dt*Qd*dt = dt*Qc
Eigen::MatrixXd Q_bias = Eigen::MatrixXd::Identity(6, 6);
Q_bias.block(0, 0, 3, 3) *= dt_summed * _noises.sigma_wb;
Q_bias.block(3, 3, 3, 3) *= dt_summed * _noises.sigma_ab;
// Chi2 distance check
// NOTE: we also append the propagation we "would do before the update" if this was to be accepted
// NOTE: we don't propagate first since if we fail the chi2 then we just want to return and do normal logic
Eigen::MatrixXd P_marg = StateHelper::get_marginal_covariance(state, Hx_order);
P_marg.block(3, 3, 6, 6) += Q_bias;
Eigen::MatrixXd S = H * P_marg * H.transpose() + R;
double chi2 = res.dot(S.llt().solve(res));
// Get our threshold (we precompute up to 1000 but handle the case that it is more)
double chi2_check;
if (res.rows() < 1000) {
chi2_check = chi_squared_table[res.rows()];
} else {
boost::math::chi_squared chi_squared_dist(res.rows());
chi2_check = boost::math::quantile(chi_squared_dist, 0.95);
PRINT_WARNING(YELLOW "[ZUPT]: chi2_check over the residual limit - %d\n" RESET, (int)res.rows());
}
// Check if the image disparity
bool disparity_passed = false;
if (override_with_disparity_check) {
// Get the disparity statistics from this image to the previous
double time0_cam = state->_timestamp;
double time1_cam = timestamp;
int num_features = 0;
double average_disparity = 0.0;
double variance_disparity = 0.0;
FeatureHelper::compute_disparity(_db, time0_cam, time1_cam, average_disparity, variance_disparity, num_features);
// Check if this disparity is enough to be classified as moving
disparity_passed = (average_disparity < _zupt_max_disparity && num_features > 20);
if (disparity_passed) {
PRINT_INFO(CYAN "[ZUPT]: passed disparity (%.3f < %.3f, %d features)\n" RESET, average_disparity, _zupt_max_disparity,
(int)num_features);
} else {
PRINT_DEBUG(YELLOW "[ZUPT]: failed disparity (%.3f > %.3f, %d features)\n" RESET, average_disparity, _zupt_max_disparity,
(int)num_features);
}
}
// Check if we are currently zero velocity
// We need to pass the chi2 and not be above our velocity threshold
if (!disparity_passed && (chi2 > _options.chi2_multipler * chi2_check || state->_imu->vel().norm() > _zupt_max_velocity)) {
last_zupt_state_timestamp = 0.0;
PRINT_DEBUG(YELLOW "[ZUPT]: rejected |v_IinG| = %.3f (chi2 %.3f > %.3f)\n" RESET, state->_imu->vel().norm(), chi2,
_options.chi2_multipler * chi2_check);
return false;
}
PRINT_INFO(CYAN "[ZUPT]: accepted |v_IinG| = %.3f (chi2 %.3f < %.3f)\n" RESET, state->_imu->vel().norm(), chi2,
_options.chi2_multipler * chi2_check);
// Do our update, only do this update if we have previously detected
// If we have succeeded, then we should remove the current timestamp feature tracks
// This is because we will not clone at this timestep and instead do our zero velocity update
// We want to keep the tracks from the previous timestep, thus only delete measurements from the current timestep
if (last_zupt_state_timestamp > 0.0) {
_db->cleanup_measurements_exact(last_zupt_state_timestamp);
}
// Else we are good, update the system
// 1) update with our IMU measurements directly
// 2) propagate and then explicitly say that our ori, pos, and vel should be zero
if (!explicitly_enforce_zero_motion) {
// Next propagate the biases forward in time
// NOTE: G*Qd*G^t = dt*Qd*dt = dt*Qc
if (model_time_varying_bias) {
Eigen::MatrixXd Phi_bias = Eigen::MatrixXd::Identity(6, 6);
std::vector<std::shared_ptr<Type>> Phi_order;
Phi_order.push_back(state->_imu->bg());
Phi_order.push_back(state->_imu->ba());
StateHelper::EKFPropagation(state, Phi_order, Phi_order, Phi_bias, Q_bias);
}
// Finally move the state time forward
StateHelper::EKFUpdate(state, Hx_order, H, res, R);
state->_timestamp = timestamp;
} else {
// Propagate the state forward in time
double time0_cam = last_zupt_state_timestamp;
double time1_cam = timestamp;
_prop->propagate_and_clone(state, time1_cam);
// Create the update system!
H = Eigen::MatrixXd::Zero(9, 15);
res = Eigen::VectorXd::Zero(9);
R = Eigen::MatrixXd::Identity(9, 9);
// residual (order is ori, pos, vel)
Eigen::Matrix3d R_GtoI0 = state->_clones_IMU.at(time0_cam)->Rot();
Eigen::Vector3d p_I0inG = state->_clones_IMU.at(time0_cam)->pos();
Eigen::Matrix3d R_GtoI1 = state->_clones_IMU.at(time1_cam)->Rot();
Eigen::Vector3d p_I1inG = state->_clones_IMU.at(time1_cam)->pos();
res.block(0, 0, 3, 1) = -log_so3(R_GtoI0 * R_GtoI1.transpose());
res.block(3, 0, 3, 1) = p_I1inG - p_I0inG;
res.block(6, 0, 3, 1) = state->_imu->vel();
res *= -1;
// jacobian (order is q0, p0, q1, p1, v0)
Hx_order.clear();
Hx_order.push_back(state->_clones_IMU.at(time0_cam));
Hx_order.push_back(state->_clones_IMU.at(time1_cam));
Hx_order.push_back(state->_imu->v());
if (state->_options.do_fej) {
R_GtoI0 = state->_clones_IMU.at(time0_cam)->Rot_fej();
}
H.block(0, 0, 3, 3) = Eigen::Matrix3d::Identity();
H.block(0, 6, 3, 3) = -R_GtoI0;
H.block(3, 3, 3, 3) = -Eigen::Matrix3d::Identity();
H.block(3, 9, 3, 3) = Eigen::Matrix3d::Identity();
H.block(6, 12, 3, 3) = Eigen::Matrix3d::Identity();
// noise (order is ori, pos, vel)
R.block(0, 0, 3, 3) *= std::pow(1e-2, 2);
R.block(3, 3, 3, 3) *= std::pow(1e-1, 2);
R.block(6, 6, 3, 3) *= std::pow(1e-1, 2);
// finally update and remove the old clone
StateHelper::EKFUpdate(state, Hx_order, H, res, R);
StateHelper::marginalize(state, state->_clones_IMU.at(time1_cam));
state->_clones_IMU.erase(time1_cam);
}
// Finally return
last_zupt_state_timestamp = timestamp;
return true;
}

View File

@@ -0,0 +1,166 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_MSCKF_UPDATER_ZEROVELOCITY_H
#define OV_MSCKF_UPDATER_ZEROVELOCITY_H
#include "feat/FeatureDatabase.h"
#include "feat/FeatureHelper.h"
#include "state/Propagator.h"
#include "state/State.h"
#include "state/StateHelper.h"
#include "utils/colors.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
#include "utils/sensor_data.h"
#include "UpdaterHelper.h"
#include "UpdaterOptions.h"
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/math/distributions/chi_squared.hpp>
namespace ov_msckf {
/**
 * @brief Will try to *detect* and then update using zero velocity assumption.
 *
 * Consider the case that a VIO unit remains stationary for a period time.
 * Typically this can cause issues in a monocular system without SLAM features since no features can be triangulated.
 * Additionally, if features could be triangulated (e.g. stereo) the quality can be poor and hurt performance.
 * If we can detect the cases where we are stationary then we can leverage this to prevent the need to do feature update during this period.
 * The main application would be using this on a **wheeled vehicle** which needs to stop (e.g. stop lights or parking).
 */
class UpdaterZeroVelocity {

public:
  /**
   * @brief Default constructor for our zero velocity detector and updater.
   * @param options Updater options (chi2 multiplier)
   * @param noises imu noise characteristics (continuous time)
   * @param db Feature tracker database with all features in it
   * @param prop Propagator class object which can predict the state forward in time
   * @param gravity_mag Global gravity magnitude of the system (normally 9.81)
   * @param zupt_max_velocity Max velocity we should consider to do a update with
   * @param zupt_noise_multiplier Multiplier of our IMU noise matrix (default should be 1.0)
   * @param zupt_max_disparity Max disparity we should consider to do a update with
   */
  UpdaterZeroVelocity(UpdaterOptions &options, Propagator::NoiseManager &noises, std::shared_ptr<ov_core::FeatureDatabase> db,
                      std::shared_ptr<Propagator> prop, double gravity_mag, double zupt_max_velocity, double zupt_noise_multiplier,
                      double zupt_max_disparity)
      : _options(options), _noises(noises), _db(db), _prop(prop), _zupt_max_velocity(zupt_max_velocity),
        _zupt_noise_multiplier(zupt_noise_multiplier), _zupt_max_disparity(zupt_max_disparity) {

    // Gravity vector (global frame, z-up)
    _gravity << 0.0, 0.0, gravity_mag;

    // Precompute the squared continuous-time IMU noise sigmas
    _noises.sigma_w_2 = std::pow(_noises.sigma_w, 2);
    _noises.sigma_a_2 = std::pow(_noises.sigma_a, 2);
    _noises.sigma_wb_2 = std::pow(_noises.sigma_wb, 2);
    _noises.sigma_ab_2 = std::pow(_noises.sigma_ab, 2);

    // Initialize the chi squared test table with confidence level 0.95
    // https://github.com/KumarRobotics/msckf_vio/blob/050c50defa5a7fd9a04c1eed5687b405f02919b5/src/msckf_vio.cpp#L215-L221
    for (int i = 1; i < 1000; i++) {
      boost::math::chi_squared chi_squared_dist(i);
      chi_squared_table[i] = boost::math::quantile(chi_squared_dist, 0.95);
    }
  }

  /**
   * @brief Feed function for inertial data
   * @param message Contains our timestamp and inertial information
   * @param oldest_time Time that we can discard measurements before (-1 disables pruning)
   */
  void feed_imu(const ov_core::ImuData &message, double oldest_time = -1) {

    // Append it to our vector
    imu_data.emplace_back(message);

    // Sort our imu data (handles any out of order measurements)
    // std::sort(imu_data.begin(), imu_data.end(), [](const IMUDATA i, const IMUDATA j) {
    //  return i.timestamp < j.timestamp;
    //});

    // Loop through and delete imu messages that are older than our requested time
    // BUGFIX: compare each stored measurement's timestamp (it0->timestamp), not the
    // newly appended message's — otherwise we either prune nothing (unbounded buffer
    // growth) or erase the entire buffer including the message we just added.
    if (oldest_time != -1) {
      auto it0 = imu_data.begin();
      while (it0 != imu_data.end()) {
        if (it0->timestamp < oldest_time) {
          it0 = imu_data.erase(it0);
        } else {
          it0++;
        }
      }
    }
  }

  /**
   * @brief Will first detect if the system is zero velocity, then will update.
   * @param state State of the filter
   * @param timestamp Next camera timestamp we want to see if we should propagate to.
   * @return True if the system is currently at zero velocity
   */
  bool try_update(std::shared_ptr<State> state, double timestamp);

protected:
  /// Options used during update (chi2 multiplier)
  UpdaterOptions _options;

  /// Container for the imu noise values
  Propagator::NoiseManager _noises;

  /// Feature tracker database with all features in it
  std::shared_ptr<ov_core::FeatureDatabase> _db;

  /// Our propagator!
  std::shared_ptr<Propagator> _prop;

  /// Gravity vector
  Eigen::Vector3d _gravity;

  /// Max velocity (m/s) that we should consider a zupt with
  double _zupt_max_velocity = 1.0;

  /// Multiplier of our IMU noise matrix (default should be 1.0)
  double _zupt_noise_multiplier = 1.0;

  /// Max disparity (pixels) that we should consider a zupt with
  double _zupt_max_disparity = 1.0;

  /// Chi squared 95th percentile table (lookup would be size of residual)
  std::map<int, double> chi_squared_table;

  /// Our history of IMU messages (time, angular, linear)
  std::vector<ov_core::ImuData> imu_data;

  /// Estimate for time offset at last propagation time
  double last_prop_time_offset = 0.0;

  /// Whether last_prop_time_offset has been initialized from the state yet
  bool have_last_prop_time_offset = false;

  /// Last timestamp we did zero velocity update with
  double last_zupt_state_timestamp = 0.0;
};
} // namespace ov_msckf
#endif // OV_MSCKF_UPDATER_ZEROVELOCITY_H