1169 lines
49 KiB
C++
1169 lines
49 KiB
C++
/*
 * OpenVINS: An Open Platform for Visual-Inertial Research
 * Copyright (C) 2018-2022 Patrick Geneva
 * Copyright (C) 2018-2022 Guoquan Huang
 * Copyright (C) 2018-2022 OpenVINS Contributors
 * Copyright (C) 2018-2019 Kevin Eckenhoff
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
|
||
|
||
#include "ROS1Visualizer.h"
|
||
#include <cv_bridge/cv_bridge.h>
|
||
|
||
using namespace ov_core;
|
||
using namespace ov_type;
|
||
using namespace ov_msckf;
|
||
|
||
/**
 * @brief Construct the visualizer: advertise all output topics, configure the
 *        debug cube marker, and open the optional state-recording files.
 * @param nh  ROS node handle used for all publishers/subscribers
 * @param app Core VIO manager whose state is visualized
 * @param sim Optional simulator (nullptr when running on real data)
 */
ROS1Visualizer::ROS1Visualizer(std::shared_ptr<ros::NodeHandle> nh, std::shared_ptr<VioManager> app, std::shared_ptr<Simulator> sim)
    : _nh(nh), _app(app), _sim(sim), thread_update_running(false) {

  // Setup our transform broadcaster
  mTfBr = std::make_shared<tf::TransformBroadcaster>();

  // Create image transport
  image_transport::ImageTransport it(*_nh);

  // Setup pose and path publishers
  pub_poseimu = nh->advertise<geometry_msgs::PoseWithCovarianceStamped>("/ov_msckf/poseimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_poseimu.getTopic().c_str());
  pub_odomimu = nh->advertise<nav_msgs::Odometry>("/ov_msckf/odomimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_odomimu.getTopic().c_str());
  pub_pathimu = nh->advertise<nav_msgs::Path>("/ov_msckf/pathimu", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_pathimu.getTopic().c_str());
  pub_poserec = nh->advertise<geometry_msgs::PoseStamped>("/ov_msckf/poserec", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_poserec.getTopic().c_str());

  // 3D points publishing
  pub_points_msckf = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_msckf", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_msckf.getTopic().c_str());
  pub_points_slam = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_slam", 2);
  // BUGFIX: this debug line previously printed the msckf topic (copy-paste error)
  PRINT_DEBUG("Publishing: %s\n", pub_points_slam.getTopic().c_str());
  pub_points_aruco = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_aruco", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_aruco.getTopic().c_str());
  pub_points_sim = nh->advertise<sensor_msgs::PointCloud2>("/ov_msckf/points_sim", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_points_sim.getTopic().c_str());

  // Our tracking image
  it_pub_tracks = it.advertise("/ov_msckf/trackhist", 2);
  PRINT_DEBUG("Publishing: %s\n", it_pub_tracks.getTopic().c_str());

  // Groundtruth publishers
  pub_posegt = nh->advertise<geometry_msgs::PoseStamped>("/ov_msckf/posegt", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_posegt.getTopic().c_str());
  pub_pathgt = nh->advertise<nav_msgs::Path>("/ov_msckf/pathgt", 2);
  PRINT_DEBUG("Publishing: %s\n", pub_pathgt.getTopic().c_str());

  // Loop closure publishers
  pub_loop_pose = nh->advertise<nav_msgs::Odometry>("/ov_msckf/loop_pose", 2);
  pub_loop_point = nh->advertise<sensor_msgs::PointCloud>("/ov_msckf/loop_feats", 2);
  pub_loop_extrinsic = nh->advertise<nav_msgs::Odometry>("/ov_msckf/loop_extrinsic", 2);
  pub_loop_intrinsics = nh->advertise<sensor_msgs::CameraInfo>("/ov_msckf/loop_intrinsics", 2);
  it_pub_loop_img_depth = it.advertise("/ov_msckf/loop_depth", 2);
  it_pub_loop_img_depth_color = it.advertise("/ov_msckf/loop_depth_colored", 2);

  // Added by PI on 27.07.2022
  // Subscribe to an externally provided alignment transform.
  // (original comment, translated from Russian: "Which topic is this published to?" -- TODO confirm publisher)
  subs_T_align = nh->subscribe("/ov_msckf/T_align", 1000, &ROS1Visualizer::T_align_callback, this);

  // NOTE(review): the previous trajectory-loading scaffold here (opening a hard-coded
  // "merged.txt" and a fully commented-out parsing loop into unused locals) was dead
  // code -- nothing read the loaded data -- so it has been removed.

  // Camera info and debug-marker publishers
  pub_camera_info = nh->advertise<sensor_msgs::CameraInfo>("/ov_msckf/camera_info", 2);
  vis_pub = nh->advertise<visualization_msgs::Marker>("/ov_msckf/vis_markers", 0);

  // Static configuration of the cube-list marker rendered in the "global" frame
  Marker.header.frame_id = "global";
  Marker.header.stamp = ros::Time();
  Marker.ns = "my_namespace";
  Marker.id = 0;
  Marker.type = visualization_msgs::Marker::CUBE_LIST;
  Marker.action = visualization_msgs::Marker::ADD;
  Marker.pose.orientation.x = 0.0;
  Marker.pose.orientation.y = 0.0;
  Marker.pose.orientation.z = 0.0;
  Marker.pose.orientation.w = 1.0;
  Marker.scale.x = 6.8f;
  Marker.scale.y = 6.8f;
  Marker.scale.z = 6.8f;
  Marker.color.a = 1.0; // Don't forget to set the alpha!
  Marker.color.r = 0.0;
  Marker.color.g = 1.0;
  Marker.color.b = 0.0;
  // only if using a MESH_RESOURCE marker type:
  Marker.mesh_resource = "package://pr2_description/meshes/base_v0/base.dae";

  // Fixed positions of the visualization cubes; the third component is a
  // common constant height for every cube.
  float yCubesVisPos = -3.0;
  cubeVisPoints = {
      {-33.451, 231.074, yCubesVisPos},
      {-74.510, 141.228, yCubesVisPos},
      {-114.119, -45.468, yCubesVisPos},
      {96.947, -52.157, yCubesVisPos},
      {-183.654, 605.40, yCubesVisPos},
      {228.287, 146.678, yCubesVisPos},
      {363.666, 1769.109, yCubesVisPos},
      {366.259, 1810.029, yCubesVisPos},
      {241.346, 186.492, yCubesVisPos},
      {424.114, 1789.749, yCubesVisPos},
      {588.224, 322.999, yCubesVisPos},
      {616.916, 1919.684, yCubesVisPos},
      {845.480, 518.359, yCubesVisPos},
      {1027.410, 713.207, yCubesVisPos},
      {1027.410, 733.207, yCubesVisPos},
      {1101.795, 667.008, yCubesVisPos},
      {1100.890, 778.080, yCubesVisPos},
      {1182.285, 788.335, yCubesVisPos},
      {1593.721, 1111.198, yCubesVisPos},
      {1635.912, 1542.791, yCubesVisPos},
      {1936.860, 1898.863, yCubesVisPos},
      {1865.827, 2190.633, yCubesVisPos},
      {1603.437, 2262.750, yCubesVisPos},
      {239.974, 72.267, yCubesVisPos},
      {285.534, 147.012, yCubesVisPos},
      {379.105, 103.735, yCubesVisPos},
      {377.834, 190.505, yCubesVisPos},
      {589.754, 207.650, yCubesVisPos},
      {583.577, 299.558, yCubesVisPos},
      {695.422, 271.515, yCubesVisPos},
      {683.414, 342.212, yCubesVisPos},
      {803.130, 295.558, yCubesVisPos},
      {901.909, 445.780, yCubesVisPos},
      {104.461, 397.006, yCubesVisPos},
      {110.683, 534.233, yCubesVisPos},
      {119.329, 502.730, yCubesVisPos},
      {238.588, 137.661, yCubesVisPos}
  };
  // End of added by PI

  // Option to enable publishing of global-to-IMU transformation
  nh->param<bool>("publish_global_to_imu_tf", publish_global2imu_tf, true);
  nh->param<bool>("publish_calibration_tf", publish_calibration_tf, true);

  // Load groundtruth if we have it and are not doing simulation
  if (nh->hasParam("path_gt") && _sim == nullptr) {
    std::string path_to_gt;
    nh->param<std::string>("path_gt", path_to_gt, "");
    if (!path_to_gt.empty()) {
      DatasetReader::load_gt_file(path_to_gt, gt_states);
      PRINT_DEBUG("gt file path is: %s\n", path_to_gt.c_str());
    }
  }

  // Load if we should save the total state to file
  nh->param<bool>("save_total_state", save_total_state, false);

  // If the file is not open, then open the file
  if (save_total_state) {

    // files we will open
    std::string filepath_est, filepath_std, filepath_gt;
    nh->param<std::string>("filepath_est", filepath_est, "state_estimate.txt");
    nh->param<std::string>("filepath_std", filepath_std, "state_deviation.txt");
    nh->param<std::string>("filepath_gt", filepath_gt, "state_groundtruth.txt");

    // If it exists, then delete it
    if (boost::filesystem::exists(filepath_est))
      boost::filesystem::remove(filepath_est);
    if (boost::filesystem::exists(filepath_std))
      boost::filesystem::remove(filepath_std);

    // Create folder path to this location if not exists
    boost::filesystem::create_directories(boost::filesystem::path(filepath_est.c_str()).parent_path());
    boost::filesystem::create_directories(boost::filesystem::path(filepath_std.c_str()).parent_path());
    boost::filesystem::create_directories(boost::filesystem::path(filepath_gt.c_str()).parent_path());

    // Open the files
    of_state_est.open(filepath_est.c_str());
    of_state_std.open(filepath_std.c_str());
    of_state_est << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl;
    of_state_std << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl;

    // Groundtruth if we are simulating
    if (_sim != nullptr) {
      if (boost::filesystem::exists(filepath_gt))
        boost::filesystem::remove(filepath_gt);
      of_state_gt.open(filepath_gt.c_str());
      of_state_gt << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl;
    }
  }
}
|
||
|
||
/**
 * @brief Wire up the IMU subscriber and either a synchronized stereo pair or
 *        one plain subscriber per camera, resolving topics via the YAML parser.
 * @param parser Config parser used to override the legacy ros params (must be non-null)
 */
void ROS1Visualizer::setup_subscribers(std::shared_ptr<ov_core::YamlParser> parser) {

  // A valid parser is required to resolve the topics below
  assert(parser != nullptr);

  // IMU subscriber (legacy ros param is the fallback, config file wins)
  std::string topic_imu;
  _nh->param<std::string>("topic_imu", topic_imu, "/imu0");
  parser->parse_external("relative_config_imu", "imu0", "rostopic", topic_imu);
  sub_imu = _nh->subscribe(topic_imu, 1000, &ROS1Visualizer::callback_inertial, this);

  // Non-stereo case: one independent subscriber per camera
  if (_app->get_params().state_options.num_cameras != 2) {
    for (int i = 0; i < _app->get_params().state_options.num_cameras; i++) {
      std::string cam_topic;
      _nh->param<std::string>("topic_camera" + std::to_string(i), cam_topic, "/cam" + std::to_string(i) + "/image_raw");
      parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "rostopic", cam_topic);
      subs_cam.push_back(_nh->subscribe<sensor_msgs::Image>(cam_topic, 10, boost::bind(&ROS1Visualizer::callback_monocular, this, _1, i)));
      PRINT_DEBUG("subscribing to cam (mono): %s\n", cam_topic.c_str());
    }
    return;
  }

  // Stereo case: approximate-time synchronized pair
  // https://answers.ros.org/question/96346/subscribe-to-two-image_raws-with-one-function/?answer=96491#post-id-96491
  std::string cam_topic0, cam_topic1;
  _nh->param<std::string>("topic_camera" + std::to_string(0), cam_topic0, "/cam" + std::to_string(0) + "/image_raw");
  _nh->param<std::string>("topic_camera" + std::to_string(1), cam_topic1, "/cam" + std::to_string(1) + "/image_raw");
  parser->parse_external("relative_config_imucam", "cam" + std::to_string(0), "rostopic", cam_topic0);
  parser->parse_external("relative_config_imucam", "cam" + std::to_string(1), "rostopic", cam_topic1);

  // The sync filter holds unique pointers internally, so keep shared_ptrs alive in our vectors
  auto sub_left = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*_nh, cam_topic0, 1);
  auto sub_right = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*_nh, cam_topic1, 1);
  auto synchronizer = std::make_shared<message_filters::Synchronizer<sync_pol>>(sync_pol(10), *sub_left, *sub_right);
  synchronizer->registerCallback(boost::bind(&ROS1Visualizer::callback_stereo, this, _1, _2, 0, 1));

  // Retain ownership so the filters outlive this function
  sync_cam.push_back(synchronizer);
  sync_subs_cam.push_back(sub_left);
  sync_subs_cam.push_back(sub_right);
  PRINT_DEBUG("subscribing to cam (stereo): %s\n", cam_topic0.c_str());
  PRINT_DEBUG("subscribing to cam (stereo): %s\n", cam_topic1.c_str());
}
|
||
|
||
/**
 * @brief Publish everything tied to the current filter state: tracking image,
 *        state/pose, feature clouds, groundtruth, loop-closure info, and the
 *        optional state-to-file recording. Skips work if the state timestamp
 *        has not advanced since the previous call.
 */
void ROS1Visualizer::visualize() {

  // Skip if nothing new has happened since the last call
  double state_time = _app->get_state()->_timestamp;
  if (state_time == last_visualization_timestamp && _app->initialized())
    return;
  last_visualization_timestamp = state_time;

  // Time how long publishing/displaying takes
  auto viz_start = boost::posix_time::microsec_clock::local_time();

  // Tracking image is published even before initialization
  publish_images();

  // Everything below needs a valid filter state
  if (!_app->initialized())
    return;

  // Record the wall-clock start of the dataset once
  if (!start_time_set) {
    rT1 = boost::posix_time::microsec_clock::local_time();
    start_time_set = true;
  }

  // State, features, groundtruth (if available), and keyframe/loop info
  publish_state();
  publish_features();
  publish_groundtruth();
  publish_loopclosure_information();

  // Optionally dump the full state to the recording files
  if (save_total_state) {
    RosVisualizerHelper::sim_save_total_state_to_file(_app->get_state(), _sim, of_state_est, of_state_std, of_state_gt);
  }

  auto viz_end = boost::posix_time::microsec_clock::local_time();
  double elapsed = (viz_end - viz_start).total_microseconds() * 1e-6;
  PRINT_DEBUG(BLUE "[TIME]: %.4f seconds for visualization\n" RESET, elapsed);
}
|
||
|
||
/**
 * @brief Fast-propagate the state to the given IMU timestamp and publish the
 *        resulting odometry message plus the global->imu and imu->cam TF frames.
 * @param timestamp IMU-clock time (seconds) to propagate the state to
 */
void ROS1Visualizer::visualize_odometry(double timestamp) {

  // Return if we have not inited
  if (!_app->initialized())
    return;

  // Get fast propagate state at the desired timestamp
  // state_plus layout (per the assignments below): [0:3] quaternion xyzw,
  // [4:6] position, [7:9] linear velocity (local frame), [10:12] angular velocity.
  // cov_plus is the 12x12 covariance of the pose+twist error state.
  std::shared_ptr<State> state = _app->get_state();
  Eigen::Matrix<double, 13, 1> state_plus = Eigen::Matrix<double, 13, 1>::Zero();
  Eigen::Matrix<double, 12, 12> cov_plus = Eigen::Matrix<double, 12, 12>::Zero();
  if (!_app->get_propagator()->fast_state_propagate(state, timestamp, state_plus, cov_plus))
    return;

  // Publish our odometry message if requested (skip the work if nobody listens)
  if (pub_odomimu.getNumSubscribers() != 0) {

    nav_msgs::Odometry odomIinM;
    odomIinM.header.stamp = ros::Time(timestamp);
    odomIinM.header.frame_id = "global";

    // The POSE component (orientation and position)
    odomIinM.pose.pose.orientation.x = state_plus(0);
    odomIinM.pose.pose.orientation.y = state_plus(1);
    odomIinM.pose.pose.orientation.z = state_plus(2);
    odomIinM.pose.pose.orientation.w = state_plus(3);
    odomIinM.pose.pose.position.x = state_plus(4);
    odomIinM.pose.pose.position.y = state_plus(5);
    odomIinM.pose.pose.position.z = state_plus(6);

    // The TWIST component (angular and linear velocities)
    odomIinM.child_frame_id = "imu";
    odomIinM.twist.twist.linear.x = state_plus(7);   // vel in local frame
    odomIinM.twist.twist.linear.y = state_plus(8);   // vel in local frame
    odomIinM.twist.twist.linear.z = state_plus(9);   // vel in local frame
    odomIinM.twist.twist.angular.x = state_plus(10); // we do not estimate this...
    odomIinM.twist.twist.angular.y = state_plus(11); // we do not estimate this...
    odomIinM.twist.twist.angular.z = state_plus(12); // we do not estimate this...

    // Finally set the covariance in the message (in the order position then orientation as per ros convention)
    // Phi is a permutation that swaps the first two 3x3 blocks (orientation <-> position)
    // and leaves the velocity block [6:11] in place, so cov_plus gets reordered to
    // the ROS convention via Phi * cov_plus * Phi^T.
    Eigen::Matrix<double, 12, 12> Phi = Eigen::Matrix<double, 12, 12>::Zero();
    Phi.block(0, 3, 3, 3).setIdentity();
    Phi.block(3, 0, 3, 3).setIdentity();
    Phi.block(6, 6, 6, 6).setIdentity();
    cov_plus = Phi * cov_plus * Phi.transpose();
    // Top-left 6x6 -> pose covariance
    for (int r = 0; r < 6; r++) {
      for (int c = 0; c < 6; c++) {
        odomIinM.pose.covariance[6 * r + c] = cov_plus(r, c);
      }
    }
    // Bottom-right 6x6 -> twist covariance
    for (int r = 0; r < 6; r++) {
      for (int c = 0; c < 6; c++) {
        odomIinM.twist.covariance[6 * r + c] = cov_plus(r + 6, c + 6);
      }
    }
    pub_odomimu.publish(odomIinM);
  }

  // Publish our transform on TF
  // NOTE: since we use JPL we have an implicit conversion to Hamilton when we publish
  // NOTE: a rotation from GtoI in JPL has the same xyzw as a ItoG Hamilton rotation
  auto odom_pose = std::make_shared<ov_type::PoseJPL>();
  odom_pose->set_value(state_plus.block(0, 0, 7, 1));
  tf::StampedTransform trans = RosVisualizerHelper::get_stamped_transform_from_pose(odom_pose, false);
  trans.frame_id_ = "global";
  trans.child_frame_id_ = "imu";
  if (publish_global2imu_tf) {
    mTfBr->sendTransform(trans);
  }

  // Loop through each camera calibration and publish it as an imu->camN transform
  for (const auto &calib : state->_calib_IMUtoCAM) {
    tf::StampedTransform trans_calib = RosVisualizerHelper::get_stamped_transform_from_pose(calib.second, true);
    trans_calib.frame_id_ = "imu";
    trans_calib.child_frame_id_ = "cam" + std::to_string(calib.first);
    if (publish_calibration_tf) {
      mTfBr->sendTransform(trans_calib);
    }
  }
}
|
||
|
||
/**
 * @brief Print the final summary at shutdown: online-calibrated quantities
 *        (time offset, intrinsics, extrinsics), accuracy statistics
 *        (RMSE/NEES) when groundtruth is available, and total runtime.
 */
void ROS1Visualizer::visualize_final() {

  // Final time offset value (only meaningful if it was being estimated online)
  if (_app->get_state()->_options.do_calib_camera_timeoffset) {
    PRINT_INFO(REDPURPLE "camera-imu timeoffset = %.5f\n\n" RESET, _app->get_state()->_calib_dt_CAMtoIMU->value()(0));
  }

  // Final camera intrinsics
  // First line prints entries 0-3 (fx, fy, cx, cy per the K-matrix layout used in
  // publish_images), second line entries 4-7 (distortion coefficients).
  if (_app->get_state()->_options.do_calib_camera_intrinsics) {
    for (int i = 0; i < _app->get_state()->_options.num_cameras; i++) {
      std::shared_ptr<Vec> calib = _app->get_state()->_cam_intrinsics.at(i);
      PRINT_INFO(REDPURPLE "cam%d intrinsics:\n" RESET, (int)i);
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f\n" RESET, calib->value()(0), calib->value()(1), calib->value()(2), calib->value()(3));
      PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,%.5f\n\n" RESET, calib->value()(4), calib->value()(5), calib->value()(6), calib->value()(7));
    }
  }

  // Final camera extrinsics
  // The state stores IMU-to-CAM as a JPL pose; invert it to the camera-to-IMU
  // homogeneous transform: R_CtoI = R(q)^T and p_CinI = -R_CtoI * p_IinC.
  if (_app->get_state()->_options.do_calib_camera_pose) {
    for (int i = 0; i < _app->get_state()->_options.num_cameras; i++) {
      std::shared_ptr<PoseJPL> calib = _app->get_state()->_calib_IMUtoCAM.at(i);
      Eigen::Matrix4d T_CtoI = Eigen::Matrix4d::Identity();
      T_CtoI.block(0, 0, 3, 3) = quat_2_Rot(calib->quat()).transpose();
      T_CtoI.block(0, 3, 3, 1) = -T_CtoI.block(0, 0, 3, 3) * calib->pos();
      PRINT_INFO(REDPURPLE "T_C%dtoI:\n" RESET, i);
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(0, 0), T_CtoI(0, 1), T_CtoI(0, 2), T_CtoI(0, 3));
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(1, 0), T_CtoI(1, 1), T_CtoI(1, 2), T_CtoI(1, 3));
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f,\n" RESET, T_CtoI(2, 0), T_CtoI(2, 1), T_CtoI(2, 2), T_CtoI(2, 3));
      PRINT_INFO(REDPURPLE "%.3f,%.3f,%.3f,%.3f\n\n" RESET, T_CtoI(3, 0), T_CtoI(3, 1), T_CtoI(3, 2), T_CtoI(3, 3));
    }
  }

  // Publish RMSE if we have it (running sums are accumulated elsewhere)
  if (!gt_states.empty()) {
    PRINT_INFO(REDPURPLE "RMSE: %.3f (deg) orientation\n" RESET, std::sqrt(summed_mse_ori / summed_number));
    PRINT_INFO(REDPURPLE "RMSE: %.3f (m) position\n\n" RESET, std::sqrt(summed_mse_pos / summed_number));
  }

  // Publish RMSE and NEES if doing simulation
  // NOTE(review): NEES is dimensionless; the "(deg)"/"(m)" labels on the NEES
  // lines look copied from the RMSE prints -- confirm before relying on them.
  if (_sim != nullptr) {
    PRINT_INFO(REDPURPLE "RMSE: %.3f (deg) orientation\n" RESET, std::sqrt(summed_mse_ori / summed_number));
    PRINT_INFO(REDPURPLE "RMSE: %.3f (m) position\n\n" RESET, std::sqrt(summed_mse_pos / summed_number));
    PRINT_INFO(REDPURPLE "NEES: %.3f (deg) orientation\n" RESET, summed_nees_ori / summed_number);
    PRINT_INFO(REDPURPLE "NEES: %.3f (m) position\n\n" RESET, summed_nees_pos / summed_number);
  }

  // Print the total wall-clock time since the first visualization (rT1)
  rT2 = boost::posix_time::microsec_clock::local_time();
  PRINT_INFO(REDPURPLE "TIME: %.3f seconds\n\n" RESET, (rT2 - rT1).total_microseconds() * 1e-6);
}
|
||
|
||
/**
 * @brief IMU measurement callback: feeds the measurement to the VIO system,
 *        publishes fast-propagated odometry, and (if no update is already in
 *        flight) spawns a worker that drains the camera queue.
 * @param msg Incoming IMU message (angular velocity + linear acceleration)
 */
void ROS1Visualizer::callback_inertial(const sensor_msgs::Imu::ConstPtr &msg) {

  // convert into correct format
  ov_core::ImuData message;
  message.timestamp = msg->header.stamp.toSec();
  message.wm << msg->angular_velocity.x, msg->angular_velocity.y, msg->angular_velocity.z;
  message.am << msg->linear_acceleration.x, msg->linear_acceleration.y, msg->linear_acceleration.z;

  // send it to our VIO system
  _app->feed_measurement_imu(message);
  visualize_odometry(message.timestamp);

  // If the processing queue is currently active / running just return so we can keep getting measurements
  // Otherwise create a second thread to do our update in an async manor
  // The visualization of the state, images, and features will be synchronous with the update!
  if (thread_update_running)
    return;
  thread_update_running = true;
  // BUGFIX: capture `message` BY VALUE (and `this` explicitly). The previous `[&]`
  // capture referenced the stack-local `message` from a thread that may be
  // detach()ed below, which dangles as soon as this callback returns.
  std::thread thread([this, message] {
    // Lock on the queue (prevents new images from appending)
    std::lock_guard<std::mutex> lck(camera_queue_mtx);

    // Count how many unique image streams
    std::map<int, bool> unique_cam_ids;
    for (const auto &cam_msg : camera_queue) {
      unique_cam_ids[cam_msg.sensor_ids.at(0)] = true;
    }

    // If we do not have enough unique cameras then we need to wait
    // We should wait till we have one of each camera to ensure we propagate in the correct order
    auto params = _app->get_params();
    size_t num_unique_cameras = (params.state_options.num_cameras == 2) ? 1 : params.state_options.num_cameras;
    if (unique_cam_ids.size() == num_unique_cameras) {

      // Loop through our queue and see if we are able to process any of our camera measurements
      // We are able to process if we have at least one IMU measurement greater than the camera time
      double timestamp_imu_inC = message.timestamp - _app->get_state()->_calib_dt_CAMtoIMU->value()(0);
      while (!camera_queue.empty() && camera_queue.at(0).timestamp < timestamp_imu_inC) {
        auto rT0_1 = boost::posix_time::microsec_clock::local_time();
        _app->feed_measurement_camera(camera_queue.at(0));
        visualize();
        camera_queue.pop_front();
        auto rT0_2 = boost::posix_time::microsec_clock::local_time();
        double time_total = (rT0_2 - rT0_1).total_microseconds() * 1e-6;
        PRINT_INFO(BLUE "[TIME]: %.4f seconds total (%.1f hz)\n" RESET, time_total, 1.0 / time_total);
      }
    }
    thread_update_running = false;
  });

  // If we are single threaded, then run single threaded
  // Otherwise detach this thread so it runs in the background!
  if (!_app->get_params().use_multi_threading) {
    thread.join();
  } else {
    thread.detach();
  }
}
|
||
|
||
/**
 * @brief Monocular image callback: throttles to the configured tracking
 *        frequency, converts to MONO8, attaches a mask, and appends the
 *        measurement to the time-sorted camera queue.
 * @param msg0    Incoming image message
 * @param cam_id0 Camera id the image belongs to
 */
void ROS1Visualizer::callback_monocular(const sensor_msgs::ImageConstPtr &msg0, int cam_id0) {

  // Throttle: drop the frame if it arrives sooner than 1/track_frequency
  // after the last accepted frame from this camera
  double timestamp = msg0->header.stamp.toSec();
  double min_dt = 1.0 / _app->get_params().track_frequency;
  auto it_last = camera_last_timestamp.find(cam_id0);
  if (it_last != camera_last_timestamp.end() && timestamp < it_last->second + min_dt) {
    return;
  }
  camera_last_timestamp[cam_id0] = timestamp;

  // Convert the ROS image into an OpenCV grayscale image
  cv_bridge::CvImageConstPtr cv_ptr;
  try {
    cv_ptr = cv_bridge::toCvShare(msg0, sensor_msgs::image_encodings::MONO8);
  } catch (cv_bridge::Exception &e) {
    PRINT_ERROR("cv_bridge exception: %s", e.what());
    return;
  }

  // Assemble the camera measurement
  ov_core::CameraData message;
  message.timestamp = cv_ptr->header.stamp.toSec();
  message.sensor_ids.push_back(cam_id0);
  message.images.push_back(cv_ptr->image.clone());

  // Mask: user-provided if enabled, otherwise an all-zero (track-everywhere) mask
  // TODO: in the future we should get this from external pixel segmentation
  if (_app->get_params().use_mask) {
    message.masks.push_back(_app->get_params().masks.at(cam_id0));
  } else {
    message.masks.push_back(cv::Mat::zeros(cv_ptr->image.rows, cv_ptr->image.cols, CV_8UC1));
  }

  // Append under lock and keep the queue ordered by timestamp
  std::lock_guard<std::mutex> lck(camera_queue_mtx);
  camera_queue.push_back(message);
  std::sort(camera_queue.begin(), camera_queue.end());
}
|
||
|
||
/**
 * @brief Synchronized stereo callback: throttles (keyed on the first camera),
 *        converts both images to MONO8, attaches masks, and appends a single
 *        two-camera measurement to the time-sorted queue.
 * @param msg0    Left/first image message
 * @param msg1    Right/second image message
 * @param cam_id0 Id of the first camera
 * @param cam_id1 Id of the second camera
 */
void ROS1Visualizer::callback_stereo(const sensor_msgs::ImageConstPtr &msg0, const sensor_msgs::ImageConstPtr &msg1, int cam_id0,
                                     int cam_id1) {

  // Throttle: drop the pair if it arrives sooner than 1/track_frequency
  // after the last accepted pair (tracked via the first camera id only)
  double timestamp = msg0->header.stamp.toSec();
  double min_dt = 1.0 / _app->get_params().track_frequency;
  auto it_last = camera_last_timestamp.find(cam_id0);
  if (it_last != camera_last_timestamp.end() && timestamp < it_last->second + min_dt) {
    return;
  }
  camera_last_timestamp[cam_id0] = timestamp;

  // Convert the first image to OpenCV grayscale
  cv_bridge::CvImageConstPtr cv_ptr0;
  try {
    cv_ptr0 = cv_bridge::toCvShare(msg0, sensor_msgs::image_encodings::MONO8);
  } catch (cv_bridge::Exception &e) {
    PRINT_ERROR("cv_bridge exception: %s\n", e.what());
    return;
  }

  // Convert the second image to OpenCV grayscale
  cv_bridge::CvImageConstPtr cv_ptr1;
  try {
    cv_ptr1 = cv_bridge::toCvShare(msg1, sensor_msgs::image_encodings::MONO8);
  } catch (cv_bridge::Exception &e) {
    PRINT_ERROR("cv_bridge exception: %s\n", e.what());
    return;
  }

  // Assemble the stereo measurement (timestamp taken from the first image)
  ov_core::CameraData message;
  message.timestamp = cv_ptr0->header.stamp.toSec();
  message.sensor_ids.push_back(cam_id0);
  message.sensor_ids.push_back(cam_id1);
  message.images.push_back(cv_ptr0->image.clone());
  message.images.push_back(cv_ptr1->image.clone());

  // Masks: user-provided if enabled, otherwise all-zero (track-everywhere) masks
  // TODO: in the future we should get this from external pixel segmentation
  if (_app->get_params().use_mask) {
    message.masks.push_back(_app->get_params().masks.at(cam_id0));
    message.masks.push_back(_app->get_params().masks.at(cam_id1));
  } else {
    message.masks.push_back(cv::Mat::zeros(cv_ptr0->image.rows, cv_ptr0->image.cols, CV_8UC1));
    message.masks.push_back(cv::Mat::zeros(cv_ptr1->image.rows, cv_ptr1->image.cols, CV_8UC1));
  }

  // Append under lock and keep the queue ordered by timestamp
  std::lock_guard<std::mutex> lck(camera_queue_mtx);
  camera_queue.push_back(message);
  std::sort(camera_queue.begin(), camera_queue.end());
}
|
||
|
||
/**
 * @brief Publish the current filter pose with covariance, the standalone
 *        recovered pose (PI addition), and the downsampled IMU path.
 */
void ROS1Visualizer::publish_state() {

  // Get the current state
  std::shared_ptr<State> state = _app->get_state();

  // We want to publish in the IMU clock frame
  // The timestamp in the state will be the last camera time
  double t_ItoC = state->_calib_dt_CAMtoIMU->value()(0);
  double timestamp_inI = state->_timestamp + t_ItoC;

  // Create pose of IMU (note we use the bag time)
  // NOTE: quat() yields xyzw components compatible with the ROS message fields
  // (JPL GtoI has the same xyzw as a Hamilton ItoG rotation).
  geometry_msgs::PoseWithCovarianceStamped poseIinM;
  poseIinM.header.stamp = ros::Time(timestamp_inI);
  poseIinM.header.seq = poses_seq_imu;
  poseIinM.header.frame_id = "global";
  poseIinM.pose.pose.orientation.x = state->_imu->quat()(0);
  poseIinM.pose.pose.orientation.y = state->_imu->quat()(1);
  poseIinM.pose.pose.orientation.z = state->_imu->quat()(2);
  poseIinM.pose.pose.orientation.w = state->_imu->quat()(3);
  poseIinM.pose.pose.position.x = state->_imu->pos()(0);
  poseIinM.pose.pose.position.y = state->_imu->pos()(1);
  poseIinM.pose.pose.position.z = state->_imu->pos()(2);

  // Finally set the covariance in the message (in the order position then orientation as per ros convention)
  // The marginal covariance is requested with position first, then orientation,
  // so it already matches the ROS pose-covariance ordering.
  std::vector<std::shared_ptr<Type>> statevars;
  statevars.push_back(state->_imu->pose()->p());
  statevars.push_back(state->_imu->pose()->q());
  Eigen::Matrix<double, 6, 6> covariance_posori = StateHelper::get_marginal_covariance(_app->get_state(), statevars);
  for (int r = 0; r < 6; r++) {
    for (int c = 0; c < 6; c++) {
      poseIinM.pose.covariance[6 * r + c] = covariance_posori(r, c);
    }
  }
  pub_poseimu.publish(poseIinM);

  //=========================================================
  //=========================================================

  // Append to our pose vector
  geometry_msgs::PoseStamped posetemp;
  posetemp.header = poseIinM.header;
  posetemp.pose = poseIinM.pose.pose;

  // Added by PI
  // Also publish the bare (covariance-free) pose on its own topic
  pub_poserec.publish(posetemp);
  // End of added by PI

  poses_imu.push_back(posetemp);

  // Create our path (imu)
  // NOTE: We downsample the number of poses as needed to prevent rviz crashes
  // NOTE: https://github.com/ros-visualization/rviz/issues/1107
  nav_msgs::Path arrIMU;
  // Changed by PI; was:
  // arrIMU.header.stamp = ros::Time::now();
  // Became (stamp the path with the state time instead of wall-clock time):
  // NOTE(review): this ROS_INFO fires on every published state -- consider
  // demoting it to a debug-level log to avoid flooding the console.
  ROS_INFO("The sent timestamp is: %f", posetemp.header.stamp.toSec());
  arrIMU.header.stamp = posetemp.header.stamp;
  arrIMU.header.seq = poses_seq_imu;
  arrIMU.header.frame_id = "global";
  // Stride grows with history size so the path stays under ~16384 poses
  for (size_t i = 0; i < poses_imu.size(); i += std::floor((double)poses_imu.size() / 16384.0) + 1) {
    arrIMU.poses.push_back(poses_imu.at(i));
  }
  pub_pathimu.publish(arrIMU);

  // Move them forward in time
  poses_seq_imu++;
}
|
||
|
||
void ROS1Visualizer::publish_images() {
|
||
|
||
// Check if we have subscribers
|
||
if (it_pub_tracks.getNumSubscribers() == 0)
|
||
return;
|
||
|
||
// Get our image of history tracks
|
||
cv::Mat img_history = _app->get_historical_viz_image();
|
||
|
||
// Create our message
|
||
std_msgs::Header header;
|
||
header.stamp = ros::Time::now();
|
||
header.frame_id = "cam0";
|
||
sensor_msgs::ImagePtr exl_msg = cv_bridge::CvImage(header, "bgr8", img_history).toImageMsg();
|
||
exl_msg_global = exl_msg;
|
||
|
||
// Publish
|
||
it_pub_tracks.publish(exl_msg);
|
||
|
||
bool is_fisheye = (std::dynamic_pointer_cast<ov_core::CamEqui>(_app->get_params().camera_intrinsics.at(0)) != nullptr);
|
||
sensor_msgs::CameraInfo cameraparams;
|
||
cameraparams.header = header;
|
||
cameraparams.header.frame_id = "cam0";
|
||
cameraparams.distortion_model = is_fisheye ? "equidistant" : "plumb_bob";
|
||
Eigen::VectorXd cparams = _app->get_state()->_cam_intrinsics.at(0)->value();
|
||
cameraparams.D = {cparams(4), cparams(5), cparams(6), cparams(7)};
|
||
cameraparams.K = {cparams(0), 0, cparams(2), 0, cparams(1), cparams(3), 0, 0, 1};
|
||
cameraparams.P = {cparams(0), 0, cparams(2), 0, 0, cparams(1), cparams(3), 0, 0, 0, 1, 0};
|
||
PRINT_DEBUG("cameraparams.P: %d", cameraparams.P.data());
|
||
|
||
camInfoGlob = sensor_msgs::CameraInfo();
|
||
camInfoGlob = cameraparams;
|
||
|
||
// Added by PI
|
||
pub_camera_info.publish(camInfoGlob);
|
||
|
||
for (auto & cubeVisPoint : cubeVisPoints){
|
||
geometry_msgs::Point tempPoint;
|
||
tempPoint.x = cubeVisPoint[0];
|
||
tempPoint.y = cubeVisPoint[1];
|
||
tempPoint.z = cubeVisPoint[2];
|
||
Marker.points.push_back(tempPoint);
|
||
}
|
||
vis_pub.publish( Marker );
|
||
|
||
// pub_camera_info.publish(cameraparams);
|
||
// pub_camera_info_trackhist.publish(cameraparams);
|
||
// sensor_msgs::CameraInfoPtr cameraParamsPtr;
|
||
// cameraParamsPtr->header = cameraparams.header;
|
||
// cameraParamsPtr->D = cameraparams.D;
|
||
// cameraParamsPtr->K = cameraparams.K;
|
||
// cameraParamsPtr->P = cameraparams.P;
|
||
// cameraParamsPtr->R = cameraparams.R;
|
||
// cameraParamsPtr->distortion_model = cameraparams.distortion_model;
|
||
// cameraParamsPtr->binning_x = cameraparams.binning_x;
|
||
// cameraParamsPtr->binning_y = cameraparams.binning_y;
|
||
// cameraParamsPtr->height = cameraparams.height;
|
||
// cameraParamsPtr->roi = cameraparams.roi;
|
||
// cameraParamsPtr->width = cameraparams.width;
|
||
// camPub.publish(exl_msg_global, cameraparams);
|
||
// camPub.publish(exl_msg_global, cameraParamsPtr);
|
||
}
|
||
|
||
void ROS1Visualizer::publish_features() {
|
||
|
||
// Check if we have subscribers
|
||
if (pub_points_msckf.getNumSubscribers() == 0 && pub_points_slam.getNumSubscribers() == 0 && pub_points_aruco.getNumSubscribers() == 0 &&
|
||
pub_points_sim.getNumSubscribers() == 0)
|
||
return;
|
||
|
||
// Get our good MSCKF features
|
||
std::vector<Eigen::Vector3d> feats_msckf = _app->get_good_features_MSCKF();
|
||
sensor_msgs::PointCloud2 cloud = RosVisualizerHelper::get_ros_pointcloud(feats_msckf);
|
||
pub_points_msckf.publish(cloud);
|
||
|
||
// Get our good SLAM features
|
||
std::vector<Eigen::Vector3d> feats_slam = _app->get_features_SLAM();
|
||
sensor_msgs::PointCloud2 cloud_SLAM = RosVisualizerHelper::get_ros_pointcloud(feats_slam);
|
||
pub_points_slam.publish(cloud_SLAM);
|
||
|
||
// Get our good ARUCO features
|
||
std::vector<Eigen::Vector3d> feats_aruco = _app->get_features_ARUCO();
|
||
sensor_msgs::PointCloud2 cloud_ARUCO = RosVisualizerHelper::get_ros_pointcloud(feats_aruco);
|
||
pub_points_aruco.publish(cloud_ARUCO);
|
||
|
||
// Skip the rest of we are not doing simulation
|
||
if (_sim == nullptr)
|
||
return;
|
||
|
||
// Get our good SIMULATION features
|
||
std::vector<Eigen::Vector3d> feats_sim = _sim->get_map_vec();
|
||
sensor_msgs::PointCloud2 cloud_SIM = RosVisualizerHelper::get_ros_pointcloud(feats_sim);
|
||
pub_points_sim.publish(cloud_SIM);
|
||
}
|
||
|
||
void ROS1Visualizer::publish_groundtruth() {

  // Publishes the groundtruth pose/path, broadcasts the global->truth TF, and
  // accumulates RMSE/NEES error statistics of the current estimate vs. groundtruth.

  // Our groundtruth state
  // Layout (see GT-file comment below): [time, q_GtoI(4), p_IinG(3), v_IinG(3), b_gyro(3), b_accel(3)]
  Eigen::Matrix<double, 17, 1> state_gt;

  // We want to publish in the IMU clock frame
  // The timestamp in the state will be the last camera time
  double t_ItoC = _app->get_state()->_calib_dt_CAMtoIMU->value()(0);
  double timestamp_inI = _app->get_state()->_timestamp + t_ItoC;

  // Check that we have the timestamp in our GT file [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel]
  if (_sim == nullptr && (gt_states.empty() || !DatasetReader::get_gt_state(timestamp_inI, state_gt, gt_states))) {
    return;
  }

  // Get the simulated groundtruth
  // NOTE: we get the true time in the IMU clock frame
  if (_sim != nullptr) {
    timestamp_inI = _app->get_state()->_timestamp + _sim->get_true_parameters().calib_camimu_dt;
    if (!_sim->get_state(timestamp_inI, state_gt))
      return;
  }

  // Get the GT and system state state
  // Estimated IMU state layout: q(0:3), p(4:6), then velocity/biases (unused here)
  Eigen::Matrix<double, 16, 1> state_ekf = _app->get_state()->_imu->value();

  // Create pose of IMU
  geometry_msgs::PoseStamped poseIinM;
  poseIinM.header.stamp = ros::Time(timestamp_inI);
  poseIinM.header.seq = poses_seq_gt;
  poseIinM.header.frame_id = "global";
  poseIinM.pose.orientation.x = state_gt(1, 0);
  poseIinM.pose.orientation.y = state_gt(2, 0);
  poseIinM.pose.orientation.z = state_gt(3, 0);
  poseIinM.pose.orientation.w = state_gt(4, 0);
  poseIinM.pose.position.x = state_gt(5, 0);
  poseIinM.pose.position.y = state_gt(6, 0);
  poseIinM.pose.position.z = state_gt(7, 0);
  pub_posegt.publish(poseIinM);

  // Append to our pose vector
  poses_gt.push_back(poseIinM);

  // Create our path (imu)
  // NOTE: We downsample the number of poses as needed to prevent rviz crashes
  // NOTE: https://github.com/ros-visualization/rviz/issues/1107
  nav_msgs::Path arrIMU;
  arrIMU.header.stamp = ros::Time::now();
  arrIMU.header.seq = poses_seq_gt;
  arrIMU.header.frame_id = "global";
  // Stride grows with history size so the path never exceeds ~16384 poses
  for (size_t i = 0; i < poses_gt.size(); i += std::floor((double)poses_gt.size() / 16384.0) + 1) {
    arrIMU.poses.push_back(poses_gt.at(i));
  }
  pub_pathgt.publish(arrIMU);

  // Move them forward in time
  poses_seq_gt++;

  // Publish our transform on TF
  tf::StampedTransform trans;
  trans.stamp_ = ros::Time::now();
  trans.frame_id_ = "global";
  trans.child_frame_id_ = "truth";
  tf::Quaternion quat(state_gt(1, 0), state_gt(2, 0), state_gt(3, 0), state_gt(4, 0));
  trans.setRotation(quat);
  tf::Vector3 orig(state_gt(5, 0), state_gt(6, 0), state_gt(7, 0));
  trans.setOrigin(orig);
  if (publish_global2imu_tf) {
    mTfBr->sendTransform(trans);
  }

  //==========================================================================
  //==========================================================================

  // Difference between positions
  // (estimate position lives at indices 4:6, groundtruth position at 5:7)
  double dx = state_ekf(4, 0) - state_gt(5, 0);
  double dy = state_ekf(5, 0) - state_gt(6, 0);
  double dz = state_ekf(6, 0) - state_gt(7, 0);
  double err_pos = std::sqrt(dx * dx + dy * dy + dz * dz);

  // Quaternion error
  // Error quaternion q_err = q_est * q_gt^-1; small-angle: angle ~= 2*||vec(q_err)||
  Eigen::Matrix<double, 4, 1> quat_gt, quat_st, quat_diff;
  quat_gt << state_gt(1, 0), state_gt(2, 0), state_gt(3, 0), state_gt(4, 0);
  quat_st << state_ekf(0, 0), state_ekf(1, 0), state_ekf(2, 0), state_ekf(3, 0);
  quat_diff = quat_multiply(quat_st, Inv(quat_gt));
  double err_ori = (180 / M_PI) * 2 * quat_diff.block(0, 0, 3, 1).norm();

  //==========================================================================
  //==========================================================================

  // Get covariance of pose
  // NOTE: orientation is pushed first, then position, so in the 6x6 marginal
  // covariance block(0,0) is orientation and block(3,3) is position.
  std::vector<std::shared_ptr<Type>> statevars;
  statevars.push_back(_app->get_state()->_imu->q());
  statevars.push_back(_app->get_state()->_imu->p());
  Eigen::Matrix<double, 6, 6> covariance = StateHelper::get_marginal_covariance(_app->get_state(), statevars);

  // Calculate NEES values
  // NOTE: need to manually multiply things out to make static asserts work
  // NOTE: https://github.com/rpng/open_vins/pull/226
  // NOTE: https://github.com/rpng/open_vins/issues/236
  // NOTE: https://gitlab.com/libeigen/eigen/-/issues/1664
  // Orientation NEES uses the small-angle error e = 2*vec(q_err):
  // e^T * P^-1 * e = (2*v)^T * P^-1 * (2*v), split as 2*v . (P^-1 * 2*v) below
  Eigen::Vector3d quat_diff_vec = quat_diff.block(0, 0, 3, 1);
  Eigen::Vector3d cov_vec = covariance.block(0, 0, 3, 3).inverse() * 2 * quat_diff.block(0, 0, 3, 1);
  double ori_nees = 2 * quat_diff_vec.dot(cov_vec);
  Eigen::Vector3d errpos = state_ekf.block(4, 0, 3, 1) - state_gt.block(5, 0, 3, 1);
  double pos_nees = errpos.transpose() * covariance.block(3, 3, 3, 3).inverse() * errpos;

  //==========================================================================
  //==========================================================================

  // Update our average variables
  // (skip samples where a singular covariance produced NaNs)
  if (!std::isnan(ori_nees) && !std::isnan(pos_nees)) {
    summed_mse_ori += err_ori * err_ori;
    summed_mse_pos += err_pos * err_pos;
    summed_nees_ori += ori_nees;
    summed_nees_pos += pos_nees;
    summed_number++;
  }

  // Nice display for the user
  PRINT_INFO(REDPURPLE "error to gt => %.3f, %.3f (deg,m) | rmse => %.3f, %.3f (deg,m) | called %d times\n" RESET, err_ori, err_pos,
             std::sqrt(summed_mse_ori / summed_number), std::sqrt(summed_mse_pos / summed_number), (int)summed_number);
  PRINT_INFO(REDPURPLE "nees => %.1f, %.1f (ori,pos) | avg nees = %.1f, %.1f (ori,pos)\n" RESET, ori_nees, pos_nees,
             summed_nees_ori / summed_number, summed_nees_pos / summed_number);

  //==========================================================================
  //==========================================================================
}
|
||
|
||
void ROS1Visualizer::publish_loopclosure_information() {
|
||
|
||
// Get the current tracks in this frame
|
||
double active_tracks_time1 = -1;
|
||
double active_tracks_time2 = -1;
|
||
std::unordered_map<size_t, Eigen::Vector3d> active_tracks_posinG;
|
||
std::unordered_map<size_t, Eigen::Vector3d> active_tracks_uvd;
|
||
cv::Mat active_cam0_image;
|
||
_app->get_active_tracks(active_tracks_time1, active_tracks_posinG, active_tracks_uvd);
|
||
_app->get_active_image(active_tracks_time2, active_cam0_image);
|
||
if (active_tracks_time1 == -1)
|
||
return;
|
||
if (_app->get_state()->_clones_IMU.find(active_tracks_time1) == _app->get_state()->_clones_IMU.end())
|
||
return;
|
||
Eigen::Vector4d quat = _app->get_state()->_clones_IMU.at(active_tracks_time1)->quat();
|
||
Eigen::Vector3d pos = _app->get_state()->_clones_IMU.at(active_tracks_time1)->pos();
|
||
if (active_tracks_time1 != active_tracks_time2)
|
||
return;
|
||
|
||
// Default header
|
||
std_msgs::Header header;
|
||
header.stamp = ros::Time(active_tracks_time1);
|
||
|
||
//======================================================
|
||
// Check if we have subscribers for the pose odometry, camera intrinsics, or extrinsics
|
||
if (pub_loop_pose.getNumSubscribers() != 0 || pub_loop_extrinsic.getNumSubscribers() != 0 ||
|
||
pub_loop_intrinsics.getNumSubscribers() != 0) {
|
||
|
||
// PUBLISH HISTORICAL POSE ESTIMATE
|
||
nav_msgs::Odometry odometry_pose;
|
||
odometry_pose.header = header;
|
||
odometry_pose.header.frame_id = "global";
|
||
odometry_pose.pose.pose.position.x = pos(0);
|
||
odometry_pose.pose.pose.position.y = pos(1);
|
||
odometry_pose.pose.pose.position.z = pos(2);
|
||
odometry_pose.pose.pose.orientation.x = quat(0);
|
||
odometry_pose.pose.pose.orientation.y = quat(1);
|
||
odometry_pose.pose.pose.orientation.z = quat(2);
|
||
odometry_pose.pose.pose.orientation.w = quat(3);
|
||
pub_loop_pose.publish(odometry_pose);
|
||
|
||
// PUBLISH IMU TO CAMERA0 EXTRINSIC
|
||
// need to flip the transform to the IMU frame
|
||
Eigen::Vector4d q_ItoC = _app->get_state()->_calib_IMUtoCAM.at(0)->quat();
|
||
Eigen::Vector3d p_CinI = -_app->get_state()->_calib_IMUtoCAM.at(0)->Rot().transpose() * _app->get_state()->_calib_IMUtoCAM.at(0)->pos();
|
||
nav_msgs::Odometry odometry_calib;
|
||
odometry_calib.header = header;
|
||
odometry_calib.header.frame_id = "imu";
|
||
odometry_calib.pose.pose.position.x = p_CinI(0);
|
||
odometry_calib.pose.pose.position.y = p_CinI(1);
|
||
odometry_calib.pose.pose.position.z = p_CinI(2);
|
||
odometry_calib.pose.pose.orientation.x = q_ItoC(0);
|
||
odometry_calib.pose.pose.orientation.y = q_ItoC(1);
|
||
odometry_calib.pose.pose.orientation.z = q_ItoC(2);
|
||
odometry_calib.pose.pose.orientation.w = q_ItoC(3);
|
||
pub_loop_extrinsic.publish(odometry_calib);
|
||
|
||
// PUBLISH CAMERA0 INTRINSICS
|
||
bool is_fisheye = (std::dynamic_pointer_cast<ov_core::CamEqui>(_app->get_params().camera_intrinsics.at(0)) != nullptr);
|
||
sensor_msgs::CameraInfo cameraparams;
|
||
cameraparams.header = header;
|
||
cameraparams.header.frame_id = "cam0";
|
||
cameraparams.distortion_model = is_fisheye ? "equidistant" : "plumb_bob";
|
||
Eigen::VectorXd cparams = _app->get_state()->_cam_intrinsics.at(0)->value();
|
||
cameraparams.D = {cparams(4), cparams(5), cparams(6), cparams(7)};
|
||
cameraparams.K = {cparams(0), 0, cparams(2), 0, cparams(1), cparams(3), 0, 0, 1};
|
||
pub_loop_intrinsics.publish(cameraparams);
|
||
|
||
// Added by PI
|
||
int helllo = 0;
|
||
// pub_camera_info.publish(cameraparams);
|
||
// pub_camera_info_trackhist.publish(cameraparams);
|
||
// {cparams(0), 0, cparams(2), 0, 0, cparams(1), cparams(3), 0, 0, 0, 1}
|
||
cameraparams.P = {cparams(0), 0, cparams(2), 0, 0, cparams(1), cparams(3), 0, 0, 0, 1, 0};
|
||
std::cout << "Testing message" << std::endl;
|
||
std::cout << "cameraparams.P: " << cameraparams.P.data() << std::endl;
|
||
PRINT_DEBUG("cameraparams.P: %d", cameraparams.P.data());
|
||
|
||
sensor_msgs::CameraInfoPtr cameraParamsPtr;
|
||
cameraParamsPtr->header = cameraparams.header;
|
||
cameraParamsPtr->D = cameraparams.D;
|
||
cameraParamsPtr->K = cameraparams.K;
|
||
cameraParamsPtr->P = cameraparams.P;
|
||
cameraParamsPtr->R = cameraparams.R;
|
||
cameraParamsPtr->distortion_model = cameraparams.distortion_model;
|
||
cameraParamsPtr->binning_x = cameraparams.binning_x;
|
||
cameraParamsPtr->binning_y = cameraparams.binning_y;
|
||
cameraParamsPtr->height = cameraparams.height;
|
||
cameraParamsPtr->roi = cameraparams.roi;
|
||
cameraParamsPtr->width = cameraparams.width;
|
||
|
||
camInfoGlob = sensor_msgs::CameraInfo();
|
||
camInfoPtrGlob = cameraParamsPtr;
|
||
camInfoGlob = cameraparams;
|
||
// camPub.publish(exl_msg_global, cameraparams);
|
||
camPub.publish(exl_msg_global, cameraParamsPtr);
|
||
}
|
||
|
||
//======================================================
|
||
// PUBLISH FEATURE TRACKS IN THE GLOBAL FRAME OF REFERENCE
|
||
if (pub_loop_point.getNumSubscribers() != 0) {
|
||
|
||
// Construct the message
|
||
sensor_msgs::PointCloud point_cloud;
|
||
point_cloud.header = header;
|
||
point_cloud.header.frame_id = "global";
|
||
for (const auto &feattimes : active_tracks_posinG) {
|
||
|
||
// Get this feature information
|
||
size_t featid = feattimes.first;
|
||
Eigen::Vector3d uvd = Eigen::Vector3d::Zero();
|
||
if (active_tracks_uvd.find(featid) != active_tracks_uvd.end()) {
|
||
uvd = active_tracks_uvd.at(featid);
|
||
}
|
||
Eigen::Vector3d pFinG = active_tracks_posinG.at(featid);
|
||
|
||
// Push back 3d point
|
||
geometry_msgs::Point32 p;
|
||
p.x = pFinG(0);
|
||
p.y = pFinG(1);
|
||
p.z = pFinG(2);
|
||
point_cloud.points.push_back(p);
|
||
|
||
// Push back the uv_norm, uv_raw, and feature id
|
||
// NOTE: we don't use the normalized coordinates to save time here
|
||
// NOTE: they will have to be re-normalized in the loop closure code
|
||
sensor_msgs::ChannelFloat32 p_2d;
|
||
p_2d.values.push_back(0);
|
||
p_2d.values.push_back(0);
|
||
p_2d.values.push_back(uvd(0));
|
||
p_2d.values.push_back(uvd(1));
|
||
p_2d.values.push_back(featid);
|
||
point_cloud.channels.push_back(p_2d);
|
||
}
|
||
pub_loop_point.publish(point_cloud);
|
||
}
|
||
|
||
//======================================================
|
||
// Depth images of sparse points and its colorized version
|
||
if (it_pub_loop_img_depth.getNumSubscribers() != 0 || it_pub_loop_img_depth_color.getNumSubscribers() != 0) {
|
||
|
||
// Create the images we will populate with the depths
|
||
std::pair<int, int> wh_pair = {active_cam0_image.cols, active_cam0_image.rows};
|
||
cv::Mat depthmap_viz;
|
||
cv::cvtColor(active_cam0_image, depthmap_viz, cv::COLOR_GRAY2RGB);
|
||
cv::Mat depthmap = cv::Mat::zeros(wh_pair.second, wh_pair.first, CV_16UC1);
|
||
|
||
// Loop through all points and append
|
||
for (const auto &feattimes : active_tracks_uvd) {
|
||
|
||
// Get this feature information
|
||
size_t featid = feattimes.first;
|
||
Eigen::Vector3d uvd = active_tracks_uvd.at(featid);
|
||
|
||
// Skip invalid points
|
||
double dw = 3;
|
||
if (uvd(0) < dw || uvd(0) > wh_pair.first - dw || uvd(1) < dw || uvd(1) > wh_pair.second - dw) {
|
||
continue;
|
||
}
|
||
|
||
// Append the depth
|
||
// NOTE: scaled by 1000 to fit the 16U
|
||
// NOTE: access order is y,x (stupid opencv convention stuff)
|
||
depthmap.at<uint16_t>((int)uvd(1), (int)uvd(0)) = (uint16_t)(1000 * uvd(2));
|
||
|
||
// Taken from LSD-SLAM codebase segment into 0-4 meter segments:
|
||
// https://github.com/tum-vision/lsd_slam/blob/d1e6f0e1a027889985d2e6b4c0fe7a90b0c75067/lsd_slam_core/src/util/globalFuncs.cpp#L87-L96
|
||
float id = 1.0f / (float)uvd(2);
|
||
float r = (0.0f - id) * 255 / 1.0f;
|
||
if (r < 0)
|
||
r = -r;
|
||
float g = (1.0f - id) * 255 / 1.0f;
|
||
if (g < 0)
|
||
g = -g;
|
||
float b = (2.0f - id) * 255 / 1.0f;
|
||
if (b < 0)
|
||
b = -b;
|
||
uchar rc = r < 0 ? 0 : (r > 255 ? 255 : r);
|
||
uchar gc = g < 0 ? 0 : (g > 255 ? 255 : g);
|
||
uchar bc = b < 0 ? 0 : (b > 255 ? 255 : b);
|
||
cv::Scalar color(255 - rc, 255 - gc, 255 - bc);
|
||
|
||
// Small square around the point (note the above bound check needs to take into account this width)
|
||
cv::Point p0(uvd(0) - dw, uvd(1) - dw);
|
||
cv::Point p1(uvd(0) + dw, uvd(1) + dw);
|
||
cv::rectangle(depthmap_viz, p0, p1, color, -1);
|
||
}
|
||
|
||
// Create our messages
|
||
header.frame_id = "cam0";
|
||
sensor_msgs::ImagePtr exl_msg1 = cv_bridge::CvImage(header, sensor_msgs::image_encodings::TYPE_16UC1, depthmap).toImageMsg();
|
||
it_pub_loop_img_depth.publish(exl_msg1);
|
||
header.stamp = ros::Time::now();
|
||
header.frame_id = "cam0";
|
||
sensor_msgs::ImagePtr exl_msg2 = cv_bridge::CvImage(header, "bgr8", depthmap_viz).toImageMsg();
|
||
it_pub_loop_img_depth_color.publish(exl_msg2);
|
||
}
|
||
}
|
||
|
||
void ROS1Visualizer::T_align_callback(const geometry_msgs::TransformStamped &msg) {

  // Applies the received alignment transform to every cube visualization vertex,
  // transforming the points in homogeneous coordinates and writing them back.

  // Build the 4x4 homogeneous alignment transform from the message.
  // BUGFIX: previously only the top-left 3x3 block, the translation column, and (3,3)
  // were written, leaving the bottom row (3,0..2) uninitialized (Eigen does not
  // zero-initialize), so the multiply below read indeterminate values.
  Eigen::Vector4d qRot;
  qRot << msg.transform.rotation.x, msg.transform.rotation.y, msg.transform.rotation.z, msg.transform.rotation.w;
  Eigen::Matrix3d mRot = ov_core::quat_2_Rot(qRot);
  Eigen::Matrix4d T_align = Eigen::Matrix4d::Identity();
  T_align.block<3, 3>(0, 0) = mRot;
  T_align(0, 3) = msg.transform.translation.x;
  T_align(1, 3) = msg.transform.translation.y;
  T_align(2, 3) = msg.transform.translation.z;

  // Stack the cube vertices as homogeneous rows.
  // BUGFIX: the original loops read/wrote cubeVisPoints[0] instead of cubeVisPoints[i],
  // so only the first vertex was ever transformed (and copied into every row).
  const size_t num_points = cubeVisPoints.size();
  Eigen::MatrixXd points(num_points, 4);
  for (size_t i = 0; i < num_points; i++) {
    points(i, 0) = cubeVisPoints[i][0];
    points(i, 1) = cubeVisPoints[i][1];
    points(i, 2) = cubeVisPoints[i][2];
    points(i, 3) = 1;
  }

  // Transform all points at once and write them back
  points = (T_align * points.transpose()).transpose();
  for (size_t i = 0; i < num_points; i++) {
    cubeVisPoints[i][0] = points(i, 0);
    cubeVisPoints[i][1] = points(i, 1);
    cubeVisPoints[i][2] = points(i, 2);
  }
}
|
||
|