initial commit

This commit is contained in:
2022-08-05 08:23:25 +03:00
commit 5ecdc6abcf
387 changed files with 3010095 additions and 0 deletions

View File

@@ -0,0 +1,166 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "AlignTrajectory.h"
using namespace ov_eval;
/**
 * @brief Dispatch to the requested trajectory alignment method.
 *
 * Every method except "sim3" fixes the scale s to one. An unknown method
 * name is fatal: an error is printed and the program exits.
 */
void AlignTrajectory::align_trajectory(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es,
                                       const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt, Eigen::Matrix3d &R, Eigen::Vector3d &t,
                                       double &s, std::string method, int n_aligned) {

  // Position + yaw over a set of initial poses
  if (method == "posyaw") {
    s = 1;
    align_posyaw(traj_es, traj_gt, R, t, n_aligned);
    return;
  }

  // Position + yaw using only the first pose pair
  if (method == "posyawsingle") {
    s = 1;
    align_posyaw_single(traj_es, traj_gt, R, t);
    return;
  }

  // Full SE(3) over a set of initial poses
  if (method == "se3") {
    s = 1;
    align_se3(traj_es, traj_gt, R, t, n_aligned);
    return;
  }

  // Full SE(3) using only the first pose pair
  if (method == "se3single") {
    s = 1;
    align_se3_single(traj_es, traj_gt, R, t);
    return;
  }

  // SIM(3): scale is estimated, so at least two poses are required (-1 = all)
  if (method == "sim3") {
    assert(n_aligned >= 2 || n_aligned == -1);
    align_sim3(traj_es, traj_gt, R, t, s, n_aligned);
    return;
  }

  // Identity transform (no alignment performed)
  if (method == "none") {
    s = 1;
    R.setIdentity();
    t.setZero();
    return;
  }

  // Anything else is a user error
  PRINT_ERROR(RED "[ALIGN]: Invalid alignment method!\n" RESET);
  PRINT_ERROR(RED "[ALIGN]: Possible options: posyaw, posyawsingle, se3, se3single, sim3, none\n" RESET);
  std::exit(EXIT_FAILURE);
}
/**
 * @brief Position + yaw alignment computed from the first pose pair only.
 *
 * Recovers the yaw angle that best aligns the two initial orientations
 * (Frobenius norm problem), then the translation that maps the first
 * estimated position onto the first groundtruth position.
 */
void AlignTrajectory::align_posyaw_single(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es,
                                          const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt, Eigen::Matrix3d &R, Eigen::Vector3d &t) {

  // First pose of each trajectory, stored as [position (3), quaternion (4)]
  Eigen::Vector4d q_est0 = traj_es.at(0).tail<4>();
  Eigen::Vector3d p_est0 = traj_es.at(0).head<3>();
  Eigen::Vector4d q_gt0 = traj_gt.at(0).tail<4>();
  Eigen::Vector3d p_gt0 = traj_gt.at(0).head<3>();

  // Rotations from IMU frame to world (quaternions are JPL, hence the transpose)
  Eigen::Matrix3d R_gt = ov_core::quat_2_Rot(q_gt0).transpose();
  Eigen::Matrix3d R_est = ov_core::quat_2_Rot(q_est0).transpose();

  // Data matrix of the Frobenius problem, from which the best yaw is recovered
  double theta = AlignUtils::get_best_yaw(R_est * R_gt.transpose());
  R = ov_core::rot_z(theta);

  // Translation that maps the first estimated position onto groundtruth
  t = p_gt0 - R * p_est0;
}
/**
 * @brief Position + yaw alignment over a set of initial poses.
 *
 * With n_aligned == 1 this reduces to align_posyaw_single(); otherwise all
 * common position pairs are aligned with the yaw-only, fixed-scale Umeyama
 * method (n_aligned is not used beyond selecting the single-pose path).
 */
void AlignTrajectory::align_posyaw(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es,
                                   const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt, Eigen::Matrix3d &R, Eigen::Vector3d &t,
                                   int n_aligned) {

  // Single-pose request: use the closed-form first-pose solution
  if (n_aligned == 1) {
    align_posyaw_single(traj_es, traj_gt, R, t);
    return;
  }

  // Collect the position part of every common pose
  assert(!traj_es.empty());
  std::vector<Eigen::Vector3d> pos_est, pos_gt;
  for (size_t k = 0; k < traj_es.size() && k < traj_gt.size(); k++) {
    pos_est.push_back(traj_es.at(k).head<3>());
    pos_gt.push_back(traj_gt.at(k).head<3>());
  }

  // Umeyama with known scale and yaw-only rotation
  double s;
  AlignUtils::align_umeyama(pos_est, pos_gt, R, t, s, true, true);
  assert(s == 1);
}
/**
 * @brief Full SE(3) alignment computed from the first pose pair only.
 *
 * The rigid transform maps the first estimated pose exactly onto the first
 * groundtruth pose.
 */
void AlignTrajectory::align_se3_single(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es,
                                       const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt, Eigen::Matrix3d &R, Eigen::Vector3d &t) {

  // First pose of each trajectory, stored as [position (3), quaternion (4)]
  Eigen::Vector4d q_est0 = traj_es.at(0).tail<4>();
  Eigen::Vector3d p_est0 = traj_es.at(0).head<3>();
  Eigen::Vector4d q_gt0 = traj_gt.at(0).tail<4>();
  Eigen::Vector3d p_gt0 = traj_gt.at(0).head<3>();

  // Rotations from IMU frame to world (quaternions are JPL, hence the transpose)
  Eigen::Matrix3d R_gt = ov_core::quat_2_Rot(q_gt0).transpose();
  Eigen::Matrix3d R_est = ov_core::quat_2_Rot(q_est0).transpose();

  // Rigid transform aligning the first estimated pose with groundtruth
  R = R_gt * R_est.transpose();
  t = p_gt0 - R * p_est0;
}
/**
 * @brief Full SE(3) alignment over a set of initial poses.
 *
 * With n_aligned == 1 this reduces to align_se3_single(); otherwise all
 * common position pairs are aligned with the fixed-scale, full-rotation
 * Umeyama method (n_aligned is not used beyond selecting the single-pose path).
 */
void AlignTrajectory::align_se3(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es,
                                const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt, Eigen::Matrix3d &R, Eigen::Vector3d &t,
                                int n_aligned) {

  // Single-pose request: use the closed-form first-pose solution
  if (n_aligned == 1) {
    align_se3_single(traj_es, traj_gt, R, t);
    return;
  }

  // Collect the position part of every common pose
  assert(!traj_es.empty());
  std::vector<Eigen::Vector3d> pos_est, pos_gt;
  for (size_t k = 0; k < traj_es.size() && k < traj_gt.size(); k++) {
    pos_est.push_back(traj_es.at(k).head<3>());
    pos_gt.push_back(traj_gt.at(k).head<3>());
  }

  // Umeyama with known scale and full 3-dof rotation
  double s;
  AlignUtils::align_umeyama(pos_est, pos_gt, R, t, s, true, false);
}
/**
 * @brief Full SIM(3) alignment (rotation, translation, and scale).
 *
 * Scale recovery requires at least two pose pairs; all common poses are used
 * regardless of n_aligned (which is only sanity-checked here).
 */
void AlignTrajectory::align_sim3(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es,
                                 const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt, Eigen::Matrix3d &R, Eigen::Vector3d &t, double &s,
                                 int n_aligned) {

  // Scale recovery needs more than one pose (-1 means "use all")
  assert(n_aligned >= 2 || n_aligned == -1);

  // Collect the position part of every common pose
  assert(!traj_es.empty());
  std::vector<Eigen::Vector3d> pos_est, pos_gt;
  for (size_t k = 0; k < traj_es.size() && k < traj_gt.size(); k++) {
    pos_est.push_back(traj_es.at(k).head<3>());
    pos_gt.push_back(traj_gt.at(k).head<3>());
  }

  // Umeyama with free scale and full 3-dof rotation
  AlignUtils::align_umeyama(pos_est, pos_gt, R, t, s, false, false);
}

View File

@@ -0,0 +1,121 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_EVAL_ALIGNTRAJECTORY_H
#define OV_EVAL_ALIGNTRAJECTORY_H
#include <Eigen/Eigen>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include "AlignUtils.h"
#include "utils/colors.h"
#include "utils/print.h"
namespace ov_eval {
/**
* @brief Class that given two trajectories, will align the two.
*
* Given two trajectories that have already been time synchronized we will compute the alignment transform between the two.
* We can do this using different alignment methods such as full SE(3) transform, just position and yaw, or SIM(3).
* These scripts are based on the [rpg_trajectory_evaluation](https://github.com/uzh-rpg/rpg_trajectory_evaluation) toolkit by Zhang and
* Scaramuzza. Please take a look at their [2018 IROS](http://rpg.ifi.uzh.ch/docs/IROS18_Zhang.pdf) paper on these methods.
*/
class AlignTrajectory {

public:
  /**
   * @brief Align estimate to GT using a desired method using a set of initial poses
   * @param traj_es Estimated trajectory values in estimate frame [pos,quat]
   * @param traj_gt Groundtruth trajectory in groundtruth frame [pos,quat]
   * @param R Rotation from estimate to GT frame that will be computed
   * @param t Translation from estimate to GT frame that will be computed
   * @param s Scale from estimate to GT frame that will be computed (fixed to 1 for all methods except "sim3")
   * @param method Method used for alignment (one of: posyaw, posyawsingle, se3, se3single, sim3, none)
   * @param n_aligned Number of poses to use for alignment (-1 will use all)
   */
  static void align_trajectory(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es,
                               const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt, Eigen::Matrix3d &R, Eigen::Vector3d &t, double &s,
                               std::string method, int n_aligned = -1);

protected:
  /**
   * @brief Align estimate to GT using only position and yaw (for gravity aligned trajectories) using only the first poses
   * @param traj_es Estimated trajectory values in estimate frame [pos,quat]
   * @param traj_gt Groundtruth trajectory in groundtruth frame [pos,quat]
   * @param R Rotation from estimate to GT frame that will be computed
   * @param t Translation from estimate to GT frame that will be computed
   */
  static void align_posyaw_single(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es,
                                  const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt, Eigen::Matrix3d &R, Eigen::Vector3d &t);

  /**
   * @brief Align estimate to GT using only position and yaw (for gravity aligned trajectories) using a set of initial poses
   * @param traj_es Estimated trajectory values in estimate frame [pos,quat]
   * @param traj_gt Groundtruth trajectory in groundtruth frame [pos,quat]
   * @param R Rotation from estimate to GT frame that will be computed
   * @param t Translation from estimate to GT frame that will be computed
   * @param n_aligned Number of poses to use for alignment (-1 will use all)
   */
  static void align_posyaw(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es, const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt,
                           Eigen::Matrix3d &R, Eigen::Vector3d &t, int n_aligned = -1);

  /**
   * @brief Align estimate to GT using a full SE(3) transform using only the first poses
   * @param traj_es Estimated trajectory values in estimate frame [pos,quat]
   * @param traj_gt Groundtruth trajectory in groundtruth frame [pos,quat]
   * @param R Rotation from estimate to GT frame that will be computed
   * @param t Translation from estimate to GT frame that will be computed
   */
  static void align_se3_single(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es,
                               const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt, Eigen::Matrix3d &R, Eigen::Vector3d &t);

  /**
   * @brief Align estimate to GT using a full SE(3) transform using a set of initial poses
   * @param traj_es Estimated trajectory values in estimate frame [pos,quat]
   * @param traj_gt Groundtruth trajectory in groundtruth frame [pos,quat]
   * @param R Rotation from estimate to GT frame that will be computed
   * @param t Translation from estimate to GT frame that will be computed
   * @param n_aligned Number of poses to use for alignment (-1 will use all)
   */
  static void align_se3(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es, const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt,
                        Eigen::Matrix3d &R, Eigen::Vector3d &t, int n_aligned = -1);

  /**
   * @brief Align estimate to GT using a full SIM(3) transform using a set of initial poses
   * @param traj_es Estimated trajectory values in estimate frame [pos,quat]
   * @param traj_gt Groundtruth trajectory in groundtruth frame [pos,quat]
   * @param R Rotation from estimate to GT frame that will be computed
   * @param t Translation from estimate to GT frame that will be computed
   * @param s Scale from estimate to GT frame that will be computed
   * @param n_aligned Number of poses to use for alignment (-1 will use all, must be >= 2 otherwise)
   */
  static void align_sim3(const std::vector<Eigen::Matrix<double, 7, 1>> &traj_es, const std::vector<Eigen::Matrix<double, 7, 1>> &traj_gt,
                         Eigen::Matrix3d &R, Eigen::Vector3d &t, double &s, int n_aligned = -1);
};
} // namespace ov_eval
#endif /* OV_EVAL_ALIGNTRAJECTORY_H */

View File

@@ -0,0 +1,189 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "AlignUtils.h"
using namespace ov_eval;
/**
 * @brief Closed-form Umeyama alignment between two matched 3D point sets.
 *
 * Computes the similarity transform (R, t, s) mapping the data points onto
 * the model points in the least-squares sense. With known_scale the scale is
 * fixed to one; with yaw_only the rotation is restricted to a rotation about
 * the z-axis (for gravity-aligned frames).
 */
void AlignUtils::align_umeyama(const std::vector<Eigen::Matrix<double, 3, 1>> &data, const std::vector<Eigen::Matrix<double, 3, 1>> &model,
                               Eigen::Matrix<double, 3, 3> &R, Eigen::Matrix<double, 3, 1> &t, double &s, bool known_scale, bool yaw_only) {
  assert(model.size() == data.size());

  // Center both point sets about their means
  Eigen::Matrix<double, 3, 1> mean_model = get_mean(model);
  Eigen::Matrix<double, 3, 1> mean_data = get_mean(data);
  std::vector<Eigen::Matrix<double, 3, 1>> model_centered, data_centered;
  for (size_t k = 0; k < model.size(); k++) {
    model_centered.push_back(model[k] - mean_model);
    data_centered.push_back(data[k] - mean_data);
  }

  // Cross-correlation matrix between the centered sets
  double n = model.size();
  Eigen::Matrix<double, 3, 3> C = Eigen::Matrix<double, 3, 3>::Zero();
  for (size_t k = 0; k < model_centered.size(); k++) {
    C.noalias() += model_centered[k] * data_centered[k].transpose();
  }
  C *= 1.0 / n;

  // Variance of the centered data points (used for scale recovery)
  double sigma2 = 0;
  for (const auto &d : data_centered) {
    sigma2 += d.dot(d);
  }
  sigma2 *= 1.0 / n;

  // SVD of the correlation matrix
  Eigen::JacobiSVD<Eigen::Matrix<double, 3, 3>> svd(C, Eigen::ComputeFullV | Eigen::ComputeFullU);
  Eigen::Matrix<double, 3, 3> U = svd.matrixU();
  Eigen::Matrix<double, 3, 1> D = svd.singularValues();
  Eigen::Matrix<double, 3, 3> V = svd.matrixV();

  // Sign matrix that forces a proper rotation (det(R) = +1)
  Eigen::Matrix<double, 3, 3> S = Eigen::Matrix<double, 3, 3>::Identity();
  if (U.determinant() * V.determinant() < 0) {
    S(2, 2) = -1;
  }

  if (yaw_only) {
    // Gravity-aligned case: optimize over the yaw angle only
    double theta = AlignUtils::get_best_yaw(n * C.transpose());
    R = ov_core::rot_z(theta);
  } else {
    // General case: best full 3-dof rotation from the SVD factors
    R = U * S * V.transpose();
  }

  // Scale: fixed when known, otherwise the closed-form optimum
  if (known_scale) {
    s = 1;
  } else {
    s = 1.0 / sigma2 * (D.asDiagonal() * S).trace();
  }

  // Translation mapping the data mean onto the model mean
  t = mean_model - s * R * mean_data;

  // Debug printing
  // std::stringstream ss;
  // ss << "R- " << std::endl << R << std::endl;
  // ss << "t- " << std::endl << t << std::endl;
  // PRINT_DEBUG(ss.str().c_str());
}
/**
 * @brief Timestamp association without covariance information.
 *
 * Thin wrapper that forwards to the full overload with empty covariance
 * vectors; the covariance outputs are simply discarded.
 */
void AlignUtils::perform_association(double offset, double max_difference, std::vector<double> &est_times, std::vector<double> &gt_times,
                                     std::vector<Eigen::Matrix<double, 7, 1>> &est_poses,
                                     std::vector<Eigen::Matrix<double, 7, 1>> &gt_poses) {
  // No covariances available: pass empty placeholders to the full version
  std::vector<Eigen::Matrix3d> cov_ori_est, cov_pos_est, cov_ori_gt, cov_pos_gt;
  AlignUtils::perform_association(offset, max_difference, est_times, gt_times, est_poses, gt_poses, cov_ori_est, cov_pos_est, cov_ori_gt,
                                  cov_pos_gt);
}
/**
 * @brief Intersects the estimate and groundtruth so both have common timestamps.
 *
 * For each estimate timestamp (plus the user offset) the closest groundtruth
 * timestamp within max_difference is searched for. Matching is injective: the
 * groundtruth pointer only moves forward, so each groundtruth pose is used at
 * most once. On success all in/out vectors are overwritten with the matched
 * subsets; if fewer than three matches are found an error is printed and the
 * inputs are left untouched.
 *
 * @param offset Time offset added to every estimate timestamp
 * @param max_difference Largest allowed time difference for a valid match
 * @param est_times Estimate timestamps (in/out)
 * @param gt_times Groundtruth timestamps (in/out)
 * @param est_poses Estimate poses [pos,quat] (in/out)
 * @param gt_poses Groundtruth poses [pos,quat] (in/out)
 * @param est_covori Estimate orientation covariances, may be empty (in/out)
 * @param est_covpos Estimate position covariances, may be empty (in/out)
 * @param gt_covori Groundtruth orientation covariances, may be empty (in/out)
 * @param gt_covpos Groundtruth position covariances, may be empty (in/out)
 */
void AlignUtils::perform_association(double offset, double max_difference, std::vector<double> &est_times, std::vector<double> &gt_times,
                                     std::vector<Eigen::Matrix<double, 7, 1>> &est_poses,
                                     std::vector<Eigen::Matrix<double, 7, 1>> &gt_poses, std::vector<Eigen::Matrix3d> &est_covori,
                                     std::vector<Eigen::Matrix3d> &est_covpos, std::vector<Eigen::Matrix3d> &gt_covori,
                                     std::vector<Eigen::Matrix3d> &gt_covpos) {

  // Temp results which keep only the matches
  std::vector<double> est_times_temp, gt_times_temp;
  std::vector<Eigen::Matrix<double, 7, 1>> est_poses_temp, gt_poses_temp;
  std::vector<Eigen::Matrix3d> est_covori_temp, est_covpos_temp, gt_covori_temp, gt_covpos_temp;

  // Iterator over gt (only ever increases to enforce injectivity of matches)
  size_t gt_pointer = 0;

  // Try to find closest GT pose for each estimate
  for (size_t i = 0; i < est_times.size(); i++) {

    // Default params
    double best_diff = max_difference;
    int best_gt_idx = -1;

    // Increment while too small and is not within our difference threshold
    while (gt_pointer < gt_times.size() && gt_times.at(gt_pointer) < (est_times.at(i) + offset) &&
           std::abs(gt_times.at(gt_pointer) - (est_times.at(i) + offset)) > max_difference) {
      gt_pointer++;
    }

    // If we are closer than max difference, see if we can do any better
    while (gt_pointer < gt_times.size() && std::abs(gt_times.at(gt_pointer) - (est_times.at(i) + offset)) <= max_difference) {

      // Break if we found a good match but are getting worse, we are done
      if (std::abs(gt_times.at(gt_pointer) - (est_times.at(i) + offset)) >= best_diff) {
        break;
      }

      // We have a closer match, save it and move on
      best_diff = std::abs(gt_times.at(gt_pointer) - (est_times.at(i) + offset));
      best_gt_idx = gt_pointer;
      gt_pointer++;
    }

    // Did we get a valid match
    if (best_gt_idx != -1) {

      // Save estimate and gt states for the match
      // NOTE: the estimate timestamp is snapped to the matched groundtruth time so
      // both output trajectories carry identical timestamps
      est_times_temp.push_back(gt_times.at(best_gt_idx));
      est_poses_temp.push_back(est_poses.at(i));
      gt_times_temp.push_back(gt_times.at(best_gt_idx));
      gt_poses_temp.push_back(gt_poses.at(best_gt_idx));

      // If we have covariance then also append it
      // If the groundtruth doesn't have covariance say it is 100% certain
      if (!est_covori.empty()) {
        assert(est_covori.size() == est_covpos.size());
        est_covori_temp.push_back(est_covori.at(i));
        est_covpos_temp.push_back(est_covpos.at(i));
        if (gt_covori.empty()) {
          gt_covori_temp.push_back(Eigen::Matrix3d::Zero());
          gt_covpos_temp.push_back(Eigen::Matrix3d::Zero());
        } else {
          assert(gt_covori.size() == gt_covpos.size());
          gt_covori_temp.push_back(gt_covori.at(best_gt_idx));
          gt_covpos_temp.push_back(gt_covpos.at(best_gt_idx));
        }
      }
    }
  }

  // Ensure that we have enough associations
  // BUGFIX: check (and report) the number of *matches*, not the size of the input
  // vector, otherwise a run with >=3 estimates but <3 matches silently overwrites
  // the trajectories with near-empty data instead of erroring out
  if (est_times_temp.size() < 3) {
    PRINT_ERROR(RED "[ALIGN]: Was unable to associate groundtruth and estimate trajectories\n" RESET);
    PRINT_ERROR(RED "[ALIGN]: %d total matches....\n" RESET, (int)est_times_temp.size());
    PRINT_ERROR(RED "[ALIGN]: Do the time of the files match??\n" RESET);
    return;
  }
  assert(est_times_temp.size() == gt_times_temp.size());
  // PRINT_DEBUG("[TRAJ]: %d est poses and %d gt poses => %d
  // matches\n",(int)est_times.size(),(int)gt_times.size(),(int)est_times_temp.size());

  // Overwrite with intersected values
  est_times = est_times_temp;
  est_poses = est_poses_temp;
  est_covori = est_covori_temp;
  est_covpos = est_covpos_temp;
  gt_times = gt_times_temp;
  gt_poses = gt_poses_temp;
  gt_covori = gt_covori_temp;
  gt_covpos = gt_covpos_temp;
}

View File

@@ -0,0 +1,110 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_EVAL_ALIGNUTILS_H
#define OV_EVAL_ALIGNUTILS_H
#include <Eigen/Eigen>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include "utils/colors.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
namespace ov_eval {
/**
* @brief Helper functions for the trajectory alignment class.
*
* The key function is an implementation of Umeyama's
* [Least-squares estimation of transformation parameters between two point patterns](https://ieeexplore.ieee.org/document/88573).
* This is what allows us to find the transform between the two
* trajectories without worrying about singularities for the absolute trajectory error.
*/
class AlignUtils {

public:
  /**
   * @brief Gets best yaw from Frobenius problem.
   * Equation (17)-(18) in [Zhang et al. 2018 IROS](http://rpg.ifi.uzh.ch/docs/IROS18_Zhang.pdf) paper.
   * @param C Data matrix
   * @return Yaw angle in radians
   */
  static inline double get_best_yaw(const Eigen::Matrix<double, 3, 3> &C) {
    double A = C(0, 1) - C(1, 0);
    double B = C(0, 0) + C(1, 1);
    // Equivalent closed form, kept for reference:
    // return M_PI_2 - atan2(B, A);
    return atan2(A, B);
  }

  /**
   * @brief Gets mean of the vector of data
   * @param data Vector of data (assumed non-empty; an empty vector would divide by zero -- TODO confirm callers guarantee this)
   * @return Mean value
   */
  static inline Eigen::Matrix<double, 3, 1> get_mean(const std::vector<Eigen::Matrix<double, 3, 1>> &data) {
    Eigen::Matrix<double, 3, 1> mean = Eigen::Matrix<double, 3, 1>::Zero();
    for (size_t i = 0; i < data.size(); i++) {
      mean.noalias() += data[i];
    }
    mean /= data.size();
    return mean;
  }

  /**
   * @brief Given a set of points in a model frame and a set of points in a data frame,
   * finds best transform between frames (subject to constraints).
   *
   * @param data Vector of points in data frame (i.e., estimates)
   * @param model Vector of points in model frame (i.e., gt)
   * @param R Output rotation from data frame to model frame
   * @param t Output translation from data frame to model frame
   * @param s Output scale from data frame to model frame (set to 1 when known_scale is true)
   * @param known_scale Whether to fix scale
   * @param yaw_only Whether to only use yaw orientation (such as when frames are already gravity aligned)
   */
  static void align_umeyama(const std::vector<Eigen::Matrix<double, 3, 1>> &data, const std::vector<Eigen::Matrix<double, 3, 1>> &model,
                            Eigen::Matrix<double, 3, 3> &R, Eigen::Matrix<double, 3, 1> &t, double &s, bool known_scale, bool yaw_only);

  /**
   * @brief Will intersect our loaded data so that we have common timestamps.
   * All pose/time vectors are overwritten in place with the matched subsets.
   * @param offset Time offset to append to our estimate
   * @param max_difference Biggest allowed difference between matched timesteps
   */
  static void perform_association(double offset, double max_difference, std::vector<double> &est_times, std::vector<double> &gt_times,
                                  std::vector<Eigen::Matrix<double, 7, 1>> &est_poses, std::vector<Eigen::Matrix<double, 7, 1>> &gt_poses);

  /**
   * @brief Will intersect our loaded data so that we have common timestamps.
   * All pose/time/covariance vectors are overwritten in place with the matched subsets
   * (covariance vectors may be passed in empty).
   * @param offset Time offset to append to our estimate
   * @param max_difference Biggest allowed difference between matched timesteps
   */
  static void perform_association(double offset, double max_difference, std::vector<double> &est_times, std::vector<double> &gt_times,
                                  std::vector<Eigen::Matrix<double, 7, 1>> &est_poses, std::vector<Eigen::Matrix<double, 7, 1>> &gt_poses,
                                  std::vector<Eigen::Matrix3d> &est_covori, std::vector<Eigen::Matrix3d> &est_covpos,
                                  std::vector<Eigen::Matrix3d> &gt_covori, std::vector<Eigen::Matrix3d> &gt_covpos);
};
} // namespace ov_eval
#endif // OV_EVAL_ALIGNUTILS_H