initial commit

This commit is contained in:
2022-08-05 08:23:25 +03:00
commit 5ecdc6abcf
387 changed files with 3010095 additions and 0 deletions

198
ov_core/src/cam/CamBase.h Normal file
View File

@@ -0,0 +1,198 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_CAM_BASE_H
#define OV_CORE_CAM_BASE_H
#include <Eigen/Eigen>
#include <unordered_map>
#include <opencv2/opencv.hpp>
namespace ov_core {
/**
* @brief Base pinhole camera model class
*
* This is the base class for all our camera models.
* All these models are pinhole cameras, thus just have standard reprojection logic.
*
* See each derived class for detailed examples on each model:
* - @ref ov_core::CamEqui
* - @ref ov_core::CamRadtan
*/
class CamBase {

public:
  /**
   * @brief Default constructor
   * @param width Width of the camera (raw pixels)
   * @param height Height of the camera (raw pixels)
   */
  CamBase(int width, int height) : _width(width), _height(height) {}

  /// Virtual destructor: instances are handled through CamBase pointers/references,
  /// so the base destructor must be virtual for derived cameras to be destroyed correctly.
  virtual ~CamBase() = default;

  /**
   * @brief This will set and update the camera calibration values.
   * This should be called on startup for each camera and after update!
   * @param calib Camera calibration information (f_x & f_y & c_x & c_y & k_1 & k_2 & k_3 & k_4)
   */
  virtual void set_value(const Eigen::MatrixXd &calib) {

    // Assert we are of size eight
    assert(calib.rows() == 8);
    camera_values = calib;

    // Camera matrix
    cv::Matx33d tempK;
    tempK(0, 0) = calib(0);
    tempK(0, 1) = 0;
    tempK(0, 2) = calib(2);
    tempK(1, 0) = 0;
    tempK(1, 1) = calib(1);
    tempK(1, 2) = calib(3);
    tempK(2, 0) = 0;
    tempK(2, 1) = 0;
    tempK(2, 2) = 1;
    camera_k_OPENCV = tempK;

    // Distortion parameters
    cv::Vec4d tempD;
    tempD(0) = calib(4);
    tempD(1) = calib(5);
    tempD(2) = calib(6);
    tempD(3) = calib(7);
    camera_d_OPENCV = tempD;
  }

  /**
   * @brief Given a raw uv point, this will undistort it based on the camera matrices into normalized camera coords.
   * @param uv_dist Raw uv coordinate we wish to undistort
   * @return 2d vector of normalized coordinates
   */
  virtual Eigen::Vector2f undistort_f(const Eigen::Vector2f &uv_dist) = 0;

  /**
   * @brief Given a raw uv point, this will undistort it based on the camera matrices into normalized camera coords.
   * Double-precision convenience wrapper: casts through float and calls undistort_f().
   * @param uv_dist Raw uv coordinate we wish to undistort
   * @return 2d vector of normalized coordinates
   */
  Eigen::Vector2d undistort_d(const Eigen::Vector2d &uv_dist) {
    Eigen::Vector2f ept1, ept2;
    ept1 = uv_dist.cast<float>();
    ept2 = undistort_f(ept1);
    return ept2.cast<double>();
  }

  /**
   * @brief Given a raw uv point, this will undistort it based on the camera matrices into normalized camera coords.
   * OpenCV-point convenience wrapper around undistort_f().
   * @param uv_dist Raw uv coordinate we wish to undistort
   * @return 2d vector of normalized coordinates
   */
  cv::Point2f undistort_cv(const cv::Point2f &uv_dist) {
    Eigen::Vector2f ept1, ept2;
    ept1 << uv_dist.x, uv_dist.y;
    ept2 = undistort_f(ept1);
    cv::Point2f pt_out;
    pt_out.x = ept2(0);
    pt_out.y = ept2(1);
    return pt_out;
  }

  /**
   * @brief Given a normalized uv coordinate this will distort it to the raw image plane
   * @param uv_norm Normalized coordinates we wish to distort
   * @return 2d vector of raw uv coordinate
   */
  virtual Eigen::Vector2f distort_f(const Eigen::Vector2f &uv_norm) = 0;

  /**
   * @brief Given a normalized uv coordinate this will distort it to the raw image plane
   * Double-precision convenience wrapper: casts through float and calls distort_f().
   * @param uv_norm Normalized coordinates we wish to distort
   * @return 2d vector of raw uv coordinate
   */
  Eigen::Vector2d distort_d(const Eigen::Vector2d &uv_norm) {
    Eigen::Vector2f ept1, ept2;
    ept1 = uv_norm.cast<float>();
    ept2 = distort_f(ept1);
    return ept2.cast<double>();
  }

  /**
   * @brief Given a normalized uv coordinate this will distort it to the raw image plane
   * OpenCV-point convenience wrapper around distort_f().
   * @param uv_norm Normalized coordinates we wish to distort
   * @return 2d vector of raw uv coordinate
   */
  cv::Point2f distort_cv(const cv::Point2f &uv_norm) {
    Eigen::Vector2f ept1, ept2;
    ept1 << uv_norm.x, uv_norm.y;
    ept2 = distort_f(ept1);
    cv::Point2f pt_out;
    pt_out.x = ept2(0);
    pt_out.y = ept2(1);
    return pt_out;
  }

  /**
   * @brief Computes the derivative of raw distorted to normalized coordinate.
   * @param uv_norm Normalized coordinates we wish to distort
   * @param H_dz_dzn Derivative of measurement z in respect to normalized
   * @param H_dz_dzeta Derivative of measurement z in respect to intrinsic parameters
   */
  virtual void compute_distort_jacobian(const Eigen::Vector2d &uv_norm, Eigen::MatrixXd &H_dz_dzn, Eigen::MatrixXd &H_dz_dzeta) = 0;

  /// Gets the complete intrinsic vector
  Eigen::MatrixXd get_value() const { return camera_values; }

  /// Gets the camera matrix
  cv::Matx33d get_K() const { return camera_k_OPENCV; }

  /// Gets the camera distortion
  cv::Vec4d get_D() const { return camera_d_OPENCV; }

  /// Gets the width of the camera images
  int w() const { return _width; }

  /// Gets the height of the camera images
  int h() const { return _height; }

protected:
  // Cannot construct the base camera class, needs a distortion model
  CamBase() = default;

  /// Raw set of camera intrinsic values (f_x & f_y & c_x & c_y & k_1 & k_2 & k_3 & k_4)
  Eigen::MatrixXd camera_values;

  /// Camera intrinsics in OpenCV format
  cv::Matx33d camera_k_OPENCV;

  /// Camera distortion in OpenCV format
  cv::Vec4d camera_d_OPENCV;

  /// Width of the camera (raw pixels)
  int _width;

  /// Height of the camera (raw pixels)
  int _height;
};
} // namespace ov_core
#endif /* OV_CORE_CAM_BASE_H */

233
ov_core/src/cam/CamEqui.h Normal file
View File

@@ -0,0 +1,233 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_CAM_EQUI_H
#define OV_CORE_CAM_EQUI_H
#include "CamBase.h"
namespace ov_core {
/**
* @brief Fisheye / equidistant model pinhole camera model class
*
* As fisheye or wide-angle lenses are widely used in practice, we here provide mathematical derivations
* of such distortion model as in [OpenCV fisheye](https://docs.opencv.org/3.4/db/d58/group__calib3d__fisheye.html#details).
*
* \f{align*}{
* \begin{bmatrix} u \\ v \end{bmatrix}:= \mathbf{z}_k &= \mathbf h_d(\mathbf{z}_{n,k}, ~\boldsymbol\zeta)
* = \begin{bmatrix} f_x * x + c_x \\
* f_y * y + c_y \end{bmatrix}\\[1em]
* \empty
* {\rm where}~~
* x &= \frac{x_n}{r} * \theta_d \\
* y &= \frac{y_n}{r} * \theta_d \\
* \theta_d &= \theta (1 + k_1 \theta^2 + k_2 \theta^4 + k_3 \theta^6 + k_4 \theta^8) \\
* \quad r^2 &= x_n^2 + y_n^2 \\
* \theta &= atan(r)
* \f}
*
* where \f$ \mathbf{z}_{n,k} = [ x_n ~ y_n ]^\top\f$ are the normalized coordinates of the 3D feature
* and u and v are the distorted image coordinates on the image plane.
* Clearly, the following distortion intrinsic parameters are used in the above model:
*
* \f{align*}{
* \boldsymbol\zeta = \begin{bmatrix} f_x & f_y & c_x & c_y & k_1 & k_2 & k_3 & k_4 \end{bmatrix}^\top
* \f}
*
* In analogy to the previous radial distortion (see @ref ov_core::CamRadtan) case, the following Jacobian for these
* parameters is needed for intrinsic calibration:
* \f{align*}{
* \frac{\partial \mathbf h_d (\cdot)}{\partial \boldsymbol\zeta} =
* \begin{bmatrix}
* x_n & 0 & 1 & 0 & f_x*(\frac{x_n}{r}\theta^3) & f_x*(\frac{x_n}{r}\theta^5) & f_x*(\frac{x_n}{r}\theta^7) & f_x*(\frac{x_n}{r}\theta^9)
* \\[5pt] 0 & y_n & 0 & 1 & f_y*(\frac{y_n}{r}\theta^3) & f_y*(\frac{y_n}{r}\theta^5) & f_y*(\frac{y_n}{r}\theta^7) &
* f_y*(\frac{y_n}{r}\theta^9) \end{bmatrix} \f}
*
* Similarly, with the chain rule of differentiation,
* we can compute the following Jacobian with respect to the normalized coordinates:
*
* \f{align*}{
* \frac{\partial \mathbf h_d(\cdot)}{\partial \mathbf{z}_{n,k}}
* &=
* \frac{\partial uv}{\partial xy}\frac{\partial xy}{\partial x_ny_n}+
* \frac{\partial uv}{\partial xy}\frac{\partial xy}{\partial r}\frac{\partial r}{\partial x_ny_n}+
* \frac{\partial uv}{\partial xy}\frac{\partial xy}{\partial \theta_d}\frac{\partial \theta_d}{\partial \theta}\frac{\partial
* \theta}{\partial r}\frac{\partial r}{\partial x_ny_n} \\[1em] \empty
* {\rm where}~~~~
* \frac{\partial uv}{\partial xy} &= \begin{bmatrix} f_x & 0 \\ 0 & f_y \end{bmatrix} \\
* \empty
* \frac{\partial xy}{\partial x_ny_n} &= \begin{bmatrix} \theta_d/r & 0 \\ 0 & \theta_d/r \end{bmatrix} \\
* \empty
* \frac{\partial xy}{\partial r} &= \begin{bmatrix} -\frac{x_n}{r^2}\theta_d \\ -\frac{y_n}{r^2}\theta_d \end{bmatrix} \\
* \empty
* \frac{\partial r}{\partial x_ny_n} &= \begin{bmatrix} \frac{x_n}{r} & \frac{y_n}{r} \end{bmatrix} \\
* \empty
* \frac{\partial xy}{\partial \theta_d} &= \begin{bmatrix} \frac{x_n}{r} \\ \frac{y_n}{r} \end{bmatrix} \\
* \empty
* \frac{\partial \theta_d}{\partial \theta} &= \begin{bmatrix} 1 + 3k_1 \theta^2 + 5k_2 \theta^4 + 7k_3 \theta^6 + 9k_4
* \theta^8\end{bmatrix} \\ \empty \frac{\partial \theta}{\partial r} &= \begin{bmatrix} \frac{1}{r^2+1} \end{bmatrix} \f}
*
* To equate this to one of Kalibr's models, this is what you would use for `pinhole-equi`.
*/
class CamEqui : public CamBase {

public:
  /**
   * @brief Default constructor
   * @param width Width of the camera (raw pixels)
   * @param height Height of the camera (raw pixels)
   */
  CamEqui(int width, int height) : CamBase(width, height) {}

  /**
   * @brief Given a raw uv point, this will undistort it based on the camera matrices into normalized camera coords.
   * Uses OpenCV's iterative cv::fisheye::undistortPoints under the hood.
   * @param uv_dist Raw uv coordinate we wish to undistort
   * @return 2d vector of normalized coordinates
   */
  Eigen::Vector2f undistort_f(const Eigen::Vector2f &uv_dist) override {

    // Determine what camera parameters we should use
    cv::Matx33d camK = camera_k_OPENCV;
    cv::Vec4d camD = camera_d_OPENCV;

    // Convert point to opencv format
    cv::Mat mat(1, 2, CV_32F);
    mat.at<float>(0, 0) = uv_dist(0);
    mat.at<float>(0, 1) = uv_dist(1);
    mat = mat.reshape(2); // Nx1, 2-channel

    // Undistort it!
    cv::fisheye::undistortPoints(mat, mat, camK, camD);

    // Construct our return vector
    Eigen::Vector2f pt_out;
    mat = mat.reshape(1); // Nx2, 1-channel
    pt_out(0) = mat.at<float>(0, 0);
    pt_out(1) = mat.at<float>(0, 1);
    return pt_out;
  }

  /**
   * @brief Given a normalized uv coordinate this will distort it to the raw image plane
   * @param uv_norm Normalized coordinates we wish to distort
   * @return 2d vector of raw uv coordinate
   */
  Eigen::Vector2f distort_f(const Eigen::Vector2f &uv_norm) override {

    // Get our camera parameters (const reference: avoid deep-copying the dynamic matrix on every call)
    const Eigen::MatrixXd &cam_d = camera_values;

    // Calculate distorted coordinates for fisheye
    double r = std::sqrt(uv_norm(0) * uv_norm(0) + uv_norm(1) * uv_norm(1));
    double theta = std::atan(r);
    double theta_d = theta + cam_d(4) * std::pow(theta, 3) + cam_d(5) * std::pow(theta, 5) + cam_d(6) * std::pow(theta, 7) +
                     cam_d(7) * std::pow(theta, 9);

    // Handle when r is small (meaning our xy is near the camera center)
    double inv_r = (r > 1e-8) ? 1.0 / r : 1.0;
    double cdist = (r > 1e-8) ? theta_d * inv_r : 1.0;

    // Calculate distorted coordinates for fisheye
    Eigen::Vector2f uv_dist;
    double x1 = uv_norm(0) * cdist;
    double y1 = uv_norm(1) * cdist;
    uv_dist(0) = (float)(cam_d(0) * x1 + cam_d(2));
    uv_dist(1) = (float)(cam_d(1) * y1 + cam_d(3));
    return uv_dist;
  }

  /**
   * @brief Computes the derivative of raw distorted to normalized coordinate.
   * @param uv_norm Normalized coordinates we wish to distort
   * @param H_dz_dzn Derivative of measurement z in respect to normalized
   * @param H_dz_dzeta Derivative of measurement z in respect to intrinsic parameters
   */
  void compute_distort_jacobian(const Eigen::Vector2d &uv_norm, Eigen::MatrixXd &H_dz_dzn, Eigen::MatrixXd &H_dz_dzeta) override {

    // Get our camera parameters (const reference: avoid deep-copying the dynamic matrix on every call)
    const Eigen::MatrixXd &cam_d = camera_values;

    // Calculate distorted coordinates for fisheye
    double r = std::sqrt(uv_norm(0) * uv_norm(0) + uv_norm(1) * uv_norm(1));
    double theta = std::atan(r);
    double theta_d = theta + cam_d(4) * std::pow(theta, 3) + cam_d(5) * std::pow(theta, 5) + cam_d(6) * std::pow(theta, 7) +
                     cam_d(7) * std::pow(theta, 9);

    // Handle when r is small (meaning our xy is near the camera center)
    double inv_r = (r > 1e-8) ? 1.0 / r : 1.0;
    double cdist = (r > 1e-8) ? theta_d * inv_r : 1.0;

    // Jacobian of distorted pixel to "normalized" pixel
    Eigen::Matrix<double, 2, 2> duv_dxy = Eigen::Matrix<double, 2, 2>::Zero();
    duv_dxy << cam_d(0), 0, 0, cam_d(1);

    // Jacobian of "normalized" pixel to normalized pixel
    Eigen::Matrix<double, 2, 2> dxy_dxyn = Eigen::Matrix<double, 2, 2>::Zero();
    dxy_dxyn << theta_d * inv_r, 0, 0, theta_d * inv_r;

    // Jacobian of "normalized" pixel to r
    Eigen::Matrix<double, 2, 1> dxy_dr = Eigen::Matrix<double, 2, 1>::Zero();
    dxy_dr << -uv_norm(0) * theta_d * inv_r * inv_r, -uv_norm(1) * theta_d * inv_r * inv_r;

    // Jacobian of r pixel to normalized xy
    Eigen::Matrix<double, 1, 2> dr_dxyn = Eigen::Matrix<double, 1, 2>::Zero();
    dr_dxyn << uv_norm(0) * inv_r, uv_norm(1) * inv_r;

    // Jacobian of "normalized" pixel to theta_d
    Eigen::Matrix<double, 2, 1> dxy_dthd = Eigen::Matrix<double, 2, 1>::Zero();
    dxy_dthd << uv_norm(0) * inv_r, uv_norm(1) * inv_r;

    // Jacobian of theta_d to theta
    double dthd_dth = 1 + 3 * cam_d(4) * std::pow(theta, 2) + 5 * cam_d(5) * std::pow(theta, 4) + 7 * cam_d(6) * std::pow(theta, 6) +
                      9 * cam_d(7) * std::pow(theta, 8);

    // Jacobian of theta to r
    double dth_dr = 1 / (r * r + 1);

    // Total Jacobian wrt normalized pixel coordinates (chain rule through the three paths)
    H_dz_dzn = Eigen::MatrixXd::Zero(2, 2);
    H_dz_dzn = duv_dxy * (dxy_dxyn + (dxy_dr + dxy_dthd * dthd_dth * dth_dr) * dr_dxyn);

    // Calculate distorted coordinates for fisheye
    double x1 = uv_norm(0) * cdist;
    double y1 = uv_norm(1) * cdist;

    // Compute the Jacobian in respect to the intrinsics
    H_dz_dzeta = Eigen::MatrixXd::Zero(2, 8);
    H_dz_dzeta(0, 0) = x1;
    H_dz_dzeta(0, 2) = 1;
    H_dz_dzeta(0, 4) = cam_d(0) * uv_norm(0) * inv_r * std::pow(theta, 3);
    H_dz_dzeta(0, 5) = cam_d(0) * uv_norm(0) * inv_r * std::pow(theta, 5);
    H_dz_dzeta(0, 6) = cam_d(0) * uv_norm(0) * inv_r * std::pow(theta, 7);
    H_dz_dzeta(0, 7) = cam_d(0) * uv_norm(0) * inv_r * std::pow(theta, 9);
    H_dz_dzeta(1, 1) = y1;
    H_dz_dzeta(1, 3) = 1;
    H_dz_dzeta(1, 4) = cam_d(1) * uv_norm(1) * inv_r * std::pow(theta, 3);
    H_dz_dzeta(1, 5) = cam_d(1) * uv_norm(1) * inv_r * std::pow(theta, 5);
    H_dz_dzeta(1, 6) = cam_d(1) * uv_norm(1) * inv_r * std::pow(theta, 7);
    H_dz_dzeta(1, 7) = cam_d(1) * uv_norm(1) * inv_r * std::pow(theta, 9);
  }
};
} // namespace ov_core
#endif /* OV_CORE_CAM_EQUI_H */

201
ov_core/src/cam/CamRadtan.h Normal file
View File

@@ -0,0 +1,201 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_CAM_RADTAN_H
#define OV_CORE_CAM_RADTAN_H
#include "CamBase.h"
namespace ov_core {
/**
* @brief Radial-tangential / Brown-Conrady model pinhole camera model class
*
* To calibrate camera intrinsics, we need to know how to map our normalized coordinates
* into the raw pixel coordinates on the image plane. We first employ the radial distortion
* as in [OpenCV model](https://docs.opencv.org/3.4/da/d54/group__imgproc__transform.html#details):
*
* \f{align*}{
* \begin{bmatrix} u \\ v \end{bmatrix}:= \mathbf{z}_k &= \mathbf h_d(\mathbf{z}_{n,k}, ~\boldsymbol\zeta)
* = \begin{bmatrix} f_x * x + c_x \\
* f_y * y + c_y \end{bmatrix}\\[1em]
* \empty
* {\rm where}~~
* x &= x_n (1 + k_1 r^2 + k_2 r^4) + 2 p_1 x_n y_n + p_2(r^2 + 2 x_n^2) \\\
* y &= y_n (1 + k_1 r^2 + k_2 r^4) + p_1 (r^2 + 2 y_n^2) + 2 p_2 x_n y_n \\[1em]
* r^2 &= x_n^2 + y_n^2
* \f}
*
* where \f$ \mathbf{z}_{n,k} = [ x_n ~ y_n ]^\top\f$ are the normalized coordinates of the 3D feature and u and v are the distorted image
* coordinates on the image plane. The following distortion and camera intrinsic (focal length and image center) parameters are involved in
* the above distortion model, which can be estimated online:
*
* \f{align*}{
* \boldsymbol\zeta = \begin{bmatrix} f_x & f_y & c_x & c_y & k_1 & k_2 & p_1 & p_2 \end{bmatrix}^\top
* \f}
*
* Note that we do not estimate the higher order (i.e., higher than fourth order) terms as in most offline calibration
* methods such as [Kalibr](https://github.com/ethz-asl/kalibr). To estimate these intrinsic parameters (including the
* distortion parameters), the following Jacobian for these parameters is needed:
*
* \f{align*}{
* \frac{\partial \mathbf h_d(\cdot)}{\partial \boldsymbol\zeta} =
* \begin{bmatrix}
* x & 0 & 1 & 0 & f_x*(x_nr^2) & f_x*(x_nr^4) & f_x*(2x_ny_n) & f_x*(r^2+2x_n^2) \\[5pt]
* 0 & y & 0 & 1 & f_y*(y_nr^2) & f_y*(y_nr^4) & f_y*(r^2+2y_n^2) & f_y*(2x_ny_n)
* \end{bmatrix}
* \f}
*
* Similarly, the Jacobian with respect to the normalized coordinates can be obtained as follows:
*
* \f{align*}{
* \frac{\partial \mathbf h_d (\cdot)}{\partial \mathbf{z}_{n,k}} =
* \begin{bmatrix}
* f_x*((1+k_1r^2+k_2r^4)+(2k_1x_n^2+4k_2x_n^2(x_n^2+y_n^2))+2p_1y_n+(2p_2x_n+4p_2x_n)) &
* f_x*(2k_1x_ny_n+4k_2x_ny_n(x_n^2+y_n^2)+2p_1x_n+2p_2y_n) \\
* f_y*(2k_1x_ny_n+4k_2x_ny_n(x_n^2+y_n^2)+2p_1x_n+2p_2y_n) &
* f_y*((1+k_1r^2+k_2r^4)+(2k_1y_n^2+4k_2y_n^2(x_n^2+y_n^2))+(2p_1y_n+4p_1y_n)+2p_2x_n)
* \end{bmatrix}
* \f}
*
* To equate this camera class to Kalibr's models, this is what you would use for `pinhole-radtan`.
*
*/
class CamRadtan : public CamBase {

public:
  /**
   * @brief Default constructor
   * @param width Width of the camera (raw pixels)
   * @param height Height of the camera (raw pixels)
   */
  CamRadtan(int width, int height) : CamBase(width, height) {}

  /**
   * @brief Given a raw uv point, this will undistort it based on the camera matrices into normalized camera coords.
   * Uses OpenCV's iterative cv::undistortPoints under the hood.
   * @param uv_dist Raw uv coordinate we wish to undistort
   * @return 2d vector of normalized coordinates
   */
  Eigen::Vector2f undistort_f(const Eigen::Vector2f &uv_dist) override {

    // Determine what camera parameters we should use
    cv::Matx33d camK = camera_k_OPENCV;
    cv::Vec4d camD = camera_d_OPENCV;

    // Convert to opencv format
    cv::Mat mat(1, 2, CV_32F);
    mat.at<float>(0, 0) = uv_dist(0);
    mat.at<float>(0, 1) = uv_dist(1);
    mat = mat.reshape(2); // Nx1, 2-channel

    // Undistort it!
    cv::undistortPoints(mat, mat, camK, camD);

    // Construct our return vector
    Eigen::Vector2f pt_out;
    mat = mat.reshape(1); // Nx2, 1-channel
    pt_out(0) = mat.at<float>(0, 0);
    pt_out(1) = mat.at<float>(0, 1);
    return pt_out;
  }

  /**
   * @brief Given a normalized uv coordinate this will distort it to the raw image plane
   * @param uv_norm Normalized coordinates we wish to distort
   * @return 2d vector of raw uv coordinate
   */
  Eigen::Vector2f distort_f(const Eigen::Vector2f &uv_norm) override {

    // Get our camera parameters (const reference: avoid deep-copying the dynamic matrix on every call)
    const Eigen::MatrixXd &cam_d = camera_values;

    // Calculate distorted coordinates for radial
    double r = std::sqrt(uv_norm(0) * uv_norm(0) + uv_norm(1) * uv_norm(1));
    double r_2 = r * r;
    double r_4 = r_2 * r_2;
    double x1 = uv_norm(0) * (1 + cam_d(4) * r_2 + cam_d(5) * r_4) + 2 * cam_d(6) * uv_norm(0) * uv_norm(1) +
                cam_d(7) * (r_2 + 2 * uv_norm(0) * uv_norm(0));
    double y1 = uv_norm(1) * (1 + cam_d(4) * r_2 + cam_d(5) * r_4) + cam_d(6) * (r_2 + 2 * uv_norm(1) * uv_norm(1)) +
                2 * cam_d(7) * uv_norm(0) * uv_norm(1);

    // Return the distorted point
    Eigen::Vector2f uv_dist;
    uv_dist(0) = (float)(cam_d(0) * x1 + cam_d(2));
    uv_dist(1) = (float)(cam_d(1) * y1 + cam_d(3));
    return uv_dist;
  }

  /**
   * @brief Computes the derivative of raw distorted to normalized coordinate.
   * @param uv_norm Normalized coordinates we wish to distort
   * @param H_dz_dzn Derivative of measurement z in respect to normalized
   * @param H_dz_dzeta Derivative of measurement z in respect to intrinsic parameters
   */
  void compute_distort_jacobian(const Eigen::Vector2d &uv_norm, Eigen::MatrixXd &H_dz_dzn, Eigen::MatrixXd &H_dz_dzeta) override {

    // Get our camera parameters (const reference: avoid deep-copying the dynamic matrix on every call)
    const Eigen::MatrixXd &cam_d = camera_values;

    // Calculate distorted coordinates for radial
    double r = std::sqrt(uv_norm(0) * uv_norm(0) + uv_norm(1) * uv_norm(1));
    double r_2 = r * r;
    double r_4 = r_2 * r_2;

    // Jacobian of distorted pixel to normalized pixel
    H_dz_dzn = Eigen::MatrixXd::Zero(2, 2);
    double x = uv_norm(0);
    double y = uv_norm(1);
    double x_2 = uv_norm(0) * uv_norm(0);
    double y_2 = uv_norm(1) * uv_norm(1);
    double x_y = uv_norm(0) * uv_norm(1);
    H_dz_dzn(0, 0) = cam_d(0) * ((1 + cam_d(4) * r_2 + cam_d(5) * r_4) + (2 * cam_d(4) * x_2 + 4 * cam_d(5) * x_2 * r_2) +
                                 2 * cam_d(6) * y + (2 * cam_d(7) * x + 4 * cam_d(7) * x));
    H_dz_dzn(0, 1) = cam_d(0) * (2 * cam_d(4) * x_y + 4 * cam_d(5) * x_y * r_2 + 2 * cam_d(6) * x + 2 * cam_d(7) * y);
    H_dz_dzn(1, 0) = cam_d(1) * (2 * cam_d(4) * x_y + 4 * cam_d(5) * x_y * r_2 + 2 * cam_d(6) * x + 2 * cam_d(7) * y);
    H_dz_dzn(1, 1) = cam_d(1) * ((1 + cam_d(4) * r_2 + cam_d(5) * r_4) + (2 * cam_d(4) * y_2 + 4 * cam_d(5) * y_2 * r_2) +
                                 2 * cam_d(7) * x + (2 * cam_d(6) * y + 4 * cam_d(6) * y));

    // Calculate distorted coordinates for radtan
    double x1 = uv_norm(0) * (1 + cam_d(4) * r_2 + cam_d(5) * r_4) + 2 * cam_d(6) * uv_norm(0) * uv_norm(1) +
                cam_d(7) * (r_2 + 2 * uv_norm(0) * uv_norm(0));
    double y1 = uv_norm(1) * (1 + cam_d(4) * r_2 + cam_d(5) * r_4) + cam_d(6) * (r_2 + 2 * uv_norm(1) * uv_norm(1)) +
                2 * cam_d(7) * uv_norm(0) * uv_norm(1);

    // Compute the Jacobian in respect to the intrinsics
    H_dz_dzeta = Eigen::MatrixXd::Zero(2, 8);
    H_dz_dzeta(0, 0) = x1;
    H_dz_dzeta(0, 2) = 1;
    H_dz_dzeta(0, 4) = cam_d(0) * uv_norm(0) * r_2;
    H_dz_dzeta(0, 5) = cam_d(0) * uv_norm(0) * r_4;
    H_dz_dzeta(0, 6) = 2 * cam_d(0) * uv_norm(0) * uv_norm(1);
    H_dz_dzeta(0, 7) = cam_d(0) * (r_2 + 2 * uv_norm(0) * uv_norm(0));
    H_dz_dzeta(1, 1) = y1;
    H_dz_dzeta(1, 3) = 1;
    H_dz_dzeta(1, 4) = cam_d(1) * uv_norm(1) * r_2;
    H_dz_dzeta(1, 5) = cam_d(1) * uv_norm(1) * r_4;
    H_dz_dzeta(1, 6) = cam_d(1) * (r_2 + 2 * uv_norm(1) * uv_norm(1));
    H_dz_dzeta(1, 7) = 2 * cam_d(1) * uv_norm(0) * uv_norm(1);
  }
};
} // namespace ov_core
#endif /* OV_CORE_CAM_RADTAN_H */

164
ov_core/src/cpi/CpiBase.h Normal file
View File

@@ -0,0 +1,164 @@
#ifndef CPI_BASE_H
#define CPI_BASE_H
/*
* MIT License
* Copyright (c) 2018 Kevin Eckenhoff
* Copyright (c) 2018 Patrick Geneva
* Copyright (c) 2018 Guoquan Huang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "utils/quat_ops.h"
#include <Eigen/Dense>
namespace ov_core {
/**
* @brief Base class for continuous preintegration integrators.
*
* This is the base class that both continuous-time preintegrators extend.
* Please take a look at the derived classes CpiV1 and CpiV2 for the actual implementation.
* Please see the following publication for details on the theory @cite Eckenhoff2019IJRR :
* > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation
* > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang
* > http://udel.edu/~ghuang/papers/tr_cpi.pdf
*
* The steps to use this preintegration class are as follows:
* 1. call setLinearizationPoints() to set the bias/orientation linearization point
* 2. call feed_IMU() with all IMU measurements you want to precompound over
* 3. access public variables, to get means, Jacobians, and measurement covariance
*/
class CpiBase {

public:
  /**
   * @brief Default constructor
   * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz))
   * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz))
   * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz))
   * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz))
   * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this)
   */
  CpiBase(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) {

    // Calculate our covariance matrix
    Q_c.block(0, 0, 3, 3) = std::pow(sigma_w, 2) * eye3;
    Q_c.block(3, 3, 3, 3) = std::pow(sigma_wb, 2) * eye3;
    Q_c.block(6, 6, 3, 3) = std::pow(sigma_a, 2) * eye3;
    Q_c.block(9, 9, 3, 3) = std::pow(sigma_ab, 2) * eye3;
    imu_avg = imu_avg_;

    // Calculate our unit vectors, and their skews (used in bias jacobian calcs)
    e_1 << 1, 0, 0;
    e_2 << 0, 1, 0;
    e_3 << 0, 0, 1;
    e_1x = skew_x(e_1);
    e_2x = skew_x(e_2);
    e_3x = skew_x(e_3);
  }

  /// Virtual destructor: derived preintegrators are used through CpiBase pointers,
  /// so the base destructor must be virtual for them to be destroyed correctly.
  virtual ~CpiBase() = default;

  /**
   * @brief Set linearization points of the integration.
   * @param[in] b_w_lin_ gyroscope bias linearization point
   * @param[in] b_a_lin_ accelerometer bias linearization point
   * @param[in] q_k_lin_ orientation linearization point (only model 2 uses)
   * @param[in] grav_ global gravity at the current timestep
   *
   * This function sets the linearization points we are to preintegrate about.
   * For model 2 we will also pass the q_GtoK and current gravity estimate.
   */
  void setLinearizationPoints(Eigen::Matrix<double, 3, 1> b_w_lin_, Eigen::Matrix<double, 3, 1> b_a_lin_,
                              Eigen::Matrix<double, 4, 1> q_k_lin_ = Eigen::Matrix<double, 4, 1>::Zero(),
                              Eigen::Matrix<double, 3, 1> grav_ = Eigen::Matrix<double, 3, 1>::Zero()) {
    b_w_lin = b_w_lin_;
    b_a_lin = b_a_lin_;
    q_k_lin = q_k_lin_;
    grav = grav_;
  }

  /**
   * @brief Main function that will sequentially compute the preintegration measurement.
   * @param[in] t_0 first IMU timestamp
   * @param[in] t_1 second IMU timestamp
   * @param[in] w_m_0 first imu gyroscope measurement
   * @param[in] a_m_0 first imu acceleration measurement
   * @param[in] w_m_1 second imu gyroscope measurement
   * @param[in] a_m_1 second imu acceleration measurement
   *
   * Given these new IMU measurements, this will precompound our measurements, jacobians, and measurement covariance.
   * Please see both CpiV1 and CpiV2 classes for implementation details on how this works.
   */
  virtual void feed_IMU(double t_0, double t_1, Eigen::Matrix<double, 3, 1> w_m_0, Eigen::Matrix<double, 3, 1> a_m_0,
                        Eigen::Matrix<double, 3, 1> w_m_1 = Eigen::Matrix<double, 3, 1>::Zero(),
                        Eigen::Matrix<double, 3, 1> a_m_1 = Eigen::Matrix<double, 3, 1>::Zero()) = 0;

  // Flag if we should perform IMU averaging or not
  // For version 1 we should average the measurement
  // For version 2 we average the local true
  bool imu_avg = false;

  // Measurement Means
  double DT = 0;                                                                ///< measurement integration time
  Eigen::Matrix<double, 3, 1> alpha_tau = Eigen::Matrix<double, 3, 1>::Zero();  ///< alpha measurement mean
  Eigen::Matrix<double, 3, 1> beta_tau = Eigen::Matrix<double, 3, 1>::Zero();   ///< beta measurement mean
  // NOTE(review): initialized to (0,0,0,1) so it is never read uninitialized before feed_IMU();
  // presumed identity quaternion in the [x,y,z,w] convention used elsewhere in ov_core — confirm against quat_ops
  Eigen::Matrix<double, 4, 1> q_k2tau = Eigen::Matrix<double, 4, 1>(0, 0, 0, 1);   ///< orientation measurement mean
  Eigen::Matrix<double, 3, 3> R_k2tau = Eigen::Matrix<double, 3, 3>::Identity();   ///< orientation measurement mean

  // Jacobians
  Eigen::Matrix<double, 3, 3> J_q = Eigen::Matrix<double, 3, 3>::Zero(); ///< orientation Jacobian wrt b_w
  Eigen::Matrix<double, 3, 3> J_a = Eigen::Matrix<double, 3, 3>::Zero(); ///< alpha Jacobian wrt b_w
  Eigen::Matrix<double, 3, 3> J_b = Eigen::Matrix<double, 3, 3>::Zero(); ///< beta Jacobian wrt b_w
  Eigen::Matrix<double, 3, 3> H_a = Eigen::Matrix<double, 3, 3>::Zero(); ///< alpha Jacobian wrt b_a
  Eigen::Matrix<double, 3, 3> H_b = Eigen::Matrix<double, 3, 3>::Zero(); ///< beta Jacobian wrt b_a

  // Linearization points (zero-initialized; set via setLinearizationPoints() before integrating)
  Eigen::Matrix<double, 3, 1> b_w_lin = Eigen::Matrix<double, 3, 1>::Zero(); ///< b_w linearization point (gyroscope)
  Eigen::Matrix<double, 3, 1> b_a_lin = Eigen::Matrix<double, 3, 1>::Zero(); ///< b_a linearization point (accelerometer)
  Eigen::Matrix<double, 4, 1> q_k_lin = Eigen::Matrix<double, 4, 1>::Zero(); ///< q_k linearization point (only model 2 uses)

  /// Global gravity
  Eigen::Matrix<double, 3, 1> grav = Eigen::Matrix<double, 3, 1>::Zero();

  /// Our continuous-time measurement noise matrix (computed from constructor noise values)
  Eigen::Matrix<double, 12, 12> Q_c = Eigen::Matrix<double, 12, 12>::Zero();

  /// Our final measurement covariance
  Eigen::Matrix<double, 15, 15> P_meas = Eigen::Matrix<double, 15, 15>::Zero();

  //==========================================================================
  // HELPER VARIABLES
  //==========================================================================

  // 3x3 identity matrix
  Eigen::Matrix<double, 3, 3> eye3 = Eigen::Matrix<double, 3, 3>::Identity();

  // Simple unit vectors (filled in the constructor, used in bias jacobian calculations)
  Eigen::Matrix<double, 3, 1> e_1; // = Eigen::Matrix<double,3,1>::Constant(1,0,0);
  Eigen::Matrix<double, 3, 1> e_2; // = Eigen::Matrix<double,3,1>::Constant(0,1,0);
  Eigen::Matrix<double, 3, 1> e_3; // = Eigen::Matrix<double,3,1>::Constant(0,0,1);

  // Calculate the skew-symmetric of our unit vectors (filled in the constructor)
  Eigen::Matrix<double, 3, 3> e_1x; // = skew_x(e_1);
  Eigen::Matrix<double, 3, 3> e_2x; // = skew_x(e_2);
  Eigen::Matrix<double, 3, 3> e_3x; // = skew_x(e_3);
};
} // namespace ov_core
#endif /* CPI_BASE_H */

366
ov_core/src/cpi/CpiV1.h Normal file
View File

@@ -0,0 +1,366 @@
#ifndef CPI_V1_H
#define CPI_V1_H
/*
* MIT License
* Copyright (c) 2018 Kevin Eckenhoff
* Copyright (c) 2018 Patrick Geneva
* Copyright (c) 2018 Guoquan Huang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "CpiBase.h"
#include "utils/quat_ops.h"
#include <Eigen/Dense>
namespace ov_core {
/**
* @brief Model 1 of continuous preintegration.
*
* This model is the "piecewise constant measurement assumption" which was first presented in:
* > Eckenhoff, Kevin, Patrick Geneva, and Guoquan Huang.
* > "High-accuracy preintegration for visual inertial navigation."
* > International Workshop on the Algorithmic Foundations of Robotics. 2016.
* Please see the following publication for details on the theory @cite Eckenhoff2019IJRR :
* > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation
* > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang
* > http://udel.edu/~ghuang/papers/tr_cpi.pdf
*
* The steps to use this preintegration class are as follows:
* 1. call setLinearizationPoints() to set the bias/orientation linearization point
 * 2. call feed_IMU() with all IMU measurements you want to precompound over
 * 3. access public variables, to get means, Jacobians, and measurement covariance
*/
class CpiV1 : public CpiBase {

public:
  /**
   * @brief Default constructor for our Model 1 preintegration (piecewise constant measurement assumption)
   * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz))
   * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz))
   * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz))
   * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz))
   * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this)
   */
  CpiV1(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false)
      : CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_) {}

  /**
   * @brief Our precompound function for Model 1
   * @param[in] t_0 first IMU timestamp
   * @param[in] t_1 second IMU timestamp
   * @param[in] w_m_0 first imu gyroscope measurement
   * @param[in] a_m_0 first imu acceleration measurement
   * @param[in] w_m_1 second imu gyroscope measurement
   * @param[in] a_m_1 second imu acceleration measurement
   *
   * We will first analytically integrate our measurements and Jacobians.
   * Then we perform numerical integration (RK4) for our measurement covariance.
   *
   * Side effects: updates the inherited preintegrated means (alpha_tau, beta_tau,
   * R_k2tau, q_k2tau), the bias Jacobians (J_q, J_a, J_b, H_a, H_b), the elapsed
   * time DT, and the measurement covariance P_meas.
   */
  void feed_IMU(double t_0, double t_1, Eigen::Matrix<double, 3, 1> w_m_0, Eigen::Matrix<double, 3, 1> a_m_0,
                Eigen::Matrix<double, 3, 1> w_m_1 = Eigen::Matrix<double, 3, 1>::Zero(),
                Eigen::Matrix<double, 3, 1> a_m_1 = Eigen::Matrix<double, 3, 1>::Zero()) {

    // Get time difference
    double delta_t = t_1 - t_0;
    DT += delta_t;

    // If no time has passed do nothing
    if (delta_t == 0) {
      return;
    }

    // Get estimated imu readings (raw measurement minus the bias linearization point)
    Eigen::Matrix<double, 3, 1> w_hat = w_m_0 - b_w_lin;
    Eigen::Matrix<double, 3, 1> a_hat = a_m_0 - b_a_lin;

    // If averaging, average the two debiased readings over the interval
    if (imu_avg) {
      w_hat += w_m_1 - b_w_lin;
      w_hat = 0.5 * w_hat;
      a_hat += a_m_1 - b_a_lin;
      a_hat = .5 * a_hat;
    }

    // Get angle change w*dt
    Eigen::Matrix<double, 3, 1> w_hatdt = w_hat * delta_t;

    // Get entries of w_hat
    double w_1 = w_hat(0, 0);
    double w_2 = w_hat(1, 0);
    double w_3 = w_hat(2, 0);

    // Get magnitude of w and wdt
    double mag_w = w_hat.norm();
    double w_dt = mag_w * delta_t;

    // Threshold to determine if equations will be unstable
    // (|w| < 0.008726646 rad/s ~ 0.5 deg/s; below this we use small-angle Taylor expansions)
    bool small_w = (mag_w < 0.008726646);

    // Get some of the variables used in the preintegration equations
    double dt_2 = pow(delta_t, 2);
    double cos_wt = cos(w_dt);
    double sin_wt = sin(w_dt);
    Eigen::Matrix<double, 3, 3> w_x = skew_x(w_hat);
    Eigen::Matrix<double, 3, 3> a_x = skew_x(a_hat);
    Eigen::Matrix<double, 3, 3> w_tx = skew_x(w_hatdt);
    Eigen::Matrix<double, 3, 3> w_x_2 = w_x * w_x;

    //==========================================================================
    // MEASUREMENT MEANS
    //==========================================================================

    // Get relative rotation over this interval (Rodrigues formula; Taylor expansion when small_w)
    Eigen::Matrix<double, 3, 3> R_tau2tau1 = small_w ? eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2
                                                     : eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2;

    // Updated rotation and its transpose
    Eigen::Matrix<double, 3, 3> R_k2tau1 = R_tau2tau1 * R_k2tau;
    Eigen::Matrix<double, 3, 3> R_tau12k = R_k2tau1.transpose();

    // Intermediate variables for evaluating the measurement/bias Jacobian update
    // f_1..f_4 are the closed-form integrals of the rotation terms (see the CPI tech report)
    double f_1;
    double f_2;
    double f_3;
    double f_4;
    if (small_w) {
      f_1 = -(pow(delta_t, 3) / 3);
      f_2 = (pow(delta_t, 4) / 8);
      f_3 = -(pow(delta_t, 2) / 2);
      f_4 = (pow(delta_t, 3) / 6);
    } else {
      f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3));
      f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4));
      f_3 = -(1 - cos_wt) / pow(mag_w, 2);
      f_4 = (w_dt - sin_wt) / pow(mag_w, 3);
    }

    // Compute the main part of our analytical means
    Eigen::Matrix<double, 3, 3> alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2);
    Eigen::Matrix<double, 3, 3> Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2);

    // Matrices that will multiply the a_hat in the update expressions
    Eigen::MatrixXd H_al = R_tau12k * alpha_arg;
    Eigen::MatrixXd H_be = R_tau12k * Beta_arg;

    // Update the measurement means (alpha = position-like, beta = velocity-like preintegral)
    alpha_tau += beta_tau * delta_t + H_al * a_hat;
    beta_tau += H_be * a_hat;

    //==========================================================================
    // BIAS JACOBIANS (ANALYTICAL)
    //==========================================================================

    // Get right Jacobian of SO(3) (Taylor expansion when small_w)
    Eigen::Matrix<double, 3, 3> J_r_tau1 =
        small_w ? eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx
                : eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx;

    // Update orientation in respect to gyro bias Jacobians
    J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t;

    // Update alpha and beta in respect to accel bias Jacobians
    H_a -= H_al;
    H_a += delta_t * H_b;
    H_b -= H_be;

    // Derivatives of R_tau12k wrt bias_w entries
    Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1);
    Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2);
    Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3);

    // Now compute the gyro bias Jacobian terms
    // (partials of f_1..f_4 with respect to each gyro bias component, via the chain
    //  rule through |w|: df/db_i = w_i * df/d|w| / |w|, with the 1/|w| folded in below)
    double df_1_dbw_1;
    double df_1_dbw_2;
    double df_1_dbw_3;
    double df_2_dbw_1;
    double df_2_dbw_2;
    double df_2_dbw_3;
    double df_3_dbw_1;
    double df_3_dbw_2;
    double df_3_dbw_3;
    double df_4_dbw_1;
    double df_4_dbw_2;
    double df_4_dbw_3;
    if (small_w) {
      double df_1_dw_mag = -(pow(delta_t, 5) / 15);
      df_1_dbw_1 = w_1 * df_1_dw_mag;
      df_1_dbw_2 = w_2 * df_1_dw_mag;
      df_1_dbw_3 = w_3 * df_1_dw_mag;
      double df_2_dw_mag = (pow(delta_t, 6) / 72);
      df_2_dbw_1 = w_1 * df_2_dw_mag;
      df_2_dbw_2 = w_2 * df_2_dw_mag;
      df_2_dbw_3 = w_3 * df_2_dw_mag;
      double df_3_dw_mag = -(pow(delta_t, 4) / 12);
      df_3_dbw_1 = w_1 * df_3_dw_mag;
      df_3_dbw_2 = w_2 * df_3_dw_mag;
      df_3_dbw_3 = w_3 * df_3_dw_mag;
      double df_4_dw_mag = (pow(delta_t, 5) / 60);
      df_4_dbw_1 = w_1 * df_4_dw_mag;
      df_4_dbw_2 = w_2 * df_4_dw_mag;
      df_4_dbw_3 = w_3 * df_4_dw_mag;
    } else {
      double df_1_dw_mag = (pow(w_dt, 2) * sin_wt - 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5);
      df_1_dbw_1 = w_1 * df_1_dw_mag;
      df_1_dbw_2 = w_2 * df_1_dw_mag;
      df_1_dbw_3 = w_3 * df_1_dw_mag;
      double df_2_dw_mag = (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6));
      df_2_dbw_1 = w_1 * df_2_dw_mag;
      df_2_dbw_2 = w_2 * df_2_dw_mag;
      df_2_dbw_3 = w_3 * df_2_dw_mag;
      double df_3_dw_mag = (2 * (cos_wt - 1) + w_dt * sin_wt) / (pow(mag_w, 4));
      df_3_dbw_1 = w_1 * df_3_dw_mag;
      df_3_dbw_2 = w_2 * df_3_dw_mag;
      df_3_dbw_3 = w_3 * df_3_dw_mag;
      double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5));
      df_4_dbw_1 = w_1 * df_4_dw_mag;
      df_4_dbw_2 = w_2 * df_4_dw_mag;
      df_4_dbw_3 = w_3 * df_4_dw_mag;
    }

    // Update alpha and beta gyro bias Jacobians (one column per bias component e_1, e_2, e_3)
    J_a += J_b * delta_t;
    J_a.block(0, 0, 3, 1) +=
        (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + df_2_dbw_1 * w_x_2 - f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat;
    J_a.block(0, 1, 3, 1) +=
        (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + df_2_dbw_2 * w_x_2 - f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat;
    J_a.block(0, 2, 3, 1) +=
        (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + df_2_dbw_3 * w_x_2 - f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat;
    J_b.block(0, 0, 3, 1) +=
        (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + df_4_dbw_1 * w_x_2 - f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat;
    J_b.block(0, 1, 3, 1) +=
        (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + df_4_dbw_2 * w_x_2 - f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat;
    J_b.block(0, 2, 3, 1) +=
        (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + df_4_dbw_3 * w_x_2 - f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat;

    //==========================================================================
    // MEASUREMENT COVARIANCE
    //==========================================================================

    // Going to need orientation at intermediate time i.e. at .5*dt;
    Eigen::Matrix<double, 3, 3> R_mid =
        small_w ? eye3 - .5 * delta_t * w_x + (pow(.5 * delta_t, 2) / 2) * w_x_2
                : eye3 - (sin(mag_w * .5 * delta_t) / mag_w) * w_x + ((1.0 - cos(mag_w * .5 * delta_t)) / (pow(mag_w, 2.0))) * w_x_2;
    R_mid = R_mid * R_k2tau;

    // Compute covariance (in this implementation, we use RK4)
    // 15-dof error state: [theta, b_w, beta, b_a, alpha]; 12-dof noise: [n_w, n_wb, n_a, n_ab]
    // k1-------------------------------------------------------------------------------------------------

    // Build state Jacobian
    Eigen::Matrix<double, 15, 15> F_k1 = Eigen::Matrix<double, 15, 15>::Zero();
    F_k1.block(0, 0, 3, 3) = -w_x;
    F_k1.block(0, 3, 3, 3) = -eye3;
    F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x;
    F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose();
    F_k1.block(12, 6, 3, 3) = eye3;

    // Build noise Jacobian
    Eigen::Matrix<double, 15, 12> G_k1 = Eigen::Matrix<double, 15, 12>::Zero();
    G_k1.block(0, 0, 3, 3) = -eye3;
    G_k1.block(3, 3, 3, 3) = eye3;
    G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose();
    G_k1.block(9, 9, 3, 3) = eye3;

    // Get covariance derivative
    Eigen::Matrix<double, 15, 15> P_dot_k1 = F_k1 * P_meas + P_meas * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose();

    // k2-------------------------------------------------------------------------------------------------

    // Build state Jacobian (evaluated at the interval midpoint orientation)
    Eigen::Matrix<double, 15, 15> F_k2 = Eigen::Matrix<double, 15, 15>::Zero();
    F_k2.block(0, 0, 3, 3) = -w_x;
    F_k2.block(0, 3, 3, 3) = -eye3;
    F_k2.block(6, 0, 3, 3) = -R_mid.transpose() * a_x;
    F_k2.block(6, 9, 3, 3) = -R_mid.transpose();
    F_k2.block(12, 6, 3, 3) = eye3;

    // Build noise Jacobian
    Eigen::Matrix<double, 15, 12> G_k2 = Eigen::Matrix<double, 15, 12>::Zero();
    G_k2.block(0, 0, 3, 3) = -eye3;
    G_k2.block(3, 3, 3, 3) = eye3;
    G_k2.block(6, 6, 3, 3) = -R_mid.transpose();
    G_k2.block(9, 9, 3, 3) = eye3;

    // Get covariance derivative
    Eigen::Matrix<double, 15, 15> P_k2 = P_meas + P_dot_k1 * delta_t / 2.0;
    Eigen::Matrix<double, 15, 15> P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose();

    // k3-------------------------------------------------------------------------------------------------

    // Our state and noise Jacobians are the same as k2
    // Since k2 and k3 correspond to the same estimates for the midpoint
    Eigen::Matrix<double, 15, 15> F_k3 = F_k2;
    Eigen::Matrix<double, 15, 12> G_k3 = G_k2;

    // Get covariance derivative
    Eigen::Matrix<double, 15, 15> P_k3 = P_meas + P_dot_k2 * delta_t / 2.0;
    Eigen::Matrix<double, 15, 15> P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose();

    // k4-------------------------------------------------------------------------------------------------

    // Build state Jacobian (evaluated at the end-of-interval orientation)
    Eigen::Matrix<double, 15, 15> F_k4 = Eigen::Matrix<double, 15, 15>::Zero();
    F_k4.block(0, 0, 3, 3) = -w_x;
    F_k4.block(0, 3, 3, 3) = -eye3;
    F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x;
    F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose();
    F_k4.block(12, 6, 3, 3) = eye3;

    // Build noise Jacobian
    Eigen::Matrix<double, 15, 12> G_k4 = Eigen::Matrix<double, 15, 12>::Zero();
    G_k4.block(0, 0, 3, 3) = -eye3;
    G_k4.block(3, 3, 3, 3) = eye3;
    G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose();
    G_k4.block(9, 9, 3, 3) = eye3;

    // Get covariance derivative
    Eigen::Matrix<double, 15, 15> P_k4 = P_meas + P_dot_k3 * delta_t;
    Eigen::Matrix<double, 15, 15> P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 * Q_c * G_k4.transpose();

    // done-------------------------------------------------------------------------------------------------

    // Collect covariance solution
    // Ensure it is positive definite (symmetrize to remove numerical asymmetry)
    P_meas += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4);
    P_meas = 0.5 * (P_meas + P_meas.transpose());

    // Update rotation mean
    // Note we had to wait to do this, since we use the old orientation in our covariance calculation
    R_k2tau = R_k2tau1;
    q_k2tau = rot_2_quat(R_k2tau);
  }
};
} // namespace ov_core
#endif /* CPI_V1_H */

455
ov_core/src/cpi/CpiV2.h Normal file
View File

@@ -0,0 +1,455 @@
#ifndef CPI_V2_H
#define CPI_V2_H
/*
* MIT License
* Copyright (c) 2018 Kevin Eckenhoff
* Copyright (c) 2018 Patrick Geneva
* Copyright (c) 2018 Guoquan Huang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "CpiBase.h"
#include "utils/quat_ops.h"
#include <Eigen/Dense>
namespace ov_core {
/**
* @brief Model 2 of continuous preintegration.
*
* This model is the "piecewise constant local acceleration assumption."
* Please see the following publication for details on the theory @cite Eckenhoff2019IJRR :
* > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation
* > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang
* > http://udel.edu/~ghuang/papers/tr_cpi.pdf
*
* The steps to use this preintegration class are as follows:
* 1. call setLinearizationPoints() to set the bias/orientation linearization point
 * 2. call feed_IMU() with all IMU measurements you want to precompound over
 * 3. access public variables, to get means, Jacobians, and measurement covariance
*/
class CpiV2 : public CpiBase {

private:
  // Extended covariance used to handle the sampling model
  // 21-dof error state: [theta, b_w, beta, b_a, alpha, theta_sample, theta_lin]
  Eigen::Matrix<double, 21, 21> P_big = Eigen::Matrix<double, 21, 21>::Zero();

  // Our large compounded state transition Jacobian matrix (from time k to the current tau)
  Eigen::Matrix<double, 21, 21> Discrete_J_b = Eigen::Matrix<double, 21, 21>::Identity();

public:
  /**
   * @brief
   * If we want to use analytical jacobians or not.
   * In the paper we just numerically integrated the jacobians
   * If set to false, we use a closed form version similar to model 1.
   */
  bool state_transition_jacobians = true;

  // Alpha and beta Jacobians wrt linearization orientation
  Eigen::Matrix<double, 3, 3> O_a = Eigen::Matrix<double, 3, 3>::Zero();
  Eigen::Matrix<double, 3, 3> O_b = Eigen::Matrix<double, 3, 3>::Zero();

  /**
   * @brief Default constructor for our Model 2 preintegration (piecewise constant local acceleration assumption)
   * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz))
   * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz))
   * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz))
   * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz))
   * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this)
   */
  CpiV2(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false)
      : CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_) {}

  /**
   * @brief Our precompound function for Model 2
   * @param[in] t_0 first IMU timestamp
   * @param[in] t_1 second IMU timestamp
   * @param[in] w_m_0 first imu gyroscope measurement
   * @param[in] a_m_0 first imu acceleration measurement
   * @param[in] w_m_1 second imu gyroscope measurement
   * @param[in] a_m_1 second imu acceleration measurement
   *
   * We will first analytically integrate our measurement.
   * We can numerically or analytically integrate our bias jacobians.
   * Then we perform numerical integration for our measurement covariance.
   *
   * Side effects: updates the inherited preintegrated means (alpha_tau, beta_tau,
   * R_k2tau, q_k2tau), the bias/linearization Jacobians (J_q, J_a, J_b, H_a, H_b,
   * O_a, O_b), the elapsed time DT, and the covariances P_big / P_meas.
   */
  void feed_IMU(double t_0, double t_1, Eigen::Matrix<double, 3, 1> w_m_0, Eigen::Matrix<double, 3, 1> a_m_0,
                Eigen::Matrix<double, 3, 1> w_m_1 = Eigen::Matrix<double, 3, 1>::Zero(),
                Eigen::Matrix<double, 3, 1> a_m_1 = Eigen::Matrix<double, 3, 1>::Zero()) {

    // Get time difference
    double delta_t = t_1 - t_0;
    DT += delta_t;

    // If no time has passed do nothing
    if (delta_t == 0) {
      return;
    }

    // Get estimated imu readings (debiased, and with gravity removed in the LOCAL frame for the accel)
    Eigen::Matrix<double, 3, 1> w_hat = w_m_0 - b_w_lin;
    Eigen::Matrix<double, 3, 1> a_hat = a_m_0 - b_a_lin - R_k2tau * quat_2_Rot(q_k_lin) * grav;

    // If averaging, average
    // Note: we will average the LOCAL acceleration after getting the relative rotation
    if (imu_avg) {
      w_hat += w_m_1 - b_w_lin;
      w_hat = 0.5 * w_hat;
    }

    // Get angle change w*dt
    Eigen::Matrix<double, 3, 1> w_hatdt = w_hat * delta_t;

    // Get entries of w_hat
    double w_1 = w_hat(0, 0);
    double w_2 = w_hat(1, 0);
    double w_3 = w_hat(2, 0);

    // Get magnitude of w and wdt
    double mag_w = w_hat.norm();
    double w_dt = mag_w * delta_t;

    // Threshold to determine if equations will be unstable
    // (|w| < 0.008726646 rad/s ~ 0.5 deg/s; below this we use small-angle Taylor expansions)
    bool small_w = (mag_w < 0.008726646);

    // Get some of the variables used in the preintegration equations
    double dt_2 = pow(delta_t, 2);
    double cos_wt = cos(w_dt);
    double sin_wt = sin(w_dt);
    Eigen::Matrix<double, 3, 3> w_x = skew_x(w_hat);
    Eigen::Matrix<double, 3, 3> w_tx = skew_x(w_hatdt);
    Eigen::Matrix<double, 3, 3> w_x_2 = w_x * w_x;

    //==========================================================================
    // MEASUREMENT MEANS
    //==========================================================================

    // Get relative rotation over this interval (Rodrigues formula; Taylor expansion when small_w)
    Eigen::Matrix<double, 3, 3> R_tau2tau1 = small_w ? eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2
                                                     : eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2;

    // Updated rotation and its transpose
    Eigen::Matrix<double, 3, 3> R_k2tau1 = R_tau2tau1 * R_k2tau;
    Eigen::Matrix<double, 3, 3> R_tau12k = R_k2tau1.transpose();

    // If averaging, average the LOCAL acceleration
    if (imu_avg) {
      a_hat += a_m_1 - b_a_lin - R_k2tau1 * quat_2_Rot(q_k_lin) * grav;
      a_hat = 0.5 * a_hat;
    }
    Eigen::Matrix<double, 3, 3> a_x = skew_x(a_hat);

    // Intermediate variables for evaluating the measurement/bias Jacobian update
    // f_1..f_4 are the closed-form integrals of the rotation terms (see the CPI tech report)
    double f_1;
    double f_2;
    double f_3;
    double f_4;
    if (small_w) {
      f_1 = -(pow(delta_t, 3) / 3);
      f_2 = (pow(delta_t, 4) / 8);
      f_3 = -(pow(delta_t, 2) / 2);
      f_4 = (pow(delta_t, 3) / 6);
    } else {
      f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3));
      f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4));
      f_3 = -(1 - cos_wt) / pow(mag_w, 2);
      f_4 = (w_dt - sin_wt) / pow(mag_w, 3);
    }

    // Compute the main part of our analytical means
    Eigen::Matrix<double, 3, 3> alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2);
    Eigen::Matrix<double, 3, 3> Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2);

    // Matrices that will multiply the a_hat in the update expressions
    Eigen::Matrix<double, 3, 3> H_al = R_tau12k * alpha_arg;
    Eigen::Matrix<double, 3, 3> H_be = R_tau12k * Beta_arg;

    // Update the measurements (alpha = position-like, beta = velocity-like preintegral)
    alpha_tau += beta_tau * delta_t + H_al * a_hat;
    beta_tau += H_be * a_hat;

    //==========================================================================
    // BIAS JACOBIANS (ANALYTICAL)
    //==========================================================================

    // Get right Jacobian of SO(3) (Taylor expansion when small_w)
    Eigen::Matrix<double, 3, 3> J_r_tau1 =
        small_w ? eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx
                : eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx;

    // Update orientation in respect to gyro bias Jacobians
    // (save the previous J_q since the alpha/beta updates below need the pre-update value)
    Eigen::Matrix<double, 3, 3> J_save = J_q;
    J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t;

    // Update alpha and beta in respect to accel bias Jacobian
    H_a -= H_al;
    H_a += delta_t * H_b;
    H_b -= H_be;

    // Update alpha and beta in respect to q_GtoLIN Jacobian
    Eigen::Matrix<double, 3, 1> g_k = quat_2_Rot(q_k_lin) * grav;
    O_a += delta_t * O_b;
    O_a += -H_al * R_k2tau * skew_x(g_k);
    O_b += -H_be * R_k2tau * skew_x(g_k);

    // Derivatives of R_tau12k wrt bias_w entries
    Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1);
    Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2);
    Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3);

    // Now compute the gyro bias Jacobian terms
    // (partials of f_1..f_4 with respect to each gyro bias component)
    double df_1_dbw_1;
    double df_1_dbw_2;
    double df_1_dbw_3;
    double df_2_dbw_1;
    double df_2_dbw_2;
    double df_2_dbw_3;
    double df_3_dbw_1;
    double df_3_dbw_2;
    double df_3_dbw_3;
    double df_4_dbw_1;
    double df_4_dbw_2;
    double df_4_dbw_3;
    if (small_w) {
      double df_1_dw_mag = -(pow(delta_t, 5) / 15);
      df_1_dbw_1 = w_1 * df_1_dw_mag;
      df_1_dbw_2 = w_2 * df_1_dw_mag;
      df_1_dbw_3 = w_3 * df_1_dw_mag;
      double df_2_dw_mag = (pow(delta_t, 6) / 72);
      df_2_dbw_1 = w_1 * df_2_dw_mag;
      df_2_dbw_2 = w_2 * df_2_dw_mag;
      df_2_dbw_3 = w_3 * df_2_dw_mag;
      double df_3_dw_mag = -(pow(delta_t, 4) / 12);
      df_3_dbw_1 = w_1 * df_3_dw_mag;
      df_3_dbw_2 = w_2 * df_3_dw_mag;
      df_3_dbw_3 = w_3 * df_3_dw_mag;
      double df_4_dw_mag = (pow(delta_t, 5) / 60);
      df_4_dbw_1 = w_1 * df_4_dw_mag;
      df_4_dbw_2 = w_2 * df_4_dw_mag;
      df_4_dbw_3 = w_3 * df_4_dw_mag;
    } else {
      double df_1_dw_mag = (pow(w_dt, 2) * sin_wt - 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5);
      df_1_dbw_1 = w_1 * df_1_dw_mag;
      df_1_dbw_2 = w_2 * df_1_dw_mag;
      df_1_dbw_3 = w_3 * df_1_dw_mag;
      double df_2_dw_mag = (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6));
      df_2_dbw_1 = w_1 * df_2_dw_mag;
      df_2_dbw_2 = w_2 * df_2_dw_mag;
      df_2_dbw_3 = w_3 * df_2_dw_mag;
      double df_3_dw_mag = (2 * (cos_wt - 1) + w_dt * sin_wt) / (pow(mag_w, 4));
      df_3_dbw_1 = w_1 * df_3_dw_mag;
      df_3_dbw_2 = w_2 * df_3_dw_mag;
      df_3_dbw_3 = w_3 * df_3_dw_mag;
      double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5));
      df_4_dbw_1 = w_1 * df_4_dw_mag;
      df_4_dbw_2 = w_2 * df_4_dw_mag;
      df_4_dbw_3 = w_3 * df_4_dw_mag;
    }

    // Gravity rotated into the "tau'th" frame (i.e. start of the measurement interval)
    Eigen::Matrix<double, 3, 1> g_tau = R_k2tau * quat_2_Rot(q_k_lin) * grav;

    // Update gyro bias Jacobians (one column per bias component e_1, e_2, e_3)
    // NOTE: the J_b e_1 column previously had a stray double negation ("- -H_be * ...")
    // while the e_2/e_3 columns used "- H_be * ..."; the three columns must be
    // symmetric in e_i, so the extra minus sign has been removed here.
    // (These analytical values are only kept when state_transition_jacobians == false,
    // otherwise they are overwritten from Discrete_J_b below.)
    J_a += J_b * delta_t;
    J_a.block(0, 0, 3, 1) +=
        (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + df_2_dbw_1 * w_x_2 - f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat -
        H_al * skew_x((J_save * e_1)) * g_tau;
    J_a.block(0, 1, 3, 1) +=
        (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + df_2_dbw_2 * w_x_2 - f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat -
        H_al * skew_x((J_save * e_2)) * g_tau;
    J_a.block(0, 2, 3, 1) +=
        (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + df_2_dbw_3 * w_x_2 - f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat -
        H_al * skew_x((J_save * e_3)) * g_tau;
    J_b.block(0, 0, 3, 1) +=
        (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + df_4_dbw_1 * w_x_2 - f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat -
        H_be * skew_x((J_save * e_1)) * g_tau;
    J_b.block(0, 1, 3, 1) +=
        (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + df_4_dbw_2 * w_x_2 - f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat -
        H_be * skew_x((J_save * e_2)) * g_tau;
    J_b.block(0, 2, 3, 1) +=
        (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + df_4_dbw_3 * w_x_2 - f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat -
        H_be * skew_x((J_save * e_3)) * g_tau;

    //==========================================================================
    // MEASUREMENT COVARIANCE
    //==========================================================================

    // Going to need orientation at intermediate time i.e. at .5*dt;
    Eigen::Matrix<double, 3, 3> R_G_to_k = quat_2_Rot(q_k_lin);
    double dt_mid = delta_t / 2.0;
    double w_dt_mid = mag_w * dt_mid;
    Eigen::Matrix<double, 3, 3> R_mid;

    // The middle of this interval (i.e., rotation from k to mid)
    R_mid = small_w ? eye3 - dt_mid * w_x + (pow(dt_mid, 2) / 2) * w_x_2
                    : eye3 - (sin(w_dt_mid) / mag_w) * w_x + ((1.0 - cos(w_dt_mid)) / (pow(mag_w, 2.0))) * w_x_2;
    R_mid = R_mid * R_k2tau;

    // Compute covariance (in this implementation, we use RK4)
    // k1-------------------------------------------------------------------------------------------------

    // Build state Jacobian
    Eigen::Matrix<double, 21, 21> F_k1 = Eigen::Matrix<double, 21, 21>::Zero();
    F_k1.block(0, 0, 3, 3) = -w_x;
    F_k1.block(0, 3, 3, 3) = -eye3;
    F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x;
    F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose();
    F_k1.block(6, 15, 3, 3) = -R_k2tau.transpose() * skew_x(R_k2tau * R_G_to_k * grav);
    F_k1.block(6, 18, 3, 3) = -R_k2tau.transpose() * R_k2tau * skew_x(R_G_to_k * grav);
    F_k1.block(12, 6, 3, 3) = eye3;

    // Build noise Jacobian
    Eigen::Matrix<double, 21, 12> G_k1 = Eigen::Matrix<double, 21, 12>::Zero();
    G_k1.block(0, 0, 3, 3) = -eye3;
    G_k1.block(3, 3, 3, 3) = eye3;
    G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose();
    G_k1.block(9, 9, 3, 3) = eye3;

    // Get state transition and covariance derivative
    Eigen::Matrix<double, 21, 21> Phi_dot_k1 = F_k1;
    Eigen::Matrix<double, 21, 21> P_dot_k1 = F_k1 * P_big + P_big * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose();

    // k2-------------------------------------------------------------------------------------------------

    // Build state Jacobian (evaluated at the interval midpoint orientation)
    Eigen::Matrix<double, 21, 21> F_k2 = Eigen::Matrix<double, 21, 21>::Zero();
    F_k2.block(0, 0, 3, 3) = -w_x;
    F_k2.block(0, 3, 3, 3) = -eye3;
    F_k2.block(6, 0, 3, 3) = -R_mid.transpose() * a_x;
    F_k2.block(6, 9, 3, 3) = -R_mid.transpose();
    F_k2.block(6, 15, 3, 3) = -R_mid.transpose() * skew_x(R_k2tau * R_G_to_k * grav);
    F_k2.block(6, 18, 3, 3) = -R_mid.transpose() * R_k2tau * skew_x(R_G_to_k * grav);
    F_k2.block(12, 6, 3, 3) = eye3;

    // Build noise Jacobian
    Eigen::Matrix<double, 21, 12> G_k2 = Eigen::Matrix<double, 21, 12>::Zero();
    G_k2.block(0, 0, 3, 3) = -eye3;
    G_k2.block(3, 3, 3, 3) = eye3;
    G_k2.block(6, 6, 3, 3) = -R_mid.transpose();
    G_k2.block(9, 9, 3, 3) = eye3;

    // Get state transition and covariance derivative
    Eigen::Matrix<double, 21, 21> Phi_k2 = Eigen::Matrix<double, 21, 21>::Identity() + Phi_dot_k1 * dt_mid;
    Eigen::Matrix<double, 21, 21> P_k2 = P_big + P_dot_k1 * dt_mid;
    Eigen::Matrix<double, 21, 21> Phi_dot_k2 = F_k2 * Phi_k2;
    Eigen::Matrix<double, 21, 21> P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose();

    // k3-------------------------------------------------------------------------------------------------

    // Our state and noise Jacobians are the same as k2
    // Since k2 and k3 correspond to the same estimates for the midpoint
    Eigen::Matrix<double, 21, 21> F_k3 = F_k2;
    Eigen::Matrix<double, 21, 12> G_k3 = G_k2;

    // Get state transition and covariance derivative
    Eigen::Matrix<double, 21, 21> Phi_k3 = Eigen::Matrix<double, 21, 21>::Identity() + Phi_dot_k2 * dt_mid;
    Eigen::Matrix<double, 21, 21> P_k3 = P_big + P_dot_k2 * dt_mid;
    Eigen::Matrix<double, 21, 21> Phi_dot_k3 = F_k3 * Phi_k3;
    Eigen::Matrix<double, 21, 21> P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose();

    // k4-------------------------------------------------------------------------------------------------

    // Build state Jacobian (evaluated at the end-of-interval orientation)
    Eigen::Matrix<double, 21, 21> F_k4 = Eigen::Matrix<double, 21, 21>::Zero();
    F_k4.block(0, 0, 3, 3) = -w_x;
    F_k4.block(0, 3, 3, 3) = -eye3;
    F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x;
    F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose();
    F_k4.block(6, 15, 3, 3) = -R_k2tau1.transpose() * skew_x(R_k2tau * R_G_to_k * grav);
    F_k4.block(6, 18, 3, 3) = -R_k2tau1.transpose() * R_k2tau * skew_x(R_G_to_k * grav);
    F_k4.block(12, 6, 3, 3) = eye3;

    // Build noise Jacobian
    Eigen::Matrix<double, 21, 12> G_k4 = Eigen::Matrix<double, 21, 12>::Zero();
    G_k4.block(0, 0, 3, 3) = -eye3;
    G_k4.block(3, 3, 3, 3) = eye3;
    G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose();
    G_k4.block(9, 9, 3, 3) = eye3;

    // Get state transition and covariance derivative
    Eigen::Matrix<double, 21, 21> Phi_k4 = Eigen::Matrix<double, 21, 21>::Identity() + Phi_dot_k3 * delta_t;
    Eigen::Matrix<double, 21, 21> P_k4 = P_big + P_dot_k3 * delta_t;
    Eigen::Matrix<double, 21, 21> Phi_dot_k4 = F_k4 * Phi_k4;
    Eigen::Matrix<double, 21, 21> P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 * Q_c * G_k4.transpose();

    // done-------------------------------------------------------------------------------------------------

    // Collect covariance solution
    // Ensure it is positive definite (symmetrize to remove numerical asymmetry)
    P_big += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4);
    P_big = 0.5 * (P_big + P_big.transpose());

    // Calculate the state transition from time k to tau
    Eigen::Matrix<double, 21, 21> Phi =
        Eigen::Matrix<double, 21, 21>::Identity() + (delta_t / 6.0) * (Phi_dot_k1 + 2.0 * Phi_dot_k2 + 2.0 * Phi_dot_k3 + Phi_dot_k4);

    //==========================================================================
    // CLONE TO NEW SAMPLE TIME AND MARGINALIZE OLD SAMPLE TIME
    //==========================================================================

    // Matrix that performs the clone and marginalization
    Eigen::Matrix<double, 21, 21> B_k = Eigen::Matrix<double, 21, 21>::Identity();
    B_k.block(15, 15, 3, 3).setZero();
    B_k.block(15, 0, 3, 3) = Eigen::Matrix<double, 3, 3>::Identity();

    // Change our measurement covariance and state transition
    P_big = B_k * P_big * B_k.transpose();
    P_big = 0.5 * (P_big + P_big.transpose());
    Discrete_J_b = B_k * Phi * Discrete_J_b;

    // Our measurement covariance is the top 15x15 of our large covariance
    P_meas = P_big.block(0, 0, 15, 15);

    // If we are using the state transition Jacobian, then we should overwrite the analytical versions
    // Note: we flip the sign for J_q to match the Model 1 derivation
    if (state_transition_jacobians) {
      J_q = -Discrete_J_b.block(0, 3, 3, 3);
      J_a = Discrete_J_b.block(12, 3, 3, 3);
      J_b = Discrete_J_b.block(6, 3, 3, 3);
      H_a = Discrete_J_b.block(12, 9, 3, 3);
      H_b = Discrete_J_b.block(6, 9, 3, 3);
      O_a = Discrete_J_b.block(12, 18, 3, 3);
      O_b = Discrete_J_b.block(6, 18, 3, 3);
    }

    // Update rotation mean
    // Note we had to wait to do this, since we use the old orientation in our covariance calculation
    R_k2tau = R_k2tau1;
    q_k2tau = rot_2_quat(R_k2tau);
  }
};
} // namespace ov_core
#endif /* CPI_V2_H */

73
ov_core/src/dummy.cpp Normal file
View File

@@ -0,0 +1,73 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/**
 * @namespace ov_core
 * @brief Core algorithms for OpenVINS
 *
 * This has the core algorithms that all projects within the OpenVINS ecosystem leverage.
 * The purpose is to allow for the reuse of code that could be shared between different localization systems (i.e. msckf-based, batch-based,
 * etc.). These algorithms are the foundation which is necessary before we can even write an estimator that can perform localization. The
 * key components of the ov_core codebase are the following:
 *
 * - 3d feature initialization (see @ref ov_core::FeatureInitializer)
 * - SE(3) b-spline (see @ref ov_core::BsplineSE3)
 * - KLT, descriptor, aruco, and simulation feature trackers
 * - Groundtruth dataset reader (see @ref ov_core::DatasetReader)
 * - Quaternion and other manifold math operations
 * - Generic type system and their implementations (see @ref ov_type namespace)
 * - Closed-form preintegration @cite Eckenhoff2019IJRR
 *
 * Please take a look at classes that we offer for the user to leverage as each has its own documentation.
 * If you are looking for the estimator please take a look at the ov_msckf project which leverages these algorithms.
 * If you are looking for the different types please take a look at the ov_type namespace for the ones we have.
 *
 */
namespace ov_core {} // intentionally empty: exists only so Doxygen can attach the namespace documentation above
/**
* @namespace ov_type
* @brief Dynamic type system types
*
* Types leveraged by the EKF system for covariance management.
* These types store where they are in the covariance along with their current estimate.
* Each also has an update function that takes a vector delta and updates their manifold representation.
* Please see each type for details on what they represent, but their names should be straightforward.
* See @ref dev-index for high level details on how the type system and covariance management works.
* Each type is described by the following:
*
* @code{.cpp}
* class Type {
* protected:
* // Current best estimate
* Eigen::MatrixXd _value;
* // Location of error state in covariance
* int _id = -1;
* // Dimension of error state
* int _size = -1;
* // Update eq. taking vector to their rep.
* void update(const Eigen::VectorXd dx);
* };
* @endcode
*
*
*/
namespace ov_type {}

View File

@@ -0,0 +1,111 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Feature.h"
using namespace ov_core;
void Feature::clean_old_measurements(const std::vector<double> &valid_times) {

  // For every camera this feature was seen from, drop any measurement whose
  // timestamp does NOT appear in the passed list of valid times.
  for (auto const &camtimes : timestamps) {
    const size_t cam_id = camtimes.first;

    // The three per-camera vectors are parallel and must stay in lock-step
    assert(timestamps[cam_id].size() == uvs[cam_id].size());
    assert(timestamps[cam_id].size() == uvs_norm[cam_id].size());

    // Walk all three vectors together, erasing in tandem
    auto time_it = timestamps[cam_id].begin();
    auto uv_it = uvs[cam_id].begin();
    auto uvn_it = uvs_norm[cam_id].begin();
    while (time_it != timestamps[cam_id].end()) {
      const bool keep = std::find(valid_times.begin(), valid_times.end(), *time_it) != valid_times.end();
      if (keep) {
        ++time_it;
        ++uv_it;
        ++uvn_it;
      } else {
        time_it = timestamps[cam_id].erase(time_it);
        uv_it = uvs[cam_id].erase(uv_it);
        uvn_it = uvs_norm[cam_id].erase(uvn_it);
      }
    }
  }
}
void Feature::clean_invalid_measurements(const std::vector<double> &invalid_times) {

  // For every camera this feature was seen from, drop any measurement whose
  // timestamp DOES appear in the passed list of invalid times.
  for (auto const &camtimes : timestamps) {
    const size_t cam_id = camtimes.first;

    // The three per-camera vectors are parallel and must stay in lock-step
    assert(timestamps[cam_id].size() == uvs[cam_id].size());
    assert(timestamps[cam_id].size() == uvs_norm[cam_id].size());

    // Walk all three vectors together, erasing in tandem
    auto time_it = timestamps[cam_id].begin();
    auto uv_it = uvs[cam_id].begin();
    auto uvn_it = uvs_norm[cam_id].begin();
    while (time_it != timestamps[cam_id].end()) {
      const bool is_invalid = std::find(invalid_times.begin(), invalid_times.end(), *time_it) != invalid_times.end();
      if (is_invalid) {
        time_it = timestamps[cam_id].erase(time_it);
        uv_it = uvs[cam_id].erase(uv_it);
        uvn_it = uvs_norm[cam_id].erase(uvn_it);
      } else {
        ++time_it;
        ++uv_it;
        ++uvn_it;
      }
    }
  }
}
void Feature::clean_older_measurements(double timestamp) {
// Loop through each of the cameras we have
for (auto const &pair : timestamps) {
// Assert that we have all the parts of a measurement
assert(timestamps[pair.first].size() == uvs[pair.first].size());
assert(timestamps[pair.first].size() == uvs_norm[pair.first].size());
// Our iterators
auto it1 = timestamps[pair.first].begin();
auto it2 = uvs[pair.first].begin();
auto it3 = uvs_norm[pair.first].begin();
// Loop through measurement times, remove ones that are older then the specified one
while (it1 != timestamps[pair.first].end()) {
if (*it1 <= timestamp) {
it1 = timestamps[pair.first].erase(it1);
it2 = uvs[pair.first].erase(it2);
it3 = uvs_norm[pair.first].erase(it3);
} else {
++it1;
++it2;
++it3;
}
}
}
}

100
ov_core/src/feat/Feature.h Normal file
View File

@@ -0,0 +1,100 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_FEATURE_H
#define OV_CORE_FEATURE_H
#include <Eigen/Eigen>
#include <iostream>
#include <unordered_map>
#include <vector>
namespace ov_core {
/**
* @brief Sparse feature class used to collect measurements
*
* This feature class allows for holding of all tracking information for a given feature.
* Each feature has a unique ID assigned to it, and should have a set of feature tracks alongside it.
* See the FeatureDatabase class for details on how we load information into this, and how we delete features.
*/
class Feature {

public:
  /// Unique ID of this feature
  size_t featid = 0;

  /// If this feature should be deleted.
  /// NOTE: default-initialized to false; previously this was left indeterminate, so any
  /// consumer checking the flag before a writer set it (e.g. a skip_deleted pass) read
  /// an uninitialized bool, which is undefined behavior.
  bool to_delete = false;

  /// UV coordinates that this feature has been seen from (mapped by camera ID)
  std::unordered_map<size_t, std::vector<Eigen::VectorXf>> uvs;

  /// UV normalized coordinates that this feature has been seen from (mapped by camera ID)
  std::unordered_map<size_t, std::vector<Eigen::VectorXf>> uvs_norm;

  /// Timestamps of each UV measurement (mapped by camera ID)
  std::unordered_map<size_t, std::vector<double>> timestamps;

  /// What camera ID our pose is anchored in!! By default the first measurement is the anchor.
  int anchor_cam_id = -1;

  /// Timestamp of anchor clone (-1 until an anchor has been selected)
  double anchor_clone_timestamp = -1;

  /// Triangulated position of this feature, in the anchor frame
  Eigen::Vector3d p_FinA;

  /// Triangulated position of this feature, in the global frame
  Eigen::Vector3d p_FinG;

  /**
   * @brief Remove measurements that do not occur at passed timestamps.
   *
   * Given a series of valid timestamps, this will remove all measurements that have not occurred at these times.
   * This would normally be used to ensure that the measurements that we have occur at our clone times.
   *
   * @param valid_times Vector of timestamps that our measurements must occur at
   */
  void clean_old_measurements(const std::vector<double> &valid_times);

  /**
   * @brief Remove measurements that occur at the invalid timestamps
   *
   * Given a series of invalid timestamps, this will remove all measurements that have occurred at these times.
   *
   * @param invalid_times Vector of timestamps that our measurements should not occur at
   */
  void clean_invalid_measurements(const std::vector<double> &invalid_times);

  /**
   * @brief Remove measurements that are older than the specified timestamp.
   *
   * Given a valid timestamp, this will remove all measurements that have occurred earlier than this.
   *
   * @param timestamp Timestamp that our measurements must occur after
   */
  void clean_older_measurements(double timestamp);
};
} // namespace ov_core
#endif /* OV_CORE_FEATURE_H */

View File

@@ -0,0 +1,419 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_FEATURE_DATABASE_H
#define OV_CORE_FEATURE_DATABASE_H
#include <Eigen/Eigen>
#include <memory>
#include <mutex>
#include <vector>
#include "Feature.h"
#include "utils/print.h"
namespace ov_core {
/**
* @brief Database containing features we are currently tracking.
*
* Each visual tracker has this database in it and it contains all features that we are tracking.
* The trackers will insert information into this database when they get new measurements from doing tracking.
* A user would then query this database for features that can be used for update and remove them after they have been processed.
*
*
* @m_class{m-note m-warning}
*
* @par A Note on Multi-Threading Support
* There is some support for asynchronous multi-threaded access.
* Since each feature is a pointer just directly returning and using them is not thread safe.
* Thus, to be thread safe, use the "remove" flag for each function which will remove it from this feature database.
* This prevents the trackers from adding new measurements and editing the feature information.
* For example, if you are asynchronous tracking cameras and you chose to update the state, then remove all features you will use in update.
* The feature trackers will continue to add features while you update, whose measurements can be used in the next update step!
*
*/
class FeatureDatabase {

public:
  /**
   * @brief Default constructor
   */
  FeatureDatabase() {}

  /**
   * @brief Get a specified feature
   * @param id What feature we want to get
   * @param remove Set to true if you want to remove the feature from the database (you will need to handle the freeing of memory)
   * @return Either a feature object, or null if it is not in the database.
   */
  std::shared_ptr<Feature> get_feature(size_t id, bool remove = false) {
    std::lock_guard<std::mutex> lck(mtx);
    if (features_idlookup.find(id) != features_idlookup.end()) {
      std::shared_ptr<Feature> temp = features_idlookup.at(id);
      if (remove)
        features_idlookup.erase(id);
      return temp;
    } else {
      return nullptr;
    }
  }

  /**
   * @brief Update a feature object
   * @param id ID of the feature we will update
   * @param timestamp time that this measurement occurred at
   * @param cam_id which camera this measurement was from
   * @param u raw u coordinate
   * @param v raw v coordinate
   * @param u_n undistorted/normalized u coordinate
   * @param v_n undistorted/normalized v coordinate
   *
   * This will update a given feature based on the passed ID it has.
   * It will create a new feature, if it is an ID that we have not seen before.
   */
  void update_feature(size_t id, double timestamp, size_t cam_id, float u, float v, float u_n, float v_n) {
    // Find this feature using the ID lookup
    std::lock_guard<std::mutex> lck(mtx);
    if (features_idlookup.find(id) != features_idlookup.end()) {
      // Get our feature
      std::shared_ptr<Feature> feat = features_idlookup.at(id);
      // Append this new information to it!
      // NOTE: uvs, uvs_norm and timestamps are parallel vectors, so all three are appended together
      feat->uvs[cam_id].push_back(Eigen::Vector2f(u, v));
      feat->uvs_norm[cam_id].push_back(Eigen::Vector2f(u_n, v_n));
      feat->timestamps[cam_id].push_back(timestamp);
      return;
    }
    // Debug info
    // PRINT_DEBUG("featdb - adding new feature %d",(int)id);
    // Else we have not found the feature, so lets make it be a new one!
    std::shared_ptr<Feature> feat = std::make_shared<Feature>();
    feat->featid = id;
    feat->uvs[cam_id].push_back(Eigen::Vector2f(u, v));
    feat->uvs_norm[cam_id].push_back(Eigen::Vector2f(u_n, v_n));
    feat->timestamps[cam_id].push_back(timestamp);
    // Append this new feature into our database
    features_idlookup[id] = feat;
  }

  /**
   * @brief Get features that do not have newer measurement than the specified time.
   *
   * This function will return all features that do not have a measurement at a time greater than the specified time.
   * For example this could be used to get features that have not been successfully tracked into the newest frame.
   * All features returned will not have any measurements occurring at a time greater than the specified.
   */
  std::vector<std::shared_ptr<Feature>> features_not_containing_newer(double timestamp, bool remove = false, bool skip_deleted = false) {
    // Our vector of features that do not have measurements after the specified time
    std::vector<std::shared_ptr<Feature>> feats_old;
    // Now lets loop through all features, and just make sure they are not old
    std::lock_guard<std::mutex> lck(mtx);
    for (auto it = features_idlookup.begin(); it != features_idlookup.end();) {
      // Skip if already deleted
      if (skip_deleted && (*it).second->to_delete) {
        it++;
        continue;
      }
      // Loop through each camera
      // If we have a measurement greater-than or equal to the specified, this measurement is fine
      // NOTE: per-camera timestamps are appended in order, so the last element is the newest one
      bool has_newer_measurement = false;
      for (auto const &pair : (*it).second->timestamps) {
        has_newer_measurement = (!pair.second.empty() && pair.second.at(pair.second.size() - 1) >= timestamp);
        if (has_newer_measurement) {
          break;
        }
      }
      // If it is not being actively tracked, then it is old
      if (!has_newer_measurement) {
        feats_old.push_back((*it).second);
        if (remove)
          features_idlookup.erase(it++);
        else
          it++;
      } else {
        it++;
      }
    }
    // Debugging
    // PRINT_DEBUG("feature db size = %u\n", features_idlookup.size())
    // Return the old features
    return feats_old;
  }

  /**
   * @brief Get features that have measurements older than the specified time.
   *
   * This will collect all features that have measurements occurring before the specified timestamp.
   * For example, we would want to remove all features older than the last clone/state in our sliding window.
   */
  std::vector<std::shared_ptr<Feature>> features_containing_older(double timestamp, bool remove = false, bool skip_deleted = false) {
    // Our vector of old features
    std::vector<std::shared_ptr<Feature>> feats_old;
    // Now lets loop through all features, and just make sure they are not old
    std::lock_guard<std::mutex> lck(mtx);
    for (auto it = features_idlookup.begin(); it != features_idlookup.end();) {
      // Skip if already deleted
      if (skip_deleted && (*it).second->to_delete) {
        it++;
        continue;
      }
      // Loop through each camera
      // Check if we have at least one time older than the requested
      // NOTE: per-camera timestamps are appended in order, so the first element is the oldest one
      bool found_containing_older = false;
      for (auto const &pair : (*it).second->timestamps) {
        found_containing_older = (!pair.second.empty() && pair.second.at(0) < timestamp);
        if (found_containing_older) {
          break;
        }
      }
      // If it has an older timestamp, then add it
      if (found_containing_older) {
        feats_old.push_back((*it).second);
        if (remove)
          features_idlookup.erase(it++);
        else
          it++;
      } else {
        it++;
      }
    }
    // Debugging
    // PRINT_DEBUG("feature db size = %u\n", features_idlookup.size())
    // Return the old features
    return feats_old;
  }

  /**
   * @brief Get features that have measurements at the specified time.
   *
   * This function will return all features that have the specified time in them.
   * This would be used to get all features that occurred at a specific clone/state.
   */
  std::vector<std::shared_ptr<Feature>> features_containing(double timestamp, bool remove = false, bool skip_deleted = false) {
    // Our vector of features seen at this time
    std::vector<std::shared_ptr<Feature>> feats_has_timestamp;
    // Now lets loop through all features, and just make sure they are not
    std::lock_guard<std::mutex> lck(mtx);
    for (auto it = features_idlookup.begin(); it != features_idlookup.end();) {
      // Skip if already deleted
      if (skip_deleted && (*it).second->to_delete) {
        it++;
        continue;
      }
      // Boolean if it has the timestamp
      // Break out if we found a single timestamp that is equal to the specified time
      bool has_timestamp = false;
      for (auto const &pair : (*it).second->timestamps) {
        has_timestamp = (std::find(pair.second.begin(), pair.second.end(), timestamp) != pair.second.end());
        if (has_timestamp) {
          break;
        }
      }
      // Collect (and optionally remove) this feature if it contains the specified timestamp
      if (has_timestamp) {
        feats_has_timestamp.push_back((*it).second);
        if (remove)
          features_idlookup.erase(it++);
        else
          it++;
      } else {
        it++;
      }
    }
    // Debugging
    // PRINT_DEBUG("feature db size = %u\n", features_idlookup.size())
    // PRINT_DEBUG("return vector = %u\n", feats_has_timestamp.size())
    // Return the features
    return feats_has_timestamp;
  }

  /**
   * @brief This function will delete all features that have been used up.
   *
   * If a feature was unable to be used, it will still remain since it will not have a delete flag set
   */
  void cleanup() {
    // Loop through all features
    // int sizebefore = (int)features_idlookup.size();
    std::lock_guard<std::mutex> lck(mtx);
    for (auto it = features_idlookup.begin(); it != features_idlookup.end();) {
      // If delete flag is set, then delete it
      if ((*it).second->to_delete) {
        features_idlookup.erase(it++);
      } else {
        it++;
      }
    }
    // PRINT_DEBUG("feat db = %d -> %d\n", sizebefore, (int)features_idlookup.size() << std::endl;
  }

  /**
   * @brief This function will delete all feature measurements that are older than the specified timestamp
   */
  void cleanup_measurements(double timestamp) {
    std::lock_guard<std::mutex> lck(mtx);
    for (auto it = features_idlookup.begin(); it != features_idlookup.end();) {
      // Remove the older measurements
      (*it).second->clean_older_measurements(timestamp);
      // Count how many measurements remain across all cameras
      int ct_meas = 0;
      for (const auto &pair : (*it).second->timestamps) {
        ct_meas += (int)(pair.second.size());
      }
      // If no measurements remain, delete the feature entirely
      if (ct_meas < 1) {
        features_idlookup.erase(it++);
      } else {
        it++;
      }
    }
  }

  /**
   * @brief This function will delete all feature measurements that are at the specified timestamp
   */
  void cleanup_measurements_exact(double timestamp) {
    std::lock_guard<std::mutex> lck(mtx);
    std::vector<double> timestamps = {timestamp};
    for (auto it = features_idlookup.begin(); it != features_idlookup.end();) {
      // Remove measurements taken exactly at this time
      (*it).second->clean_invalid_measurements(timestamps);
      // Count how many measurements remain across all cameras
      int ct_meas = 0;
      for (const auto &pair : (*it).second->timestamps) {
        ct_meas += (int)(pair.second.size());
      }
      // If no measurements remain, delete the feature entirely
      if (ct_meas < 1) {
        features_idlookup.erase(it++);
      } else {
        it++;
      }
    }
  }

  /**
   * @brief Returns the size of the feature database
   */
  size_t size() {
    std::lock_guard<std::mutex> lck(mtx);
    return features_idlookup.size();
  }

  /**
   * @brief Returns the internal data (should not normally be used)
   * @note Returns a copy of the lookup map; the shared_ptr features themselves are still shared.
   */
  std::unordered_map<size_t, std::shared_ptr<Feature>> get_internal_data() {
    std::lock_guard<std::mutex> lck(mtx);
    return features_idlookup;
  }

  /**
   * @brief Gets the oldest time in the database
   * @return Oldest timestamp over all features and cameras, or -1 if the database has no measurements
   */
  double get_oldest_timestamp() {
    std::lock_guard<std::mutex> lck(mtx);
    double oldest_time = -1;
    for (auto const &feat : features_idlookup) {
      for (auto const &camtimepair : feat.second->timestamps) {
        if (!camtimepair.second.empty() && (oldest_time == -1 || oldest_time < camtimepair.second.at(0))) {
          oldest_time = camtimepair.second.at(0);
        }
      }
    }
    return oldest_time;
  }

  /**
   * @brief Will update the passed database with this database's latest feature information.
   */
  void append_new_measurements(const std::shared_ptr<FeatureDatabase> &database) {
    std::lock_guard<std::mutex> lck(mtx);
    // Loop through the other database's internal database
    // int sizebefore = (int)features_idlookup.size();
    for (const auto &feat : database->get_internal_data()) {
      if (features_idlookup.find(feat.first) != features_idlookup.end()) {
        // For this feature, now try to append the new measurement data
        std::shared_ptr<Feature> temp = features_idlookup.at(feat.first);
        for (const auto &times : feat.second->timestamps) {
          // Append the whole camera vector if not seen
          // Otherwise need to loop through each and append
          size_t cam_id = times.first;
          if (temp->timestamps.find(cam_id) == temp->timestamps.end()) {
            temp->timestamps[cam_id] = feat.second->timestamps.at(cam_id);
            temp->uvs[cam_id] = feat.second->uvs.at(cam_id);
            temp->uvs_norm[cam_id] = feat.second->uvs_norm.at(cam_id);
          } else {
            // NOTE: this is a deliberate snapshot COPY of our current timestamps,
            // so the std::find below searches only the pre-existing measurements
            // while we push new ones onto the live vectors
            auto temp_times = temp->timestamps.at(cam_id);
            for (size_t i = 0; i < feat.second->timestamps.at(cam_id).size(); i++) {
              double time_to_find = feat.second->timestamps.at(cam_id).at(i);
              if (std::find(temp_times.begin(), temp_times.end(), time_to_find) == temp_times.end()) {
                temp->timestamps.at(cam_id).push_back(feat.second->timestamps.at(cam_id).at(i));
                temp->uvs.at(cam_id).push_back(feat.second->uvs.at(cam_id).at(i));
                temp->uvs_norm.at(cam_id).push_back(feat.second->uvs_norm.at(cam_id).at(i));
              }
            }
          }
        }
      } else {
        // Else we have not found the feature, so lets make it be a new one!
        std::shared_ptr<Feature> temp = std::make_shared<Feature>();
        temp->featid = feat.second->featid;
        temp->timestamps = feat.second->timestamps;
        temp->uvs = feat.second->uvs;
        temp->uvs_norm = feat.second->uvs_norm;
        features_idlookup[feat.first] = temp;
      }
    }
    // PRINT_DEBUG("feat db = %d -> %d\n", sizebefore, (int)features_idlookup.size() << std::endl;
  }

protected:
  /// Mutex lock for our map
  std::mutex mtx;

  /// Our lookup array that allow use to query based on ID
  std::unordered_map<size_t, std::shared_ptr<Feature>> features_idlookup;
};
} // namespace ov_core
#endif /* OV_CORE_FEATURE_DATABASE_H */

View File

@@ -0,0 +1,168 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_FEATURE_HELPER_H
#define OV_CORE_FEATURE_HELPER_H
#include <Eigen/Eigen>
#include <memory>
#include <mutex>
#include <vector>
#include "Feature.h"
#include "FeatureDatabase.h"
#include "utils/print.h"
namespace ov_core {
/**
* @brief Contains some nice helper functions for features.
*
* These functions should only depend on feature and the feature database.
*/
class FeatureHelper {

public:
  /**
   * @brief This functions will compute the disparity between common features in the two frames.
   *
   * First we find all features in the first frame.
   * Then we loop through each and find the uv of it in the next requested frame.
   * Features are skipped if no tracked feature is found (it was lost).
   * NOTE: this is on the RAW coordinates of the feature not the normalized ones.
   * NOTE: This computes the disparity over all cameras!
   *
   * @param db Feature database pointer
   * @param time0 First camera frame timestamp
   * @param time1 Second camera frame timestamp
   * @param disp_mean Average raw disparity (-1 if fewer than two common features)
   * @param disp_var Standard deviation of the disparities (-1 if fewer than two common features)
   * @param total_feats Total number of common features
   */
  static void compute_disparity(std::shared_ptr<ov_core::FeatureDatabase> db, double time0, double time1, double &disp_mean,
                                double &disp_var, int &total_feats) {

    // Get features seen from the first image
    std::vector<std::shared_ptr<Feature>> feats0 = db->features_containing(time0, false, true);

    // Compute the disparity
    std::vector<double> disparities;
    for (auto &feat : feats0) {

      // Get the two uvs for both times
      for (auto &campairs : feat->timestamps) {

        // First find the two timestamps
        size_t camid = campairs.first;
        auto it0 = std::find(feat->timestamps.at(camid).begin(), feat->timestamps.at(camid).end(), time0);
        auto it1 = std::find(feat->timestamps.at(camid).begin(), feat->timestamps.at(camid).end(), time1);
        if (it0 == feat->timestamps.at(camid).end() || it1 == feat->timestamps.at(camid).end())
          continue;
        auto idx0 = std::distance(feat->timestamps.at(camid).begin(), it0);
        auto idx1 = std::distance(feat->timestamps.at(camid).begin(), it1);

        // Now lets calculate the disparity
        Eigen::Vector2f uv0 = feat->uvs.at(camid).at(idx0).block(0, 0, 2, 1);
        Eigen::Vector2f uv1 = feat->uvs.at(camid).at(idx1).block(0, 0, 2, 1);
        disparities.push_back((uv1 - uv0).norm());
      }
    }

    // If not enough disparities, set sentinel values and return
    // BUGFIX: the early return was missing, so the -1 sentinels were immediately
    // overwritten below by 0/0 (NaN) or a divide-by-zero variance
    if (disparities.size() < 2) {
      disp_mean = -1;
      disp_var = -1;
      total_feats = 0;
      return;
    }

    // Compute mean and standard deviation in respect to it
    disp_mean = 0;
    for (double disp_i : disparities) {
      disp_mean += disp_i;
    }
    disp_mean /= (double)disparities.size();
    disp_var = 0;
    for (double disp_i : disparities) {
      disp_var += std::pow(disp_i - disp_mean, 2);
    }
    disp_var = std::sqrt(disp_var / (double)(disparities.size() - 1));
    total_feats = (int)disparities.size();
  }

  /**
   * @brief This functions will compute the disparity over all features we have
   *
   * NOTE: this is on the RAW coordinates of the feature not the normalized ones.
   * NOTE: This computes the disparity over all cameras!
   *
   * @param db Feature database pointer
   * @param disp_mean Average raw disparity (-1 if fewer than two disparities)
   * @param disp_var Standard deviation of the disparities (-1 if fewer than two disparities)
   * @param total_feats Total number of common features
   */
  static void compute_disparity(std::shared_ptr<ov_core::FeatureDatabase> db, double &disp_mean, double &disp_var, int &total_feats) {

    // Compute the disparity (first to last observation of each camera track)
    std::vector<double> disparities;
    for (auto &feat : db->get_internal_data()) {
      for (auto &campairs : feat.second->timestamps) {

        // Skip if only one observation
        if (campairs.second.size() < 2)
          continue;

        // Now lets calculate the disparity
        size_t camid = campairs.first;
        Eigen::Vector2f uv0 = feat.second->uvs.at(camid).at(0).block(0, 0, 2, 1);
        Eigen::Vector2f uv1 = feat.second->uvs.at(camid).at(campairs.second.size() - 1).block(0, 0, 2, 1);
        disparities.push_back((uv1 - uv0).norm());
      }
    }

    // If not enough disparities, set sentinel values and return
    // BUGFIX: the early return was missing here as well (see overload above)
    if (disparities.size() < 2) {
      disp_mean = -1;
      disp_var = -1;
      total_feats = 0;
      return;
    }

    // Compute mean and standard deviation in respect to it
    disp_mean = 0;
    for (double disp_i : disparities) {
      disp_mean += disp_i;
    }
    disp_mean /= (double)disparities.size();
    disp_var = 0;
    for (double disp_i : disparities) {
      disp_var += std::pow(disp_i - disp_mean, 2);
    }
    disp_var = std::sqrt(disp_var / (double)(disparities.size() - 1));
    total_feats = (int)disparities.size();
  }

private:
  // Cannot construct this class
  FeatureHelper() {}
};
} // namespace ov_core
#endif /* OV_CORE_FEATURE_HELPER_H */

View File

@@ -0,0 +1,417 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "FeatureInitializer.h"
using namespace ov_core;
bool FeatureInitializer::single_triangulation(Feature *feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM) {

  // Select the anchor camera as the one with the most measurements,
  // and anchor on its most recent (last) timestamp
  int total_meas = 0;
  size_t anchor_most_meas = 0;
  size_t most_meas = 0;
  for (auto const &camtimes : feat->timestamps) {
    total_meas += (int)camtimes.second.size();
    if (camtimes.second.size() > most_meas) {
      anchor_most_meas = camtimes.first;
      most_meas = camtimes.second.size();
    }
  }
  feat->anchor_cam_id = anchor_most_meas;
  feat->anchor_clone_timestamp = feat->timestamps.at(feat->anchor_cam_id).back();

  // Accumulators for the 3x3 linear system A * p_FinA = b
  Eigen::Matrix3d A = Eigen::Matrix3d::Zero();
  Eigen::Vector3d b = Eigen::Vector3d::Zero();

  // Anchor clone pose (rotation global-to-anchor, anchor position in global)
  ClonePose anchorclone = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp);
  const Eigen::Matrix<double, 3, 3> &R_GtoA = anchorclone.Rot();
  const Eigen::Matrix<double, 3, 1> &p_AinG = anchorclone.pos();

  // Build one constraint per measurement, over every camera that saw this feature
  for (auto const &camtimes : feat->timestamps) {
    const size_t cam_id = camtimes.first;
    for (size_t m = 0; m < feat->timestamps.at(cam_id).size(); m++) {

      // Pose of the clone this measurement was taken from
      const double meas_time = feat->timestamps.at(cam_id).at(m);
      const Eigen::Matrix<double, 3, 3> &R_GtoCi = clonesCAM.at(cam_id).at(meas_time).Rot();
      const Eigen::Matrix<double, 3, 1> &p_CiinG = clonesCAM.at(cam_id).at(meas_time).pos();

      // Express this clone relative to the anchor frame
      Eigen::Matrix<double, 3, 3> R_AtoCi;
      R_AtoCi.noalias() = R_GtoCi * R_GtoA.transpose();
      Eigen::Matrix<double, 3, 1> p_CiinA;
      p_CiinA.noalias() = R_GtoA * (p_CiinG - p_AinG);

      // Unit bearing of the measurement, rotated into the anchor frame
      Eigen::Matrix<double, 3, 1> b_i;
      b_i << feat->uvs_norm.at(cam_id).at(m)(0), feat->uvs_norm.at(cam_id).at(m)(1), 1;
      b_i = R_AtoCi.transpose() * b_i;
      b_i = b_i / b_i.norm();
      Eigen::Matrix3d Bperp = skew_x(b_i);

      // Accumulate into the normal equations
      Eigen::Matrix3d Ai = Bperp.transpose() * Bperp;
      A += Ai;
      b += Ai * p_CiinA;
    }
  }

  // Solve the linear system for the feature position in the anchor frame
  Eigen::MatrixXd p_f = A.colPivHouseholderQr().solve(b);

  // Condition number of A = sigma_max / sigma_min
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::MatrixXd singularValues;
  singularValues.resize(svd.singularValues().rows(), 1);
  singularValues = svd.singularValues();
  double condA = singularValues(0, 0) / singularValues(singularValues.rows() - 1, 0);

  // std::stringstream ss;
  // ss << feat->featid << " - cond " << std::abs(condA) << " - z " << p_f(2, 0) << std::endl;
  // PRINT_DEBUG(ss.str().c_str());

  // Reject if poorly conditioned, outside the allowed depth range, or nan
  if (std::abs(condA) > _options.max_cond_number || p_f(2, 0) < _options.min_dist || p_f(2, 0) > _options.max_dist ||
      std::isnan(p_f.norm())) {
    return false;
  }

  // Store the triangulated point in both the anchor and global frames
  feat->p_FinA = p_f;
  feat->p_FinG = R_GtoA.transpose() * feat->p_FinA + p_AinG;
  return true;
}
bool FeatureInitializer::single_triangulation_1d(Feature *feat,
                                                 std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM) {

  // Select the anchor camera as the one with the most measurements,
  // and anchor on its most recent (last) timestamp
  int total_meas = 0;
  size_t anchor_most_meas = 0;
  size_t most_meas = 0;
  for (auto const &camtimes : feat->timestamps) {
    total_meas += (int)camtimes.second.size();
    if (camtimes.second.size() > most_meas) {
      anchor_most_meas = camtimes.first;
      most_meas = camtimes.second.size();
    }
  }
  feat->anchor_cam_id = anchor_most_meas;
  feat->anchor_clone_timestamp = feat->timestamps.at(feat->anchor_cam_id).back();
  size_t idx_anchor_bearing = feat->timestamps.at(feat->anchor_cam_id).size() - 1;

  // Scalar accumulators of the 1-dof (depth-only) linear system A * depth = b
  double A = 0.0;
  double b = 0.0;

  // Anchor clone pose (rotation global-to-anchor, anchor position in global)
  ClonePose anchorclone = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp);
  const Eigen::Matrix<double, 3, 3> &R_GtoA = anchorclone.Rot();
  const Eigen::Matrix<double, 3, 1> &p_AinG = anchorclone.pos();

  // Unit bearing of the anchor measurement, expressed in the anchor frame
  Eigen::Matrix<double, 3, 1> bearing_inA;
  bearing_inA << feat->uvs_norm.at(feat->anchor_cam_id).at(idx_anchor_bearing)(0),
      feat->uvs_norm.at(feat->anchor_cam_id).at(idx_anchor_bearing)(1), 1;
  bearing_inA = bearing_inA / bearing_inA.norm();

  // Build one constraint per measurement, over every camera that saw this feature
  for (auto const &camtimes : feat->timestamps) {
    const size_t cam_id = camtimes.first;
    for (size_t m = 0; m < feat->timestamps.at(cam_id).size(); m++) {

      // The anchor bearing constrains nothing along itself, so skip it
      if ((int)cam_id == feat->anchor_cam_id && m == idx_anchor_bearing)
        continue;

      // Pose of the clone this measurement was taken from
      const double meas_time = feat->timestamps.at(cam_id).at(m);
      const Eigen::Matrix<double, 3, 3> &R_GtoCi = clonesCAM.at(cam_id).at(meas_time).Rot();
      const Eigen::Matrix<double, 3, 1> &p_CiinG = clonesCAM.at(cam_id).at(meas_time).pos();

      // Express this clone relative to the anchor frame
      Eigen::Matrix<double, 3, 3> R_AtoCi;
      R_AtoCi.noalias() = R_GtoCi * R_GtoA.transpose();
      Eigen::Matrix<double, 3, 1> p_CiinA;
      p_CiinA.noalias() = R_GtoA * (p_CiinG - p_AinG);

      // Unit bearing of the measurement, rotated into the anchor frame
      Eigen::Matrix<double, 3, 1> b_i;
      b_i << feat->uvs_norm.at(cam_id).at(m)(0), feat->uvs_norm.at(cam_id).at(m)(1), 1;
      b_i = R_AtoCi.transpose() * b_i;
      b_i = b_i / b_i.norm();
      Eigen::Matrix3d Bperp = skew_x(b_i);

      // Accumulate the scalar normal equations along the anchor bearing
      Eigen::Vector3d BperpBanchor = Bperp * bearing_inA;
      A += BperpBanchor.dot(BperpBanchor);
      b += BperpBanchor.dot(Bperp * p_CiinA);
    }
  }

  // Solve for depth along the anchor bearing
  double depth = b / A;
  Eigen::MatrixXd p_f = depth * bearing_inA;

  // Reject if outside the allowed depth range or nan
  if (p_f(2, 0) < _options.min_dist || p_f(2, 0) > _options.max_dist || std::isnan(p_f.norm())) {
    return false;
  }

  // Store the triangulated point in both the anchor and global frames
  feat->p_FinA = p_f;
  feat->p_FinG = R_GtoA.transpose() * feat->p_FinA + p_AinG;
  return true;
}
// Levenberg-Marquardt refinement of an already-triangulated feature.
// Optimizes the inverse-depth parameterization [alpha, beta, rho] = [x/z, y/z, 1/z]
// (seeded from feat->p_FinA) over all observations of the feature, then validates
// the refined position against the distance and baseline-ratio thresholds before
// writing it back into feat->p_FinA and feat->p_FinG.
// Returns false if the refined feature fails those validity checks.
bool FeatureInitializer::single_gaussnewton(Feature *feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM) {
  // Get into inverse depth
  double rho = 1 / feat->p_FinA(2);
  double alpha = feat->p_FinA(0) / feat->p_FinA(2);
  double beta = feat->p_FinA(1) / feat->p_FinA(2);
  // Optimization parameters (lam is the Levenberg-Marquardt damping factor)
  double lam = _options.init_lamda;
  double eps = 10000;
  int runs = 0;
  // Variables used in the optimization
  bool recompute = true;
  Eigen::Matrix<double, 3, 3> Hess = Eigen::Matrix<double, 3, 3>::Zero();
  Eigen::Matrix<double, 3, 1> grad = Eigen::Matrix<double, 3, 1>::Zero();
  // Cost at the last iteration
  double cost_old = compute_error(clonesCAM, feat, alpha, beta, rho);
  // Get the position of the anchor pose
  const Eigen::Matrix<double, 3, 3> &R_GtoA = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).Rot();
  const Eigen::Matrix<double, 3, 1> &p_AinG = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).pos();
  // Loop till we have either
  // 1. Reached our max iteration count
  // 2. System is unstable
  // 3. System has converged
  while (runs < _options.max_runs && lam < _options.max_lamda && eps > _options.min_dx) {
    // Triggers a recomputation of jacobians/information/gradients
    // (skipped when the previous step was rejected, since the linearization point is unchanged)
    if (recompute) {
      Hess.setZero();
      grad.setZero();
      double err = 0; // accumulated squared residual at the current linearization point (debug only)
      // Loop through each camera for this feature
      for (auto const &pair : feat->timestamps) {
        // Add CAM_I features
        for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) {
          //=====================================================================================
          //=====================================================================================
          // Get the position of this clone in the global
          const Eigen::Matrix<double, 3, 3> &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps[pair.first].at(m)).Rot();
          const Eigen::Matrix<double, 3, 1> &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps[pair.first].at(m)).pos();
          // Convert current position relative to anchor
          Eigen::Matrix<double, 3, 3> R_AtoCi;
          R_AtoCi.noalias() = R_GtoCi * R_GtoA.transpose();
          Eigen::Matrix<double, 3, 1> p_CiinA;
          p_CiinA.noalias() = R_GtoA * (p_CiinG - p_AinG);
          Eigen::Matrix<double, 3, 1> p_AinCi;
          p_AinCi.noalias() = -R_AtoCi * p_CiinA;
          //=====================================================================================
          //=====================================================================================
          // Middle variables of the system: projection of [alpha, beta, 1] + rho * p_AinCi into frame Ci
          double hi1 = R_AtoCi(0, 0) * alpha + R_AtoCi(0, 1) * beta + R_AtoCi(0, 2) + rho * p_AinCi(0, 0);
          double hi2 = R_AtoCi(1, 0) * alpha + R_AtoCi(1, 1) * beta + R_AtoCi(1, 2) + rho * p_AinCi(1, 0);
          double hi3 = R_AtoCi(2, 0) * alpha + R_AtoCi(2, 1) * beta + R_AtoCi(2, 2) + rho * p_AinCi(2, 0);
          // Calculate jacobian of the normalized projection [hi1/hi3, hi2/hi3] w.r.t. [alpha, beta, rho]
          double d_z1_d_alpha = (R_AtoCi(0, 0) * hi3 - hi1 * R_AtoCi(2, 0)) / (pow(hi3, 2));
          double d_z1_d_beta = (R_AtoCi(0, 1) * hi3 - hi1 * R_AtoCi(2, 1)) / (pow(hi3, 2));
          double d_z1_d_rho = (p_AinCi(0, 0) * hi3 - hi1 * p_AinCi(2, 0)) / (pow(hi3, 2));
          double d_z2_d_alpha = (R_AtoCi(1, 0) * hi3 - hi2 * R_AtoCi(2, 0)) / (pow(hi3, 2));
          double d_z2_d_beta = (R_AtoCi(1, 1) * hi3 - hi2 * R_AtoCi(2, 1)) / (pow(hi3, 2));
          double d_z2_d_rho = (p_AinCi(1, 0) * hi3 - hi2 * p_AinCi(2, 0)) / (pow(hi3, 2));
          Eigen::Matrix<double, 2, 3> H;
          H << d_z1_d_alpha, d_z1_d_beta, d_z1_d_rho, d_z2_d_alpha, d_z2_d_beta, d_z2_d_rho;
          // Calculate residual (float since the normalized uv measurements are stored as float)
          Eigen::Matrix<float, 2, 1> z;
          z << hi1 / hi3, hi2 / hi3;
          Eigen::Matrix<float, 2, 1> res = feat->uvs_norm.at(pair.first).at(m) - z;
          //=====================================================================================
          //=====================================================================================
          // Append to our summation variables
          err += std::pow(res.norm(), 2);
          grad.noalias() += H.transpose() * res.cast<double>();
          Hess.noalias() += H.transpose() * H;
        }
      }
    }
    // Solve Levenberg iteration: scale the Hessian diagonal by (1 + lambda)
    Eigen::Matrix<double, 3, 3> Hess_l = Hess;
    for (size_t r = 0; r < (size_t)Hess.rows(); r++) {
      Hess_l(r, r) *= (1.0 + lam);
    }
    Eigen::Matrix<double, 3, 1> dx = Hess_l.colPivHouseholderQr().solve(grad);
    // Eigen::Matrix<double,3,1> dx = (Hess+lam*Eigen::MatrixXd::Identity(Hess.rows(), Hess.rows())).colPivHouseholderQr().solve(grad);
    // Check if error has gone down
    double cost = compute_error(clonesCAM, feat, alpha + dx(0, 0), beta + dx(1, 0), rho + dx(2, 0));
    // Debug print
    // std::stringstream ss;
    // ss << "run = " << runs << " | cost = " << dx.norm() << " | lamda = " << lam << " | depth = " << 1/rho << endl;
    // PRINT_DEBUG(ss.str().c_str());
    // Check if converged: accept the step and stop when the relative cost decrease drops below min_dcost
    if (cost <= cost_old && (cost_old - cost) / cost_old < _options.min_dcost) {
      alpha += dx(0, 0);
      beta += dx(1, 0);
      rho += dx(2, 0);
      eps = 0;
      break;
    }
    // If cost is lowered, accept step
    // Else inflate lambda (try to make more stable)
    if (cost <= cost_old) {
      recompute = true;
      cost_old = cost;
      alpha += dx(0, 0);
      beta += dx(1, 0);
      rho += dx(2, 0);
      runs++;
      lam = lam / _options.lam_mult;
      eps = dx.norm();
    } else {
      recompute = false;
      lam = lam * _options.lam_mult;
      continue;
    }
  }
  // Revert to standard (x, y, z) in the anchor frame, and set to all
  feat->p_FinA(0) = alpha / rho;
  feat->p_FinA(1) = beta / rho;
  feat->p_FinA(2) = 1 / rho;
  // Get tangent plane to x_hat (columns 1-2 of Q span the plane orthogonal to the feature direction)
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(feat->p_FinA);
  Eigen::MatrixXd Q = qr.householderQ();
  // Max baseline we have between poses
  double base_line_max = 0.0;
  // Check maximum baseline
  // Loop through each camera for this feature
  for (auto const &pair : feat->timestamps) {
    // Loop through the other clones to see what the max baseline is
    for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) {
      // Get the position of this clone in the global
      const Eigen::Matrix<double, 3, 1> &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos();
      // Convert current position relative to anchor
      Eigen::Matrix<double, 3, 1> p_CiinA = R_GtoA * (p_CiinG - p_AinG);
      // Dot product camera pose and nullspace: baseline component orthogonal to the feature bearing
      double base_line = ((Q.block(0, 1, 3, 2)).transpose() * p_CiinA).norm();
      if (base_line > base_line_max)
        base_line_max = base_line;
    }
  }
  // std::stringstream ss;
  // ss << feat->featid << " - max base " << (feat->p_FinA.norm() / base_line_max) << " - z " << feat->p_FinA(2) << std::endl;
  // PRINT_DEBUG(ss.str().c_str());
  // Check if this feature is bad or not
  // 1. If the feature is too close
  // 2. If the feature is invalid
  // 3. If the baseline ratio is large
  if (feat->p_FinA(2) < _options.min_dist || feat->p_FinA(2) > _options.max_dist ||
      (feat->p_FinA.norm() / base_line_max) > _options.max_baseline || std::isnan(feat->p_FinA.norm())) {
    return false;
  }
  // Finally get position in global frame
  feat->p_FinG = R_GtoA.transpose() * feat->p_FinA + p_AinG;
  return true;
}
double FeatureInitializer::compute_error(std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM, Feature *feat,
double alpha, double beta, double rho) {
// Total error
double err = 0;
// Get the position of the anchor pose
const Eigen::Matrix<double, 3, 3> &R_GtoA = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).Rot();
const Eigen::Matrix<double, 3, 1> &p_AinG = clonesCAM.at(feat->anchor_cam_id).at(feat->anchor_clone_timestamp).pos();
// Loop through each camera for this feature
for (auto const &pair : feat->timestamps) {
// Add CAM_I features
for (size_t m = 0; m < feat->timestamps.at(pair.first).size(); m++) {
//=====================================================================================
//=====================================================================================
// Get the position of this clone in the global
const Eigen::Matrix<double, 3, 3> &R_GtoCi = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).Rot();
const Eigen::Matrix<double, 3, 1> &p_CiinG = clonesCAM.at(pair.first).at(feat->timestamps.at(pair.first).at(m)).pos();
// Convert current position relative to anchor
Eigen::Matrix<double, 3, 3> R_AtoCi;
R_AtoCi.noalias() = R_GtoCi * R_GtoA.transpose();
Eigen::Matrix<double, 3, 1> p_CiinA;
p_CiinA.noalias() = R_GtoA * (p_CiinG - p_AinG);
Eigen::Matrix<double, 3, 1> p_AinCi;
p_AinCi.noalias() = -R_AtoCi * p_CiinA;
//=====================================================================================
//=====================================================================================
// Middle variables of the system
double hi1 = R_AtoCi(0, 0) * alpha + R_AtoCi(0, 1) * beta + R_AtoCi(0, 2) + rho * p_AinCi(0, 0);
double hi2 = R_AtoCi(1, 0) * alpha + R_AtoCi(1, 1) * beta + R_AtoCi(1, 2) + rho * p_AinCi(1, 0);
double hi3 = R_AtoCi(2, 0) * alpha + R_AtoCi(2, 1) * beta + R_AtoCi(2, 2) + rho * p_AinCi(2, 0);
// Calculate residual
Eigen::Matrix<float, 2, 1> z;
z << hi1 / hi3, hi2 / hi3;
Eigen::Matrix<float, 2, 1> res = feat->uvs_norm.at(pair.first).at(m) - z;
// Append to our summation variables
err += pow(res.norm(), 2);
}
}
return err;
}

View File

@@ -0,0 +1,149 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OPEN_VINS_FEATUREINITIALIZER_H
#define OPEN_VINS_FEATUREINITIALIZER_H
#include <unordered_map>
#include "Feature.h"
#include "FeatureInitializerOptions.h"
#include "utils/print.h"
#include "utils/quat_ops.h"
namespace ov_core {
/**
* @brief Class that triangulates feature
*
* This class has the functions needed to triangulate and then refine a given 3D feature.
* As in the standard MSCKF, we know the clones of the camera from propagation and past updates.
* Thus, we just need to triangulate a feature in 3D with the known poses and then refine it.
* One should first call the single_triangulation() function afterwhich single_gaussnewton() allows for refinement.
* Please see the @ref update-featinit page for detailed derivations.
*/
class FeatureInitializer {

public:
  /**
   * @brief Structure which stores pose estimates for use in triangulation
   *
   * - R_GtoC - rotation from global to camera
   * - p_CinG - position of camera in global frame
   */
  struct ClonePose {

    /// Rotation from the global frame into this camera frame
    Eigen::Matrix<double, 3, 3> _Rot = Eigen::Matrix<double, 3, 3>::Identity();

    /// Position of this camera in the global frame
    Eigen::Matrix<double, 3, 1> _pos = Eigen::Matrix<double, 3, 1>::Zero();

    /// Constructs pose from rotation and position
    ClonePose(const Eigen::Matrix<double, 3, 3> &R, const Eigen::Matrix<double, 3, 1> &p) : _Rot(R), _pos(p) {}

    /// Constructs pose from quaternion and position
    ClonePose(const Eigen::Matrix<double, 4, 1> &q, const Eigen::Matrix<double, 3, 1> &p) : _Rot(quat_2_Rot(q)), _pos(p) {}

    /// Default constructor (identity rotation, zero position via the member initializers)
    ClonePose() = default;

    /// Accessor for rotation (const-qualified so it can be used on const poses)
    const Eigen::Matrix<double, 3, 3> &Rot() const { return _Rot; }

    /// Accessor for position (const-qualified so it can be used on const poses)
    const Eigen::Matrix<double, 3, 1> &pos() const { return _pos; }
  };

  /**
   * @brief Default constructor
   * @param options Options for the initializer (copied into this object)
   */
  FeatureInitializer(FeatureInitializerOptions &options) : _options(options) {}

  /**
   * @brief Uses a linear triangulation to get initial estimate for the feature
   *
   * The derivations for this method can be found in the @ref featinit-linear documentation page.
   *
   * @param feat Pointer to feature
   * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate (rotation from global to camera, position of camera
   * in global frame)
   * @return Returns false if it fails to triangulate (based on the thresholds)
   */
  bool single_triangulation(Feature *feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM);

  /**
   * @brief Uses a linear triangulation to get initial estimate for the feature, treating the anchor observation as a true bearing.
   *
   * The derivations for this method can be found in the @ref featinit-linear-1d documentation page.
   * This function should be used if you want speed, or know your anchor bearing is reasonably accurate.
   *
   * @param feat Pointer to feature
   * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate (rotation from global to camera, position of camera
   * in global frame)
   * @return Returns false if it fails to triangulate (based on the thresholds)
   */
  bool single_triangulation_1d(Feature *feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM);

  /**
   * @brief Uses a nonlinear triangulation to refine initial linear estimate of the feature
   * @param feat Pointer to feature
   * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate (rotation from global to camera, position of camera
   * in global frame)
   * @return Returns false if it fails to be optimize (based on the thresholds)
   */
  bool single_gaussnewton(Feature *feat, std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM);

  /**
   * @brief Gets the current configuration of the feature initializer
   * @return Const reference to the feature initializer config (avoids copying the options on every call)
   */
  const FeatureInitializerOptions &config() const { return _options; }

protected:
  /// Contains options for the initializer process
  FeatureInitializerOptions _options;

  /**
   * @brief Helper function for the gauss newton method that computes error of the given estimate
   * @param clonesCAM Map between camera ID to map of timestamp to camera pose estimate
   * @param feat Pointer to the feature
   * @param alpha x/z in anchor
   * @param beta y/z in anchor
   * @param rho 1/z inverse depth
   */
  double compute_error(std::unordered_map<size_t, std::unordered_map<double, ClonePose>> &clonesCAM, Feature *feat, double alpha,
                       double beta, double rho);
};
} // namespace ov_core
#endif // OPEN_VINS_FEATUREINITIALIZER_H

View File

@@ -0,0 +1,104 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_INITIALIZEROPTIONS_H
#define OV_CORE_INITIALIZEROPTIONS_H
#include "utils/opencv_yaml_parse.h"
#include "utils/print.h"
namespace ov_core {
/**
* @brief Struct which stores all our feature initializer options
*/
struct FeatureInitializerOptions {
  /// If we should perform 1d triangulation instead of 3d
  bool triangulate_1d = false;
  /// If we should perform Levenberg-Marquardt refinement
  bool refine_features = true;
  /// Max runs for Levenberg-Marquardt
  int max_runs = 5;
  /// Init lambda for Levenberg-Marquardt optimization
  double init_lamda = 1e-3;
  /// Max lambda for Levenberg-Marquardt optimization
  double max_lamda = 1e10;
  /// Cutoff for dx increment to consider as converged
  double min_dx = 1e-6;
  /// Cutoff for cost decrement to consider as converged
  double min_dcost = 1e-6;
  /// Multiplier to increase/decrease lambda
  double lam_mult = 10;
  /// Minimum distance to accept triangulated features
  double min_dist = 0.10;
  /// Maximum distance to accept triangulated features
  double max_dist = 60;
  /// Max baseline ratio to accept triangulated features
  double max_baseline = 40;
  /// Max condition number of linear triangulation matrix accept triangulated features
  double max_cond_number = 10000;
  /// Nice print function of what parameters we have loaded.
  /// NOTE: if a parser is provided, this also loads/overwrites the values from the config before printing.
  void print(const std::shared_ptr<ov_core::YamlParser> &parser = nullptr) {
    if (parser != nullptr) {
      parser->parse_config("fi_triangulate_1d", triangulate_1d, false);
      parser->parse_config("fi_refine_features", refine_features, false);
      parser->parse_config("fi_max_runs", max_runs, false);
      parser->parse_config("fi_init_lamda", init_lamda, false);
      parser->parse_config("fi_max_lamda", max_lamda, false);
      parser->parse_config("fi_min_dx", min_dx, false);
      parser->parse_config("fi_min_dcost", min_dcost, false);
      parser->parse_config("fi_lam_mult", lam_mult, false);
      parser->parse_config("fi_min_dist", min_dist, false);
      parser->parse_config("fi_max_dist", max_dist, false);
      parser->parse_config("fi_max_baseline", max_baseline, false);
      parser->parse_config("fi_max_cond_number", max_cond_number, false);
    }
    PRINT_DEBUG("\t- triangulate_1d: %d\n", triangulate_1d);
    PRINT_DEBUG("\t- refine_features: %d\n", refine_features);
    PRINT_DEBUG("\t- max_runs: %d\n", max_runs);
    PRINT_DEBUG("\t- init_lamda: %.3f\n", init_lamda);
    PRINT_DEBUG("\t- max_lamda: %.3f\n", max_lamda);
    PRINT_DEBUG("\t- min_dx: %.7f\n", min_dx);
    PRINT_DEBUG("\t- min_dcost: %.7f\n", min_dcost);
    PRINT_DEBUG("\t- lam_mult: %.3f\n", lam_mult);
    PRINT_DEBUG("\t- min_dist: %.3f\n", min_dist);
    PRINT_DEBUG("\t- max_dist: %.3f\n", max_dist);
    PRINT_DEBUG("\t- max_baseline: %.3f\n", max_baseline);
    PRINT_DEBUG("\t- max_cond_number: %.3f\n", max_cond_number);
  }
};
} // namespace ov_core
#endif // OV_CORE_INITIALIZEROPTIONS_H

21
ov_core/src/plot/LICENSE Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Benno Evers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,358 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "BsplineSE3.h"
using namespace ov_core;
// Build the uniform B-spline control points from a discrete trajectory.
// Each trajectory point is [timestamp, p_IinG, q_GtoI]; the average sample
// period (clamped to at least 0.05s) becomes the control point spacing dt.
void BsplineSE3::feed_trajectory(std::vector<Eigen::VectorXd> traj_points) {

  // We need at least two poses to estimate a timestep and interpolate between them
  // (also guards the unsigned underflow of traj_points.size() - 1 below)
  if (traj_points.size() < 2) {
    PRINT_DEBUG("[B-SPLINE]: need at least two trajectory points, got %d\n", (int)traj_points.size());
    return;
  }

  // Find the average frequency to use as our uniform timesteps
  double sumdt = 0;
  for (size_t i = 0; i < traj_points.size() - 1; i++) {
    sumdt += traj_points.at(i + 1)(0) - traj_points.at(i)(0);
  }
  dt = sumdt / (traj_points.size() - 1);
  dt = (dt < 0.05) ? 0.05 : dt; // enforce a minimum control point spacing
  PRINT_DEBUG("[B-SPLINE]: control point dt = %.3f (original dt of %.3f)\n", dt, sumdt / (traj_points.size() - 1));

  // Convert all our trajectory points into SE(3) matrices
  // We are given [timestamp, p_IinG, q_GtoI]
  // (the last point is dropped, consistent with the dt averaging above)
  AlignedEigenMat4d trajectory_points;
  for (size_t i = 0; i < traj_points.size() - 1; i++) {
    Eigen::Matrix4d T_IinG = Eigen::Matrix4d::Identity();
    T_IinG.block(0, 0, 3, 3) = quat_2_Rot(traj_points.at(i).block(4, 0, 4, 1)).transpose();
    T_IinG.block(0, 3, 3, 1) = traj_points.at(i).block(1, 0, 3, 1);
    trajectory_points.insert({traj_points.at(i)(0), T_IinG});
  }

  // Get the oldest and newest timestamps
  // BUGFIX: the max candidate is now compared against timestamp_max
  // (it was previously compared against timestamp_min, which only produced
  // the correct result because the map iterates in ascending key order)
  double timestamp_min = INFINITY;
  double timestamp_max = -INFINITY;
  for (const auto &pose : trajectory_points) {
    if (pose.first <= timestamp_min) {
      timestamp_min = pose.first;
    }
    if (pose.first >= timestamp_max) {
      timestamp_max = pose.first;
    }
  }
  PRINT_DEBUG("[B-SPLINE]: trajectory start time = %.6f\n", timestamp_min);
  PRINT_DEBUG("[B-SPLINE]: trajectory end time = %.6f\n", timestamp_max);

  // Then create spline control points, uniformly spaced by dt
  double timestamp_curr = timestamp_min;
  while (true) {

    // Get bounding poses for the current time
    double t0, t1;
    Eigen::Matrix4d pose0, pose1;
    bool success = find_bounding_poses(timestamp_curr, trajectory_points, t0, pose0, t1, pose1);

    // If we didn't find a bounding pose, then that means we are at the end of the dataset
    // Thus break out of this loop since we have created our max number of control points
    if (!success)
      break;

    // Linear interpolation on SE(3) and append to our control points
    double lambda = (timestamp_curr - t0) / (t1 - t0);
    Eigen::Matrix4d pose_interp = exp_se3(lambda * log_se3(pose1 * Inv_se3(pose0))) * pose0;
    control_points.insert({timestamp_curr, pose_interp});
    timestamp_curr += dt;
  }

  // The start time of the system is two dt in since we need at least two older control points
  timestamp_start = timestamp_min + 2 * dt;
  PRINT_DEBUG("[B-SPLINE]: start trajectory time of %.6f\n", timestamp_start);
}
// Evaluate the cubic B-spline pose at the given timestamp.
// On success, R_GtoI is the interpolated global-to-IMU rotation and p_IinG the
// IMU position in the global frame; on failure they are set to identity/zero.
bool BsplineSE3::get_pose(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG) {
  // Get the four bounding control poses for the desired timestamp (t1 <= timestamp < t2)
  double t0, t1, t2, t3;
  Eigen::Matrix4d pose0, pose1, pose2, pose3;
  bool success = find_bounding_control_points(timestamp, control_points, t0, pose0, t1, pose1, t2, pose2, t3, pose3);
  // PRINT_DEBUG("[SIM]: time curr = %.6f | dt1 = %.3f | dt2 = %.3f | dt3 = %.3f | dt4 = %.3f | success =
  // %d\n",timestamp,t0-timestamp,t1-timestamp,t2-timestamp,t3-timestamp,(int)success);
  // Return failure if we can't get bounding poses
  if (!success) {
    R_GtoI.setIdentity();
    p_IinG.setZero();
    return false;
  }
  // Our De Boor-Cox matrix scalars (u is the fractional progress between the two middle control points)
  double DT = (t2 - t1);
  double u = (timestamp - t1) / DT;
  double b0 = 1.0 / 6.0 * (5 + 3 * u - 3 * u * u + u * u * u);
  double b1 = 1.0 / 6.0 * (1 + 3 * u + 3 * u * u - 2 * u * u * u);
  double b2 = 1.0 / 6.0 * (u * u * u);
  // Calculate interpolated poses (incremental SE(3) deltas between consecutive control points)
  Eigen::Matrix4d A0 = exp_se3(b0 * log_se3(Inv_se3(pose0) * pose1));
  Eigen::Matrix4d A1 = exp_se3(b1 * log_se3(Inv_se3(pose1) * pose2));
  Eigen::Matrix4d A2 = exp_se3(b2 * log_se3(Inv_se3(pose2) * pose3));
  // Finally get the interpolated pose
  // (control points store the IMU-to-global transform, so the rotation block is transposed to get R_GtoI)
  Eigen::Matrix4d pose_interp = pose0 * A0 * A1 * A2;
  R_GtoI = pose_interp.block(0, 0, 3, 3).transpose();
  p_IinG = pose_interp.block(0, 3, 3, 1);
  return true;
}
// Evaluate the cubic B-spline pose AND its first time derivative at the given timestamp.
// Outputs: R_GtoI/p_IinG as in get_pose(), plus the angular velocity in the local IMU
// frame (w_IinI) and linear velocity in the global frame (v_IinG).
// NOTE: on failure only w_IinI and v_IinG are zeroed; R_GtoI and p_IinG are left untouched.
bool BsplineSE3::get_velocity(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI,
                              Eigen::Vector3d &v_IinG) {
  // Get the four bounding control poses for the desired timestamp
  double t0, t1, t2, t3;
  Eigen::Matrix4d pose0, pose1, pose2, pose3;
  bool success = find_bounding_control_points(timestamp, control_points, t0, pose0, t1, pose1, t2, pose2, t3, pose3);
  // PRINT_DEBUG("[SIM]: time curr = %.6f | dt1 = %.3f | dt2 = %.3f | dt3 = %.3f | dt4 = %.3f | success =
  // %d\n",timestamp,t0-timestamp,t1-timestamp,t2-timestamp,t3-timestamp,(int)success);
  // Return failure if we can't get bounding poses
  if (!success) {
    w_IinI.setZero();
    v_IinG.setZero();
    return false;
  }
  // Our De Boor-Cox matrix scalars and their time derivatives
  double DT = (t2 - t1);
  double u = (timestamp - t1) / DT;
  double b0 = 1.0 / 6.0 * (5 + 3 * u - 3 * u * u + u * u * u);
  double b1 = 1.0 / 6.0 * (1 + 3 * u + 3 * u * u - 2 * u * u * u);
  double b2 = 1.0 / 6.0 * (u * u * u);
  double b0dot = 1.0 / (6.0 * DT) * (3 - 6 * u + 3 * u * u);
  double b1dot = 1.0 / (6.0 * DT) * (3 + 6 * u - 6 * u * u);
  double b2dot = 1.0 / (6.0 * DT) * (3 * u * u);
  // Cache some values we use alot (SE(3) deltas between consecutive control points)
  Eigen::Matrix<double, 6, 1> omega_10 = log_se3(Inv_se3(pose0) * pose1);
  Eigen::Matrix<double, 6, 1> omega_21 = log_se3(Inv_se3(pose1) * pose2);
  Eigen::Matrix<double, 6, 1> omega_32 = log_se3(Inv_se3(pose2) * pose3);
  // Calculate interpolated poses and their derivatives (product rule on each factor)
  Eigen::Matrix4d A0 = exp_se3(b0 * omega_10);
  Eigen::Matrix4d A1 = exp_se3(b1 * omega_21);
  Eigen::Matrix4d A2 = exp_se3(b2 * omega_32);
  Eigen::Matrix4d A0dot = b0dot * hat_se3(omega_10) * A0;
  Eigen::Matrix4d A1dot = b1dot * hat_se3(omega_21) * A1;
  Eigen::Matrix4d A2dot = b2dot * hat_se3(omega_32) * A2;
  // Get the interpolated pose
  Eigen::Matrix4d pose_interp = pose0 * A0 * A1 * A2;
  R_GtoI = pose_interp.block(0, 0, 3, 3).transpose();
  p_IinG = pose_interp.block(0, 3, 3, 1);
  // Finally get the interpolated velocities
  // NOTE: Rdot = R*skew(omega) => R^T*Rdot = skew(omega)
  Eigen::Matrix4d vel_interp = pose0 * (A0dot * A1 * A2 + A0 * A1dot * A2 + A0 * A1 * A2dot);
  w_IinI = vee(pose_interp.block(0, 0, 3, 3).transpose() * vel_interp.block(0, 0, 3, 3));
  v_IinG = vel_interp.block(0, 3, 3, 1);
  return true;
}
// Evaluate the cubic B-spline pose plus its first AND second time derivatives.
// Outputs pose (R_GtoI, p_IinG), velocity (w_IinI, v_IinG) and acceleration:
// angular acceleration in the local IMU frame (alpha_IinI) and linear
// acceleration in the global frame (a_IinG).
// NOTE: on failure only alpha_IinI and a_IinG are zeroed; the other outputs are left untouched.
bool BsplineSE3::get_acceleration(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI,
                                  Eigen::Vector3d &v_IinG, Eigen::Vector3d &alpha_IinI, Eigen::Vector3d &a_IinG) {
  // Get the four bounding control poses for the desired timestamp
  double t0, t1, t2, t3;
  Eigen::Matrix4d pose0, pose1, pose2, pose3;
  bool success = find_bounding_control_points(timestamp, control_points, t0, pose0, t1, pose1, t2, pose2, t3, pose3);
  // Return failure if we can't get bounding poses
  if (!success) {
    alpha_IinI.setZero();
    a_IinG.setZero();
    return false;
  }
  // Our De Boor-Cox matrix scalars and their first and second time derivatives
  double DT = (t2 - t1);
  double u = (timestamp - t1) / DT;
  double b0 = 1.0 / 6.0 * (5 + 3 * u - 3 * u * u + u * u * u);
  double b1 = 1.0 / 6.0 * (1 + 3 * u + 3 * u * u - 2 * u * u * u);
  double b2 = 1.0 / 6.0 * (u * u * u);
  double b0dot = 1.0 / (6.0 * DT) * (3 - 6 * u + 3 * u * u);
  double b1dot = 1.0 / (6.0 * DT) * (3 + 6 * u - 6 * u * u);
  double b2dot = 1.0 / (6.0 * DT) * (3 * u * u);
  double b0dotdot = 1.0 / (6.0 * DT * DT) * (-6 + 6 * u);
  double b1dotdot = 1.0 / (6.0 * DT * DT) * (6 - 12 * u);
  double b2dotdot = 1.0 / (6.0 * DT * DT) * (6 * u);
  // Cache some values we use alot (SE(3) deltas between consecutive control points)
  Eigen::Matrix<double, 6, 1> omega_10 = log_se3(Inv_se3(pose0) * pose1);
  Eigen::Matrix<double, 6, 1> omega_21 = log_se3(Inv_se3(pose1) * pose2);
  Eigen::Matrix<double, 6, 1> omega_32 = log_se3(Inv_se3(pose2) * pose3);
  Eigen::Matrix4d omega_10_hat = hat_se3(omega_10);
  Eigen::Matrix4d omega_21_hat = hat_se3(omega_21);
  Eigen::Matrix4d omega_32_hat = hat_se3(omega_32);
  // Calculate interpolated poses and their first/second derivatives (product rule)
  Eigen::Matrix4d A0 = exp_se3(b0 * omega_10);
  Eigen::Matrix4d A1 = exp_se3(b1 * omega_21);
  Eigen::Matrix4d A2 = exp_se3(b2 * omega_32);
  Eigen::Matrix4d A0dot = b0dot * omega_10_hat * A0;
  Eigen::Matrix4d A1dot = b1dot * omega_21_hat * A1;
  Eigen::Matrix4d A2dot = b2dot * omega_32_hat * A2;
  Eigen::Matrix4d A0dotdot = b0dot * omega_10_hat * A0dot + b0dotdot * omega_10_hat * A0;
  Eigen::Matrix4d A1dotdot = b1dot * omega_21_hat * A1dot + b1dotdot * omega_21_hat * A1;
  Eigen::Matrix4d A2dotdot = b2dot * omega_32_hat * A2dot + b2dotdot * omega_32_hat * A2;
  // Get the interpolated pose
  Eigen::Matrix4d pose_interp = pose0 * A0 * A1 * A2;
  R_GtoI = pose_interp.block(0, 0, 3, 3).transpose();
  p_IinG = pose_interp.block(0, 3, 3, 1);
  // Get the interpolated velocities
  // NOTE: Rdot = R*skew(omega) => R^T*Rdot = skew(omega)
  Eigen::Matrix4d vel_interp = pose0 * (A0dot * A1 * A2 + A0 * A1dot * A2 + A0 * A1 * A2dot);
  w_IinI = vee(pose_interp.block(0, 0, 3, 3).transpose() * vel_interp.block(0, 0, 3, 3));
  v_IinG = vel_interp.block(0, 3, 3, 1);
  // Finally get the interpolated accelerations
  // NOTE: Rdot = R*skew(omega)
  // NOTE: Rdotdot = Rdot*skew(omega) + R*skew(alpha) => R^T*(Rdotdot-Rdot*skew(omega))=skew(alpha)
  Eigen::Matrix4d acc_interp = pose0 * (A0dotdot * A1 * A2 + A0 * A1dotdot * A2 + A0 * A1 * A2dotdot + 2 * A0dot * A1dot * A2 +
                                        2 * A0 * A1dot * A2dot + 2 * A0dot * A1 * A2dot);
  Eigen::Matrix3d omegaskew = pose_interp.block(0, 0, 3, 3).transpose() * vel_interp.block(0, 0, 3, 3);
  alpha_IinI = vee(pose_interp.block(0, 0, 3, 3).transpose() * (acc_interp.block(0, 0, 3, 3) - vel_interp.block(0, 0, 3, 3) * omegaskew));
  a_IinG = acc_interp.block(0, 3, 3, 1);
  return true;
}
// Find the two poses that bound the given timestamp: the pose at-or-before it
// (t0/pose0) and the pose strictly after it (t1/pose1).
// Returns true only when both bounds exist; unfound outputs stay at -1/identity.
bool BsplineSE3::find_bounding_poses(const double timestamp, const AlignedEigenMat4d &poses, double &t0, Eigen::Matrix4d &pose0, double &t1,
                                     Eigen::Matrix4d &pose1) {

  // Defaults returned when a bound does not exist
  t0 = -1;
  t1 = -1;
  pose0 = Eigen::Matrix4d::Identity();
  pose1 = Eigen::Matrix4d::Identity();

  // First entry with key >= timestamp, and first entry with key > timestamp
  auto it_lower = poses.lower_bound(timestamp);
  auto it_upper = poses.upper_bound(timestamp);

  // Resolve the pose at-or-before the query time:
  // either an exact timestamp hit, or one step back from the lower bound
  bool have_older = false;
  if (it_lower != poses.end()) {
    if (it_lower->first == timestamp) {
      have_older = true;
    } else if (it_lower != poses.begin()) {
      --it_lower;
      have_older = true;
    }
  }
  if (have_older) {
    t0 = it_lower->first;
    pose0 = it_lower->second;
  }

  // Resolve the pose strictly after the query time
  bool have_newer = (it_upper != poses.end());
  if (have_newer) {
    t1 = it_upper->first;
    pose1 = it_upper->second;
  }

  // When both bounds exist they must straddle the timestamp in order
  if (have_older && have_newer)
    assert(t0 < t1);
  return have_older && have_newer;
}
bool BsplineSE3::find_bounding_control_points(const double timestamp, const AlignedEigenMat4d &poses, double &t0, Eigen::Matrix4d &pose0,
                                              double &t1, Eigen::Matrix4d &pose1, double &t2, Eigen::Matrix4d &pose2, double &t3,
                                              Eigen::Matrix4d &pose3) {

  // Initialize the outputs to sentinel values in case we cannot find all four control points
  t0 = -1;
  t1 = -1;
  t2 = -1;
  t3 = -1;
  pose0 = Eigen::Matrix4d::Identity();
  pose1 = Eigen::Matrix4d::Identity();
  pose2 = Eigen::Matrix4d::Identity();
  pose3 = Eigen::Matrix4d::Identity();

  // The two middle control points are simply the poses bounding the query time
  if (!find_bounding_poses(timestamp, poses, t1, pose1, t2, pose2))
    return false;

  // Locate the two bounding poses in the map so we can step outwards from them
  auto iter_t1 = poses.find(t1);
  auto iter_t2 = poses.find(t2);

  // We need one pose strictly before t1, so t1 cannot be the first timestamp
  if (iter_t1 == poses.begin()) {
    return false;
  }

  // Step one entry backwards/forwards on *copies* so iter_t1/iter_t2 keep
  // pointing at the poses they were found at (the old `--iter_t1` aliasing
  // silently invalidated them)
  auto iter_t0 = iter_t1;
  --iter_t0;
  auto iter_t3 = iter_t2;
  ++iter_t3;

  // We need one pose strictly after t2, so t2 cannot be the last timestamp
  if (iter_t3 == poses.end()) {
    return false;
  }

  // Set the oldest one
  t0 = iter_t0->first;
  pose0 = iter_t0->second;

  // Set the newest one
  t3 = iter_t3->first;
  pose3 = iter_t3->second;

  // Timestamps must be strictly increasing at this point
  // (no need to re-check `success`: we already returned false on failure)
  assert(t0 < t1);
  assert(t1 < t2);
  assert(t2 < t3);

  // We found all four bounding control points
  return true;
}

View File

@@ -0,0 +1,212 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_BSPLINESE3_H
#define OV_CORE_BSPLINESE3_H
#include <Eigen/Eigen>
#include <map>
#include <vector>
#include "utils/print.h"
#include "utils/quat_ops.h"
namespace ov_core {
/**
* @brief B-Spline which performs interpolation over SE(3) manifold.
*
* This class implements the b-spline functionality that allows for interpolation over the \f$\mathbb{SE}(3)\f$ manifold.
* This is based off of the derivations from [Continuous-Time Visual-Inertial Odometry for Event
* Cameras](https://ieeexplore.ieee.org/abstract/document/8432102/) and [A Spline-Based Trajectory Representation for Sensor Fusion and
* Rolling Shutter Cameras](https://link.springer.com/article/10.1007/s11263-015-0811-3) with some additional derivations being available in
* [these notes](http://udel.edu/~pgeneva/downloads/notes/2018_notes_mueffler2017arxiv.pdf). The use of b-splines for \f$\mathbb{SE}(3)\f$
* interpolation has the following properties:
*
* 1. Local control, allowing the system to function online as well as in batch
* 2. \f$C^2\f$-continuity to enable inertial predictions and calculations
* 3. Good approximation of minimal torque trajectories
* 4. A parameterization of rigid-body motion devoid of singularities
*
* The key idea is to convert a set of trajectory points into a continuous-time *uniform cubic cumulative* b-spline.
* As compared to standard b-spline representations, the cumulative form ensures local continuity which is needed for on-manifold
* interpolation. We leverage the cubic b-spline to ensure \f$C^2\f$-continuity to ensure that we can calculate accelerations at any point
* along the trajectory. The general equations are the following
*
* \f{align*}{
* {}^{w}_{s}\mathbf{T}(u(t))
* &= {}^{w}_{i-1}\mathbf{T}~\mathbf{A}_0~\mathbf{A}_1~\mathbf{A}_2 \\
* \empty
* {}^{w}_{s}\dot{\mathbf{T}}(u(t)) &=
* {}^{w}_{i-1}\mathbf{T}
* \Big(
* \dot{\mathbf{A}}_0~\mathbf{A}_1~\mathbf{A}_2 +
* \mathbf{A}_0~\dot{\mathbf{A}}_1~\mathbf{A}_2 +
* \mathbf{A}_0~\mathbf{A}_1~\dot{\mathbf{A}}_2
* \Big) \\
* \empty
* {}^{w}_{s}\ddot{\mathbf{T}}(u(t)) &=
* {}^{w}_{i-1}\mathbf{T}
* \Big(
* \ddot{\mathbf{A}}_0~\mathbf{A}_1~\mathbf{A}_2 +
* \mathbf{A}_0~\ddot{\mathbf{A}}_1~\mathbf{A}_2 +
* \mathbf{A}_0~\mathbf{A}_1~\ddot{\mathbf{A}}_2 \nonumber\\
* &\hspace{4cm}
* + 2\dot{\mathbf{A}}_0\dot{\mathbf{A}}_1\mathbf{A}_2 +
* 2\mathbf{A}_0\dot{\mathbf{A}}_1\dot{\mathbf{A}}_2 +
* 2\dot{\mathbf{A}}_0\mathbf{A}_1\dot{\mathbf{A}}_2
* \Big) \\[1em]
* \empty
* {}^{i-1}_{i}\mathbf{\Omega} &= \mathrm{log}\big( {}^{w}_{i-1}\mathbf{T}^{-1}~{}^{w}_{i}\mathbf{T} \big) \\
* \mathbf{A}_j &= \mathrm{exp}\Big({B}_j(u(t))~{}^{i-1+j}_{i+j}\mathbf{\Omega} \Big) \\
* \dot{\mathbf{A}}_j &= \dot{B}_j(u(t)) ~{}^{i-1+j}_{i+j}\mathbf{\Omega}^\wedge ~\mathbf{A}_j \\
* \ddot{\mathbf{A}}_j &=
* \dot{B}_j(u(t)) ~{}^{i-1+j}_{i+j}\mathbf{\Omega}^\wedge ~\dot{\mathbf{A}}_j +
* \ddot{B}_j(u(t)) ~{}^{i-1+j}_{i+j}\mathbf{\Omega}^\wedge ~\mathbf{A}_j \\[1em]
* \empty
* {B}_0(u(t)) &= \frac{1}{3!}~(5+3u-3u^2+u^3) \\
* {B}_1(u(t)) &= \frac{1}{3!}~(1+3u+3u^2-2u^3) \\
* {B}_2(u(t)) &= \frac{1}{3!}~(u^3) \\[1em]
* \empty
* \dot{{B}}_0(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t}~(3-6u+3u^2) \\
* \dot{{B}}_1(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t}~(3+6u-6u^2) \\
* \dot{{B}}_2(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t}~(3u^2) \\[1em]
* \empty
* \ddot{{B}}_0(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t^2}~(-6+6u) \\
* \ddot{{B}}_1(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t^2}~(6-12u) \\
* \ddot{{B}}_2(u(t)) &= \frac{1}{3!}~\frac{1}{\Delta t^2}~(6u)
* \f}
*
* where \f$u(t_s)=(t_s-t_i)/\Delta t=(t_s-t_i)/(t_{i+1}-t_i)\f$ is used for all values of *u*.
 * Note that one needs to ensure that they use the SE(3) matrix exponential, logarithm, and hat operation for all above equations.
 * The indexes correspond to the two poses that are older and the two poses that are newer than the current time we want to get (i.e. i-1
* and i are less than s, while i+1 and i+2 are both greater than time s). Some additional derivations are available in [these
* notes](http://udel.edu/~pgeneva/downloads/notes/2018_notes_mueffler2017arxiv.pdf).
*/
class BsplineSE3 {

public:
  /**
   * @brief Default constructor
   */
  BsplineSE3() {}

  /**
   * @brief Will feed in a series of poses that we will then convert into control points.
   *
   * Our control points need to be uniformly spaced over the trajectory, thus given a trajectory we will
   * uniformly sample based on the average spacing between the pose points specified.
   *
   * @param traj_points Trajectory poses that we will convert into control points (timestamp(s), q_GtoI, p_IinG)
   */
  void feed_trajectory(std::vector<Eigen::VectorXd> traj_points);

  /**
   * @brief Gets the orientation and position at a given timestamp
   * @param timestamp Desired time to get the pose at
   * @param R_GtoI SO(3) orientation of the pose in the global frame
   * @param p_IinG Position of the pose in the global
   * @return False if we can't find it (timestamp not bracketed by control points)
   */
  bool get_pose(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG);

  /**
   * @brief Gets the angular and linear velocity at a given timestamp
   * @param timestamp Desired time to get the pose at
   * @param R_GtoI SO(3) orientation of the pose in the global frame
   * @param p_IinG Position of the pose in the global
   * @param w_IinI Angular velocity in the inertial frame
   * @param v_IinG Linear velocity in the global frame
   * @return False if we can't find it (timestamp not bracketed by control points)
   */
  bool get_velocity(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI, Eigen::Vector3d &v_IinG);

  /**
   * @brief Gets the angular and linear acceleration at a given timestamp
   * @param timestamp Desired time to get the pose at
   * @param R_GtoI SO(3) orientation of the pose in the global frame
   * @param p_IinG Position of the pose in the global
   * @param w_IinI Angular velocity in the inertial frame
   * @param v_IinG Linear velocity in the global frame
   * @param alpha_IinI Angular acceleration in the inertial frame
   * @param a_IinG Linear acceleration in the global frame
   * @return False if we can't find it (timestamp not bracketed by control points)
   */
  bool get_acceleration(double timestamp, Eigen::Matrix3d &R_GtoI, Eigen::Vector3d &p_IinG, Eigen::Vector3d &w_IinI,
                        Eigen::Vector3d &v_IinG, Eigen::Vector3d &alpha_IinI, Eigen::Vector3d &a_IinG);

  /// Returns the simulation start time that we should start simulating from
  double get_start_time() { return timestamp_start; }

protected:
  /// Uniform sampling time between our control points
  double dt;

  /// Start time of the system
  double timestamp_start;

  /// Type definition of our aligned eigen4d matrix: https://eigen.tuxfamily.org/dox/group__TopicStlContainers.html
  /// The aligned allocator is required since Eigen::Matrix4d is a fixed-size vectorizable type.
  typedef std::map<double, Eigen::Matrix4d, std::less<double>, Eigen::aligned_allocator<std::pair<const double, Eigen::Matrix4d>>>
      AlignedEigenMat4d;

  /// Our SE3 control poses (R_ItoG, p_IinG), keyed by their timestamp
  AlignedEigenMat4d control_points;

  /**
   * @brief Will find the two bounding poses for a given timestamp.
   *
   * This will loop through the passed map of poses and find two bounding poses.
   * If there are no bounding poses then this will return false.
   *
   * @param timestamp Desired timestamp we want to get two bounding poses of
   * @param poses Map of poses and timestamps
   * @param t0 Timestamp of the first pose
   * @param pose0 SE(3) pose of the first pose
   * @param t1 Timestamp of the second pose
   * @param pose1 SE(3) pose of the second pose
   * @return False if we are unable to find bounding poses
   */
  static bool find_bounding_poses(const double timestamp, const AlignedEigenMat4d &poses, double &t0, Eigen::Matrix4d &pose0, double &t1,
                                  Eigen::Matrix4d &pose1);

  /**
   * @brief Will find two older poses and two newer poses for the current timestamp
   *
   * @param timestamp Desired timestamp we want to get four bounding poses of
   * @param poses Map of poses and timestamps
   * @param t0 Timestamp of the first pose
   * @param pose0 SE(3) pose of the first pose
   * @param t1 Timestamp of the second pose
   * @param pose1 SE(3) pose of the second pose
   * @param t2 Timestamp of the third pose
   * @param pose2 SE(3) pose of the third pose
   * @param t3 Timestamp of the fourth pose
   * @param pose3 SE(3) pose of the fourth pose
   * @return False if we are unable to find bounding poses
   */
  static bool find_bounding_control_points(const double timestamp, const AlignedEigenMat4d &poses, double &t0, Eigen::Matrix4d &pose0,
                                           double &t1, Eigen::Matrix4d &pose1, double &t2, Eigen::Matrix4d &pose2, double &t3,
                                           Eigen::Matrix4d &pose3);
};
} // namespace ov_core
#endif // OV_CORE_BSPLINESE3_H

View File

@@ -0,0 +1,369 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <cmath>
#include <deque>
#include <sstream>
#include <unistd.h>
#include <vector>
#include <cv_bridge/cv_bridge.h>
#include <ros/ros.h>
#include <rosbag/bag.h>
#include <rosbag/view.h>
#include <sensor_msgs/Image.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "cam/CamRadtan.h"
#include "track/TrackAruco.h"
#include "track/TrackDescriptor.h"
#include "track/TrackKLT.h"
#include "utils/opencv_yaml_parse.h"
#include "utils/print.h"
using namespace ov_core;
// Our feature extractor
TrackBase *extractor;
// FPS counter, and other statistics
// https://gamedev.stackexchange.com/a/83174
int frames = 0;
int num_lostfeats = 0;
int num_margfeats = 0;
int featslengths = 0;
int clone_states = 10;
std::deque<double> clonetimes;
ros::Time time_start;
// Our master function for tracking
void handle_stereo(double time0, double time1, cv::Mat img0, cv::Mat img1);
// Main function
/**
 * Stereo feature-tracking test driver.
 *
 * Reads a stereo rosbag, pairs up the left/right camera images, and pushes each
 * pair through the global feature extractor (see handle_stereo). Tracker and
 * bag settings come from ROS parameters and an optional YAML config, whose
 * path may also be given as the first command-line argument.
 */
int main(int argc, char **argv) {

  // Ensure we have a path, if the user passes it then we should use it
  std::string config_path = "unset_path.txt";
  if (argc > 1) {
    config_path = argv[1];
  }

  // Initialize this as a ROS node
  ros::init(argc, argv, "test_tracking");
  auto nh = std::make_shared<ros::NodeHandle>("~");
  nh->param<std::string>("config_path", config_path, config_path);

  // Load parameters (YAML values can be overridden by ROS params)
  auto parser = std::make_shared<ov_core::YamlParser>(config_path, false);
  parser->set_node_handler(nh);

  // Verbosity
  std::string verbosity = "INFO";
  parser->parse_config("verbosity", verbosity);
  ov_core::Printer::setPrintLevel(verbosity);

  // Our camera topics (left and right stereo)
  std::string topic_camera0, topic_camera1;
  nh->param<std::string>("topic_camera0", topic_camera0, "/cam0/image_raw");
  nh->param<std::string>("topic_camera1", topic_camera1, "/cam1/image_raw");
  parser->parse_external("relative_config_imucam", "cam" + std::to_string(0), "rostopic", topic_camera0);
  parser->parse_external("relative_config_imucam", "cam" + std::to_string(1), "rostopic", topic_camera1);

  // Location of the ROS bag we want to read in
  std::string path_to_bag;
  nh->param<std::string>("path_bag", path_to_bag, "/home/patrick/datasets/eth/V1_01_easy.bag");
  // nh->param<std::string>("path_bag", path_to_bag, "/home/patrick/datasets/open_vins/aruco_room_01.bag");
  PRINT_INFO("ros bag path is: %s\n", path_to_bag.c_str());

  // Get our start location and how much of the bag we want to play
  // Make the bag duration < 0 to just process to the end of the bag
  double bag_start, bag_durr;
  nh->param<double>("bag_start", bag_start, 0);
  nh->param<double>("bag_durr", bag_durr, -1);

  //===================================================================================
  //===================================================================================
  //===================================================================================

  // Parameters for our extractor (defaults, overridable via config)
  int num_pts = 400;
  int num_aruco = 1024;
  int fast_threshold = 15;
  int grid_x = 9;
  int grid_y = 7;
  int min_px_dist = 10;
  double knn_ratio = 0.70;
  bool do_downsizing = false;
  bool use_stereo = false;
  parser->parse_config("num_pts", num_pts, false);
  parser->parse_config("num_aruco", num_aruco, false);
  parser->parse_config("clone_states", clone_states, false);
  parser->parse_config("fast_threshold", fast_threshold, false);
  parser->parse_config("grid_x", grid_x, false);
  parser->parse_config("grid_y", grid_y, false);
  parser->parse_config("min_px_dist", min_px_dist, false);
  parser->parse_config("knn_ratio", knn_ratio, false);
  parser->parse_config("do_downsizing", do_downsizing, false);
  parser->parse_config("use_stereo", use_stereo, false);

  // Histogram method used to pre-process (equalize) images before tracking
  ov_core::TrackBase::HistogramMethod method;
  std::string histogram_method_str = "HISTOGRAM";
  parser->parse_config("histogram_method", histogram_method_str, false);
  if (histogram_method_str == "NONE") {
    method = ov_core::TrackBase::NONE;
  } else if (histogram_method_str == "HISTOGRAM") {
    method = ov_core::TrackBase::HISTOGRAM;
  } else if (histogram_method_str == "CLAHE") {
    method = ov_core::TrackBase::CLAHE;
  } else {
    printf(RED "invalid feature histogram specified:\n" RESET);
    printf(RED "\t- NONE\n" RESET);
    printf(RED "\t- HISTOGRAM\n" RESET);
    printf(RED "\t- CLAHE\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Debug print!
  PRINT_DEBUG("max features: %d\n", num_pts);
  PRINT_DEBUG("max aruco: %d\n", num_aruco);
  PRINT_DEBUG("clone states: %d\n", clone_states);
  PRINT_DEBUG("grid size: %d x %d\n", grid_x, grid_y);
  PRINT_DEBUG("fast threshold: %d\n", fast_threshold);
  PRINT_DEBUG("min pixel distance: %d\n", min_px_dist);
  PRINT_DEBUG("downsize aruco image: %d\n", do_downsizing);

  // Fake camera info (we don't need this, as we are not using the normalized coordinates for anything)
  // Identity intrinsics and zero distortion are fine since only raw pixels are tracked here
  std::unordered_map<size_t, std::shared_ptr<CamBase>> cameras;
  for (int i = 0; i < 2; i++) {
    Eigen::Matrix<double, 8, 1> cam0_calib;
    cam0_calib << 1, 1, 0, 0, 0, 0, 0, 0;
    std::shared_ptr<CamBase> camera_calib = std::make_shared<CamRadtan>(100, 100);
    camera_calib->set_value(cam0_calib);
    cameras.insert({i, camera_calib});
  }

  // Lets make a feature extractor
  // (swap the commented lines to test the descriptor or aruco trackers instead)
  extractor = new TrackKLT(cameras, num_pts, num_aruco, !use_stereo, method, fast_threshold, grid_x, grid_y, min_px_dist);
  // extractor = new TrackDescriptor(cameras, num_pts, num_aruco, !use_stereo, method, fast_threshold, grid_x, grid_y, min_px_dist,
  // knn_ratio);
  // extractor = new TrackAruco(cameras, num_aruco, !use_stereo, method, do_downsizing);

  //===================================================================================
  //===================================================================================
  //===================================================================================

  // Load rosbag here, and find messages we can play
  rosbag::Bag bag;
  bag.open(path_to_bag, rosbag::bagmode::Read);

  // We should load the bag as a view
  // Here we go from beginning of the bag to the end of the bag
  rosbag::View view_full;
  rosbag::View view;

  // Start a few seconds in from the full view time
  // If we have a negative duration then use the full bag length
  view_full.addQuery(bag);
  ros::Time time_init = view_full.getBeginTime();
  time_init += ros::Duration(bag_start);
  ros::Time time_finish = (bag_durr < 0) ? view_full.getEndTime() : time_init + ros::Duration(bag_durr);
  PRINT_DEBUG("time start = %.6f\n", time_init.toSec());
  PRINT_DEBUG("time end   = %.6f\n", time_finish.toSec());
  view.addQuery(bag, time_init, time_finish);

  // Check to make sure we have data to play
  if (view.size() == 0) {
    PRINT_ERROR(RED "No messages to play on specified topics.  Exiting.\n" RESET);
    ros::shutdown();
    return EXIT_FAILURE;
  }

  // Record the start time for our FPS counter
  time_start = ros::Time::now();

  // Our stereo pair we have
  bool has_left = false;
  bool has_right = false;
  cv::Mat img0, img1;
  double time0 = time_init.toSec();
  double time1 = time_init.toSec();

  // Step through the rosbag
  for (const rosbag::MessageInstance &m : view) {

    // If ros is wants us to stop, break out
    if (!ros::ok())
      break;

    // Handle LEFT camera
    sensor_msgs::Image::ConstPtr s0 = m.instantiate<sensor_msgs::Image>();
    if (s0 != nullptr && m.getTopic() == topic_camera0) {
      // Get the image
      cv_bridge::CvImageConstPtr cv_ptr;
      try {
        cv_ptr = cv_bridge::toCvShare(s0, sensor_msgs::image_encodings::MONO8);
      } catch (cv_bridge::Exception &e) {
        PRINT_ERROR(RED "cv_bridge exception: %s\n" RESET, e.what());
        continue;
      }
      // Save to our temp variable
      has_left = true;
      cv::equalizeHist(cv_ptr->image, img0);
      // img0 = cv_ptr->image.clone();
      time0 = cv_ptr->header.stamp.toSec();
    }

    // Handle RIGHT camera
    sensor_msgs::Image::ConstPtr s1 = m.instantiate<sensor_msgs::Image>();
    if (s1 != nullptr && m.getTopic() == topic_camera1) {
      // Get the image
      cv_bridge::CvImageConstPtr cv_ptr;
      try {
        cv_ptr = cv_bridge::toCvShare(s1, sensor_msgs::image_encodings::MONO8);
      } catch (cv_bridge::Exception &e) {
        PRINT_ERROR(RED "cv_bridge exception: %s\n" RESET, e.what());
        continue;
      }
      // Save to our temp variable
      has_right = true;
      cv::equalizeHist(cv_ptr->image, img1);
      // img1 = cv_ptr->image.clone();
      time1 = cv_ptr->header.stamp.toSec();
    }

    // If we have both left and right, then process
    // NOTE(review): frames are paired purely by arrival order; the two
    // timestamps are not checked to match — assumes a synchronized stereo bag
    if (has_left && has_right) {
      // process
      handle_stereo(time0, time1, img0, img1);
      // reset bools
      has_left = false;
      has_right = false;
    }
  }

  // Done!
  return EXIT_SUCCESS;
}
/**
 * Processes a new stereo pair with the global feature extractor.
 *
 * A circular mask bouncing around the image is animated to exercise the
 * tracker's masking support, the pair is fed to the extractor, the resulting
 * tracks are displayed, and lost/marginalized feature statistics are
 * accumulated. Roughly every 60 frames the FPS and per-frame feature
 * statistics are printed and the counters reset.
 *
 * @param time0 Timestamp of the left image (used as the pair timestamp)
 * @param time1 Timestamp of the right image
 * @param img0 Left image (8-bit grayscale)
 * @param img1 Right image (8-bit grayscale)
 */
void handle_stereo(double time0, double time1, cv::Mat img0, cv::Mat img1) {

  // Animate our dynamic mask moving
  // Very simple ball bounding around the screen example
  cv::Mat mask = cv::Mat::zeros(cv::Size(img0.cols, img0.rows), CV_8UC1);
  static cv::Point2f ball_center;
  static cv::Point2f ball_velocity;
  // First call: statics are zero-initialized, so seed the ball at the center
  if (ball_velocity.x == 0 || ball_velocity.y == 0) {
    ball_center.x = (float)img0.cols / 2.0f;
    ball_center.y = (float)img0.rows / 2.0f;
    ball_velocity.x = 2.5;
    ball_velocity.y = 2.5;
  }
  ball_center += ball_velocity;
  // Bounce off the image borders
  if (ball_center.x < 0 || (int)ball_center.x > img0.cols)
    ball_velocity.x *= -1;
  if (ball_center.y < 0 || (int)ball_center.y > img0.rows)
    ball_velocity.y *= -1;
  cv::circle(mask, ball_center, 100, cv::Scalar(255), cv::FILLED);

  // Process this new image (same mask used for both cameras)
  ov_core::CameraData message;
  message.timestamp = time0;
  message.sensor_ids.push_back(0);
  message.sensor_ids.push_back(1);
  message.images.push_back(img0);
  message.images.push_back(img1);
  message.masks.push_back(mask);
  message.masks.push_back(mask);
  extractor->feed_new_camera(message);

  // Display the resulting tracks
  cv::Mat img_active, img_history;
  extractor->display_active(img_active, 255, 0, 0, 0, 0, 255);
  extractor->display_history(img_history, 255, 255, 0, 255, 255, 255);

  // Show our image!
  cv::imshow("Active Tracks", img_active);
  cv::imshow("Track History", img_history);
  cv::waitKey(1);

  // Get lost tracks (features with no measurement at or after this time)
  std::shared_ptr<FeatureDatabase> database = extractor->get_feature_database();
  std::vector<std::shared_ptr<Feature>> feats_lost = database->features_not_containing_newer(time0);
  num_lostfeats += feats_lost.size();

  // Mark these feature pointers as deleted
  for (size_t i = 0; i < feats_lost.size(); i++) {
    // Total number of measurements across all cameras
    int total_meas = 0;
    for (auto const &pair : feats_lost[i]->timestamps) {
      total_meas += (int)pair.second.size();
    }
    // Update stats
    featslengths += total_meas;
    feats_lost[i]->to_delete = true;
  }

  // Push back the current time, as a clone time
  clonetimes.push_back(time0);

  // Marginalize features once the clone window of `clone_states` frames is full
  if ((int)clonetimes.size() >= clone_states) {
    // Remove features that have reached their max track length
    double margtime = clonetimes.at(0);
    clonetimes.pop_front();
    std::vector<std::shared_ptr<Feature>> feats_marg = database->features_containing(margtime);
    num_margfeats += feats_marg.size();
    // Delete these feature pointers
    for (size_t i = 0; i < feats_marg.size(); i++) {
      feats_marg[i]->to_delete = true;
    }
  }

  // Tell the feature database to delete old features
  database->cleanup();

  // Debug print out what our current processing speed is
  // We want the FPS to be as high as possible
  ros::Time time_curr = ros::Time::now();
  // if (time_curr.toSec()-time_start.toSec() > 2) {
  if (frames > 60) {
    // Calculate the FPS and per-frame statistics
    // NOTE: guard the track-length ratio so we do not print nan (0.0/0) when no
    // features were lost during this statistics window
    double fps = (double)frames / (time_curr.toSec() - time_start.toSec());
    double lpf = (double)num_lostfeats / frames;
    double fpf = (num_lostfeats > 0) ? (double)featslengths / num_lostfeats : 0.0;
    double mpf = (double)num_margfeats / frames;
    // DEBUG PRINT OUT
    PRINT_DEBUG("fps = %.2f | lost_feats/frame = %.2f | track_length/lost_feat = %.2f | marg_tracks/frame = %.2f\n", fps, lpf, fpf, mpf);
    // Reset variables
    frames = 0;
    time_start = time_curr;
    num_lostfeats = 0;
    num_margfeats = 0;
    featslengths = 0;
  }
  frames++;
}

234
ov_core/src/test_webcam.cpp Normal file
View File

@@ -0,0 +1,234 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <cmath>
#include <deque>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <unistd.h>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/filesystem.hpp>
#include "cam/CamRadtan.h"
#include "track/TrackAruco.h"
#include "track/TrackDescriptor.h"
#include "track/TrackKLT.h"
#include "utils/opencv_yaml_parse.h"
#include "utils/print.h"
#if ROS_AVAILABLE == 1
#include <ros/ros.h>
#endif
using namespace ov_core;
// Our feature extractor
TrackBase *extractor;
// Main function
/**
 * Webcam feature-tracking test driver.
 *
 * Opens the first webcam, feeds each grayscale frame (with a fake 30 Hz
 * timestamp) through the feature extractor, displays the tracks, and cleans up
 * lost/marginalized features from the database. Press ESC to quit.
 */
int main(int argc, char **argv) {

  // Ensure we have a path, if the user passes it then we should use it
  std::string config_path = "unset_path.txt";
  if (argc > 1) {
    config_path = argv[1];
  }

#if ROS_AVAILABLE == 1
  // Initialize this as a ROS node
  ros::init(argc, argv, "test_webcam");
  auto nh = std::make_shared<ros::NodeHandle>("~");
  nh->param<std::string>("config_path", config_path, config_path);
#endif

  // Load parameters
  auto parser = std::make_shared<ov_core::YamlParser>(config_path, false);
#if ROS_AVAILABLE == 1
  parser->set_node_handler(nh);
#endif

  // Verbosity
  std::string verbosity = "INFO";
  parser->parse_config("verbosity", verbosity);
  ov_core::Printer::setPrintLevel(verbosity);

  // Defaults (overridable via config)
  int num_pts = 500;
  int num_aruco = 1024;
  int clone_states = 20;
  int fast_threshold = 10;
  int grid_x = 5;
  int grid_y = 4;
  int min_px_dist = 20;
  double knn_ratio = 0.85;
  bool do_downsizing = false;
  bool use_stereo = false;
  parser->parse_config("num_pts", num_pts, false);
  parser->parse_config("num_aruco", num_aruco, false);
  parser->parse_config("clone_states", clone_states, false);
  parser->parse_config("fast_threshold", fast_threshold, false);
  parser->parse_config("grid_x", grid_x, false);
  parser->parse_config("grid_y", grid_y, false);
  parser->parse_config("min_px_dist", min_px_dist, false);
  parser->parse_config("knn_ratio", knn_ratio, false);
  parser->parse_config("do_downsizing", do_downsizing, false);
  parser->parse_config("use_stereo", use_stereo, false);

  // Histogram method used to pre-process (equalize) images before tracking
  ov_core::TrackBase::HistogramMethod method;
  std::string histogram_method_str = "HISTOGRAM";
  parser->parse_config("histogram_method", histogram_method_str, false);
  if (histogram_method_str == "NONE") {
    method = ov_core::TrackBase::NONE;
  } else if (histogram_method_str == "HISTOGRAM") {
    method = ov_core::TrackBase::HISTOGRAM;
  } else if (histogram_method_str == "CLAHE") {
    method = ov_core::TrackBase::CLAHE;
  } else {
    printf(RED "invalid feature histogram specified:\n" RESET);
    printf(RED "\t- NONE\n" RESET);
    printf(RED "\t- HISTOGRAM\n" RESET);
    printf(RED "\t- CLAHE\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  // Debug print!
  PRINT_DEBUG("max features: %d\n", num_pts);
  PRINT_DEBUG("max aruco: %d\n", num_aruco);
  PRINT_DEBUG("clone states: %d\n", clone_states);
  PRINT_DEBUG("grid size: %d x %d\n", grid_x, grid_y);
  PRINT_DEBUG("fast threshold: %d\n", fast_threshold);
  PRINT_DEBUG("min pixel distance: %d\n", min_px_dist);
  PRINT_DEBUG("downsize aruco image: %d\n", do_downsizing);

  // Fake camera info (we don't need this, as we are not using the normalized coordinates for anything)
  // Identity intrinsics and zero distortion are fine since only raw pixels are tracked here
  std::unordered_map<size_t, std::shared_ptr<CamBase>> cameras;
  for (int i = 0; i < 2; i++) {
    Eigen::Matrix<double, 8, 1> cam0_calib;
    cam0_calib << 1, 1, 0, 0, 0, 0, 0, 0;
    std::shared_ptr<CamBase> camera_calib = std::make_shared<CamRadtan>(100, 100);
    camera_calib->set_value(cam0_calib);
    cameras.insert({i, camera_calib});
  }

  // Lets make a feature extractor
  // (swap the commented lines to test the descriptor or aruco trackers instead)
  extractor = new TrackKLT(cameras, num_pts, num_aruco, !use_stereo, method, fast_threshold, grid_x, grid_y, min_px_dist);
  // extractor = new TrackDescriptor(cameras, num_pts, num_aruco, !use_stereo, method, fast_threshold, grid_x, grid_y, min_px_dist,
  // knn_ratio); extractor = new TrackAruco(cameras, num_aruco, !use_stereo, method, do_downsizing);

  //===================================================================================
  //===================================================================================
  //===================================================================================

  // Open the first webcam (0=laptop cam, 1=usb device)
  cv::VideoCapture cap;
  if (!cap.open(0)) {
    PRINT_ERROR(RED "Unable to open a webcam feed!\n" RESET);
    return EXIT_FAILURE;
  }

  //===================================================================================
  //===================================================================================
  //===================================================================================

  // Loop forever until we break out
  double current_time = 0.0;
  std::deque<double> clonetimes;
  while (true) {

    // Get the next frame (and fake advance time forward at 30 Hz)
    cv::Mat frame;
    cap >> frame;
    current_time += 1.0 / 30.0;

    // Stop capture if no more image feed
    if (frame.empty())
      break;

    // Stop capturing by pressing ESC
    if (cv::waitKey(10) == 27)
      break;

    // Convert to grayscale if not
    if (frame.channels() != 1)
      cv::cvtColor(frame, frame, cv::COLOR_RGB2GRAY);

    // Else lets track this image (single camera, empty mask)
    ov_core::CameraData message;
    message.timestamp = current_time;
    message.sensor_ids.push_back(0);
    message.images.push_back(frame);
    message.masks.push_back(cv::Mat::zeros(cv::Size(frame.cols, frame.rows), CV_8UC1));
    extractor->feed_new_camera(message);

    // Display the resulting tracks
    cv::Mat img_active, img_history;
    extractor->display_active(img_active, 255, 0, 0, 0, 0, 255);
    extractor->display_history(img_history, 255, 255, 0, 255, 255, 255);

    // Show our image!
    cv::imshow("Active Tracks", img_active);
    cv::imshow("Track History", img_history);
    cv::waitKey(1);

    // Get lost tracks (features with no measurement at or after this time)
    std::shared_ptr<FeatureDatabase> database = extractor->get_feature_database();
    std::vector<std::shared_ptr<Feature>> feats_lost = database->features_not_containing_newer(current_time);

    // Mark these feature pointers as deleted
    // NOTE: unlike test_tracking we keep no track-length statistics here, so
    // the per-feature measurement tally that used to live in this loop (a
    // leftover copy that was computed but never used) has been removed
    for (size_t i = 0; i < feats_lost.size(); i++) {
      feats_lost[i]->to_delete = true;
    }

    // Push back the current time, as a clone time
    clonetimes.push_back(current_time);

    // Marginalize features once the clone window of `clone_states` frames is full
    if ((int)clonetimes.size() >= clone_states) {
      // Remove features that have reached their max track length
      double margtime = clonetimes.at(0);
      clonetimes.pop_front();
      std::vector<std::shared_ptr<Feature>> feats_marg = database->features_containing(margtime);
      // Delete these feature pointers
      for (size_t i = 0; i < feats_marg.size(); i++) {
        feats_marg[i]->to_delete = true;
      }
    }

    // Tell the feature database to delete old features
    database->cleanup();
  }

  // Done!
  return EXIT_SUCCESS;
}

View File

@@ -0,0 +1,179 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_GRIDER_FAST_H
#define OV_CORE_GRIDER_FAST_H
#include <Eigen/Eigen>
#include <functional>
#include <iostream>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include "utils/opencv_lambda_body.h"
namespace ov_core {
/**
* @brief Extracts FAST features in a grid pattern.
*
* As compared to just extracting fast features over the entire image,
* we want to have as uniform of extractions as possible over the image plane.
* Thus we split the image into a bunch of small grids, and extract points in each.
* We then pick enough top points in each grid so that we have the total number of desired points.
*/
class Grider_FAST {
public:
/**
* @brief Compare keypoints based on their response value.
* @param first First keypoint
* @param second Second keypoint
*
* We want to have the keypoints with the highest values!
* See: https://stackoverflow.com/a/10910921
*/
static bool compare_response(cv::KeyPoint first, cv::KeyPoint second) { return first.response > second.response; }
/**
* @brief This function will perform grid extraction using FAST.
* @param img Image we will do FAST extraction on
* @param mask Region of the image we do not want to extract features in (255 = do not detect features)
* @param pts vector of extracted points we will return
* @param num_features max number of features we want to extract
* @param grid_x size of grid in the x-direction / u-direction
* @param grid_y size of grid in the y-direction / v-direction
* @param threshold FAST threshold paramter (10 is a good value normally)
* @param nonmaxSuppression if FAST should perform non-max suppression (true normally)
*
* Given a specified grid size, this will try to extract fast features from each grid.
* It will then return the best from each grid in the return vector.
*/
static void perform_griding(const cv::Mat &img, const cv::Mat &mask, std::vector<cv::KeyPoint> &pts, int num_features, int grid_x,
int grid_y, int threshold, bool nonmaxSuppression) {
// We want to have equally distributed features
// NOTE: If we have more grids than number of total points, we calc the biggest grid we can do
// NOTE: Thus if we extract 1 point per grid we have
// NOTE: -> 1 = num_features / (grid_x * grid_y))
// NOTE: -> grid_x = ratio * grid_y (keep the original grid ratio)
// NOTE: -> grid_y = sqrt(num_features / ratio)
if (num_features < grid_x * grid_y) {
double ratio = (double)grid_x / (double)grid_y;
grid_y = std::ceil(std::sqrt(num_features / ratio));
grid_x = std::ceil(grid_y * ratio);
}
int num_features_grid = (int)((double)num_features / (double)(grid_x * grid_y)) + 1;
assert(grid_x > 0);
assert(grid_y > 0);
assert(num_features_grid > 0);
// Calculate the size our extraction boxes should be
int size_x = img.cols / grid_x;
int size_y = img.rows / grid_y;
// Make sure our sizes are not zero
assert(size_x > 0);
assert(size_y > 0);
// Parallelize our 2d grid extraction!!
int ct_cols = std::floor(img.cols / size_x);
int ct_rows = std::floor(img.rows / size_y);
std::vector<std::vector<cv::KeyPoint>> collection(ct_cols * ct_rows);
parallel_for_(cv::Range(0, ct_cols * ct_rows), LambdaBody([&](const cv::Range &range) {
for (int r = range.start; r < range.end; r++) {
// Calculate what cell xy value we are in
int x = r % ct_cols * size_x;
int y = r / ct_cols * size_y;
// Skip if we are out of bounds
if (x + size_x > img.cols || y + size_y > img.rows)
continue;
// Calculate where we should be extracting from
cv::Rect img_roi = cv::Rect(x, y, size_x, size_y);
// Extract FAST features for this part of the image
std::vector<cv::KeyPoint> pts_new;
cv::FAST(img(img_roi), pts_new, threshold, nonmaxSuppression);
// Now lets get the top number from this
std::sort(pts_new.begin(), pts_new.end(), Grider_FAST::compare_response);
// Append the "best" ones to our vector
// Note that we need to "correct" the point u,v since we extracted it in a ROI
// So we should append the location of that ROI in the image
for (size_t i = 0; i < (size_t)num_features_grid && i < pts_new.size(); i++) {
// Create keypoint
cv::KeyPoint pt_cor = pts_new.at(i);
pt_cor.pt.x += (float)x;
pt_cor.pt.y += (float)y;
// Reject if out of bounds (shouldn't be possible...)
if ((int)pt_cor.pt.x < 0 || (int)pt_cor.pt.x > img.cols || (int)pt_cor.pt.y < 0 || (int)pt_cor.pt.y > img.rows)
continue;
// Check if it is in the mask region
// NOTE: mask has max value of 255 (white) if it should be removed
if (mask.at<uint8_t>((int)pt_cor.pt.y, (int)pt_cor.pt.x) > 127)
continue;
collection.at(r).push_back(pt_cor);
}
}
}));
// Combine all the collections into our single vector
for (size_t r = 0; r < collection.size(); r++) {
pts.insert(pts.end(), collection.at(r).begin(), collection.at(r).end());
}
// Return if no points
if (pts.empty())
return;
// Sub-pixel refinement parameters
cv::Size win_size = cv::Size(5, 5);
cv::Size zero_zone = cv::Size(-1, -1);
cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 20, 0.001);
// Get vector of points
std::vector<cv::Point2f> pts_refined;
for (size_t i = 0; i < pts.size(); i++) {
pts_refined.push_back(pts.at(i).pt);
}
// Finally get sub-pixel for all extracted features
cv::cornerSubPix(img, pts_refined, win_size, zero_zone, term_crit);
// Save the refined points!
for (size_t i = 0; i < pts.size(); i++) {
pts.at(i).pt = pts_refined.at(i);
}
}
};
} // namespace ov_core
#endif /* OV_CORE_GRIDER_FAST_H */

View File

@@ -0,0 +1,232 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "TrackAruco.h"
#include "utils/print.h"
using namespace ov_core;
void TrackAruco::feed_new_camera(const CameraData &message) {

  // Sanity check: every image must have a matching sensor id and mask
  const bool sizes_ok = !message.sensor_ids.empty() && message.sensor_ids.size() == message.images.size() &&
                        message.images.size() == message.masks.size();
  if (!sizes_ok) {
    PRINT_ERROR(RED "[ERROR]: MESSAGE DATA SIZES DO NOT MATCH OR EMPTY!!!\n" RESET);
    PRINT_ERROR(RED "[ERROR]:   - message.sensor_ids.size() = %zu\n" RESET, message.sensor_ids.size());
    PRINT_ERROR(RED "[ERROR]:   - message.images.size() = %zu\n" RESET, message.images.size());
    PRINT_ERROR(RED "[ERROR]:   - message.masks.size() = %zu\n" RESET, message.masks.size());
    std::exit(EXIT_FAILURE);
  }

  // Aruco has no stereo-specific logic, so each camera image is processed
  // independently with the monocular routine (in parallel over cameras)
#if ENABLE_ARUCO_TAGS
  const int total_images = (int)message.images.size();
  parallel_for_(cv::Range(0, total_images), LambdaBody([&](const cv::Range &range) {
                  for (int idx = range.start; idx < range.end; idx++) {
                    perform_tracking(message.timestamp, message.images.at(idx), message.sensor_ids.at(idx), message.masks.at(idx));
                  }
                }));
#else
  PRINT_ERROR(RED "[ERROR]: you have not compiled with aruco tag support!!!\n" RESET);
  std::exit(EXIT_FAILURE);
#endif
}
#if ENABLE_ARUCO_TAGS
// Monocular aruco detection for a single camera image: pre-process, detect markers,
// undistort each surviving corner, and push the measurements into the feature database.
void TrackAruco::perform_tracking(double timestamp, const cv::Mat &imgin, size_t cam_id, const cv::Mat &maskin) {

  // Start timing
  rT1 = boost::posix_time::microsec_clock::local_time();

  // Lock this data feed for this camera (keeps the visualizer from reading half-updated state)
  std::lock_guard<std::mutex> lck(mtx_feeds.at(cam_id));

  // Histogram equalize (pre-processing method selected at construction time)
  cv::Mat img;
  if (histogram_method == HistogramMethod::HISTOGRAM) {
    cv::equalizeHist(imgin, img);
  } else if (histogram_method == HistogramMethod::CLAHE) {
    double eq_clip_limit = 10.0;
    cv::Size eq_win_size = cv::Size(8, 8);
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
    clahe->apply(imgin, img);
  } else {
    // HistogramMethod::NONE: use the input image as-is (shallow copy, shares the buffer)
    img = imgin;
  }

  // Clear the old data from the last timestep
  ids_aruco[cam_id].clear();
  corners[cam_id].clear();
  rejects[cam_id].clear();

  // If we are downsizing, then downsize (half resolution speeds up marker detection)
  cv::Mat img0;
  if (do_downsizing) {
    cv::pyrDown(img, img0, cv::Size(img.cols / 2, img.rows / 2));
  } else {
    img0 = img;
  }

  //===================================================================================
  //===================================================================================
  // Perform extraction (accepted corners/ids and rejected candidates are stored per camera)
  cv::aruco::detectMarkers(img0, aruco_dict, corners[cam_id], ids_aruco[cam_id], aruco_params, rejects[cam_id]);
  rT2 = boost::posix_time::microsec_clock::local_time();

  //===================================================================================
  //===================================================================================
  // If we downsized, scale all our u,v measurements by a factor of two
  // Note: we do this so we can use these results for visualization later
  // Note: and so that the uv added is in the same image size
  if (do_downsizing) {
    for (size_t i = 0; i < corners[cam_id].size(); i++) {
      for (size_t j = 0; j < corners[cam_id].at(i).size(); j++) {
        corners[cam_id].at(i).at(j).x *= 2;
        corners[cam_id].at(i).at(j).y *= 2;
      }
    }
    for (size_t i = 0; i < rejects[cam_id].size(); i++) {
      for (size_t j = 0; j < rejects[cam_id].at(i).size(); j++) {
        rejects[cam_id].at(i).at(j).x *= 2;
        rejects[cam_id].at(i).at(j).y *= 2;
      }
    }
  }

  //===================================================================================
  //===================================================================================
  // ID vectors, of all currently tracked IDs
  std::vector<size_t> ids_new;
  std::vector<cv::KeyPoint> pts_new;

  // Append to our feature database this new information
  for (size_t i = 0; i < ids_aruco[cam_id].size(); i++) {
    // Skip if ID is greater than our max
    if (ids_aruco[cam_id].at(i) > max_tag_id)
      continue;
    // Assert we have 4 points (each of the four tag corners becomes its own feature track)
    assert(corners[cam_id].at(i).size() == 4);
    for (int n = 0; n < 4; n++) {
      // Check if it is in the mask
      // NOTE: mask has max value of 255 (white) if it should be removed
      if (maskin.at<uint8_t>((int)corners[cam_id].at(i).at(n).y, (int)corners[cam_id].at(i).at(n).x) > 127)
        continue;
      // Try to undistort the point (normalized image coordinates)
      cv::Point2f npt_l = camera_calib.at(cam_id)->undistort_cv(corners[cam_id].at(i).at(n));
      // Append to the ids vector and database
      // NOTE(review): tmp_id = id + n * max_tag_id can collide across corners
      // (tag id == max_tag_id at corner n gives the same id as tag id == 0 at corner n+1) — verify this is intended
      size_t tmp_id = (size_t)ids_aruco[cam_id].at(i) + n * max_tag_id;
      database->update_feature(tmp_id, timestamp, cam_id, corners[cam_id].at(i).at(n).x, corners[cam_id].at(i).at(n).y, npt_l.x, npt_l.y);
      // Append to active tracked point list
      cv::KeyPoint kpt;
      kpt.pt = corners[cam_id].at(i).at(n);
      ids_new.push_back(tmp_id);
      pts_new.push_back(kpt);
    }
  }

  // Move forward in time (store this frame's state for the next call and for visualization)
  img_last[cam_id] = img;
  img_mask_last[cam_id] = maskin;
  ids_last[cam_id] = ids_new;
  pts_last[cam_id] = pts_new;
  rT3 = boost::posix_time::microsec_clock::local_time();

  // Timing information
  // PRINT_DEBUG("[TIME-ARUCO]: %.4f seconds for detection\n",(rT2-rT1).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-ARUCO]: %.4f seconds for feature DB update (%d features)\n",(rT3-rT2).total_microseconds() * 1e-6,
  // (int)good_left.size()); PRINT_DEBUG("[TIME-ARUCO]: %.4f seconds for total\n",(rT3-rT1).total_microseconds() * 1e-6);
}
// Tiles all camera images horizontally into img_out and overlays the detected /
// rejected aruco markers, the camera label (or custom overlay text), and the mask.
// NOTE: r1,g1,b1 / r2,g2,b2 are unused in this override; markers are drawn with OpenCV's colors.
void TrackAruco::display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2, std::string overlay) {

  // Cache the images to prevent other threads from editing while we viz (which can be slow)
  // NOTE(review): these clones happen before mtx_feeds is locked below — verify feed threads
  // cannot be mutating img_last / img_mask_last concurrently with this copy
  std::map<size_t, cv::Mat> img_last_cache, img_mask_last_cache;
  for (auto const &pair : img_last) {
    img_last_cache.insert({pair.first, pair.second.clone()});
  }
  for (auto const &pair : img_mask_last) {
    img_mask_last_cache.insert({pair.first, pair.second.clone()});
  }

  // Get the largest width and height (cameras may have different resolutions)
  int max_width = -1;
  int max_height = -1;
  for (auto const &pair : img_last_cache) {
    if (max_width < pair.second.cols)
      max_width = pair.second.cols;
    if (max_height < pair.second.rows)
      max_height = pair.second.rows;
  }

  // Return if we didn't have a last image
  if (max_width == -1 || max_height == -1)
    return;

  // If the image is "small" thus we should use smaller display codes
  bool is_small = (std::min(max_width, max_height) < 400);

  // If the image is "new" then draw the images from scratch
  // Otherwise, we grab the subset of the main image and draw on top of it
  bool image_new = ((int)img_last_cache.size() * max_width != img_out.cols || max_height != img_out.rows);

  // If new, then resize the current image (one tile of max_width per camera)
  if (image_new)
    img_out = cv::Mat(max_height, (int)img_last_cache.size() * max_width, CV_8UC3, cv::Scalar(0, 0, 0));

  // Loop through each image, and draw
  int index_cam = 0;
  for (auto const &pair : img_last_cache) {
    // Lock this image
    std::lock_guard<std::mutex> lck(mtx_feeds.at(pair.first));
    // select the subset of the image
    // NOTE: when not "new", img_temp is a ROI view into img_out, so drawing below edits img_out in place
    cv::Mat img_temp;
    if (image_new)
      cv::cvtColor(img_last_cache[pair.first], img_temp, cv::COLOR_GRAY2RGB);
    else
      img_temp = img_out(cv::Rect(max_width * index_cam, 0, max_width, max_height));
    // draw the accepted markers (with their ids) and the rejected candidates (pink)
    if (!ids_aruco[pair.first].empty())
      cv::aruco::drawDetectedMarkers(img_temp, corners[pair.first], ids_aruco[pair.first]);
    if (!rejects[pair.first].empty())
      cv::aruco::drawDetectedMarkers(img_temp, rejects[pair.first], cv::noArray(), cv::Scalar(100, 0, 255));
    // Draw what camera this is
    auto txtpt = (is_small) ? cv::Point(10, 30) : cv::Point(30, 60);
    if (overlay == "") {
      cv::putText(img_temp, "CAM:" + std::to_string((int)pair.first), txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small) ? 1.5 : 3.0,
                  cv::Scalar(0, 255, 0), 3);
    } else {
      cv::putText(img_temp, overlay, txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small) ? 1.5 : 3.0, cv::Scalar(0, 0, 255), 3);
    }
    // Overlay the mask (masked-out regions tinted red)
    cv::Mat mask = cv::Mat::zeros(img_mask_last_cache[pair.first].rows, img_mask_last_cache[pair.first].cols, CV_8UC3);
    mask.setTo(cv::Scalar(0, 0, 255), img_mask_last_cache[pair.first]);
    cv::addWeighted(mask, 0.1, img_temp, 1.0, 0.0, img_temp);
    // Replace the output image
    img_temp.copyTo(img_out(cv::Rect(max_width * index_cam, 0, img_last_cache[pair.first].cols, img_last_cache[pair.first].rows)));
    index_cam++;
  }
}
#endif

View File

@@ -0,0 +1,119 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_TRACK_ARUCO_H
#define OV_CORE_TRACK_ARUCO_H
#if ENABLE_ARUCO_TAGS
#include <opencv2/aruco.hpp>
#endif
#include "TrackBase.h"
#include "utils/print.h"
namespace ov_core {
/**
 * @brief Tracking of OpenCV Aruco tags.
*
* This class handles the tracking of [OpenCV Aruco tags](https://github.com/opencv/opencv_contrib/tree/master/modules/aruco).
* We track the corners of the tag as compared to the pose of the tag or any other corners.
* Right now we hardcode the dictionary to be `cv::aruco::DICT_6X6_1000`, so please generate tags in this family of tags.
* You can generate these tags using an online utility: https://chev.me/arucogen/
 * The actual size of the tags does not matter since we do not recover the pose and instead just use this for re-detection and tracking of
* four corners of the tag.
*/
class TrackAruco : public TrackBase {
public:
/**
* @brief Public constructor with configuration variables
* @param cameras camera calibration object which has all camera intrinsics in it
* @param numaruco the max id of the arucotags, we don't use any tags greater than this value even if we extract them
* @param binocular if we should do binocular feature tracking or stereo if there are multiple cameras
* @param histmethod what type of histogram pre-processing should be done (histogram eq?)
* @param downsize we can scale the image by 1/2 to increase Aruco tag extraction speed
*/
explicit TrackAruco(std::unordered_map<size_t, std::shared_ptr<CamBase>> cameras, int numaruco, bool binocular,
HistogramMethod histmethod, bool downsize)
: TrackBase(cameras, 0, numaruco, binocular, histmethod), max_tag_id(numaruco), do_downsizing(downsize) {
#if ENABLE_ARUCO_TAGS
aruco_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_1000);
aruco_params = cv::aruco::DetectorParameters::create();
// NOTE: people with newer opencv might fail here
// aruco_params->cornerRefinementMethod = cv::aruco::CornerRefineMethod::CORNER_REFINE_SUBPIX;
#else
PRINT_ERROR(RED "[ERROR]: you have not compiled with aruco tag support!!!\n" RESET);
std::exit(EXIT_FAILURE);
#endif
}
/**
* @brief Process a new image
* @param message Contains our timestamp, images, and camera ids
*/
void feed_new_camera(const CameraData &message);
#if ENABLE_ARUCO_TAGS
/**
* @brief We override the display equation so we can show the tags we extract.
* @param img_out image to which we will overlayed features on
* @param r1,g1,b1 first color to draw in
* @param r2,g2,b2 second color to draw in
* @param overlay Text overlay to replace to normal "cam0" in the top left of screen
*/
void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2, std::string overlay = "") override;
#endif
protected:
#if ENABLE_ARUCO_TAGS
/**
* @brief Process a new monocular image
* @param timestamp timestamp the new image occurred at
* @param imgin new cv:Mat grayscale image
* @param cam_id the camera id that this new image corresponds too
* @param maskin tracking mask for the given input image
*/
void perform_tracking(double timestamp, const cv::Mat &imgin, size_t cam_id, const cv::Mat &maskin);
#endif
// Max tag ID we should extract from (i.e., number of aruco tags starting from zero)
int max_tag_id;
// If we should downsize the image
bool do_downsizing;
#if ENABLE_ARUCO_TAGS
// Our dictionary that we will extract aruco tags with
cv::Ptr<cv::aruco::Dictionary> aruco_dict;
// Parameters the opencv extractor uses
cv::Ptr<cv::aruco::DetectorParameters> aruco_params;
// Our tag IDs and corner we will get from the extractor
std::unordered_map<size_t, std::vector<int>> ids_aruco;
std::unordered_map<size_t, std::vector<std::vector<cv::Point2f>>> corners, rejects;
#endif
};
} // namespace ov_core
#endif /* OV_CORE_TRACK_ARUCO_H */

View File

@@ -0,0 +1,210 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "TrackBase.h"
using namespace ov_core;
// Tiles all camera images horizontally into img_out and overlays the currently
// tracked keypoints: a filled dot in (r1,g1,b1) with a surrounding box in (r2,g2,b2),
// plus the camera label (or custom overlay text) and the red-tinted mask.
void TrackBase::display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2, std::string overlay) {

  // Cache the images to prevent other threads from editing while we viz (which can be slow)
  // NOTE(review): these clones happen before mtx_feeds is locked below — verify feed threads
  // cannot be mutating img_last / img_mask_last concurrently with this copy
  std::map<size_t, cv::Mat> img_last_cache, img_mask_last_cache;
  for (auto const &pair : img_last) {
    img_last_cache.insert({pair.first, pair.second.clone()});
  }
  for (auto const &pair : img_mask_last) {
    img_mask_last_cache.insert({pair.first, pair.second.clone()});
  }

  // Get the largest width and height (cameras may have different resolutions)
  int max_width = -1;
  int max_height = -1;
  for (auto const &pair : img_last_cache) {
    if (max_width < pair.second.cols)
      max_width = pair.second.cols;
    if (max_height < pair.second.rows)
      max_height = pair.second.rows;
  }

  // Return if we didn't have a last image
  if (max_width == -1 || max_height == -1)
    return;

  // If the image is "small" thus we should use smaller display codes
  bool is_small = (std::min(max_width, max_height) < 400);

  // If the image is "new" then draw the images from scratch
  // Otherwise, we grab the subset of the main image and draw on top of it
  bool image_new = ((int)img_last_cache.size() * max_width != img_out.cols || max_height != img_out.rows);

  // If new, then resize the current image (one tile of max_width per camera)
  if (image_new)
    img_out = cv::Mat(max_height, (int)img_last_cache.size() * max_width, CV_8UC3, cv::Scalar(0, 0, 0));

  // Loop through each image, and draw
  int index_cam = 0;
  for (auto const &pair : img_last_cache) {
    // Lock this image
    std::lock_guard<std::mutex> lck(mtx_feeds.at(pair.first));
    // select the subset of the image
    // NOTE: when not "new", img_temp is a ROI view into img_out, so drawing below edits img_out in place
    cv::Mat img_temp;
    if (image_new)
      cv::cvtColor(img_last_cache[pair.first], img_temp, cv::COLOR_GRAY2RGB);
    else
      img_temp = img_out(cv::Rect(max_width * index_cam, 0, max_width, max_height));
    // draw, loop through all keypoints
    for (size_t i = 0; i < pts_last[pair.first].size(); i++) {
      // Get bounding pts for our boxes
      cv::Point2f pt_l = pts_last[pair.first].at(i).pt;
      // Draw the extracted points and ID
      cv::circle(img_temp, pt_l, (is_small) ? 1 : 2, cv::Scalar(r1, g1, b1), cv::FILLED);
      // cv::putText(img_out, std::to_string(ids_left_last.at(i)), pt_l, cv::FONT_HERSHEY_SIMPLEX,0.5,cv::Scalar(0,0,255),1,cv::LINE_AA);
      // Draw rectangle around the point
      cv::Point2f pt_l_top = cv::Point2f(pt_l.x - 5, pt_l.y - 5);
      cv::Point2f pt_l_bot = cv::Point2f(pt_l.x + 5, pt_l.y + 5);
      cv::rectangle(img_temp, pt_l_top, pt_l_bot, cv::Scalar(r2, g2, b2), 1);
    }
    // Draw what camera this is
    auto txtpt = (is_small) ? cv::Point(10, 30) : cv::Point(30, 60);
    if (overlay == "") {
      cv::putText(img_temp, "CAM:" + std::to_string((int)pair.first), txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small) ? 1.5 : 3.0,
                  cv::Scalar(0, 255, 0), 3);
    } else {
      cv::putText(img_temp, overlay, txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small) ? 1.5 : 3.0, cv::Scalar(0, 0, 255), 3);
    }
    // Overlay the mask (masked-out regions tinted red)
    cv::Mat mask = cv::Mat::zeros(img_mask_last_cache[pair.first].rows, img_mask_last_cache[pair.first].cols, CV_8UC3);
    mask.setTo(cv::Scalar(0, 0, 255), img_mask_last_cache[pair.first]);
    cv::addWeighted(mask, 0.1, img_temp, 1.0, 0.0, img_temp);
    // Replace the output image
    img_temp.copyTo(img_out(cv::Rect(max_width * index_cam, 0, img_last_cache[pair.first].cols, img_last_cache[pair.first].rows)));
    index_cam++;
  }
}
// Tiles all camera images horizontally into img_out and overlays each feature's
// measurement "trail" (its uv history from the database, up to 50 points, fading
// in color), highlighting any feature whose id appears in `highlighted` in green.
void TrackBase::display_history(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2, std::vector<size_t> highlighted,
                                std::string overlay) {

  // Cache the images to prevent other threads from editing while we viz (which can be slow)
  // NOTE(review): these clones happen before mtx_feeds is locked below — verify feed threads
  // cannot be mutating img_last / img_mask_last concurrently with this copy
  std::map<size_t, cv::Mat> img_last_cache, img_mask_last_cache;
  for (auto const &pair : img_last) {
    img_last_cache.insert({pair.first, pair.second.clone()});
  }
  for (auto const &pair : img_mask_last) {
    img_mask_last_cache.insert({pair.first, pair.second.clone()});
  }

  // Get the largest width and height (cameras may have different resolutions)
  int max_width = -1;
  int max_height = -1;
  for (auto const &pair : img_last_cache) {
    if (max_width < pair.second.cols)
      max_width = pair.second.cols;
    if (max_height < pair.second.rows)
      max_height = pair.second.rows;
  }

  // Return if we didn't have a last image
  if (max_width == -1 || max_height == -1)
    return;

  // If the image is "small" thus we should use smaller display codes
  bool is_small = (std::min(max_width, max_height) < 400);

  // If the image is "new" then draw the images from scratch
  // Otherwise, we grab the subset of the main image and draw on top of it
  bool image_new = ((int)img_last_cache.size() * max_width != img_out.cols || max_height != img_out.rows);

  // If new, then resize the current image (one tile of max_width per camera)
  if (image_new)
    img_out = cv::Mat(max_height, (int)img_last_cache.size() * max_width, CV_8UC3, cv::Scalar(0, 0, 0));

  // Max tracks to show (otherwise it clutters up the screen)
  size_t maxtracks = 50;

  // Loop through each image, and draw
  int index_cam = 0;
  for (auto const &pair : img_last_cache) {
    // Lock this image
    std::lock_guard<std::mutex> lck(mtx_feeds.at(pair.first));
    // select the subset of the image
    // NOTE: when not "new", img_temp is a ROI view into img_out, so drawing below edits img_out in place
    cv::Mat img_temp;
    if (image_new)
      cv::cvtColor(img_last_cache[pair.first], img_temp, cv::COLOR_GRAY2RGB);
    else
      img_temp = img_out(cv::Rect(max_width * index_cam, 0, max_width, max_height));
    // draw, loop through all keypoints
    for (size_t i = 0; i < ids_last[pair.first].size(); i++) {
      // If a highlighted point, then put a nice box around it
      if (std::find(highlighted.begin(), highlighted.end(), ids_last[pair.first].at(i)) != highlighted.end()) {
        cv::Point2f pt_c = pts_last[pair.first].at(i).pt;
        cv::Point2f pt_l_top = cv::Point2f(pt_c.x - ((is_small) ? 3 : 5), pt_c.y - ((is_small) ? 3 : 5));
        cv::Point2f pt_l_bot = cv::Point2f(pt_c.x + ((is_small) ? 3 : 5), pt_c.y + ((is_small) ? 3 : 5));
        cv::rectangle(img_temp, pt_l_top, pt_l_bot, cv::Scalar(0, 255, 0), 1);
        cv::circle(img_temp, pt_c, (is_small) ? 1 : 2, cv::Scalar(0, 255, 0), cv::FILLED);
      }
      // Get the feature from the database
      std::shared_ptr<Feature> feat = database->get_feature(ids_last[pair.first].at(i));
      // Skip if the feature is null
      if (feat == nullptr || feat->uvs[pair.first].empty() || feat->to_delete)
        continue;
      // Draw the history of this point (start at the last inserted one)
      // NOTE(review): the z > 0 bound never draws index 0 (the oldest measurement) — confirm intended
      for (size_t z = feat->uvs[pair.first].size() - 1; z > 0; z--) {
        // Check if we have reached the max
        if (feat->uvs[pair.first].size() - z > maxtracks)
          break;
        // Calculate what color we are drawing in (older points fade away from the base color;
        // stereo features cycle the channels so they look different from mono ones)
        // NOTE(review): the integer division (color1 / size) truncates before scaling by z,
        // so the fade is quantized for short tracks — confirm intended
        bool is_stereo = (feat->uvs.size() > 1);
        int color_r = (is_stereo ? b2 : r2) - (int)((is_stereo ? b1 : r1) / feat->uvs[pair.first].size() * z);
        int color_g = (is_stereo ? r2 : g2) - (int)((is_stereo ? r1 : g1) / feat->uvs[pair.first].size() * z);
        int color_b = (is_stereo ? g2 : b2) - (int)((is_stereo ? g1 : b1) / feat->uvs[pair.first].size() * z);
        // Draw current point
        cv::Point2f pt_c(feat->uvs[pair.first].at(z)(0), feat->uvs[pair.first].at(z)(1));
        cv::circle(img_temp, pt_c, (is_small) ? 1 : 2, cv::Scalar(color_r, color_g, color_b), cv::FILLED);
        // If there is a next point, then display the line from this point to the next
        if (z + 1 < feat->uvs[pair.first].size()) {
          cv::Point2f pt_n(feat->uvs[pair.first].at(z + 1)(0), feat->uvs[pair.first].at(z + 1)(1));
          cv::line(img_temp, pt_c, pt_n, cv::Scalar(color_r, color_g, color_b));
        }
        // If the first point, display the ID
        if (z == feat->uvs[pair.first].size() - 1) {
          // cv::putText(img_out0, std::to_string(feat->featid), pt_c, cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255), 1,
          // cv::LINE_AA); cv::circle(img_out0, pt_c, 2, cv::Scalar(color,color,255), CV_FILLED);
        }
      }
    }
    // Draw what camera this is
    auto txtpt = (is_small) ? cv::Point(10, 30) : cv::Point(30, 60);
    if (overlay == "") {
      cv::putText(img_temp, "CAM:" + std::to_string((int)pair.first), txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small) ? 1.5 : 3.0,
                  cv::Scalar(0, 255, 0), 3);
    } else {
      cv::putText(img_temp, overlay, txtpt, cv::FONT_HERSHEY_COMPLEX_SMALL, (is_small) ? 1.5 : 3.0, cv::Scalar(0, 0, 255), 3);
    }
    // Overlay the mask (masked-out regions tinted red)
    cv::Mat mask = cv::Mat::zeros(img_mask_last_cache[pair.first].rows, img_mask_last_cache[pair.first].cols, CV_8UC3);
    mask.setTo(cv::Scalar(0, 0, 255), img_mask_last_cache[pair.first]);
    cv::addWeighted(mask, 0.1, img_temp, 1.0, 0.0, img_temp);
    // Replace the output image
    img_temp.copyTo(img_out(cv::Rect(max_width * index_cam, 0, img_last_cache[pair.first].cols, img_last_cache[pair.first].rows)));
    index_cam++;
  }
}

View File

@@ -0,0 +1,212 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_TRACK_BASE_H
#define OV_CORE_TRACK_BASE_H
#include <atomic>
#include <iostream>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include "Grider_FAST.h"
#include "cam/CamBase.h"
#include "feat/FeatureDatabase.h"
#include "utils/colors.h"
#include "utils/opencv_lambda_body.h"
#include "utils/sensor_data.h"
namespace ov_core {
/**
* @brief Visual feature tracking base class
*
* This is the base class for all our visual trackers.
* The goal here is to provide a common interface so all underlying trackers can simply hide away all the complexities.
* We have something called the "feature database" which has all the tracking information inside of it.
* The user can ask this database for features which can then be used in an MSCKF or batch-based setting.
* The feature tracks store both the raw (distorted) and undistorted/normalized values.
* Right now we just support two camera models, see: undistort_point_brown() and undistort_point_fisheye().
*
* @m_class{m-note m-warning}
*
* @par A Note on Multi-Threading Support
* There is some support for asynchronous multi-threaded feature tracking of independent cameras.
* The key assumption during implementation is that the user will not try to track on the same camera in parallel, and instead call on
* different cameras. For example, if I have two cameras, I can either sequentially call the feed function, or I spin each of these into
* separate threads and wait for their return. The @ref currid is atomic to allow for multiple threads to access it without issue and ensure
* that all features have unique id values. We also have mutex for access for the calibration and previous images and tracks (used during
* visualization). It should be noted that if a thread calls visualization, it might hang or the feed thread might, due to acquiring the
* mutex for that specific camera id / feed.
*
* This base class also handles most of the heavy lifting with the visualization, but the sub-classes can override
* this and do their own logic if they want (i.e. the TrackAruco has its own logic for visualization).
* This visualization needs access to the prior images and their tracks, thus must synchronise in the case of multi-threading.
 * This shouldn't impact performance, but high frequency visualization calls can negatively affect the performance.
*/
class TrackBase {
public:
  /**
   * @brief Desired pre-processing image method.
   */
  enum HistogramMethod { NONE, HISTOGRAM, CLAHE };

  /**
   * @brief Public constructor with configuration variables
   * @param cameras camera calibration object which has all camera intrinsics in it
   * @param numfeats number of features we want to track (i.e. track 200 points from frame to frame)
   * @param numaruco the max id of the arucotags, so we ensure that we start our non-aruco features above this value
   * @param stereo if we should do stereo feature tracking or binocular
   * @param histmethod what type of histogram pre-processing should be done (histogram eq?)
   */
  TrackBase(std::unordered_map<size_t, std::shared_ptr<CamBase>> cameras, int numfeats, int numaruco, bool stereo,
            HistogramMethod histmethod)
      : camera_calib(cameras), database(new FeatureDatabase()), num_features(numfeats), use_stereo(stereo), histogram_method(histmethod) {
    // Our current feature ID should be larger than the number of aruco tags we have (each tag has 4 corners)
    currid = 4 * (size_t)numaruco + 1;
    // Create our mutex array based on the number of cameras we have
    // See https://stackoverflow.com/a/24170141/7718197
    if (mtx_feeds.empty() || mtx_feeds.size() != camera_calib.size()) {
      std::vector<std::mutex> list(camera_calib.size());
      mtx_feeds.swap(list);
    }
  }

  virtual ~TrackBase() {}

  /**
   * @brief Process a new image
   * @param message Contains our timestamp, images, and camera ids
   */
  virtual void feed_new_camera(const CameraData &message) = 0;

  /**
   * @brief Shows features extracted in the last image
   * @param img_out image to which we will overlay our features on
   * @param r1,g1,b1 first color to draw in
   * @param r2,g2,b2 second color to draw in
   * @param overlay Text overlay to replace the normal "cam0" in the top left of screen
   */
  virtual void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2, std::string overlay = "");

  /**
   * @brief Shows a "trail" for each feature (i.e. its history)
   * @param img_out image to which we will overlay our features on
   * @param r1,g1,b1 first color to draw in
   * @param r2,g2,b2 second color to draw in
   * @param highlighted unique ids which we wish to highlight (e.g. slam feats)
   * @param overlay Text overlay to replace the normal "cam0" in the top left of screen
   */
  virtual void display_history(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2, std::vector<size_t> highlighted = {},
                               std::string overlay = "");

  /**
   * @brief Get the feature database with all the track information
   * @return FeatureDatabase pointer that one can query for features
   */
  std::shared_ptr<FeatureDatabase> get_feature_database() { return database; }

  /**
   * @brief Changes the ID of an actively tracked feature to another one.
   *
   * This function can be helpful if you detect a loop-closure with an old frame.
   * One could then change the id of an active feature to match the old feature id!
   *
   * NOTE(review): this reads and writes the database and ids_last without acquiring mtx_feeds,
   * so it should not be called concurrently with a feed on the same camera -- confirm with callers.
   *
   * @param id_old Old id we want to change
   * @param id_new Id we want to change the old id to
   */
  void change_feat_id(size_t id_old, size_t id_new) {
    // If found in db then re-key the feature under the new id
    // (the Feature object is shared, so its featid is updated in place)
    if (database->get_internal_data().find(id_old) != database->get_internal_data().end()) {
      std::shared_ptr<Feature> feat = database->get_internal_data().at(id_old);
      database->get_internal_data().erase(id_old);
      feat->featid = id_new;
      database->get_internal_data().insert({id_new, feat});
    }
    // Update current track IDs so future frames continue the track under the new id
    for (auto &cam_ids_pair : ids_last) {
      for (size_t i = 0; i < cam_ids_pair.second.size(); i++) {
        if (cam_ids_pair.second.at(i) == id_old) {
          ids_last.at(cam_ids_pair.first).at(i) = id_new;
        }
      }
    }
  }

  /// Getter method for number of active features
  int get_num_features() { return num_features; }

  /// Setter method for number of active features
  void set_num_features(int _num_features) { num_features = _num_features; }

protected:
  /// Camera object which has all calibration in it
  std::unordered_map<size_t, std::shared_ptr<CamBase>> camera_calib;

  /// Database with all our current features
  std::shared_ptr<FeatureDatabase> database;

  /// If we are a fisheye model or not
  /// NOTE(review): not referenced anywhere in this base class -- verify whether subclasses still use it
  std::map<size_t, bool> camera_fisheye;

  /// Number of features we should try to track frame to frame
  int num_features;

  /// If we should use binocular tracking or stereo tracking for multi-camera
  bool use_stereo;

  /// What histogram equalization method we should pre-process images with?
  HistogramMethod histogram_method;

  /// Mutexs for our last set of image storage (img_last, pts_last, and ids_last)
  std::vector<std::mutex> mtx_feeds;

  /// Last set of images (use map so all trackers render in the same order)
  std::map<size_t, cv::Mat> img_last;

  /// Last set of masks (use map so all trackers render in the same order)
  std::map<size_t, cv::Mat> img_mask_last;

  /// Last set of tracked points
  std::unordered_map<size_t, std::vector<cv::KeyPoint>> pts_last;

  /// Set of IDs of each current feature in the database
  std::unordered_map<size_t, std::vector<size_t>> ids_last;

  /// Master ID for this tracker (atomic to allow for multi-threading)
  std::atomic<size_t> currid;

  // Timing variables (most children use these...)
  boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7;
};
} // namespace ov_core
#endif /* OV_CORE_TRACK_BASE_H */

View File

@@ -0,0 +1,545 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "TrackDescriptor.h"
#include "utils/print.h"
using namespace ov_core;
void TrackDescriptor::feed_new_camera(const CameraData &message) {
// Error check that we have all the data
if (message.sensor_ids.empty() || message.sensor_ids.size() != message.images.size() || message.images.size() != message.masks.size()) {
PRINT_ERROR(RED "[ERROR]: MESSAGE DATA SIZES DO NOT MATCH OR EMPTY!!!\n" RESET);
PRINT_ERROR(RED "[ERROR]: - message.sensor_ids.size() = %zu\n" RESET, message.sensor_ids.size());
PRINT_ERROR(RED "[ERROR]: - message.images.size() = %zu\n" RESET, message.images.size());
PRINT_ERROR(RED "[ERROR]: - message.masks.size() = %zu\n" RESET, message.masks.size());
std::exit(EXIT_FAILURE);
}
// Either call our stereo or monocular version
// If we are doing binocular tracking, then we should parallize our tracking
size_t num_images = message.images.size();
if (num_images == 1) {
feed_monocular(message, 0);
} else if (num_images == 2 && use_stereo) {
feed_stereo(message, 0, 1);
} else if (!use_stereo) {
parallel_for_(cv::Range(0, (int)num_images), LambdaBody([&](const cv::Range &range) {
for (int i = range.start; i < range.end; i++) {
feed_monocular(message, i);
}
}));
} else {
PRINT_ERROR(RED "[ERROR]: invalid number of images passed %zu, we only support mono or stereo tracking", num_images);
std::exit(EXIT_FAILURE);
}
}
// Tracks a single camera temporally: detect keypoints+descriptors in the new image,
// match them against the previous frame's descriptors, carry forward matched feature ids,
// and push raw + undistorted observations into the feature database.
void TrackDescriptor::feed_monocular(const CameraData &message, size_t msg_id) {
  // Start timing
  rT1 = boost::posix_time::microsec_clock::local_time();
  // Lock this data feed for this camera (one feed per camera at a time; see TrackBase threading note)
  size_t cam_id = message.sensor_ids.at(msg_id);
  std::lock_guard<std::mutex> lck(mtx_feeds.at(cam_id));
  // Histogram equalize (NONE leaves the raw image untouched)
  cv::Mat img, mask;
  if (histogram_method == HistogramMethod::HISTOGRAM) {
    cv::equalizeHist(message.images.at(msg_id), img);
  } else if (histogram_method == HistogramMethod::CLAHE) {
    // Contrast-limited adaptive equalization over 8x8 tiles
    double eq_clip_limit = 10.0;
    cv::Size eq_win_size = cv::Size(8, 8);
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
    clahe->apply(message.images.at(msg_id), img);
  } else {
    img = message.images.at(msg_id);
  }
  mask = message.masks.at(msg_id);
  // If we are the first frame (or have lost tracking), initialize our descriptors and return
  if (pts_last.find(cam_id) == pts_last.end() || pts_last[cam_id].empty()) {
    perform_detection_monocular(img, mask, pts_last[cam_id], desc_last[cam_id], ids_last[cam_id]);
    img_last[cam_id] = img;
    img_mask_last[cam_id] = mask;
    return;
  }
  // Our new keypoints and descriptor for the new image
  std::vector<cv::KeyPoint> pts_new;
  cv::Mat desc_new;
  std::vector<size_t> ids_new;
  // First, extract new descriptors for this new image
  perform_detection_monocular(img, mask, pts_new, desc_new, ids_new);
  rT2 = boost::posix_time::microsec_clock::local_time();
  // Our matches temporally (query = previous frame, train = current frame)
  std::vector<cv::DMatch> matches_ll;
  // Lets match temporally
  robust_match(pts_last[cam_id], pts_new, desc_last[cam_id], desc_new, cam_id, cam_id, matches_ll);
  rT3 = boost::posix_time::microsec_clock::local_time();
  // Get our "good tracks"
  std::vector<cv::KeyPoint> good_left;
  std::vector<size_t> good_ids_left;
  cv::Mat good_desc_left;
  // Count how many we have tracked from the last time
  int num_tracklast = 0;
  // Loop through all current points
  // We want to see if any of these have matches to the previous frame
  // If we have a match new->old then we want to use that ID instead of the new one
  for (size_t i = 0; i < pts_new.size(); i++) {
    // Loop through all matches, and find the old "query" index for this new point
    // (if several matches share this trainIdx, the last one in the list wins)
    int idll = -1;
    for (size_t j = 0; j < matches_ll.size(); j++) {
      if (matches_ll[j].trainIdx == (int)i) {
        idll = matches_ll[j].queryIdx;
      }
    }
    // Then lets replace the current ID with the old ID if found
    // Else just append the current feature and its unique ID
    good_left.push_back(pts_new[i]);
    good_desc_left.push_back(desc_new.row((int)i));
    if (idll != -1) {
      good_ids_left.push_back(ids_last[cam_id][idll]);
      num_tracklast++;
    } else {
      good_ids_left.push_back(ids_new[i]);
    }
  }
  rT4 = boost::posix_time::microsec_clock::local_time();
  // Update our feature database with these new observations (both raw pixel and undistorted/normalized uv)
  for (size_t i = 0; i < good_left.size(); i++) {
    cv::Point2f npt_l = camera_calib.at(cam_id)->undistort_cv(good_left.at(i).pt);
    database->update_feature(good_ids_left.at(i), message.timestamp, cam_id, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x, npt_l.y);
  }
  // Debug info
  // PRINT_DEBUG("LtoL = %d | good = %d | fromlast = %d\n",(int)matches_ll.size(),(int)good_left.size(),num_tracklast);
  // Move forward in time
  img_last[cam_id] = img;
  img_mask_last[cam_id] = mask;
  pts_last[cam_id] = good_left;
  ids_last[cam_id] = good_ids_left;
  desc_last[cam_id] = good_desc_left;
  rT5 = boost::posix_time::microsec_clock::local_time();
  // Our timing information
  // PRINT_DEBUG("[TIME-DESC]: %.4f seconds for detection\n",(rT2-rT1).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-DESC]: %.4f seconds for matching\n",(rT3-rT2).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-DESC]: %.4f seconds for merging\n",(rT4-rT3).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-DESC]: %.4f seconds for feature DB update (%d features)\n",(rT5-rT4).total_microseconds() * 1e-6,
  // (int)good_left.size()); PRINT_DEBUG("[TIME-DESC]: %.4f seconds for total\n",(rT5-rT1).total_microseconds() * 1e-6);
}
// Tracks a stereo pair: detect stereo-matched keypoints in both images, match each side
// temporally against its previous frame, keep only tracks where BOTH sides agree on the
// old feature id, and insert left+right observations into the feature database.
void TrackDescriptor::feed_stereo(const CameraData &message, size_t msg_id_left, size_t msg_id_right) {
  // Start timing
  rT1 = boost::posix_time::microsec_clock::local_time();
  // Lock this data feed for both cameras of the pair
  size_t cam_id_left = message.sensor_ids.at(msg_id_left);
  size_t cam_id_right = message.sensor_ids.at(msg_id_right);
  std::lock_guard<std::mutex> lck1(mtx_feeds.at(cam_id_left));
  std::lock_guard<std::mutex> lck2(mtx_feeds.at(cam_id_right));
  // Histogram equalize images (NONE leaves the raw images untouched)
  cv::Mat img_left, img_right, mask_left, mask_right;
  if (histogram_method == HistogramMethod::HISTOGRAM) {
    cv::equalizeHist(message.images.at(msg_id_left), img_left);
    cv::equalizeHist(message.images.at(msg_id_right), img_right);
  } else if (histogram_method == HistogramMethod::CLAHE) {
    // Contrast-limited adaptive equalization over 8x8 tiles
    double eq_clip_limit = 10.0;
    cv::Size eq_win_size = cv::Size(8, 8);
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
    clahe->apply(message.images.at(msg_id_left), img_left);
    clahe->apply(message.images.at(msg_id_right), img_right);
  } else {
    img_left = message.images.at(msg_id_left);
    img_right = message.images.at(msg_id_right);
  }
  mask_left = message.masks.at(msg_id_left);
  mask_right = message.masks.at(msg_id_right);
  // If we are the first frame (or have lost tracking), initialize our descriptors and return
  if (pts_last[cam_id_left].empty() || pts_last[cam_id_right].empty()) {
    perform_detection_stereo(img_left, img_right, mask_left, mask_right, pts_last[cam_id_left], pts_last[cam_id_right],
                             desc_last[cam_id_left], desc_last[cam_id_right], cam_id_left, cam_id_right, ids_last[cam_id_left],
                             ids_last[cam_id_right]);
    img_last[cam_id_left] = img_left;
    img_last[cam_id_right] = img_right;
    img_mask_last[cam_id_left] = mask_left;
    img_mask_last[cam_id_right] = mask_right;
    return;
  }
  // Our new keypoints and descriptor for the new image
  std::vector<cv::KeyPoint> pts_left_new, pts_right_new;
  cv::Mat desc_left_new, desc_right_new;
  std::vector<size_t> ids_left_new, ids_right_new;
  // First, extract new stereo-matched descriptors for this new image pair
  perform_detection_stereo(img_left, img_right, mask_left, mask_right, pts_left_new, pts_right_new, desc_left_new, desc_right_new,
                           cam_id_left, cam_id_right, ids_left_new, ids_right_new);
  rT2 = boost::posix_time::microsec_clock::local_time();
  // Our matches temporally: left-to-left and right-to-right in parallel
  // (the two iterations touch disjoint data, so this lambda is safe to run concurrently)
  std::vector<cv::DMatch> matches_ll, matches_rr;
  parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) {
                  for (int i = range.start; i < range.end; i++) {
                    bool is_left = (i == 0);
                    robust_match(pts_last[is_left ? cam_id_left : cam_id_right], is_left ? pts_left_new : pts_right_new,
                                 desc_last[is_left ? cam_id_left : cam_id_right], is_left ? desc_left_new : desc_right_new,
                                 is_left ? cam_id_left : cam_id_right, is_left ? cam_id_left : cam_id_right,
                                 is_left ? matches_ll : matches_rr);
                  }
                }));
  rT3 = boost::posix_time::microsec_clock::local_time();
  // Get our "good tracks"
  std::vector<cv::KeyPoint> good_left, good_right;
  std::vector<size_t> good_ids_left, good_ids_right;
  cv::Mat good_desc_left, good_desc_right;
  // Points must be of equal size (stereo detection outputs matched left/right pairs index-for-index)
  assert(pts_last[cam_id_left].size() == pts_last[cam_id_right].size());
  assert(pts_left_new.size() == pts_right_new.size());
  // Count how many we have tracked from the last time
  int num_tracklast = 0;
  // Loop through all current left to right points
  // We want to see if any of these have matches to the previous frame
  // If we have a match new->old then we want to use that ID instead of the new one
  for (size_t i = 0; i < pts_left_new.size(); i++) {
    // Loop through all left matches, and find the old "query" index
    // (if several matches share this trainIdx, the last one in the list wins)
    int idll = -1;
    for (size_t j = 0; j < matches_ll.size(); j++) {
      if (matches_ll[j].trainIdx == (int)i) {
        idll = matches_ll[j].queryIdx;
      }
    }
    // Same for the right matches
    int idrr = -1;
    for (size_t j = 0; j < matches_rr.size(); j++) {
      if (matches_rr[j].trainIdx == (int)i) {
        idrr = matches_rr[j].queryIdx;
      }
    }
    // If we found a good stereo track from left to left, and right to right
    // Then lets replace the current ID with the old ID
    // We also check that we are linked to the same past ID value
    if (idll != -1 && idrr != -1 && ids_last[cam_id_left][idll] == ids_last[cam_id_right][idrr]) {
      good_left.push_back(pts_left_new[i]);
      good_right.push_back(pts_right_new[i]);
      good_desc_left.push_back(desc_left_new.row((int)i));
      good_desc_right.push_back(desc_right_new.row((int)i));
      good_ids_left.push_back(ids_last[cam_id_left][idll]);
      good_ids_right.push_back(ids_last[cam_id_right][idrr]);
      num_tracklast++;
    } else {
      // Else just append the current feature and its unique ID
      // (the left id is reused for the right side: detection assigned the SAME id to both
      // views of a stereo match, so ids_left_new[i] == ids_right_new[i] here)
      good_left.push_back(pts_left_new[i]);
      good_right.push_back(pts_right_new[i]);
      good_desc_left.push_back(desc_left_new.row((int)i));
      good_desc_right.push_back(desc_right_new.row((int)i));
      good_ids_left.push_back(ids_left_new[i]);
      good_ids_right.push_back(ids_left_new[i]);
    }
  }
  rT4 = boost::posix_time::microsec_clock::local_time();
  //===================================================================================
  //===================================================================================
  // Update our feature database with these new observations (both raw and undistorted/normalized uv)
  for (size_t i = 0; i < good_left.size(); i++) {
    // Assert that our IDs are the same (left and right observations belong to one feature)
    assert(good_ids_left.at(i) == good_ids_right.at(i));
    // Try to undistort the point
    cv::Point2f npt_l = camera_calib.at(cam_id_left)->undistort_cv(good_left.at(i).pt);
    cv::Point2f npt_r = camera_calib.at(cam_id_right)->undistort_cv(good_right.at(i).pt);
    // Append to the database
    database->update_feature(good_ids_left.at(i), message.timestamp, cam_id_left, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x,
                             npt_l.y);
    database->update_feature(good_ids_left.at(i), message.timestamp, cam_id_right, good_right.at(i).pt.x, good_right.at(i).pt.y, npt_r.x,
                             npt_r.y);
  }
  // Debug info
  // PRINT_DEBUG("LtoL = %d | RtoR = %d | LtoR = %d | good = %d | fromlast = %d\n", (int)matches_ll.size(),
  // (int)matches_rr.size(),(int)ids_left_new.size(),(int)good_left.size(),num_tracklast);
  // Move forward in time
  img_last[cam_id_left] = img_left;
  img_last[cam_id_right] = img_right;
  img_mask_last[cam_id_left] = mask_left;
  img_mask_last[cam_id_right] = mask_right;
  pts_last[cam_id_left] = good_left;
  pts_last[cam_id_right] = good_right;
  ids_last[cam_id_left] = good_ids_left;
  ids_last[cam_id_right] = good_ids_right;
  desc_last[cam_id_left] = good_desc_left;
  desc_last[cam_id_right] = good_desc_right;
  rT5 = boost::posix_time::microsec_clock::local_time();
  // Our timing information
  // PRINT_DEBUG("[TIME-DESC]: %.4f seconds for detection\n",(rT2-rT1).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-DESC]: %.4f seconds for matching\n",(rT3-rT2).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-DESC]: %.4f seconds for merging\n",(rT4-rT3).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-DESC]: %.4f seconds for feature DB update (%d features)\n",(rT5-rT4).total_microseconds() * 1e-6,
  // (int)good_left.size()); PRINT_DEBUG("[TIME-DESC]: %.4f seconds for total\n",(rT5-rT1).total_microseconds() * 1e-6);
}
// Detects fresh features in a single image: gridded FAST corners + ORB descriptors,
// then thins them with a downsampled occupancy grid so kept features are at least
// min_px_dist pixels apart. Each kept feature receives a brand-new unique id.
void TrackDescriptor::perform_detection_monocular(const cv::Mat &img0, const cv::Mat &mask0, std::vector<cv::KeyPoint> &pts0,
                                                  cv::Mat &desc0, std::vector<size_t> &ids0) {
  // Caller must hand us empty output containers
  assert(pts0.empty());
  // Candidate corners from gridded FAST (spreads detections over the whole image)
  std::vector<cv::KeyPoint> candidates;
  Grider_FAST::perform_griding(img0, mask0, candidates, num_features, grid_x, grid_y, threshold, true);
  // ORB descriptor for every candidate
  cv::Mat candidate_desc;
  this->orb0->compute(img0, candidates, candidate_desc);
  // Downsampled occupancy grid: one cell per min_px_dist x min_px_dist pixel patch,
  // used to reject candidates that land too close to an already-accepted feature
  cv::Size occ_size((int)((float)img0.cols / (float)min_px_dist), (int)((float)img0.rows / (float)min_px_dist));
  cv::Mat occupancy = cv::Mat::zeros(occ_size, CV_8UC1);
  // Walk the candidates, keeping only in-bounds, well-separated ones
  // NOTE: with multi-threaded detection the atomic id counter makes ids slightly
  // non-deterministic across runs, which matters downstream since features are
  // prioritized by smallest (oldest) id; no clean fix known yet
  for (size_t idx = 0; idx < candidates.size(); idx++) {
    const cv::KeyPoint &cand = candidates.at(idx);
    int px = (int)cand.pt.x;
    int py = (int)cand.pt.y;
    int cell_x = (int)(cand.pt.x / (float)min_px_dist);
    int cell_y = (int)(cand.pt.y / (float)min_px_dist);
    // Reject anything outside the image or outside the occupancy grid
    bool out_of_bounds = cell_x < 0 || cell_x >= occ_size.width || cell_y < 0 || cell_y >= occ_size.height || px < 0 || px >= img0.cols ||
                         py < 0 || py >= img0.rows;
    if (out_of_bounds) {
      continue;
    }
    // Reject candidates whose grid cell is already claimed by another feature
    if (occupancy.at<uint8_t>(cell_y, cell_x) > 127)
      continue;
    // Accept: record keypoint, descriptor, and a fresh unique id (overwritten later
    // if temporal matching links this feature to a previous-frame id)
    pts0.push_back(candidates.at(idx));
    desc0.push_back(candidate_desc.row((int)idx));
    size_t new_id = ++currid;
    ids0.push_back(new_id);
    // Claim this cell so nearby candidates are rejected
    occupancy.at<uint8_t>(cell_y, cell_x) = 255;
  }
}
// Detects fresh stereo features: gridded FAST + ORB in both images (in parallel),
// robust left-to-right descriptor matching, then min-px-dist thinning via occupancy
// grids. Each surviving left/right pair shares ONE new unique feature id.
void TrackDescriptor::perform_detection_stereo(const cv::Mat &img0, const cv::Mat &img1, const cv::Mat &mask0, const cv::Mat &mask1,
                                               std::vector<cv::KeyPoint> &pts0, std::vector<cv::KeyPoint> &pts1, cv::Mat &desc0,
                                               cv::Mat &desc1, size_t cam_id0, size_t cam_id1, std::vector<size_t> &ids0,
                                               std::vector<size_t> &ids1) {
  // Caller must hand us empty output containers
  assert(pts0.empty());
  assert(pts1.empty());
  // Extract our features (use FAST with griding) and their descriptors, left and right in parallel
  std::vector<cv::KeyPoint> pts0_ext, pts1_ext;
  cv::Mat desc0_ext, desc1_ext;
  parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) {
                  for (int i = range.start; i < range.end; i++) {
                    bool is_left = (i == 0);
                    Grider_FAST::perform_griding(is_left ? img0 : img1, is_left ? mask0 : mask1, is_left ? pts0_ext : pts1_ext,
                                                 num_features, grid_x, grid_y, threshold, true);
                    (is_left ? orb0 : orb1)->compute(is_left ? img0 : img1, is_left ? pts0_ext : pts1_ext, is_left ? desc0_ext : desc1_ext);
                  }
                }));
  // Do matching from the left to the right image
  std::vector<cv::DMatch> matches;
  robust_match(pts0_ext, pts1_ext, desc0_ext, desc1_ext, cam_id0, cam_id1, matches);
  // Create a 2D occupancy grid per image (one cell per min_px_dist x min_px_dist patch)
  // so we can reject features closer than min_px_dist to an already-accepted one
  cv::Size size0((int)((float)img0.cols / (float)min_px_dist), (int)((float)img0.rows / (float)min_px_dist));
  cv::Mat grid_2d_0 = cv::Mat::zeros(size0, CV_8UC1);
  cv::Size size1((int)((float)img1.cols / (float)min_px_dist), (int)((float)img1.rows / (float)min_px_dist));
  cv::Mat grid_2d_1 = cv::Mat::zeros(size1, CV_8UC1);
  // For all good matches, lets append to our returned vectors
  for (size_t i = 0; i < matches.size(); i++) {
    // Get our ids
    int index_pt0 = matches.at(i).queryIdx;
    int index_pt1 = matches.at(i).trainIdx;
    // Get current left/right keypoint, check that it is in bounds
    cv::KeyPoint kpt0 = pts0_ext.at(index_pt0);
    cv::KeyPoint kpt1 = pts1_ext.at(index_pt1);
    int x0 = (int)kpt0.pt.x;
    int y0 = (int)kpt0.pt.y;
    int x0_grid = (int)(kpt0.pt.x / (float)min_px_dist);
    int y0_grid = (int)(kpt0.pt.y / (float)min_px_dist);
    if (x0_grid < 0 || x0_grid >= size0.width || y0_grid < 0 || y0_grid >= size0.height || x0 < 0 || x0 >= img0.cols || y0 < 0 ||
        y0 >= img0.rows) {
      continue;
    }
    int x1 = (int)kpt1.pt.x;
    int y1 = (int)kpt1.pt.y;
    int x1_grid = (int)(kpt1.pt.x / (float)min_px_dist);
    int y1_grid = (int)(kpt1.pt.y / (float)min_px_dist);
    // BUGFIX: bound the right keypoint against the RIGHT image (img1), not img0;
    // the old check was wrong whenever the stereo images differ in size
    if (x1_grid < 0 || x1_grid >= size1.width || y1_grid < 0 || y1_grid >= size1.height || x1 < 0 || x1 >= img1.cols || y1 < 0 ||
        y1 >= img1.rows) {
      continue;
    }
    // Check if this keypoint is near another already-accepted point
    if (grid_2d_0.at<uint8_t>(y0_grid, x0_grid) > 127 || grid_2d_1.at<uint8_t>(y1_grid, x1_grid) > 127)
      continue;
    // Append our keypoints and descriptors
    pts0.push_back(pts0_ext[index_pt0]);
    pts1.push_back(pts1_ext[index_pt1]);
    desc0.push_back(desc0_ext.row(index_pt0));
    desc1.push_back(desc1_ext.row(index_pt1));
    // Give the left and right observation the SAME new unique id; it will later be
    // replaced with a tracked id after temporal matching
    size_t temp = ++currid;
    ids0.push_back(temp);
    ids1.push_back(temp);
    // BUGFIX: actually mark the occupancy cells (mirrors perform_detection_monocular);
    // previously the grids were checked but never written, so the min-px-dist
    // rejection above was a no-op
    grid_2d_0.at<uint8_t>(y0_grid, x0_grid) = 255;
    grid_2d_1.at<uint8_t>(y1_grid, x1_grid) = 255;
  }
}
// Robustly matches two keypoint+descriptor sets: 2-NN matching in both directions,
// Lowe's ratio test, a symmetry check, then fundamental-matrix RANSAC on the
// undistorted/normalized points. Based on OpenCV's "RobustMatcher" example.
void TrackDescriptor::robust_match(std::vector<cv::KeyPoint> &pts0, std::vector<cv::KeyPoint> pts1, cv::Mat &desc0, cv::Mat &desc1,
                                   size_t id0, size_t id1, std::vector<cv::DMatch> &matches) {
  // Two nearest neighbours in each direction
  std::vector<std::vector<cv::DMatch>> knn_0to1, knn_1to0;
  matcher->knnMatch(desc0, desc1, knn_0to1, 2);
  matcher->knnMatch(desc1, desc0, knn_1to0, 2);
  // Prune ambiguous matches with the ratio test on both directions
  robust_ratio_test(knn_0to1);
  robust_ratio_test(knn_1to0);
  // Keep only matches that agree when matched in both directions
  std::vector<cv::DMatch> symmetric;
  robust_symmetry_test(knn_0to1, knn_1to0, symmetric);
  // Collect the raw pixel coordinates of the surviving correspondences
  std::vector<cv::Point2f> raw0, raw1;
  for (const auto &m : symmetric) {
    raw0.push_back(pts0[m.queryIdx].pt);
    raw1.push_back(pts1[m.trainIdx].pt);
  }
  // Not enough correspondences to run RANSAC -> return no matches
  if (raw0.size() < 10)
    return;
  // Undistort/normalize before RANSAC; the distortion mapping is nonlinear, so
  // running RANSAC on raw distorted pixels would be inconsistent
  std::vector<cv::Point2f> norm0, norm1;
  for (size_t k = 0; k < raw0.size(); k++) {
    norm0.push_back(camera_calib.at(id0)->undistort_cv(raw0.at(k)));
    norm1.push_back(camera_calib.at(id1)->undistort_cv(raw1.at(k)));
  }
  // Fundamental-matrix RANSAC; the one-pixel threshold is rescaled into normalized
  // coordinates by dividing by the largest focal length of the two cameras
  double focal0 = std::max(camera_calib.at(id0)->get_K()(0, 0), camera_calib.at(id0)->get_K()(1, 1));
  double focal1 = std::max(camera_calib.at(id1)->get_K()(0, 0), camera_calib.at(id1)->get_K()(1, 1));
  double focal_max = std::max(focal0, focal1);
  std::vector<uchar> inlier_mask;
  cv::findFundamentalMat(norm0, norm1, cv::FM_RANSAC, 1 / focal_max, 0.999, inlier_mask);
  // Return only the RANSAC inliers
  for (size_t k = 0; k < symmetric.size(); k++) {
    if (inlier_mask[k] == 1)
      matches.push_back(symmetric.at(k));
  }
}
// Lowe's ratio test: clear (reject) every kNN entry whose best match is not clearly
// better than its second-best, or which has fewer than two neighbours at all.
void TrackDescriptor::robust_ratio_test(std::vector<std::vector<cv::DMatch>> &matches) {
  for (auto &knn : matches) {
    // Without two neighbours the ratio is undefined -> reject
    if (knn.size() < 2) {
      knn.clear();
      continue;
    }
    // Ambiguous match: best and second-best distances are too similar -> reject
    if (knn[0].distance / knn[1].distance > knn_ratio) {
      knn.clear();
    }
  }
}
// Symmetry test: accept a correspondence only if the best 1->2 match and the best
// 2->1 match point back at each other. Entries cleared by the ratio test are skipped.
void TrackDescriptor::robust_symmetry_test(std::vector<std::vector<cv::DMatch>> &matches1, std::vector<std::vector<cv::DMatch>> &matches2,
                                           std::vector<cv::DMatch> &good_matches) {
  for (auto &forward : matches1) {
    // Skip entries removed by the ratio test (size < 2 covers empty too)
    if (forward.size() < 2)
      continue;
    for (auto &backward : matches2) {
      if (backward.size() < 2)
        continue;
      // Mutual best match: forward's pairing equals backward's pairing reversed
      if (forward[0].queryIdx == backward[0].trainIdx && backward[0].queryIdx == forward[0].trainIdx) {
        good_matches.emplace_back(cv::DMatch(forward[0].queryIdx, forward[0].trainIdx, forward[0].distance));
        // This forward match is settled; move on to the next one
        break;
      }
    }
  }
}

View File

@@ -0,0 +1,175 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_TRACK_DESC_H
#define OV_CORE_TRACK_DESC_H
#include <opencv2/features2d.hpp>
#include "TrackBase.h"
namespace ov_core {
/**
* @brief Descriptor-based visual tracking
*
* Here we use descriptor matching to track features from one frame to the next.
* We track both temporally, and across stereo pairs to get stereo constraints.
* Right now we use ORB descriptors as we have found it is the fastest when computing descriptors.
* Tracks are then rejected based on a ratio test and ransac.
*/
class TrackDescriptor : public TrackBase {
public:
/**
* @brief Public constructor with configuration variables
* @param cameras camera calibration object which has all camera intrinsics in it
 * @param numfeats number of features we want to track (i.e. track 200 points from frame to frame)
 * @param numaruco the max id of the arucotags, so we ensure that we start our non-aruco features above this value
* @param binocular if we should do binocular feature tracking or stereo if there are multiple cameras
* @param histmethod what type of histogram pre-processing should be done (histogram eq?)
* @param fast_threshold FAST detection threshold
* @param gridx size of grid in the x-direction / u-direction
* @param gridy size of grid in the y-direction / v-direction
* @param minpxdist features need to be at least this number pixels away from each other
* @param knnratio matching ratio needed (smaller value forces top two descriptors during match to be more different)
*/
explicit TrackDescriptor(std::unordered_map<size_t, std::shared_ptr<CamBase>> cameras, int numfeats, int numaruco, bool binocular,
HistogramMethod histmethod, int fast_threshold, int gridx, int gridy, int minpxdist, double knnratio)
: TrackBase(cameras, numfeats, numaruco, binocular, histmethod), threshold(fast_threshold), grid_x(gridx), grid_y(gridy),
min_px_dist(minpxdist), knn_ratio(knnratio) {}
/**
* @brief Process a new image
* @param message Contains our timestamp, images, and camera ids
*/
void feed_new_camera(const CameraData &message);
protected:
/**
* @brief Process a new monocular image
* @param message Contains our timestamp, images, and camera ids
* @param msg_id the camera index in message data vector
*/
void feed_monocular(const CameraData &message, size_t msg_id);
/**
* @brief Process new stereo pair of images
* @param message Contains our timestamp, images, and camera ids
* @param msg_id_left first image index in message data vector
* @param msg_id_right second image index in message data vector
*/
void feed_stereo(const CameraData &message, size_t msg_id_left, size_t msg_id_right);
/**
* @brief Detects new features in the current image
* @param img0 image we will detect features on
* @param mask0 mask which has what ROI we do not want features in
* @param pts0 vector of extracted keypoints
* @param desc0 vector of the extracted descriptors
* @param ids0 vector of all new IDs
*
* Given a set of images, and their currently extracted features, this will try to add new features.
* We return all extracted descriptors here since we DO NOT need to do stereo tracking left to right.
* Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features.
* See robust_match() for the matching.
*/
void perform_detection_monocular(const cv::Mat &img0, const cv::Mat &mask0, std::vector<cv::KeyPoint> &pts0, cv::Mat &desc0,
std::vector<size_t> &ids0);
/**
* @brief Detects new features in the current stereo pair
* @param img0 left image we will detect features on
* @param img1 right image we will detect features on
* @param mask0 mask which has what ROI we do not want features in
* @param mask1 mask which has what ROI we do not want features in
* @param pts0 left vector of new keypoints
* @param pts1 right vector of new keypoints
* @param desc0 left vector of extracted descriptors
 * @param desc1 right vector of extracted descriptors
* @param cam_id0 id of the first camera
* @param cam_id1 id of the second camera
* @param ids0 left vector of all new IDs
* @param ids1 right vector of all new IDs
*
 * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo constraints.
* We also do STEREO matching from the left to right, and only return good matches that are found in both the left and right.
* Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features.
* See robust_match() for the matching.
*/
void perform_detection_stereo(const cv::Mat &img0, const cv::Mat &img1, const cv::Mat &mask0, const cv::Mat &mask1,
std::vector<cv::KeyPoint> &pts0, std::vector<cv::KeyPoint> &pts1, cv::Mat &desc0, cv::Mat &desc1,
size_t cam_id0, size_t cam_id1, std::vector<size_t> &ids0, std::vector<size_t> &ids1);
/**
* @brief Find matches between two keypoint+descriptor sets.
* @param pts0 first vector of keypoints
* @param pts1 second vector of keypoints
* @param desc0 first vector of descriptors
* @param desc1 second vector of descriptors
* @param id0 id of the first camera
* @param id1 id of the second camera
* @param matches vector of matches that we have found
*
* This will perform a "robust match" between the two sets of points (slow but has great results).
* First we do a simple KNN match from 1to2 and 2to1, which is followed by a ratio check and symmetry check.
* Original code is from the "RobustMatcher" in the opencv examples, and seems to give very good results in the matches.
* https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp
*/
void robust_match(std::vector<cv::KeyPoint> &pts0, std::vector<cv::KeyPoint> pts1, cv::Mat &desc0, cv::Mat &desc1, size_t id0, size_t id1,
std::vector<cv::DMatch> &matches);
// Helper functions for the robust_match function
// Original code is from the "RobustMatcher" in the opencv examples
// https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp
void robust_ratio_test(std::vector<std::vector<cv::DMatch>> &matches);
void robust_symmetry_test(std::vector<std::vector<cv::DMatch>> &matches1, std::vector<std::vector<cv::DMatch>> &matches2,
std::vector<cv::DMatch> &good_matches);
// Timing variables
boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7;
// Our orb extractor
cv::Ptr<cv::ORB> orb0 = cv::ORB::create();
cv::Ptr<cv::ORB> orb1 = cv::ORB::create();
// Our descriptor matcher
cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
// Parameters for our FAST grid detector
int threshold;
int grid_x;
int grid_y;
// Minimum pixel distance to be "far away enough" to be a different extracted feature
int min_px_dist;
// The ratio between two kNN matches, if that ratio is larger then this threshold
// then the two features are too close, so should be considered ambiguous/bad match
double knn_ratio;
// Descriptor matrices
std::unordered_map<size_t, cv::Mat> desc_last;
};
} // namespace ov_core
#endif /* OV_CORE_TRACK_DESC_H */

View File

@@ -0,0 +1,729 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "TrackKLT.h"
#include "utils/print.h"
using namespace ov_core;
void TrackKLT::feed_new_camera(const CameraData &message) {
// Error check that we have all the data
if (message.sensor_ids.empty() || message.sensor_ids.size() != message.images.size() || message.images.size() != message.masks.size()) {
PRINT_ERROR(RED "[ERROR]: MESSAGE DATA SIZES DO NOT MATCH OR EMPTY!!!\n" RESET);
PRINT_ERROR(RED "[ERROR]: - message.sensor_ids.size() = %zu\n" RESET, message.sensor_ids.size());
PRINT_ERROR(RED "[ERROR]: - message.images.size() = %zu\n" RESET, message.images.size());
PRINT_ERROR(RED "[ERROR]: - message.masks.size() = %zu\n" RESET, message.masks.size());
std::exit(EXIT_FAILURE);
}
// Either call our stereo or monocular version
// If we are doing binocular tracking, then we should parallize our tracking
size_t num_images = message.images.size();
if (num_images == 1) {
feed_monocular(message, 0);
} else if (num_images == 2 && use_stereo) {
feed_stereo(message, 0, 1);
} else if (!use_stereo) {
parallel_for_(cv::Range(0, (int)num_images), LambdaBody([&](const cv::Range &range) {
for (int i = range.start; i < range.end; i++) {
feed_monocular(message, i);
}
}));
} else {
PRINT_ERROR(RED "[ERROR]: invalid number of images passed %zu, we only support mono or stereo tracking", num_images);
std::exit(EXIT_FAILURE);
}
}
void TrackKLT::feed_monocular(const CameraData &message, size_t msg_id) {

  // Process one monocular camera: pre-process the image, "top-off" the feature set
  // on the *previous* frame, KLT-track those features into the new frame, reject
  // failures, and record all surviving tracks into the feature database.

  // Start timing
  rT1 = boost::posix_time::microsec_clock::local_time();

  // Lock this data feed for this camera
  size_t cam_id = message.sensor_ids.at(msg_id);
  std::lock_guard<std::mutex> lck(mtx_feeds.at(cam_id));

  // Histogram equalize (global equalization, CLAHE, or passthrough)
  cv::Mat img, mask;
  if (histogram_method == HistogramMethod::HISTOGRAM) {
    cv::equalizeHist(message.images.at(msg_id), img);
  } else if (histogram_method == HistogramMethod::CLAHE) {
    double eq_clip_limit = 10.0;
    cv::Size eq_win_size = cv::Size(8, 8);
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
    clahe->apply(message.images.at(msg_id), img);
  } else {
    img = message.images.at(msg_id);
  }
  mask = message.masks.at(msg_id);

  // Extract the new image pyramid
  std::vector<cv::Mat> imgpyr;
  cv::buildOpticalFlowPyramid(img, imgpyr, win_size, pyr_levels);
  rT2 = boost::posix_time::microsec_clock::local_time();

  // If we didn't have any successful tracks last time, just extract this time
  // This also handles the tracking initialization on the first call to this extractor
  if (pts_last[cam_id].empty()) {
    // Detect new features
    perform_detection_monocular(imgpyr, mask, pts_last[cam_id], ids_last[cam_id]);
    // Save the current image and pyramid
    img_last[cam_id] = img;
    img_pyramid_last[cam_id] = imgpyr;
    img_mask_last[cam_id] = mask;
    return;
  }

  // First we should make sure that the last image has enough features so we can do KLT
  // This will "top-off" our number of tracks so we always have a constant number
  // NOTE: detection runs on the *previous* pyramid so the new points can be tracked forward below
  perform_detection_monocular(img_pyramid_last[cam_id], img_mask_last[cam_id], pts_last[cam_id], ids_last[cam_id]);
  rT3 = boost::posix_time::microsec_clock::local_time();

  // Our return success masks, and predicted new features
  // (seeding pts_left_new with the old positions gives KLT its initial-flow guess)
  std::vector<uchar> mask_ll;
  std::vector<cv::KeyPoint> pts_left_new = pts_last[cam_id];

  // Lets track temporally
  perform_matching(img_pyramid_last[cam_id], imgpyr, pts_last[cam_id], pts_left_new, cam_id, cam_id, mask_ll);
  assert(pts_left_new.size() == ids_last[cam_id].size());
  rT4 = boost::posix_time::microsec_clock::local_time();

  // If our mask is empty, that means we didn't have enough to do ransac, so just reset and return
  if (mask_ll.empty()) {
    img_last[cam_id] = img;
    img_pyramid_last[cam_id] = imgpyr;
    img_mask_last[cam_id] = mask;
    pts_last[cam_id].clear();
    ids_last[cam_id].clear();
    PRINT_ERROR(RED "[KLT-EXTRACTOR]: Failed to get enough points to do RANSAC, resetting.....\n" RESET);
    return;
  }

  // Get our "good tracks"
  std::vector<cv::KeyPoint> good_left;
  std::vector<size_t> good_ids_left;

  // Loop through all left points
  for (size_t i = 0; i < pts_left_new.size(); i++) {
    // Ensure we do not have any bad KLT tracks (i.e., points are negative or out of the image)
    if (pts_left_new.at(i).pt.x < 0 || pts_left_new.at(i).pt.y < 0 || (int)pts_left_new.at(i).pt.x >= img.cols ||
        (int)pts_left_new.at(i).pt.y >= img.rows)
      continue;
    // Check if it is in the mask
    // NOTE: mask has max value of 255 (white) if the pixel should be rejected
    if ((int)message.masks.at(msg_id).at<uint8_t>((int)pts_left_new.at(i).pt.y, (int)pts_left_new.at(i).pt.x) > 127)
      continue;
    // If it is a good track (passed KLT and RANSAC), keep it
    if (mask_ll[i]) {
      good_left.push_back(pts_left_new[i]);
      good_ids_left.push_back(ids_last[cam_id][i]);
    }
  }

  // Update our feature database, with these new observations (raw uv + normalized uv)
  for (size_t i = 0; i < good_left.size(); i++) {
    cv::Point2f npt_l = camera_calib.at(cam_id)->undistort_cv(good_left.at(i).pt);
    database->update_feature(good_ids_left.at(i), message.timestamp, cam_id, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x, npt_l.y);
  }

  // Move forward in time
  img_last[cam_id] = img;
  img_pyramid_last[cam_id] = imgpyr;
  img_mask_last[cam_id] = mask;
  pts_last[cam_id] = good_left;
  ids_last[cam_id] = good_ids_left;
  rT5 = boost::posix_time::microsec_clock::local_time();

  // Timing information
  // PRINT_DEBUG("[TIME-KLT]: %.4f seconds for pyramid\n", (rT2 - rT1).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-KLT]: %.4f seconds for detection\n", (rT3 - rT2).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-KLT]: %.4f seconds for temporal klt\n", (rT4 - rT3).total_microseconds() * 1e-6);
  // PRINT_DEBUG("[TIME-KLT]: %.4f seconds for feature DB update (%d features)\n", (rT5 - rT4).total_microseconds() * 1e-6,
  // (int)good_left.size());
  // PRINT_DEBUG("[TIME-KLT]: %.4f seconds for total\n", (rT5 - rT1).total_microseconds() * 1e-6);
}
void TrackKLT::feed_stereo(const CameraData &message, size_t msg_id_left, size_t msg_id_right) {
// Start timing
rT1 = boost::posix_time::microsec_clock::local_time();
// Lock this data feed for this camera
size_t cam_id_left = message.sensor_ids.at(msg_id_left);
size_t cam_id_right = message.sensor_ids.at(msg_id_right);
std::lock_guard<std::mutex> lck1(mtx_feeds.at(cam_id_left));
std::lock_guard<std::mutex> lck2(mtx_feeds.at(cam_id_right));
// Histogram equalize images
cv::Mat img_left, img_right, mask_left, mask_right;
if (histogram_method == HistogramMethod::HISTOGRAM) {
cv::equalizeHist(message.images.at(msg_id_left), img_left);
cv::equalizeHist(message.images.at(msg_id_right), img_right);
} else if (histogram_method == HistogramMethod::CLAHE) {
double eq_clip_limit = 10.0;
cv::Size eq_win_size = cv::Size(8, 8);
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
clahe->apply(message.images.at(msg_id_left), img_left);
clahe->apply(message.images.at(msg_id_right), img_right);
} else {
img_left = message.images.at(msg_id_left);
img_right = message.images.at(msg_id_right);
}
mask_left = message.masks.at(msg_id_left);
mask_right = message.masks.at(msg_id_right);
// Extract image pyramids
std::vector<cv::Mat> imgpyr_left, imgpyr_right;
parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) {
for (int i = range.start; i < range.end; i++) {
bool is_left = (i == 0);
cv::buildOpticalFlowPyramid(is_left ? img_left : img_right, is_left ? imgpyr_left : imgpyr_right, win_size, pyr_levels);
}
}));
rT2 = boost::posix_time::microsec_clock::local_time();
// If we didn't have any successful tracks last time, just extract this time
// This also handles, the tracking initalization on the first call to this extractor
if (pts_last[cam_id_left].empty() && pts_last[cam_id_right].empty()) {
// Track into the new image
perform_detection_stereo(imgpyr_left, imgpyr_right, mask_left, mask_right, cam_id_left, cam_id_right, pts_last[cam_id_left],
pts_last[cam_id_right], ids_last[cam_id_left], ids_last[cam_id_right]);
// Save the current image and pyramid
img_last[cam_id_left] = img_left;
img_last[cam_id_right] = img_right;
img_pyramid_last[cam_id_left] = imgpyr_left;
img_pyramid_last[cam_id_right] = imgpyr_right;
img_mask_last[cam_id_left] = mask_left;
img_mask_last[cam_id_right] = mask_right;
return;
}
// First we should make that the last images have enough features so we can do KLT
// This will "top-off" our number of tracks so always have a constant number
perform_detection_stereo(img_pyramid_last[cam_id_left], img_pyramid_last[cam_id_right], img_mask_last[cam_id_left],
img_mask_last[cam_id_right], cam_id_left, cam_id_right, pts_last[cam_id_left], pts_last[cam_id_right],
ids_last[cam_id_left], ids_last[cam_id_right]);
rT3 = boost::posix_time::microsec_clock::local_time();
// Our return success masks, and predicted new features
std::vector<uchar> mask_ll, mask_rr;
std::vector<cv::KeyPoint> pts_left_new = pts_last[cam_id_left];
std::vector<cv::KeyPoint> pts_right_new = pts_last[cam_id_right];
// Lets track temporally
parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) {
for (int i = range.start; i < range.end; i++) {
bool is_left = (i == 0);
perform_matching(img_pyramid_last[is_left ? cam_id_left : cam_id_right], is_left ? imgpyr_left : imgpyr_right,
pts_last[is_left ? cam_id_left : cam_id_right], is_left ? pts_left_new : pts_right_new,
is_left ? cam_id_left : cam_id_right, is_left ? cam_id_left : cam_id_right,
is_left ? mask_ll : mask_rr);
}
}));
rT4 = boost::posix_time::microsec_clock::local_time();
//===================================================================================
//===================================================================================
// left to right matching
// TODO: we should probably still do this to reject outliers
// TODO: maybe we should collect all tracks that are in both frames and make they pass this?
// std::vector<uchar> mask_lr;
// perform_matching(imgpyr_left, imgpyr_right, pts_left_new, pts_right_new, cam_id_left, cam_id_right, mask_lr);
rT5 = boost::posix_time::microsec_clock::local_time();
//===================================================================================
//===================================================================================
// If any of our masks are empty, that means we didn't have enough to do ransac, so just return
if (mask_ll.empty() && mask_rr.empty()) {
img_last[cam_id_left] = img_left;
img_last[cam_id_right] = img_right;
img_pyramid_last[cam_id_left] = imgpyr_left;
img_pyramid_last[cam_id_right] = imgpyr_right;
img_mask_last[cam_id_left] = mask_left;
img_mask_last[cam_id_right] = mask_right;
pts_last[cam_id_left].clear();
pts_last[cam_id_right].clear();
ids_last[cam_id_left].clear();
ids_last[cam_id_right].clear();
PRINT_ERROR(RED "[KLT-EXTRACTOR]: Failed to get enough points to do RANSAC, resetting.....\n" RESET);
return;
}
// Get our "good tracks"
std::vector<cv::KeyPoint> good_left, good_right;
std::vector<size_t> good_ids_left, good_ids_right;
// Loop through all left points
for (size_t i = 0; i < pts_left_new.size(); i++) {
// Ensure we do not have any bad KLT tracks (i.e., points are negative)
if (pts_left_new.at(i).pt.x < 0 || pts_left_new.at(i).pt.y < 0 || (int)pts_left_new.at(i).pt.x > img_left.cols ||
(int)pts_left_new.at(i).pt.y > img_left.rows)
continue;
// See if we have the same feature in the right
bool found_right = false;
size_t index_right = 0;
for (size_t n = 0; n < ids_last[cam_id_right].size(); n++) {
if (ids_last[cam_id_left].at(i) == ids_last[cam_id_right].at(n)) {
found_right = true;
index_right = n;
break;
}
}
// If it is a good track, and also tracked from left to right
// Else track it as a mono feature in just the left image
if (mask_ll[i] && found_right && mask_rr[index_right]) {
// Ensure we do not have any bad KLT tracks (i.e., points are negative)
if (pts_right_new.at(index_right).pt.x < 0 || pts_right_new.at(index_right).pt.y < 0 ||
(int)pts_right_new.at(index_right).pt.x >= img_right.cols || (int)pts_right_new.at(index_right).pt.y >= img_right.rows)
continue;
good_left.push_back(pts_left_new.at(i));
good_right.push_back(pts_right_new.at(index_right));
good_ids_left.push_back(ids_last[cam_id_left].at(i));
good_ids_right.push_back(ids_last[cam_id_right].at(index_right));
// PRINT_DEBUG("adding to stereo - %u , %u\n", ids_last[cam_id_left].at(i), ids_last[cam_id_right].at(index_right));
} else if (mask_ll[i]) {
good_left.push_back(pts_left_new.at(i));
good_ids_left.push_back(ids_last[cam_id_left].at(i));
// PRINT_DEBUG("adding to left - %u \n", ids_last[cam_id_left].at(i));
}
}
// Loop through all right points
for (size_t i = 0; i < pts_right_new.size(); i++) {
// Ensure we do not have any bad KLT tracks (i.e., points are negative)
if (pts_right_new.at(i).pt.x < 0 || pts_right_new.at(i).pt.y < 0 || (int)pts_right_new.at(i).pt.x >= img_right.cols ||
(int)pts_right_new.at(i).pt.y >= img_right.rows)
continue;
// See if we have the same feature in the right
bool added_already = (std::find(good_ids_right.begin(), good_ids_right.end(), ids_last[cam_id_right].at(i)) != good_ids_right.end());
// If it has not already been added as a good feature, add it as a mono track
if (mask_rr[i] && !added_already) {
good_right.push_back(pts_right_new.at(i));
good_ids_right.push_back(ids_last[cam_id_right].at(i));
// PRINT_DEBUG("adding to right - %u \n", ids_last[cam_id_right].at(i));
}
}
// Update our feature database, with theses new observations
for (size_t i = 0; i < good_left.size(); i++) {
cv::Point2f npt_l = camera_calib.at(cam_id_left)->undistort_cv(good_left.at(i).pt);
database->update_feature(good_ids_left.at(i), message.timestamp, cam_id_left, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x,
npt_l.y);
}
for (size_t i = 0; i < good_right.size(); i++) {
cv::Point2f npt_r = camera_calib.at(cam_id_right)->undistort_cv(good_right.at(i).pt);
database->update_feature(good_ids_right.at(i), message.timestamp, cam_id_right, good_right.at(i).pt.x, good_right.at(i).pt.y, npt_r.x,
npt_r.y);
}
// Move forward in time
img_last[cam_id_left] = img_left;
img_last[cam_id_right] = img_right;
img_pyramid_last[cam_id_left] = imgpyr_left;
img_pyramid_last[cam_id_right] = imgpyr_right;
img_mask_last[cam_id_left] = mask_left;
img_mask_last[cam_id_right] = mask_right;
pts_last[cam_id_left] = good_left;
pts_last[cam_id_right] = good_right;
ids_last[cam_id_left] = good_ids_left;
ids_last[cam_id_right] = good_ids_right;
rT6 = boost::posix_time::microsec_clock::local_time();
// Timing information
// PRINT_DEBUG("[TIME-KLT]: %.4f seconds for pyramid\n",(rT2-rT1).total_microseconds() * 1e-6);
// PRINT_DEBUG("[TIME-KLT]: %.4f seconds for detection\n",(rT3-rT2).total_microseconds() * 1e-6);
// PRINT_DEBUG("[TIME-KLT]: %.4f seconds for temporal klt\n",(rT4-rT3).total_microseconds() * 1e-6);
// PRINT_DEBUG("[TIME-KLT]: %.4f seconds for stereo klt\n",(rT5-rT4).total_microseconds() * 1e-6);
// PRINT_DEBUG("[TIME-KLT]: %.4f seconds for feature DB update (%d features)\n",(rT6-rT5).total_microseconds() * 1e-6,
// (int)good_left.size()); PRINT_DEBUG("[TIME-KLT]: %.4f seconds for total\n",(rT6-rT1).total_microseconds() * 1e-6);
}
void TrackKLT::perform_detection_monocular(const std::vector<cv::Mat> &img0pyr, const cv::Mat &mask0, std::vector<cv::KeyPoint> &pts0,
                                           std::vector<size_t> &ids0) {

  // Create a 2D occupancy grid for this current image
  // Note that we scale this down, so that each grid cell covers min_px_dist pixels
  // This means that we will reject new points that are less than min_px_dist away from existing features
  cv::Size size((int)((float)img0pyr.at(0).cols / (float)min_px_dist), (int)((float)img0pyr.at(0).rows / (float)min_px_dist));
  cv::Mat grid_2d = cv::Mat::zeros(size, CV_8UC1);
  cv::Mat mask0_updated = mask0.clone();

  // Prune the existing features: drop any that are out of bounds, duplicated within
  // a grid cell, or inside the masked-out region. The two iterators are advanced
  // (or erased) in lockstep so pts0 and ids0 stay aligned.
  auto it0 = pts0.begin();
  auto it1 = ids0.begin();
  while (it0 != pts0.end()) {
    // Get current left keypoint, check that it is in bounds
    cv::KeyPoint kpt = *it0;
    int x = (int)kpt.pt.x;
    int y = (int)kpt.pt.y;
    int x_grid = (int)(kpt.pt.x / (float)min_px_dist);
    int y_grid = (int)(kpt.pt.y / (float)min_px_dist);
    if (x_grid < 0 || x_grid >= size.width || y_grid < 0 || y_grid >= size.height || x < 0 || x >= img0pyr.at(0).cols || y < 0 ||
        y >= img0pyr.at(0).rows) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Check if this keypoint is near another point (its grid cell is already claimed)
    if (grid_2d.at<uint8_t>(y_grid, x_grid) > 127) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Now check if it is in a mask area or not
    // NOTE: mask has max value of 255 (white) if the pixel should be rejected
    if (mask0.at<uint8_t>(y, x) > 127) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Else we are good, claim this grid cell and move forward to the next point
    grid_2d.at<uint8_t>(y_grid, x_grid) = 255;
    // Append this to the local mask of the image so the detector below will not
    // re-extract a feature right next to this existing track
    // NOTE: the rectangle is only drawn when it fits fully inside the image
    if (x - min_px_dist >= 0 && x + min_px_dist < img0pyr.at(0).cols && y - min_px_dist >= 0 && y + min_px_dist < img0pyr.at(0).rows) {
      cv::Point pt1(x - min_px_dist, y - min_px_dist);
      cv::Point pt2(x + min_px_dist, y + min_px_dist);
      cv::rectangle(mask0_updated, pt1, pt2, cv::Scalar(255));
    }
    it0++;
    it1++;
  }

  // First compute how many more features we need to extract from this image
  int num_featsneeded = num_features - (int)pts0.size();

  // If we only need a few features, just return (avoids churn from tiny top-offs)
  if (num_featsneeded < std::min(75, (int)(0.2 * num_features)))
    return;

  // Extract our features (use fast with griding)
  std::vector<cv::KeyPoint> pts0_ext;
  Grider_FAST::perform_griding(img0pyr.at(0), mask0_updated, pts0_ext, num_features, grid_x, grid_y, threshold, true);

  // Now, reject extracted features that are close to a current feature
  std::vector<cv::KeyPoint> kpts0_new;
  std::vector<cv::Point2f> pts0_new;
  for (auto &kpt : pts0_ext) {
    // Check that it is in bounds
    int x_grid = (int)(kpt.pt.x / (float)min_px_dist);
    int y_grid = (int)(kpt.pt.y / (float)min_px_dist);
    if (x_grid < 0 || x_grid >= size.width || y_grid < 0 || y_grid >= size.height)
      continue;
    // See if there is a point at this location
    if (grid_2d.at<uint8_t>(y_grid, x_grid) > 127)
      continue;
    // Else lets add it!
    kpts0_new.push_back(kpt);
    pts0_new.push_back(kpt.pt);
    grid_2d.at<uint8_t>(y_grid, x_grid) = 255;
  }

  // Loop through and record only ones that are valid
  // NOTE: if we multi-thread this atomic can cause some randomness due to multiple thread detecting features
  // NOTE: this is due to the fact that we select update features based on feat id
  // NOTE: thus the order will matter since we try to select oldest (smallest id) to update with
  // NOTE: not sure how to remove... maybe a better way?
  for (size_t i = 0; i < pts0_new.size(); i++) {
    // update the uv coordinates
    kpts0_new.at(i).pt = pts0_new.at(i);
    // append the new uv coordinate
    pts0.push_back(kpts0_new.at(i));
    // move id foward and append this new point
    size_t temp = ++currid;
    ids0.push_back(temp);
  }
}
void TrackKLT::perform_detection_stereo(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, const cv::Mat &mask0,
                                        const cv::Mat &mask1, size_t cam_id_left, size_t cam_id_right, std::vector<cv::KeyPoint> &pts0,
                                        std::vector<cv::KeyPoint> &pts1, std::vector<size_t> &ids0, std::vector<size_t> &ids1) {

  // Top-off the stereo feature set: prune the left features, extract new left
  // features, KLT-track those into the right image to form stereo pairs, then
  // prune the right features and extract monocular right-only features as needed.

  // Create a 2D occupancy grid for this current image
  // Note that we scale this down, so that each grid cell covers min_px_dist pixels
  // This means that we will reject new points that are less than min_px_dist away from existing features
  cv::Size size0((int)((float)img0pyr.at(0).cols / (float)min_px_dist), (int)((float)img0pyr.at(0).rows / (float)min_px_dist));
  cv::Mat grid_2d_0 = cv::Mat::zeros(size0, CV_8UC1);
  cv::Mat mask0_updated = mask0.clone();

  // LEFT: prune existing left features that are out of bounds, duplicated within
  // a grid cell, or masked out (iterators advanced in lockstep to keep pts0/ids0 aligned)
  auto it0 = pts0.begin();
  auto it1 = ids0.begin();
  while (it0 != pts0.end()) {
    // Get current left keypoint, check that it is in bounds
    cv::KeyPoint kpt = *it0;
    int x = (int)kpt.pt.x;
    int y = (int)kpt.pt.y;
    int x_grid = (int)(kpt.pt.x / (float)min_px_dist);
    int y_grid = (int)(kpt.pt.y / (float)min_px_dist);
    if (x_grid < 0 || x_grid >= size0.width || y_grid < 0 || y_grid >= size0.height || x < 0 || x >= img0pyr.at(0).cols || y < 0 ||
        y >= img0pyr.at(0).rows) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Check if this keypoint is near another point (its grid cell is already claimed)
    if (grid_2d_0.at<uint8_t>(y_grid, x_grid) > 127) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Now check if it is in a mask area or not
    // NOTE: mask has max value of 255 (white) if the pixel should be rejected
    if (mask0.at<uint8_t>(y, x) > 127) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Else we are good, claim this grid cell and move forward to the next point
    grid_2d_0.at<uint8_t>(y_grid, x_grid) = 255;
    // Append this to the local mask of the image so the detector below will not
    // re-extract a feature right next to this existing track
    if (x - min_px_dist >= 0 && x + min_px_dist < img0pyr.at(0).cols && y - min_px_dist >= 0 && y + min_px_dist < img0pyr.at(0).rows) {
      cv::Point pt1(x - min_px_dist, y - min_px_dist);
      cv::Point pt2(x + min_px_dist, y + min_px_dist);
      cv::rectangle(mask0_updated, pt1, pt2, cv::Scalar(255));
    }
    it0++;
    it1++;
  }

  // First compute how many more features we need to extract from this image
  int num_featsneeded_0 = num_features - (int)pts0.size();

  // LEFT: if we need features we should extract them in the current frame
  // LEFT: we will also try to track them from this frame over to the right frame
  // LEFT: in the case that we have two features that are the same, then we should merge them
  if (num_featsneeded_0 > std::min(75, (int)(0.2 * num_features))) {

    // Extract our features (use fast with griding)
    std::vector<cv::KeyPoint> pts0_ext;
    Grider_FAST::perform_griding(img0pyr.at(0), mask0_updated, pts0_ext, num_features, grid_x, grid_y, threshold, true);

    // Now, reject extracted features that are close to a current feature
    std::vector<cv::KeyPoint> kpts0_new;
    std::vector<cv::Point2f> pts0_new;
    for (auto &kpt : pts0_ext) {
      // Check that it is in bounds
      int x_grid = (int)(kpt.pt.x / (float)min_px_dist);
      int y_grid = (int)(kpt.pt.y / (float)min_px_dist);
      if (x_grid < 0 || x_grid >= size0.width || y_grid < 0 || y_grid >= size0.height)
        continue;
      // See if there is a point at this location
      if (grid_2d_0.at<uint8_t>(y_grid, x_grid) > 127)
        continue;
      // Else lets add it!
      grid_2d_0.at<uint8_t>(y_grid, x_grid) = 255;
      kpts0_new.push_back(kpt);
      pts0_new.push_back(kpt.pt);
    }

    // TODO: Project points from the left frame into the right frame
    // TODO: This will not work for large baseline systems.....
    // TODO: If we had some depth estimates we could do a better projection
    // TODO: Or project and search along the epipolar line??
    // NOTE: as a stand-in, the left pixel locations are used as the initial guess
    // NOTE: for the left->right KLT track below (assumes a small stereo baseline)
    std::vector<cv::KeyPoint> kpts1_new;
    std::vector<cv::Point2f> pts1_new;
    kpts1_new = kpts0_new;
    pts1_new = pts0_new;

    // If we have points, do KLT tracking to get the valid projections into the right image
    if (!pts0_new.empty()) {

      // Do our KLT tracking from the left to the right frame of reference
      // Note: we have a pretty big window size here since our projection might be bad
      // Note: but this might cause failure in cases of repeated textures (eg. checkerboard)
      std::vector<uchar> mask;
      // perform_matching(img0pyr, img1pyr, kpts0_new, kpts1_new, cam_id_left, cam_id_right, mask);
      std::vector<float> error;
      cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 0.01);
      cv::calcOpticalFlowPyrLK(img0pyr, img1pyr, pts0_new, pts1_new, mask, error, win_size, pyr_levels, term_crit,
                               cv::OPTFLOW_USE_INITIAL_FLOW);

      // Loop through and record only ones that are valid
      for (size_t i = 0; i < pts0_new.size(); i++) {

        // Check that the left point is in bounds
        if ((int)pts0_new.at(i).x < 0 || (int)pts0_new.at(i).x >= img0pyr.at(0).cols || (int)pts0_new.at(i).y < 0 ||
            (int)pts0_new.at(i).y >= img0pyr.at(0).rows) {
          continue;
        }
        // Check that the tracked right point is in bounds
        if ((int)pts1_new.at(i).x < 0 || (int)pts1_new.at(i).x >= img1pyr.at(0).cols || (int)pts1_new.at(i).y < 0 ||
            (int)pts1_new.at(i).y >= img1pyr.at(0).rows) {
          continue;
        }

        // Check to see if it there is already a feature in the right image at this location
        // 1) If this is not already in the right image, then we should treat it as a stereo
        // 2) Otherwise we will treat this as just a monocular track of the feature
        // TODO: we should check to see if we can combine this new feature and the one in the right
        // TODO: seems if reject features which overlay with right features already we have very poor tracking perf
        if (mask[i] == 1) {
          // Stereo: the left->right track succeeded, so insert the pair under one shared id
          // update the uv coordinates
          kpts0_new.at(i).pt = pts0_new.at(i);
          kpts1_new.at(i).pt = pts1_new.at(i);
          // append the new uv coordinate
          pts0.push_back(kpts0_new.at(i));
          pts1.push_back(kpts1_new.at(i));
          // move id forward and append this new point
          size_t temp = ++currid;
          ids0.push_back(temp);
          ids1.push_back(temp);
        } else {
          // Monocular: KLT into the right image failed, keep it as a left-only track
          // update the uv coordinates
          kpts0_new.at(i).pt = pts0_new.at(i);
          // append the new uv coordinate
          pts0.push_back(kpts0_new.at(i));
          // move id forward and append this new point
          size_t temp = ++currid;
          ids0.push_back(temp);
        }
      }
    }
  }

  // RIGHT: Now summarise the number of tracks in the right image
  // RIGHT: We will try to extract some monocular features if we have the room
  // RIGHT: This will also remove features if there are multiple in the same location
  cv::Size size1((int)((float)img1pyr.at(0).cols / (float)min_px_dist), (int)((float)img1pyr.at(0).rows / (float)min_px_dist));
  cv::Mat grid_2d_1 = cv::Mat::zeros(size1, CV_8UC1);
  it0 = pts1.begin();
  it1 = ids1.begin();
  while (it0 != pts1.end()) {
    // Get current right keypoint, check that it is in bounds
    cv::KeyPoint kpt = *it0;
    int x = (int)kpt.pt.x;
    int y = (int)kpt.pt.y;
    int x_grid = (int)(kpt.pt.x / (float)min_px_dist);
    int y_grid = (int)(kpt.pt.y / (float)min_px_dist);
    if (x_grid < 0 || x_grid >= size1.width || y_grid < 0 || y_grid >= size1.height || x < 0 || x >= img1pyr.at(0).cols || y < 0 ||
        y >= img1pyr.at(0).rows) {
      it0 = pts1.erase(it0);
      it1 = ids1.erase(it1);
      continue;
    }
    // Check if this is a stereo point (its id also appears in the left-camera id list)
    bool is_stereo = (std::find(ids0.begin(), ids0.end(), *it1) != ids0.end());
    // Check if this keypoint is near another point
    // NOTE: if it is *not* a stereo point, then we will not delete the feature
    // NOTE: this means we might have a mono and stereo feature near each other, but that is ok
    if (grid_2d_1.at<uint8_t>(y_grid, x_grid) > 127 && !is_stereo) {
      it0 = pts1.erase(it0);
      it1 = ids1.erase(it1);
      continue;
    }
    // Now check if it is in a mask area or not
    // NOTE: mask has max value of 255 (white) if the pixel should be rejected
    if (mask1.at<uint8_t>(y, x) > 127) {
      it0 = pts1.erase(it0);
      it1 = ids1.erase(it1);
      continue;
    }
    // Else we are good, claim this grid cell and move forward to the next point
    grid_2d_1.at<uint8_t>(y_grid, x_grid) = 255;
    it0++;
    it1++;
  }

  // RIGHT: if we need features we should extract them in the current frame
  // RIGHT: note that we don't track them to the left as we already did left->right tracking above
  int num_featsneeded_1 = num_features - (int)pts1.size();
  if (num_featsneeded_1 > std::min(75, (int)(0.2 * num_features))) {

    // Extract our features (use fast with griding)
    std::vector<cv::KeyPoint> pts1_ext;
    Grider_FAST::perform_griding(img1pyr.at(0), mask1, pts1_ext, num_features, grid_x, grid_y, threshold, true);

    // Now, reject extracted features that are close to a current feature
    for (auto &kpt : pts1_ext) {
      // Check that it is in bounds
      int x_grid = (int)(kpt.pt.x / (float)min_px_dist);
      int y_grid = (int)(kpt.pt.y / (float)min_px_dist);
      if (x_grid < 0 || x_grid >= size1.width || y_grid < 0 || y_grid >= size1.height)
        continue;
      // See if there is a point at this location
      if (grid_2d_1.at<uint8_t>(y_grid, x_grid) > 127)
        continue;
      // Else lets add it (right-only monocular track with a fresh id)!
      pts1.push_back(kpt);
      size_t temp = ++currid;
      ids1.push_back(temp);
      grid_2d_1.at<uint8_t>(y_grid, x_grid) = 255;
    }
  }
}
// KLT-track features from img0pyr into img1pyr, then reject outliers with
// fundamental-matrix RANSAC computed on the undistorted (normalized) coordinates.
// On return, mask_out has one entry per input point: 1 = valid track, 0 = rejected,
// and both keypoint vectors hold the KLT-refined positions.
void TrackKLT::perform_matching(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, std::vector<cv::KeyPoint> &kpts0,
                                std::vector<cv::KeyPoint> &kpts1, size_t id0, size_t id1, std::vector<uchar> &mask_out) {

  // We must have equal vectors (kpts1 is used as the initial guess for the KLT tracker)
  assert(kpts0.size() == kpts1.size());

  // Return if we don't have any points
  if (kpts0.empty() || kpts1.empty())
    return;

  // Convert keypoints into points (stupid opencv stuff)
  std::vector<cv::Point2f> pts0, pts1;
  for (size_t i = 0; i < kpts0.size(); i++) {
    pts0.push_back(kpts0.at(i).pt);
    pts1.push_back(kpts1.at(i).pt);
  }

  // If we don't have enough points for ransac just return empty
  // We set the mask to be all zeros since all points failed RANSAC
  if (pts0.size() < 10) {
    for (size_t i = 0; i < pts0.size(); i++)
      mask_out.push_back((uchar)0);
    return;
  }

  // Now do KLT tracking to get the valid new points
  std::vector<uchar> mask_klt;
  std::vector<float> error;
  cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 20, 0.01);
  cv::calcOpticalFlowPyrLK(img0pyr, img1pyr, pts0, pts1, mask_klt, error, win_size, pyr_levels, term_crit, cv::OPTFLOW_USE_INITIAL_FLOW);

  // Normalize these points, so we can then do ransac
  // We don't want to do ransac on distorted image uvs since the mapping is nonlinear
  std::vector<cv::Point2f> pts0_n, pts1_n;
  for (size_t i = 0; i < pts0.size(); i++) {
    pts0_n.push_back(camera_calib.at(id0)->undistort_cv(pts0.at(i)));
    pts1_n.push_back(camera_calib.at(id1)->undistort_cv(pts1.at(i)));
  }

  // Do RANSAC outlier rejection (note since we normalized the max pixel error is now in the normalized cords)
  // The inlier threshold is one raw pixel expressed in normalized coordinates (hence divide by focal length)
  std::vector<uchar> mask_rsc;
  double max_focallength_img0 = std::max(camera_calib.at(id0)->get_K()(0, 0), camera_calib.at(id0)->get_K()(1, 1));
  double max_focallength_img1 = std::max(camera_calib.at(id1)->get_K()(0, 0), camera_calib.at(id1)->get_K()(1, 1));
  double max_focallength = std::max(max_focallength_img0, max_focallength_img1);
  cv::findFundamentalMat(pts0_n, pts1_n, cv::FM_RANSAC, 1.0 / max_focallength, 0.999, mask_rsc);

  // Loop through and record only ones that are valid
  // NOTE: the loop bound already guarantees i < mask_klt.size(), so we only keep the
  // defensive bound on mask_rsc (in case RANSAC left the mask empty or short)
  for (size_t i = 0; i < mask_klt.size(); i++) {
    auto mask = (uchar)((mask_klt[i] && i < mask_rsc.size() && mask_rsc[i]) ? 1 : 0);
    mask_out.push_back(mask);
  }

  // Copy back the updated positions (KLT refined pts0/pts1 in place)
  for (size_t i = 0; i < pts0.size(); i++) {
    kpts0.at(i).pt = pts0.at(i);
    kpts1.at(i).pt = pts1.at(i);
  }
}

View File

@@ -0,0 +1,152 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_TRACK_KLT_H
#define OV_CORE_TRACK_KLT_H
#include "TrackBase.h"
namespace ov_core {
/**
* @brief KLT tracking of features.
*
* This is the implementation of a KLT visual frontend for tracking sparse features.
* We can track either monocular cameras across time (temporally) along with
* stereo cameras which we also track across time (temporally) but track from left to right
* to find the stereo correspondence information also.
* This uses the [calcOpticalFlowPyrLK](https://github.com/opencv/opencv/blob/master/modules/video/src/lkpyramid.cpp)
* OpenCV function to do the KLT tracking.
*/
class TrackKLT : public TrackBase {

public:
  /**
   * @brief Public constructor with configuration variables
   * @param cameras camera calibration object which has all camera intrinsics in it
   * @param numfeats number of features we want want to track (i.e. track 200 points from frame to frame)
   * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value
   * @param binocular if we should do binocular feature tracking or stereo if there are multiple cameras
   * @param histmethod what type of histogram pre-processing should be done (histogram eq?)
   * @param fast_threshold FAST detection threshold
   * @param gridx size of grid in the x-direction / u-direction
   * @param gridy size of grid in the y-direction / v-direction
   * @param minpxdist features need to be at least this number pixels away from each other
   */
  explicit TrackKLT(std::unordered_map<size_t, std::shared_ptr<CamBase>> cameras, int numfeats, int numaruco, bool binocular,
                    HistogramMethod histmethod, int fast_threshold, int gridx, int gridy, int minpxdist)
      : TrackBase(cameras, numfeats, numaruco, binocular, histmethod), threshold(fast_threshold), grid_x(gridx), grid_y(gridy),
        min_px_dist(minpxdist) {}

  /**
   * @brief Process a new image
   * @param message Contains our timestamp, images, and camera ids
   */
  void feed_new_camera(const CameraData &message);

protected:
  /**
   * @brief Process a new monocular image
   * @param message Contains our timestamp, images, and camera ids
   * @param msg_id the camera index in message data vector
   */
  void feed_monocular(const CameraData &message, size_t msg_id);

  /**
   * @brief Process new stereo pair of images
   * @param message Contains our timestamp, images, and camera ids
   * @param msg_id_left first image index in message data vector
   * @param msg_id_right second image index in message data vector
   */
  void feed_stereo(const CameraData &message, size_t msg_id_left, size_t msg_id_right);

  /**
   * @brief Detects new features in the current image
   * @param img0pyr image we will detect features on (first level of pyramid)
   * @param mask0 mask which has what ROI we do not want features in
   * @param pts0 vector of currently extracted keypoints in this image
   * @param ids0 vector of feature ids for each currently extracted keypoint
   *
   * Given an image and its currently extracted features, this will try to add new features if needed.
   * Will try to always have the "max_features" being tracked through KLT at each timestep.
   * Passed images should already be grayscaled.
   */
  void perform_detection_monocular(const std::vector<cv::Mat> &img0pyr, const cv::Mat &mask0, std::vector<cv::KeyPoint> &pts0,
                                   std::vector<size_t> &ids0);

  /**
   * @brief Detects new features in the current stereo pair
   * @param img0pyr left image we will detect features on (first level of pyramid)
   * @param img1pyr right image we will detect features on (first level of pyramid)
   * @param mask0 mask which has what ROI we do not want features in
   * @param mask1 mask which has what ROI we do not want features in
   * @param cam_id_left first camera sensor id
   * @param cam_id_right second camera sensor id
   * @param pts0 left vector of currently extracted keypoints
   * @param pts1 right vector of currently extracted keypoints
   * @param ids0 left vector of feature ids for each currently extracted keypoint
   * @param ids1 right vector of feature ids for each currently extracted keypoint
   *
   * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo contraints.
   * So we detect features in the left image, and then KLT track them onto the right image.
   * If we have valid tracks, then we have both the keypoint on the left and its matching point in the right image.
   * Will try to always have the "max_features" being tracked through KLT at each timestep.
   */
  void perform_detection_stereo(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, const cv::Mat &mask0,
                                const cv::Mat &mask1, size_t cam_id_left, size_t cam_id_right, std::vector<cv::KeyPoint> &pts0,
                                std::vector<cv::KeyPoint> &pts1, std::vector<size_t> &ids0, std::vector<size_t> &ids1);

  /**
   * @brief KLT track between two images, and do RANSAC afterwards
   * @param img0pyr starting image pyramid
   * @param img1pyr image pyramid we want to track too
   * @param pts0 starting points
   * @param pts1 points we have tracked
   * @param id0 id of the first camera
   * @param id1 id of the second camera
   * @param mask_out what points had valid tracks
   *
   * This will track features from the first image into the second image.
   * The two point vectors will be of equal size, but the mask_out variable will specify which points are good or bad.
   * If the second vector is non-empty, it will be used as an initial guess of where the keypoints are in the second image.
   */
  void perform_matching(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, std::vector<cv::KeyPoint> &pts0,
                        std::vector<cv::KeyPoint> &pts1, size_t id0, size_t id1, std::vector<uchar> &mask_out);

  // Parameters for our FAST grid detector
  int threshold;
  int grid_x;
  int grid_y;

  // Minimum pixel distance to be "far away enough" to be a different extracted feature
  int min_px_dist;

  // How many pyramid levels to track
  int pyr_levels = 3;

  // Window size (in raw pixels) used by the KLT optical flow tracker
  cv::Size win_size = cv::Size(20, 20);

  // Last set of image pyramids (keyed by camera id, reused for temporal tracking)
  std::map<size_t, std::vector<cv::Mat>> img_pyramid_last;
};
} // namespace ov_core
#endif /* OV_CORE_TRACK_KLT_H */

View File

@@ -0,0 +1,72 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "TrackSIM.h"
using namespace ov_core;
// Ingest one synchronized set of simulated uv measurements and append them
// to the feature database as if they had been tracked from real images.
void TrackSIM::feed_measurement_simulation(double timestamp, const std::vector<int> &camids,
                                           const std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats) {

  // Each camera id must have a matching set of measurements
  assert(camids.size() == feats.size());

  // Handle each camera's measurements in turn
  for (size_t n = 0; n < camids.size(); n++) {

    // Sensor id for this set of measurements
    int cam_id = camids.at(n);

    // Keypoints and ids we will record as "tracked" for this camera
    std::vector<cv::KeyPoint> kpts_new;
    std::vector<size_t> ids_new;

    // Append every simulated observation to the feature database
    // NOTE: the raw simulator ids are shifted by "currid" so that they
    // NOTE: never collide with the reserved aruco-tag id range
    for (const auto &feat : feats.at(n)) {

      // Offset the simulator-provided id
      size_t id = feat.first + currid;

      // Build the keypoint from the raw uv measurement
      cv::KeyPoint kpt;
      kpt.pt.x = feat.second(0);
      kpt.pt.y = feat.second(1);
      kpts_new.push_back(kpt);
      ids_new.push_back(id);

      // Undistort and record the observation
      cv::Point2f npt_l = camera_calib.at(cam_id)->undistort_cv(kpt.pt);
      database->update_feature(id, timestamp, cam_id, kpt.pt.x, kpt.pt.y, npt_l.x, npt_l.y);
    }

    // Image dimensions for this camera
    int width = camera_calib.at(cam_id)->w();
    int height = camera_calib.at(cam_id)->h();

    // Advance the tracker history (no real images exist, so store blanks)
    img_last[cam_id] = cv::Mat::zeros(cv::Size(width, height), CV_8UC1);
    img_mask_last[cam_id] = cv::Mat::zeros(cv::Size(width, height), CV_8UC1);
    pts_last[cam_id] = kpts_new;
    ids_last[cam_id] = ids_new;
  }
}

View File

@@ -0,0 +1,70 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_TRACK_SIM_H
#define OV_CORE_TRACK_SIM_H
#include "TrackBase.h"
#include "utils/print.h"
namespace ov_core {
/**
* @brief Simulated tracker for when we already have uv measurements!
*
* This class should be used when we are using the @ref ov_msckf::Simulator class to generate measurements.
* It simply takes the resulting simulation data and appends it to the internal feature database.
*/
class TrackSIM : public TrackBase {

public:
  /**
   * @brief Public constructor with configuration variables
   * @param cameras camera calibration object which has all camera intrinsics in it
   * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value
   */
  TrackSIM(std::unordered_map<size_t, std::shared_ptr<CamBase>> cameras, int numaruco)
      : TrackBase(cameras, 0, numaruco, false, HistogramMethod::NONE) {}

  /**
   * @brief Process a new image
   * @warning This function should not be used!! Use @ref feed_measurement_simulation() instead.
   * @param message Contains our timestamp, images, and camera ids
   */
  void feed_new_camera(const CameraData &message) override {
    // This tracker consumes simulated uv measurements directly, so receiving
    // a raw camera image is a programming error and we abort immediately
    PRINT_ERROR(RED "[SIM]: SIM TRACKER FEED NEW CAMERA CALLED!!!\n" RESET);
    PRINT_ERROR(RED "[SIM]: THIS SHOULD NEVER HAPPEN!\n" RESET);
    std::exit(EXIT_FAILURE);
  }

  /**
   * @brief Feed function for a synchronized simulated cameras
   * @param timestamp Time that this image was collected
   * @param camids Camera ids that we have simulated measurements for
   * @param feats Raw uv simulated measurements
   */
  void feed_measurement_simulation(double timestamp, const std::vector<int> &camids,
                                   const std::vector<std::vector<std::pair<size_t, Eigen::VectorXf>>> &feats);
};
} // namespace ov_core
#endif /* OV_CORE_TRACK_SIM_H */

236
ov_core/src/types/IMU.h Normal file
View File

@@ -0,0 +1,236 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_TYPE_TYPE_IMU_H
#define OV_TYPE_TYPE_IMU_H
#include "PoseJPL.h"
#include "utils/quat_ops.h"
namespace ov_type {
/**
* @brief Derived Type class that implements an IMU state
*
* Contains a PoseJPL, Vec velocity, Vec gyro bias, and Vec accel bias.
* This should be similar to that of the standard MSCKF state besides the ordering.
* The pose is first, followed by velocity, etc.
*/
class IMU : public Type {

public:
  /// Default constructor: 15 dof error state (q, p, v, bg, ba) over a 16 dof value
  IMU() : Type(15) {

    // Create all the sub-variables
    // make_shared is exception-safe and does a single allocation (vs. shared_ptr(new ...))
    _pose = std::make_shared<PoseJPL>();
    _v = std::make_shared<Vec>(3);
    _bg = std::make_shared<Vec>(3);
    _ba = std::make_shared<Vec>(3);

    // Set our default state value (identity quaternion, everything else zero)
    Eigen::VectorXd imu0 = Eigen::VectorXd::Zero(16, 1);
    imu0(3) = 1.0;
    set_value_internal(imu0);
    set_fej_internal(imu0);
  }

  ~IMU() {}

  /**
   * @brief Sets id used to track location of variable in the filter covariance
   *
   * Note that we update the sub-variables also.
   *
   * @param new_id entry in filter covariance corresponding to this variable
   */
  void set_local_id(int new_id) override {
    _id = new_id;
    _pose->set_local_id(new_id);
    _v->set_local_id(_pose->id() + ((new_id != -1) ? _pose->size() : 0));
    _bg->set_local_id(_v->id() + ((new_id != -1) ? _v->size() : 0));
    _ba->set_local_id(_bg->id() + ((new_id != -1) ? _bg->size() : 0));
  }

  /**
   * @brief Performs update operation using JPLQuat update for orientation, then vector updates for
   * position, velocity, gyro bias, and accel bias (in that order).
   *
   * @param dx 15 DOF vector encoding update using the following order (q, p, v, bg, ba)
   */
  void update(const Eigen::VectorXd &dx) override {

    assert(dx.rows() == _size);

    Eigen::Matrix<double, 16, 1> newX = _value;

    // Orientation is updated multiplicatively with the small-angle perturbation
    Eigen::Matrix<double, 4, 1> dq;
    dq << .5 * dx.block(0, 0, 3, 1), 1.0;
    dq = ov_core::quatnorm(dq);
    newX.block(0, 0, 4, 1) = ov_core::quat_multiply(dq, quat());

    // Position, velocity, and the two biases are updated additively
    newX.block(4, 0, 3, 1) += dx.block(3, 0, 3, 1);
    newX.block(7, 0, 3, 1) += dx.block(6, 0, 3, 1);
    newX.block(10, 0, 3, 1) += dx.block(9, 0, 3, 1);
    newX.block(13, 0, 3, 1) += dx.block(12, 0, 3, 1);

    set_value(newX);
  }

  /**
   * @brief Sets the value of the estimate
   * @param new_value New value we should set
   */
  void set_value(const Eigen::MatrixXd &new_value) override { set_value_internal(new_value); }

  /**
   * @brief Sets the value of the first estimate
   * @param new_value New value we should set
   */
  void set_fej(const Eigen::MatrixXd &new_value) override { set_fej_internal(new_value); }

  /// Deep copy of this state (copies both the estimate and its first-estimate)
  std::shared_ptr<Type> clone() override {
    auto Clone = std::make_shared<IMU>();
    Clone->set_value(value());
    Clone->set_fej(fej());
    return Clone;
  }

  /// Returns the matching sub-variable (pose, its sub-variables, v, bg, ba) or nullptr
  std::shared_ptr<Type> check_if_subvariable(const std::shared_ptr<Type> check) override {
    if (check == _pose) {
      return _pose;
    }
    // Cache the recursive pose lookup so we only traverse its sub-variables once
    std::shared_ptr<Type> pose_sub = _pose->check_if_subvariable(check);
    if (check == pose_sub) {
      return pose_sub;
    }
    if (check == _v) {
      return _v;
    }
    if (check == _bg) {
      return _bg;
    }
    if (check == _ba) {
      return _ba;
    }
    return nullptr;
  }

  /// Rotation access
  Eigen::Matrix<double, 3, 3> Rot() const { return _pose->Rot(); }

  /// FEJ Rotation access
  Eigen::Matrix<double, 3, 3> Rot_fej() const { return _pose->Rot_fej(); }

  /// Rotation access quaternion
  Eigen::Matrix<double, 4, 1> quat() const { return _pose->quat(); }

  /// FEJ Rotation access quaternion
  Eigen::Matrix<double, 4, 1> quat_fej() const { return _pose->quat_fej(); }

  /// Position access
  Eigen::Matrix<double, 3, 1> pos() const { return _pose->pos(); }

  /// FEJ position access
  Eigen::Matrix<double, 3, 1> pos_fej() const { return _pose->pos_fej(); }

  /// Velocity access
  Eigen::Matrix<double, 3, 1> vel() const { return _v->value(); }

  /// FEJ velocity access
  Eigen::Matrix<double, 3, 1> vel_fej() const { return _v->fej(); }

  /// Gyro bias access
  Eigen::Matrix<double, 3, 1> bias_g() const { return _bg->value(); }

  /// FEJ gyro bias access
  Eigen::Matrix<double, 3, 1> bias_g_fej() const { return _bg->fej(); }

  /// Accel bias access
  Eigen::Matrix<double, 3, 1> bias_a() const { return _ba->value(); }

  /// FEJ accel bias access
  Eigen::Matrix<double, 3, 1> bias_a_fej() const { return _ba->fej(); }

  /// Pose type access
  std::shared_ptr<PoseJPL> pose() { return _pose; }

  /// Quaternion type access
  std::shared_ptr<JPLQuat> q() { return _pose->q(); }

  /// Position type access
  std::shared_ptr<Vec> p() { return _pose->p(); }

  /// Velocity type access
  std::shared_ptr<Vec> v() { return _v; }

  /// Gyroscope bias access
  std::shared_ptr<Vec> bg() { return _bg; }

  /// Acceleration bias access
  std::shared_ptr<Vec> ba() { return _ba; }

protected:
  /// Pose subvariable
  std::shared_ptr<PoseJPL> _pose;

  /// Velocity subvariable
  std::shared_ptr<Vec> _v;

  /// Gyroscope bias subvariable
  std::shared_ptr<Vec> _bg;

  /// Acceleration bias subvariable
  std::shared_ptr<Vec> _ba;

  /**
   * @brief Sets the value of the estimate
   * @param new_value New value we should set (16x1 ordered as q, p, v, bg, ba)
   */
  void set_value_internal(const Eigen::MatrixXd &new_value) {

    assert(new_value.rows() == 16);
    assert(new_value.cols() == 1);

    _pose->set_value(new_value.block(0, 0, 7, 1));
    _v->set_value(new_value.block(7, 0, 3, 1));
    _bg->set_value(new_value.block(10, 0, 3, 1));
    _ba->set_value(new_value.block(13, 0, 3, 1));

    _value = new_value;
  }

  /**
   * @brief Sets the value of the first estimate
   * @param new_value New value we should set (16x1 ordered as q, p, v, bg, ba)
   */
  void set_fej_internal(const Eigen::MatrixXd &new_value) {

    assert(new_value.rows() == 16);
    assert(new_value.cols() == 1);

    _pose->set_fej(new_value.block(0, 0, 7, 1));
    _v->set_fej(new_value.block(7, 0, 3, 1));
    _bg->set_fej(new_value.block(10, 0, 3, 1));
    _ba->set_fej(new_value.block(13, 0, 3, 1));

    _fej = new_value;
  }
};
} // namespace ov_type
#endif // OV_TYPE_TYPE_IMU_H

139
ov_core/src/types/JPLQuat.h Normal file
View File

@@ -0,0 +1,139 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_TYPE_TYPE_JPLQUAT_H
#define OV_TYPE_TYPE_JPLQUAT_H
#include "Type.h"
#include "utils/quat_ops.h"
namespace ov_type {
/**
* @brief Derived Type class that implements JPL quaternion
*
* This quaternion uses a left-multiplicative error state and follows the "JPL convention".
* Please checkout our utility functions in the quat_ops.h file.
* We recommend that people new quaternions check out the following resources:
* - http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf
* - ftp://naif.jpl.nasa.gov/pub/naif/misc/Quaternion_White_Paper/Quaternions_White_Paper.pdf
*/
class JPLQuat : public Type {

public:
  /// Default constructor: identity quaternion (x, y, z, w) = (0, 0, 0, 1)
  JPLQuat() : Type(3) {
    Eigen::Vector4d q0 = Eigen::Vector4d::Zero();
    q0(3) = 1.0;
    set_value_internal(q0);
    set_fej_internal(q0);
  }

  ~JPLQuat() {}

  /**
   * @brief Implements update operation by left-multiplying the current
   * quaternion with a quaternion built from a small axis-angle perturbation.
   *
   * @f[
   * \bar{q}=norm\Big(\begin{bmatrix} 0.5*\mathbf{\theta_{dx}} \\ 1 \end{bmatrix}\Big) \hat{\bar{q}}
   * @f]
   *
   * @param dx Axis-angle representation of the perturbing quaternion
   */
  void update(const Eigen::VectorXd &dx) override {

    assert(dx.rows() == _size);

    // Build perturbing quaternion
    Eigen::Matrix<double, 4, 1> dq;
    dq << .5 * dx, 1.0;
    dq = ov_core::quatnorm(dq);

    // Update estimate and recompute R
    set_value(ov_core::quat_multiply(dq, _value));
  }

  /**
   * @brief Sets the value of the estimate and recomputes the internal rotation matrix
   * @param new_value New value for the quaternion estimate
   */
  void set_value(const Eigen::MatrixXd &new_value) override { set_value_internal(new_value); }

  /**
   * @brief Sets the fej value and recomputes the fej rotation matrix
   * @param new_value New value for the quaternion estimate
   */
  void set_fej(const Eigen::MatrixXd &new_value) override { set_fej_internal(new_value); }

  /// Deep copy (copies both the estimate and its first-estimate)
  std::shared_ptr<Type> clone() override {
    // make_shared is exception-safe and does a single allocation (vs. shared_ptr(new ...))
    auto Clone = std::make_shared<JPLQuat>();
    Clone->set_value(value());
    Clone->set_fej(fej());
    return Clone;
  }

  /// Rotation access
  Eigen::Matrix<double, 3, 3> Rot() const { return _R; }

  /// FEJ Rotation access
  Eigen::Matrix<double, 3, 3> Rot_fej() const { return _Rfej; }

protected:
  /// Cached rotation matrix R(q) for the current estimate
  Eigen::Matrix<double, 3, 3> _R;

  /// Cached first-estimate rotation matrix R(q_fej)
  Eigen::Matrix<double, 3, 3> _Rfej;

  /**
   * @brief Sets the value of the estimate and recomputes the internal rotation matrix
   * @param new_value New value for the quaternion estimate
   */
  void set_value_internal(const Eigen::MatrixXd &new_value) {

    assert(new_value.rows() == 4);
    assert(new_value.cols() == 1);
    _value = new_value;

    // compute associated rotation
    _R = ov_core::quat_2_Rot(new_value);
  }

  /**
   * @brief Sets the fej value and recomputes the fej rotation matrix
   * @param new_value New value for the quaternion estimate
   */
  void set_fej_internal(const Eigen::MatrixXd &new_value) {

    assert(new_value.rows() == 4);
    assert(new_value.cols() == 1);
    _fej = new_value;

    // compute associated rotation
    _Rfej = ov_core::quat_2_Rot(new_value);
  }
};
} // namespace ov_type
#endif // OV_TYPE_TYPE_JPLQUAT_H

View File

@@ -0,0 +1,144 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Landmark.h"
using namespace ov_type;
// Recover the cartesian feature position from whatever internal representation
// this landmark currently uses (global/anchor frame depends on the representation).
Eigen::Matrix<double, 3, 1> Landmark::get_xyz(bool getfej) const {

  // CASE: Global 3d feature representation
  // CASE: Anchored 3D feature representation
  // The state directly is the 3d position, so just return it
  if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_3D ||
      _feat_representation == LandmarkRepresentation::Representation::ANCHORED_3D) {
    return (getfej) ? fej() : value();
  }

  // CASE: Global inverse depth feature representation
  // CASE: Anchored full inverse depth feature representation
  // State is (theta, phi, rho): bearing angles plus inverse range, so
  // xyz = (1/rho) * [cos(theta)sin(phi), sin(theta)sin(phi), cos(phi)]
  if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_FULL_INVERSE_DEPTH ||
      _feat_representation == LandmarkRepresentation::Representation::ANCHORED_FULL_INVERSE_DEPTH) {
    Eigen::Matrix<double, 3, 1> p_invFinG = (getfej) ? fej() : value();
    Eigen::Matrix<double, 3, 1> p_FinG;
    p_FinG << (1 / p_invFinG(2)) * std::cos(p_invFinG(0)) * std::sin(p_invFinG(1)),
        (1 / p_invFinG(2)) * std::sin(p_invFinG(0)) * std::sin(p_invFinG(1)), (1 / p_invFinG(2)) * std::cos(p_invFinG(1));
    return p_FinG;
  }

  // CASE: Anchored MSCKF inverse depth feature representation
  // State is (x/z, y/z, 1/z) in the anchor frame
  // NOTE(review): this branch always reads value(); the getfej flag is not
  // honored here -- confirm this is intended before relying on the fej output
  if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_MSCKF_INVERSE_DEPTH) {
    Eigen::Matrix<double, 3, 1> p_FinA;
    Eigen::Matrix<double, 3, 1> p_invFinA = value();
    p_FinA << (1 / p_invFinA(2)) * p_invFinA(0), (1 / p_invFinA(2)) * p_invFinA(1), 1 / p_invFinA(2);
    return p_FinA;
  }

  // CASE: Estimate single depth of the feature using the initial bearing
  // NOTE(review): the fej variant is commented out, so getfej is ignored here too
  if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {
    // if(getfej) return 1.0/fej()(0)*uv_norm_zero_fej;
    return 1.0 / value()(0) * uv_norm_zero;
  }

  // Failure: unknown representation
  assert(false);
  return Eigen::Vector3d::Zero();
}
// Encode a cartesian feature position into whatever internal representation
// this landmark currently uses, writing either the estimate or its first-estimate.
void Landmark::set_from_xyz(Eigen::Matrix<double, 3, 1> p_FinG, bool isfej) {

  // GLOBAL_3D / ANCHORED_3D: the state is the cartesian position itself
  if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_3D ||
      _feat_representation == LandmarkRepresentation::Representation::ANCHORED_3D) {
    if (isfej) {
      set_fej(p_FinG);
    } else {
      set_value(p_FinG);
    }
    return;
  }

  // GLOBAL / ANCHORED full inverse depth: convert to (theta, phi, rho)
  // NOTE: this is the standard inverse-depth parameterization (not the MSCKF one)
  if (_feat_representation == LandmarkRepresentation::Representation::GLOBAL_FULL_INVERSE_DEPTH ||
      _feat_representation == LandmarkRepresentation::Representation::ANCHORED_FULL_INVERSE_DEPTH) {
    double rho = 1 / p_FinG.norm();
    double phi = std::acos(rho * p_FinG(2));
    double theta = std::atan2(p_FinG(1), p_FinG(0));
    Eigen::Matrix<double, 3, 1> p_inv;
    p_inv << theta, phi, rho;
    if (isfej) {
      set_fej(p_inv);
    } else {
      set_value(p_inv);
    }
    return;
  }

  // ANCHORED_MSCKF_INVERSE_DEPTH: state is (x/z, y/z, 1/z)
  if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_MSCKF_INVERSE_DEPTH) {
    Eigen::Matrix<double, 3, 1> p_inv_msckf;
    p_inv_msckf << p_FinG(0) / p_FinG(2), p_FinG(1) / p_FinG(2), 1 / p_FinG(2);
    if (isfej) {
      set_fej(p_inv_msckf);
    } else {
      set_value(p_inv_msckf);
    }
    return;
  }

  // ANCHORED_INVERSE_DEPTH_SINGLE: scalar inverse depth plus a stored bearing
  if (_feat_representation == LandmarkRepresentation::Representation::ANCHORED_INVERSE_DEPTH_SINGLE) {
    Eigen::VectorXd inv_depth = Eigen::VectorXd::Zero(1);
    inv_depth(0) = 1.0 / p_FinG(2);
    // Record the bearing vector that pairs with this depth value
    if (isfej) {
      uv_norm_zero_fej = 1.0 / p_FinG(2) * p_FinG;
      set_fej(inv_depth);
    } else {
      uv_norm_zero = 1.0 / p_FinG(2) * p_FinG;
      set_value(inv_depth);
    }
    return;
  }

  // Unknown representation: hard failure
  assert(false);
}

View File

@@ -0,0 +1,104 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_TYPE_TYPE_LANDMARK_H
#define OV_TYPE_TYPE_LANDMARK_H
#include "LandmarkRepresentation.h"
#include "Vec.h"
#include "utils/colors.h"
#include "utils/print.h"
namespace ov_type {
/**
* @brief Type that implements a persistent SLAM feature.
*
* We store the feature ID that should match the IDs in the trackers.
* Additionally if this is an anchored representation we store what clone timestamp this is anchored from and what camera.
* If this features should be marginalized its flag can be set and during cleanup it will be removed.
*/
class Landmark : public Vec {

public:
  /// Default constructor (feature is a Vec of size 3 or Vec of size 1)
  /// Size 1 is used by the single inverse-depth representation, size 3 by all others
  Landmark(int dim) : Vec(dim) {}

  /// Feature ID of this landmark (corresponds to frontend id)
  size_t _featid;

  /// What unique camera stream this slam feature was observed from
  int _unique_camera_id = -1;

  /// What camera ID our pose is anchored in!! By default the first measurement is the anchor.
  int _anchor_cam_id = -1;

  /// Timestamp of anchor clone
  double _anchor_clone_timestamp = -1;

  /// Boolean if this landmark has had at least one anchor change
  bool has_had_anchor_change = false;

  /// Boolean if this landmark should be marginalized out
  bool should_marg = false;

  /// First normalized uv coordinate bearing of this measurement (used for single depth representation)
  Eigen::Vector3d uv_norm_zero;

  /// First estimate normalized uv coordinate bearing of this measurement (used for single depth representation)
  Eigen::Vector3d uv_norm_zero_fej;

  /// What feature representation this feature currently has
  LandmarkRepresentation::Representation _feat_representation;

  /**
   * @brief Overrides the default vector update rule
   * We want to selectively update the FEJ value if we are using an anchored representation.
   * @param dx Additive error state correction
   */
  void update(const Eigen::VectorXd &dx) override {
    // Update estimate
    assert(dx.rows() == _size);
    set_value(_value + dx);
    // Ensure we are not near zero in the z-direction
    // NOTE(review): this near-zero depth guard is currently disabled -- confirm intent before re-enabling
    // if (LandmarkRepresentation::is_relative_representation(_feat_representation) && _value(_value.rows() - 1) < 1e-8) {
    //   PRINT_DEBUG(YELLOW "WARNING DEPTH %.8f BECAME CLOSE TO ZERO IN UPDATE!!!\n" RESET, _value(_value.rows() - 1));
    //   should_marg = true;
    // }
  }

  /**
   * @brief Will return the position of the feature in the global frame of reference.
   * @param getfej Set to true to get the landmark FEJ value
   * @return Position of feature either in global or anchor frame
   */
  Eigen::Matrix<double, 3, 1> get_xyz(bool getfej) const;

  /**
   * @brief Will set the current value based on the representation.
   * @param p_FinG Position of the feature either in global or anchor frame
   * @param isfej Set to true to set the landmark FEJ value
   */
  void set_from_xyz(Eigen::Matrix<double, 3, 1> p_FinG, bool isfej);
};
} // namespace ov_type
#endif // OV_TYPE_TYPE_LANDMARK_H

View File

@@ -0,0 +1,113 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_TYPE_LANDMARKREPRESENTATION_H
#define OV_TYPE_LANDMARKREPRESENTATION_H
#include <string>
namespace ov_type {
/**
 * @brief Class has useful feature representation types
 */
class LandmarkRepresentation {
public:
  /**
   * @brief What feature representation our state can use
   */
  enum Representation {
    GLOBAL_3D,
    GLOBAL_FULL_INVERSE_DEPTH,
    ANCHORED_3D,
    ANCHORED_FULL_INVERSE_DEPTH,
    ANCHORED_MSCKF_INVERSE_DEPTH,
    ANCHORED_INVERSE_DEPTH_SINGLE,
    UNKNOWN
  };

  /**
   * @brief Returns a string representation of this enum value.
   * Used to debug print out what the user has selected as the representation.
   * @param feat_representation Representation we want to check
   * @return String version of the passed enum
   */
  static inline std::string as_string(Representation feat_representation) {
    switch (feat_representation) {
    case GLOBAL_3D:
      return "GLOBAL_3D";
    case GLOBAL_FULL_INVERSE_DEPTH:
      return "GLOBAL_FULL_INVERSE_DEPTH";
    case ANCHORED_3D:
      return "ANCHORED_3D";
    case ANCHORED_FULL_INVERSE_DEPTH:
      return "ANCHORED_FULL_INVERSE_DEPTH";
    case ANCHORED_MSCKF_INVERSE_DEPTH:
      return "ANCHORED_MSCKF_INVERSE_DEPTH";
    case ANCHORED_INVERSE_DEPTH_SINGLE:
      return "ANCHORED_INVERSE_DEPTH_SINGLE";
    default:
      return "UNKNOWN";
    }
  }

  /**
   * @brief Returns the enum for the given string representation.
   * Used to convert what the user has selected in the config into our enum.
   * @param feat_representation String we want to find the enum of
   * @return Representation, will be UNKNOWN if we couldn't parse it
   */
  static inline Representation from_string(const std::string &feat_representation) {
    // Walk every known enum value and compare against its canonical string form
    for (int i = 0; i <= UNKNOWN; i++) {
      auto candidate = static_cast<Representation>(i);
      if (as_string(candidate) == feat_representation)
        return candidate;
    }
    return UNKNOWN;
  }

  /**
   * @brief Helper function that checks if the passed feature representation is a relative or global
   * @param feat_representation Representation we want to check
   * @return True if it is a relative (anchored) representation
   */
  static inline bool is_relative_representation(Representation feat_representation) {
    switch (feat_representation) {
    case ANCHORED_3D:
    case ANCHORED_FULL_INVERSE_DEPTH:
    case ANCHORED_MSCKF_INVERSE_DEPTH:
    case ANCHORED_INVERSE_DEPTH_SINGLE:
      return true;
    default:
      return false;
    }
  }

private:
  /**
   * All function in this class should be static.
   * Thus an instance of this class cannot be created.
   */
  LandmarkRepresentation(){};
};
} // namespace ov_type
#endif // OV_TYPE_LANDMARKREPRESENTATION_H

191
ov_core/src/types/PoseJPL.h Normal file
View File

@@ -0,0 +1,191 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_TYPE_TYPE_POSEJPL_H
#define OV_TYPE_TYPE_POSEJPL_H
#include "JPLQuat.h"
#include "Vec.h"
#include "utils/quat_ops.h"
namespace ov_type {
/**
 * @brief Derived Type class that implements a 6 d.o.f pose
 *
 * Internally we use a JPLQuat quaternion representation for the orientation and 3D Vec position.
 * Please see JPLQuat for details on its update procedure and its left multiplicative error.
 */
class PoseJPL : public Type {
public:
  PoseJPL() : Type(6) {
    // Allocate our orientation and position sub-variables
    _q = std::make_shared<JPLQuat>();
    _p = std::make_shared<Vec>(3);
    // Start from the identity pose: quaternion [0,0,0,1] and zero position
    Eigen::Matrix<double, 7, 1> identity_pose;
    identity_pose.setZero();
    identity_pose(3) = 1.0;
    set_value_internal(identity_pose);
    set_fej_internal(identity_pose);
  }

  ~PoseJPL() {}

  /**
   * @brief Sets id used to track location of variable in the filter covariance
   *
   * Note that we update the sub-variables also.
   *
   * @param new_id entry in filter covariance corresponding to this variable
   */
  void set_local_id(int new_id) override {
    _id = new_id;
    _q->set_local_id(new_id);
    // Position block sits directly after the orientation block (unless we are not in the covariance)
    int position_offset = (new_id != -1) ? _q->size() : 0;
    _p->set_local_id(new_id + position_offset);
  }

  /**
   * @brief Update q and p using a the JPLQuat update for orientation and vector update for position
   *
   * @param dx Correction vector (orientation then position)
   */
  void update(const Eigen::VectorXd &dx) override {
    assert(dx.rows() == _size);
    // Build the small-angle quaternion from the first three error components
    Eigen::Matrix<double, 4, 1> dq;
    dq << .5 * dx.block(0, 0, 3, 1), 1.0;
    dq = ov_core::quatnorm(dq);
    // Left-multiplicative orientation correction, additive position correction
    Eigen::Matrix<double, 7, 1> updated = _value;
    updated.block(0, 0, 4, 1) = ov_core::quat_multiply(dq, quat());
    updated.block(4, 0, 3, 1) += dx.block(3, 0, 3, 1);
    set_value(updated);
  }

  /**
   * @brief Sets the value of the estimate
   * @param new_value New value we should set
   */
  void set_value(const Eigen::MatrixXd &new_value) override { set_value_internal(new_value); }

  /**
   * @brief Sets the value of the first estimate
   * @param new_value New value we should set
   */
  void set_fej(const Eigen::MatrixXd &new_value) override { set_fej_internal(new_value); }

  /// Creates a deep copy of this pose (copies both the estimate and the first-estimate)
  std::shared_ptr<Type> clone() override {
    auto cloned = std::make_shared<PoseJPL>();
    cloned->set_value(value());
    cloned->set_fej(fej());
    return cloned;
  }

  /// Returns the matching sub-variable (orientation or position) if the passed type is one of ours
  std::shared_ptr<Type> check_if_subvariable(const std::shared_ptr<Type> check) override {
    if (check == _q)
      return _q;
    if (check == _p)
      return _p;
    return nullptr;
  }

  /// Rotation access
  Eigen::Matrix<double, 3, 3> Rot() const { return _q->Rot(); }

  /// FEJ Rotation access
  Eigen::Matrix<double, 3, 3> Rot_fej() const { return _q->Rot_fej(); }

  /// Rotation access as quaternion
  Eigen::Matrix<double, 4, 1> quat() const { return _q->value(); }

  /// FEJ Rotation access as quaternion
  Eigen::Matrix<double, 4, 1> quat_fej() const { return _q->fej(); }

  /// Position access
  Eigen::Matrix<double, 3, 1> pos() const { return _p->value(); }

  /// FEJ position access
  Eigen::Matrix<double, 3, 1> pos_fej() const { return _p->fej(); }

  /// Quaternion type access
  std::shared_ptr<JPLQuat> q() { return _q; }

  /// Position type access
  std::shared_ptr<Vec> p() { return _p; }

protected:
  /// Subvariable containing orientation
  std::shared_ptr<JPLQuat> _q;

  /// Subvariable containing position
  std::shared_ptr<Vec> _p;

  /**
   * @brief Sets the value of the estimate and propagates it into the sub-variables
   * @param new_value New value we should set (7x1: quaternion then position)
   */
  void set_value_internal(const Eigen::MatrixXd &new_value) {
    assert(new_value.rows() == 7);
    assert(new_value.cols() == 1);
    // Split the stacked vector into its orientation and position parts
    _q->set_value(new_value.block(0, 0, 4, 1));
    _p->set_value(new_value.block(4, 0, 3, 1));
    _value = new_value;
  }

  /**
   * @brief Sets the value of the first estimate and propagates it into the sub-variables
   * @param new_value New value we should set (7x1: quaternion then position)
   */
  void set_fej_internal(const Eigen::MatrixXd &new_value) {
    assert(new_value.rows() == 7);
    assert(new_value.cols() == 1);
    // Split the stacked vector into its orientation and position parts
    _q->set_fej(new_value.block(0, 0, 4, 1));
    _p->set_fej(new_value.block(4, 0, 3, 1));
    _fej = new_value;
  }
};
} // namespace ov_type
#endif // OV_TYPE_TYPE_POSEJPL_H

137
ov_core/src/types/Type.h Normal file
View File

@@ -0,0 +1,137 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_TYPE_TYPE_BASE_H
#define OV_TYPE_TYPE_BASE_H
#include <Eigen/Eigen>
#include <memory>
namespace ov_type {
/**
 * @brief Base class for estimated variables.
 *
 * This class describes how variables are represented and updated (e.g., as vectors or quaternions).
 * Each variable is defined by its error state size and its location in the covariance matrix.
 * We additionally require all sub-types to have an update procedure.
 */
class Type {
public:
  /**
   * @brief Default constructor for our Type
   *
   * @param size_ degrees of freedom of variable (i.e., the size of the error state)
   */
  Type(int size_) : _size(size_) {}

  virtual ~Type() = default;

  /**
   * @brief Sets id used to track location of variable in the filter covariance
   *
   * Note that the minimum ID is -1 which says that the state is not in our covariance.
   * If the ID is larger than -1 then this is the index location in the covariance matrix.
   *
   * @param new_id entry in filter covariance corresponding to this variable
   */
  virtual void set_local_id(int new_id) { _id = new_id; }

  /**
   * @brief Access to variable id (i.e. its location in the covariance)
   */
  int id() const { return _id; }

  /**
   * @brief Access to variable size (i.e. its error state size)
   */
  int size() const { return _size; }

  /**
   * @brief Update variable due to perturbation of error state
   *
   * @param dx Perturbation used to update the variable through a defined "boxplus" operation
   */
  virtual void update(const Eigen::VectorXd &dx) = 0;

  /**
   * @brief Access variable's estimate
   */
  virtual const Eigen::MatrixXd &value() const { return _value; }

  /**
   * @brief Access variable's first-estimate
   */
  virtual const Eigen::MatrixXd &fej() const { return _fej; }

  /**
   * @brief Overwrite value of state's estimate
   * @param new_value New value that will overwrite state's value (dimensions must match the current value)
   */
  virtual void set_value(const Eigen::MatrixXd &new_value) {
    assert(_value.rows() == new_value.rows());
    assert(_value.cols() == new_value.cols());
    _value = new_value;
  }

  /**
   * @brief Overwrite value of first-estimate
   * @param new_value New value that will overwrite state's fej (dimensions must match the current fej)
   */
  virtual void set_fej(const Eigen::MatrixXd &new_value) {
    assert(_fej.rows() == new_value.rows());
    assert(_fej.cols() == new_value.cols());
    _fej = new_value;
  }

  /**
   * @brief Create a clone of this variable
   */
  virtual std::shared_ptr<Type> clone() = 0;

  /**
   * @brief Determine if pass variable is a sub-variable
   *
   * If the passed variable is a sub-variable or the current variable this will return it.
   * Otherwise it will return a nullptr, meaning that it was unable to be found.
   *
   * @param check Type pointer to compare our subvariables to
   */
  virtual std::shared_ptr<Type> check_if_subvariable(const std::shared_ptr<Type> check) { return nullptr; }

protected:
  /// First-estimate
  Eigen::MatrixXd _fej;

  /// Current best estimate
  Eigen::MatrixXd _value;

  /// Location of error state in covariance (-1 if not in the covariance)
  int _id = -1;

  /// Dimension of error state
  int _size = -1;
};
} // namespace ov_type
#endif // OV_TYPE_TYPE_BASE_H

68
ov_core/src/types/Vec.h Normal file
View File

@@ -0,0 +1,68 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_TYPE_TYPE_VEC_H
#define OV_TYPE_TYPE_VEC_H
#include "Type.h"
namespace ov_type {
/**
 * @brief Derived Type class that implements vector variables
 */
class Vec : public Type {
public:
  /**
   * @brief Default constructor for Vec
   * @param dim Size of the vector (will be same as error state)
   */
  Vec(int dim) : Type(dim) {
    // Both the estimate and its first-estimate start at zero
    _value = Eigen::VectorXd::Zero(dim);
    _fej = Eigen::VectorXd::Zero(dim);
  }

  ~Vec() {}

  /**
   * @brief Implements the update operation through standard vector addition
   * @param dx Additive error state correction
   */
  void update(const Eigen::VectorXd &dx) override {
    assert(dx.rows() == _size);
    // Plain additive boxplus: x <- x + dx
    Eigen::VectorXd corrected = _value + dx;
    set_value(corrected);
  }

  /**
   * @brief Performs all the cloning
   */
  std::shared_ptr<Type> clone() override {
    auto copy = std::make_shared<Vec>(_size);
    copy->set_value(value());
    copy->set_fej(fej());
    return copy;
  }
};
} // namespace ov_type
#endif // OV_TYPE_TYPE_VEC_H

View File

@@ -0,0 +1,45 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_COLOR_MACROS
#define OV_CORE_COLOR_MACROS
// ANSI terminal escape sequences used to colorize console logging output.
// Usage: concatenate around the text, e.g. PRINT_ERROR(RED "message\n" RESET);
// the trailing RESET is required or the terminal stays in the last color.
#define RESET "\033[0m"
#define BLACK "\033[30m"              /* Black */
#define RED "\033[31m"                /* Red */
#define GREEN "\033[32m"              /* Green */
#define YELLOW "\033[33m"             /* Yellow */
#define BLUE "\033[34m"               /* Blue */
#define MAGENTA "\033[35m"            /* Magenta */
#define CYAN "\033[36m"               /* Cyan */
#define WHITE "\033[37m"              /* White */
#define REDPURPLE "\033[95m"          /* Red Purple */
#define BOLDBLACK "\033[1m\033[30m"   /* Bold Black */
#define BOLDRED "\033[1m\033[31m"     /* Bold Red */
#define BOLDGREEN "\033[1m\033[32m"   /* Bold Green */
#define BOLDYELLOW "\033[1m\033[33m"  /* Bold Yellow */
#define BOLDBLUE "\033[1m\033[34m"    /* Bold Blue */
#define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */
#define BOLDCYAN "\033[1m\033[36m"    /* Bold Cyan */
#define BOLDWHITE "\033[1m\033[37m"   /* Bold White */
#define BOLDREDPURPLE "\033[1m\033[95m" /* Bold Red Purple */
#endif /* OV_CORE_COLOR_MACROS */

View File

@@ -0,0 +1,247 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_DATASET_READER_H
#define OV_CORE_DATASET_READER_H
#include <Eigen/Eigen>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include "colors.h"
#include "print.h"
using namespace std;
namespace ov_core {
/**
 * @brief Helper functions to read in dataset files
 *
 * This file has some nice functions for reading dataset files.
 * One of the main datasets that we test against is the EuRoC MAV dataset.
 * We have some nice utility functions here that handle loading of the groundtruth data.
 * This can be used to initialize the system or for plotting and calculation of RMSE values without needing any alignment.
 *
 * > M. Burri, J. Nikolic, P. Gohl, T. Schneider, J. Rehder, S. Omari,M. Achtelik and R. Siegwart,
 * > "The EuRoC micro aerial vehicle datasets", International Journal of Robotic Research, DOI: 10.1177/0278364915620033, 2016.
 * > https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets.
 */
class DatasetReader {
public:
  /**
   * @brief Load a ASL format groundtruth file
   * @param path Path to the CSV file of groundtruth data
   * @param gt_states Will be filled with groundtruth states
   *
   * Here we will try to load a groundtruth file that is in the ASL/EUROCMAV format.
   * If we can't open the file, or it is in the wrong format we will error and exit the program.
   * See get_gt_state() for a way to get the groundtruth state at a given timestep
   */
  static void load_gt_file(std::string path, std::map<double, Eigen::Matrix<double, 17, 1>> &gt_states) {
    // Clear any old data
    gt_states.clear();
    // Open the file
    std::ifstream file;
    std::string line;
    file.open(path);
    // Check that it was successful
    if (!file) {
      PRINT_ERROR(RED "ERROR: Unable to open groundtruth file...\n" RESET);
      PRINT_ERROR(RED "ERROR: %s\n" RESET, path.c_str());
      std::exit(EXIT_FAILURE);
    }
    // Skip the first line as it is just the header
    std::getline(file, line);
    // Loop through each line in the file
    while (std::getline(file, line)) {
      // Loop variables
      int i = 0;
      std::istringstream s(line);
      std::string field;
      Eigen::Matrix<double, 17, 1> temp = Eigen::Matrix<double, 17, 1>::Zero();
      // Loop through this line (comma separated)
      while (std::getline(s, field, ',')) {
        // Ensure we are in the range
        if (i > 16) {
          PRINT_ERROR(RED "ERROR: Invalid groudtruth line, too long!\n" RESET);
          PRINT_ERROR(RED "ERROR: %s\n" RESET, line.c_str());
          std::exit(EXIT_FAILURE);
        }
        // Save our groundtruth state value
        temp(i, 0) = std::atof(field.c_str());
        i++;
      }
      // Append to our groundtruth map (first field is the timestamp in nanoseconds -> key in seconds)
      gt_states.insert({1e-9 * temp(0, 0), temp});
    }
    file.close();
  }

  /**
   * @brief Gets the 17x1 groundtruth state at a given timestep
   * @param timestep timestep we want to get the groundtruth for
   * @param imustate groundtruth state [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel]
   * @param gt_states Should be loaded with groundtruth states, see load_gt_file() for details
   * @return true if we found the state, false otherwise
   */
  static bool get_gt_state(double timestep, Eigen::Matrix<double, 17, 1> &imustate,
                           std::map<double, Eigen::Matrix<double, 17, 1>> &gt_states) {
    // Check that we even have groundtruth loaded
    if (gt_states.empty()) {
      PRINT_ERROR(RED "Groundtruth data loaded is empty, make sure you call load before asking for a state.\n" RESET);
      return false;
    }
    // Loop through gt states and find the closest time stamp
    double closest_time = INFINITY;
    auto it0 = gt_states.begin();
    while (it0 != gt_states.end()) {
      if (std::abs(it0->first - timestep) < std::abs(closest_time - timestep)) {
        closest_time = it0->first;
      }
      it0++;
    }
    // If close to this timestamp, then use it (snap to an exact map key so the find() below succeeds)
    if (std::abs(closest_time - timestep) < 0.10) {
      // PRINT_DEBUG("init DT = %.4f\n", std::abs(closest_time-timestep));
      // PRINT_DEBUG("timestamp = %.15f\n", closest_time);
      timestep = closest_time;
    }
    // Check that we have the timestamp in our GT file
    if (gt_states.find(timestep) == gt_states.end()) {
      PRINT_WARNING(YELLOW "Unable to find %.6f timestamp in GT file, wrong GT file loaded???\n" RESET, timestep);
      return false;
    }
    // Get the GT state vector
    Eigen::Matrix<double, 17, 1> state = gt_states[timestep];
    // Our "fixed" state vector from the ETH GT format [q,p,v,bg,ba]
    // NOTE: the quaternion is reordered from the file's [w,x,y,z] columns into [x,y,z,w]
    imustate(0, 0) = timestep;    // time
    imustate(1, 0) = state(5, 0); // quat
    imustate(2, 0) = state(6, 0);
    imustate(3, 0) = state(7, 0);
    imustate(4, 0) = state(4, 0);
    imustate(5, 0) = state(1, 0); // pos
    imustate(6, 0) = state(2, 0);
    imustate(7, 0) = state(3, 0);
    imustate(8, 0) = state(8, 0); // vel
    imustate(9, 0) = state(9, 0);
    imustate(10, 0) = state(10, 0);
    imustate(11, 0) = state(11, 0); // bg
    imustate(12, 0) = state(12, 0);
    imustate(13, 0) = state(13, 0);
    imustate(14, 0) = state(14, 0); // ba
    imustate(15, 0) = state(15, 0);
    imustate(16, 0) = state(16, 0);
    // Success!
    return true;
  }

  /**
   * @brief This will load the trajectory into memory (space separated)
   * @param path Path to the trajectory file that we want to read in.
   * @param traj_data Will be filled with groundtruth states (timestamp(s), q_GtoI, p_IinG)
   */
  static void load_simulated_trajectory(std::string path, std::vector<Eigen::VectorXd> &traj_data) {
    // Try to open our groundtruth file
    std::ifstream file;
    file.open(path);
    if (!file) {
      PRINT_ERROR(RED "ERROR: Unable to open simulation trajectory file...\n" RESET);
      PRINT_ERROR(RED "ERROR: %s\n" RESET, path.c_str());
      std::exit(EXIT_FAILURE);
    }
    // Debug print
    std::string base_filename = path.substr(path.find_last_of("/\\") + 1);
    PRINT_DEBUG("loaded trajectory %s\n", base_filename.c_str());
    // Loop through each line of this file
    std::string current_line;
    while (std::getline(file, current_line)) {
      // Skip if we start with a comment (explicit prefix check, was `!find("#")` before)
      if (current_line.rfind("#", 0) == 0)
        continue;
      // Loop variables
      int i = 0;
      std::istringstream s(current_line);
      std::string field;
      // Zero-init so a malformed line can never expose uninitialized memory (we still require all 8 fields below)
      Eigen::Matrix<double, 8, 1> data = Eigen::Matrix<double, 8, 1>::Zero();
      // Loop through this line (timestamp(s) tx ty tz qx qy qz qw)
      while (std::getline(s, field, ' ')) {
        // Skip if empty or if we already have all values
        if (field.empty() || i >= data.rows())
          continue;
        // save the data to our vector
        data(i) = std::atof(field.c_str());
        i++;
      }
      // Only a valid line if we have all the parameters
      if (i > 7) {
        traj_data.push_back(data);
        // std::stringstream ss;
        // ss << std::setprecision(15) << data.transpose() << std::endl;
        // PRINT_DEBUG(ss.str().c_str());
      }
    }
    // Finally close the file
    file.close();
    // Error if we don't have any data
    if (traj_data.empty()) {
      PRINT_ERROR(RED "ERROR: Could not parse any data from the file!!\n" RESET);
      PRINT_ERROR(RED "ERROR: %s\n" RESET, path.c_str());
      std::exit(EXIT_FAILURE);
    }
  }

private:
  /**
   * All function in this class should be static.
   * Thus an instance of this class cannot be created.
   */
  DatasetReader() {}
};
} // namespace ov_core
#endif /* OV_CORE_DATASET_READER_H */

View File

@@ -0,0 +1,48 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_OPENCV_LAMBDA_BODY_H
#define OV_CORE_OPENCV_LAMBDA_BODY_H
#include <functional>
#include <opencv2/opencv.hpp>
namespace ov_core {
/**
 * @brief Helper class to do OpenCV parallelization
 *
 * This is a utility class required to build with older version of opencv
 * On newer versions this doesn't seem to be needed, but here we just use it to ensure we can work for more opencv version.
 * https://answers.opencv.org/question/65800/how-to-use-lambda-as-a-parameter-to-parallel_for_/?answer=130691#post-id-130691
 */
class LambdaBody : public cv::ParallelLoopBody {
public:
  /// Wraps the given callable so cv::parallel_for_ can invoke it on each sub-range
  explicit LambdaBody(const std::function<void(const cv::Range &)> &body) : _body(body) {}

  /// Invoked by OpenCV with the chunk of the range this worker should process
  void operator()(const cv::Range &range) const override { _body(range); }

private:
  /// The user-provided work function
  std::function<void(const cv::Range &)> _body;
};
} /* namespace ov_core */
#endif /* OV_CORE_OPENCV_LAMBDA_BODY_H */

View File

@@ -0,0 +1,618 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OPENCV_YAML_PARSER_H
#define OPENCV_YAML_PARSER_H
#include <Eigen/Eigen>
#include <boost/filesystem.hpp>
#include <memory>
#include <opencv2/opencv.hpp>
#if ROS_AVAILABLE == 1
#include <ros/ros.h>
#elif ROS_AVAILABLE == 2
#include <rclcpp/rclcpp.hpp>
#endif
#include "colors.h"
#include "print.h"
#include "quat_ops.h"
namespace ov_core {
/**
* @brief Helper class to do OpenCV yaml parsing from both file and ROS.
*
* The logic is as follows:
* - Given a path to the main config file we will load it into our cv::FileStorage object.
* - From there the user can request for different parameters of different types from the config.
* - If we have ROS, then we will also check to see if the user has overridden any config files via ROS.
* - The ROS parameters always take priority over the ones in our config.
*
* NOTE: There are no "nested" yaml parameters. They are all under the "root" of the yaml file!!!
* NOTE: The camera and imu have nested, but those are handled externally....
*/
class YamlParser {
public:
/**
* @brief Constructor that loads all three configuration files
* @param config_path Path to the YAML file we will parse
* @param fail_if_not_found If we should terminate the program if we can't open the config file
*/
explicit YamlParser(const std::string &config_path, bool fail_if_not_found = true) : config_path_(config_path) {
// Check if file exists
if (!fail_if_not_found && !boost::filesystem::exists(config_path)) {
config = nullptr;
return;
}
if (!boost::filesystem::exists(config_path)) {
PRINT_ERROR(RED "unable to open the configuration file!\n%s\n" RESET, config_path.c_str());
std::exit(EXIT_FAILURE);
}
// Open the file, error if we can't
config = std::make_shared<cv::FileStorage>(config_path, cv::FileStorage::READ);
if (!fail_if_not_found && !config->isOpened()) {
config = nullptr;
return;
}
if (!config->isOpened()) {
PRINT_ERROR(RED "unable to open the configuration file!\n%s\n" RESET, config_path.c_str());
std::exit(EXIT_FAILURE);
}
}
#if ROS_AVAILABLE == 1
  /// Allows setting of the node handler if we have ROS 1 to override parameters
  void set_node_handler(std::shared_ptr<ros::NodeHandle> nh_) { this->nh = nh_; }
#endif
#if ROS_AVAILABLE == 2
  /// Allows setting of the node if we have ROS 2 to override parameters
  void set_node(std::shared_ptr<rclcpp::Node> &node_) { this->node = node_; }
#endif
/**
* @brief Will get the folder this config file is in
* @return Config folder
*/
std::string get_config_folder() { return config_path_.substr(0, config_path_.find_last_of('/')) + "/"; }
  /**
   * @brief Check to see if all parameters were read successfully
   * @return True if we found all parameters
   */
  bool successful() const { return all_params_found_successfully; }
  /**
   * @brief Custom parser for the ESTIMATOR parameters.
   *
   * This will load the data from the main config file.
   * If it is unable it will give a warning to the user it couldn't be found.
   *
   * @tparam T Type of parameter we are looking for.
   * @param node_name Name of the node
   * @param node_result Resulting value (should already have default value in it)
   * @param required If this parameter is required by the user to set
   */
  template <class T> void parse_config(const std::string &node_name, T &node_result, bool required = true) {
#if ROS_AVAILABLE == 1
    // ROS parameters always take priority over the YAML config
    if (nh != nullptr && nh->getParam(node_name, node_result)) {
      PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, node_name.c_str());
      // NOTE(review): getParam() above already filled node_result; this two-arg param() call re-reads the same key — looks redundant, confirm
      nh->param<T>(node_name, node_result);
      return;
    }
#elif ROS_AVAILABLE == 2
    // ROS 2 parameters always take priority over the YAML config
    if (node != nullptr && node->has_parameter(node_name)) {
      PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, node_name.c_str());
      node->get_parameter<T>(node_name, node_result);
      return;
    }
#endif
    // Else we just parse from the YAML file!
    parse_config_yaml(node_name, node_result, required);
  }
  /**
   * @brief Custom parser for the external parameter files with levels.
   *
   * This will first load the external file requested.
   * From there it will try to find the first level requested (e.g. imu0, cam0, cam1).
   * Then the requested node can be found under this sensor name.
   * ROS can override the config with `<sensor_name>_<node_name>` (e.g., cam0_distortion).
   *
   * @tparam T Type of parameter we are looking for.
   * @param external_node_name Name of the node we will get our relative path from
   * @param sensor_name The first level node we will try to get the requested node under
   * @param node_name Name of the node
   * @param node_result Resulting value (should already have default value in it)
   * @param required If this parameter is required by the user to set
   */
  template <class T>
  void parse_external(const std::string &external_node_name, const std::string &sensor_name, const std::string &node_name, T &node_result,
                      bool required = true) {
#if ROS_AVAILABLE == 1
    // ROS overrides are flattened as "<sensor>_<node>" (e.g., cam0_distortion)
    std::string rosnode = sensor_name + "_" + node_name;
    if (nh != nullptr && nh->getParam(rosnode, node_result)) {
      PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, rosnode.c_str());
      // NOTE(review): getParam() above already filled node_result; this two-arg param() call re-reads the same key — looks redundant, confirm
      nh->param<T>(rosnode, node_result);
      return;
    }
#elif ROS_AVAILABLE == 2
    // ROS 2 overrides are flattened as "<sensor>_<node>" (e.g., cam0_distortion)
    std::string rosnode = sensor_name + "_" + node_name;
    if (node != nullptr && node->has_parameter(rosnode)) {
      PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, rosnode.c_str());
      node->get_parameter<T>(rosnode, node_result);
      return;
    }
#endif
    // Else we just parse from the YAML file!
    parse_external_yaml(external_node_name, sensor_name, node_name, node_result, required);
  }
/**
 * @brief Custom parser for Matrix3d in the external parameter files with levels.
 *
 * This will first load the external file requested.
 * From there it will try to find the first level requested (e.g. imu0, cam0, cam1).
 * Then the requested node can be found under this sensor name.
 * ROS can override the config with `<sensor_name>_<node_name>` (e.g., cam0_distortion).
 *
 * @param external_node_name Name of the node we will get our relative path from
 * @param sensor_name The first level node we will try to get the requested node under
 * @param node_name Name of the node
 * @param node_result Resulting value (should already have default value in it)
 * @param required If this parameter is required by the user to set
 */
void parse_external(const std::string &external_node_name, const std::string &sensor_name, const std::string &node_name,
                    Eigen::Matrix3d &node_result, bool required = true) {
#if ROS_AVAILABLE == 1
  // If we have the ROS parameter, we should just get that one
  // NOTE: for our 3x3 matrix we should read it as a flat array from ROS then convert it back into the 3x3
  // NOTE: getParam() both checks existence and fills the vector, so no second fetch is needed
  std::string rosnode = sensor_name + "_" + node_name;
  std::vector<double> matrix_RCtoI;
  if (nh != nullptr && nh->getParam(rosnode, matrix_RCtoI)) {
    PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, rosnode.c_str());
    node_result << matrix_RCtoI.at(0), matrix_RCtoI.at(1), matrix_RCtoI.at(2), matrix_RCtoI.at(3), matrix_RCtoI.at(4), matrix_RCtoI.at(5),
        matrix_RCtoI.at(6), matrix_RCtoI.at(7), matrix_RCtoI.at(8);
    return;
  }
#elif ROS_AVAILABLE == 2
  // If we have the ROS parameter, we should just get that one
  // NOTE: for our 3x3 matrix we should read it as a flat array from ROS then convert it back into the 3x3
  std::string rosnode = sensor_name + "_" + node_name;
  std::vector<double> matrix_RCtoI;
  if (node != nullptr && node->has_parameter(rosnode)) {
    PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, rosnode.c_str());
    node->get_parameter<std::vector<double>>(rosnode, matrix_RCtoI);
    node_result << matrix_RCtoI.at(0), matrix_RCtoI.at(1), matrix_RCtoI.at(2), matrix_RCtoI.at(3), matrix_RCtoI.at(4), matrix_RCtoI.at(5),
        matrix_RCtoI.at(6), matrix_RCtoI.at(7), matrix_RCtoI.at(8);
    return;
  }
#endif
  // Else we just parse from the YAML file!
  parse_external_yaml(external_node_name, sensor_name, node_name, node_result, required);
}
/**
 * @brief Custom parser for Matrix4d in the external parameter files with levels.
 *
 * This will first load the external file requested.
 * From there it will try to find the first level requested (e.g. imu0, cam0, cam1).
 * Then the requested node can be found under this sensor name.
 * ROS can override the config with `<sensor_name>_<node_name>` (e.g., cam0_distortion).
 *
 * @param external_node_name Name of the node we will get our relative path from
 * @param sensor_name The first level node we will try to get the requested node under
 * @param node_name Name of the node
 * @param node_result Resulting value (should already have default value in it)
 * @param required If this parameter is required by the user to set
 */
void parse_external(const std::string &external_node_name, const std::string &sensor_name, const std::string &node_name,
                    Eigen::Matrix4d &node_result, bool required = true) {
#if ROS_AVAILABLE == 1
  // If we have the ROS parameter, we should just get that one
  // NOTE: for our 4x4 matrix we should read it as a flat array from ROS then convert it back into the 4x4
  // NOTE: getParam() both checks existence and fills the vector, so no second fetch is needed
  std::string rosnode = sensor_name + "_" + node_name;
  std::vector<double> matrix_TCtoI;
  if (nh != nullptr && nh->getParam(rosnode, matrix_TCtoI)) {
    PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, rosnode.c_str());
    node_result << matrix_TCtoI.at(0), matrix_TCtoI.at(1), matrix_TCtoI.at(2), matrix_TCtoI.at(3), matrix_TCtoI.at(4), matrix_TCtoI.at(5),
        matrix_TCtoI.at(6), matrix_TCtoI.at(7), matrix_TCtoI.at(8), matrix_TCtoI.at(9), matrix_TCtoI.at(10), matrix_TCtoI.at(11),
        matrix_TCtoI.at(12), matrix_TCtoI.at(13), matrix_TCtoI.at(14), matrix_TCtoI.at(15);
    return;
  }
#elif ROS_AVAILABLE == 2
  // If we have the ROS parameter, we should just get that one
  // NOTE: for our 4x4 matrix we should read it as a flat array from ROS then convert it back into the 4x4
  std::string rosnode = sensor_name + "_" + node_name;
  std::vector<double> matrix_TCtoI;
  if (node != nullptr && node->has_parameter(rosnode)) {
    PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, rosnode.c_str());
    node->get_parameter<std::vector<double>>(rosnode, matrix_TCtoI);
    node_result << matrix_TCtoI.at(0), matrix_TCtoI.at(1), matrix_TCtoI.at(2), matrix_TCtoI.at(3), matrix_TCtoI.at(4), matrix_TCtoI.at(5),
        matrix_TCtoI.at(6), matrix_TCtoI.at(7), matrix_TCtoI.at(8), matrix_TCtoI.at(9), matrix_TCtoI.at(10), matrix_TCtoI.at(11),
        matrix_TCtoI.at(12), matrix_TCtoI.at(13), matrix_TCtoI.at(14), matrix_TCtoI.at(15);
    return;
  }
#endif
  // Else we just parse from the YAML file!
  parse_external_yaml(external_node_name, sensor_name, node_name, node_result, required);
}
#if ROS_AVAILABLE == 2
/// ROS2 parameter servers only expose integer parameters as int64_t, so we fetch
/// into a wide temporary and narrow it back down to the caller's int.
/// https://docs.ros2.org/bouncy/api/rclcpp/classrclcpp_1_1_parameter.html
void parse_config(const std::string &node_name, int &node_result, bool required = true) {
  if (node != nullptr && node->has_parameter(node_name)) {
    PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, node_name.c_str());
    int64_t wide_value = node_result;
    node->get_parameter<int64_t>(node_name, wide_value);
    node_result = (int)wide_value;
    return;
  }
  // No ROS override, fall back to the YAML config
  parse_config_yaml(node_name, node_result, required);
}
/// ROS2 parameter servers only expose integer vectors as std::vector<int64_t>, so
/// we round-trip through a wide temporary and convert back to the caller's ints.
/// https://docs.ros2.org/bouncy/api/rclcpp/classrclcpp_1_1_parameter.html
void parse_external(const std::string &external_node_name, const std::string &sensor_name, const std::string &node_name,
                    std::vector<int> &node_result, bool required = true) {
  std::string rosnode = sensor_name + "_" + node_name;
  if (node != nullptr && node->has_parameter(rosnode)) {
    PRINT_INFO(GREEN "overriding node " BOLDGREEN "%s" RESET GREEN " with value from ROS!\n" RESET, rosnode.c_str());
    // Seed with the current values, then let ROS overwrite them
    std::vector<int64_t> wide_values(node_result.begin(), node_result.end());
    node->get_parameter<std::vector<int64_t>>(rosnode, wide_values);
    node_result.clear();
    for (const auto &wide : wide_values)
      node_result.push_back((int)wide);
    return;
  }
  // No ROS override, fall back to the YAML config
  parse_external_yaml(external_node_name, sensor_name, node_name, node_result, required);
}
#endif
private:
/// Path to the main config file (also used to resolve external files relative to its folder)
std::string config_path_;
/// Our main config file with the data in it (OpenCV YAML file storage)
std::shared_ptr<cv::FileStorage> config;
/// Record if all parameters were found (set false whenever a required parse fails)
bool all_params_found_successfully = true;
#if ROS_AVAILABLE == 1
/// ROS1 node handler whose parameters, if set, override the YAML values
std::shared_ptr<ros::NodeHandle> nh;
#endif
#if ROS_AVAILABLE == 2
/// ROS2 rclcpp node whose parameters, if set, override the YAML values
std::shared_ptr<rclcpp::Node> node = nullptr;
#endif
/**
 * @brief Given a YAML node object, this check to see if we have a valid key
 * @param file_node OpenCV file node we will get the data from
 * @param node_name Name of the node
 * @return True if we can get the data
 */
static bool node_found(const cv::FileNode &file_node, const std::string &node_name) {
  // Return as soon as the key is seen (no need to scan the remaining children)
  for (const auto &item : file_node) {
    if (item.name() == node_name) {
      return true;
    }
  }
  return false;
}
/**
 * @brief This function will try to get the requested parameter from our config.
 *
 * If it is unable to find it, it will give a warning to the user it couldn't be found.
 *
 * @tparam T Type of parameter we are looking for.
 * @param file_node OpenCV file node we will get the data from
 * @param node_name Name of the node
 * @param node_result Resulting value (should already have default value in it)
 * @param required If this parameter is required by the user to set
 */
template <class T> void parse(const cv::FileNode &file_node, const std::string &node_name, T &node_result, bool required = true) {
  // Make sure the key exists before we try to read it
  if (!node_found(file_node, node_name)) {
    if (!required) {
      PRINT_DEBUG("the node %s of type [%s] was not found (not required)...\n", node_name.c_str(), typeid(node_result).name());
    } else {
      PRINT_WARNING(YELLOW "the node %s of type [%s] was not found...\n" RESET, node_name.c_str(), typeid(node_result).name());
      all_params_found_successfully = false;
    }
    return;
  }
  // Stream the value out of the YAML node, reporting any conversion failure
  try {
    file_node[node_name] >> node_result;
  } catch (...) {
    if (!required) {
      PRINT_DEBUG("unable to parse %s node of type [%s] in the config file (not required)\n", node_name.c_str(),
                  typeid(node_result).name());
    } else {
      PRINT_WARNING(YELLOW "unable to parse %s node of type [%s] in the config file!\n" RESET, node_name.c_str(),
                    typeid(node_result).name());
      all_params_found_successfully = false;
    }
  }
}
/**
 * @brief Custom parser for booleans (0,false,False,FALSE=>false and 1,true,True,TRUE=>true)
 * @param file_node OpenCV file node we will get the data from
 * @param node_name Name of the node
 * @param node_result Resulting value (should already have default value in it)
 * @param required If this parameter is required by the user to set
 */
void parse(const cv::FileNode &file_node, const std::string &node_name, bool &node_result, bool required = true) {
  // Check that we have the requested node
  if (!node_found(file_node, node_name)) {
    if (required) {
      PRINT_WARNING(YELLOW "the node %s of type [%s] was not found...\n" RESET, node_name.c_str(), typeid(node_result).name());
      all_params_found_successfully = false;
    } else {
      PRINT_DEBUG("the node %s of type [%s] was not found (not required)...\n", node_name.c_str(), typeid(node_result).name());
    }
    return;
  }
  // Now try to get it from the config
  try {
    // Fast path: YAML may have stored the boolean as a literal integer 0/1
    if (file_node[node_name].isInt() && (int)file_node[node_name] == 1) {
      node_result = true;
      return;
    }
    if (file_node[node_name].isInt() && (int)file_node[node_name] == 0) {
      node_result = false;
      return;
    }
    // NOTE: we select the first bit of text as there can be a comment afterwards
    // NOTE: for example we could have "key: true #comment here..." where we only want "true"
    std::string value;
    file_node[node_name] >> value;
    value = value.substr(0, value.find_first_of('#'));
    value = value.substr(0, value.find_first_of(' '));
    if (value == "1" || value == "true" || value == "True" || value == "TRUE") {
      node_result = true;
    } else if (value == "0" || value == "false" || value == "False" || value == "FALSE") {
      node_result = false;
    } else {
      // Anything else is not a valid boolean spelling, so flag the config as bad
      PRINT_WARNING(YELLOW "the node %s has an invalid boolean type of [%s]\n" RESET, node_name.c_str(), value.c_str());
      all_params_found_successfully = false;
    }
  } catch (...) {
    if (required) {
      PRINT_WARNING(YELLOW "unable to parse %s node of type [%s] in the config file!\n" RESET, node_name.c_str(),
                    typeid(node_result).name());
      all_params_found_successfully = false;
    } else {
      PRINT_DEBUG("unable to parse %s node of type [%s] in the config file (not required)\n", node_name.c_str(),
                  typeid(node_result).name());
    }
  }
}
/**
 * @brief Custom parser for camera extrinsic 3x3 transformations
 * @param file_node OpenCV file node we will get the data from
 * @param node_name Name of the node
 * @param node_result Resulting value (should already have default value in it)
 * @param required If this parameter is required by the user to set
 */
void parse(const cv::FileNode &file_node, const std::string &node_name, Eigen::Matrix3d &node_result, bool required = true) {
  // Make sure the key exists before we try to read it
  if (!node_found(file_node, node_name)) {
    if (!required) {
      PRINT_DEBUG("the node %s of type [%s] was not found (not required)...\n", node_name.c_str(), typeid(node_result).name());
    } else {
      PRINT_WARNING(YELLOW "the node %s of type [%s] was not found...\n" RESET, node_name.c_str(), typeid(node_result).name());
      all_params_found_successfully = false;
    }
    return;
  }
  // Copy the nested YAML sequence into the matrix, element by element
  // Missing rows/columns are left at their identity defaults
  node_result = Eigen::Matrix3d::Identity();
  try {
    const cv::FileNode matrix_node = file_node[node_name];
    for (int r = 0; r < 3 && r < (int)matrix_node.size(); r++) {
      const cv::FileNode row_node = matrix_node[r];
      for (int c = 0; c < 3 && c < (int)row_node.size(); c++) {
        node_result(r, c) = (double)row_node[c];
      }
    }
  } catch (...) {
    if (!required) {
      PRINT_DEBUG("unable to parse %s node of type [%s] in the config file (not required)\n", node_name.c_str(),
                  typeid(node_result).name());
    } else {
      PRINT_WARNING(YELLOW "unable to parse %s node of type [%s] in the config file!\n" RESET, node_name.c_str(),
                    typeid(node_result).name());
      all_params_found_successfully = false;
    }
  }
}
/**
 * @brief Custom parser for camera extrinsic 4x4 transformations
 *
 * Accepts either `T_cam_imu` or `T_imu_cam` spellings: if the requested name is
 * missing but its inverse-direction twin exists, the twin is read and inverted
 * with ov_core::Inv_se3 so the caller always receives the transform they asked for.
 *
 * @param file_node OpenCV file node we will get the data from
 * @param node_name Name of the node
 * @param node_result Resulting value (should already have default value in it)
 * @param required If this parameter is required by the user to set
 */
void parse(const cv::FileNode &file_node, const std::string &node_name, Eigen::Matrix4d &node_result, bool required = true) {
  // See if we need to flip the node name
  std::string node_name_local = node_name;
  if (node_name == "T_cam_imu" && !node_found(file_node, node_name)) {
    PRINT_INFO("parameter T_cam_imu not found, trying T_imu_cam instead (will return T_cam_imu still)!\n");
    node_name_local = "T_imu_cam";
  } else if (node_name == "T_imu_cam" && !node_found(file_node, node_name)) {
    PRINT_INFO("parameter T_imu_cam not found, trying T_cam_imu instead (will return T_imu_cam still)!\n");
    node_name_local = "T_cam_imu";
  }
  // Check that we have the requested node
  if (!node_found(file_node, node_name_local)) {
    if (required) {
      PRINT_WARNING(YELLOW "the node %s of type [%s] was not found...\n" RESET, node_name_local.c_str(), typeid(node_result).name());
      all_params_found_successfully = false;
    } else {
      PRINT_DEBUG("the node %s of type [%s] was not found (not required)...\n", node_name_local.c_str(), typeid(node_result).name());
    }
    return;
  }
  // Now try to get it from the config
  // Missing rows/columns are left at their identity defaults
  node_result = Eigen::Matrix4d::Identity();
  try {
    for (int r = 0; r < (int)file_node[node_name_local].size() && r < 4; r++) {
      for (int c = 0; c < (int)file_node[node_name_local][r].size() && c < 4; c++) {
        node_result(r, c) = (double)file_node[node_name_local][r][c];
      }
    }
  } catch (...) {
    if (required) {
      PRINT_WARNING(YELLOW "unable to parse %s node of type [%s] in the config file!\n" RESET, node_name.c_str(),
                    typeid(node_result).name());
      all_params_found_successfully = false;
    } else {
      PRINT_DEBUG("unable to parse %s node of type [%s] in the config file (not required)\n", node_name.c_str(),
                  typeid(node_result).name());
    }
  }
  // Finally if we flipped the transform, get the correct value
  if (node_name_local != node_name) {
    Eigen::Matrix4d tmp(node_result);
    node_result = ov_core::Inv_se3(tmp);
  }
}
/**
 * @brief Custom parser for the ESTIMATOR parameters.
 *
 * This will load the data from the main config file.
 * If it is unable it will give a warning to the user it couldn't be found.
 *
 * @tparam T Type of parameter we are looking for.
 * @param node_name Name of the node
 * @param node_result Resulting value (should already have default value in it)
 * @param required If this parameter is required by the user to set
 */
template <class T> void parse_config_yaml(const std::string &node_name, T &node_result, bool required = true) {
  // Nothing to do if the main config file was never opened
  if (config == nullptr)
    return;
  // Read from the root of the main config, flagging failure if it throws
  try {
    parse(config->root(), node_name, node_result, required);
  } catch (...) {
    PRINT_WARNING(YELLOW "unable to parse %s node of type [%s] in the config file!\n" RESET, node_name.c_str(), typeid(node_result).name());
    all_params_found_successfully = false;
  }
}
/**
 * @brief Custom parser for the EXTERNAL parameter files with levels.
 *
 * This will first load the external file requested.
 * From there it will try to find the first level requested (e.g. imu0, cam0, cam1).
 * Then the requested node can be found under this sensor name.
 * ROS can override the config with `<sensor_name>_<node_name>` (e.g., cam0_distortion).
 *
 * @tparam T Type of parameter we are looking for.
 * @param external_node_name Name of the node we will get our relative path from
 * @param sensor_name The first level node we will try to get the requested node under
 * @param node_name Name of the node
 * @param node_result Resulting value (should already have default value in it)
 * @param required If this parameter is required by the user to set
 */
template <class T>
void parse_external_yaml(const std::string &external_node_name, const std::string &sensor_name, const std::string &node_name,
                         T &node_result, bool required = true) {
  // Directly return if the config hasn't been opened
  if (config == nullptr)
    return;
  // Create the path the external yaml file
  // NOTE: a missing external-path node is fatal since we cannot locate the file at all
  std::string path;
  if (!node_found(config->root(), external_node_name)) {
    PRINT_ERROR(RED "the external node %s could not be found!\n" RESET, external_node_name.c_str());
    std::exit(EXIT_FAILURE);
  }
  (*config)[external_node_name] >> path;
  // The external path is stored relative to the folder of the main config file
  std::string relative_folder = config_path_.substr(0, config_path_.find_last_of('/')) + "/";
  // Now actually try to load them from file!
  auto config_external = std::make_shared<cv::FileStorage>(relative_folder + path, cv::FileStorage::READ);
  if (!config_external->isOpened()) {
    PRINT_ERROR(RED "unable to open the configuration file!\n%s\n" RESET, (relative_folder + path).c_str());
    std::exit(EXIT_FAILURE);
  }
  // Check that we have the requested node (e.g. imu0, cam0, cam1)
  if (!node_found(config_external->root(), sensor_name)) {
    PRINT_WARNING(YELLOW "the sensor %s of type [%s] was not found...\n" RESET, sensor_name.c_str(), typeid(node_result).name());
    all_params_found_successfully = false;
    return;
  }
  // Else lets get it!
  try {
    parse((*config_external)[sensor_name], node_name, node_result, required);
  } catch (...) {
    PRINT_WARNING(YELLOW "unable to parse %s node of type [%s] in [%s] in the external %s config file!\n" RESET, node_name.c_str(),
                  typeid(node_result).name(), sensor_name.c_str(), external_node_name.c_str());
    all_params_found_successfully = false;
  }
}
};
} /* namespace ov_core */
#endif /* OPENCV_YAML_PARSER_H */

104
ov_core/src/utils/print.cpp Normal file
View File

@@ -0,0 +1,104 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "print.h"
using namespace ov_core;
// Need to define the static variable for everything to work
// (default verbosity is INFO: INFO, WARNING and ERROR messages are printed)
Printer::PrintLevel Printer::current_print_level = PrintLevel::INFO;
void Printer::setPrintLevel(const std::string &level) {
if (level == "ALL")
setPrintLevel(PrintLevel::ALL);
else if (level == "DEBUG")
setPrintLevel(PrintLevel::DEBUG);
else if (level == "INFO")
setPrintLevel(PrintLevel::INFO);
else if (level == "WARNING")
setPrintLevel(PrintLevel::WARNING);
else if (level == "ERROR")
setPrintLevel(PrintLevel::ERROR);
else if (level == "SILENT")
setPrintLevel(PrintLevel::SILENT);
else {
std::cout << "Invalid print level requested: " << level << std::endl;
std::cout << "Valid levels are: ALL, DEBUG, INFO, WARNING, ERROR, SILENT" << std::endl;
std::exit(EXIT_FAILURE);
}
}
void Printer::setPrintLevel(PrintLevel level) {
  Printer::current_print_level = level;
  std::cout << "Setting printing level to: ";
  // Resolve the enum to its display name; bail out on anything unknown
  const char *name = nullptr;
  switch (current_print_level) {
  case PrintLevel::ALL:
    name = "ALL";
    break;
  case PrintLevel::DEBUG:
    name = "DEBUG";
    break;
  case PrintLevel::INFO:
    name = "INFO";
    break;
  case PrintLevel::WARNING:
    name = "WARNING";
    break;
  case PrintLevel::ERROR:
    name = "ERROR";
    break;
  case PrintLevel::SILENT:
    name = "SILENT";
    break;
  default:
    std::cout << std::endl;
    std::cout << "Invalid print level requested: " << level << std::endl;
    std::cout << "Valid levels are: ALL, DEBUG, INFO, WARNING, ERROR, SILENT" << std::endl;
    std::exit(EXIT_FAILURE);
  }
  std::cout << name << std::endl;
}
void Printer::debugPrint(PrintLevel level, const char location[], const char line[], const char *format, ...) {
  // Only print for the current debug level
  if (static_cast<int>(level) < static_cast<int>(Printer::current_print_level)) {
    return;
  }
  // Print the location info first for our debug output
  // Truncate the filename to the max size for the filepath
  // (only when the global level is DEBUG or lower, so normal output stays clean)
  if (static_cast<int>(Printer::current_print_level) <= static_cast<int>(Printer::PrintLevel::DEBUG)) {
    std::string path(location);
    std::string base_filename = path.substr(path.find_last_of("/\\") + 1);
    if (base_filename.size() > MAX_FILE_PATH_LEGTH) {
      // Keep the tail of the name; substr's second argument is a count and is clamped, so passing size() is safe
      printf("%s", base_filename.substr(base_filename.size() - MAX_FILE_PATH_LEGTH, base_filename.size()).c_str());
    } else {
      printf("%s", base_filename.c_str());
    }
    printf(":%s ", line);
  }
  // Print the rest of the args (standard printf-style varargs forwarding)
  va_list args;
  va_start(args, format);
  vprintf(format, args);
  va_end(args);
}

102
ov_core/src/utils/print.h Normal file
View File

@@ -0,0 +1,102 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_PRINT_H
#define OV_CORE_PRINT_H
#include <cstdarg>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
namespace ov_core {
/**
 * @brief Printer for open_vins that allows for various levels of printing to be done
 *
 * To set the global verbosity level one can do the following:
 * @code{.cpp}
 * ov_core::Printer::setPrintLevel("WARNING");
 * ov_core::Printer::setPrintLevel(ov_core::Printer::PrintLevel::WARNING);
 * @endcode
 */
class Printer {
public:
  /**
   * @brief The different print levels possible
   *
   * - PrintLevel::ALL : All PRINT_XXXX will output to the console
   * - PrintLevel::DEBUG : "DEBUG", "INFO", "WARNING" and "ERROR" will be printed. "ALL" will be silenced
   * - PrintLevel::INFO : "INFO", "WARNING" and "ERROR" will be printed. "ALL" and "DEBUG" will be silenced
   * - PrintLevel::WARNING : "WARNING" and "ERROR" will be printed. "ALL", "DEBUG" and "INFO" will be silenced
   * - PrintLevel::ERROR : Only "ERROR" will be printed. All the rest are silenced
   * - PrintLevel::SILENT : All PRINT_XXXX will be silenced.
   */
  enum PrintLevel { ALL = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, SILENT = 5 };
  /**
   * @brief Set the print level to use for all future printing to stdout.
   * @param level The debug level to use (one of "ALL", "DEBUG", "INFO", "WARNING", "ERROR", "SILENT"; exits on anything else)
   */
  static void setPrintLevel(const std::string &level);
  /**
   * @brief Set the print level to use for all future printing to stdout.
   * @param level The debug level to use
   */
  static void setPrintLevel(PrintLevel level);
  /**
   * @brief The print function that prints to stdout.
   * @param level the print level for this print call
   * @param location the location the print was made from (typically __FILE__)
   * @param line the line the print was made from (typically a stringified __LINE__)
   * @param format The printf format
   */
  static void debugPrint(PrintLevel level, const char location[], const char line[], const char *format, ...);
  /// The current print level
  static PrintLevel current_print_level;

private:
  /// The max length for the file path. This is to avoid very long file paths from
  /// cluttering the output (only the tail of the filename is printed when it is longer).
  static constexpr uint32_t MAX_FILE_PATH_LEGTH = 30;
};
} /* namespace ov_core */
/*
 * Converts anything to a string
 * (two-level macro so arguments like __LINE__ are expanded before stringification)
 */
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
/*
 * The different Types of print levels
 * Each forwards to Printer::debugPrint with the call site's file and line baked in.
 */
#define PRINT_ALL(x...) ov_core::Printer::debugPrint(ov_core::Printer::PrintLevel::ALL, __FILE__, TOSTRING(__LINE__), x);
#define PRINT_DEBUG(x...) ov_core::Printer::debugPrint(ov_core::Printer::PrintLevel::DEBUG, __FILE__, TOSTRING(__LINE__), x);
#define PRINT_INFO(x...) ov_core::Printer::debugPrint(ov_core::Printer::PrintLevel::INFO, __FILE__, TOSTRING(__LINE__), x);
#define PRINT_WARNING(x...) ov_core::Printer::debugPrint(ov_core::Printer::PrintLevel::WARNING, __FILE__, TOSTRING(__LINE__), x);
#define PRINT_ERROR(x...) ov_core::Printer::debugPrint(ov_core::Printer::PrintLevel::ERROR, __FILE__, TOSTRING(__LINE__), x);
#endif /* OV_CORE_PRINT_H */

View File

@@ -0,0 +1,588 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_QUAT_OPS_H
#define OV_CORE_QUAT_OPS_H
/*
* @section Summary
* This file contains the common utility functions for operating on JPL quaternions.
* We need to take special care to handle edge cases when converting to and from other rotation formats.
* All equations are based on the following tech report:
* > Trawny, Nikolas, and Stergios I. Roumeliotis. "Indirect Kalman filter for 3D attitude estimation."
* > University of Minnesota, Dept. of Comp. Sci. & Eng., Tech. Rep 2 (2005): 2005.
* > http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf
*
* @section JPL Quaternion Definition
*
* We define the quaternion as the following linear combination:
* @f[
* \bar{q} = q_4+q_1\mathbf{i}+q_2\mathbf{j}+q_3\mathbf{k}
* @f]
* Where i,j,k are defined as the following:
* @f[
* \mathbf{i}^2=-1~,~\mathbf{j}^2=-1~,~\mathbf{k}^2=-1
* @f]
* @f[
* -\mathbf{i}\mathbf{j}=\mathbf{j}\mathbf{i}=\mathbf{k}
* ~,~
* -\mathbf{j}\mathbf{k}=\mathbf{k}\mathbf{j}=\mathbf{i}
* ~,~
* -\mathbf{k}\mathbf{i}=\mathbf{i}\mathbf{k}=\mathbf{j}
* @f]
* As noted in [Trawny2005] this does not correspond to the Hamilton notation, and follows the "JPL Proposed Standard Conventions".
* The q_4 quantity is the "scalar" portion of the quaternion, while q_1,q_2,q_3 are part of the "vector" portion.
* We split the 4x1 vector into the following convention:
* @f[
* \bar{q} = \begin{bmatrix}q_1\\q_2\\q_3\\q_4\end{bmatrix} = \begin{bmatrix}\mathbf{q}\\q_4\end{bmatrix}
* @f]
* It is also important to note that the quaternion is constrainted to the unit circle:
* @f[
* |\bar{q}| = \sqrt{\bar{q}^\top\bar{q}} = \sqrt{|\mathbf{q}|^2+q_4^2} = 1
* @f]
*
*/
#include <Eigen/Eigen>
namespace ov_core {
/**
 * @brief Returns a JPL quaternion from a rotation matrix
 *
 * This is based on the equation 74 in [Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf).
 * In the implementation, we have 4 statements so that we avoid a division by zero and
 * instead always divide by the largest diagonal element. This all comes from the
 * definition of a rotation matrix, using the diagonal elements and an off-diagonal.
 * \f{align*}{
 * \mathbf{R}(\bar{q})=
 * \begin{bmatrix}
 * q_1^2-q_2^2-q_3^2+q_4^2 & 2(q_1q_2+q_3q_4) & 2(q_1q_3-q_2q_4) \\
 * 2(q_1q_2-q_3q_4) & -q_1^2+q_2^2-q_3^2+q_4^2 & 2(q_2q_3+q_1q_4) \\
 * 2(q_1q_3+q_2q_4) & 2(q_2q_3-q_1q_4) & -q_1^2-q_2^2+q_3^2+q_4^2
 * \end{bmatrix}
 * \f}
 *
 * @param[in] rot 3x3 rotation matrix
 * @return 4x1 quaternion
 */
inline Eigen::Matrix<double, 4, 1> rot_2_quat(const Eigen::Matrix<double, 3, 3> &rot) {
  Eigen::Matrix<double, 4, 1> q;
  double T = rot.trace();
  // Divide by the largest of (1+2*R(i,i)-T) to avoid numerical issues near zero
  if ((rot(0, 0) >= T) && (rot(0, 0) >= rot(1, 1)) && (rot(0, 0) >= rot(2, 2))) {
    q(0) = sqrt((1 + (2 * rot(0, 0)) - T) / 4);
    q(1) = (1 / (4 * q(0))) * (rot(0, 1) + rot(1, 0));
    q(2) = (1 / (4 * q(0))) * (rot(0, 2) + rot(2, 0));
    q(3) = (1 / (4 * q(0))) * (rot(1, 2) - rot(2, 1));
  } else if ((rot(1, 1) >= T) && (rot(1, 1) >= rot(0, 0)) && (rot(1, 1) >= rot(2, 2))) {
    q(1) = sqrt((1 + (2 * rot(1, 1)) - T) / 4);
    q(0) = (1 / (4 * q(1))) * (rot(0, 1) + rot(1, 0));
    q(2) = (1 / (4 * q(1))) * (rot(1, 2) + rot(2, 1));
    q(3) = (1 / (4 * q(1))) * (rot(2, 0) - rot(0, 2));
  } else if ((rot(2, 2) >= T) && (rot(2, 2) >= rot(0, 0)) && (rot(2, 2) >= rot(1, 1))) {
    q(2) = sqrt((1 + (2 * rot(2, 2)) - T) / 4);
    q(0) = (1 / (4 * q(2))) * (rot(0, 2) + rot(2, 0));
    q(1) = (1 / (4 * q(2))) * (rot(1, 2) + rot(2, 1));
    q(3) = (1 / (4 * q(2))) * (rot(0, 1) - rot(1, 0));
  } else {
    q(3) = sqrt((1 + T) / 4);
    q(0) = (1 / (4 * q(3))) * (rot(1, 2) - rot(2, 1));
    q(1) = (1 / (4 * q(3))) * (rot(2, 0) - rot(0, 2));
    q(2) = (1 / (4 * q(3))) * (rot(0, 1) - rot(1, 0));
  }
  // Enforce the unique representation with a non-negative scalar part
  if (q(3) < 0) {
    q = -q;
  }
  // normalize and return
  q = q / (q.norm());
  return q;
}
/**
 * @brief Skew-symmetric matrix from a given 3x1 vector
 *
 * This is based on equation 6 in [Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf):
 * \f{align*}{
 * \lfloor\mathbf{v}\times\rfloor =
 * \begin{bmatrix}
 * 0 & -v_3 & v_2 \\ v_3 & 0 & -v_1 \\ -v_2 & v_1 & 0
 * \end{bmatrix}
 * @f}
 *
 * @param[in] w 3x1 vector to be made a skew-symmetric
 * @return 3x3 skew-symmetric matrix
 */
inline Eigen::Matrix<double, 3, 3> skew_x(const Eigen::Matrix<double, 3, 1> &w) {
  // Fill the six off-diagonal entries; the diagonal stays zero
  Eigen::Matrix<double, 3, 3> w_x = Eigen::Matrix<double, 3, 3>::Zero();
  w_x(0, 1) = -w(2);
  w_x(0, 2) = w(1);
  w_x(1, 0) = w(2);
  w_x(1, 2) = -w(0);
  w_x(2, 0) = -w(1);
  w_x(2, 1) = w(0);
  return w_x;
}
/**
 * @brief Converts JPL quaternion to SO(3) rotation matrix
 *
 * This is based on equation 62 in [Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf):
 * \f{align*}{
 * \mathbf{R} = (2q_4^2-1)\mathbf{I}_3-2q_4\lfloor\mathbf{q}\times\rfloor+2\mathbf{q}\mathbf{q}^\top
 * @f}
 *
 * @param[in] q JPL quaternion
 * @return 3x3 SO(3) rotation matrix
 */
inline Eigen::Matrix<double, 3, 3> quat_2_Rot(const Eigen::Matrix<double, 4, 1> &q) {
  // Split into the vector part q and scalar part q4, then apply eq. 62 term by term
  const Eigen::Matrix<double, 3, 1> q_vec = q.block(0, 0, 3, 1);
  const double q4 = q(3, 0);
  Eigen::Matrix<double, 3, 3> Rot = (2 * std::pow(q4, 2) - 1) * Eigen::Matrix<double, 3, 3>::Identity();
  Rot -= 2 * q4 * skew_x(q_vec);
  Rot += 2 * q_vec * q_vec.transpose();
  return Rot;
}
/**
 * @brief Multiply two JPL quaternions
 *
 * This is based on equation 9 in [Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf).
 * We also enforce that the quaternion is unique by having q_4 be greater than zero.
 * \f{align*}{
 * \bar{q}\otimes\bar{p}=
 * \mathcal{L}(\bar{q})\bar{p}=
 * \begin{bmatrix}
 * q_4\mathbf{I}_3+\lfloor\mathbf{q}\times\rfloor & \mathbf{q} \\
 * -\mathbf{q}^\top & q_4
 * \end{bmatrix}
 * \begin{bmatrix}
 * \mathbf{p} \\ p_4
 * \end{bmatrix}
 * @f}
 *
 * @param[in] q First JPL quaternion
 * @param[in] p Second JPL quaternion
 * @return 4x1 resulting q*p quaternion
 */
inline Eigen::Matrix<double, 4, 1> quat_multiply(const Eigen::Matrix<double, 4, 1> &q, const Eigen::Matrix<double, 4, 1> &p) {
  const Eigen::Matrix<double, 3, 1> q_vec = q.block(0, 0, 3, 1);
  const double q4 = q(3, 0);
  // Assemble the big "L" multiplication matrix from eq. 9
  Eigen::Matrix<double, 4, 4> Qm;
  Qm.block(0, 0, 3, 3) = q4 * Eigen::Matrix<double, 3, 3>::Identity() - skew_x(q_vec);
  Qm.block(0, 3, 3, 1) = q_vec;
  Qm.block(3, 0, 1, 3) = -q_vec.transpose();
  Qm(3, 3) = q4;
  Eigen::Matrix<double, 4, 1> q_t = Qm * p;
  // Enforce the unique representation by forcing the scalar part to be >= 0
  if (q_t(3, 0) < 0) {
    q_t *= -1;
  }
  // normalize and return
  return q_t / q_t.norm();
}
/**
 * @brief Returns vector portion of skew-symmetric
 *
 * Inverse of skew_x(): extracts the 3x1 vector from a skew-symmetric matrix.
 *
 * @param[in] w_x skew-symmetric matrix
 * @return 3x1 vector portion of skew
 */
inline Eigen::Matrix<double, 3, 1> vee(const Eigen::Matrix<double, 3, 3> &w_x) {
  // Read each component from its off-diagonal location
  Eigen::Matrix<double, 3, 1> vec;
  vec(0) = w_x(2, 1);
  vec(1) = w_x(0, 2);
  vec(2) = w_x(1, 0);
  return vec;
}
/**
 * @brief SO(3) matrix exponential
 *
 * SO(3) matrix exponential mapping from the vector to SO(3) lie group.
 * This formula ends up being the [Rodrigues formula](https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula).
 * This definition was taken from "Lie Groups for 2D and 3D Transformations" by Ethan Eade equation 15.
 * http://ethaneade.com/lie.pdf
 *
 * \f{align*}{
 * \exp\colon\mathfrak{so}(3)&\to SO(3) \\
 * \exp(\mathbf{v}) &=
 * \mathbf{I}
 * +\frac{\sin{\theta}}{\theta}\lfloor\mathbf{v}\times\rfloor
 * +\frac{1-\cos{\theta}}{\theta^2}\lfloor\mathbf{v}\times\rfloor^2 \\
 * \mathrm{where}&\quad \theta^2 = \mathbf{v}^\top\mathbf{v}
 * @f}
 *
 * @param[in] w 3x1 vector in R(3) we will take the exponential of
 * @return SO(3) rotation matrix
 */
inline Eigen::Matrix<double, 3, 3> exp_so3(const Eigen::Matrix<double, 3, 1> &w) {
  // Rotation axis (unnormalized) as a skew matrix, and rotation angle
  Eigen::Matrix<double, 3, 3> w_x = skew_x(w);
  double theta = w.norm();
  // Rodrigues coefficients, with Taylor-series limits for small angles
  // to avoid dividing by (nearly) zero: sin(t)/t -> 1, (1-cos(t))/t^2 -> 1/2
  double A, B;
  if (theta < 1e-7) {
    A = 1;
    B = 0.5;
  } else {
    A = sin(theta) / theta;
    B = (1 - cos(theta)) / (theta * theta);
  }
  // Rodrigues formula. No separate theta == 0 branch is needed: in that case
  // w_x is the zero matrix and the expression reduces to the identity anyway
  // (the original computed A and B, then discarded them in a redundant branch).
  return Eigen::Matrix<double, 3, 3>::Identity() + A * w_x + B * w_x * w_x;
}
/**
 * @brief SO(3) matrix logarithm
 *
 * This definition was taken from "Lie Groups for 2D and 3D Transformations" by Ethan Eade equation 17 & 18.
 * http://ethaneade.com/lie.pdf
 * \f{align*}{
 * \theta &= \textrm{arccos}(0.5(\textrm{trace}(\mathbf{R})-1)) \\
 * \lfloor\mathbf{v}\times\rfloor &= \frac{\theta}{2\sin{\theta}}(\mathbf{R}-\mathbf{R}^\top)
 * @f}
 *
 * This function is based on the GTSAM one as the original implementation was a bit unstable.
 * See the following:
 * - https://github.com/borglab/gtsam/
 * - https://github.com/borglab/gtsam/issues/746
 * - https://github.com/borglab/gtsam/pull/780
 *
 * @param[in] R 3x3 SO(3) rotation matrix
 * @return 3x1 in the R(3) space [omegax, omegay, omegaz]
 */
inline Eigen::Matrix<double, 3, 1> log_so3(const Eigen::Matrix<double, 3, 3> &R) {
  // note switch to base 1 (math-style indices R11..R33 for readability below)
  double R11 = R(0, 0), R12 = R(0, 1), R13 = R(0, 2);
  double R21 = R(1, 0), R22 = R(1, 1), R23 = R(1, 2);
  double R31 = R(2, 0), R32 = R(2, 1), R33 = R(2, 2);
  // Get trace(R); trace = 1 + 2cos(theta) for a rotation by angle theta
  const double tr = R.trace();
  Eigen::Vector3d omega;
  // when trace == -1, i.e., when theta = +-pi, +-3pi, +-5pi, etc.
  // we do something special: the off-diagonal (R - R^T) formula degenerates
  // (sin(theta) -> 0), so recover the axis from the diagonal instead.
  // The three cases pick whichever diagonal entry is far enough from -1
  // to give a numerically stable square root.
  if (tr + 1.0 < 1e-10) {
    if (std::abs(R33 + 1.0) > 1e-5)
      omega = (M_PI / sqrt(2.0 + 2.0 * R33)) * Eigen::Vector3d(R13, R23, 1.0 + R33);
    else if (std::abs(R22 + 1.0) > 1e-5)
      omega = (M_PI / sqrt(2.0 + 2.0 * R22)) * Eigen::Vector3d(R12, 1.0 + R22, R32);
    else
      // if(std::abs(R.r1_.x()+1.0) > 1e-5) This is implicit
      omega = (M_PI / sqrt(2.0 + 2.0 * R11)) * Eigen::Vector3d(1.0 + R11, R21, R31);
  } else {
    double magnitude;
    const double tr_3 = tr - 3.0; // always negative
    if (tr_3 < -1e-7) {
      // generic case: theta/(2 sin(theta)) scaling of the vee of (R - R^T)
      double theta = acos((tr - 1.0) / 2.0);
      magnitude = theta / (2.0 * sin(theta));
    } else {
      // when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0)
      // use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2)
      // see https://github.com/borglab/gtsam/issues/746 for details
      magnitude = 0.5 - tr_3 / 12.0;
    }
    // vee of (R - R^T) scaled by the magnitude computed above
    omega = magnitude * Eigen::Vector3d(R32 - R23, R13 - R31, R21 - R12);
  }
  return omega;
}
/**
 * @brief SE(3) matrix exponential function
 *
 * Equation is from Ethan Eade's reference: http://ethaneade.com/lie.pdf
 * \f{align*}{
 * \exp([\boldsymbol\omega,\mathbf u])&=\begin{bmatrix} \mathbf R & \mathbf V \mathbf u \\ \mathbf 0 & 1 \end{bmatrix} \\[1em]
 * \mathbf R &= \mathbf I + A \lfloor \boldsymbol\omega \times\rfloor + B \lfloor \boldsymbol\omega \times\rfloor^2 \\
 * \mathbf V &= \mathbf I + B \lfloor \boldsymbol\omega \times\rfloor + C \lfloor \boldsymbol\omega \times\rfloor^2
 * \f}
 * where we have the following definitions
 * \f{align*}{
 * \theta &= \sqrt{\boldsymbol\omega^\top\boldsymbol\omega} \\
 * A &= \sin\theta/\theta \\
 * B &= (1-\cos\theta)/\theta^2 \\
 * C &= (1-A)/\theta^2
 * \f}
 *
 * @param vec 6x1 in the R(6) space [omega, u]
 * @return 4x4 SE(3) matrix
 */
inline Eigen::Matrix4d exp_se3(const Eigen::Matrix<double, 6, 1> &vec) {
  // Split into rotation (omega) and translation (u) components
  // (take by const reference now: the by-value copy was unnecessary and
  // inconsistent with the other functions in this header)
  Eigen::Vector3d w = vec.head(3);
  Eigen::Vector3d u = vec.tail(3);
  double theta = sqrt(w.dot(w));
  // Reuse skew_x() instead of re-building the skew matrix by hand
  Eigen::Matrix3d wskew = skew_x(w);
  // Series coefficients, with Taylor limits for small angle values:
  // A -> 1, B -> 1/2, C -> 1/6 as theta -> 0
  double A, B, C;
  if (theta < 1e-7) {
    A = 1;
    B = 0.5;
    C = 1.0 / 6.0;
  } else {
    A = sin(theta) / theta;
    B = (1 - cos(theta)) / (theta * theta);
    C = (1 - A) / (theta * theta);
  }
  // Matrices we need V and Identity
  Eigen::Matrix3d I_33 = Eigen::Matrix3d::Identity();
  Eigen::Matrix3d V = I_33 + B * wskew + C * wskew * wskew;
  // Assemble the homogeneous transform [R, V*u; 0, 1]
  Eigen::Matrix4d mat = Eigen::Matrix4d::Zero();
  mat.block(0, 0, 3, 3) = I_33 + A * wskew + B * wskew * wskew;
  mat.block(0, 3, 3, 1) = V * u;
  mat(3, 3) = 1;
  return mat;
}
/**
 * @brief SE(3) matrix logarithm
 *
 * Equation is from Ethan Eade's reference: http://ethaneade.com/lie.pdf
 * \f{align*}{
 * \boldsymbol\omega &=\mathrm{skew\_offdiags}\Big(\frac{\theta}{2\sin\theta}(\mathbf R - \mathbf R^\top)\Big) \\
 * \mathbf u &= \mathbf V^{-1}\mathbf t
 * \f}
 * where we have the following definitions
 * \f{align*}{
 * \theta &= \mathrm{arccos}((\mathrm{tr}(\mathbf R)-1)/2) \\
 * \mathbf V^{-1} &= \mathbf I - \frac{1}{2} \lfloor \boldsymbol\omega \times\rfloor + \frac{1}{\theta^2}\Big(1-\frac{A}{2B}\Big)\lfloor
 * \boldsymbol\omega \times\rfloor^2 \f}
 *
 * This function is based on the GTSAM one as the original implementation was a bit unstable.
 * See the following:
 * - https://github.com/borglab/gtsam/
 * - https://github.com/borglab/gtsam/issues/746
 * - https://github.com/borglab/gtsam/pull/780
 *
 * @param mat 4x4 SE(3) matrix
 * @return 6x1 in the R(6) space [omega, u]
 */
inline Eigen::Matrix<double, 6, 1> log_se3(Eigen::Matrix4d mat) {
  // Rotation part: delegate to the SO(3) logarithm
  Eigen::Vector3d w = log_so3(mat.block<3, 3>(0, 0));
  // Translation part of the input transform
  Eigen::Vector3d T = mat.block<3, 1>(0, 3);
  const double t = w.norm();
  if (t < 1e-10) {
    // Near-zero rotation: V^-1 is (approximately) identity, so u == T
    Eigen::Matrix<double, 6, 1> log;
    log << w, T;
    return log;
  } else {
    // Skew of the unit rotation axis
    Eigen::Matrix3d W = skew_x(w / t);
    // Formula from Agrawal06iros, equation (14)
    // simplified with Mathematica, and multiplying in T to avoid matrix math
    double Tan = tan(0.5 * t);
    Eigen::Vector3d WT = W * T;
    Eigen::Vector3d u = T - (0.5 * t) * WT + (1 - t / (2. * Tan)) * (W * WT);
    Eigen::Matrix<double, 6, 1> log;
    log << w, u;
    return log;
  }
}
/**
 * @brief Hat operator for R^6 -> Lie Algebra se(3)
 *
 * \f{align*}{
 * \boldsymbol\Omega^{\wedge} = \begin{bmatrix} \lfloor \boldsymbol\omega \times\rfloor & \mathbf u \\ \mathbf 0 & 0 \end{bmatrix}
 * \f}
 *
 * @param vec 6x1 in the R(6) space [omega, u]
 * @return Lie algebra se(3) 4x4 matrix
 */
inline Eigen::Matrix4d hat_se3(const Eigen::Matrix<double, 6, 1> &vec) {
  // Place the skew of omega in the top-left and u in the top-right;
  // the bottom row stays all zero
  Eigen::Matrix4d result = Eigen::Matrix4d::Zero();
  result.topLeftCorner<3, 3>() = skew_x(vec.head(3));
  result.topRightCorner<3, 1>() = vec.tail(3);
  return result;
}
/**
 * @brief SE(3) matrix analytical inverse
 *
 * It seems that using the .inverse() function is not a good way.
 * This should be used in all cases we need the inverse instead of numerical inverse.
 * https://github.com/rpng/open_vins/issues/12
 * \f{align*}{
 * \mathbf{T}^{-1} = \begin{bmatrix} \mathbf{R}^\top & -\mathbf{R}^\top\mathbf{p} \\ \mathbf{0} & 1 \end{bmatrix}
 * \f}
 *
 * @param[in] T SE(3) matrix
 * @return inversed SE(3) matrix
 */
inline Eigen::Matrix4d Inv_se3(const Eigen::Matrix4d &T) {
  // Closed-form inverse: [R^T, -R^T p; 0, 1]
  Eigen::Matrix3d R_transpose = T.block(0, 0, 3, 3).transpose();
  Eigen::Matrix4d Tinv = Eigen::Matrix4d::Identity();
  Tinv.block(0, 0, 3, 3) = R_transpose;
  Tinv.block(0, 3, 3, 1) = -R_transpose * T.block(0, 3, 3, 1);
  return Tinv;
}
/**
 * @brief JPL Quaternion inverse
 *
 * See equation 21 in [Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf).
 * \f{align*}{
 * \bar{q}^{-1} = \begin{bmatrix} -\mathbf{q} \\ q_4 \end{bmatrix}
 * \f}
 *
 * @param[in] q quaternion we want to change
 * @return inversed quaternion
 */
inline Eigen::Matrix<double, 4, 1> Inv(const Eigen::Matrix<double, 4, 1> &q) {
  // Take by const reference (the by-value copy was unnecessary and
  // inconsistent with quat_2_Rot / quat_multiply in this header)
  Eigen::Matrix<double, 4, 1> qinv;
  // Negate the vector part, keep the scalar part
  qinv.block(0, 0, 3, 1) = -q.block(0, 0, 3, 1);
  qinv(3, 0) = q(3, 0);
  return qinv;
}
/**
 * @brief Integrated quaternion from angular velocity
 *
 * See equation (48) of trawny tech report [Indirect Kalman Filter for 3D Attitude
 * Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf).
 *
 * @param[in] w 3x1 angular velocity
 * @return 4x4 Omega matrix used in JPL quaternion integration
 */
inline Eigen::Matrix<double, 4, 4> Omega(const Eigen::Matrix<double, 3, 1> &w) {
  // Take by const reference (the by-value copy was unnecessary and
  // inconsistent with the other functions in this header)
  Eigen::Matrix<double, 4, 4> mat;
  // [-skew(w), w; -w^T, 0]
  mat.block(0, 0, 3, 3) = -skew_x(w);
  mat.block(3, 0, 1, 3) = -w.transpose();
  mat.block(0, 3, 3, 1) = w;
  mat(3, 3) = 0;
  return mat;
}
/**
 * @brief Normalizes a quaternion to make sure it is unit norm
 * @param q_t Quaternion to normalized
 * @return Normalized quaterion
 */
inline Eigen::Matrix<double, 4, 1> quatnorm(Eigen::Matrix<double, 4, 1> q_t) {
  // Flip sign if needed so the scalar component is non-negative (unique representation)
  if (q_t(3, 0) < 0) {
    q_t = -q_t;
  }
  // Scale to unit norm
  return q_t / q_t.norm();
}
/**
 * @brief Computes left Jacobian of SO(3)
 *
 * The left Jacobian of SO(3) is defined equation (7.77b) in [State Estimation for
 * Robotics](http://asrl.utias.utoronto.ca/~tdb/bib/barfoot_ser17.pdf) by Timothy D. Barfoot. Specifically it is the following (with
 * \f$\theta=|\boldsymbol\theta|\f$ and \f$\mathbf a=\boldsymbol\theta/|\boldsymbol\theta|\f$): \f{align*}{ J_l(\boldsymbol\theta) =
 * \frac{\sin\theta}{\theta}\mathbf I + \Big(1-\frac{\sin\theta}{\theta}\Big)\mathbf a \mathbf a^\top + \frac{1-\cos\theta}{\theta}\lfloor
 * \mathbf a \times\rfloor \f}
 *
 * @param w axis-angle
 * @return The left Jacobian of SO(3)
 */
inline Eigen::Matrix<double, 3, 3> Jl_so3(const Eigen::Matrix<double, 3, 1> &w) {
  double theta = w.norm();
  if (theta < 1e-6) {
    // Small-angle limit: J_l -> identity
    // (use fixed-size Identity; the original used dynamic MatrixXd, which
    // heap-allocates for a compile-time-known 3x3 size)
    return Eigen::Matrix<double, 3, 3>::Identity();
  } else {
    // Unit rotation axis
    Eigen::Matrix<double, 3, 1> a = w / theta;
    Eigen::Matrix<double, 3, 3> J = sin(theta) / theta * Eigen::Matrix<double, 3, 3>::Identity() +
                                    (1 - sin(theta) / theta) * a * a.transpose() + ((1 - cos(theta)) / theta) * skew_x(a);
    return J;
  }
}
/**
 * @brief Computes right Jacobian of SO(3)
 *
 * The right Jacobian of SO(3) is related to the left by Jl(-w)=Jr(w).
 * See equation (7.87) in [State Estimation for Robotics](http://asrl.utias.utoronto.ca/~tdb/bib/barfoot_ser17.pdf) by Timothy D. Barfoot.
 * See @ref Jl_so3() for the definition of the left Jacobian of SO(3).
 *
 * @param w axis-angle
 * @return The right Jacobian of SO(3)
 */
inline Eigen::Matrix<double, 3, 3> Jr_so3(const Eigen::Matrix<double, 3, 1> &w) {
  // Right Jacobian via the identity Jr(w) = Jl(-w)
  return Jl_so3(-w);
}
/**
 * @brief Gets roll, pitch, yaw of argument rotation (in that order).
 *
 * To recover the matrix: R_input = R_z(yaw)*R_y(pitch)*R_x(roll)
 * If you are interested in how to compute Jacobians checkout this report:
 * http://mars.cs.umn.edu/tr/reports/Trawny05c.pdf
 *
 * @param rot Rotation matrix
 * @return roll, pitch, yaw values (in that order)
 */
inline Eigen::Matrix<double, 3, 1> rot2rpy(const Eigen::Matrix<double, 3, 3> &rot) {
  Eigen::Matrix<double, 3, 1> rpy;
  // Pitch from atan2(-r31, sqrt(r11^2 + r21^2)); always in [-pi/2, pi/2]
  rpy(1, 0) = atan2(-rot(2, 0), sqrt(rot(0, 0) * rot(0, 0) + rot(1, 0) * rot(1, 0)));
  // BUGFIX: the original wrote std::abs(cos(...) > 1.0e-12), which takes the
  // absolute value of a *bool* due to a misplaced parenthesis; the intended
  // check (guarding against gimbal lock at pitch = +-pi/2) is below
  if (std::abs(cos(rpy(1, 0))) > 1.0e-12) {
    rpy(2, 0) = atan2(rot(1, 0) / cos(rpy(1, 0)), rot(0, 0) / cos(rpy(1, 0)));
    rpy(0, 0) = atan2(rot(2, 1) / cos(rpy(1, 0)), rot(2, 2) / cos(rpy(1, 0)));
  } else {
    // Gimbal lock: yaw is unobservable, conventionally set it to zero
    rpy(2, 0) = 0;
    rpy(0, 0) = atan2(rot(0, 1), rot(1, 1));
  }
  return rpy;
}
/**
 * @brief Construct rotation matrix from given roll
 * @param t roll angle
 */
inline Eigen::Matrix<double, 3, 3> rot_x(double t) {
  double ct = cos(t);
  double st = sin(t);
  // Rotation about the x-axis: only the y/z sub-block differs from identity
  Eigen::Matrix<double, 3, 3> r = Eigen::Matrix<double, 3, 3>::Identity();
  r(1, 1) = ct;
  r(1, 2) = -st;
  r(2, 1) = st;
  r(2, 2) = ct;
  return r;
}
/**
 * @brief Construct rotation matrix from given pitch
 * @param t pitch angle
 */
inline Eigen::Matrix<double, 3, 3> rot_y(double t) {
  double ct = cos(t);
  double st = sin(t);
  // Rotation about the y-axis: only the x/z sub-block differs from identity
  Eigen::Matrix<double, 3, 3> r = Eigen::Matrix<double, 3, 3>::Identity();
  r(0, 0) = ct;
  r(0, 2) = st;
  r(2, 0) = -st;
  r(2, 2) = ct;
  return r;
}
/**
 * @brief Construct rotation matrix from given yaw
 * @param t yaw angle
 */
inline Eigen::Matrix<double, 3, 3> rot_z(double t) {
  double ct = cos(t);
  double st = sin(t);
  // Rotation about the z-axis: only the x/y sub-block differs from identity
  Eigen::Matrix<double, 3, 3> r = Eigen::Matrix<double, 3, 3>::Identity();
  r(0, 0) = ct;
  r(0, 1) = -st;
  r(1, 0) = st;
  r(1, 1) = ct;
  return r;
}
} // namespace ov_core
#endif /* OV_CORE_QUAT_OPS_H */

View File

@@ -0,0 +1,83 @@
/*
* OpenVINS: An Open Platform for Visual-Inertial Research
* Copyright (C) 2018-2022 Patrick Geneva
* Copyright (C) 2018-2022 Guoquan Huang
* Copyright (C) 2018-2022 OpenVINS Contributors
* Copyright (C) 2018-2019 Kevin Eckenhoff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef OV_CORE_SENSOR_DATA_H
#define OV_CORE_SENSOR_DATA_H
#include <algorithm>
#include <limits>
#include <vector>

#include <Eigen/Eigen>
#include <opencv2/opencv.hpp>
namespace ov_core {
/**
 * @brief Struct for a single imu measurement (time, wm, am)
 */
struct ImuData {
  /// Timestamp of the reading
  double timestamp;
  /// Gyroscope reading, angular velocity (rad/s)
  Eigen::Matrix<double, 3, 1> wm;
  /// Accelerometer reading, linear acceleration (m/s^2)
  Eigen::Matrix<double, 3, 1> am;
  /// Comparison by timestamp so measurements sort chronologically in STL containers
  bool operator<(const ImuData &other) const {
    return this->timestamp < other.timestamp;
  }
};
/**
 * @brief Struct for a collection of camera measurements.
 *
 * For each image we have a camera id and timestamp that it occured at.
 * If there are multiple cameras we will treat it as pair-wise stereo tracking.
 */
struct CameraData {
  /// Timestamp of the reading
  double timestamp;
  /// Camera ids for each of the images collected
  std::vector<int> sensor_ids;
  /// Raw image we have collected for each camera
  std::vector<cv::Mat> images;
  /// Tracking masks for each camera we have
  std::vector<cv::Mat> masks;
  /// Sort function to allow for using of STL containers.
  /// Orders primarily by timestamp; exact-timestamp ties are broken by the
  /// smallest camera id in each collection.
  bool operator<(const CameraData &other) const {
    if (timestamp != other.timestamp)
      return timestamp < other.timestamp;
    // BUGFIX: the original dereferenced std::min_element() unconditionally,
    // which is undefined behavior on an empty sensor_ids vector during a
    // timestamp tie. Empty collections now sort first.
    int id = sensor_ids.empty() ? std::numeric_limits<int>::min() : *std::min_element(sensor_ids.begin(), sensor_ids.end());
    int id_other =
        other.sensor_ids.empty() ? std::numeric_limits<int>::min() : *std::min_element(other.sensor_ids.begin(), other.sensor_ids.end());
    return id < id_other;
  }
};
} // namespace ov_core
#endif // OV_CORE_SENSOR_DATA_H