This commit is contained in:
Ivan
2022-04-05 11:42:28 +03:00
commit 6dc0eb0fcf
5565 changed files with 1200500 additions and 0 deletions

View File

@@ -0,0 +1,604 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <basalt/vi_estimator/ba_base.h>
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <basalt/utils/ba_utils.h>
namespace basalt {
// Gauss-Newton refinement of a single frame pose against the current map,
// keeping all landmarks and all other (host) frame poses fixed.
//
// @param state_t       In/out: pose state of the frame to optimize.
// @param connected_obs Per-camera lists of landmark ids observed by this
//                      frame (index = camera id of the target frame).
//
// NOTE(review): the template parameter is named `Scalar` here while the rest
// of the file uses `Scalar_`; the name is local to this definition. Relies
// on a std::hash specialization for std::pair<TimeCamId, TimeCamId> being
// provided elsewhere in the project.
template <class Scalar>
void BundleAdjustmentBase<Scalar>::optimize_single_frame_pose(
    PoseStateWithLin<Scalar>& state_t,
    const std::vector<std::vector<int>>& connected_obs) const {
  // Fixed small number of GN iterations; no convergence check.
  const int num_iter = 2;

  // Cached relative pose (host -> target) and its derivative w.r.t. the
  // target pose, one entry per (host, target) camera pair.
  struct AbsLinData {
    Mat4 T_t_h;
    Mat6 d_rel_d_t;
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
  };

  for (int iter = 0; iter < num_iter; iter++) {
    // Accumulated robust cost (currently only used by the commented-out
    // debug print below).
    Scalar error = 0;

    // Normal equations for the 6-DoF pose increment.
    Mat6 Ht;
    Vec6 bt;
    Ht.setZero();
    bt.setZero();

    std::unordered_map<std::pair<TimeCamId, TimeCamId>, AbsLinData>
        abs_lin_data;

    // Pass 1: linearize the relative pose once for every (host, target)
    // camera pair that has at least one connected observation.
    for (size_t cam_id = 0; cam_id < connected_obs.size(); cam_id++) {
      TimeCamId tcid_t(state_t.getT_ns(), cam_id);
      for (const auto& lm_id : connected_obs[cam_id]) {
        const Keypoint<Scalar>& kpt_pos = lmdb.getLandmark(lm_id);
        std::pair<TimeCamId, TimeCamId> map_key(kpt_pos.host_kf_id, tcid_t);
        if (abs_lin_data.count(map_key) == 0) {
          const PoseStateWithLin<Scalar>& state_h =
              frame_poses.at(kpt_pos.host_kf_id.frame_id);
          // The optimized frame must not host any of the landmarks.
          BASALT_ASSERT(kpt_pos.host_kf_id.frame_id != state_t.getT_ns());
          AbsLinData& ald = abs_lin_data[map_key];
          // Only the derivative w.r.t. the target pose is needed; the host
          // pose is held fixed (nullptr for the host derivative).
          SE3 T_t_h_sophus = computeRelPose<Scalar>(
              state_h.getPose(), calib.T_i_c[kpt_pos.host_kf_id.cam_id],
              state_t.getPose(), calib.T_i_c[cam_id], nullptr, &ald.d_rel_d_t);
          ald.T_t_h = T_t_h_sophus.matrix();
        }
      }
    }

    // Pass 2: linearize each observation and accumulate H and b.
    for (size_t cam_id = 0; cam_id < connected_obs.size(); cam_id++) {
      std::visit(
          [&](const auto& cam) {
            for (const auto& lm_id : connected_obs[cam_id]) {
              TimeCamId tcid_t(state_t.getT_ns(), cam_id);
              const Keypoint<Scalar>& kpt_pos = lmdb.getLandmark(lm_id);
              const Vec2& kpt_obs = kpt_pos.obs.at(tcid_t);
              const AbsLinData& ald =
                  abs_lin_data.at(std::make_pair(kpt_pos.host_kf_id, tcid_t));
              Vec2 res;
              Eigen::Matrix<Scalar, 2, POSE_SIZE> d_res_d_xi;
              bool valid = linearizePoint(kpt_obs, kpt_pos, ald.T_t_h, cam, res,
                                          &d_res_d_xi);
              if (valid) {
                // Robust (Huber) cost value: 0.5 * (2 - w) * w_obs * r^T r.
                Scalar e = res.norm();
                Scalar huber_weight =
                    e < huber_thresh ? Scalar(1.0) : huber_thresh / e;
                Scalar obs_weight = huber_weight / (obs_std_dev * obs_std_dev);
                error += Scalar(0.5) * (2 - huber_weight) * obs_weight *
                         res.transpose() * res;
                // NOTE(review): huber_weight/obs_weight are applied to the
                // error value but NOT to the Ht/bt accumulation below --
                // confirm this is intended.
                // Chain rule: residual w.r.t. target pose via relative pose.
                d_res_d_xi *= ald.d_rel_d_t;
                Ht.noalias() += d_res_d_xi.transpose() * d_res_d_xi;
                bt.noalias() += d_res_d_xi.transpose() * res;
              }
            }
          },
          calib.intrinsics[cam_id].variant);
    }

    // Add small damping for GN (LM-style: scaled diagonal with lambda as a
    // lower bound keeps Ht well-conditioned even for weak observations).
    constexpr Scalar lambda = 1e-6;
    Vec6 diag = Ht.diagonal();
    diag *= lambda;
    Ht.diagonal().array() += diag.array().max(lambda);

    // std::cout << "pose opt error " << error << std::endl;

    Vec6 inc = -Ht.ldlt().solve(bt);
    state_t.applyInc(inc);
  }
  // std::cout << "=============================" << std::endl;
}
// Computes the total robust weighted reprojection error over all
// observations in the landmark database, parallelized over host frames with
// TBB.
//
// @param error             Output: accumulated cost.
// @param outliers          Optional output: per landmark, a list of
//                          (target, value) pairs where value is the error
//                          norm of an outlier observation, -1 for a failed
//                          linearization of a cross-frame observation, and
//                          -2 for a (failed or outlier) observation in the
//                          host frame itself.
// @param outlier_threshold Error norm above which an observation is reported
//                          as outlier (only used if outliers != nullptr).
template <class Scalar_>
void BundleAdjustmentBase<Scalar_>::computeError(
    Scalar& error,
    std::map<int, std::vector<std::pair<TimeCamId, Scalar>>>* outliers,
    Scalar outlier_threshold) const {
  // Snapshot of all host frames so the parallel loop can index them.
  std::vector<TimeCamId> host_frames;
  for (const auto& [tcid, _] : lmdb.getObservations()) {
    host_frames.push_back(tcid);
  }

  // Outliers are collected into a concurrent map and copied into the
  // ordered output map after the reduction finishes.
  tbb::concurrent_unordered_map<int, std::vector<std::pair<TimeCamId, Scalar>>>
      outliers_concurrent;

  auto body = [&](const tbb::blocked_range<size_t>& range, Scalar local_error) {
    for (size_t r = range.begin(); r != range.end(); ++r) {
      const TimeCamId& tcid_h = host_frames[r];
      for (const auto& obs_kv : lmdb.getObservations().at(tcid_h)) {
        const TimeCamId& tcid_t = obs_kv.first;

        // Relative pose host -> target; identity when the landmark is
        // observed in its own host frame.
        Mat4 T_t_h;
        if (tcid_h != tcid_t) {
          PoseStateWithLin state_h = getPoseStateWithLin(tcid_h.frame_id);
          PoseStateWithLin state_t = getPoseStateWithLin(tcid_t.frame_id);
          Sophus::SE3<Scalar> T_t_h_sophus =
              computeRelPose(state_h.getPose(), calib.T_i_c[tcid_h.cam_id],
                             state_t.getPose(), calib.T_i_c[tcid_t.cam_id]);
          T_t_h = T_t_h_sophus.matrix();
        } else {
          T_t_h.setIdentity();
        }

        std::visit(
            [&](const auto& cam) {
              for (KeypointId kpt_id : obs_kv.second) {
                const Keypoint<Scalar>& kpt_pos = lmdb.getLandmark(kpt_id);
                const Vec2& kpt_obs = kpt_pos.obs.at(tcid_t);
                Vec2 res;
                bool valid = linearizePoint(kpt_obs, kpt_pos, T_t_h, cam, res);
                if (valid) {
                  Scalar e = res.norm();
                  if (outliers && e > outlier_threshold) {
                    // -2 marks an outlier obs in the host frame itself.
                    outliers_concurrent[kpt_id].emplace_back(
                        tcid_t, tcid_h != tcid_t ? e : -2);
                  }
                  // Huber-weighted squared error.
                  Scalar huber_weight =
                      e < huber_thresh ? Scalar(1.0) : huber_thresh / e;
                  Scalar obs_weight =
                      huber_weight / (obs_std_dev * obs_std_dev);
                  local_error += Scalar(0.5) * (2 - huber_weight) * obs_weight *
                                 res.transpose() * res;
                } else {
                  if (outliers) {
                    // -1: failed linearization; -2: failure in host frame.
                    outliers_concurrent[kpt_id].emplace_back(
                        tcid_t, tcid_h != tcid_t ? -1 : -2);
                  }
                }
              }
            },
            calib.intrinsics[tcid_t.cam_id].variant);
      }
    }
    return local_error;
  };

  tbb::blocked_range<size_t> range(0, host_frames.size());
  Scalar init = 0;
  auto join = std::plus<Scalar>();
  error = tbb::parallel_reduce(range, init, body, join);

  // Move the concurrently collected outliers into the ordered output map.
  if (outliers) {
    outliers->clear();
    for (auto& [k, v] : outliers_concurrent) {
      outliers->emplace(k, std::move(v));
    }
  }
}
// Collects the current 3D world-frame positions of all landmarks.
//
// @tparam Scalar2 Output scalar type (points are cast on output).
// @param points   Output: one 3D world point per landmark entry returned by
//                 getLandmarksForHost().
// @param ids      Output: NOTE(review): filled with the constant 1 for every
//                 point -- the keypoint id is not available from
//                 getLandmarksForHost(); confirm whether callers actually
//                 use these values.
template <class Scalar_>
template <class Scalar2>
void BundleAdjustmentBase<Scalar_>::get_current_points(
    Eigen::aligned_vector<Eigen::Matrix<Scalar2, 3, 1>>& points,
    std::vector<int>& ids) const {
  points.clear();
  ids.clear();
  for (const auto& tcid_host : lmdb.getHostKfs()) {
    // Resolve the host frame pose from either the full states or the
    // pose-only states; a host must exist in exactly one of the two maps.
    Sophus::SE3<Scalar> T_w_i;
    int64_t id = tcid_host.frame_id;
    if (frame_states.count(id) > 0) {
      PoseVelBiasStateWithLin<Scalar> state = frame_states.at(id);
      T_w_i = state.getState().T_w_i;
    } else if (frame_poses.count(id) > 0) {
      PoseStateWithLin<Scalar> state = frame_poses.at(id);
      T_w_i = state.getPose();
    } else {
      std::cout << "Unknown frame id: " << id << std::endl;
      std::abort();
    }
    const Sophus::SE3<Scalar>& T_i_c = calib.T_i_c[tcid_host.cam_id];
    Mat4 T_w_c = (T_w_i * T_i_c).matrix();
    for (const Keypoint<Scalar>* kpt_pos :
         lmdb.getLandmarksForHost(tcid_host)) {
      // Landmarks are stored as (direction, inverse distance) in the host
      // camera frame; unproject and de-homogenize into the world frame.
      Vec4 pt_cam = StereographicParam<Scalar>::unproject(kpt_pos->direction);
      pt_cam[3] = kpt_pos->inv_dist;
      Vec4 pt_w = T_w_c * pt_cam;
      points.emplace_back(
          (pt_w.template head<3>() / pt_w[3]).template cast<Scalar2>());
      ids.emplace_back(1);
    }
  }
}
// Removes outlier observations (and, if too few observations would remain,
// whole landmarks) based on the per-observation reprojection error.
//
// @param outlier_threshold Error norm above which an observation counts as
//                          an outlier (forwarded to computeError()).
// @param min_num_obs       Minimum number of remaining observations required
//                          to keep a landmark.
template <class Scalar_>
void BundleAdjustmentBase<Scalar_>::filterOutliers(Scalar outlier_threshold,
                                                   int min_num_obs) {
  Scalar error;
  std::map<int, std::vector<std::pair<TimeCamId, Scalar>>> outliers;
  computeError(error, &outliers, outlier_threshold);

  for (const auto& kv : outliers) {
    const int num_obs = lmdb.numObservations(kv.first);
    const int num_outliers = static_cast<int>(kv.second.size());

    // Remove the whole landmark if too few inlier observations would remain.
    bool remove = num_obs - num_outliers < min_num_obs;

    // An error code of -2 marks a (failed or outlier) observation in the
    // host frame itself, which invalidates the landmark entirely.
    for (const auto& kv2 : kv.second) {
      if (kv2.second == -2) remove = true;
    }

    if (remove) {
      lmdb.removeLandmark(kv.first);
    } else {
      // Fix: renamed from `outliers` to avoid shadowing the outer map.
      std::set<TimeCamId> obs_to_remove;
      for (const auto& kv2 : kv.second) obs_to_remove.emplace(kv2.first);
      lmdb.removeObservations(kv.first, obs_to_remove);
    }
  }
}
// Stacks the linearization-point deltas of all frames listed in marg_order
// into one vector, laid out according to the absolute order map. Each entry
// is either a pose-only (POSE_SIZE) or pose-vel-bias (POSE_VEL_BIAS_SIZE)
// segment; any other size is a programming error.
template <class Scalar_>
void BundleAdjustmentBase<Scalar_>::computeDelta(const AbsOrderMap& marg_order,
                                                 VecX& delta) const {
  delta.setZero(marg_order.total_size);
  for (const auto& [frame_id, idx_and_size] : marg_order.abs_order_map) {
    const auto idx = idx_and_size.first;
    const auto size = idx_and_size.second;
    if (size == POSE_SIZE) {
      BASALT_ASSERT(frame_poses.at(frame_id).isLinearized());
      delta.template segment<POSE_SIZE>(idx) =
          frame_poses.at(frame_id).getDelta();
    } else if (size == POSE_VEL_BIAS_SIZE) {
      BASALT_ASSERT(frame_states.at(frame_id).isLinearized());
      delta.template segment<POSE_VEL_BIAS_SIZE>(idx) =
          frame_states.at(frame_id).getDelta();
    } else {
      BASALT_ASSERT(false);
    }
  }
}
// Predicted decrease of the linearized model cost for a given increment.
//
// The linearized model cost is
//
//     L(x) = 0.5 || J*x + r ||^2
//          = 0.5 x^T H x + x^T b + 0.5 r^T r,
//
// with the system given in normal-equation form as H = J^T J, b = J^T r.
// The expected decrease for the step `inc` is therefore
//
//     l_diff = L(0) - L(inc)
//            = -0.5 inc^T H inc - inc^T b
//            = -inc^T (0.5 H inc + b).
template <class Scalar_>
Scalar_ BundleAdjustmentBase<Scalar_>::computeModelCostChange(
    const MatX& H, const VecX& b, const VecX& inc) const {
  const VecX half_H_inc_plus_b = Scalar(0.5) * (H * inc) + b;
  return -inc.dot(half_H_inc_plus_b);
}
// Computes projections (with the keypoint id stored in the 4th component)
// of all landmarks observed in the frame with timestamp last_state_t_ns,
// grouped per target camera.
//
// @tparam Scalar2         Output scalar type for the projection vectors.
// @param data             Output: data[cam_id] receives one 4-vector per
//                         projected keypoint; components 0-2 come from
//                         linearizePoint's proj output, component 3 is the
//                         keypoint id.
// @param last_state_t_ns  Only observations in this frame are projected.
template <class Scalar_>
template <class Scalar2>
void BundleAdjustmentBase<Scalar_>::computeProjections(
    std::vector<Eigen::aligned_vector<Eigen::Matrix<Scalar2, 4, 1>>>& data,
    FrameId last_state_t_ns) const {
  for (const auto& kv : lmdb.getObservations()) {
    const TimeCamId& tcid_h = kv.first;
    for (const auto& obs_kv : kv.second) {
      const TimeCamId& tcid_t = obs_kv.first;
      if (tcid_t.frame_id != last_state_t_ns) continue;

      // Relative pose host -> target; identity for self-observations.
      Mat4 T_t_h;
      if (tcid_h != tcid_t) {
        PoseStateWithLin<Scalar> state_h = getPoseStateWithLin(tcid_h.frame_id);
        PoseStateWithLin<Scalar> state_t = getPoseStateWithLin(tcid_t.frame_id);
        Sophus::SE3<Scalar> T_t_h_sophus =
            computeRelPose(state_h.getPose(), calib.T_i_c[tcid_h.cam_id],
                           state_t.getPose(), calib.T_i_c[tcid_t.cam_id]);
        T_t_h = T_t_h_sophus.matrix();
      } else {
        T_t_h.setIdentity();
      }

      std::visit(
          [&](const auto& cam) {
            for (KeypointId kpt_id : obs_kv.second) {
              const Keypoint<Scalar>& kpt_pos = lmdb.getLandmark(kpt_id);
              Vec2 res;
              Vec4 proj;
              using CamT = std::decay_t<decltype(cam)>;
              // Only the proj output is needed; the residual against a zero
              // observation is discarded.
              linearizePoint<Scalar, CamT>(Vec2::Zero(), kpt_pos, T_t_h, cam,
                                           res, nullptr, nullptr, &proj);
              proj[3] = kpt_id;
              data[tcid_t.cam_id].emplace_back(proj.template cast<Scalar2>());
            }
          },
          calib.intrinsics[tcid_t.cam_id].variant);
    }
  }
}
// Adds the marginalization prior to the absolute Hessian/gradient and
// computes the current prior error.
//
// @param mld              Marginalization prior, stored either in squared
//                         (H, b) or square-root (J, r) form.
// @param aom              Absolute order map of the full system; must agree
//                         with mld.order in the top-left corner.
// @param abs_H            In/out: Hessian the prior is added to.
// @param abs_b            In/out: gradient the prior is added to.
// @param marg_prior_error Output: current prior cost (without the constant
//                         0.5 r^T r term; may be negative, see below).
template <class Scalar_>
void BundleAdjustmentBase<Scalar_>::linearizeMargPrior(
    const MargLinData<Scalar>& mld, const AbsOrderMap& aom, MatX& abs_H,
    VecX& abs_b, Scalar& marg_prior_error) const {
  // Prior is ordered to be in the top left corner of Hessian
  BASALT_ASSERT(size_t(mld.H.cols()) == mld.order.total_size);

  // Check if the order of variables is the same, and it's indeed the
  // top-left corner
  for (const auto& kv : mld.order.abs_order_map) {
    UNUSED(aom);
    UNUSED(kv);
    BASALT_ASSERT(aom.abs_order_map.at(kv.first) == kv.second);
    BASALT_ASSERT(kv.second.first < int(mld.order.total_size));
  }

  // Quadratic prior and "delta" of the current state to the original
  // linearization point give cost function
  //
  //     P(x) = 0.5 || J*(delta+x) + r ||^2,
  //
  // alternatively stored in quadratic form as
  //
  //     Hmarg = J^T J,
  //     bmarg = J^T r.
  //
  // This is now to be linearized at x=0, so we get linearization
  //
  //     P(x) = 0.5 || J*x + (J*delta + r) ||^2,
  //
  // with Jacobian J and residual J*delta + r. The normal equations are
  //
  //     H*x + b = 0,
  //     H = J^T J = Hmarg,
  //     b = J^T (J*delta + r) = Hmarg*delta + bmarg.
  //
  // The current cost is
  //
  //     P(0) = 0.5 || J*delta + r ||^2
  //          = 0.5 delta^T J^T J delta + delta^T J^T r + 0.5 r^T r
  //          = 0.5 delta^T Hmarg delta + delta^T bmarg + 0.5 r^T r.
  //
  // Note: Since the r^T r term does not change with delta, we drop it from
  // the error computation. The main need for the error value is for comparing
  // the cost before and after an update to delta in the optimization loop.
  // This also means the computed error can be negative.
  const size_t marg_size = mld.order.total_size;
  VecX delta;
  computeDelta(mld.order, delta);

  if (mld.is_sqrt) {
    // Square-root form: mld.H stores J and mld.b stores r.
    abs_H.topLeftCorner(marg_size, marg_size) += mld.H.transpose() * mld.H;
    abs_b.head(marg_size) += mld.H.transpose() * (mld.b + mld.H * delta);
    marg_prior_error = delta.transpose() * mld.H.transpose() *
                       (Scalar(0.5) * mld.H * delta + mld.b);
  } else {
    // Squared form: mld.H stores Hmarg and mld.b stores bmarg.
    abs_H.topLeftCorner(marg_size, marg_size) += mld.H;
    abs_b.head(marg_size) += mld.H * delta + mld.b;
    marg_prior_error =
        delta.transpose() * (Scalar(0.5) * mld.H * delta + mld.b);
  }
}
// Computes the current marginalization prior cost P(0), dropping the
// constant 0.5 r^T r term (see linearizeMargPrior()). The value is only
// meant for comparing costs before/after a delta update and may therefore
// be negative.
template <class Scalar_>
void BundleAdjustmentBase<Scalar_>::computeMargPriorError(
    const MargLinData<Scalar>& mld, Scalar& marg_prior_error) const {
  BASALT_ASSERT(size_t(mld.H.cols()) == mld.order.total_size);

  VecX delta;
  computeDelta(mld.order, delta);

  // Common term 0.5 * H * delta + b shared by both storage variants.
  const VecX half_H_delta_plus_b = Scalar(0.5) * mld.H * delta + mld.b;

  if (mld.is_sqrt) {
    // Square-root storage: mld.H holds the Jacobian J, so the quadratic
    // form uses J^T J.
    marg_prior_error =
        delta.transpose() * mld.H.transpose() * half_H_delta_plus_b;
  } else {
    // Squared storage: mld.H already holds Hmarg.
    marg_prior_error = delta.transpose() * half_H_delta_plus_b;
  }
}
// Predicted decrease of the marginalization-prior model cost for a given
// (already computed) pose increment, supporting both squared and
// square-root prior storage and optional Jacobian scaling.
//
// @param mld           Marginalization prior (squared or sqrt form).
// @param marg_scaling  Optional per-variable Jacobian scaling (empty = none).
// @param marg_pose_inc Increment of the prior variables.
// @return              Expected cost decrease l_diff (positive = improvement).
template <class Scalar_>
Scalar_ BundleAdjustmentBase<Scalar_>::computeMargPriorModelCostChange(
    const MargLinData<Scalar>& mld, const VecX& marg_scaling,
    const VecX& marg_pose_inc) const {
  // Quadratic prior and "delta" of the current state to the original
  // linearization point give cost function
  //
  //     P(x) = 0.5 || J*(delta+x) + r ||^2,
  //
  // alternatively stored in quadratic form as
  //
  //     Hmarg = J^T J,
  //     bmarg = J^T r.
  //
  // We want to compute the model cost change. The model function is
  //
  //     L(inc) = P(inc) = 0.5 || J*inc + (J*delta + r) ||^2
  //
  // By setting rlin = J*delta + r we get
  //
  //     L(inc) = 0.5 || J*inc + rlin ||^2
  //            = P(0) + inc^T J^T rlin + 0.5 inc^T J^T J inc
  //
  // and thus the expected decrease in cost for the computed increment is
  //
  //     l_diff = L(0) - L(inc)
  //            = - inc^T J^T rlin - 0.5 inc^T J^T J inc
  //            = - inc^T J^T (rlin + 0.5 J inc)
  //            = - (J inc)^T (rlin + 0.5 (J inc))
  //            = - (J inc)^T (J*delta + r + 0.5 (J inc)).
  //
  // Alternatively, for squared prior storage, we get
  //
  //     l_diff = - inc^T (Hmarg delta + bmarg + 0.5 Hmarg inc)
  //            = - inc^T (Hmarg (delta + 0.5 inc) + bmarg)
  //
  // For Jacobian scaling we need to take extra care. Note that we store the
  // scale separately and don't actually update marg_sqrt_H and marg_sqrt_b
  // in place with the scale. So in the computation above, we need to scale
  // marg_sqrt_H whenever it is multiplied with inc, but NOT when it is
  // multiplied with delta, since delta is also WITHOUT scaling.
  VecX delta;
  computeDelta(mld.order, delta);

  // Apply the (optional) Jacobian scaling to the increment only.
  VecX J_inc = marg_pose_inc;
  if (marg_scaling.rows() > 0) J_inc = marg_scaling.asDiagonal() * J_inc;

  Scalar l_diff;
  if (mld.is_sqrt) {
    // No scaling here. This is the b part, not the Jacobian.
    const VecX b_Jdelta = mld.H * delta + mld.b;
    J_inc = mld.H * J_inc;
    l_diff = -J_inc.transpose() * (b_Jdelta + Scalar(0.5) * J_inc);
  } else {
    l_diff = -J_inc.dot(mld.H * (delta + Scalar(0.5) * J_inc) + mld.b);
  }
  return l_diff;
}
// //////////////////////////////////////////////////////////////////
// instantiate templates

// Note: double specialization is unconditional, b/c NfrMapper depends on it.
//#ifdef BASALT_INSTANTIATIONS_DOUBLE
template class BundleAdjustmentBase<double>;

template void BundleAdjustmentBase<double>::get_current_points<double>(
    Eigen::aligned_vector<Eigen::Matrix<double, 3, 1>>& points,
    std::vector<int>& ids) const;

template void BundleAdjustmentBase<double>::computeProjections<double>(
    std::vector<Eigen::aligned_vector<Eigen::Matrix<double, 4, 1>>>& data,
    FrameId last_state_t_ns) const;
//#endif

#ifdef BASALT_INSTANTIATIONS_FLOAT
template class BundleAdjustmentBase<float>;

// NOTE(review): the float->float variants below are commented out --
// presumably only double outputs are needed in the float build; confirm
// before re-enabling.
// template void BundleAdjustmentBase<float>::get_current_points<float>(
//     Eigen::aligned_vector<Eigen::Matrix<float, 3, 1>>& points,
//     std::vector<int>& ids) const;

template void BundleAdjustmentBase<float>::get_current_points<double>(
    Eigen::aligned_vector<Eigen::Matrix<double, 3, 1>>& points,
    std::vector<int>& ids) const;

// template void BundleAdjustmentBase<float>::computeProjections<float>(
//     std::vector<Eigen::aligned_vector<Eigen::Matrix<float, 4, 1>>>& data,
//     FrameId last_state_t_ns) const;

template void BundleAdjustmentBase<float>::computeProjections<double>(
    std::vector<Eigen::aligned_vector<Eigen::Matrix<double, 4, 1>>>& data,
    FrameId last_state_t_ns) const;
#endif
} // namespace basalt

View File

@@ -0,0 +1,247 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <basalt/vi_estimator/landmark_database.h>
namespace basalt {
// Adds (or overwrites) a landmark entry with the given direction, inverse
// distance and host keyframe. Existing observations of the landmark are
// left untouched.
template <class Scalar_>
void LandmarkDatabase<Scalar_>::addLandmark(KeypointId lm_id,
                                            const Keypoint<Scalar> &pos) {
  Keypoint<Scalar> &entry = kpts[lm_id];
  entry.direction = pos.direction;
  entry.inv_dist = pos.inv_dist;
  entry.host_kf_id = pos.host_kf_id;
}
// Removes all observations made from the given frame (any camera) and drops
// landmarks that are left with fewer than min_num_obs observations.
template <class Scalar_>
void LandmarkDatabase<Scalar_>::removeFrame(const FrameId &frame) {
  for (auto it = kpts.begin(); it != kpts.end();) {
    // Erase this landmark's observations from the given frame. The helper
    // returns the next valid iterator, keeping the loop erase-safe.
    for (auto it2 = it->second.obs.begin(); it2 != it->second.obs.end();) {
      if (it2->first.frame_id == frame)
        it2 = removeLandmarkObservationHelper(it, it2);
      else
        it2++;
    }
    // Drop the landmark entirely if it fell below the observation minimum.
    if (it->second.obs.size() < min_num_obs) {
      it = removeLandmarkHelper(it);
    } else {
      ++it;
    }
  }
}
// Removes landmarks hosted in any keyframe of kfs_to_marg entirely, and
// removes individual observations made from any frame contained in
// poses_to_marg, states_to_marg_all, or kfs_to_marg. Landmarks left with
// fewer than min_num_obs observations are dropped as well.
template <class Scalar_>
void LandmarkDatabase<Scalar_>::removeKeyframes(
    const std::set<FrameId> &kfs_to_marg,
    const std::set<FrameId> &poses_to_marg,
    const std::set<FrameId> &states_to_marg_all) {
  for (auto it = kpts.begin(); it != kpts.end();) {
    if (kfs_to_marg.count(it->second.host_kf_id.frame_id) > 0) {
      // Host keyframe is marginalized -> drop the whole landmark.
      it = removeLandmarkHelper(it);
    } else {
      // Erase-safe removal of individual observations.
      for (auto it2 = it->second.obs.begin(); it2 != it->second.obs.end();) {
        FrameId fid = it2->first.frame_id;
        if (poses_to_marg.count(fid) > 0 || states_to_marg_all.count(fid) > 0 ||
            kfs_to_marg.count(fid) > 0)
          it2 = removeLandmarkObservationHelper(it, it2);
        else
          it2++;
      }
      if (it->second.obs.size() < min_num_obs) {
        it = removeLandmarkHelper(it);
      } else {
        ++it;
      }
    }
  }
}
// Returns the list of all host (frame, camera) ids that currently have
// observations registered in the database.
template <class Scalar_>
std::vector<TimeCamId> LandmarkDatabase<Scalar_>::getHostKfs() const {
  std::vector<TimeCamId> hosts;
  hosts.reserve(observations.size());
  for (const auto &host_entry : observations) {
    hosts.push_back(host_entry.first);
  }
  return hosts;
}
// Returns pointers to the landmarks hosted in the given (frame, camera).
//
// NOTE(review): landmarks are gathered per target entry of the observation
// index, so a landmark observed from multiple targets appears multiple
// times in the result -- confirm callers expect (or tolerate) duplicates.
template <class Scalar_>
std::vector<const Keypoint<Scalar_> *>
LandmarkDatabase<Scalar_>::getLandmarksForHost(const TimeCamId &tcid) const {
  std::vector<const Keypoint<Scalar> *> res;
  for (const auto &[k, obs] : observations.at(tcid))
    for (const auto &v : obs) res.emplace_back(&kpts.at(v));
  return res;
}
// Registers an observation of an existing landmark in the given target
// (frame, camera), updating both the landmark's observation map and the
// host->target observation index. The landmark must already exist.
template <class Scalar_>
void LandmarkDatabase<Scalar_>::addObservation(
    const TimeCamId &tcid_target, const KeypointObservation<Scalar> &o) {
  const auto it = kpts.find(o.kpt_id);
  BASALT_ASSERT(it != kpts.end());
  Keypoint<Scalar> &kpt = it->second;
  kpt.obs[tcid_target] = o.pos;
  observations[kpt.host_kf_id][tcid_target].insert(o.kpt_id);
}
// Mutable access to a landmark; throws std::out_of_range (via at()) for
// unknown ids, exactly as before.
template <class Scalar_>
Keypoint<Scalar_> &LandmarkDatabase<Scalar_>::getLandmark(KeypointId lm_id) {
  Keypoint<Scalar> &lm = kpts.at(lm_id);
  return lm;
}
// Read-only access to a landmark; throws std::out_of_range (via at()) for
// unknown ids, exactly as before.
template <class Scalar_>
const Keypoint<Scalar_> &LandmarkDatabase<Scalar_>::getLandmark(
    KeypointId lm_id) const {
  const Keypoint<Scalar> &lm = kpts.at(lm_id);
  return lm;
}
// Read-only access to the host -> (target -> keypoint ids) observation index.
template <class Scalar_>
const std::unordered_map<TimeCamId, std::map<TimeCamId, std::set<KeypointId>>>
    &LandmarkDatabase<Scalar_>::getObservations() const {
  return observations;
}
// Read-only access to the landmark storage (keypoint id -> landmark).
template <class Scalar_>
const Eigen::aligned_unordered_map<KeypointId, Keypoint<Scalar_>>
    &LandmarkDatabase<Scalar_>::getLandmarks() const {
  return kpts;
}
// Returns true iff a landmark with the given id is stored in the database.
template <class Scalar_>
bool LandmarkDatabase<Scalar_>::landmarkExists(int lm_id) const {
  return kpts.find(lm_id) != kpts.end();
}
// Number of landmarks currently stored in the database.
template <class Scalar_>
size_t LandmarkDatabase<Scalar_>::numLandmarks() const {
  return kpts.size();
}
// Total number of (landmark, target) observation entries across all hosts
// in the observation index.
template <class Scalar_>
int LandmarkDatabase<Scalar_>::numObservations() const {
  int total = 0;
  for (const auto &host_entry : observations) {
    for (const auto &target_entry : host_entry.second) {
      total += target_entry.second.size();
    }
  }
  return total;
}
// Number of observations currently stored for the given landmark; throws
// std::out_of_range (via at()) if the landmark does not exist.
template <class Scalar_>
int LandmarkDatabase<Scalar_>::numObservations(KeypointId lm_id) const {
  const Keypoint<Scalar> &lm = kpts.at(lm_id);
  return lm.obs.size();
}
// Erases the landmark `it` from the database and removes all of its entries
// from the host->target observation index, pruning target/host entries that
// become empty. Returns the iterator following the erased landmark so
// callers can keep iterating safely.
template <class Scalar_>
typename LandmarkDatabase<Scalar_>::MapIter
LandmarkDatabase<Scalar_>::removeLandmarkHelper(
    LandmarkDatabase<Scalar>::MapIter it) {
  auto host_it = observations.find(it->second.host_kf_id);
  // Remove this landmark's id from every target set under its host.
  for (const auto &[k, v] : it->second.obs) {
    auto target_it = host_it->second.find(k);
    target_it->second.erase(it->first);
    // Prune empty target entries so the index stays minimal.
    if (target_it->second.empty()) host_it->second.erase(target_it);
  }
  if (host_it->second.empty()) observations.erase(host_it);
  return kpts.erase(it);
}
// Removes the single observation `it2` of landmark `it` from both the
// landmark's observation map and the host->target observation index,
// pruning index entries that become empty. Returns the iterator following
// the erased observation so callers can keep iterating safely.
template <class Scalar_>
typename Keypoint<Scalar_>::MapIter
LandmarkDatabase<Scalar_>::removeLandmarkObservationHelper(
    LandmarkDatabase<Scalar>::MapIter it,
    typename Keypoint<Scalar>::MapIter it2) {
  auto host_it = observations.find(it->second.host_kf_id);
  auto target_it = host_it->second.find(it2->first);
  target_it->second.erase(it->first);
  // Prune empty target and host entries so the index stays minimal.
  if (target_it->second.empty()) host_it->second.erase(target_it);
  if (host_it->second.empty()) observations.erase(host_it);
  return it->second.obs.erase(it2);
}
// Removes the landmark and all of its observations; no-op for unknown ids.
template <class Scalar_>
void LandmarkDatabase<Scalar_>::removeLandmark(KeypointId lm_id) {
  const auto it = kpts.find(lm_id);
  if (it == kpts.end()) return;
  removeLandmarkHelper(it);
}
// Removes the given set of observations from a landmark; if fewer than
// min_num_obs observations remain afterwards, the landmark itself is
// removed. The landmark must exist.
template <class Scalar_>
void LandmarkDatabase<Scalar_>::removeObservations(
    KeypointId lm_id, const std::set<TimeCamId> &obs) {
  auto it = kpts.find(lm_id);
  BASALT_ASSERT(it != kpts.end());
  // Erase-safe removal of all listed observations.
  for (auto it2 = it->second.obs.begin(); it2 != it->second.obs.end();) {
    if (obs.count(it2->first) > 0) {
      it2 = removeLandmarkObservationHelper(it, it2);
    } else
      it2++;
  }
  if (it->second.obs.size() < min_num_obs) {
    removeLandmarkHelper(it);
  }
}
// //////////////////////////////////////////////////////////////////
// instantiate templates

// Note: double specialization is unconditional, b/c NfrMapper depends on it.
//#ifdef BASALT_INSTANTIATIONS_DOUBLE
template class LandmarkDatabase<double>;
//#endif

#ifdef BASALT_INSTANTIATIONS_FLOAT
template class LandmarkDatabase<float>;
#endif
} // namespace basalt

View File

@@ -0,0 +1,361 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <basalt/utils/assert.h>
#include <basalt/vi_estimator/marg_helper.h>
namespace basalt {
// Marginalizes the variables listed in idx_to_marg out of the squared
// system (abs_H, abs_b) via the Schur complement, producing the squared
// prior (marg_H, marg_b) over the variables in idx_to_keep. abs_H and abs_b
// are consumed (resized to zero) on return.
template <class Scalar_>
void MargHelper<Scalar_>::marginalizeHelperSqToSq(
    MatX& abs_H, VecX& abs_b, const std::set<int>& idx_to_keep,
    const std::set<int>& idx_to_marg, MatX& marg_H, VecX& marg_b) {
  int keep_size = idx_to_keep.size();
  int marg_size = idx_to_marg.size();
  BASALT_ASSERT(keep_size + marg_size == abs_H.cols());

  // Fill permutation matrix that moves the kept variables to the front and
  // the marginalized ones to the back.
  Eigen::Matrix<int, Eigen::Dynamic, 1> indices(idx_to_keep.size() +
                                                idx_to_marg.size());
  {
    auto it = idx_to_keep.begin();
    for (size_t i = 0; i < idx_to_keep.size(); i++) {
      indices[i] = *it;
      it++;
    }
  }
  {
    auto it = idx_to_marg.begin();
    for (size_t i = 0; i < idx_to_marg.size(); i++) {
      indices[idx_to_keep.size() + i] = *it;
      it++;
    }
  }
  const Eigen::PermutationWrapper<Eigen::Matrix<int, Eigen::Dynamic, 1>> p(
      indices);
  const Eigen::PermutationMatrix<Eigen::Dynamic> pt = p.transpose();
  // Reorder the system so the block structure is [keep | marg].
  abs_b.applyOnTheLeft(pt);
  abs_H.applyOnTheLeft(pt);
  abs_H.applyOnTheRight(p);

  // Use of fullPivLu compared to alternatives was determined experimentally.
  // It is more stable than ldlt when the matrix is numerically close to
  // indefinite, but it is faster than the even more stable
  // fullPivHouseholderQr.
  // auto H_mm_decomposition =
  //     abs_H.bottomRightCorner(marg_size, marg_size).ldlt();
  // auto H_mm_decomposition =
  //     abs_H.bottomRightCorner(marg_size, marg_size).fullPivLu();
  // MatX H_mm_inv = H_mm_decomposition.inverse();
  // auto H_mm_decomposition =
  //     abs_H.bottomRightCorner(marg_size, marg_size).colPivHouseholderQr();

  // DO NOT USE!!!
  // Pseudoinverse with SVD. Uses iterative method and results in severely
  // low accuracy. Use the version below (COD) for pseudoinverse.
  // auto H_mm_decomposition =
  //     abs_H.bottomRightCorner(marg_size, marg_size)
  //         .jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV);

  // Eigen version of pseudoinverse
  auto H_mm_decomposition = abs_H.bottomRightCorner(marg_size, marg_size)
                                .completeOrthogonalDecomposition();
  MatX H_mm_inv = H_mm_decomposition.pseudoInverse();

  // Should be more numerically stable version of:
  abs_H.topRightCorner(keep_size, marg_size) *= H_mm_inv;
  // abs_H.topRightCorner(keep_size, marg_size) =
  //     H_mm_decomposition
  //         .solve(abs_H.topRightCorner(keep_size, marg_size).transpose())
  //         .transpose();

  // Schur complement: prior on the kept variables.
  marg_H = abs_H.topLeftCorner(keep_size, keep_size);
  marg_b = abs_b.head(keep_size);
  marg_H -= abs_H.topRightCorner(keep_size, marg_size) *
            abs_H.bottomLeftCorner(marg_size, keep_size);
  marg_b -= abs_H.topRightCorner(keep_size, marg_size) * abs_b.tail(marg_size);

  // Free the (consumed) input system.
  abs_H.resize(0, 0);
  abs_b.resize(0);
}
// Marginalizes the variables listed in idx_to_marg out of the squared system
// (abs_H, abs_b) via the Schur complement and converts the resulting prior
// to square-root form (marg_sqrt_H, marg_sqrt_b) with an LDLT
// factorization. abs_H and abs_b are consumed (resized to zero) on return.
//
// TODO: We might lose the strong initial pose prior if there are no obs
// during marginalization (e.g. during first marginalization). Unclear when
// this can happen. Keeping keyframes w/o any points in the optimization
// window might not make sense. --> Double check if and when this currently
// occurs. If no, add asserts. If yes, somehow deal with it differently.
template <class Scalar_>
void MargHelper<Scalar_>::marginalizeHelperSqToSqrt(
    MatX& abs_H, VecX& abs_b, const std::set<int>& idx_to_keep,
    const std::set<int>& idx_to_marg, MatX& marg_sqrt_H, VecX& marg_sqrt_b) {
  int keep_size = idx_to_keep.size();
  int marg_size = idx_to_marg.size();
  BASALT_ASSERT(keep_size + marg_size == abs_H.cols());

  // Fill permutation matrix that moves the kept variables to the front and
  // the marginalized ones to the back.
  Eigen::Matrix<int, Eigen::Dynamic, 1> indices(idx_to_keep.size() +
                                                idx_to_marg.size());
  {
    auto it = idx_to_keep.begin();
    for (size_t i = 0; i < idx_to_keep.size(); i++) {
      indices[i] = *it;
      it++;
    }
  }
  {
    auto it = idx_to_marg.begin();
    for (size_t i = 0; i < idx_to_marg.size(); i++) {
      indices[idx_to_keep.size() + i] = *it;
      it++;
    }
  }
  const Eigen::PermutationWrapper<Eigen::Matrix<int, Eigen::Dynamic, 1>> p(
      indices);
  const Eigen::PermutationMatrix<Eigen::Dynamic> pt = p.transpose();
  // Reorder the system so the block structure is [keep | marg].
  abs_b.applyOnTheLeft(pt);
  abs_H.applyOnTheLeft(pt);
  abs_H.applyOnTheRight(p);

  // Use of fullPivLu compared to alternatives was determined experimentally.
  // It is more stable than ldlt when the matrix is numerically close to
  // indefinite, but it is faster than the even more stable
  // fullPivHouseholderQr.
  // auto H_mm_decomposition =
  //     abs_H.bottomRightCorner(marg_size, marg_size).ldlt();
  // auto H_mm_decomposition =
  //     abs_H.bottomRightCorner(marg_size, marg_size).fullPivLu();
  // MatX H_mm_inv = H_mm_decomposition.inverse();
  // auto H_mm_decomposition =
  //     abs_H.bottomRightCorner(marg_size, marg_size).colPivHouseholderQr();

  // DO NOT USE!!!
  // Pseudoinverse with SVD. Uses iterative method and results in severely
  // low accuracy. Use the version below (COD) for pseudoinverse.
  // auto H_mm_decomposition =
  //     abs_H.bottomRightCorner(marg_size, marg_size)
  //         .jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV);

  // Eigen version of pseudoinverse
  auto H_mm_decomposition = abs_H.bottomRightCorner(marg_size, marg_size)
                                .completeOrthogonalDecomposition();
  MatX H_mm_inv = H_mm_decomposition.pseudoInverse();

  // Should be more numerically stable version of:
  abs_H.topRightCorner(keep_size, marg_size) *= H_mm_inv;
  // abs_H.topRightCorner(keep_size, marg_size).noalias() =
  //     H_mm_decomposition.solve(abs_H.bottomLeftCorner(marg_size, keep_size))
  //         .transpose();

  // Schur complement: squared prior on the kept variables.
  MatX marg_H;
  VecX marg_b;
  marg_H = abs_H.topLeftCorner(keep_size, keep_size);
  marg_b = abs_b.head(keep_size);
  marg_H -= abs_H.topRightCorner(keep_size, marg_size) *
            abs_H.bottomLeftCorner(marg_size, keep_size);
  marg_b -= abs_H.topRightCorner(keep_size, marg_size) * abs_b.tail(marg_size);

  // Factor the prior; negative entries of D (numerical noise for a PSD
  // matrix) are clamped to 0 before taking the square root.
  Eigen::LDLT<Eigen::Ref<MatX>> ldlt(marg_H);
  VecX D_sqrt = ldlt.vectorD().array().max(0).sqrt().matrix();

  // After LDLT, we have
  //     marg_H == P^T*L*D*L^T*P,
  // so we compute the square root as
  //     marg_sqrt_H = sqrt(D)*L^T*P,
  // such that
  //     marg_sqrt_H^T marg_sqrt_H == marg_H.
  marg_sqrt_H.setIdentity(keep_size, keep_size);
  marg_sqrt_H = ldlt.transpositionsP() * marg_sqrt_H;
  marg_sqrt_H = ldlt.matrixU() * marg_sqrt_H;  // U == L^T
  marg_sqrt_H = D_sqrt.asDiagonal() * marg_sqrt_H;

  // For the right hand side, we want
  //     marg_b == marg_sqrt_H^T * marg_sqrt_b
  // so we compute
  //     marg_sqrt_b = (marg_sqrt_H^T)^-1 * marg_b
  //                 = (P^T*L*sqrt(D))^-1 * marg_b
  //                 = sqrt(D)^-1 * L^-1 * P * marg_b
  marg_sqrt_b = ldlt.transpositionsP() * marg_b;
  ldlt.matrixL().solveInPlace(marg_sqrt_b);

  // We already clamped negative values in D_sqrt to 0 above, but for values
  // close to 0 we set b to 0.
  for (int i = 0; i < marg_sqrt_b.size(); ++i) {
    if (D_sqrt(i) > std::sqrt(std::numeric_limits<Scalar>::min()))
      marg_sqrt_b(i) /= D_sqrt(i);
    else
      marg_sqrt_b(i) = 0;
  }

  // std::cout << "marg_sqrt_H diff: "
  //           << (marg_sqrt_H.transpose() * marg_sqrt_H - marg_H).norm()
  //           << " " << marg_H.norm() << std::endl;
  // std::cout << "marg_sqrt_b diff: "
  //           << (marg_sqrt_H.transpose() * marg_sqrt_b - marg_b).norm()
  //           << std::endl;

  // Free the (consumed) input system.
  abs_H.resize(0, 0);
  abs_b.resize(0);
}
// Marginalizes out the variables listed in idx_to_marg from a prior given in
// square-root (Jacobian) form, producing a new square-root prior
// (marg_sqrt_H, marg_sqrt_b) over the idx_to_keep variables only.
//
// The columns of Q2Jp are first permuted to [marg | keep] order; an in-place
// Householder-QR-like elimination then triangularizes the system. The rows
// in [marg_rank, total_rank) no longer involve the marginalized columns and
// form the new prior. Q2Jp / Q2r are destroyed (resized to 0) on return.
template <class Scalar_>
void MargHelper<Scalar_>::marginalizeHelperSqrtToSqrt(
    MatX& Q2Jp, VecX& Q2r, const std::set<int>& idx_to_keep,
    const std::set<int>& idx_to_marg, MatX& marg_sqrt_H, VecX& marg_sqrt_b) {
  Eigen::Index keep_size = idx_to_keep.size();
  Eigen::Index marg_size = idx_to_marg.size();

  BASALT_ASSERT(keep_size + marg_size == Q2Jp.cols());
  BASALT_ASSERT(Q2Jp.rows() == Q2r.rows());

  // Fill permutation matrix: marginalized indices first, kept indices after.
  Eigen::Matrix<int, Eigen::Dynamic, 1> indices(idx_to_keep.size() +
                                                idx_to_marg.size());
  {
    auto it = idx_to_marg.begin();
    for (size_t i = 0; i < idx_to_marg.size(); i++) {
      indices[i] = *it;
      it++;
    }
  }
  {
    auto it = idx_to_keep.begin();
    for (size_t i = 0; i < idx_to_keep.size(); i++) {
      indices[idx_to_marg.size() + i] = *it;
      it++;
    }
  }

  // TODO: check if using TranspositionMatrix is faster
  const Eigen::PermutationWrapper<Eigen::Matrix<int, Eigen::Dynamic, 1>> p(
      indices);
  Q2Jp.applyOnTheRight(p);

  Eigen::Index marg_rank = 0;
  Eigen::Index total_rank = 0;
  {
    // Pivots whose Householder beta falls below this threshold are treated
    // as rank deficient: the column is zeroed instead of eliminated.
    const Scalar rank_threshold =
        std::sqrt(std::numeric_limits<Scalar>::epsilon());

    const Eigen::Index rows = Q2Jp.rows();
    const Eigen::Index cols = Q2Jp.cols();

    // Scratch workspace for applyHouseholderOnTheLeft: `cols` entries for
    // the trailing matrix block plus one extra entry for the rhs vector.
    VecX tempVector;
    tempVector.resize(cols + 1);
    Scalar* tempData = tempVector.data();

    for (Eigen::Index k = 0; k < cols && total_rank < rows; ++k) {
      Eigen::Index remainingRows = rows - total_rank;
      Eigen::Index remainingCols = cols - k - 1;

      Scalar beta;
      Scalar hCoeff;
      // Build the Householder reflector for column k below the current pivot.
      Q2Jp.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeff, beta);

      if (std::abs(beta) > rank_threshold) {
        Q2Jp.coeffRef(total_rank, k) = beta;

        // Apply the reflector to the trailing block and to the rhs.
        Q2Jp.bottomRightCorner(remainingRows, remainingCols)
            .applyHouseholderOnTheLeft(Q2Jp.col(k).tail(remainingRows - 1),
                                       hCoeff, tempData + k + 1);

        Q2r.tail(remainingRows)
            .applyHouseholderOnTheLeft(Q2Jp.col(k).tail(remainingRows - 1),
                                       hCoeff, tempData + cols);

        total_rank++;
      } else {
        Q2Jp.coeffRef(total_rank, k) = 0;
      }

      // Overwrite householder vectors with 0
      Q2Jp.col(k).tail(remainingRows - 1).setZero();

      // Save the rank of marginalize-out part
      if (k == marg_size - 1) {
        marg_rank = total_rank;
      }
    }
  }

  // Keep at least one row so the returned prior is never empty.
  Eigen::Index keep_valid_rows =
      std::max(total_rank - marg_rank, Eigen::Index(1));

  marg_sqrt_H = Q2Jp.block(marg_rank, marg_size, keep_valid_rows, keep_size);
  marg_sqrt_b = Q2r.segment(marg_rank, keep_valid_rows);

  Q2Jp.resize(0, 0);
  Q2r.resize(0);
}
// //////////////////////////////////////////////////////////////////
// instantiate templates
// Note: double specialization is unconditional, b/c NfrMapper depends on it.
//#ifdef BASALT_INSTANTIATIONS_DOUBLE
template class MargHelper<double>;
//#endif
#ifdef BASALT_INSTANTIATIONS_FLOAT
template class MargHelper<float>;
#endif
} // namespace basalt

View File

@@ -0,0 +1,758 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <basalt/optimization/accumulator.h>
#include <basalt/utils/keypoints.h>
#include <basalt/utils/nfr.h>
#include <basalt/utils/tracks.h>
#include <basalt/vi_estimator/marg_helper.h>
#include <basalt/vi_estimator/nfr_mapper.h>
#include <basalt/hash_bow/hash_bow.h>
namespace basalt {
// Constructs the mapper: copies the calibration, seeds the
// Levenberg-Marquardt damping state from the config, and allocates the BoW
// database used for loop-closure candidate retrieval.
NfrMapper::NfrMapper(const Calibration<double>& calib, const VioConfig& config)
    : config(config),
      lambda(config.mapper_lm_lambda_min),  // current LM damping
      min_lambda(config.mapper_lm_lambda_min),
      max_lambda(config.mapper_lm_lambda_max),
      lambda_vee(2) {  // damping growth factor applied on rejected steps
  // These members are not in the initializer list; assigned in the body.
  this->calib = calib;
  this->obs_std_dev = config.mapper_obs_std_dev;
  this->huber_thresh = config.mapper_obs_huber_thresh;

  hash_bow_database.reset(new HashBow<256>(config.mapper_bow_num_bits));
}
// Absorbs one marginalization-data packet into the mapper: reduces it to a
// pose-only prior, extracts nonlinear factors from it, and (if successful)
// copies all contained frame poses / keyframe states into frame_poses.
void NfrMapper::addMargData(MargData::Ptr& data) {
  processMargData(*data);

  // If the prior is rank deficient no factors are created and the
  // contained poses are discarded.
  if (!extractNonlinearFactors(*data)) return;

  // Pose-only states are taken over directly.
  for (const auto& [t_ns, pose_state] : data->frame_poses) {
    frame_poses[t_ns] =
        PoseStateWithLin<double>(pose_state.getT_ns(), pose_state.getPose());
  }

  // Full states contribute only their pose part, and only for keyframes.
  for (const auto& [t_ns, full_state] : data->frame_states) {
    if (data->kfs_all.count(t_ns) == 0) continue;
    auto state = full_state;
    frame_poses[t_ns] = PoseStateWithLin<double>(state.getState().t_ns,
                                                 state.getState().T_w_i);
  }
}
// Converts a full marginalization prior (which may contain
// pose-velocity-bias states) into a pose-only prior: velocity/bias
// components and all non-keyframe states are marginalized out of
// (abs_H, abs_b). Also caches the image data carried by the packet for
// later keypoint detection.
void NfrMapper::processMargData(MargData& m) {
  BASALT_ASSERT(m.aom.total_size == size_t(m.abs_H.cols()));

  AbsOrderMap aom_new;
  std::set<int> idx_to_keep;
  std::set<int> idx_to_marg;

  for (const auto& kv : m.aom.abs_order_map) {
    if (kv.second.second == POSE_SIZE) {
      // Pure pose state: keep all of it unchanged.
      for (size_t i = 0; i < POSE_SIZE; i++)
        idx_to_keep.emplace(kv.second.first + i);
      aom_new.abs_order_map.emplace(kv);
      aom_new.total_size += POSE_SIZE;
    } else if (kv.second.second == POSE_VEL_BIAS_SIZE) {
      if (m.kfs_all.count(kv.first) > 0) {
        // Keyframe: keep the pose part, marginalize velocity and biases,
        // and downgrade the stored full state to a pose-only state.
        for (size_t i = 0; i < POSE_SIZE; i++)
          idx_to_keep.emplace(kv.second.first + i);
        for (size_t i = POSE_SIZE; i < POSE_VEL_BIAS_SIZE; i++)
          idx_to_marg.emplace(kv.second.first + i);

        aom_new.abs_order_map[kv.first] =
            std::make_pair(aom_new.total_size, POSE_SIZE);
        aom_new.total_size += POSE_SIZE;

        PoseStateWithLin<double> p(m.frame_states.at(kv.first));
        m.frame_poses[kv.first] = p;
        m.frame_states.erase(kv.first);
      } else {
        // Non-keyframe: marginalize the whole state.
        for (size_t i = 0; i < POSE_VEL_BIAS_SIZE; i++)
          idx_to_marg.emplace(kv.second.first + i);
        m.frame_states.erase(kv.first);
      }
    } else {
      std::cerr << "Unknown size" << std::endl;
      std::abort();
    }
  }

  if (!idx_to_marg.empty()) {
    Eigen::MatrixXd marg_H_new;
    Eigen::VectorXd marg_b_new;

    MargHelper<Scalar>::marginalizeHelperSqToSq(
        m.abs_H, m.abs_b, idx_to_keep, idx_to_marg, marg_H_new, marg_b_new);

    m.abs_H = marg_H_new;
    m.abs_b = marg_b_new;
    m.aom = aom_new;
  }

  BASALT_ASSERT(m.aom.total_size == size_t(m.abs_H.cols()));

  // save image data for later keypoint detection / matching
  {
    for (const auto& v : m.opt_flow_res) {
      img_data[v->t_ns] = v->input_images;
    }
  }
}
// Extracts nonlinear factors (one roll-pitch factor and pairwise
// relative-pose factors) from the linear marginalization prior in m.
// Returns false if abs_H is rank deficient, in which case no factors are
// created. Factor covariances are obtained by projecting the prior
// covariance (abs_H^-1) through the residual Jacobians.
bool NfrMapper::extractNonlinearFactors(MargData& m) {
  size_t asize = m.aom.total_size;

  Eigen::FullPivHouseholderQR<Eigen::MatrixXd> qr(m.abs_H);
  if (qr.rank() != m.abs_H.cols()) return false;

  // Covariance of the prior: inverse of the (full-rank) Hessian.
  Eigen::MatrixXd cov_old = qr.solve(Eigen::MatrixXd::Identity(asize, asize));

  // NOTE(review): assumes kfs_to_marg is non-empty — verify at call sites.
  int64_t kf_id = *m.kfs_to_marg.cbegin();
  int kf_start_idx = m.aom.abs_order_map.at(kf_id).first;

  auto state_kf = m.frame_poses.at(kf_id);
  Sophus::SE3d T_w_i_kf = state_kf.getPose();

  Eigen::Vector3d pos = T_w_i_kf.translation();
  Eigen::Vector3d yaw_dir_body =
      T_w_i_kf.so3().inverse() * Eigen::Vector3d::UnitX();

  // Jacobians of the position / yaw / roll-pitch residuals w.r.t. the pose,
  // evaluated at the current estimate (zero residual).
  Sophus::Matrix<double, 3, POSE_SIZE> d_pos_d_T_w_i;
  Sophus::Matrix<double, 1, POSE_SIZE> d_yaw_d_T_w_i;
  Sophus::Matrix<double, 2, POSE_SIZE> d_rp_d_T_w_i;

  absPositionError(T_w_i_kf, pos, &d_pos_d_T_w_i);
  yawError(T_w_i_kf, yaw_dir_body, &d_yaw_d_T_w_i);
  rollPitchError(T_w_i_kf, T_w_i_kf.so3(), &d_rp_d_T_w_i);

  {
    // Roll-pitch factor for the marginalized keyframe (only added with IMU).
    Eigen::MatrixXd J;
    J.setZero(POSE_SIZE, asize);
    J.block<3, POSE_SIZE>(0, kf_start_idx) = d_pos_d_T_w_i;
    J.block<1, POSE_SIZE>(3, kf_start_idx) = d_yaw_d_T_w_i;
    J.block<2, POSE_SIZE>(4, kf_start_idx) = d_rp_d_T_w_i;

    Sophus::Matrix6d cov_new = J * cov_old * J.transpose();

    RollPitchFactor rpf;
    rpf.t_ns = kf_id;
    rpf.R_w_i_meas = T_w_i_kf.so3();
    if (!config.mapper_no_factor_weights) {
      // Weight by the inverse of the roll-pitch sub-block of the covariance.
      rpf.cov_inv = cov_new.block<2, 2>(4, 4).inverse();
    } else {
      rpf.cov_inv.setIdentity();
    }

    if (m.use_imu) {
      roll_pitch_factors.emplace_back(rpf);
    }
  }

  // Relative-pose factors between the marginalized keyframe and every other
  // keyframe that still has a pose in the prior.
  for (int64_t other_id : m.kfs_all) {
    if (m.frame_poses.count(other_id) == 0 || other_id == kf_id) {
      continue;
    }

    auto state_o = m.frame_poses.at(other_id);
    Sophus::SE3d T_w_i_o = state_o.getPose();
    Sophus::SE3d T_kf_o = T_w_i_kf.inverse() * T_w_i_o;

    int o_start_idx = m.aom.abs_order_map.at(other_id).first;

    Sophus::Matrix6d d_res_d_T_w_i, d_res_d_T_w_j;
    relPoseError(T_kf_o, T_w_i_kf, T_w_i_o, &d_res_d_T_w_i, &d_res_d_T_w_j);

    Eigen::MatrixXd J;
    J.setZero(POSE_SIZE, asize);
    J.block<POSE_SIZE, POSE_SIZE>(0, kf_start_idx) = d_res_d_T_w_i;
    J.block<POSE_SIZE, POSE_SIZE>(0, o_start_idx) = d_res_d_T_w_j;

    Sophus::Matrix6d cov_new = J * cov_old * J.transpose();

    RelPoseFactor rpf;
    rpf.t_i_ns = kf_id;
    rpf.t_j_ns = other_id;
    rpf.T_i_j = T_kf_o;
    rpf.cov_inv.setIdentity();
    if (!config.mapper_no_factor_weights) {
      // cov_inv = cov_new^-1 via LDLT solve against identity.
      cov_new.ldlt().solveInPlace(rpf.cov_inv);
    }

    rel_pose_factors.emplace_back(rpf);
  }

  return true;
}
// Runs up to num_iterations of global bundle adjustment over all frame
// poses and landmarks. Each iteration relinearizes the problem, accumulates
// the Hessian in parallel (vision terms plus, if enabled, the extracted
// roll-pitch and relative-pose factors), and applies either a
// Levenberg-Marquardt step (config.mapper_use_lm) or a single damped
// Gauss-Newton step. Terminates early once the largest increment entry
// drops below 1e-5.
void NfrMapper::optimize(int num_iterations) {
  // Build the absolute ordering: every frame contributes one pose block.
  AbsOrderMap aom;
  for (const auto& kv : frame_poses) {
    aom.abs_order_map[kv.first] = std::make_pair(aom.total_size, POSE_SIZE);
    aom.total_size += POSE_SIZE;
  }

  for (int iter = 0; iter < num_iterations; iter++) {
    auto t1 = std::chrono::high_resolution_clock::now();

    // Relinearize all vision (reprojection) terms; rld_error is set by
    // linearizeHelper.
    double rld_error;
    Eigen::aligned_vector<RelLinData> rld_vec;
    linearizeHelper(rld_vec, lmdb.getObservations(), rld_error);

    // Accumulate the absolute Hessian / gradient in parallel.
    MapperLinearizeAbsReduce<SparseHashAccumulator<double>> lopt(aom,
                                                                 &frame_poses);
    tbb::blocked_range<Eigen::aligned_vector<RelLinData>::const_iterator> range(
        rld_vec.begin(), rld_vec.end());
    tbb::blocked_range<Eigen::aligned_vector<RollPitchFactor>::const_iterator>
        range1(roll_pitch_factors.begin(), roll_pitch_factors.end());
    tbb::blocked_range<Eigen::aligned_vector<RelPoseFactor>::const_iterator>
        range2(rel_pose_factors.begin(), rel_pose_factors.end());

    tbb::parallel_reduce(range, lopt);
    if (config.mapper_use_factors) {
      tbb::parallel_reduce(range1, lopt);
      tbb::parallel_reduce(range2, lopt);
    }

    double error_total = rld_error + lopt.rel_error + lopt.roll_pitch_error;

    std::cout << "[LINEARIZE] iter " << iter
              << " before_update_error: vision: " << rld_error
              << " rel_error: " << lopt.rel_error
              << " roll_pitch_error: " << lopt.roll_pitch_error
              << " total: " << error_total << std::endl;

    lopt.accum.iterative_solver = true;
    lopt.accum.print_info = true;
    lopt.accum.setup_solver();

    // Diagonal of the Hessian, used for LM-style damping of the solve.
    const Eigen::VectorXd Hdiag = lopt.accum.Hdiagonal();

    bool converged = false;

    if (config.mapper_use_lm) {  // Use LevenbergMarquardt
      bool step = false;
      int max_iter = 10;

      // Inner loop: retry with increasing damping until a step is accepted
      // or the retry budget is exhausted.
      while (!step && max_iter > 0 && !converged) {
        Eigen::VectorXd Hdiag_lambda = Hdiag * lambda;
        for (int i = 0; i < Hdiag_lambda.size(); i++)
          Hdiag_lambda[i] = std::max(Hdiag_lambda[i], min_lambda);

        const Eigen::VectorXd inc = lopt.accum.solve(&Hdiag_lambda);
        double max_inc = inc.array().abs().maxCoeff();
        if (max_inc < 1e-5) converged = true;

        // Save state so a rejected step can be rolled back.
        backup();

        // apply increment to poses
        for (auto& kv : frame_poses) {
          int idx = aom.abs_order_map.at(kv.first).first;
          BASALT_ASSERT(!kv.second.isLinearized());
          kv.second.applyInc(-inc.segment<POSE_SIZE>(idx));
        }

        // Update points
        tbb::blocked_range<size_t> keys_range(0, rld_vec.size());
        auto update_points_func = [&](const tbb::blocked_range<size_t>& r) {
          for (size_t i = r.begin(); i != r.end(); ++i) {
            const auto& rld = rld_vec[i];
            updatePoints(aom, rld, inc, lmdb);
          }
        };
        tbb::parallel_for(keys_range, update_points_func);

        // Re-evaluate the total cost after the tentative update.
        double after_update_vision_error = 0;
        double after_rel_error = 0;
        double after_roll_pitch_error = 0;

        computeError(after_update_vision_error);
        if (config.mapper_use_factors) {
          computeRelPose(after_rel_error);
          computeRollPitch(after_roll_pitch_error);
        }

        double after_error_total = after_update_vision_error + after_rel_error +
                                   after_roll_pitch_error;

        double f_diff = (error_total - after_error_total);
        if (f_diff < 0) {
          // Cost increased: reject the step, restore state, raise damping.
          std::cout << "\t[REJECTED] lambda:" << lambda << " f_diff: " << f_diff
                    << " max_inc: " << max_inc
                    << " vision_error: " << after_update_vision_error
                    << " rel_error: " << after_rel_error
                    << " roll_pitch_error: " << after_roll_pitch_error
                    << " total: " << after_error_total << std::endl;
          lambda = std::min(max_lambda, lambda_vee * lambda);
          lambda_vee *= 2;

          restore();
        } else {
          // Cost decreased: accept the step and relax damping.
          std::cout << "\t[ACCEPTED] lambda:" << lambda << " f_diff: " << f_diff
                    << " max_inc: " << max_inc
                    << " vision_error: " << after_update_vision_error
                    << " rel_error: " << after_rel_error
                    << " roll_pitch_error: " << after_roll_pitch_error
                    << " total: " << after_error_total << std::endl;

          lambda = std::max(min_lambda, lambda / 3);
          lambda_vee = 2;

          step = true;
        }

        max_iter--;

        if (after_error_total > error_total) {
          std::cout << "increased error after update!!!" << std::endl;
        }
      }
    } else {  // Use Gauss-Newton
      // Single damped step; no acceptance test or rollback.
      Eigen::VectorXd Hdiag_lambda = Hdiag * min_lambda;
      for (int i = 0; i < Hdiag_lambda.size(); i++)
        Hdiag_lambda[i] = std::max(Hdiag_lambda[i], min_lambda);

      const Eigen::VectorXd inc = lopt.accum.solve(&Hdiag_lambda);
      double max_inc = inc.array().abs().maxCoeff();
      if (max_inc < 1e-5) converged = true;

      // apply increment to poses
      for (auto& kv : frame_poses) {
        int idx = aom.abs_order_map.at(kv.first).first;
        BASALT_ASSERT(!kv.second.isLinearized());
        kv.second.applyInc(-inc.segment<POSE_SIZE>(idx));
      }

      // Update points
      tbb::blocked_range<size_t> keys_range(0, rld_vec.size());
      auto update_points_func = [&](const tbb::blocked_range<size_t>& r) {
        for (size_t i = r.begin(); i != r.end(); ++i) {
          const auto& rld = rld_vec[i];
          updatePoints(aom, rld, inc, lmdb);
        }
      };
      tbb::parallel_for(keys_range, update_points_func);
    }

    auto t2 = std::chrono::high_resolution_clock::now();
    auto elapsed =
        std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1);
    std::cout << "iter " << iter << " time : " << elapsed.count()
              << "(us), num_states " << frame_states.size() << " num_poses "
              << frame_poses.size() << std::endl;

    if (converged) break;
  }
}
// Mutable access to the map from frame timestamp (ns) to its pose state.
Eigen::aligned_map<int64_t, PoseStateWithLin<double>>&
NfrMapper::getFramePoses() {
  return frame_poses;
}
void NfrMapper::computeRelPose(double& rel_error) {
rel_error = 0;
for (const RelPoseFactor& rpf : rel_pose_factors) {
const Sophus::SE3d& pose_i = frame_poses.at(rpf.t_i_ns).getPose();
const Sophus::SE3d& pose_j = frame_poses.at(rpf.t_j_ns).getPose();
Sophus::Vector6d res = relPoseError(rpf.T_i_j, pose_i, pose_j);
rel_error += res.transpose() * rpf.cov_inv * res;
}
}
void NfrMapper::computeRollPitch(double& roll_pitch_error) {
roll_pitch_error = 0;
for (const RollPitchFactor& rpf : roll_pitch_factors) {
const Sophus::SE3d& pose = frame_poses.at(rpf.t_ns).getPose();
Sophus::Vector2d res = rollPitchError(pose, rpf.R_w_i_meas);
roll_pitch_error += res.transpose() * rpf.cov_inv * res;
}
}
// Detects keypoints on every cached image whose frame has a pose, computes
// their angles, descriptors and BoW vectors, and inserts them into the
// place-recognition database. Frames are processed in parallel.
void NfrMapper::detect_keypoints() {
  // Only frames that have an optimized pose are processed.
  std::vector<int64_t> keys;
  for (const auto& kv : img_data) {
    if (frame_poses.count(kv.first) > 0) {
      keys.emplace_back(kv.first);
    }
  }

  auto t1 = std::chrono::high_resolution_clock::now();

  tbb::parallel_for(
      tbb::blocked_range<size_t>(0, keys.size()),
      [&](const tbb::blocked_range<size_t>& r) {
        for (size_t j = r.begin(); j != r.end(); ++j) {
          auto kv = img_data.find(keys[j]);
          if (kv->second.get()) {
            // One keypoint set per camera of the frame.
            for (size_t i = 0; i < kv->second->img_data.size(); i++) {
              TimeCamId tcid(kv->first, i);
              // NOTE(review): feature_corners[] is written from parallel
              // workers; distinct tcids touch distinct entries, but insertion
              // into the container itself is concurrent — verify the
              // container type tolerates this.
              KeypointsData& kd = feature_corners[tcid];

              if (!kv->second->img_data[i].img.get()) continue;

              const Image<const uint16_t> img =
                  kv->second->img_data[i].img->Reinterpret<const uint16_t>();

              detectKeypointsMapping(img, kd,
                                     config.mapper_detection_num_points);
              computeAngles(img, kd, true);
              computeDescriptors(img, kd);

              // Unproject corners to bearing vectors for later geometry.
              std::vector<bool> success;
              calib.intrinsics[tcid.cam_id].unproject(kd.corners, kd.corners_3d,
                                                      success);

              // Register the frame in the BoW place-recognition database.
              hash_bow_database->compute_bow(kd.corner_descriptors, kd.hashes,
                                             kd.bow_vector);
              hash_bow_database->add_to_database(tcid, kd.bow_vector);
            }
          }
        }
      });

  auto t2 = std::chrono::high_resolution_clock::now();

  auto elapsed1 =
      std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1);

  std::cout << "Processed " << feature_corners.size() << " frames."
            << std::endl;
  std::cout << "Detection time: " << elapsed1.count() * 1e-6 << "s."
            << std::endl;
}
void NfrMapper::match_stereo() {
// Pose of camera 1 (right) w.r.t camera 0 (left)
const Sophus::SE3d T_0_1 = calib.T_i_c[0].inverse() * calib.T_i_c[1];
// Essential matrix
Eigen::Matrix4d E;
computeEssential(T_0_1, E);
std::cout << "Matching " << img_data.size() << " stereo pairs..."
<< std::endl;
int num_matches = 0;
int num_inliers = 0;
for (const auto& kv : img_data) {
const TimeCamId tcid1(kv.first, 0), tcid2(kv.first, 1);
MatchData md;
md.T_i_j = T_0_1;
const KeypointsData& kd1 = feature_corners[tcid1];
const KeypointsData& kd2 = feature_corners[tcid2];
matchDescriptors(kd1.corner_descriptors, kd2.corner_descriptors, md.matches,
config.mapper_max_hamming_distance,
config.mapper_second_best_test_ratio);
num_matches += md.matches.size();
findInliersEssential(kd1, kd2, E, 1e-3, md);
if (md.inliers.size() > 16) {
num_inliers += md.inliers.size();
feature_matches[std::make_pair(tcid1, tcid2)] = md;
}
}
std::cout << "Matched " << img_data.size() << " stereo pairs with "
<< num_inliers << " inlier matches (" << num_matches << " total)."
<< std::endl;
}
// Finds loop-closure candidate image pairs via the BoW database (stage 1),
// then verifies each candidate pair by descriptor matching + RANSAC
// (stage 2). Verified pairs are stored in feature_matches.
void NfrMapper::match_all() {
  std::vector<TimeCamId> keys;
  std::unordered_map<TimeCamId, size_t> id_to_key_idx;

  for (const auto& kv : feature_corners) {
    id_to_key_idx[kv.first] = keys.size();
    keys.push_back(kv.first);
  }

  auto t1 = std::chrono::high_resolution_clock::now();

  struct match_pair {
    size_t i;
    size_t j;
    double score;
  };

  tbb::concurrent_vector<match_pair> ids_to_match;

  tbb::blocked_range<size_t> keys_range(0, keys.size());
  // Stage 1: query the BoW database for similar frames (parallel).
  auto compute_pairs = [&](const tbb::blocked_range<size_t>& r) {
    for (size_t i = r.begin(); i != r.end(); ++i) {
      const TimeCamId& tcid = keys[i];
      const KeypointsData& kd = feature_corners.at(tcid);

      std::vector<std::pair<TimeCamId, double>> results;

      hash_bow_database->querry_database(kd.bow_vector,
                                         config.mapper_num_frames_to_match,
                                         results, &tcid.frame_id);

      // Keep candidates from other frames above the similarity threshold.
      for (const auto& otcid_score : results) {
        if (otcid_score.first.frame_id != tcid.frame_id &&
            otcid_score.second > config.mapper_frames_to_match_threshold) {
          match_pair m;
          m.i = i;
          m.j = id_to_key_idx.at(otcid_score.first);
          m.score = otcid_score.second;

          ids_to_match.emplace_back(m);
        }
      }
    }
  };
  tbb::parallel_for(keys_range, compute_pairs);

  auto t2 = std::chrono::high_resolution_clock::now();

  std::cout << "Matching " << ids_to_match.size() << " image pairs..."
            << std::endl;

  std::atomic<int> total_matched = 0;

  // Stage 2: descriptor matching + geometric verification (parallel).
  tbb::blocked_range<size_t> range(0, ids_to_match.size());
  auto match_func = [&](const tbb::blocked_range<size_t>& r) {
    int matched = 0;

    for (size_t j = r.begin(); j != r.end(); ++j) {
      const TimeCamId& id1 = keys[ids_to_match[j].i];
      const TimeCamId& id2 = keys[ids_to_match[j].j];

      const KeypointsData& f1 = feature_corners[id1];
      const KeypointsData& f2 = feature_corners[id2];

      MatchData md;
      // TODO(review): hard-coded hamming distance (70) and second-best ratio
      // (1.2) — consider using the config values as match_stereo does.
      matchDescriptors(f1.corner_descriptors, f2.corner_descriptors, md.matches,
                       70, 1.2);

      if (int(md.matches.size()) > config.mapper_min_matches) {
        matched++;
        findInliersRansac(f1, f2, config.mapper_ransac_threshold,
                          config.mapper_min_matches, md);
      }

      // NOTE(review): feature_matches is written here from parallel workers;
      // verify the container is safe for concurrent insertion.
      if (!md.inliers.empty()) feature_matches[std::make_pair(id1, id2)] = md;
    }

    total_matched += matched;
  };
  tbb::parallel_for(range, match_func);

  auto t3 = std::chrono::high_resolution_clock::now();

  auto elapsed1 =
      std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1);
  auto elapsed2 =
      std::chrono::duration_cast<std::chrono::microseconds>(t3 - t2);

  // Aggregate statistics over all stored matches (including stereo pairs).
  int num_matches = 0;
  int num_inliers = 0;

  for (const auto& kv : feature_matches) {
    num_matches += kv.second.matches.size();
    num_inliers += kv.second.inliers.size();
  }

  std::cout << "Matched " << ids_to_match.size() << " image pairs with "
            << num_inliers << " inlier matches (" << num_matches << " total)."
            << std::endl;
  std::cout << "DB query " << elapsed1.count() * 1e-6 << "s. matching "
            << elapsed2.count() * 1e-6
            << "s. Geometric verification attemts: " << total_matched << "."
            << std::endl;
}
void NfrMapper::build_tracks() {
TrackBuilder trackBuilder;
// Build: Efficient fusion of correspondences
trackBuilder.Build(feature_matches);
// Filter: Remove tracks that have conflict
trackBuilder.Filter(config.mapper_min_track_length);
// Export tree to usable data structure
trackBuilder.Export(feature_tracks);
// info
size_t inlier_match_count = 0;
for (const auto& it : feature_matches) {
inlier_match_count += it.second.inliers.size();
}
size_t total_track_obs_count = 0;
for (const auto& it : feature_tracks) {
total_track_obs_count += it.second.size();
}
std::cout << "Built " << feature_tracks.size() << " feature tracks from "
<< inlier_match_count << " matches. Average track length is "
<< total_track_obs_count / (double)feature_tracks.size() << "."
<< std::endl;
}
// Sets up the optimization problem from the feature tracks: for each track,
// triangulates a 3D point between the first observation (host) and the
// first other observation with sufficient baseline, then adds the landmark
// and all of its observations to the landmark database.
void NfrMapper::setup_opt() {
  const double min_triang_distance2 = config.mapper_min_triangulation_dist *
                                      config.mapper_min_triangulation_dist;

  for (const auto& kv : feature_tracks) {
    // Triangulation needs at least two observations.
    if (kv.second.size() < 2) continue;

    // Take first observation as host
    auto it = kv.second.begin();

    TimeCamId tcid_h = it->first;
    FeatureId feat_id_h = it->second;
    Eigen::Vector2d pos_2d_h = feature_corners.at(tcid_h).corners[feat_id_h];
    Eigen::Vector4d pos_3d_h;
    calib.intrinsics[tcid_h.cam_id].unproject(pos_2d_h, pos_3d_h);

    it++;

    // Search for the first other observation with enough baseline; once a
    // valid triangulation is found, the landmark is added and we break.
    for (; it != kv.second.end(); it++) {
      TimeCamId tcid_o = it->first;
      FeatureId feat_id_o = it->second;
      Eigen::Vector2d pos_2d_o = feature_corners.at(tcid_o).corners[feat_id_o];
      Eigen::Vector4d pos_3d_o;
      calib.intrinsics[tcid_o.cam_id].unproject(pos_2d_o, pos_3d_o);

      Sophus::SE3d T_w_h = frame_poses.at(tcid_h.frame_id).getPose() *
                           calib.T_i_c[tcid_h.cam_id];
      Sophus::SE3d T_w_o = frame_poses.at(tcid_o.frame_id).getPose() *
                           calib.T_i_c[tcid_o.cam_id];

      Sophus::SE3d T_h_o = T_w_h.inverse() * T_w_o;

      // Reject pairs whose baseline is too short for stable triangulation.
      if (T_h_o.translation().squaredNorm() < min_triang_distance2) continue;

      Eigen::Vector4d pos_3d =
          triangulate(pos_3d_h.head<3>(), pos_3d_o.head<3>(), T_h_o);

      // Reject non-finite results and implausible inverse distances
      // (behind the camera, or inverse distance > 2).
      if (!pos_3d.array().isFinite().all() || pos_3d[3] <= 0 || pos_3d[3] > 2.0)
        continue;

      // Landmark parameterization: stereographic direction + inverse
      // distance, hosted in the first observing camera.
      Keypoint<Scalar> pos;
      pos.host_kf_id = tcid_h;
      pos.direction = StereographicParam<double>::project(pos_3d);
      pos.inv_dist = pos_3d[3];

      lmdb.addLandmark(kv.first, pos);

      // Register every observation of the track with the database.
      for (const auto& obs_kv : kv.second) {
        KeypointObservation<Scalar> ko;
        ko.kpt_id = kv.first;
        ko.pos = feature_corners.at(obs_kv.first).corners[obs_kv.second];

        lmdb.addObservation(obs_kv.first, ko);
      }
      break;
    }
  }
}
} // namespace basalt

View File

@@ -0,0 +1,808 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <basalt/vi_estimator/sc_ba_base.h>
#include <tbb/parallel_for.h>
#include <basalt/imu/preintegration.h>
#include <basalt/utils/ba_utils.h>
namespace basalt {
// Back-substitutes the landmark increments for one relative-lin-data block
// given the absolute pose increment `inc`, updates the landmarks in lmdb,
// and (optionally) accumulates the predicted model cost decrease in l_diff.
template <class Scalar_>
void ScBundleAdjustmentBase<Scalar_>::updatePoints(
    const AbsOrderMap& aom, const RelLinData& rld, const VecX& inc,
    LandmarkDatabase<Scalar>& lmdb, Scalar* l_diff) {
  // We want to compute the model cost change. The model function is
  //
  //     L(inc) = 0.5 r^T r + inc^T b + 0.5 inc^T H inc,
  //
  // and thus the expected decrease in cost for the computed increment is
  //
  //     l_diff = L(0) - L(inc)
  //            = - inc^T b - 0.5 inc^T H inc.
  //
  // Here we have
  //
  //           | incp |      | bp |      | Hpp  Hpl |
  //     inc = |      |  b = |    |  H = |          |
  //           | incl |,     | bl |,     | Hlp  Hll |,
  //
  // and thus
  //
  //     l_diff = - incp^T bp - incl^T bl -
  //              0.5 incp^T Hpp incp - incp^T Hpl incl - 0.5 incl^T Hll incl
  //            = - sum_{p} (incp^T (bp + 0.5 Hpp incp)) -
  //              sum_{l} (incl^T (bl + 0.5 Hll incl +
  //                               sum_{obs} (Hpl^T incp))),
  //
  // where sum_{p} sums over all (relative) poses, sum_{l} over all landmarks,
  // and sum_{obs} over all observations of each landmark.
  //
  // Note: l_diff acts like an accumulator; we just add w/o initializing to 0.

  // Convert absolute pose increments to relative (host-target) increments.
  VecX rel_inc;
  rel_inc.setZero(rld.order.size() * POSE_SIZE);
  for (size_t i = 0; i < rld.order.size(); i++) {
    const TimeCamId& tcid_h = rld.order[i].first;
    const TimeCamId& tcid_t = rld.order[i].second;

    if (tcid_h.frame_id != tcid_t.frame_id) {
      int abs_h_idx = aom.abs_order_map.at(tcid_h.frame_id).first;
      int abs_t_idx = aom.abs_order_map.at(tcid_t.frame_id).first;

      Eigen::Matrix<Scalar, POSE_SIZE, 1> inc_p =
          rld.d_rel_d_h[i] * inc.template segment<POSE_SIZE>(abs_h_idx) +
          rld.d_rel_d_t[i] * inc.template segment<POSE_SIZE>(abs_t_idx);

      rel_inc.template segment<POSE_SIZE>(i * POSE_SIZE) = inc_p;

      if (l_diff) {
        // Note: inc_p is still negated b/c we solve H(-x) = b
        const FrameRelLinData& frld = rld.Hpppl[i];
        *l_diff -= -inc_p.dot((frld.bp - Scalar(0.5) * (frld.Hpp * inc_p)));
      }
    }
  }

  // Back-substitute each landmark: incl = -Hll^-1 (bl - Hlp incp).
  for (const auto& kv : rld.lm_to_obs) {
    int lm_idx = kv.first;
    const auto& lm_obs = kv.second;

    Vec3 H_l_p_x;
    H_l_p_x.setZero();

    for (size_t k = 0; k < lm_obs.size(); k++) {
      int rel_idx = lm_obs[k].first;
      const FrameRelLinData& frld = rld.Hpppl.at(rel_idx);

      const Eigen::Matrix<Scalar, POSE_SIZE, 3>& H_p_l_other =
          frld.Hpl.at(lm_obs[k].second);

      // Note: pose increment is still negated b/c we solve "H (-inc) = b"
      H_l_p_x += H_p_l_other.transpose() *
                 rel_inc.template segment<POSE_SIZE>(rel_idx * POSE_SIZE);
    }

    // final negation of inc_l b/c we solve "H (-inc) = b"
    Vec3 inc_l = -(rld.Hllinv.at(lm_idx) * (rld.bl.at(lm_idx) - H_l_p_x));

    Keypoint<Scalar>& kpt = lmdb.getLandmark(lm_idx);
    kpt.direction += inc_l.template head<2>();
    kpt.inv_dist += inc_l[2];

    // Clamp inverse distance to keep it non-negative.
    kpt.inv_dist = std::max(Scalar(0), kpt.inv_dist);

    if (l_diff) {
      // Note: rel_inc and thus H_l_p_x are still negated b/c we solve H(-x) = b
      *l_diff -= inc_l.transpose() *
                 (rld.bl.at(lm_idx) +
                  Scalar(0.5) * (rld.Hll.at(lm_idx) * inc_l) - H_l_p_x);
    }
  }
}
// Absolute-ordering counterpart of updatePoints: back-substitutes landmark
// increments for one AbsLinData block using host/target pose increments
// taken directly from the absolute increment `inc`, updates the landmarks
// in lmdb, and (optionally) accumulates the predicted cost decrease.
template <class Scalar_>
void ScBundleAdjustmentBase<Scalar_>::updatePointsAbs(
    const AbsOrderMap& aom, const AbsLinData& ald, const VecX& inc,
    LandmarkDatabase<Scalar>& lmdb, Scalar* l_diff) {
  // We want to compute the model cost change. The model function is
  //
  //     L(inc) = 0.5 r^T r + inc^T b + 0.5 inc^T H inc,
  //
  // and thus the expected decrease in cost for the computed increment is
  //
  //     l_diff = L(0) - L(inc)
  //            = - inc^T b - 0.5 inc^T H inc.
  //
  // Here we have
  //
  //           | incp |      | bp |      | Hpp  Hpl |
  //     inc = |      |  b = |    |  H = |          |
  //           | incl |,     | bl |,     | Hlp  Hll |,
  //
  // and thus
  //
  //     l_diff = - incp^T bp - incl^T bl -
  //              0.5 incp^T Hpp incp - incp^T Hpl incl - 0.5 incl^T Hll incl
  //            = - sum_{p} (incp^T (bp + 0.5 Hpp incp)) -
  //              sum_{l} (incl^T (bl + 0.5 Hll incl +
  //                               sum_{obs} (Hpl^T incp))),
  //
  // where sum_{p} sums over all (relative) poses, sum_{l} over all landmarks,
  // and sum_{obs} over all observations of each landmark.
  //
  // Note: l_diff acts like an accumulator; we just add w/o initializing to 0.

  // Pose contribution to the predicted cost decrease (host and target
  // blocks, including their cross term Hphpt).
  if (l_diff) {
    for (size_t i = 0; i < ald.order.size(); i++) {
      const TimeCamId& tcid_h = ald.order[i].first;
      const TimeCamId& tcid_t = ald.order[i].second;

      int abs_h_idx = aom.abs_order_map.at(tcid_h.frame_id).first;
      int abs_t_idx = aom.abs_order_map.at(tcid_t.frame_id).first;

      auto inc_h = inc.template segment<POSE_SIZE>(abs_h_idx);
      auto inc_t = inc.template segment<POSE_SIZE>(abs_t_idx);

      // Note: inc_p is still negated b/c we solve H(-x) = b
      const FrameAbsLinData& fald = ald.Hpppl[i];
      *l_diff -= -inc_h.dot((fald.bph - Scalar(0.5) * (fald.Hphph * inc_h)) -
                            fald.Hphpt * inc_t) -
                 inc_t.dot((fald.bpt - Scalar(0.5) * (fald.Hptpt * inc_t)));
    }
  }

  // Back-substitute each landmark: incl = -Hll^-1 (bl - Hlp incp).
  for (const auto& kv : ald.lm_to_obs) {
    int lm_idx = kv.first;
    const auto& lm_obs = kv.second;

    Vec3 H_l_p_x;
    H_l_p_x.setZero();

    for (size_t k = 0; k < lm_obs.size(); k++) {
      int rel_idx = lm_obs[k].first;

      const TimeCamId& tcid_h = ald.order.at(rel_idx).first;
      const TimeCamId& tcid_t = ald.order.at(rel_idx).second;

      int abs_h_idx = aom.abs_order_map.at(tcid_h.frame_id).first;
      int abs_t_idx = aom.abs_order_map.at(tcid_t.frame_id).first;

      auto inc_h = inc.template segment<POSE_SIZE>(abs_h_idx);
      auto inc_t = inc.template segment<POSE_SIZE>(abs_t_idx);

      const FrameAbsLinData& fald = ald.Hpppl.at(rel_idx);

      const Eigen::Matrix<Scalar, POSE_SIZE, 3>& H_ph_l_other =
          fald.Hphl.at(lm_obs[k].second);
      const Eigen::Matrix<Scalar, POSE_SIZE, 3>& H_pt_l_other =
          fald.Hptl.at(lm_obs[k].second);

      // Note: pose increment is still negated b/c we solve "H (-inc) = b"
      H_l_p_x += H_ph_l_other.transpose() * inc_h;
      H_l_p_x += H_pt_l_other.transpose() * inc_t;
    }

    // final negation of inc_l b/c we solve "H (-inc) = b"
    Vec3 inc_l = -(ald.Hllinv.at(lm_idx) * (ald.bl.at(lm_idx) - H_l_p_x));

    Keypoint<Scalar>& kpt = lmdb.getLandmark(lm_idx);
    kpt.direction += inc_l.template head<2>();
    kpt.inv_dist += inc_l[2];

    // Clamp inverse distance to keep it non-negative.
    kpt.inv_dist = std::max(Scalar(0), kpt.inv_dist);

    if (l_diff) {
      // Note: rel_inc and thus H_l_p_x are still negated b/c we solve H(-x) = b
      *l_diff -= inc_l.transpose() *
                 (ald.bl.at(lm_idx) +
                  Scalar(0.5) * (ald.Hll.at(lm_idx) * inc_l) - H_l_p_x);
    }
  }
}
// Linearizes all given reprojection observations w.r.t. *relative*
// (host-to-target) poses and landmarks, producing one RelLinData per host
// camera in obs_to_lin. Linearization over the different hosts runs in
// parallel via TBB (each iteration writes only its own rld_vec entry).
// `error` is set to the total Huber-weighted reprojection energy.
template <class Scalar_>
void ScBundleAdjustmentBase<Scalar_>::linearizeHelperStatic(
    Eigen::aligned_vector<RelLinData>& rld_vec,
    const std::unordered_map<
        TimeCamId, std::map<TimeCamId, std::set<KeypointId>>>& obs_to_lin,
    const BundleAdjustmentBase<Scalar>* ba_base, Scalar& error) {
  error = 0;
  rld_vec.clear();

  // Collect host ids into an indexable vector and pre-allocate one
  // RelLinData per host before entering the parallel section.
  std::vector<TimeCamId> obs_tcid_vec;
  for (const auto& kv : obs_to_lin) {
    obs_tcid_vec.emplace_back(kv.first);
    rld_vec.emplace_back(ba_base->lmdb.numLandmarks(), kv.second.size());
  }

  tbb::parallel_for(
      tbb::blocked_range<size_t>(0, obs_tcid_vec.size()),
      [&](const tbb::blocked_range<size_t>& range) {
        for (size_t r = range.begin(); r != range.end(); ++r) {
          auto kv = obs_to_lin.find(obs_tcid_vec[r]);

          RelLinData& rld = rld_vec[r];

          rld.error = Scalar(0);

          const TimeCamId& tcid_h = kv->first;

          // One FrameRelLinData per (host, target) pair.
          for (const auto& obs_kv : kv->second) {
            const TimeCamId& tcid_t = obs_kv.first;
            rld.order.emplace_back(std::make_pair(tcid_h, tcid_t));

            Mat4 T_t_h;
            if (tcid_h != tcid_t) {
              // target and host are not the same
              PoseStateWithLin state_h =
                  ba_base->getPoseStateWithLin(tcid_h.frame_id);
              PoseStateWithLin state_t =
                  ba_base->getPoseStateWithLin(tcid_t.frame_id);

              Mat6 d_rel_d_h, d_rel_d_t;

              // Relative-pose Jacobians are evaluated at the linearization
              // point (getPoseLin)...
              SE3 T_t_h_sophus = computeRelPose(
                  state_h.getPoseLin(), ba_base->calib.T_i_c[tcid_h.cam_id],
                  state_t.getPoseLin(), ba_base->calib.T_i_c[tcid_t.cam_id],
                  &d_rel_d_h, &d_rel_d_t);

              rld.d_rel_d_h.emplace_back(d_rel_d_h);
              rld.d_rel_d_t.emplace_back(d_rel_d_t);

              // ...but the residual uses the current pose estimate if any
              // endpoint has been linearized (first-estimates Jacobians).
              if (state_h.isLinearized() || state_t.isLinearized()) {
                T_t_h_sophus = computeRelPose(
                    state_h.getPose(), ba_base->calib.T_i_c[tcid_h.cam_id],
                    state_t.getPose(), ba_base->calib.T_i_c[tcid_t.cam_id]);
              }

              T_t_h = T_t_h_sophus.matrix();
            } else {
              // Host observes its own landmark: relative pose is identity
              // and does not depend on the absolute poses.
              T_t_h.setIdentity();
              rld.d_rel_d_h.emplace_back(Mat6::Zero());
              rld.d_rel_d_t.emplace_back(Mat6::Zero());
            }

            FrameRelLinData frld;

            // Dispatch on the concrete camera model of the target camera.
            std::visit(
                [&](const auto& cam) {
                  for (KeypointId kpt_id : obs_kv.second) {
                    const Keypoint<Scalar>& kpt_pos =
                        ba_base->lmdb.getLandmark(kpt_id);
                    const Vec2& kpt_obs = kpt_pos.obs.at(tcid_t);

                    Vec2 res;
                    Eigen::Matrix<Scalar, 2, POSE_SIZE> d_res_d_xi;
                    Eigen::Matrix<Scalar, 2, 3> d_res_d_p;

                    bool valid = linearizePoint(kpt_obs, kpt_pos, T_t_h, cam,
                                                res, &d_res_d_xi, &d_res_d_p);

                    if (valid) {
                      // Huber-weighted accumulation of H and b blocks.
                      Scalar e = res.norm();
                      Scalar huber_weight = e < ba_base->huber_thresh
                                                ? Scalar(1.0)
                                                : ba_base->huber_thresh / e;
                      Scalar obs_weight =
                          huber_weight /
                          (ba_base->obs_std_dev * ba_base->obs_std_dev);

                      rld.error += Scalar(0.5) * (2 - huber_weight) *
                                   obs_weight * res.transpose() * res;

                      // Lazily zero-init landmark blocks on first touch.
                      if (rld.Hll.count(kpt_id) == 0) {
                        rld.Hll[kpt_id].setZero();
                        rld.bl[kpt_id].setZero();
                      }

                      rld.Hll[kpt_id] +=
                          obs_weight * d_res_d_p.transpose() * d_res_d_p;
                      rld.bl[kpt_id] +=
                          obs_weight * d_res_d_p.transpose() * res;

                      frld.Hpp +=
                          obs_weight * d_res_d_xi.transpose() * d_res_d_xi;
                      frld.bp += obs_weight * d_res_d_xi.transpose() * res;
                      frld.Hpl.emplace_back(
                          obs_weight * d_res_d_xi.transpose() * d_res_d_p);
                      frld.lm_id.emplace_back(kpt_id);

                      // Record (frame index, obs index within frame) so the
                      // Schur complement can find all observations of this
                      // landmark later.
                      rld.lm_to_obs[kpt_id].emplace_back(
                          rld.Hpppl.size(), frld.lm_id.size() - 1);
                    }
                  }
                },
                ba_base->calib.intrinsics[tcid_t.cam_id].variant);
            rld.Hpppl.emplace_back(frld);
          }

          // Pre-invert the 3x3 landmark Hessians for the Schur complement.
          rld.invert_keypoint_hessians();
        }
      });

  // Reduce per-host errors into the scalar output.
  for (const auto& rld : rld_vec) error += rld.error;
}
// Linearizes all given reprojection observations w.r.t. *absolute* camera
// poses and landmarks. Same structure as linearizeHelperStatic(), but the
// per-observation Jacobian w.r.t. the relative pose is immediately chained
// with d_rel_d_h / d_rel_d_t to obtain Jacobians w.r.t. the absolute host
// and target poses, producing one AbsLinData per host camera. `error` is
// set to the total Huber-weighted reprojection energy. Hosts are processed
// in parallel via TBB.
template <class Scalar_>
void ScBundleAdjustmentBase<Scalar_>::linearizeHelperAbsStatic(
    Eigen::aligned_vector<AbsLinData>& ald_vec,
    const std::unordered_map<
        TimeCamId, std::map<TimeCamId, std::set<KeypointId>>>& obs_to_lin,
    const BundleAdjustmentBase<Scalar>* ba_base, Scalar& error) {
  error = 0;
  ald_vec.clear();

  // Collect host ids into an indexable vector and pre-allocate one
  // AbsLinData per host before entering the parallel section.
  std::vector<TimeCamId> obs_tcid_vec;
  for (const auto& kv : obs_to_lin) {
    obs_tcid_vec.emplace_back(kv.first);
    ald_vec.emplace_back(ba_base->lmdb.numLandmarks(), kv.second.size());
  }

  tbb::parallel_for(
      tbb::blocked_range<size_t>(0, obs_tcid_vec.size()),
      [&](const tbb::blocked_range<size_t>& range) {
        for (size_t r = range.begin(); r != range.end(); ++r) {
          auto kv = obs_to_lin.find(obs_tcid_vec[r]);

          AbsLinData& ald = ald_vec[r];

          ald.error = Scalar(0);

          const TimeCamId& tcid_h = kv->first;

          // One FrameAbsLinData per (host, target) pair.
          for (const auto& obs_kv : kv->second) {
            const TimeCamId& tcid_t = obs_kv.first;
            ald.order.emplace_back(std::make_pair(tcid_h, tcid_t));

            Mat4 T_t_h;
            Mat6 d_rel_d_h, d_rel_d_t;

            if (tcid_h != tcid_t) {
              // target and host are not the same
              PoseStateWithLin state_h =
                  ba_base->getPoseStateWithLin(tcid_h.frame_id);
              PoseStateWithLin state_t =
                  ba_base->getPoseStateWithLin(tcid_t.frame_id);

              // Relative-pose Jacobians at the linearization point...
              SE3 T_t_h_sophus = computeRelPose(
                  state_h.getPoseLin(), ba_base->calib.T_i_c[tcid_h.cam_id],
                  state_t.getPoseLin(), ba_base->calib.T_i_c[tcid_t.cam_id],
                  &d_rel_d_h, &d_rel_d_t);

              // ...but the residual uses the current pose estimate if any
              // endpoint has been linearized (first-estimates Jacobians).
              if (state_h.isLinearized() || state_t.isLinearized()) {
                T_t_h_sophus = computeRelPose(
                    state_h.getPose(), ba_base->calib.T_i_c[tcid_h.cam_id],
                    state_t.getPose(), ba_base->calib.T_i_c[tcid_t.cam_id]);
              }

              T_t_h = T_t_h_sophus.matrix();
            } else {
              // Host observes its own landmark: identity relative pose,
              // independent of the absolute poses.
              T_t_h.setIdentity();
              d_rel_d_h.setZero();
              d_rel_d_t.setZero();
            }

            FrameAbsLinData fald;

            // Dispatch on the concrete camera model of the target camera.
            std::visit(
                [&](const auto& cam) {
                  for (KeypointId kpt_id : obs_kv.second) {
                    const Keypoint<Scalar>& kpt_pos =
                        ba_base->lmdb.getLandmark(kpt_id);
                    const Vec2& kpt_obs = kpt_pos.obs.at(tcid_t);

                    Vec2 res;
                    Eigen::Matrix<Scalar, 2, POSE_SIZE> d_res_d_xi_h,
                        d_res_d_xi_t;
                    Eigen::Matrix<Scalar, 2, 3> d_res_d_p;

                    bool valid;
                    {
                      // Chain the relative-pose Jacobian onto the absolute
                      // host / target poses.
                      Eigen::Matrix<Scalar, 2, POSE_SIZE> d_res_d_xi;
                      valid = linearizePoint(kpt_obs, kpt_pos, T_t_h, cam, res,
                                             &d_res_d_xi, &d_res_d_p);
                      d_res_d_xi_h = d_res_d_xi * d_rel_d_h;
                      d_res_d_xi_t = d_res_d_xi * d_rel_d_t;
                    }

                    if (valid) {
                      // Huber-weighted accumulation of H and b blocks.
                      Scalar e = res.norm();
                      Scalar huber_weight = e < ba_base->huber_thresh
                                                ? Scalar(1.0)
                                                : ba_base->huber_thresh / e;
                      Scalar obs_weight =
                          huber_weight /
                          (ba_base->obs_std_dev * ba_base->obs_std_dev);

                      ald.error += Scalar(0.5) * (2 - huber_weight) *
                                   obs_weight * res.transpose() * res;

                      // Lazily zero-init landmark blocks on first touch.
                      if (ald.Hll.count(kpt_id) == 0) {
                        ald.Hll[kpt_id].setZero();
                        ald.bl[kpt_id].setZero();
                      }

                      ald.Hll[kpt_id] +=
                          obs_weight * d_res_d_p.transpose() * d_res_d_p;
                      ald.bl[kpt_id] +=
                          obs_weight * d_res_d_p.transpose() * res;

                      fald.Hphph +=
                          obs_weight * d_res_d_xi_h.transpose() * d_res_d_xi_h;
                      fald.Hptpt +=
                          obs_weight * d_res_d_xi_t.transpose() * d_res_d_xi_t;
                      fald.Hphpt +=
                          obs_weight * d_res_d_xi_h.transpose() * d_res_d_xi_t;
                      fald.bph += obs_weight * d_res_d_xi_h.transpose() * res;
                      fald.bpt += obs_weight * d_res_d_xi_t.transpose() * res;
                      fald.Hphl.emplace_back(
                          obs_weight * d_res_d_xi_h.transpose() * d_res_d_p);
                      fald.Hptl.emplace_back(
                          obs_weight * d_res_d_xi_t.transpose() * d_res_d_p);
                      fald.lm_id.emplace_back(kpt_id);

                      // Record (frame index, obs index within frame) so the
                      // Schur complement can find all observations of this
                      // landmark later.
                      ald.lm_to_obs[kpt_id].emplace_back(
                          ald.Hpppl.size(), fald.lm_id.size() - 1);
                    }
                  }
                },
                ba_base->calib.intrinsics[tcid_t.cam_id].variant);
            ald.Hpppl.emplace_back(fald);
          }

          // Pre-invert the 3x3 landmark Hessians for the Schur complement.
          ald.invert_keypoint_hessians();
        }
      });

  // Reduce per-host errors into the scalar output.
  for (const auto& rld : ald_vec) error += rld.error;
}
// Marginalizes (Schur-complements) the landmarks out of a RelLinData,
// producing the reduced pose-only system H, b over the relative poses in
// rld.order: H and b are (POSE_SIZE * num_rel_poses) sized and assembled as
//   H = Hpp - Hpl Hll^-1 Hlp,   b = bp - Hpl Hll^-1 bl,
// using the pre-inverted landmark Hessians rld.Hllinv.
template <class Scalar_>
void ScBundleAdjustmentBase<Scalar_>::linearizeRel(const RelLinData& rld,
                                                   MatX& H, VecX& b) {
  //  std::cout << "linearizeRel: KF " << frame_states.size() << " obs "
  //            << obs.size() << std::endl;

  // Do schur complement
  size_t msize = rld.order.size();
  H.setZero(POSE_SIZE * msize, POSE_SIZE * msize);
  b.setZero(POSE_SIZE * msize);

  for (size_t i = 0; i < rld.order.size(); i++) {
    const FrameRelLinData& frld = rld.Hpppl.at(i);

    // Pose-pose diagonal block and pose gradient for relative pose i.
    H.template block<POSE_SIZE, POSE_SIZE>(POSE_SIZE * i, POSE_SIZE * i) +=
        frld.Hpp;
    b.template segment<POSE_SIZE>(POSE_SIZE * i) += frld.bp;

    // Subtract the landmark contributions for every landmark seen in this
    // relative pose.
    for (size_t j = 0; j < frld.lm_id.size(); j++) {
      Eigen::Matrix<Scalar, POSE_SIZE, 3> H_pl_H_ll_inv;
      int lm_id = frld.lm_id[j];

      H_pl_H_ll_inv = frld.Hpl[j] * rld.Hllinv.at(lm_id);
      b.template segment<POSE_SIZE>(POSE_SIZE * i) -=
          H_pl_H_ll_inv * rld.bl.at(lm_id);

      // Each landmark couples all relative poses that observe it: fill the
      // corresponding off-diagonal (and diagonal) pose-pose blocks.
      const auto& other_obs = rld.lm_to_obs.at(lm_id);
      for (size_t k = 0; k < other_obs.size(); k++) {
        const FrameRelLinData& frld_other = rld.Hpppl.at(other_obs[k].first);
        int other_i = other_obs[k].first;

        Eigen::Matrix<Scalar, 3, POSE_SIZE> H_l_p_other =
            frld_other.Hpl[other_obs[k].second].transpose();

        H.template block<POSE_SIZE, POSE_SIZE>(
            POSE_SIZE * i, POSE_SIZE * other_i) -= H_pl_H_ll_inv * H_l_p_other;
      }
    }
  }
}
// Numerical diagnostic for the nullspace of the marginalization prior
// (H, b): constructs increments that should be unobservable -- global
// translation (x, y, z) and global rotation (roll, pitch, yaw; for VIO only
// yaw is truly unobservable) -- plus one random increment, evaluates the
// quadratic form x^T H x and the linear term x^T b for each, and returns
// xHx + xb as a 7-vector ordered [x, y, z, roll, pitch, yaw, random].
// Entries that grow over time relative to the random one indicate spurious
// information accumulated on unobservable degrees of freedom.
//
// Fix vs. original: removed an unused local `Vec3d trans;` in the
// mean-translation loop (it was declared but never read there).
template <class Scalar_>
Eigen::VectorXd ScBundleAdjustmentBase<Scalar_>::checkNullspace(
    const MatX& H, const VecX& b, const AbsOrderMap& order,
    const Eigen::aligned_map<int64_t, PoseVelBiasStateWithLin<Scalar>>&
        frame_states,
    const Eigen::aligned_map<int64_t, PoseStateWithLin<Scalar>>& frame_poses,
    bool verbose) {
  using Vec3d = Eigen::Vector3d;
  using VecXd = Eigen::VectorXd;
  using Mat3d = Eigen::Matrix3d;
  using MatXd = Eigen::MatrixXd;
  using SO3d = Sophus::SO3d;

  BASALT_ASSERT(size_t(H.cols()) == order.total_size);
  size_t marg_size = order.total_size;

  // Idea: We construct increments that we know should lie in the null-space
  // of the prior from marginalized observations (except for the initial pose
  // prior we set at initialization) --> shift global translations (x,y,z
  // separately), or global rotations (r,p,y separately); for VIO only yaw
  // rotation shift is in nullspace. Compared to a random increment, we
  // expect them to stay small (at initial level of the initial pose prior).
  // If they increase over time, we accumulate spurious information on
  // unobservable degrees of freedom.
  //
  // Poses are cam-to-world and we use left-increment in optimization, so
  // perturbations are in world frame. Hence translational increments are
  // independent. For rotational increments we also need to rotate
  // translations and velocities, both of which are expressed in world frame.
  // For translations, we move the center of rotation to the camera center
  // centroid for better numerics.

  VecXd inc_x, inc_y, inc_z, inc_roll, inc_pitch, inc_yaw;
  inc_x.setZero(marg_size);
  inc_y.setZero(marg_size);
  inc_z.setZero(marg_size);
  inc_roll.setZero(marg_size);
  inc_pitch.setZero(marg_size);
  inc_yaw.setZero(marg_size);

  int num_trans = 0;
  Vec3d mean_trans;
  mean_trans.setZero();

  // Compute mean translation (centroid of the linearization-point positions
  // of all states in the marginalization order).
  // NOTE(review): assumes order.abs_order_map is non-empty, otherwise the
  // division below is 0/0 -- confirm callers guarantee this.
  for (const auto& kv : order.abs_order_map) {
    if (kv.second.second == POSE_SIZE) {
      mean_trans += frame_poses.at(kv.first)
                        .getPoseLin()
                        .translation()
                        .template cast<double>();
      num_trans++;
    } else if (kv.second.second == POSE_VEL_BIAS_SIZE) {
      mean_trans += frame_states.at(kv.first)
                        .getStateLin()
                        .T_w_i.translation()
                        .template cast<double>();
      num_trans++;
    } else {
      std::cerr << "Unknown size of the state: " << kv.second.second
                << std::endl;
      std::abort();
    }
  }
  mean_trans /= num_trans;

  double eps = 0.01;

  // Compute nullspace increments
  for (const auto& kv : order.abs_order_map) {
    // Pure translational increments, and the rotational part of the
    // roll/pitch/yaw increments (state layout: [trans(3), rot(3), ...]).
    inc_x(kv.second.first + 0) = eps;
    inc_y(kv.second.first + 1) = eps;
    inc_z(kv.second.first + 2) = eps;
    inc_roll(kv.second.first + 3) = eps;
    inc_pitch(kv.second.first + 4) = eps;
    inc_yaw(kv.second.first + 5) = eps;

    Vec3d trans;
    if (kv.second.second == POSE_SIZE) {
      trans = frame_poses.at(kv.first)
                  .getPoseLin()
                  .translation()
                  .template cast<double>();
    } else if (kv.second.second == POSE_VEL_BIAS_SIZE) {
      trans = frame_states.at(kv.first)
                  .getStateLin()
                  .T_w_i.translation()
                  .template cast<double>();
    } else {
      BASALT_ASSERT(false);
    }

    // combine rotation with global translation to make it rotation around
    // translation centroid, not around origin (better numerics). Note that
    // velocities are not affected by global translation.
    trans -= mean_trans;

    // Jacobian of translation w.r.t. the rotation increment (one column each
    // for the 3 different increments)
    Mat3d J = -SO3d::hat(trans);
    J *= eps;

    inc_roll.template segment<3>(kv.second.first) = J.col(0);
    inc_pitch.template segment<3>(kv.second.first) = J.col(1);
    inc_yaw.template segment<3>(kv.second.first) = J.col(2);

    if (kv.second.second == POSE_VEL_BIAS_SIZE) {
      Vec3d vel = frame_states.at(kv.first)
                      .getStateLin()
                      .vel_w_i.template cast<double>();

      // Jacobian of velocity w.r.t. the rotation increment (one column each
      // for the 3 different increments)
      Mat3d J_vel = -SO3d::hat(vel);
      J_vel *= eps;

      inc_roll.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(0);
      inc_pitch.template segment<3>(kv.second.first + POSE_SIZE) =
          J_vel.col(1);
      inc_yaw.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(2);
    }
  }

  // Normalize so all test increments have unit norm, comparable to the
  // random reference increment.
  inc_x.normalize();
  inc_y.normalize();
  inc_z.normalize();
  inc_roll.normalize();
  inc_pitch.normalize();
  inc_yaw.normalize();

  //  std::cout << "inc_x   " << inc_x.transpose() << std::endl;
  //  std::cout << "inc_y   " << inc_y.transpose() << std::endl;
  //  std::cout << "inc_z   " << inc_z.transpose() << std::endl;
  //  std::cout << "inc_yaw " << inc_yaw.transpose() << std::endl;

  VecXd inc_random;
  inc_random.setRandom(marg_size);
  inc_random.normalize();

  // Evaluate the quadratic and linear terms in double precision.
  MatXd H_d = H.template cast<double>();
  VecXd b_d = b.template cast<double>();

  VecXd xHx(7);
  VecXd xb(7);

  xHx[0] = inc_x.transpose() * H_d * inc_x;
  xHx[1] = inc_y.transpose() * H_d * inc_y;
  xHx[2] = inc_z.transpose() * H_d * inc_z;
  xHx[3] = inc_roll.transpose() * H_d * inc_roll;
  xHx[4] = inc_pitch.transpose() * H_d * inc_pitch;
  xHx[5] = inc_yaw.transpose() * H_d * inc_yaw;
  xHx[6] = inc_random.transpose() * H_d * inc_random;

  // b == J^T r, so the increments should also lie in left-nullspace of b
  xb[0] = inc_x.transpose() * b_d;
  xb[1] = inc_y.transpose() * b_d;
  xb[2] = inc_z.transpose() * b_d;
  xb[3] = inc_roll.transpose() * b_d;
  xb[4] = inc_pitch.transpose() * b_d;
  xb[5] = inc_yaw.transpose() * b_d;
  xb[6] = inc_random.transpose() * b_d;

  if (verbose) {
    std::cout << "nullspace x_trans: " << xHx[0] << " + " << xb[0] << std::endl;
    std::cout << "nullspace y_trans: " << xHx[1] << " + " << xb[1] << std::endl;
    std::cout << "nullspace z_trans: " << xHx[2] << " + " << xb[2] << std::endl;
    std::cout << "nullspace roll   : " << xHx[3] << " + " << xb[3] << std::endl;
    std::cout << "nullspace pitch  : " << xHx[4] << " + " << xb[4] << std::endl;
    std::cout << "nullspace yaw    : " << xHx[5] << " + " << xb[5] << std::endl;
    std::cout << "nullspace random : " << xHx[6] << " + " << xb[6] << std::endl;
  }

  return xHx + xb;
}
// Returns the eigenvalue spectrum of H for diagnostics.
//
// SelfAdjointEigenSolver is used so the returned spectrum is guaranteed to
// be real (no numerically complex eigenvalues), and the decomposition runs
// in double precision to avoid inaccuracies from lower-precision input.
template <class Scalar_>
Eigen::VectorXd ScBundleAdjustmentBase<Scalar_>::checkEigenvalues(
    const MatX& H, bool verbose) {
  const Eigen::MatrixXd H_double = H.template cast<double>();

  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> solver(H_double);
  if (solver.info() != Eigen::Success) {
    BASALT_LOG_FATAL("eigen solver failed");
  }

  if (verbose) {
    std::cout << "EV:\n" << solver.eigenvalues() << std::endl;
  }

  return solver.eigenvalues();
}
// Evaluates the IMU-related energy terms at the *current* state estimates:
// the preintegrated IMU residual energy (imu_error) and the gyro / accel
// bias random-walk energies (bg_error, ba_error). Measurements with no data
// (dt == 0) or whose start/end state is not part of `aom` are skipped.
// All three outputs are overwritten (reset to 0 first).
template <class Scalar_>
void ScBundleAdjustmentBase<Scalar_>::computeImuError(
    const AbsOrderMap& aom, Scalar& imu_error, Scalar& bg_error,
    Scalar& ba_error,
    const Eigen::aligned_map<int64_t, PoseVelBiasStateWithLin<Scalar>>& states,
    const Eigen::aligned_map<int64_t, IntegratedImuMeasurement<Scalar>>&
        imu_meas,
    const Vec3& gyro_bias_weight, const Vec3& accel_bias_weight,
    const Vec3& g) {
  imu_error = 0;
  bg_error = 0;
  ba_error = 0;
  for (const auto& kv : imu_meas) {
    if (kv.second.get_dt_ns() != 0) {
      int64_t start_t = kv.second.get_start_t_ns();
      int64_t end_t = kv.second.get_start_t_ns() + kv.second.get_dt_ns();

      // Both endpoint states must be inside the current optimization window.
      if (aom.abs_order_map.count(start_t) == 0 ||
          aom.abs_order_map.count(end_t) == 0)
        continue;

      PoseVelBiasStateWithLin<Scalar> start_state = states.at(start_t);
      PoseVelBiasStateWithLin<Scalar> end_state = states.at(end_t);

      // Preintegration residual, evaluated with the start state's biases.
      const typename PoseVelState<Scalar>::VecN res = kv.second.residual(
          start_state.getState(), g, end_state.getState(),
          start_state.getState().bias_gyro, start_state.getState().bias_accel);

      //      std::cout << "res: (" << start_t << "," << end_t << ") "
      //                << res.transpose() << std::endl;

      //      std::cerr << "cov_inv:\n" << kv.second.get_cov_inv() <<
      //      std::endl;

      // Energy: 0.5 * r^T * Sigma^-1 * r.
      imu_error +=
          Scalar(0.5) * res.transpose() * kv.second.get_cov_inv() * res;

      // Bias random-walk weights are per unit time; scale by 1/dt.
      Scalar dt = kv.second.get_dt_ns() * Scalar(1e-9);
      {
        Vec3 gyro_bias_weight_dt = gyro_bias_weight / dt;
        Vec3 res_bg =
            start_state.getState().bias_gyro - end_state.getState().bias_gyro;

        bg_error += Scalar(0.5) * res_bg.transpose() *
                    gyro_bias_weight_dt.asDiagonal() * res_bg;
      }

      {
        Vec3 accel_bias_weight_dt = accel_bias_weight / dt;
        Vec3 res_ba = start_state.getState().bias_accel -
                      end_state.getState().bias_accel;

        ba_error += Scalar(0.5) * res_ba.transpose() *
                    accel_bias_weight_dt.asDiagonal() * res_ba;
      }
    }
  }
}
// //////////////////////////////////////////////////////////////////
// instantiate templates
// Note: double specialization is unconditional, b/c NfrMapper depends on it.
//#ifdef BASALT_INSTANTIATIONS_DOUBLE
template class ScBundleAdjustmentBase<double>;
//#endif
#ifdef BASALT_INSTANTIATIONS_FLOAT
template class ScBundleAdjustmentBase<float>;
#endif
} // namespace basalt

View File

@@ -0,0 +1,270 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <basalt/vi_estimator/sqrt_ba_base.h>
#include <tbb/parallel_for.h>
namespace basalt {
// Numerical diagnostic for the nullspace of the marginalization prior in
// `mld`: constructs increments that should be unobservable -- global
// translation (x, y, z) and global rotation (roll, pitch, yaw; for VIO only
// yaw is truly unobservable) -- plus one random increment, evaluates the
// quadratic form x^T H x and the linear term x^T b for each, and returns
// xHx + xb as a 7-vector ordered [x, y, z, roll, pitch, yaw, random]. If
// mld stores a square-root factor (mld.is_sqrt), the full H and b are
// recovered by squaring in double precision first.
//
// Fix vs. original: removed an unused local `Vec3d trans;` in the
// mean-translation loop (it was declared but never read there).
template <class Scalar_>
Eigen::VectorXd SqrtBundleAdjustmentBase<Scalar_>::checkNullspace(
    const MargLinData<Scalar_>& mld,
    const Eigen::aligned_map<int64_t, PoseVelBiasStateWithLin<Scalar>>&
        frame_states,
    const Eigen::aligned_map<int64_t, PoseStateWithLin<Scalar>>& frame_poses,
    bool verbose) {
  using Vec3d = Eigen::Vector3d;
  using VecXd = Eigen::VectorXd;
  using Mat3d = Eigen::Matrix3d;
  using MatXd = Eigen::MatrixXd;
  using SO3d = Sophus::SO3d;

  BASALT_ASSERT_STREAM(size_t(mld.H.cols()) == mld.order.total_size,
                       mld.H.cols() << " " << mld.order.total_size);
  size_t marg_size = mld.order.total_size;

  // Idea: We construct increments that we know should lie in the null-space
  // of the prior from marginalized observations (except for the initial pose
  // prior we set at initialization) --> shift global translations (x,y,z
  // separately), or global rotations (r,p,y separately); for VIO only yaw
  // rotation shift is in nullspace. Compared to a random increment, we
  // expect them to stay small (at initial level of the initial pose prior).
  // If they increase over time, we accumulate spurious information on
  // unobservable degrees of freedom.
  //
  // Poses are cam-to-world and we use left-increment in optimization, so
  // perturbations are in world frame. Hence translational increments are
  // independent. For rotational increments we also need to rotate
  // translations and velocities, both of which are expressed in world frame.
  // For translations, we move the center of rotation to the camera center
  // centroid for better numerics.

  VecXd inc_x, inc_y, inc_z, inc_roll, inc_pitch, inc_yaw;
  inc_x.setZero(marg_size);
  inc_y.setZero(marg_size);
  inc_z.setZero(marg_size);
  inc_roll.setZero(marg_size);
  inc_pitch.setZero(marg_size);
  inc_yaw.setZero(marg_size);

  int num_trans = 0;
  Vec3d mean_trans;
  mean_trans.setZero();

  // Compute mean translation (centroid of the linearization-point positions
  // of all states in the marginalization order).
  // NOTE(review): assumes mld.order.abs_order_map is non-empty, otherwise
  // the division below is 0/0 -- confirm callers guarantee this.
  for (const auto& kv : mld.order.abs_order_map) {
    if (kv.second.second == POSE_SIZE) {
      mean_trans += frame_poses.at(kv.first)
                        .getPoseLin()
                        .translation()
                        .template cast<double>();
      num_trans++;
    } else if (kv.second.second == POSE_VEL_BIAS_SIZE) {
      mean_trans += frame_states.at(kv.first)
                        .getStateLin()
                        .T_w_i.translation()
                        .template cast<double>();
      num_trans++;
    } else {
      std::cerr << "Unknown size of the state: " << kv.second.second
                << std::endl;
      std::abort();
    }
  }
  mean_trans /= num_trans;

  double eps = 0.01;

  // Compute nullspace increments
  for (const auto& kv : mld.order.abs_order_map) {
    // Pure translational increments, and the rotational part of the
    // roll/pitch/yaw increments (state layout: [trans(3), rot(3), ...]).
    inc_x(kv.second.first + 0) = eps;
    inc_y(kv.second.first + 1) = eps;
    inc_z(kv.second.first + 2) = eps;
    inc_roll(kv.second.first + 3) = eps;
    inc_pitch(kv.second.first + 4) = eps;
    inc_yaw(kv.second.first + 5) = eps;

    Vec3d trans;
    if (kv.second.second == POSE_SIZE) {
      trans = frame_poses.at(kv.first)
                  .getPoseLin()
                  .translation()
                  .template cast<double>();
    } else if (kv.second.second == POSE_VEL_BIAS_SIZE) {
      trans = frame_states.at(kv.first)
                  .getStateLin()
                  .T_w_i.translation()
                  .template cast<double>();
    } else {
      BASALT_ASSERT(false);
    }

    // combine rotation with global translation to make it rotation around
    // translation centroid, not around origin (better numerics). Note that
    // velocities are not affected by global translation.
    trans -= mean_trans;

    // Jacobian of translation w.r.t. the rotation increment (one column each
    // for the 3 different increments)
    Mat3d J = -SO3d::hat(trans);
    J *= eps;

    inc_roll.template segment<3>(kv.second.first) = J.col(0);
    inc_pitch.template segment<3>(kv.second.first) = J.col(1);
    inc_yaw.template segment<3>(kv.second.first) = J.col(2);

    if (kv.second.second == POSE_VEL_BIAS_SIZE) {
      Vec3d vel = frame_states.at(kv.first)
                      .getStateLin()
                      .vel_w_i.template cast<double>();

      // Jacobian of velocity w.r.t. the rotation increment (one column each
      // for the 3 different increments)
      Mat3d J_vel = -SO3d::hat(vel);
      J_vel *= eps;

      inc_roll.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(0);
      inc_pitch.template segment<3>(kv.second.first + POSE_SIZE) =
          J_vel.col(1);
      inc_yaw.template segment<3>(kv.second.first + POSE_SIZE) = J_vel.col(2);
    }
  }

  // Normalize so all test increments have unit norm, comparable to the
  // random reference increment.
  inc_x.normalize();
  inc_y.normalize();
  inc_z.normalize();
  inc_roll.normalize();
  inc_pitch.normalize();
  inc_yaw.normalize();

  //  std::cout << "inc_x   " << inc_x.transpose() << std::endl;
  //  std::cout << "inc_y   " << inc_y.transpose() << std::endl;
  //  std::cout << "inc_z   " << inc_z.transpose() << std::endl;
  //  std::cout << "inc_yaw " << inc_yaw.transpose() << std::endl;

  VecXd inc_random;
  inc_random.setRandom(marg_size);
  inc_random.normalize();

  // Recover the full H and b in double precision; if the prior is stored in
  // square-root form, square it first (H = J^T J, b = J^T r).
  MatXd H_d;
  VecXd b_d;
  if (mld.is_sqrt) {
    MatXd H_sqrt_d = mld.H.template cast<double>();
    VecXd b_sqrt_d = mld.b.template cast<double>();
    H_d = H_sqrt_d.transpose() * H_sqrt_d;
    b_d = H_sqrt_d.transpose() * b_sqrt_d;
  } else {
    H_d = mld.H.template cast<double>();
    b_d = mld.b.template cast<double>();
  }

  VecXd xHx(7);
  VecXd xb(7);

  xHx[0] = inc_x.transpose() * H_d * inc_x;
  xHx[1] = inc_y.transpose() * H_d * inc_y;
  xHx[2] = inc_z.transpose() * H_d * inc_z;
  xHx[3] = inc_roll.transpose() * H_d * inc_roll;
  xHx[4] = inc_pitch.transpose() * H_d * inc_pitch;
  xHx[5] = inc_yaw.transpose() * H_d * inc_yaw;
  xHx[6] = inc_random.transpose() * H_d * inc_random;

  // b == J^T r, so the increments should also lie in left-nullspace of b
  xb[0] = inc_x.transpose() * b_d;
  xb[1] = inc_y.transpose() * b_d;
  xb[2] = inc_z.transpose() * b_d;
  xb[3] = inc_roll.transpose() * b_d;
  xb[4] = inc_pitch.transpose() * b_d;
  xb[5] = inc_yaw.transpose() * b_d;
  xb[6] = inc_random.transpose() * b_d;

  if (verbose) {
    std::cout << "nullspace x_trans: " << xHx[0] << " + " << xb[0] << std::endl;
    std::cout << "nullspace y_trans: " << xHx[1] << " + " << xb[1] << std::endl;
    std::cout << "nullspace z_trans: " << xHx[2] << " + " << xb[2] << std::endl;
    std::cout << "nullspace roll   : " << xHx[3] << " + " << xb[3] << std::endl;
    std::cout << "nullspace pitch  : " << xHx[4] << " + " << xb[4] << std::endl;
    std::cout << "nullspace yaw    : " << xHx[5] << " + " << xb[5] << std::endl;
    std::cout << "nullspace random : " << xHx[6] << " + " << xb[6] << std::endl;
  }

  return xHx + xb;
}
// Returns the eigenvalue spectrum of the (full, squared) marginalization
// prior Hessian for diagnostics.
//
// We examine the eigenvalues of J^T J rather than the singular values of J
// so that (numerically) negative eigenvalues become visible. The squaring
// and the eigen-decomposition are done in double precision to limit the
// accuracy loss the squaring introduces, and SelfAdjointEigenSolver
// guarantees a real spectrum.
template <class Scalar_>
Eigen::VectorXd SqrtBundleAdjustmentBase<Scalar_>::checkEigenvalues(
    const MargLinData<Scalar_>& mld, bool verbose) {
  Eigen::MatrixXd H_full;
  if (mld.is_sqrt) {
    // Stored matrix is a square-root (Jacobian-like) factor: square it.
    const Eigen::MatrixXd sqrt_H = mld.H.template cast<double>();
    H_full = sqrt_H.transpose() * sqrt_H;
  } else {
    H_full = mld.H.template cast<double>();
  }

  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> solver(H_full);
  if (solver.info() != Eigen::Success) {
    BASALT_LOG_FATAL("eigen solver failed");
  }

  if (verbose) {
    std::cout << "EV:\n" << solver.eigenvalues() << std::endl;
  }

  return solver.eigenvalues();
}
// //////////////////////////////////////////////////////////////////
// instantiate templates
#ifdef BASALT_INSTANTIATIONS_DOUBLE
template class SqrtBundleAdjustmentBase<double>;
#endif
#ifdef BASALT_INSTANTIATIONS_FLOAT
template class SqrtBundleAdjustmentBase<float>;
#endif
} // namespace basalt

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,176 @@
/**
BSD 3-Clause License
This file is part of the Basalt project.
https://gitlab.com/VladyslavUsenko/basalt.git
Copyright (c) 2019, Vladyslav Usenko and Nikolaus Demmel.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <basalt/vi_estimator/vio_estimator.h>

#include <cmath>
#include <limits>

#include <basalt/vi_estimator/sqrt_keypoint_vio.h>
#include <basalt/vi_estimator/sqrt_keypoint_vo.h>
namespace basalt {
namespace {

// Constructs a VIO (with IMU) or VO (vision-only) estimator templated on
// the floating-point scalar type, returned through the type-erased
// VioEstimatorBase::Ptr so callers do not depend on Scalar.
template <class Scalar>
VioEstimatorBase::Ptr factory_helper(const VioConfig& config,
                                     const Calibration<double>& cam,
                                     const Eigen::Vector3d& g, bool use_imu) {
  VioEstimatorBase::Ptr res;

  if (use_imu) {
    // With IMU the estimator additionally needs the gravity vector g.
    res.reset(new SqrtKeypointVioEstimator<Scalar>(g, cam, config));
  } else {
    res.reset(new SqrtKeypointVoEstimator<Scalar>(cam, config));
  }

  return res;
}

}  // namespace
// Factory entry point: selects the estimator variant (VIO with IMU vs
// vision-only VO) and the floating-point precision (double vs float) at
// runtime. Each precision is only available if the corresponding
// BASALT_INSTANTIATIONS_* template instantiation was compiled in;
// otherwise a fatal error is logged.
VioEstimatorBase::Ptr VioEstimatorFactory::getVioEstimator(
    const VioConfig& config, const Calibration<double>& cam,
    const Eigen::Vector3d& g, bool use_imu, bool use_double) {
  if (use_double) {
#ifdef BASALT_INSTANTIATIONS_DOUBLE
    return factory_helper<double>(config, cam, g, use_imu);
#else
    // NOTE(review): assumes BASALT_LOG_FATAL does not return; otherwise
    // control would fall off the end of this non-void function -- confirm
    // the macro aborts/throws.
    BASALT_LOG_FATAL("Compiled without double support.");
#endif
  } else {
#ifdef BASALT_INSTANTIATIONS_FLOAT
    return factory_helper<float>(config, cam, g, use_imu);
#else
    BASALT_LOG_FATAL("Compiled without float support.");
#endif
  }
}
// Aligns the estimated trajectory to ground truth with a closed-form rigid
// SE(3) alignment (Umeyama-style SVD, no scale) and returns the resulting
// ATE RMSE. Ground-truth positions gt_t_w_i are transformed *in place* into
// the estimate's frame; alignment details are printed to stdout.
//
// filter_t_ns / filter_t_w_i: timestamps [ns] and positions of the estimate
//   (assumed index-aligned and of equal length -- TODO confirm at callers).
// gt_t_ns / gt_t_w_i: ground-truth timestamps and positions.
//
// Fix vs. original: with zero associations (no time overlap) the original
// fell through to 0/0 divisions and Eigen reductions over empty matrices,
// silently producing NaN; this is now an explicit early return of NaN.
double alignSVD(const std::vector<int64_t>& filter_t_ns,
                const Eigen::aligned_vector<Eigen::Vector3d>& filter_t_w_i,
                const std::vector<int64_t>& gt_t_ns,
                Eigen::aligned_vector<Eigen::Vector3d>& gt_t_w_i) {
  Eigen::aligned_vector<Eigen::Vector3d> est_associations;
  Eigen::aligned_vector<Eigen::Vector3d> gt_associations;

  // Associate every estimated position with a linearly interpolated
  // ground-truth position at the same timestamp.
  for (size_t i = 0; i < filter_t_w_i.size(); i++) {
    int64_t t_ns = filter_t_ns[i];

    // Find the first GT timestamp after t_ns, then step back to the GT
    // sample at or before t_ns.
    size_t j;
    for (j = 0; j < gt_t_ns.size(); j++) {
      if (gt_t_ns.at(j) > t_ns) break;
    }
    // Intentionally wraps to SIZE_MAX when t_ns precedes all GT stamps (or
    // GT is empty); the bounds check below then skips this sample.
    j--;

    if (j >= gt_t_ns.size() - 1) {
      continue;
    }

    double dt_ns = t_ns - gt_t_ns.at(j);
    double int_t_ns = gt_t_ns.at(j + 1) - gt_t_ns.at(j);

    BASALT_ASSERT_STREAM(dt_ns >= 0, "dt_ns " << dt_ns);
    BASALT_ASSERT_STREAM(int_t_ns > 0, "int_t_ns " << int_t_ns);

    // Skip if the interval between gt larger than 100ms
    if (int_t_ns > 1.1e8) continue;

    double ratio = dt_ns / int_t_ns;

    BASALT_ASSERT(ratio >= 0);
    BASALT_ASSERT(ratio < 1);

    Eigen::Vector3d gt = (1 - ratio) * gt_t_w_i[j] + ratio * gt_t_w_i[j + 1];

    gt_associations.emplace_back(gt);
    est_associations.emplace_back(filter_t_w_i[i]);
  }

  int num_kfs = est_associations.size();

  // Degenerate case: no temporal overlap between estimate and ground truth.
  if (num_kfs == 0) {
    std::cout << "alignSVD: no associations between estimate and ground truth"
              << std::endl;
    return std::numeric_limits<double>::quiet_NaN();
  }

  Eigen::Matrix<double, 3, Eigen::Dynamic> gt, est;
  gt.setZero(3, num_kfs);
  est.setZero(3, num_kfs);

  for (size_t i = 0; i < est_associations.size(); i++) {
    gt.col(i) = gt_associations[i];
    est.col(i) = est_associations[i];
  }

  // Closed-form rotation: SVD of the centered correlation matrix, with a
  // reflection fix (S) to guarantee a proper rotation.
  Eigen::Vector3d mean_gt = gt.rowwise().mean();
  Eigen::Vector3d mean_est = est.rowwise().mean();

  gt.colwise() -= mean_gt;
  est.colwise() -= mean_est;

  Eigen::Matrix3d cov = gt * est.transpose();

  Eigen::JacobiSVD<Eigen::Matrix3d> svd(
      cov, Eigen::ComputeFullU | Eigen::ComputeFullV);

  Eigen::Matrix3d S;
  S.setIdentity();
  if (svd.matrixU().determinant() * svd.matrixV().determinant() < 0)
    S(2, 2) = -1;

  Eigen::Matrix3d rot_gt_est = svd.matrixU() * S * svd.matrixV().transpose();
  Eigen::Vector3d trans = mean_gt - rot_gt_est * mean_est;

  Sophus::SE3d T_gt_est(rot_gt_est, trans);
  Sophus::SE3d T_est_gt = T_gt_est.inverse();

  // Bring ground truth into the estimate's frame (in place, as documented).
  for (size_t i = 0; i < gt_t_w_i.size(); i++) {
    gt_t_w_i[i] = T_est_gt * gt_t_w_i[i];
  }

  // RMSE of the aligned association residuals.
  double error = 0;
  for (size_t i = 0; i < est_associations.size(); i++) {
    est_associations[i] = T_gt_est * est_associations[i];
    Eigen::Vector3d res = est_associations[i] - gt_associations[i];

    error += res.transpose() * res;
  }

  error /= est_associations.size();
  error = std::sqrt(error);

  std::cout << "T_align\n" << T_gt_est.matrix() << std::endl;
  std::cout << "error " << error << std::endl;
  std::cout << "number of associations " << num_kfs << std::endl;

  return error;
}
} // namespace basalt