This commit is contained in:
Ivan
2022-04-05 11:42:28 +03:00
commit 6dc0eb0fcf
5565 changed files with 1200500 additions and 0 deletions

View File

@@ -0,0 +1,89 @@
#pragma once
#include <unordered_map>
#include <Eigen/Dense>
#include <basalt/utils/cast_utils.hpp>
namespace basalt {
// TODO: expand IndexedBlocks to small class / struct that also holds info on
// block size and number of blocks, so we don't have to pass it around all the
// time and we can directly implement things like adding diagonal and matrix
// vector products in this sparse block diagonal matrix.

// map of camera index to block; used to represent sparse block diagonal matrix
// (indices not present in the map are implicitly zero blocks)
template <typename Scalar>
using IndexedBlocks =
    std::unordered_map<size_t,
                       Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>>;
// Scale the dimensions of JTJ the same way jacobian column scaling with a
// diagonal matrix D would scale J beforehand: for the jacobian we would use
// JD, so for JTJ we use D JTJ D.
template <typename Scalar>
void scale_jacobians(
    IndexedBlocks<Scalar>& block_diagonal, size_t num_blocks, size_t block_size,
    const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& scaling_vector) {
  BASALT_ASSERT(num_blocks * block_size ==
                unsigned_cast(scaling_vector.size()));
  for (auto& [cam_idx, block] : block_diagonal) {
    // Scaling entries that belong to this camera's diagonal block.
    const auto scale = scaling_vector.segment(block_size * cam_idx, block_size);
    block = scale.asDiagonal() * block * scale.asDiagonal();
  }
}
// Add a diagonal to a block-diagonal matrix; blocks not yet present in the
// hash map are assumed to be 0 and are created on demand.
template <typename Scalar>
void add_diagonal(IndexedBlocks<Scalar>& block_diagonal, size_t num_blocks,
                  size_t block_size,
                  const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& diagonal) {
  BASALT_ASSERT(num_blocks * block_size == unsigned_cast(diagonal.size()));
  for (size_t idx = 0; idx < num_blocks; ++idx) {
    // Diagonal entries belonging to block `idx`.
    const auto diag_seg = diagonal.segment(block_size * idx, block_size);
    auto [it, inserted] = block_diagonal.try_emplace(idx);
    if (inserted) {
      // Newly created block: initialize directly from the diagonal.
      it->second = diag_seg.asDiagonal();
    } else {
      it->second += diag_seg.asDiagonal();
    }
  }
}
// Sums up diagonal blocks in a hash map; intended as per-thread storage for
// a parallel reduction (partial results are merged via join()).
template <typename Scalar>
class BlockDiagonalAccumulator {
 public:
  using VecX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
  using MatX = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;

  // Add `block` at index `idx`; sums into an existing block if one is
  // already present, otherwise takes ownership by moving.
  inline void add(size_t idx, MatX&& block) {
    auto it = block_diagonal_.find(idx);
    if (it == block_diagonal_.end()) {
      block_diagonal_.emplace(idx, std::move(block));
    } else {
      it->second += block;
    }
  }

  // Add a full diagonal to the accumulated blocks (see add_diagonal()).
  inline void add_diag(size_t num_blocks, size_t block_size,
                       const VecX& diagonal) {
    add_diagonal(block_diagonal_, num_blocks, block_size, diagonal);
  }

  // Merge another accumulator into this one; moves blocks where possible.
  inline void join(BlockDiagonalAccumulator& b) {
    for (auto& [k, v] : b.block_diagonal_) {
      auto it = block_diagonal_.find(k);
      if (it == block_diagonal_.end()) {
        block_diagonal_.emplace(k, std::move(v));
      } else {
        it->second += v;
      }
    }
  }

  IndexedBlocks<Scalar> block_diagonal_;
};
} // namespace basalt

View File

@@ -0,0 +1,235 @@
#pragma once
#include <basalt/imu/preintegration.h>
#include <basalt/optimization/accumulator.h>
#include <basalt/utils/imu_types.h>
namespace basalt {
// Linearization block for a single preintegrated IMU measurement connecting
// two consecutive pose-vel-bias frame states. Stores the dense jacobian Jp
// (columns ordered [start state | end state]) and the weighted residual r.
// All referenced objects (measurement, linearization data, ordering map)
// must outlive this block -- only pointers / references are stored.
template <class Scalar_>
class ImuBlock {
 public:
  using Scalar = Scalar_;
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
  using VecX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
  using MatX = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;

  // @param meas preintegrated IMU measurement between the two states
  // @param imu_lin_data gravity and bias random-walk weights
  // @param aom absolute ordering map: frame timestamp -> column offset
  ImuBlock(const IntegratedImuMeasurement<Scalar>* meas,
           const ImuLinData<Scalar>* imu_lin_data, const AbsOrderMap& aom)
      : imu_meas(meas), imu_lin_data(imu_lin_data), aom(aom) {
    // One POSE_VEL_BIAS_SIZE-dim residual, jacobians w.r.t. both states.
    Jp.resize(POSE_VEL_BIAS_SIZE, 2 * POSE_VEL_BIAS_SIZE);
    r.resize(POSE_VEL_BIAS_SIZE);
  }

  // Linearize the IMU residual and the bias random-walk residuals, filling
  // Jp and r. Returns the total weighted error (0.5 * ||sqrt_W res||^2 terms
  // for preintegration, gyro bias and accel bias).
  Scalar linearizeImu(
      const Eigen::aligned_map<int64_t, PoseVelBiasStateWithLin<Scalar>>&
          frame_states) {
    Jp.setZero();
    r.setZero();

    const int64_t start_t = imu_meas->get_start_t_ns();
    const int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns();

    // Column offsets inside Jp: start state first, end state second.
    const size_t start_idx = 0;
    const size_t end_idx = POSE_VEL_BIAS_SIZE;

    PoseVelBiasStateWithLin<Scalar> start_state = frame_states.at(start_t);
    PoseVelBiasStateWithLin<Scalar> end_state = frame_states.at(end_t);

    typename IntegratedImuMeasurement<Scalar>::MatNN d_res_d_start, d_res_d_end;
    typename IntegratedImuMeasurement<Scalar>::MatN3 d_res_d_bg, d_res_d_ba;

    // Jacobians are always evaluated at the linearization point (FEJ).
    typename PoseVelState<Scalar>::VecN res = imu_meas->residual(
        start_state.getStateLin(), imu_lin_data->g, end_state.getStateLin(),
        start_state.getStateLin().bias_gyro,
        start_state.getStateLin().bias_accel, &d_res_d_start, &d_res_d_end,
        &d_res_d_bg, &d_res_d_ba);

    // If either state has a fixed linearization point, re-evaluate the
    // residual at the current state (keeping the FEJ jacobians above).
    if (start_state.isLinearized() || end_state.isLinearized()) {
      res = imu_meas->residual(
          start_state.getState(), imu_lin_data->g, end_state.getState(),
          start_state.getState().bias_gyro, start_state.getState().bias_accel);
    }

    // error
    Scalar imu_error =
        Scalar(0.5) * (imu_meas->get_sqrt_cov_inv() * res).squaredNorm();

    // imu residual linearization (rows 0..8: preintegration residual)
    Jp.template block<9, 9>(0, start_idx) =
        imu_meas->get_sqrt_cov_inv() * d_res_d_start;
    Jp.template block<9, 9>(0, end_idx) =
        imu_meas->get_sqrt_cov_inv() * d_res_d_end;
    Jp.template block<9, 3>(0, start_idx + 9) =
        imu_meas->get_sqrt_cov_inv() * d_res_d_bg;
    Jp.template block<9, 3>(0, start_idx + 12) =
        imu_meas->get_sqrt_cov_inv() * d_res_d_ba;
    r.template segment<9>(0) = imu_meas->get_sqrt_cov_inv() * res;

    // difference between biases; random-walk weight scales with 1/sqrt(dt)
    Scalar dt = imu_meas->get_dt_ns() * Scalar(1e-9);

    // rows 9..11: gyro bias random walk residual
    Vec3 gyro_bias_weight_dt =
        imu_lin_data->gyro_bias_weight_sqrt / std::sqrt(dt);
    Vec3 res_bg =
        start_state.getState().bias_gyro - end_state.getState().bias_gyro;

    Jp.template block<3, 3>(9, start_idx + 9) =
        gyro_bias_weight_dt.asDiagonal();
    Jp.template block<3, 3>(9, end_idx + 9) =
        (-gyro_bias_weight_dt).asDiagonal();
    r.template segment<3>(9) += gyro_bias_weight_dt.asDiagonal() * res_bg;

    Scalar bg_error =
        Scalar(0.5) * (gyro_bias_weight_dt.asDiagonal() * res_bg).squaredNorm();

    // rows 12..14: accel bias random walk residual
    Vec3 accel_bias_weight_dt =
        imu_lin_data->accel_bias_weight_sqrt / std::sqrt(dt);
    Vec3 res_ba =
        start_state.getState().bias_accel - end_state.getState().bias_accel;

    Jp.template block<3, 3>(12, start_idx + 12) =
        accel_bias_weight_dt.asDiagonal();
    Jp.template block<3, 3>(12, end_idx + 12) =
        (-accel_bias_weight_dt).asDiagonal();
    r.template segment<3>(12) += accel_bias_weight_dt.asDiagonal() * res_ba;

    Scalar ba_error =
        Scalar(0.5) *
        (accel_bias_weight_dt.asDiagonal() * res_ba).squaredNorm();

    return imu_error + bg_error + ba_error;
  }

  // Scatter Jp and r into the dense [Q2Jp | Q2r] system at the given row,
  // with columns placed according to the absolute ordering map.
  void add_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r, size_t row_start_idx) const {
    int64_t start_t = imu_meas->get_start_t_ns();
    int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns();

    const size_t start_idx = aom.abs_order_map.at(start_t).first;
    const size_t end_idx = aom.abs_order_map.at(end_t).first;

    Q2Jp.template block<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(row_start_idx,
                                                                start_idx) +=
        Jp.template topLeftCorner<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>();
    Q2Jp.template block<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(row_start_idx,
                                                                end_idx) +=
        Jp.template topRightCorner<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>();

    Q2r.template segment<POSE_VEL_BIAS_SIZE>(row_start_idx) += r;
  }

  // Accumulate this block's normal equations H = Jp^T Jp, b = Jp^T r into
  // the dense accumulator, scattered by absolute state indices.
  void add_dense_H_b(DenseAccumulator<Scalar>& accum) const {
    int64_t start_t = imu_meas->get_start_t_ns();
    int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns();

    const size_t start_idx = aom.abs_order_map.at(start_t).first;
    const size_t end_idx = aom.abs_order_map.at(end_t).first;

    const MatX H = Jp.transpose() * Jp;
    const VecX b = Jp.transpose() * r;

    accum.template addH<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(
        start_idx, start_idx,
        H.template block<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(0, 0));

    accum.template addH<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(
        end_idx, start_idx,
        H.template block<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(
            POSE_VEL_BIAS_SIZE, 0));

    accum.template addH<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(
        start_idx, end_idx,
        H.template block<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(
            0, POSE_VEL_BIAS_SIZE));

    accum.template addH<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(
        end_idx, end_idx,
        H.template block<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>(
            POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE));

    accum.template addB<POSE_VEL_BIAS_SIZE>(
        start_idx, b.template segment<POSE_VEL_BIAS_SIZE>(0));
    accum.template addB<POSE_VEL_BIAS_SIZE>(
        end_idx, b.template segment<POSE_VEL_BIAS_SIZE>(POSE_VEL_BIAS_SIZE));
  }

  // Apply jacobian column scaling; the scaling entries are looked up at the
  // absolute indices of the two involved states.
  void scaleJp_cols(const VecX& jacobian_scaling) {
    int64_t start_t = imu_meas->get_start_t_ns();
    int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns();

    const size_t start_idx = aom.abs_order_map.at(start_t).first;
    const size_t end_idx = aom.abs_order_map.at(end_t).first;

    Jp.template topLeftCorner<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>() *=
        jacobian_scaling.template segment<POSE_VEL_BIAS_SIZE>(start_idx)
            .asDiagonal();
    Jp.template topRightCorner<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>() *=
        jacobian_scaling.template segment<POSE_VEL_BIAS_SIZE>(end_idx)
            .asDiagonal();
  }

  // Accumulate squared column norms of Jp into `res` (for computing the
  // jacobian scaling vector), scattered by absolute state indices.
  void addJp_diag2(VecX& res) const {
    int64_t start_t = imu_meas->get_start_t_ns();
    int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns();

    const size_t start_idx = aom.abs_order_map.at(start_t).first;
    const size_t end_idx = aom.abs_order_map.at(end_t).first;

    res.template segment<POSE_VEL_BIAS_SIZE>(start_idx) +=
        Jp.template topLeftCorner<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>()
            .colwise()
            .squaredNorm();
    res.template segment<POSE_VEL_BIAS_SIZE>(end_idx) +=
        Jp.template topRightCorner<POSE_VEL_BIAS_SIZE, POSE_VEL_BIAS_SIZE>()
            .colwise()
            .squaredNorm();
  }

  // Accumulate this block's model cost change for the given pose increment
  // into l_diff (see derivation below).
  void backSubstitute(const VecX& pose_inc, Scalar& l_diff) {
    int64_t start_t = imu_meas->get_start_t_ns();
    int64_t end_t = imu_meas->get_start_t_ns() + imu_meas->get_dt_ns();

    const size_t start_idx = aom.abs_order_map.at(start_t).first;
    const size_t end_idx = aom.abs_order_map.at(end_t).first;

    // Gather the increments of the two involved states into the local
    // [start | end] column ordering of Jp.
    VecX pose_inc_reduced(2 * POSE_VEL_BIAS_SIZE);
    pose_inc_reduced.template head<POSE_VEL_BIAS_SIZE>() =
        pose_inc.template segment<POSE_VEL_BIAS_SIZE>(start_idx);
    pose_inc_reduced.template tail<POSE_VEL_BIAS_SIZE>() =
        pose_inc.template segment<POSE_VEL_BIAS_SIZE>(end_idx);

    // We want to compute the model cost change. The model function is
    //
    //     L(inc) = F(x) + incT JT r + 0.5 incT JT J inc
    //
    // and thus the expected decrease in cost for the computed increment is
    //
    //     l_diff = L(0) - L(inc)
    //            = - incT JT r - 0.5 incT JT J inc.
    //            = - incT JT (r + 0.5 J inc)
    //            = - (J inc)T (r + 0.5 (J inc))
    VecX Jinc = Jp * pose_inc_reduced;
    l_diff -= Jinc.transpose() * (Scalar(0.5) * Jinc + r);
  }

 protected:
  // NOTE(review): frame_ids appears to be never written or read within this
  // class -- TODO confirm whether it is used elsewhere or can be removed.
  std::array<FrameId, 2> frame_ids;
  MatX Jp;  // dense jacobian, columns [start state | end state]
  VecX r;   // weighted residual

  const IntegratedImuMeasurement<Scalar>* imu_meas;  // not owned
  const ImuLinData<Scalar>* imu_lin_data;            // not owned
  const AbsOrderMap& aom;                            // not owned
};  // class ImuBlock
} // namespace basalt

View File

@@ -0,0 +1,140 @@
#pragma once
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <basalt/optimization/accumulator.h>
#include <basalt/vi_estimator/landmark_database.h>
#include <basalt/linearization/block_diagonal.hpp>
namespace basalt {
// Linearization of a relative pose between a host and a target frame.
template <class Scalar>
struct RelPoseLin {
  using Mat4 = Eigen::Matrix<Scalar, 4, 4>;
  using Mat6 = Eigen::Matrix<Scalar, 6, 6>;

  // 4x4 homogeneous transform; judging by the name presumably maps host
  // coordinates to target coordinates (T_target_host) -- TODO confirm.
  Mat4 T_t_h;
  // Jacobian of the relative pose w.r.t. the absolute host pose.
  Mat6 d_rel_d_h;
  // Jacobian of the relative pose w.r.t. the absolute target pose.
  Mat6 d_rel_d_t;

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
// Abstract interface for a single landmark's linearized observations
// ("landmark block"): holds pose jacobians, landmark jacobians and residuals,
// and performs the QR-based marginalization of the landmark.
template <typename Scalar>
class LandmarkBlock {
 public:
  struct Options {
    // use Householder instead of Givens for marginalization
    bool use_householder = true;

    // use_valid_projections_only: if true, set invalid projection's
    // residual and jacobian to 0; invalid means z <= 0
    bool use_valid_projections_only = true;

    // if > 0, use huber norm with given threshold, else squared norm
    Scalar huber_parameter = 0;

    // Standard deviation of reprojection error to weight visual measurements
    Scalar obs_std_dev = 1;

    // ceres uses 1.0 / (1.0 + sqrt(SquaredColumnNorm))
    // we use 1.0 / (eps + sqrt(SquaredColumnNorm))
    Scalar jacobi_scaling_eps = 1e-6;
  };

  // Lifecycle: Uninitialized -> Allocated -> Linearized -> Marginalized,
  // with NumericalFailure as a possible outcome of linearization.
  enum State {
    Uninitialized = 0,
    Allocated,
    NumericalFailure,
    Linearized,
    Marginalized
  };

  using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
  using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
  using VecX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;

  using Mat36 = Eigen::Matrix<Scalar, 3, 6>;
  using MatX = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
  using RowMatX =
      Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
  using RowMat3 = Eigen::Matrix<Scalar, 3, 3, Eigen::RowMajor>;

  virtual ~LandmarkBlock(){};

  // True if the last linearization was numerically invalid.
  virtual bool isNumericalFailure() const = 0;

  // Allocate internal storage for the given landmark and its observations.
  virtual void allocateLandmark(
      Keypoint<Scalar>& lm,
      const Eigen::aligned_unordered_map<std::pair<TimeCamId, TimeCamId>,
                                         RelPoseLin<Scalar>>& relative_pose_lin,
      const Calibration<Scalar>& calib, const AbsOrderMap& aom,
      const Options& options,
      const std::map<TimeCamId, size_t>* rel_order = nullptr) = 0;

  // may set state to NumericalFailure --> linearization at this state is
  // unusable. Numeric check is only performed for residuals that were
  // considered to be used (valid), which depends on
  // use_valid_projections_only setting.
  virtual Scalar linearizeLandmark() = 0;

  // Marginalize the landmark via QR on its jacobian columns.
  virtual void performQR() = 0;

  // Sets damping and maintains upper triangular matrix for landmarks.
  virtual void setLandmarkDamping(Scalar lambda) = 0;

  // lambda < 0 means computing exact model cost change
  virtual void backSubstitute(const VecX& pose_inc, Scalar& l_diff) = 0;

  // Accumulators for the reduced (pose-only) camera system.
  virtual void addQ2JpTQ2Jp_mult_x(VecX& res, const VecX& x_pose) const = 0;
  virtual void addQ2JpTQ2r(VecX& res) const = 0;
  virtual void addJp_diag2(VecX& res) const = 0;
  virtual void addQ2JpTQ2Jp_blockdiag(
      BlockDiagonalAccumulator<Scalar>& accu) const = 0;

  // Jacobian column scaling for landmark / pose columns respectively.
  virtual void scaleJl_cols() = 0;
  virtual void scaleJp_cols(const VecX& jacobian_scaling) = 0;

  // Debug helper: dump the internal storage matrix to a file.
  virtual void printStorage(const std::string& filename) const = 0;
  virtual State getState() const = 0;
  virtual size_t numReducedCams() const = 0;

  // Export of the marginalized system, absolute or relative pose ordering.
  virtual void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r,
                                  size_t start_idx) const = 0;
  virtual void get_dense_Q2Jp_Q2r_rel(
      MatX& Q2Jp, VecX& Q2r, size_t start_idx,
      const std::map<TimeCamId, size_t>& rel_order) const = 0;
  virtual void add_dense_H_b(DenseAccumulator<Scalar>& accum) const = 0;
  virtual void add_dense_H_b(MatX& H, VecX& b) const = 0;
  virtual void add_dense_H_b_rel(
      MatX& H_rel, VecX& b_rel,
      const std::map<TimeCamId, size_t>& rel_order) const = 0;
  virtual const Eigen::PermutationMatrix<Eigen::Dynamic>& get_rel_permutation()
      const = 0;
  virtual Eigen::PermutationMatrix<Eigen::Dynamic> compute_rel_permutation(
      const std::map<TimeCamId, size_t>& rel_order) const = 0;
  virtual void add_dense_H_b_rel_2(MatX& H_rel, VecX& b_rel) const = 0;

  virtual TimeCamId getHostKf() const = 0;
  virtual size_t numQ2rows() const = 0;

  // factory method
  template <int POSE_SIZE>
  static std::unique_ptr<LandmarkBlock<Scalar>> createLandmarkBlock();
};
} // namespace basalt

View File

@@ -0,0 +1,585 @@
#pragma once
#include <fstream>
#include <mutex>
#include <basalt/utils/ba_utils.h>
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <basalt/linearization/landmark_block.hpp>
#include <basalt/utils/sophus_utils.hpp>
namespace basalt {
// Landmark block implementation with jacobians w.r.t. absolute poses and
// dynamically sized dense row-major storage laid out as
// [J_p | padding | J_l | res] (see the `storage` member below).
template <typename Scalar, int POSE_SIZE>
class LandmarkBlockAbsDynamic : public LandmarkBlock<Scalar> {
 public:
  using Options = typename LandmarkBlock<Scalar>::Options;
  using State = typename LandmarkBlock<Scalar>::State;

  inline bool isNumericalFailure() const override {
    return state == State::NumericalFailure;
  }

  using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
  using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
  using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
  using VecX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;

  using Mat36 = Eigen::Matrix<Scalar, 3, 6>;
  using MatX = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
  using RowMatX =
      Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

  // Allocate storage for the landmark's observations and remember pointers
  // to the landmark, options, calibration and ordering map (all must outlive
  // this block). Transitions state Uninitialized -> Allocated.
  virtual inline void allocateLandmark(
      Keypoint<Scalar>& lm,
      const Eigen::aligned_unordered_map<std::pair<TimeCamId, TimeCamId>,
                                         RelPoseLin<Scalar>>& relative_pose_lin,
      const Calibration<Scalar>& calib, const AbsOrderMap& aom,
      const Options& options,
      const std::map<TimeCamId, size_t>* rel_order = nullptr) override {
    // some of the logic assumes the members are at their initial values
    BASALT_ASSERT(state == State::Uninitialized);
    UNUSED(rel_order);

    lm_ptr = &lm;
    options_ = &options;
    calib_ = &calib;

    // TODO: consider for VIO that we have a lot of 0 columns if we just use aom
    // --> add option to use different AOM with reduced size and/or just
    // involved poses --> when accumulating results, check which case we have;
    // if both aom are identical, we don't have to do block-wise operations.
    aom_ = &aom;

    pose_lin_vec.clear();
    pose_lin_vec.reserve(lm.obs.size());
    pose_tcid_vec.clear();
    pose_tcid_vec.reserve(lm.obs.size());

    // LMBs without host frame should not be created
    BASALT_ASSERT(aom.abs_order_map.count(lm.host_kf_id.frame_id) > 0);

    for (const auto& [tcid_t, pos] : lm.obs) {
      size_t i = pose_lin_vec.size();

      auto it = relative_pose_lin.find(std::make_pair(lm.host_kf_id, tcid_t));
      BASALT_ASSERT(it != relative_pose_lin.end());

      if (aom.abs_order_map.count(tcid_t.frame_id) > 0) {
        pose_lin_vec.push_back(&it->second);
      } else {
        // Observation dropped for marginalization
        pose_lin_vec.push_back(nullptr);
      }
      pose_tcid_vec.push_back(&it->first);

      // Remember which residual rows touch which absolute pose (used by
      // addJp_diag2 to scatter column norms).
      res_idx_by_abs_pose_[it->first.first.frame_id].insert(i);   // host
      res_idx_by_abs_pose_[it->first.second.frame_id].insert(i);  // target
    }

    // number of pose-jacobian columns is determined by aom
    padding_idx = aom_->total_size;

    // 2 residual rows per observation + 3 rows for landmark damping.
    num_rows = pose_lin_vec.size() * 2 + 3;  // residuals and lm damping

    size_t pad = padding_idx % 4;
    if (pad != 0) {
      padding_size = 4 - pad;
    }

    lm_idx = padding_idx + padding_size;
    res_idx = lm_idx + 3;
    num_cols = res_idx + 1;

    // number of columns should now be multiple of 4 for good memory alignment
    // TODO: test extending this to 8 --> 32byte alignment for float?
    BASALT_ASSERT(num_cols % 4 == 0);

    storage.resize(num_rows, num_cols);

    damping_rotations.clear();
    damping_rotations.reserve(6);

    state = State::Allocated;
  }

  // may set state to NumericalFailure --> linearization at this state is
  // unusable. Numeric check is only performed for residuals that were
  // considered to be used (valid), which depends on
  // use_valid_projections_only setting.
  //
  // Returns the accumulated (robustified, weighted) reprojection error.
  virtual inline Scalar linearizeLandmark() override {
    BASALT_ASSERT(state == State::Allocated ||
                  state == State::NumericalFailure ||
                  state == State::Linearized || state == State::Marginalized);

    // storage.setZero(num_rows, num_cols);
    storage.setZero();
    damping_rotations.clear();
    damping_rotations.reserve(6);

    bool numerically_valid = true;

    Scalar error_sum = 0;

    size_t i = 0;
    for (const auto& [tcid_t, obs] : lm_ptr->obs) {
      // Dispatch on the camera model of the observing camera.
      std::visit(
          [&, obs = obs](const auto& cam) {
            // TODO: The pose_lin_vec[i] == nullptr is intended to deal with
            // dropped measurements during marginalization. However, dropped
            // measurements should only occur for the remaining frames, not for
            // the marginalized frames. Maybe these are observations between
            // two marginalized frames, if more than one is marginalized at the
            // same time? But those we would not have to drop... Double check if
            // and when this happens and possibly resolve by fixing handling
            // here, or else updating the measurements in lmdb before calling
            // linearization. Otherwise, check where else we need a `if
            // (pose_lin_vec[i])` check or `pose_lin_vec[i] != nullptr` assert
            // in this class.
            if (pose_lin_vec[i]) {
              size_t obs_idx = i * 2;
              size_t abs_h_idx =
                  aom_->abs_order_map.at(pose_tcid_vec[i]->first.frame_id)
                      .first;
              size_t abs_t_idx =
                  aom_->abs_order_map.at(pose_tcid_vec[i]->second.frame_id)
                      .first;

              Vec2 res;
              Eigen::Matrix<Scalar, 2, POSE_SIZE> d_res_d_xi;
              Eigen::Matrix<Scalar, 2, 3> d_res_d_p;

              using CamT = std::decay_t<decltype(cam)>;
              bool valid = linearizePoint<Scalar, CamT>(
                  obs, *lm_ptr, pose_lin_vec[i]->T_t_h, cam, res, &d_res_d_xi,
                  &d_res_d_p);

              if (!options_->use_valid_projections_only || valid) {
                numerically_valid = numerically_valid &&
                                    d_res_d_xi.array().isFinite().all() &&
                                    d_res_d_p.array().isFinite().all();

                const Scalar res_squared = res.squaredNorm();
                const auto [weighted_error, weight] =
                    compute_error_weight(res_squared);
                const Scalar sqrt_weight =
                    std::sqrt(weight) / options_->obs_std_dev;

                error_sum += weighted_error /
                             (options_->obs_std_dev * options_->obs_std_dev);

                // Landmark jacobian and residual columns.
                storage.template block<2, 3>(obs_idx, lm_idx) =
                    sqrt_weight * d_res_d_p;
                storage.template block<2, 1>(obs_idx, res_idx) =
                    sqrt_weight * res;

                // Chain rule through the relative pose: scatter into the
                // host and target absolute pose columns.
                // NOTE(review): the <2, 6> blocks and the 6x6 d_rel_d_h/t
                // products assume POSE_SIZE == 6 -- TODO confirm.
                d_res_d_xi *= sqrt_weight;
                storage.template block<2, 6>(obs_idx, abs_h_idx) +=
                    d_res_d_xi * pose_lin_vec[i]->d_rel_d_h;
                storage.template block<2, 6>(obs_idx, abs_t_idx) +=
                    d_res_d_xi * pose_lin_vec[i]->d_rel_d_t;
              }
            }

            i++;
          },
          calib_->intrinsics[tcid_t.cam_id].variant);
    }

    if (numerically_valid) {
      state = State::Linearized;
    } else {
      state = State::NumericalFailure;
    }

    return error_sum;
  }

  // Marginalize the landmark: bring the J_l columns into upper-triangular
  // form, implicitly splitting storage into Q1 (first 3 rows) and Q2 parts.
  virtual inline void performQR() override {
    BASALT_ASSERT(state == State::Linearized);

    // Since we use dense matrices Householder QR might be better:
    // https://mathoverflow.net/questions/227543/why-householder-reflection-is-better-than-givens-rotation-in-dense-linear-algebr
    if (options_->use_householder) {
      performQRHouseholder();
    } else {
      performQRGivens();
    }

    state = State::Marginalized;
  }

  // Sets damping and maintains upper triangular matrix for landmarks.
  // Any previously applied damping is first undone by replaying the stored
  // Givens rotations in reverse.
  virtual inline void setLandmarkDamping(Scalar lambda) override {
    BASALT_ASSERT(state == State::Marginalized);
    BASALT_ASSERT(lambda >= 0);

    if (hasLandmarkDamping()) {
      BASALT_ASSERT(damping_rotations.size() == 6);

      // undo dampening
      for (int n = 2; n >= 0; n--) {
        for (int m = n; m >= 0; m--) {
          storage.applyOnTheLeft(num_rows - 3 + n - m, n,
                                 damping_rotations.back().adjoint());
          damping_rotations.pop_back();
        }
      }
    }

    if (lambda == 0) {
      storage.template block<3, 3>(num_rows - 3, lm_idx).diagonal().setZero();
    } else {
      BASALT_ASSERT(Jl_col_scale.array().isFinite().all());

      // Write sqrt(lambda) into the 3 damping rows, then rotate the new
      // entries into the upper triangle, remembering the rotations.
      storage.template block<3, 3>(num_rows - 3, lm_idx)
          .diagonal()
          .setConstant(sqrt(lambda));

      BASALT_ASSERT(damping_rotations.empty());

      // apply dampening and remember rotations to undo
      for (int n = 0; n < 3; n++) {
        for (int m = 0; m <= n; m++) {
          damping_rotations.emplace_back();
          damping_rotations.back().makeGivens(
              storage(n, lm_idx + n),
              storage(num_rows - 3 + n - m, lm_idx + n));
          storage.applyOnTheLeft(num_rows - 3 + n - m, n,
                                 damping_rotations.back());
        }
      }
    }
  }

  // lambda < 0 means computing exact model cost change
  //
  // Solve for the landmark increment given the pose increment, update the
  // landmark, and accumulate the model cost change into l_diff.
  virtual inline void backSubstitute(const VecX& pose_inc,
                                     Scalar& l_diff) override {
    BASALT_ASSERT(state == State::Marginalized);

    // For now we include all columns in LMB
    BASALT_ASSERT(pose_inc.size() == signed_cast(padding_idx));

    const auto Q1Jl = storage.template block<3, 3>(0, lm_idx)
                          .template triangularView<Eigen::Upper>();

    const auto Q1Jr = storage.col(res_idx).template head<3>();
    const auto Q1Jp = storage.topLeftCorner(3, padding_idx);

    // Landmark increment from the triangular (Q1) part of the system.
    Vec3 inc = -Q1Jl.solve(Q1Jr + Q1Jp * pose_inc);

    // We want to compute the model cost change. The model function is
    //
    //     L(inc) = F(x) + inc^T J^T r + 0.5 inc^T J^T J inc
    //
    // and thus the expected decrease in cost for the computed increment is
    //
    //     l_diff = L(0) - L(inc)
    //            = - inc^T J^T r - 0.5 inc^T J^T J inc
    //            = - inc^T J^T (r + 0.5 J inc)
    //            = - (J inc)^T (r + 0.5 (J inc)).
    //
    // Here we have J = [Jp, Jl] under the orthogonal projection Q = [Q1, Q2],
    // i.e. the linearized system (model cost) is
    //
    //     L(inc) = 0.5 || J inc + r ||^2 = 0.5 || Q^T J inc + Q^T r ||^2
    //
    // and below we thus compute
    //
    //     l_diff = - (Q^T J inc)^T (Q^T r + 0.5 (Q^T J inc)).
    //
    // We have
    //             | Q1^T |               | Q1^T Jp     Q1^T Jl |
    //     Q^T J = |      | [Jp, Jl] =    |                     |
    //             | Q2^T |               | Q2^T Jp     0       |.
    //
    // Note that Q2 is the nullspace of Jl, and Q1^T Jl == R. So with inc =
    // [incp^T, incl^T]^T we have
    //
    //                 | Q1^T Jp incp + Q1^T Jl incl |
    //     Q^T J inc = |                             |
    //                 | Q2^T Jp incp                |
    //

    // undo damping before we compute the model cost difference
    setLandmarkDamping(0);

    // compute "Q^T J incp"
    VecX QJinc = storage.topLeftCorner(num_rows - 3, padding_idx) * pose_inc;

    // add "Q1^T Jl incl" to the first 3 rows
    QJinc.template head<3>() += Q1Jl * inc;

    auto Qr = storage.col(res_idx).head(num_rows - 3);
    l_diff -= QJinc.transpose() * (Scalar(0.5) * QJinc + Qr);

    // TODO: detect and handle case like ceres, allowing a few iterations but
    // stopping eventually
    if (!inc.array().isFinite().all() ||
        !lm_ptr->direction.array().isFinite().all() ||
        !std::isfinite(lm_ptr->inv_dist)) {
      std::cerr << "Numerical failure in backsubstitution\n";
    }

    // Note: scale only after computing model cost change
    inc.array() *= Jl_col_scale.array();

    lm_ptr->direction += inc.template head<2>();
    // Clamp inverse distance at 0 (points cannot go behind infinity).
    lm_ptr->inv_dist = std::max(Scalar(0), lm_ptr->inv_dist + inc[2]);
  }

  virtual inline size_t numReducedCams() const override {
    BASALT_LOG_FATAL("check what we mean by numReducedCams for absolute poses");
    return pose_lin_vec.size();
  }

  inline void addQ2JpTQ2Jp_mult_x(VecX& res,
                                  const VecX& x_pose) const override {
    UNUSED(res);
    UNUSED(x_pose);
    BASALT_LOG_FATAL("not implemented");
  }

  virtual inline void addQ2JpTQ2r(VecX& res) const override {
    UNUSED(res);
    BASALT_LOG_FATAL("not implemented");
  }

  // Accumulate squared column norms of the pose jacobian into `res`
  // (used to compute the jacobian scaling vector).
  virtual inline void addJp_diag2(VecX& res) const override {
    BASALT_ASSERT(state == State::Linearized);

    for (const auto& [frame_id, idx_set] : res_idx_by_abs_pose_) {
      const int pose_idx = aom_->abs_order_map.at(frame_id).first;
      for (const int i : idx_set) {
        const auto block = storage.block(2 * i, pose_idx, 2, POSE_SIZE);

        res.template segment<POSE_SIZE>(pose_idx) +=
            block.colwise().squaredNorm();
      }
    }
  }

  virtual inline void addQ2JpTQ2Jp_blockdiag(
      BlockDiagonalAccumulator<Scalar>& accu) const override {
    UNUSED(accu);
    BASALT_LOG_FATAL("not implemented");
  }

  // Scale the 3 landmark-jacobian columns and remember the scale so it can
  // be applied to the landmark increment in backSubstitute.
  virtual inline void scaleJl_cols() override {
    BASALT_ASSERT(state == State::Linearized);

    // ceres uses 1.0 / (1.0 + sqrt(SquaredColumnNorm))
    // we use 1.0 / (eps + sqrt(SquaredColumnNorm))
    Jl_col_scale =
        (options_->jacobi_scaling_eps +
         storage.block(0, lm_idx, num_rows - 3, 3).colwise().norm().array())
            .inverse();

    storage.block(0, lm_idx, num_rows - 3, 3) *= Jl_col_scale.asDiagonal();
  }

  // Scale the pose-jacobian columns by the given per-column factors.
  virtual inline void scaleJp_cols(const VecX& jacobian_scaling) override {
    BASALT_ASSERT(state == State::Marginalized);

    // we assume we apply scaling before damping (we exclude the last 3 rows)
    BASALT_ASSERT(!hasLandmarkDamping());

    storage.topLeftCorner(num_rows - 3, padding_idx) *=
        jacobian_scaling.asDiagonal();
  }

  inline bool hasLandmarkDamping() const { return !damping_rotations.empty(); }

  // Debug helper: dump state, damping flag, column scale and storage matrix.
  virtual inline void printStorage(const std::string& filename) const override {
    std::ofstream f(filename);

    Eigen::IOFormat CleanFmt(4, 0, " ", "\n", "", "");

    f << "Storage (state: " << state
      << ", damping: " << (hasLandmarkDamping() ? "yes" : "no")
      << " Jl_col_scale: " << Jl_col_scale.transpose() << "):\n"
      << storage.format(CleanFmt) << std::endl;

    f.close();
  }

#if 0
  virtual inline void stage2(
      Scalar lambda, const VecX* jacobian_scaling, VecX* precond_diagonal2,
      BlockDiagonalAccumulator<Scalar>* precond_block_diagonal,
      VecX& bref) override {
    // 1. scale jacobian
    if (jacobian_scaling) {
      scaleJp_cols(*jacobian_scaling);
    }

    // 2. dampen landmarks
    setLandmarkDamping(lambda);

    // 3a. compute diagonal preconditioner (SCHUR_JACOBI_DIAGONAL)
    if (precond_diagonal2) {
      addQ2Jp_diag2(*precond_diagonal2);
    }

    // 3b. compute block diagonal preconditioner (SCHUR_JACOBI)
    if (precond_block_diagonal) {
      addQ2JpTQ2Jp_blockdiag(*precond_block_diagonal);
    }

    // 4. compute rhs of reduced camera normal equations
    addQ2JpTQ2r(bref);
  }
#endif

  inline State getState() const override { return state; }

  virtual inline size_t numQ2rows() const override { return num_rows - 3; }

 protected:
  // QR of the landmark columns using Givens rotations.
  inline void performQRGivens() {
    // Based on "Matrix Computations 4th Edition by Golub and Van Loan"
    // See page 252, Algorithm 5.2.4 for how these two loops work
    Eigen::JacobiRotation<Scalar> gr;
    for (size_t n = 0; n < 3; n++) {
      for (size_t m = num_rows - 4; m > n; m--) {
        gr.makeGivens(storage(m - 1, lm_idx + n), storage(m, lm_idx + n));
        storage.applyOnTheLeft(m, m - 1, gr);
      }
    }
  }

  // QR of the landmark columns using 3 Householder reflections.
  inline void performQRHouseholder() {
    // Workspace buffers for applyHouseholderOnTheLeft / makeHouseholder.
    VecX tempVector1(num_cols);
    VecX tempVector2(num_rows - 3);

    for (size_t k = 0; k < 3; ++k) {
      size_t remainingRows = num_rows - k - 3;

      Scalar beta;
      Scalar tau;
      storage.col(lm_idx + k)
          .segment(k, remainingRows)
          .makeHouseholder(tempVector2, tau, beta);

      storage.block(k, 0, remainingRows, num_cols)
          .applyHouseholderOnTheLeft(tempVector2, tau, tempVector1.data());
    }
  }

  // Returns {weighted error, weight} for the given squared residual norm,
  // using the Huber norm if huber_parameter > 0, else the squared norm.
  inline std::tuple<Scalar, Scalar> compute_error_weight(
      Scalar res_squared) const {
    // Note: Definition of cost is 0.5 ||r(x)||^2 to be in line with ceres

    if (options_->huber_parameter > 0) {
      // use huber norm
      const Scalar huber_weight =
          res_squared <= options_->huber_parameter * options_->huber_parameter
              ? Scalar(1)
              : options_->huber_parameter / std::sqrt(res_squared);
      const Scalar error =
          Scalar(0.5) * (2 - huber_weight) * huber_weight * res_squared;
      return {error, huber_weight};
    } else {
      // use squared norm
      return {Scalar(0.5) * res_squared, Scalar(1)};
    }
  }

  // Copy the Q2 part (rows 3..end) of the marginalized system into the
  // dense output at the given row offset.
  void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r,
                          size_t start_idx) const override {
    Q2r.segment(start_idx, num_rows - 3) =
        storage.col(res_idx).tail(num_rows - 3);

    BASALT_ASSERT(Q2Jp.cols() == signed_cast(padding_idx));
    Q2Jp.block(start_idx, 0, num_rows - 3, padding_idx) =
        storage.block(3, 0, num_rows - 3, padding_idx);
  }

  void get_dense_Q2Jp_Q2r_rel(
      MatX& Q2Jp, VecX& Q2r, size_t start_idx,
      const std::map<TimeCamId, size_t>& rel_order) const override {
    UNUSED(Q2Jp);
    UNUSED(Q2r);
    UNUSED(start_idx);
    UNUSED(rel_order);
    BASALT_LOG_FATAL("not implemented");
  }

  void add_dense_H_b(DenseAccumulator<Scalar>& accum) const override {
    UNUSED(accum);
    BASALT_LOG_FATAL("not implemented");
  }

  // Accumulate the marginalized normal equations H += J^T J, b += J^T r
  // from the Q2 part of the storage.
  void add_dense_H_b(MatX& H, VecX& b) const override {
    const auto r = storage.col(res_idx).tail(num_rows - 3);
    const auto J = storage.block(3, 0, num_rows - 3, padding_idx);

    H.noalias() += J.transpose() * J;
    b.noalias() += J.transpose() * r;
  }

  void add_dense_H_b_rel(
      MatX& H_rel, VecX& b_rel,
      const std::map<TimeCamId, size_t>& rel_order) const override {
    UNUSED(H_rel);
    UNUSED(b_rel);
    UNUSED(rel_order);
    BASALT_LOG_FATAL("not implemented");
  }

  const Eigen::PermutationMatrix<Eigen::Dynamic>& get_rel_permutation()
      const override {
    // NOTE(review): no return statement after the fatal log -- presumably
    // BASALT_LOG_FATAL aborts / never returns; confirm it is [[noreturn]].
    BASALT_LOG_FATAL("not implemented");
  }

  Eigen::PermutationMatrix<Eigen::Dynamic> compute_rel_permutation(
      const std::map<TimeCamId, size_t>& rel_order) const override {
    UNUSED(rel_order);
    BASALT_LOG_FATAL("not implemented");
  }

  void add_dense_H_b_rel_2(MatX& H_rel, VecX& b_rel) const override {
    UNUSED(H_rel);
    UNUSED(b_rel);
    BASALT_LOG_FATAL("not implemented");
  }

  virtual TimeCamId getHostKf() const override { return lm_ptr->host_kf_id; }

 private:
  // Dense storage for pose Jacobians, padding, landmark Jacobians and
  // residuals [J_p | pad | J_l | res]
  Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
      storage;

  // Column scaling applied to J_l in scaleJl_cols; re-applied to the
  // landmark increment in backSubstitute.
  Vec3 Jl_col_scale = Vec3::Ones();
  // Givens rotations applied for landmark damping, stored so they can be
  // undone in setLandmarkDamping.
  std::vector<Eigen::JacobiRotation<Scalar>> damping_rotations;

  // Per-observation relative pose linearization (nullptr = dropped obs).
  std::vector<const RelPoseLin<Scalar>*> pose_lin_vec;
  std::vector<const std::pair<TimeCamId, TimeCamId>*> pose_tcid_vec;

  // Column layout of `storage` (see class comment).
  size_t padding_idx = 0;
  size_t padding_size = 0;
  size_t lm_idx = 0;
  size_t res_idx = 0;

  size_t num_cols = 0;
  size_t num_rows = 0;

  const Options* options_ = nullptr;  // not owned

  State state = State::Uninitialized;

  Keypoint<Scalar>* lm_ptr = nullptr;           // not owned
  const Calibration<Scalar>* calib_ = nullptr;  // not owned
  const AbsOrderMap* aom_ = nullptr;            // not owned

  // frame id -> set of observation indices whose residual rows involve
  // that absolute pose.
  std::map<int64_t, std::set<int>> res_idx_by_abs_pose_;
};
} // namespace basalt

View File

@@ -0,0 +1,141 @@
#pragma once
#include <basalt/linearization/linearization_base.hpp>
#include <basalt/optimization/accumulator.h>
#include <basalt/vi_estimator/landmark_database.h>
#include <basalt/linearization/landmark_block.hpp>
#include <basalt/utils/cast_utils.hpp>
#include <basalt/utils/time_utils.hpp>
namespace basalt {
template <class Scalar>
class ImuBlock;
// Linearization of the (visual-inertial) bundle-adjustment problem with
// absolute pose variables; landmarks are handled per LandmarkBlock and
// eliminated via QR (performQR), matching the "square root" formulation
// suggested by the class name. Declaration only: all method definitions
// live in the corresponding .cpp.
template <typename Scalar_, int POSE_SIZE_>
class LinearizationAbsQR : public LinearizationBase<Scalar_, POSE_SIZE_> {
public:
using Scalar = Scalar_;
static constexpr int POSE_SIZE = POSE_SIZE_;
using Base = LinearizationBase<Scalar, POSE_SIZE>;
// Fixed- and dynamic-size Eigen shorthands used throughout.
using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
using VecP = Eigen::Matrix<Scalar, POSE_SIZE, 1>;
using VecX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using Mat36 = Eigen::Matrix<Scalar, 3, 6>;
using Mat4 = Eigen::Matrix<Scalar, 4, 4>;
using Mat6 = Eigen::Matrix<Scalar, 6, 6>;
using MatX = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using LandmarkBlockPtr = std::unique_ptr<LandmarkBlock<Scalar>>;
using ImuBlockPtr = std::unique_ptr<ImuBlock<Scalar>>;
using typename Base::Options;
// Optional inputs: marginalization prior, IMU terms, the subset of frames
// to include, landmarks that lost tracking, and the last state to be
// marginalized (defaults include everything).
LinearizationAbsQR(
BundleAdjustmentBase<Scalar>* estimator, const AbsOrderMap& aom,
const Options& options,
const MargLinData<Scalar>* marg_lin_data = nullptr,
const ImuLinData<Scalar>* imu_lin_data = nullptr,
const std::set<FrameId>* used_frames = nullptr,
const std::unordered_set<KeypointId>* lost_landmarks = nullptr,
int64_t last_state_to_marg = std::numeric_limits<int64_t>::max());
// destructor defined in cpp b/c of unique_ptr destructor (ImuBlock)
// copy/move constructor/assignment-operator for rule-of-five
~LinearizationAbsQR();
// NOTE(review): members include vectors of std::unique_ptr and reference
// members, so these defaulted copy operations (and the assignments) are
// presumably defined as deleted by the compiler -- confirm intent.
LinearizationAbsQR(const LinearizationAbsQR&) = default;
LinearizationAbsQR(LinearizationAbsQR&&) = default;
LinearizationAbsQR& operator=(const LinearizationAbsQR&) = default;
LinearizationAbsQR& operator=(LinearizationAbsQR&&) = default;
void log_problem_stats(ExecutionStats& stats) const override;
Scalar linearizeProblem(bool* numerically_valid = nullptr) override;
void performQR() override;
// Levenberg-Marquardt-style damping on poses; hasPoseDamping() reports
// whether a positive diagonal has been set.
void setPoseDamping(const Scalar lambda);
bool hasPoseDamping() const { return pose_damping_diagonal > 0; }
Scalar backSubstitute(const VecX& pose_inc) override;
VecX getJp_diag2() const;
// Jacobian column scaling for landmarks / poses (preconditioning).
void scaleJl_cols();
void scaleJp_cols(const VecX& jacobian_scaling);
void setLandmarkDamping(Scalar lambda);
void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r) const override;
void get_dense_H_b(MatX& H, VecX& b) const override;
protected: // types
// Relative-pose linearization keyed by (host, target) camera id pair.
using PoseLinMapType =
Eigen::aligned_unordered_map<std::pair<TimeCamId, TimeCamId>,
RelPoseLin<Scalar>>;
using PoseLinMapTypeConstIter = typename PoseLinMapType::const_iterator;
// Landmark blocks grouped by their host frame/camera.
using HostLandmarkMapType =
std::unordered_map<TimeCamId, std::vector<const LandmarkBlock<Scalar>*>>;
protected: // helper
void get_dense_Q2Jp_Q2r_pose_damping(MatX& Q2Jp, size_t start_idx) const;
void get_dense_Q2Jp_Q2r_marg_prior(MatX& Q2Jp, VecX& Q2r,
size_t start_idx) const;
void add_dense_H_b_pose_damping(MatX& H) const;
void add_dense_H_b_marg_prior(MatX& H, VecX& b) const;
void add_dense_H_b_imu(DenseAccumulator<Scalar>& accum) const;
void add_dense_H_b_imu(MatX& H, VecX& b) const;
protected:
Options options_;
std::vector<KeypointId> landmark_ids;
std::vector<LandmarkBlockPtr> landmark_blocks;
std::vector<ImuBlockPtr> imu_blocks;
std::unordered_map<TimeCamId, size_t> host_to_idx_;
HostLandmarkMapType host_to_landmark_block;
std::vector<size_t> landmark_block_idx;
const BundleAdjustmentBase<Scalar>* estimator;
// References into estimator-owned state; this class does not own them.
LandmarkDatabase<Scalar>& lmdb_;
const Eigen::aligned_map<int64_t, PoseStateWithLin<Scalar>>& frame_poses;
const Calibration<Scalar>& calib;
const AbsOrderMap& aom;
const std::set<FrameId>* used_frames;
const MargLinData<Scalar>* marg_lin_data;
const ImuLinData<Scalar>* imu_lin_data;
PoseLinMapType relative_pose_lin;
std::vector<std::vector<PoseLinMapTypeConstIter>> relative_pose_per_host;
Scalar pose_damping_diagonal;
Scalar pose_damping_diagonal_sqrt;
VecX marg_scaling;
size_t num_cameras;
size_t num_rows_Q2r;
};
} // namespace basalt

View File

@@ -0,0 +1,142 @@
#pragma once
#include <basalt/linearization/linearization_base.hpp>
#include <basalt/optimization/accumulator.h>
#include <basalt/vi_estimator/landmark_database.h>
#include <basalt/vi_estimator/sqrt_ba_base.h>
#include <basalt/linearization/landmark_block.hpp>
#include <basalt/utils/cast_utils.hpp>
#include <basalt/utils/time_utils.hpp>
namespace basalt {
template <class Scalar>
class ImuBlock;
// Linearization with absolute pose variables using AbsLinData from
// ScBundleAdjustmentBase ("SC" -- presumably Schur-complement-based
// landmark elimination; confirm against the .cpp). Declaration only:
// method definitions live in the corresponding .cpp.
template <typename Scalar_, int POSE_SIZE_>
class LinearizationAbsSC : public LinearizationBase<Scalar_, POSE_SIZE_> {
public:
using Scalar = Scalar_;
static constexpr int POSE_SIZE = POSE_SIZE_;
using Base = LinearizationBase<Scalar, POSE_SIZE>;
// Fixed- and dynamic-size Eigen shorthands used throughout.
using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
using VecP = Eigen::Matrix<Scalar, POSE_SIZE, 1>;
using VecX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using Mat36 = Eigen::Matrix<Scalar, 3, 6>;
using Mat4 = Eigen::Matrix<Scalar, 4, 4>;
using Mat6 = Eigen::Matrix<Scalar, 6, 6>;
using MatX = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using LandmarkBlockPtr = std::unique_ptr<LandmarkBlock<Scalar>>;
using ImuBlockPtr = std::unique_ptr<ImuBlock<Scalar>>;
using AbsLinData = typename ScBundleAdjustmentBase<Scalar>::AbsLinData;
using typename Base::Options;
// Optional inputs: marginalization prior, IMU terms, the subset of frames
// to include, landmarks that lost tracking, and the last state to be
// marginalized (defaults include everything).
LinearizationAbsSC(
BundleAdjustmentBase<Scalar>* estimator, const AbsOrderMap& aom,
const Options& options,
const MargLinData<Scalar>* marg_lin_data = nullptr,
const ImuLinData<Scalar>* imu_lin_data = nullptr,
const std::set<FrameId>* used_frames = nullptr,
const std::unordered_set<KeypointId>* lost_landmarks = nullptr,
int64_t last_state_to_marg = std::numeric_limits<int64_t>::max());
// destructor defined in cpp b/c of unique_ptr destructor (ImuBlock)
// copy/move constructor/assignment-operator for rule-of-five
~LinearizationAbsSC();
// NOTE(review): members include std::unique_ptr vectors and a reference
// member (lmdb_), so these defaulted copy operations (and the assignments)
// are presumably defined as deleted by the compiler -- confirm intent.
LinearizationAbsSC(const LinearizationAbsSC&) = default;
LinearizationAbsSC(LinearizationAbsSC&&) = default;
LinearizationAbsSC& operator=(const LinearizationAbsSC&) = default;
LinearizationAbsSC& operator=(LinearizationAbsSC&&) = default;
void log_problem_stats(ExecutionStats& stats) const override;
Scalar linearizeProblem(bool* numerically_valid = nullptr) override;
void performQR() override;
// Levenberg-Marquardt-style damping on poses; hasPoseDamping() reports
// whether a positive diagonal has been set.
void setPoseDamping(const Scalar lambda);
bool hasPoseDamping() const { return pose_damping_diagonal > 0; }
Scalar backSubstitute(const VecX& pose_inc) override;
VecX getJp_diag2() const;
// Jacobian column scaling for landmarks / poses (preconditioning).
void scaleJl_cols();
void scaleJp_cols(const VecX& jacobian_scaling);
void setLandmarkDamping(Scalar lambda);
void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r) const override;
void get_dense_H_b(MatX& H, VecX& b) const override;
protected: // types
// Relative-pose linearization keyed by (host, target) camera id pair.
using PoseLinMapType =
Eigen::aligned_unordered_map<std::pair<TimeCamId, TimeCamId>,
RelPoseLin<Scalar>>;
using PoseLinMapTypeConstIter = typename PoseLinMapType::const_iterator;
using HostLandmarkMapType =
std::unordered_map<TimeCamId, std::vector<const LandmarkBlock<Scalar>*>>;
protected: // helper
void get_dense_Q2Jp_Q2r_pose_damping(MatX& Q2Jp, size_t start_idx) const;
void get_dense_Q2Jp_Q2r_marg_prior(MatX& Q2Jp, VecX& Q2r,
size_t start_idx) const;
void add_dense_H_b_pose_damping(MatX& H) const;
void add_dense_H_b_marg_prior(MatX& H, VecX& b) const;
void add_dense_H_b_imu(DenseAccumulator<Scalar>& accum) const;
void add_dense_H_b_imu(MatX& H, VecX& b) const;
// Transform to abs
// Accumulates a relative-parameterization H/b into the absolute-pose
// accumulator using the per-host relative pose linearizations (rpph).
static void accumulate_dense_H_b_rel_to_abs(
const MatX& rel_H, const VecX& rel_b,
const std::vector<PoseLinMapTypeConstIter>& rpph, const AbsOrderMap& aom,
DenseAccumulator<Scalar>& accum);
protected:
Options options_;
std::vector<ImuBlockPtr> imu_blocks;
// Pointers/references into estimator-owned state; not owned here.
const BundleAdjustmentBase<Scalar>* estimator;
LandmarkDatabase<Scalar>& lmdb_;
const Calibration<Scalar>& calib;
const AbsOrderMap& aom;
const std::set<FrameId>* used_frames;
const MargLinData<Scalar>* marg_lin_data;
const ImuLinData<Scalar>* imu_lin_data;
const std::unordered_set<KeypointId>* lost_landmarks;
int64_t last_state_to_marg;
Eigen::aligned_vector<AbsLinData> ald_vec;
Scalar pose_damping_diagonal;
Scalar pose_damping_diagonal_sqrt;
VecX marg_scaling;
size_t num_rows_Q2r;
};
} // namespace basalt

View File

@@ -0,0 +1,63 @@
#pragma once
#include <Eigen/Dense>
#include <basalt/vi_estimator/ba_base.h>
#include <basalt/linearization/landmark_block.hpp>
#include <basalt/utils/time_utils.hpp>
namespace basalt {
// Abstract interface for problem linearizations (QR / SC variants above
// and below derive from it). Concrete instances are obtained through the
// static create() factory, which selects an implementation based on
// Options::linearization_type.
template <typename Scalar_, int POSE_SIZE_>
class LinearizationBase {
public:
using Scalar = Scalar_;
static constexpr int POSE_SIZE = POSE_SIZE_;
using VecX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MatX = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
struct Options {
typename LandmarkBlock<Scalar>::Options lb_options;
LinearizationType linearization_type;
};
virtual ~LinearizationBase() = default;
virtual void log_problem_stats(ExecutionStats& stats) const = 0;
// Returns the (weighted) error of the linearized problem; optionally
// reports whether the linearization was numerically valid.
virtual Scalar linearizeProblem(bool* numerically_valid = nullptr) = 0;
virtual void performQR() = 0;
// virtual void setPoseDamping(const Scalar lambda) = 0;
// virtual bool hasPoseDamping() const = 0;
// Applies a pose increment and returns the resulting error/update metric.
virtual Scalar backSubstitute(const VecX& pose_inc) = 0;
// virtual VecX getJp_diag2() const = 0;
// virtual void scaleJl_cols() = 0;
// virtual void scaleJp_cols(const VecX& jacobian_scaling) = 0;
// virtual void setLandmarkDamping(Scalar lambda) = 0;
virtual void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r) const = 0;
virtual void get_dense_H_b(MatX& H, VecX& b) const = 0;
// Factory: constructs the concrete linearization matching
// options.linearization_type (defined in the .cpp).
static std::unique_ptr<LinearizationBase> create(
BundleAdjustmentBase<Scalar>* estimator, const AbsOrderMap& aom,
const Options& options,
const MargLinData<Scalar>* marg_lin_data = nullptr,
const ImuLinData<Scalar>* imu_lin_data = nullptr,
const std::set<FrameId>* used_frames = nullptr,
const std::unordered_set<KeypointId>* lost_landmarks = nullptr,
int64_t last_state_to_marg = std::numeric_limits<int64_t>::max());
};
bool isLinearizationSqrt(const LinearizationType& type);
} // namespace basalt

View File

@@ -0,0 +1,143 @@
#pragma once
#include <basalt/linearization/linearization_base.hpp>
#include <basalt/optimization/accumulator.h>
#include <basalt/vi_estimator/landmark_database.h>
#include <basalt/vi_estimator/sqrt_ba_base.h>
#include <basalt/linearization/landmark_block.hpp>
#include <basalt/utils/cast_utils.hpp>
#include <basalt/utils/time_utils.hpp>
namespace basalt {
template <class Scalar>
class ImuBlock;
// Linearization using relative pose parameterization with RelLinData from
// ScBundleAdjustmentBase ("SC" -- presumably Schur-complement-based
// landmark elimination; confirm against the .cpp), converted to absolute
// poses via accumulate_dense_H_b_rel_to_abs. Declaration only.
template <typename Scalar_, int POSE_SIZE_>
class LinearizationRelSC : public LinearizationBase<Scalar_, POSE_SIZE_> {
public:
using Scalar = Scalar_;
static constexpr int POSE_SIZE = POSE_SIZE_;
using Base = LinearizationBase<Scalar, POSE_SIZE>;
// Fixed- and dynamic-size Eigen shorthands used throughout.
using Vec2 = Eigen::Matrix<Scalar, 2, 1>;
using Vec3 = Eigen::Matrix<Scalar, 3, 1>;
using Vec4 = Eigen::Matrix<Scalar, 4, 1>;
using VecP = Eigen::Matrix<Scalar, POSE_SIZE, 1>;
using VecX = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using Mat36 = Eigen::Matrix<Scalar, 3, 6>;
using Mat4 = Eigen::Matrix<Scalar, 4, 4>;
using Mat6 = Eigen::Matrix<Scalar, 6, 6>;
using MatX = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using LandmarkBlockPtr = std::unique_ptr<LandmarkBlock<Scalar>>;
using ImuBlockPtr = std::unique_ptr<ImuBlock<Scalar>>;
using RelLinData = typename ScBundleAdjustmentBase<Scalar>::RelLinData;
using typename Base::Options;
// Optional inputs: marginalization prior, IMU terms, the subset of frames
// to include, landmarks that lost tracking, and the last state to be
// marginalized (defaults include everything).
LinearizationRelSC(
BundleAdjustmentBase<Scalar>* estimator, const AbsOrderMap& aom,
const Options& options,
const MargLinData<Scalar>* marg_lin_data = nullptr,
const ImuLinData<Scalar>* imu_lin_data = nullptr,
const std::set<FrameId>* used_frames = nullptr,
const std::unordered_set<KeypointId>* lost_landmarks = nullptr,
int64_t last_state_to_marg = std::numeric_limits<int64_t>::max());
// destructor defined in cpp b/c of unique_ptr destructor (ImuBlock)
// copy/move constructor/assignment-operator for rule-of-five
~LinearizationRelSC();
// NOTE(review): members include std::unique_ptr vectors and a reference
// member (lmdb_), so these defaulted copy operations (and the assignments)
// are presumably defined as deleted by the compiler -- confirm intent.
LinearizationRelSC(const LinearizationRelSC&) = default;
LinearizationRelSC(LinearizationRelSC&&) = default;
LinearizationRelSC& operator=(const LinearizationRelSC&) = default;
LinearizationRelSC& operator=(LinearizationRelSC&&) = default;
void log_problem_stats(ExecutionStats& stats) const override;
Scalar linearizeProblem(bool* numerically_valid = nullptr) override;
void performQR() override;
// Levenberg-Marquardt-style damping on poses; hasPoseDamping() reports
// whether a positive diagonal has been set.
void setPoseDamping(const Scalar lambda);
bool hasPoseDamping() const { return pose_damping_diagonal > 0; }
Scalar backSubstitute(const VecX& pose_inc) override;
VecX getJp_diag2() const;
// Jacobian column scaling for landmarks / poses (preconditioning).
void scaleJl_cols();
void scaleJp_cols(const VecX& jacobian_scaling);
void setLandmarkDamping(Scalar lambda);
void get_dense_Q2Jp_Q2r(MatX& Q2Jp, VecX& Q2r) const override;
void get_dense_H_b(MatX& H, VecX& b) const override;
protected: // types
// Relative-pose linearization keyed by (host, target) camera id pair.
using PoseLinMapType =
Eigen::aligned_unordered_map<std::pair<TimeCamId, TimeCamId>,
RelPoseLin<Scalar>>;
using PoseLinMapTypeConstIter = typename PoseLinMapType::const_iterator;
using HostLandmarkMapType =
std::unordered_map<TimeCamId, std::vector<const LandmarkBlock<Scalar>*>>;
protected: // helper
void get_dense_Q2Jp_Q2r_pose_damping(MatX& Q2Jp, size_t start_idx) const;
void get_dense_Q2Jp_Q2r_marg_prior(MatX& Q2Jp, VecX& Q2r,
size_t start_idx) const;
void add_dense_H_b_pose_damping(MatX& H) const;
void add_dense_H_b_marg_prior(MatX& H, VecX& b) const;
void add_dense_H_b_imu(DenseAccumulator<Scalar>& accum) const;
void add_dense_H_b_imu(MatX& H, VecX& b) const;
// Transform to abs
// Accumulates a relative-parameterization H/b into the absolute-pose
// accumulator using the per-host relative pose linearizations (rpph).
static void accumulate_dense_H_b_rel_to_abs(
const MatX& rel_H, const VecX& rel_b,
const std::vector<PoseLinMapTypeConstIter>& rpph, const AbsOrderMap& aom,
DenseAccumulator<Scalar>& accum);
protected:
Options options_;
std::vector<ImuBlockPtr> imu_blocks;
// Pointers/references into estimator-owned state; not owned here.
const BundleAdjustmentBase<Scalar>* estimator;
LandmarkDatabase<Scalar>& lmdb_;
const Calibration<Scalar>& calib;
const AbsOrderMap& aom;
const std::set<FrameId>* used_frames;
const MargLinData<Scalar>* marg_lin_data;
const ImuLinData<Scalar>* imu_lin_data;
const std::unordered_set<KeypointId>* lost_landmarks;
int64_t last_state_to_marg;
Eigen::aligned_vector<RelLinData> rld_vec;
Scalar pose_damping_diagonal;
Scalar pose_damping_diagonal_sqrt;
VecX marg_scaling;
size_t num_rows_Q2r;
};
} // namespace basalt