/*
 * COPYRIGHT AND PERMISSION NOTICE
 * Penn Software MSCKF_VIO
 * Copyright (C) 2017 The Trustees of the University of Pennsylvania
 * All rights reserved.
 */

#ifndef MSCKF_VIO_FEATURE_H
#define MSCKF_VIO_FEATURE_H

#include <iostream>
#include <map>
#include <vector>

#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <Eigen/StdVector>
#include <math.h>
#include <visualization_msgs/Marker.h>
#include <visualization_msgs/MarkerArray.h>
#include <geometry_msgs/Point.h>

#include "image_handler.h"

#include "math_utils.hpp"
#include "imu_state.h"
#include "cam_state.h"

namespace msckf_vio {

/*
 * @brief Feature Salient part of an image. Please refer
 *    to the Appendix of "A Multi-State Constraint Kalman
 *    Filter for Vision-aided Inertial Navigation" for how
 *    the 3d position of a feature is initialized.
 */
struct Feature {
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  typedef long long int FeatureIDType;

  /*
   * @brief OptimizationConfig Configuration parameters
   *    for 3d feature position optimization.
   */
  struct OptimizationConfig {
    double translation_threshold;
    double huber_epsilon;
    double estimation_precision;
    double initial_damping;
    int outer_loop_max_iteration;
    int inner_loop_max_iteration;

    OptimizationConfig():
      translation_threshold(0.2),
      huber_epsilon(0.01),
      estimation_precision(5e-7),
      initial_damping(1e-3),
      outer_loop_max_iteration(10),
      inner_loop_max_iteration(10) {
      return;
    }
  };

  // Constructors for the struct.
  Feature(): id(0), position(Eigen::Vector3d::Zero()),
    is_initialized(false), is_anchored(false) {}

  Feature(const FeatureIDType& new_id): id(new_id),
    position(Eigen::Vector3d::Zero()),
    is_initialized(false), is_anchored(false) {}

  /*
   * @brief Rhocost Compute the cost of an observation given
   *    an inverse depth estimate along the anchor ray.
   * @param T_c0_ci A rigid body transformation taking
   *    a vector in c0 frame to ci frame.
   * @param x The current inverse depth estimation.
   * @param z1 The feature observation in the anchor (c0) frame.
   * @param z2 The feature observation in the ci frame.
   * @return e The cost of this observation.
   */
  void Rhocost(const Eigen::Isometry3d& T_c0_ci,
      const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
      double& e) const;

  /*
   * @brief cost Compute the cost of the camera observations
   * @param T_c0_ci A rigid body transformation taking
   *    a vector in c0 frame to ci frame.
   * @param x The current estimation.
   * @param z The ith measurement of the feature j in ci frame.
   * @return e The cost of this observation.
   */
  inline void cost(const Eigen::Isometry3d& T_c0_ci,
      const Eigen::Vector3d& x, const Eigen::Vector2d& z,
      double& e) const;

  // Estimates the inverse depth of the feature along the anchor
  // ray based on all currently available measurements.
  bool initializeRho(const CamStateServer& cam_states);

  /*
   * @brief RhoJacobian Compute the Jacobian of a camera
   *    observation with respect to the inverse depth.
   * @param T_c0_ci A rigid body transformation taking
   *    a vector in c0 frame to ci frame.
   * @param x The current inverse depth estimation.
   * @param z1 The feature observation in the anchor (c0) frame.
   * @param z2 The actual measurement of the feature in ci frame.
   * @return J The computed Jacobian.
   * @return r The computed residual.
   * @return w Weight induced by the Huber kernel.
   */
  inline void RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
      const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
      Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
      double& w) const;

  /*
   * @brief jacobian Compute the Jacobian of the camera observation
   * @param T_c0_ci A rigid body transformation taking
   *    a vector in c0 frame to ci frame.
   * @param x The current estimation.
   * @param z The actual measurement of the feature in ci frame.
   * @return J The computed Jacobian.
   * @return r The computed residual.
   * @return w Weight induced by the Huber kernel.
   */
  inline void jacobian(const Eigen::Isometry3d& T_c0_ci,
      const Eigen::Vector3d& x, const Eigen::Vector2d& z,
      Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
      double& w) const;

  // Computes an initial depth estimate of the feature in the
  // c1 frame from two views (see generateInitialGuess below).
  inline double generateInitialDepth(
      const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
      const Eigen::Vector2d& z2) const;

  /*
   * @brief generateInitialGuess Compute the initial guess of
   *    the feature's 3d position using only two views.
   * @param T_c1_c2: A rigid body transformation taking
   *    a vector from c2 frame to c1 frame.
   * @param z1: feature observation in c1 frame.
   * @param z2: feature observation in c2 frame.
   * @return p: Computed feature position in c1 frame.
   */
  inline void generateInitialGuess(
      const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
      const Eigen::Vector2d& z2, Eigen::Vector3d& p) const;

  /*
   * @brief checkMotion Check the input camera poses to ensure
   *    there is enough translation to triangulate the feature
   *    position.
   * @param cam_states: input camera poses.
   * @return True if the translation between the input camera
   *    poses is sufficient.
   */
  inline bool checkMotion(
      const CamStateServer& cam_states) const;

  /*
   * @brief initializeAnchor generates the NxN patch around the
   *    feature in the anchor image
   * @param cam: Camera calibration whose moving window contains
   *    all recorded images currently present in the camera
   *    state vector
   * @return the irradiance of the anchor NxN patch
   * @return True if the anchor can be estimated
   */
  bool initializeAnchor(
      const CameraCalibration& cam, int N);

  /*
   * @brief initializePosition Initialize the feature position
   *    based on all current available measurements.
   * @param cam_states: A map containing the camera poses with its
   *    ID as the associated key value.
   * @return The computed 3d position is used to set the position
   *    member variable. Note the resulting position is in world
   *    frame.
   * @return True if the estimated 3d position of the feature
   *    is valid.
   */
  inline bool initializePosition(
      const CamStateServer& cam_states);

  // Returns the size (in undistorted, normalized coordinates) that
  // one pixel covers at the projection of in_p into the given
  // camera frame.
  cv::Point2f pixelDistanceAt(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      const CameraCalibration& cam,
      Eigen::Vector3d& in_p) const;

  /*
   * @brief projectPositionToCamera Takes a 3d position in the world frame,
   *    projects it into the passed camera frame using pinhole projection,
   *    then distorts it using the camera calibration to get
   *    the resulting distorted pixel position
   */
  inline cv::Point2f projectPositionToCamera(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      const CameraCalibration& cam,
      Eigen::Vector3d& in_p) const;

  // Returns the pre-computed image derivative of the anchor frame
  // (see initializeAnchor) at 'pose' for the requested Sobel type.
  double cvKernel(
      const cv::Point2f pose,
      std::string type) const;

  // Applies a 3x3 Sobel kernel of the given type to 'frame'
  // at 'pose' and returns the response.
  double Kernel(
      const cv::Point2f pose,
      const cv::Mat frame,
      std::string type) const;

  /*
   * @brief estimate_FrameIrradiance returns the estimated irradiance
   *    values of the anchor patch as observed in the given camera frame
   */
  bool estimate_FrameIrradiance(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      CameraCalibration& cam0,
      std::vector<double>& anchorPatch_estimate,
      IlluminationParameter& estimatedIllumination) const;

  // Publishes rviz markers for every camera observation and the
  // estimated patch positions.
  bool MarkerGeneration(
      ros::Publisher& marker_pub,
      const CamStateServer& cam_states) const;

  // Visualizes the Sobel derivative images of the anchor frame.
  bool VisualizeKernel(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      CameraCalibration& cam0) const;

  // Visualizes the anchor patch, its projection into the current
  // frame and the photometric residual.
  bool VisualizePatch(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      CameraCalibration& cam0,
      const Eigen::VectorXd& photo_r,
      std::stringstream& ss) const;

  /*
   * @brief AnchorPixelToPosition uses the calculated pixels
   *    of the anchor patch to generate 3D positions of all of them
   */
  inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p,
      const CameraCalibration& cam);

  /*
   * @brief PixelIrradiance returns the irradiance value of a pixel
   */
  inline float PixelIrradiance(cv::Point2f pose, cv::Mat image) const;

  // A unique identifier for the feature.
  // The type of id is set to FeatureIDType so that
  // ids do not overflow and get duplicated even
  // during long runs.
  FeatureIDType id;

  // id for next feature
  static FeatureIDType next_id;

  // Store the observations of the features in the
  // state_id(key)-image_coordinates(value) manner.
  std::map<StateIDType, Eigen::Vector4d, std::less<StateIDType>,
    Eigen::aligned_allocator<
      std::pair<const StateIDType, Eigen::Vector4d> > > observations;

  // NxN patch of the anchor image
  std::vector<double> anchorPatch;
  std::vector<cv::Point2f> anchorPatch_ideal;
  std::vector<cv::Point2f> anchorPatch_real;

  // Position of the NxN patch in 3D space in the anchor camera frame
  std::vector<Eigen::Vector3d> anchorPatch_3d;

  // Anchor isometry
  Eigen::Isometry3d T_anchor_w;

  // 3d position of the feature in the world frame.
  Eigen::Vector3d position;

  // inverse depth representation
  double anchor_rho;

  // An indicator to show if the 3d position of the feature
  // has been initialized or not.
  bool is_initialized;
  bool is_anchored;

  cv::Mat abs_xderImage;
  cv::Mat abs_yderImage;

  cv::Mat xderImage;
  cv::Mat yderImage;

  cv::Mat anchorImage_blurred;
  cv::Point2f anchor_center_pos;
  cv::Point2f undist_anchor_center_pos;

  // Noise for a normalized feature measurement.
  static double observation_noise;

  // Optimization configuration for solving the 3d position.
  static OptimizationConfig optimization_config;

};

typedef Feature::FeatureIDType FeatureIDType;
typedef std::map<FeatureIDType, Feature, std::less<FeatureIDType>,
  Eigen::aligned_allocator<
    std::pair<const FeatureIDType, Feature> > > MapServer;

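/*
 * A minimal usage sketch (hypothetical variable names, not part of this
 * file): features are created with a fresh id, accumulate one stereo
 * observation (u0, v0, u1, v1) per camera state, and are stored in a
 * MapServer keyed by their id.
 *
 *   MapServer map_server;
 *   Feature f(Feature::next_id++);
 *   f.observations[state_id] = Eigen::Vector4d(u0, v0, u1, v1);
 *   map_server[f.id] = f;
 */
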
inline void Feature::Rhocost(const Eigen::Isometry3d& T_c0_ci,
    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
    double& e) const
{
  // Compute hi1, hi2, and hi3 as Equation (37).
  const double& rho = x;

  Eigen::Vector3d h = T_c0_ci.linear()*
    Eigen::Vector3d(z1(0), z1(1), 1.0) + rho*T_c0_ci.translation();
  double& h1 = h(0);
  double& h2 = h(1);
  double& h3 = h(2);

  // Predict the feature observation in ci frame.
  Eigen::Vector2d z_hat(h1/h3, h2/h3);

  // Compute the residual.
  e = (z_hat-z2).squaredNorm();
  return;
}

void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
    const Eigen::Vector3d& x, const Eigen::Vector2d& z,
    double& e) const
{
  // Compute hi1, hi2, and hi3 as Equation (37).
  const double& alpha = x(0);
  const double& beta = x(1);
  const double& rho = x(2);

  Eigen::Vector3d h = T_c0_ci.linear()*
    Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
  double h1 = h(0);
  double h2 = h(1);
  double h3 = h(2);

  // Predict the feature observation in ci frame.
  Eigen::Vector2d z_hat(h1/h3, h2/h3);

  // Compute the residual.
  e = (z_hat-z).squaredNorm();
  return;
}

void Feature::RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
    Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
    double& w) const
{
  const double& rho = x;

  Eigen::Vector3d h = T_c0_ci.linear()*
    Eigen::Vector3d(z1(0), z1(1), 1.0) + rho*T_c0_ci.translation();
  double& h1 = h(0);
  double& h2 = h(1);
  double& h3 = h(2);

  // Compute the Jacobian of z_hat with respect to rho; only the
  // translation part of h depends on rho, i.e. dh/drho = t.
  const Eigen::Vector3d& t = T_c0_ci.translation();
  J(0,0) = t(0)/h3 - h1*t(2)/(h3*h3);
  J(1,0) = t(1)/h3 - h2*t(2)/(h3*h3);

  // Compute the residual.
  Eigen::Vector2d z_hat(h1/h3, h2/h3);
  r = z_hat - z2;

  // Compute the weight based on the residual.
  double e = r.norm();
  if (e <= optimization_config.huber_epsilon)
    w = 1.0;
  else
    w = optimization_config.huber_epsilon / (2*e);

  return;
}

void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
    const Eigen::Vector3d& x, const Eigen::Vector2d& z,
    Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
    double& w) const
{
  // Compute hi1, hi2, and hi3 as Equation (37).
  const double& alpha = x(0);
  const double& beta = x(1);
  const double& rho = x(2);

  Eigen::Vector3d h = T_c0_ci.linear()*
    Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
  double& h1 = h(0);
  double& h2 = h(1);
  double& h3 = h(2);

  // Compute the Jacobian.
  Eigen::Matrix3d W;
  W.leftCols<2>() = T_c0_ci.linear().leftCols<2>();
  W.rightCols<1>() = T_c0_ci.translation();

  J.row(0) = 1/h3*W.row(0) - h1/(h3*h3)*W.row(2);
  J.row(1) = 1/h3*W.row(1) - h2/(h3*h3)*W.row(2);

  // Compute the residual.
  Eigen::Vector2d z_hat(h1/h3, h2/h3);
  r = z_hat - z;

  // Compute the weight based on the residual.
  double e = r.norm();
  if (e <= optimization_config.huber_epsilon)
    w = 1.0;
  else
    w = optimization_config.huber_epsilon / (2*e);

  return;
}

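/*
 * Note on the weight above: for residual norms e below huber_epsilon the
 * observation is trusted fully (w = 1); beyond that, w decays as
 * huber_epsilon/(2e), so in the weighted normal equations assembled by
 * the optimizers below,
 *   A += w^2 * J^T J,  b += w^2 * J^T r,
 * large residuals (outliers) contribute progressively less. For example,
 * with the default huber_epsilon = 0.01, a residual of e = 0.1 is
 * down-weighted by w = 0.05.
 */
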
double Feature::generateInitialDepth(
    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
    const Eigen::Vector2d& z2) const
{
  // Construct a least square problem to solve the depth.
  Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);

  Eigen::Vector2d A(0.0, 0.0);
  A(0) = m(0) - z2(0)*m(2);
  A(1) = m(1) - z2(1)*m(2);

  Eigen::Vector2d b(0.0, 0.0);
  b(0) = z2(0)*T_c1_c2.translation()(2) - T_c1_c2.translation()(0);
  b(1) = z2(1)*T_c1_c2.translation()(2) - T_c1_c2.translation()(1);

  // Solve for the depth.
  double depth = (A.transpose() * A).inverse() * A.transpose() * b;
  return depth;
}

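/*
 * Derivation sketch for the least-squares problem above: with
 * m = T_c1_c2.linear() * [z1; 1] and t = T_c1_c2.translation(), a point at
 * depth d along the z1 ray projects onto z2 when
 *   d*m + t = lambda * [z2; 1]   for some lambda > 0.
 * Eliminating lambda via the third row (lambda = d*m(2) + t(2)) and
 * substituting into the first two rows yields the 2x1 system
 *   (m(0) - z2(0)*m(2)) * d = z2(0)*t(2) - t(0)
 *   (m(1) - z2(1)*m(2)) * d = z2(1)*t(2) - t(1),
 * i.e. A*d = b, whose least-squares solution is the returned depth.
 */
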
void Feature::generateInitialGuess(
    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const
{
  double depth = generateInitialDepth(T_c1_c2, z1, z2);
  p(0) = z1(0) * depth;
  p(1) = z1(1) * depth;
  p(2) = depth;
  return;
}

bool Feature::checkMotion(const CamStateServer& cam_states) const
{
  const StateIDType& first_cam_id = observations.begin()->first;
  const StateIDType& last_cam_id = (--observations.end())->first;

  Eigen::Isometry3d first_cam_pose;
  first_cam_pose.linear() = quaternionToRotation(
      cam_states.find(first_cam_id)->second.orientation).transpose();
  first_cam_pose.translation() =
    cam_states.find(first_cam_id)->second.position;

  Eigen::Isometry3d last_cam_pose;
  last_cam_pose.linear() = quaternionToRotation(
      cam_states.find(last_cam_id)->second.orientation).transpose();
  last_cam_pose.translation() =
    cam_states.find(last_cam_id)->second.position;

  // Get the direction of the feature when it is first observed.
  // This direction is represented in the world frame.
  Eigen::Vector3d feature_direction(
      observations.begin()->second(0),
      observations.begin()->second(1), 1.0);
  feature_direction = feature_direction / feature_direction.norm();
  feature_direction = first_cam_pose.linear()*feature_direction;

  // Compute the translation between the first frame
  // and the last frame. We assume the first frame and
  // the last frame will provide the largest motion to
  // speed up the checking process.
  Eigen::Vector3d translation = last_cam_pose.translation() -
    first_cam_pose.translation();
  double parallel_translation =
    translation.transpose()*feature_direction;
  Eigen::Vector3d orthogonal_translation = translation -
    parallel_translation*feature_direction;

  return orthogonal_translation.norm() >
    optimization_config.translation_threshold;
}

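/*
 * Geometric intuition: the translation is split into a component parallel
 * to the first viewing ray and a component orthogonal to it; only the
 * orthogonal part creates parallax usable for triangulation. E.g. moving
 * 1 m straight toward the feature leaves zero orthogonal translation and
 * the check fails, while a 0.3 m sideways motion passes the default
 * 0.2 m translation_threshold.
 */
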
inline double Feature::cvKernel(
    const cv::Point2f pose,
    std::string type) const
{
  double delta = 0;

  if(type == "Sobel_x")
    delta = ((double)xderImage.at<short>(pose.y, pose.x))/255.;
  else if (type == "Sobel_y")
    delta = ((double)yderImage.at<short>(pose.y, pose.x))/255.;
  return delta;
}

inline double Feature::Kernel(
    const cv::Point2f pose,
    const cv::Mat frame,
    std::string type) const
{
  Eigen::Matrix<double, 3, 3> kernel = Eigen::Matrix<double, 3, 3>::Zero();
  if(type == "Sobel_x")
    kernel << -1., 0., 1., -2., 0., 2., -1., 0., 1.;
  else if(type == "Sobel_y")
    kernel << -1., -2., -1., 0., 0., 0., 1., 2., 1.;

  double delta = 0;
  int offs = (int)(kernel.rows()-1)/2;

  for(int i = 0; i < kernel.rows(); i++)
    for(int j = 0; j < kernel.cols(); j++)
      delta += ((float)frame.at<uint8_t>(pose.y+j-offs, pose.x+i-offs))/255. * (float)kernel(j,i);

  return delta;
}

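/*
 * Note on scaling: the absolute coefficients of a 3x3 Sobel kernel sum to
 * 8, so a response computed on [0,1]-normalized intensities lies in
 * [-8, 8]. This is presumably why initializeAnchor below divides the
 * OpenCV Sobel images by 8, keeping cvKernel and this function on a
 * comparable scale.
 */
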
inline bool Feature::estimate_FrameIrradiance(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,
    CameraCalibration& cam0,
    std::vector<double>& anchorPatch_estimate,
    IlluminationParameter& estimated_illumination) const
{
  // get irradiance of patch in anchor frame
  // subtract estimated b and divide by a of anchor frame
  // multiply by a and add b of this frame

  auto anchor = observations.begin();
  if(cam0.moving_window.find(anchor->first) == cam0.moving_window.end() ||
     cam0.moving_window.find(cam_state_id) == cam0.moving_window.end())
    return false;

  double anchorExposureTime_ms = cam0.moving_window.find(anchor->first)->second.exposureTime_ms;
  double frameExposureTime_ms = cam0.moving_window.find(cam_state_id)->second.exposureTime_ms;

  double a_A = anchorExposureTime_ms;
  double b_A = 0;
  double a_l = frameExposureTime_ms;
  double b_l = 0;

  estimated_illumination.frame_gain = a_l;
  estimated_illumination.frame_bias = b_l;
  estimated_illumination.feature_gain = 1;
  estimated_illumination.feature_bias = 0;
  //printf("frames: %lld, %lld\n", anchor->first, cam_state_id);
  //printf("exposure: %f, %f\n", a_A, a_l);

  for (double anchorPixel : anchorPatch)
  {
    float irradiance = (anchorPixel - b_A) / a_A;
    anchorPatch_estimate.push_back(irradiance);
  }

  return true;
}

// generates markers for every camera position/observation
// and estimated feature/path position
inline bool Feature::MarkerGeneration(
    ros::Publisher& marker_pub,
    const CamStateServer& cam_states) const
{
  visualization_msgs::MarkerArray ma;

  // add all camera states used for estimation
  int count = 0;

  for(auto observation : observations)
  {
    visualization_msgs::Marker marker;
    marker.header.frame_id = "world";
    marker.header.stamp = ros::Time::now();

    marker.ns = "cameras";
    marker.id = count++;
    marker.type = visualization_msgs::Marker::ARROW;
    marker.action = visualization_msgs::Marker::ADD;

    marker.pose.position.x = cam_states.find(observation.first)->second.position(0);
    marker.pose.position.y = cam_states.find(observation.first)->second.position(1);
    marker.pose.position.z = cam_states.find(observation.first)->second.position(2);

    // rotate from x to z axis
    Eigen::Vector4d q = quaternionMultiplication(Eigen::Vector4d(0, -0.707, 0, 0.707), cam_states.find(observation.first)->second.orientation);

    marker.pose.orientation.x = q(0);
    marker.pose.orientation.y = q(1);
    marker.pose.orientation.z = q(2);
    marker.pose.orientation.w = q(3);

    marker.scale.x = 0.15;
    marker.scale.y = 0.05;
    marker.scale.z = 0.05;

    if(count == 1)
    {
      marker.color.r = 0.0f;
      marker.color.g = 0.0f;
      marker.color.b = 1.0f;
    }
    else
    {
      marker.color.r = 0.0f;
      marker.color.g = 1.0f;
      marker.color.b = 0.0f;
    }
    marker.color.a = 1.0;

    marker.lifetime = ros::Duration(0);

    ma.markers.push_back(marker);
  }

  // 'delete' any existing cameras (make invisible)
  for(int i = count; i < 20; i++)
  {
    visualization_msgs::Marker marker;

    marker.header.frame_id = "world";
    marker.header.stamp = ros::Time::now();

    marker.ns = "cameras";
    marker.id = i;
    marker.type = visualization_msgs::Marker::ARROW;
    marker.action = visualization_msgs::Marker::ADD;

    marker.pose.orientation.w = 1;

    marker.color.a = 0.0;

    marker.lifetime = ros::Duration(1);

    ma.markers.push_back(marker);
  }

  // generate feature patch points position
  visualization_msgs::Marker marker;

  marker.header.frame_id = "world";
  marker.header.stamp = ros::Time::now();

  marker.ns = "patch";
  marker.id = 0;
  marker.type = visualization_msgs::Marker::POINTS;
  marker.action = visualization_msgs::Marker::ADD;

  marker.pose.orientation.w = 1;

  marker.scale.x = 0.02;
  marker.scale.y = 0.02;

  marker.color.r = 1.0f;
  marker.color.g = 0.0f;
  marker.color.b = 0.0f;
  marker.color.a = 1.0;

  for(auto point : anchorPatch_3d)
  {
    geometry_msgs::Point p;
    p.x = point(0);
    p.y = point(1);
    p.z = point(2);
    marker.points.push_back(p);
  }
  ma.markers.push_back(marker);

  marker_pub.publish(ma);

  return true;
}

inline bool Feature::VisualizeKernel(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,
    CameraCalibration& cam0) const
{
  auto anchor = observations.begin();
  cv::Mat anchorImage = cam0.moving_window.find(anchor->first)->second.image;

  //cv::Mat xderImage;
  //cv::Mat yderImage;

  //cv::Sobel(anchorImage, xderImage, CV_8UC1, 1, 0, 3);
  //cv::Sobel(anchorImage, yderImage, CV_8UC1, 0, 1, 3);

  cv::Mat xderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());
  cv::Mat yderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());

  cv::Mat norm_abs_xderImage;
  cv::normalize(abs_xderImage, norm_abs_xderImage, 0, 255, cv::NORM_MINMAX, CV_8UC1);

  cv::imshow("xder", norm_abs_xderImage);
  cv::imshow("yder", abs_yderImage);

  for(int i = 1; i < anchorImage.rows-1; i++)
    for(int j = 1; j < anchorImage.cols-1; j++)
      xderImage2.at<uint8_t>(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_x"));

  for(int i = 1; i < anchorImage.rows-1; i++)
    for(int j = 1; j < anchorImage.cols-1; j++)
      yderImage2.at<uint8_t>(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_y"));

  cv::imshow("anchor", anchorImage);
  cv::imshow("xder2", xderImage2);
  cv::imshow("yder2", yderImage2);

  cvWaitKey(0);

  return true;
}

inline bool Feature::VisualizePatch(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,
    CameraCalibration& cam0,
    const Eigen::VectorXd& photo_r,
    std::stringstream& ss) const
{
  double rescale = 1;

  //visu - anchor
  auto anchor = observations.begin();
  cv::Mat anchorImage = cam0.moving_window.find(anchor->first)->second.image;
  cv::Mat dottedFrame(anchorImage.size(), CV_8UC3);
  cv::cvtColor(anchorImage, dottedFrame, CV_GRAY2RGB);

  // visualize the true anchor points (the surrounding of the original measurements)
  for(auto point : anchorPatch_real)
  {
    // visu - feature
    cv::Point xs(point.x, point.y);
    cv::Point ys(point.x, point.y);
    cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,255));
  }
  cam0.featureVisu = dottedFrame.clone();

  // visu - feature
  cv::Mat current_image = cam0.moving_window.find(cam_state_id)->second.image;
  cv::cvtColor(current_image, dottedFrame, CV_GRAY2RGB);

  // set position in frame
  // save irradiance of projection
  std::vector<double> projectionPatch;
  for(auto point : anchorPatch_3d)
  {
    cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam0, point);
    projectionPatch.push_back(PixelIrradiance(p_in_c0, current_image));
    // visu - feature
    cv::Point xs(p_in_c0.x, p_in_c0.y);
    cv::Point ys(p_in_c0.x, p_in_c0.y);
    cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,0));
  }

  cv::hconcat(cam0.featureVisu, dottedFrame, cam0.featureVisu);

  // patches visualization
  int N = sqrt(anchorPatch_3d.size());
  int scale = 30;
  cv::Mat irradianceFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255));
  cv::resize(irradianceFrame, irradianceFrame, cv::Size(), rescale, rescale);

  // irradiance grid anchor
  std::stringstream namer;
  namer << "anchor";
  cv::putText(irradianceFrame, namer.str(), cvPoint(30, 25),
      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);

  for(int i = 0; i<N; i++)
    for(int j = 0; j<N; j++)
      cv::rectangle(irradianceFrame,
          cv::Point(30+scale*(i+1), 30+scale*j),
          cv::Point(30+scale*i, 30+scale*(j+1)),
          cv::Scalar(anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255),
          CV_FILLED);

  // irradiance grid projection
  namer.str(std::string());
  namer << "projection";
  cv::putText(irradianceFrame, namer.str(), cvPoint(30, 45+scale*N),
      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);

  for(int i = 0; i<N; i++)
    for(int j = 0; j<N; j++)
      cv::rectangle(irradianceFrame,
          cv::Point(30+scale*(i+1), 50+scale*(N+j)),
          cv::Point(30+scale*(i), 50+scale*(N+j+1)),
          cv::Scalar(projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255),
          CV_FILLED);

  // true irradiance at feature
  // get current observation
  namer.str(std::string());
  namer << "feature";
  cv::putText(irradianceFrame, namer.str(), cvPoint(30, 65+scale*2*N),
      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);

  cv::Point2f p_f(observations.find(cam_state_id)->second(0),observations.find(cam_state_id)->second(1));
  // move to real pixels
  p_f = image_handler::distortPoint(p_f, cam0.intrinsics, cam0.distortion_model, cam0.distortion_coeffs);

  for(int i = 0; i<N; i++)
  {
    for(int j = 0; j<N; j++)
    {
      float irr = PixelIrradiance(cv::Point2f(p_f.x + (i-(N-1)/2), p_f.y + (j-(N-1)/2)), current_image);
      cv::rectangle(irradianceFrame,
          cv::Point(30+scale*(i+1), 70+scale*(2*N+j)),
          cv::Point(30+scale*(i), 70+scale*(2*N+j+1)),
          cv::Scalar(irr*255, irr*255, irr*255),
          CV_FILLED);
    }
  }

  // residual grid projection, positive - red, negative - blue colored
  namer.str(std::string());
  namer << "residual";
  cv::putText(irradianceFrame, namer.str(), cvPoint(30+scale*N, scale*N/2-5),
      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);

  for(int i = 0; i<N; i++)
    for(int j = 0; j<N; j++)
      if(photo_r(i*N+j)>0)
        cv::rectangle(irradianceFrame,
            cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
            cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
            cv::Scalar(255 - photo_r(i*N+j)*255, 255 - photo_r(i*N+j)*255, 255),
            CV_FILLED);
      else
        cv::rectangle(irradianceFrame,
            cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
            cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
            cv::Scalar(255, 255 + photo_r(i*N+j)*255, 255 + photo_r(i*N+j)*255),
            CV_FILLED);

  // gradient arrow
  /*
  cv::arrowedLine(irradianceFrame,
      cv::Point(30+scale*(N/2 +0.5), 50+scale*(N+(N/2)+0.5)),
      cv::Point(30+scale*(N/2+0.5)+scale*gradientVector.x, 50+scale*(N+(N/2)+0.5)+scale*gradientVector.y),
      cv::Scalar(100, 0, 255),
      1);
  */

  // residual gradient direction
  /*
  cv::arrowedLine(irradianceFrame,
      cv::Point(40+scale*(N+N/2+0.5), 15+scale*((N-0.5))),
      cv::Point(40+scale*(N+N/2+0.5)+scale*residualVector.x, 15+scale*(N-0.5)+scale*residualVector.y),
      cv::Scalar(0, 255, 175),
      3);
  */

  cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);

  /*
  // visualize position of used observations and resulting feature position
  cv::Mat positionFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255));
  cv::resize(positionFrame, positionFrame, cv::Size(), rescale, rescale);

  // draw world zero
  cv::line(positionFrame,
      cv::Point(20,20),
      cv::Point(20,30),
      cv::Scalar(0,0,255),
      CV_FILLED);

  cv::line(positionFrame,
      cv::Point(20,20),
      cv::Point(30,20),
      cv::Scalar(255,0,0),
      CV_FILLED);
  // for every observation, get cam state
  for(auto obs : observations)
  {
    cv::line(positionFrame,
        cv::Point(20,20),
        cv::Point(30,20),
        cv::Scalar(255,0,0),
        CV_FILLED);
  }

  // draw, x y position and arrow with direction - write z next to it

  cv::resize(cam0.featureVisu, cam0.featureVisu, cv::Size(), rescale, rescale);

  cv::hconcat(cam0.featureVisu, positionFrame, cam0.featureVisu);
  */

  // write feature position
  std::stringstream pos_s;
  pos_s << "u: " << observations.begin()->second(0) << " v: " << observations.begin()->second(1);
  cv::putText(cam0.featureVisu, ss.str(), cvPoint(anchorImage.rows + 100, anchorImage.cols - 30),
      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
  // create line?

  //save image
  std::stringstream loc;
  // loc << "/home/raphael/dev/MSCKF_ws/img/feature_" << std::to_string(ros::Time::now().toSec()) << ".jpg";
  //cv::imwrite(loc.str(), cam0.featureVisu);

  cv::imshow("patch", cam0.featureVisu);
  cvWaitKey(0);

  return true;
}

float Feature::PixelIrradiance(cv::Point2f pose, cv::Mat image) const
{
  return ((float)image.at<uint8_t>(pose.y, pose.x))/255.;
}

inline cv::Point2f Feature::pixelDistanceAt(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,
    const CameraCalibration& cam,
    Eigen::Vector3d& in_p) const
{
  cv::Point2f cam_p = projectPositionToCamera(cam_state, cam_state_id, cam, in_p);

  // create vector of patch in pixel plane
  std::vector<cv::Point2f> surroundingPoints;
  surroundingPoints.push_back(cv::Point2f(cam_p.x+1, cam_p.y));
  surroundingPoints.push_back(cv::Point2f(cam_p.x-1, cam_p.y));
  surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y+1));
  surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y-1));

  std::vector<cv::Point2f> pure;
  image_handler::undistortPoints(surroundingPoints,
      cam.intrinsics,
      cam.distortion_model,
      cam.distortion_coeffs,
      pure);

  // transform position into camera frame
  // to get distance multiplier
  Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
  const Eigen::Vector3d& t_c0_w = cam_state.position;
  Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);

  // returns the distance between the pixel points in space
  cv::Point2f distance(fabs(pure[0].x - pure[1].x), fabs(pure[2].y - pure[3].y));

  return distance;
}

cv::Point2f Feature::projectPositionToCamera(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,
    const CameraCalibration& cam,
    Eigen::Vector3d& in_p) const
{
  cv::Point2f out_p;
  cv::Point2f my_p;

  // transform position into camera frame
  Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
  const Eigen::Vector3d& t_c0_w = cam_state.position;
  Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);

  out_p = cv::Point2f(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));

  // if(cam_state_id == observations.begin()->first)
  //printf("undist:\n \tproj pos: %f, %f\n\ttrue pos: %f, %f\n", out_p.x, out_p.y, undist_anchor_center_pos.x, undist_anchor_center_pos.y);

  if (cam.distortion_model.substr(0,3) == "pre-")
    my_p = cv::Point2f(out_p.x * cam.intrinsics[0] + cam.intrinsics[2], out_p.y * cam.intrinsics[1] + cam.intrinsics[3]);
  else
    my_p = image_handler::distortPoint(out_p,
        cam.intrinsics,
        cam.distortion_model,
        cam.distortion_coeffs);

  // printf("truPosition: %f, %f, %f\n", position.x(), position.y(), position.z());
  // printf("camPosition: %f, %f, %f\n", p_c0(0), p_c0(1), p_c0(2));

  return my_p;
}

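/*
 * A minimal usage sketch (hypothetical variables): re-observe the anchor
 * patch in another camera frame and read off its irradiance there, as
 * VisualizePatch above does.
 *
 *   for (auto& p_w : anchorPatch_3d) {
 *     cv::Point2f pix = projectPositionToCamera(cam_state, cam_state_id, cam0, p_w);
 *     double irr = PixelIrradiance(pix, cam0.moving_window.find(cam_state_id)->second.image);
 *   }
 */
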
Eigen::Vector3d Feature::AnchorPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam)
{
  // use undistorted position of point of interest
  // project it back into 3D space using pinhole model
  // save resulting NxN positions for this feature

  Eigen::Vector3d PositionInCamera(in_p.x/anchor_rho, in_p.y/anchor_rho, 1/anchor_rho);
  Eigen::Vector3d PositionInWorld = T_anchor_w.linear()*PositionInCamera + T_anchor_w.translation();
  return PositionInWorld;
  //printf("%f, %f, %f\n",PositionInWorld[0], PositionInWorld[1], PositionInWorld[2]);
}

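/*
 * The back-projection above follows from the inverse depth
 * parameterization: an ideal (undistorted, normalized) pixel (u, v) with
 * inverse depth rho corresponds to the camera-frame point (u, v, 1)/rho,
 * which T_anchor_w then maps into the world frame. Note that all patch
 * points share the anchor feature's rho, i.e. the patch is assumed to lie
 * on a fronto-parallel plane at depth 1/rho.
 */
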
//@test center projection must always be initial feature projection
inline bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
{
  //initialize patch size
  int n = (int)(N-1)/2;

  auto anchor = observations.begin();
  if(cam.moving_window.find(anchor->first) == cam.moving_window.end())
    return false;

  cv::Mat anchorImage = cam.moving_window.find(anchor->first)->second.image;
  cv::Mat anchorImage_deeper;
  anchorImage.convertTo(anchorImage_deeper,CV_16S);
  //TODO remove this?

  cv::Sobel(anchorImage_deeper, xderImage, -1, 1, 0, 3);
  cv::Sobel(anchorImage_deeper, yderImage, -1, 0, 1, 3);

  xderImage/=8.;
  yderImage/=8.;

  cv::convertScaleAbs(xderImage, abs_xderImage);
  cv::convertScaleAbs(yderImage, abs_yderImage);

  cv::GaussianBlur(anchorImage, anchorImage_blurred, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT);

  auto u = anchor->second(0);//*cam.intrinsics[0] + cam.intrinsics[2];
  auto v = anchor->second(1);//*cam.intrinsics[1] + cam.intrinsics[3];

  // check if image has been pre-undistorted
  if(cam.distortion_model.substr(0,3) == "pre-")
  {
    std::cout << "is a pre" << std::endl;
    //project onto pixel plane
    undist_anchor_center_pos = cv::Point2f(u * cam.intrinsics[0] + cam.intrinsics[2], v * cam.intrinsics[1] + cam.intrinsics[3]);

    // create vector of patch in pixel plane
    for(double u_run = -n; u_run <= n; u_run++)
      for(double v_run = -n; v_run <= n; v_run++)
        anchorPatch_real.push_back(cv::Point2f(undist_anchor_center_pos.x+u_run, undist_anchor_center_pos.y+v_run));

    //project back into u,v
    for(int i = 0; i < N*N; i++)
      anchorPatch_ideal.push_back(cv::Point2f((anchorPatch_real[i].x-cam.intrinsics[2])/cam.intrinsics[0], (anchorPatch_real[i].y-cam.intrinsics[3])/cam.intrinsics[1]));
  }
  else
  {
    // get feature in undistorted pixel space
    // this only reverts from 'pure' space into undistorted pixel space using camera matrix
    cv::Point2f und_pix_p = image_handler::distortPoint(cv::Point2f(u, v),
        cam.intrinsics,
        cam.distortion_model,
        cam.distortion_coeffs);
    // create vector of patch in pixel plane
    for(double u_run = -n; u_run <= n; u_run++)
      for(double v_run = -n; v_run <= n; v_run++)
        anchorPatch_real.push_back(cv::Point2f(und_pix_p.x+u_run, und_pix_p.y+v_run));

    //create undistorted pure points
    image_handler::undistortPoints(anchorPatch_real,
        cam.intrinsics,
        cam.distortion_model,
        cam.distortion_coeffs,
        anchorPatch_ideal);
  }

  // save anchor position for later visualization
  anchor_center_pos = anchorPatch_real[(N*N-1)/2];

  // save true pixel patch position
  for(auto point : anchorPatch_real)
    if(point.x - n < 0 || point.x + n >= cam.resolution(0)-1 || point.y - n < 0 || point.y + n >= cam.resolution(1)-1)
      return false;

  for(auto point : anchorPatch_real)
    anchorPatch.push_back(PixelIrradiance(point, anchorImage));

  // project patch pixel to 3D space in camera coordinate system
  for(auto point : anchorPatch_ideal)
    anchorPatch_3d.push_back(AnchorPixelToPosition(point, cam));

  is_anchored = true;

  return true;
}

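/*
 * A minimal call-order sketch (hypothetical driver code): the feature must
 * be triangulated before the anchor patch can be back-projected, since
 * AnchorPixelToPosition relies on anchor_rho and T_anchor_w, which are set
 * during position initialization.
 *
 *   if (f.checkMotion(cam_states) && f.initializePosition(cam_states))
 *     f.initializeAnchor(cam0, N);
 */
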
inline bool Feature::initializeRho(const CamStateServer& cam_states) {

  // Organize camera poses and feature observations properly.
  std::vector<Eigen::Isometry3d,
    Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
  std::vector<Eigen::Vector2d,
    Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);

  for (auto& m : observations) {
    auto cam_state_iter = cam_states.find(m.first);
    if (cam_state_iter == cam_states.end()) continue;

    // Add the measurement.
    measurements.push_back(m.second.head<2>());
    measurements.push_back(m.second.tail<2>());

    // This camera pose will take a vector from this camera frame
    // to the world frame.
    Eigen::Isometry3d cam0_pose;
    cam0_pose.linear() = quaternionToRotation(
        cam_state_iter->second.orientation).transpose();
    cam0_pose.translation() = cam_state_iter->second.position;

    Eigen::Isometry3d cam1_pose;
    cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();

    cam_poses.push_back(cam0_pose);
    cam_poses.push_back(cam1_pose);
  }

  // All camera poses should be modified such that it takes a
  // vector from the first camera frame in the buffer to this
  // camera frame.
  Eigen::Isometry3d T_c0_w = cam_poses[0];
  T_anchor_w = T_c0_w;
  for (auto& pose : cam_poses)
    pose = pose.inverse() * T_c0_w;

  // Generate initial guess
  double initial_depth = 0;
  initial_depth = generateInitialDepth(cam_poses[cam_poses.size()-1], measurements[0],
      measurements[measurements.size()-1]);

  double solution = 1.0/initial_depth;

  // Apply the Levenberg-Marquardt method to solve for the inverse depth.
  double lambda = optimization_config.initial_damping;
  int inner_loop_cntr = 0;
  int outer_loop_cntr = 0;
  bool is_cost_reduced = false;
  double delta_norm = 0;

  // Compute the initial cost.
  double total_cost = 0.0;
  for (int i = 0; i < cam_poses.size(); ++i) {
    double this_cost = 0.0;
    Rhocost(cam_poses[i], solution, measurements[0], measurements[i], this_cost);
    total_cost += this_cost;
  }

  // Outer loop.
  do {
    Eigen::Matrix<double, 1, 1> A = Eigen::Matrix<double, 1, 1>::Zero();
    Eigen::Matrix<double, 1, 1> b = Eigen::Matrix<double, 1, 1>::Zero();

    for (int i = 0; i < cam_poses.size(); ++i) {
      Eigen::Matrix<double, 2, 1> J;
      Eigen::Vector2d r;
      double w;

      RhoJacobian(cam_poses[i], solution, measurements[0], measurements[i], J, r, w);

      if (w == 1) {
        A += J.transpose() * J;
        b += J.transpose() * r;
      } else {
        double w_square = w * w;
        A += w_square * J.transpose() * J;
        b += w_square * J.transpose() * r;
      }
    }

    // Inner loop.
    // Solve for the delta that can reduce the total cost.
    do {
      Eigen::Matrix<double, 1, 1> damper = lambda*Eigen::Matrix<double, 1, 1>::Identity();
      Eigen::Matrix<double, 1, 1> delta = (A+damper).ldlt().solve(b);
      double new_solution = solution - delta(0,0);
      delta_norm = delta.norm();

      double new_cost = 0.0;
      for (int i = 0; i < cam_poses.size(); ++i) {
        double this_cost = 0.0;
        Rhocost(cam_poses[i], new_solution, measurements[0], measurements[i], this_cost);
        new_cost += this_cost;
      }

      if (new_cost < total_cost) {
        is_cost_reduced = true;
        solution = new_solution;
        total_cost = new_cost;
        lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
      } else {
        is_cost_reduced = false;
        lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
      }

    } while (inner_loop_cntr++ <
        optimization_config.inner_loop_max_iteration && !is_cost_reduced);

    inner_loop_cntr = 0;

  } while (outer_loop_cntr++ <
      optimization_config.outer_loop_max_iteration &&
      delta_norm > optimization_config.estimation_precision);

  // Convert the feature position from inverse depth
  // representation to its 3d coordinate.
  Eigen::Vector3d final_position(measurements[0](0)/solution,
      measurements[0](1)/solution, 1.0/solution);

  // Check if the solution is valid. Make sure the feature
  // is in front of every camera frame observing it.
  bool is_valid_solution = true;
  for (const auto& pose : cam_poses) {
    Eigen::Vector3d position =
      pose.linear()*final_position + pose.translation();
    if (position(2) <= 0) {
      is_valid_solution = false;
      break;
    }
  }

  // save inverse depth distance from camera
  anchor_rho = solution;

  // Convert the feature position to the world frame.
  position = T_c0_w.linear()*final_position + T_c0_w.translation();

  if (is_valid_solution)
    is_initialized = true;

  return is_valid_solution;
}

bool Feature::initializePosition(const CamStateServer& cam_states) {

  // Organize camera poses and feature observations properly.
  std::vector<Eigen::Isometry3d,
    Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
  std::vector<Eigen::Vector2d,
    Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);

  for (auto& m : observations) {
    // TODO: This should be handled properly. Normally, the
    // required camera states should all be available in
    // the input cam_states buffer.
    auto cam_state_iter = cam_states.find(m.first);
    if (cam_state_iter == cam_states.end()) continue;

    // Add the measurement.
    measurements.push_back(m.second.head<2>());
    measurements.push_back(m.second.tail<2>());

    // This camera pose will take a vector from this camera frame
    // to the world frame.
    Eigen::Isometry3d cam0_pose;
    cam0_pose.linear() = quaternionToRotation(
        cam_state_iter->second.orientation).transpose();
    cam0_pose.translation() = cam_state_iter->second.position;

    Eigen::Isometry3d cam1_pose;
    cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();

    cam_poses.push_back(cam0_pose);
    cam_poses.push_back(cam1_pose);
  }

  // All camera poses should be modified such that it takes a
  // vector from the first camera frame in the buffer to this
  // camera frame.
  Eigen::Isometry3d T_c0_w = cam_poses[0];
  T_anchor_w = T_c0_w;
  for (auto& pose : cam_poses)
    pose = pose.inverse() * T_c0_w;

  // Generate initial guess
  Eigen::Vector3d initial_position(0.0, 0.0, 0.0);
  generateInitialGuess(cam_poses[cam_poses.size()-1], measurements[0],
      measurements[measurements.size()-1], initial_position);
  Eigen::Vector3d solution(
      initial_position(0)/initial_position(2),
      initial_position(1)/initial_position(2),
      1.0/initial_position(2));

  // Apply the Levenberg-Marquardt method to solve for the 3d position.
  double lambda = optimization_config.initial_damping;
  int inner_loop_cntr = 0;
  int outer_loop_cntr = 0;
  bool is_cost_reduced = false;
  double delta_norm = 0;

  // Compute the initial cost.
  double total_cost = 0.0;
  for (int i = 0; i < cam_poses.size(); ++i) {
    double this_cost = 0.0;
    cost(cam_poses[i], solution, measurements[i], this_cost);
    total_cost += this_cost;
  }

  // Outer loop.
  do {
    Eigen::Matrix3d A = Eigen::Matrix3d::Zero();
    Eigen::Vector3d b = Eigen::Vector3d::Zero();

    for (int i = 0; i < cam_poses.size(); ++i) {
      Eigen::Matrix<double, 2, 3> J;
      Eigen::Vector2d r;
      double w;

      jacobian(cam_poses[i], solution, measurements[i], J, r, w);

      if (w == 1) {
        A += J.transpose() * J;
        b += J.transpose() * r;
      } else {
        double w_square = w * w;
        A += w_square * J.transpose() * J;
        b += w_square * J.transpose() * r;
      }
    }

    // Inner loop.
    // Solve for the delta that can reduce the total cost.
    do {
      Eigen::Matrix3d damper = lambda * Eigen::Matrix3d::Identity();
      Eigen::Vector3d delta = (A+damper).ldlt().solve(b);
      Eigen::Vector3d new_solution = solution - delta;
      delta_norm = delta.norm();

      double new_cost = 0.0;
      for (int i = 0; i < cam_poses.size(); ++i) {
        double this_cost = 0.0;
        cost(cam_poses[i], new_solution, measurements[i], this_cost);
        new_cost += this_cost;
      }

      if (new_cost < total_cost) {
        is_cost_reduced = true;
        solution = new_solution;
        total_cost = new_cost;
        lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
      } else {
        is_cost_reduced = false;
        lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
      }

    } while (inner_loop_cntr++ <
        optimization_config.inner_loop_max_iteration && !is_cost_reduced);

    inner_loop_cntr = 0;

  } while (outer_loop_cntr++ <
      optimization_config.outer_loop_max_iteration &&
      delta_norm > optimization_config.estimation_precision);

  // Convert the feature position from inverse depth
  // representation to its 3d coordinate.
  Eigen::Vector3d final_position(solution(0)/solution(2),
      solution(1)/solution(2), 1.0/solution(2));

  // Check if the solution is valid. Make sure the feature
  // is in front of every camera frame observing it.
  bool is_valid_solution = true;
  for (const auto& pose : cam_poses) {
    Eigen::Vector3d position =
      pose.linear()*final_position + pose.translation();
    if (position(2) <= 0) {
      is_valid_solution = false;
      break;
    }
  }

  // save inverse depth distance from camera
  anchor_rho = solution(2);
  std::cout << "from feature: " << anchor_rho << std::endl;

  // Convert the feature position to the world frame.
  position = T_c0_w.linear()*final_position + T_c0_w.translation();

  if (is_valid_solution)
    is_initialized = true;

  return is_valid_solution;
}

} // namespace msckf_vio

#endif // MSCKF_VIO_FEATURE_H