11 Commits

8 changed files with 524 additions and 513 deletions

View File

@ -70,11 +70,6 @@ struct Feature {
position(Eigen::Vector3d::Zero()),
is_initialized(false), is_anchored(false) {}
void Rhocost(const Eigen::Isometry3d& T_c0_ci,
const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
double& e) const;
/*
* @brief cost Compute the cost of the camera observations
* @param T_c0_c1 A rigid body transformation takes
@ -87,13 +82,6 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
double& e) const;
bool initializeRho(const CamStateServer& cam_states);
inline void RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
double& w) const;
/*
* @brief jacobian Compute the Jacobian of the camera observation
* @param T_c0_c1 A rigid body transformation takes
@ -109,10 +97,6 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
double& w) const;
inline double generateInitialDepth(
const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
const Eigen::Vector2d& z2) const;
/*
* @brief generateInitialGuess Compute the initial guess of
* the feature's 3d position using only two views.
@ -137,6 +121,15 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
inline bool checkMotion(
const CamStateServer& cam_states) const;
/*
* @brief AnchorPixelToPosition projects an undistorted point in the
* anchor frame back into the real world using the rho calculated
* based on feature position
*/
inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam);
/*
* @brief InitializeAnchor generates the NxN patch around the
* feature in the Anchor image
@ -164,14 +157,14 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
inline bool initializePosition(
const CamStateServer& cam_states);
cv::Point2f pixelDistanceAt(
cv::Point2f pixelDistanceAt(
const CAMState& cam_state,
const StateIDType& cam_state_id,
const CameraCalibration& cam,
Eigen::Vector3d& in_p) const;
/*
* @brief project PositionToCamera Takes a 3d position in a world frame
* and projects it into the passed camera frame using pinhole projection
@ -184,11 +177,6 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
const CameraCalibration& cam,
Eigen::Vector3d& in_p) const;
double Kernel(
const cv::Point2f pose,
const cv::Mat frame,
std::string type) const;
/*
* @brief IrradianceAnchorPatch_Camera returns irradiance values
* of the Anchor Patch position in a camera frame
@ -202,7 +190,7 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
std::vector<double>& anchorPatch_estimate,
IlluminationParameter& estimatedIllumination) const;
bool MarkerGeneration(
bool MarkerGeneration(
ros::Publisher& marker_pub,
const CamStateServer& cam_states) const;
@ -210,16 +198,20 @@ bool MarkerGeneration(
const CAMState& cam_state,
const StateIDType& cam_state_id,
CameraCalibration& cam0,
const Eigen::VectorXd& photo_r,
std::stringstream& ss,
cv::Point2f gradientVector,
cv::Point2f residualVector) const;
/*
* @brief AnchorPixelToPosition uses the calculated pixels
* of the anchor patch to generate 3D positions of all of them
const std::vector<double> photo_r,
std::stringstream& ss) const;
/* @brief takes a pure pixel position (1m from image)
* converts to actual pixel value and returns patch irradiance
* around this pixel
*/
inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p,
const CameraCalibration& cam);
void PatchAroundPurePixel(cv::Point2f p,
int N,
const CameraCalibration& cam,
const StateIDType& cam_state_id,
std::vector<float>& return_i) const;
/*
* @brief Irradiance returns irradiance value of a pixel
@ -279,26 +271,6 @@ typedef std::map<FeatureIDType, Feature, std::less<int>,
Eigen::aligned_allocator<
std::pair<const FeatureIDType, Feature> > > MapServer;
void Feature::Rhocost(const Eigen::Isometry3d& T_c0_ci,
    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
    double& e) const
{
  // Inverse depth of the feature along the first observation's bearing.
  const double rho = x;

  // Predict the homogeneous point in the ci frame as in Equation (37):
  // h = R * [z1; 1] + rho * t.
  const Eigen::Vector3d h =
      T_c0_ci.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0)
      + rho * T_c0_ci.translation();

  // Project the prediction onto the normalized image plane of ci.
  const Eigen::Vector2d z_hat(h(0) / h(2), h(1) / h(2));

  // Squared reprojection error against the second observation.
  e = (z_hat - z2).squaredNorm();
}
void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
@ -311,9 +283,9 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
Eigen::Vector3d h = T_c0_ci.linear()*
Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
double h1 = h(0);
double h2 = h(1);
double h3 = h(2);
double& h1 = h(0);
double& h2 = h(1);
double& h3 = h(2);
// Predict the feature observation in ci frame.
Eigen::Vector2d z_hat(h1/h3, h2/h3);
@ -323,42 +295,6 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
return;
}
/*
 * @brief RhoJacobian Jacobian and Huber-weighted residual of a single
 *        reprojection w.r.t. the inverse depth rho.
 * @param T_c0_ci takes a vector from the first camera frame to frame ci
 * @param x current inverse-depth estimate (rho)
 * @param z1 observation in the first frame (normalized image plane)
 * @param z2 observation in frame ci (normalized image plane)
 * @return J d(z_hat)/d(rho), r = z_hat - z2, w Huber weight
 */
void Feature::RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
    Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
    double& w) const
{
  const double& rho = x;

  // Predicted homogeneous point in ci (same model as Rhocost):
  // h = R * [z1; 1] + rho * t.
  // BUG FIX: the bearing must be built from z1 only; the original used
  // z2(1) for the y component.
  Eigen::Vector3d h = T_c0_ci.linear()*
    Eigen::Vector3d(z1(0), z1(1), 1.0) + rho*T_c0_ci.translation();
  double& h1 = h(0);
  double& h2 = h(1);
  double& h3 = h(2);

  // Jacobian via the chain rule: dz_hat/drho = d(h1/h3, h2/h3)/dh * dh/drho,
  // with dh/drho = t (the translation column).
  // BUG FIX: the original dropped the translation terms (implicitly assuming
  // t = [0, 0, 1]); the previously computed-but-unused matrix W shows this
  // chain rule was intended. The unused W has been removed.
  const Eigen::Vector3d& t = T_c0_ci.translation();
  J(0,0) = t(0)/h3 - h1*t(2)/(h3*h3);
  J(1,0) = t(1)/h3 - h2*t(2)/(h3*h3);

  // Compute the residual.
  Eigen::Vector2d z_hat(h1/h3, h2/h3);
  r = z_hat - z2;

  // Huber weight based on the residual norm.
  double e = r.norm();
  if (e <= optimization_config.huber_epsilon)
    w = 1.0;
  else
    w = optimization_config.huber_epsilon / (2*e);
  return;
}
void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
@ -398,9 +334,9 @@ void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
return;
}
double Feature::generateInitialDepth(
void Feature::generateInitialGuess(
const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
const Eigen::Vector2d& z2) const
const Eigen::Vector2d& z2, Eigen::Vector3d& p) const
{
// Construct a least square problem to solve the depth.
Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);
@ -415,15 +351,6 @@ double Feature::generateInitialDepth(
// Solve for the depth.
double depth = (A.transpose() * A).inverse() * A.transpose() * b;
return depth;
}
void Feature::generateInitialGuess(
const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
const Eigen::Vector2d& z2, Eigen::Vector3d& p) const
{
double depth = generateInitialDepth(T_c1_c2, z1, z2);
p(0) = z1(0) * depth;
p(1) = z1(1) * depth;
p(2) = depth;
@ -473,26 +400,6 @@ bool Feature::checkMotion(const CamStateServer& cam_states) const
else return false;
}
double Feature::Kernel(
    const cv::Point2f pose,
    const cv::Mat frame,
    std::string type) const
{
  // Select the 3x3 convolution kernel by name; unknown names yield a
  // zero kernel (and thus a zero response).
  Eigen::Matrix<double, 3, 3> K = Eigen::Matrix<double, 3, 3>::Zero();
  if (type == "Sobel_x")
    K << -1., 0., 1., -2., 0., 2., -1., 0., 1.;
  else if (type == "Sobel_y")
    K << -1., -2., -1., 0., 0., 0., 1., 2., 1.;

  // Half-width of the kernel window.
  const int offs = (int)(K.rows() - 1) / 2;

  // Convolve the kernel with the (0..1 scaled) intensities around `pose`.
  // NOTE(review): no bounds check — assumes `pose` is at least `offs`
  // pixels away from the image border; confirm at the call sites.
  double response = 0;
  for (int u = 0; u < K.rows(); ++u)
    for (int v = 0; v < K.cols(); ++v)
      response += ((float)frame.at<uint8_t>(pose.y + v - offs, pose.x + u - offs)) / 255
                  * (float)K(v, u);
  return response;
}
bool Feature::estimate_FrameIrradiance(
const CAMState& cam_state,
const StateIDType& cam_state_id,
@ -506,7 +413,10 @@ bool Feature::estimate_FrameIrradiance(
auto anchor = observations.begin();
if(cam0.moving_window.find(anchor->first) == cam0.moving_window.end())
{
std::cout << "anchor not in buffer anymore!" << std::endl;
return false;
}
double anchorExposureTime_ms = cam0.moving_window.find(anchor->first)->second.exposureTime_ms;
double frameExposureTime_ms = cam0.moving_window.find(cam_state_id)->second.exposureTime_ms;
@ -648,10 +558,8 @@ bool Feature::VisualizePatch(
const CAMState& cam_state,
const StateIDType& cam_state_id,
CameraCalibration& cam0,
const Eigen::VectorXd& photo_r,
std::stringstream& ss,
cv::Point2f gradientVector,
cv::Point2f residualVector) const
const std::vector<double> photo_r,
std::stringstream& ss) const
{
double rescale = 1;
@ -714,10 +622,9 @@ bool Feature::VisualizePatch(
CV_FILLED);
// irradiance grid projection
namer.str(std::string());
namer << "projection";
cv::putText(irradianceFrame, namer.str() , cvPoint(30, 45+scale*N),
cv::putText(irradianceFrame, namer.str(), cvPoint(30, 45+scale*N),
cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
for(int i = 0; i<N; i++)
@ -761,17 +668,17 @@ bool Feature::VisualizePatch(
for(int i = 0; i<N; i++)
for(int j = 0; j<N; j++)
if(photo_r(i*N+j)>0)
if(photo_r[i*N+j]>0)
cv::rectangle(irradianceFrame,
cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
cv::Scalar(255 - photo_r(i*N+j)*255, 255 - photo_r(i*N+j)*255, 255),
cv::Scalar(255 - photo_r[i*N+j]*255, 255 - photo_r[i*N+j]*255, 255),
CV_FILLED);
else
cv::rectangle(irradianceFrame,
cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
cv::Scalar(255, 255 + photo_r(i*N+j)*255, 255 + photo_r(i*N+j)*255),
cv::Scalar(255, 255 + photo_r[i*N+j]*255, 255 + photo_r[i*N+j]*255),
CV_FILLED);
// gradient arrow
@ -781,7 +688,6 @@ bool Feature::VisualizePatch(
cv::Point(30+scale*(N/2+0.5)+scale*gradientVector.x, 50+scale*(N+(N/2)+0.5)+scale*gradientVector.y),
cv::Scalar(100, 0, 255),
1);
*/
// residual gradient direction
cv::arrowedLine(irradianceFrame,
@ -790,11 +696,14 @@ bool Feature::VisualizePatch(
cv::Scalar(0, 255, 175),
3);
*/
cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);
/*
// visualize position of used observations and resulting feature position
/*
cv::Mat positionFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255));
cv::resize(positionFrame, positionFrame, cv::Size(), rescale, rescale);
@ -827,7 +736,9 @@ bool Feature::VisualizePatch(
cv::hconcat(cam0.featureVisu, positionFrame, cam0.featureVisu);
*/
*/
// write feature position
std::stringstream pos_s;
pos_s << "u: " << observations.begin()->second(0) << " v: " << observations.begin()->second(1);
@ -845,6 +756,25 @@ bool Feature::VisualizePatch(
cvWaitKey(0);
}
void Feature::PatchAroundPurePixel(cv::Point2f p,
                 int N,
                 const CameraCalibration& cam,
                 const StateIDType& cam_state_id,
                 std::vector<float>& return_i) const
{
  // Half-width of the NxN patch (N is assumed odd).
  const int half = (int)(N - 1) / 2;

  // Image of the requested camera state from the moving window.
  cv::Mat image = cam.moving_window.find(cam_state_id)->second.image;

  // The input point lives on the undistorted (1 m) image plane; map it
  // back to an actual pixel location first.
  cv::Point2f center = image_handler::distortPoint(p,
                                                   cam.intrinsics,
                                                   cam.distortion_model,
                                                   cam.distortion_coeffs);

  // Append the irradiance of every pixel in the patch, column-major
  // around the distorted center (double offsets kept to preserve the
  // original float rounding behavior).
  for (double du = -half; du <= half; du++)
    for (double dv = -half; dv <= half; dv++)
      return_i.push_back(
          PixelIrradiance(cv::Point2f(center.x + du, center.y + dv), image));
}
float Feature::PixelIrradiance(cv::Point2f pose, cv::Mat image) const
{
@ -881,7 +811,6 @@ cv::Point2f Feature::pixelDistanceAt(
}
cv::Point2f Feature::projectPositionToCamera(
const CAMState& cam_state,
const StateIDType& cam_state_id,
@ -986,157 +915,10 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
is_anchored = true;
return true;
}
/*
bool Feature::initializeRho(const CamStateServer& cam_states) {
// Organize camera poses and feature observations properly.
std::vector<Eigen::Isometry3d,
Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
std::vector<Eigen::Vector2d,
Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
for (auto& m : observations) {
auto cam_state_iter = cam_states.find(m.first);
if (cam_state_iter == cam_states.end()) continue;
// Add the measurement.
measurements.push_back(m.second.head<2>());
measurements.push_back(m.second.tail<2>());
// This camera pose will take a vector from this camera frame
// to the world frame.
Eigen::Isometry3d cam0_pose;
cam0_pose.linear() = quaternionToRotation(
cam_state_iter->second.orientation).transpose();
cam0_pose.translation() = cam_state_iter->second.position;
Eigen::Isometry3d cam1_pose;
cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
cam_poses.push_back(cam0_pose);
cam_poses.push_back(cam1_pose);
}
// All camera poses should be modified such that it takes a
// vector from the first camera frame in the buffer to this
// camera frame.
Eigen::Isometry3d T_c0_w = cam_poses[0];
T_anchor_w = T_c0_w;
for (auto& pose : cam_poses)
pose = pose.inverse() * T_c0_w;
// Generate initial guess
double initial_depth = 0;
initial_depth = generateInitialDepth(cam_poses[cam_poses.size()-1], measurements[0],
measurements[measurements.size()-1]);
double solution = 1.0/initial_depth;
// Apply Levenberg-Marquart method to solve for the 3d position.
double lambda = optimization_config.initial_damping;
int inner_loop_cntr = 0;
int outer_loop_cntr = 0;
bool is_cost_reduced = false;
double delta_norm = 0;
// Compute the initial cost.
double total_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
Rhocost(cam_poses[i], solution, measurements[0], measurements[i], this_cost);
total_cost += this_cost;
}
// Outer loop.
do {
Eigen::Matrix<double, 1, 1> A = Eigen::Matrix<double, 1, 1>::Zero();
Eigen::Matrix<double, 1, 1> b = Eigen::Matrix<double, 1, 1>::Zero();
for (int i = 0; i < cam_poses.size(); ++i) {
Eigen::Matrix<double, 2, 1> J;
Eigen::Vector2d r;
double w;
RhoJacobian(cam_poses[i], solution, measurements[0], measurements[i], J, r, w);
if (w == 1) {
A += J.transpose() * J;
b += J.transpose() * r;
} else {
double w_square = w * w;
A += w_square * J.transpose() * J;
b += w_square * J.transpose() * r;
}
}
// Inner loop.
// Solve for the delta that can reduce the total cost.
do {
Eigen::Matrix<double, 1, 1> damper = lambda*Eigen::Matrix<double, 1, 1>::Identity();
Eigen::Matrix<double, 1, 1> delta = (A+damper).ldlt().solve(b);
double new_solution = solution - delta(0,0);
delta_norm = delta.norm();
double new_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
Rhocost(cam_poses[i], new_solution, measurements[0], measurements[i], this_cost);
new_cost += this_cost;
}
if (new_cost < total_cost) {
is_cost_reduced = true;
solution = new_solution;
total_cost = new_cost;
lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
} else {
is_cost_reduced = false;
lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
}
} while (inner_loop_cntr++ <
optimization_config.inner_loop_max_iteration && !is_cost_reduced);
inner_loop_cntr = 0;
} while (outer_loop_cntr++ <
optimization_config.outer_loop_max_iteration &&
delta_norm > optimization_config.estimation_precision);
// Convert the feature position from inverse depth
// representation to its 3d coordinate.
Eigen::Vector3d final_position(measurements[0](0)/solution,
measurements[0](1)/solution, 1.0/solution);
// Check if the solution is valid. Make sure the feature
// is in front of every camera frame observing it.
bool is_valid_solution = true;
for (const auto& pose : cam_poses) {
Eigen::Vector3d position =
pose.linear()*final_position + pose.translation();
if (position(2) <= 0) {
is_valid_solution = false;
break;
}
}
//save inverse depth distance from camera
anchor_rho = solution;
// Convert the feature position to the world frame.
position = T_c0_w.linear()*final_position + T_c0_w.translation();
if (is_valid_solution)
is_initialized = true;
return is_valid_solution;
}
bool Feature::initializePosition(const CamStateServer& cam_states) {
// Organize camera poses and feature observations properly.
std::vector<Eigen::Isometry3d,
Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
std::vector<Eigen::Vector2d,
@ -1274,7 +1056,156 @@ bool Feature::initializePosition(const CamStateServer& cam_states) {
//save inverse depth distance from camera
anchor_rho = solution(2);
std::cout << "from feature: " << anchor_rho << std::endl;
// Convert the feature position to the world frame.
position = T_c0_w.linear()*final_position + T_c0_w.translation();
if (is_valid_solution)
is_initialized = true;
return is_valid_solution;
}
*/
bool Feature::initializePosition(const CamStateServer& cam_states) {
// Organize camera poses and feature observations properly.
std::vector<Eigen::Isometry3d,
Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
std::vector<Eigen::Vector2d,
Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
for (auto& m : observations) {
// TODO: This should be handled properly. Normally, the
// required camera states should all be available in
// the input cam_states buffer.
auto cam_state_iter = cam_states.find(m.first);
if (cam_state_iter == cam_states.end()) continue;
// Add the measurement.
measurements.push_back(m.second.head<2>());
measurements.push_back(m.second.tail<2>());
// This camera pose will take a vector from this camera frame
// to the world frame.
Eigen::Isometry3d cam0_pose;
cam0_pose.linear() = quaternionToRotation(
cam_state_iter->second.orientation).transpose();
cam0_pose.translation() = cam_state_iter->second.position;
Eigen::Isometry3d cam1_pose;
cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
cam_poses.push_back(cam0_pose);
cam_poses.push_back(cam1_pose);
}
// All camera poses should be modified such that it takes a
// vector from the first camera frame in the buffer to this
// camera frame.
Eigen::Isometry3d T_c0_w = cam_poses[0];
T_anchor_w = T_c0_w;
for (auto& pose : cam_poses)
pose = pose.inverse() * T_c0_w;
// Generate initial guess
Eigen::Vector3d initial_position(0.0, 0.0, 0.0);
generateInitialGuess(cam_poses[cam_poses.size()-1], measurements[0],
measurements[measurements.size()-1], initial_position);
Eigen::Vector3d solution(
initial_position(0)/initial_position(2),
initial_position(1)/initial_position(2),
1.0/initial_position(2));
// Apply Levenberg-Marquart method to solve for the 3d position.
double lambda = optimization_config.initial_damping;
int inner_loop_cntr = 0;
int outer_loop_cntr = 0;
bool is_cost_reduced = false;
double delta_norm = 0;
// Compute the initial cost.
double total_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
cost(cam_poses[i], solution, measurements[i], this_cost);
total_cost += this_cost;
}
// Outer loop.
do {
Eigen::Matrix3d A = Eigen::Matrix3d::Zero();
Eigen::Vector3d b = Eigen::Vector3d::Zero();
for (int i = 0; i < cam_poses.size(); ++i) {
Eigen::Matrix<double, 2, 3> J;
Eigen::Vector2d r;
double w;
jacobian(cam_poses[i], solution, measurements[i], J, r, w);
if (w == 1) {
A += J.transpose() * J;
b += J.transpose() * r;
} else {
double w_square = w * w;
A += w_square * J.transpose() * J;
b += w_square * J.transpose() * r;
}
}
// Inner loop.
// Solve for the delta that can reduce the total cost.
do {
Eigen::Matrix3d damper = lambda * Eigen::Matrix3d::Identity();
Eigen::Vector3d delta = (A+damper).ldlt().solve(b);
Eigen::Vector3d new_solution = solution - delta;
delta_norm = delta.norm();
double new_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
cost(cam_poses[i], new_solution, measurements[i], this_cost);
new_cost += this_cost;
}
if (new_cost < total_cost) {
is_cost_reduced = true;
solution = new_solution;
total_cost = new_cost;
lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
} else {
is_cost_reduced = false;
lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
}
} while (inner_loop_cntr++ <
optimization_config.inner_loop_max_iteration && !is_cost_reduced);
inner_loop_cntr = 0;
} while (outer_loop_cntr++ <
optimization_config.outer_loop_max_iteration &&
delta_norm > optimization_config.estimation_precision);
// Convert the feature position from inverse depth
// representation to its 3d coordinate.
Eigen::Vector3d final_position(solution(0)/solution(2),
solution(1)/solution(2), 1.0/solution(2));
// Check if the solution is valid. Make sure the feature
// is in front of every camera frame observing it.
bool is_valid_solution = true;
for (const auto& pose : cam_poses) {
Eigen::Vector3d position =
pose.linear()*final_position + pose.translation();
if (position(2) <= 0) {
is_valid_solution = false;
break;
}
}
//save inverse depth distance from camera
anchor_rho = solution(2);
// Convert the feature position to the world frame.
position = T_c0_w.linear()*final_position + T_c0_w.translation();

View File

@ -45,6 +45,7 @@ void undistortPoint(
cv::Point2f& pt_out,
const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(),
const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0));
}
}
#endif

View File

@ -13,6 +13,13 @@
namespace msckf_vio {
// Absolute value of a double (hand-rolled equivalent of std::fabs for
// finite inputs).
inline double absoluted(double a){
  return (a < 0) ? -a : a;
}
/*
* @brief Create a skew-symmetric matrix from a 3-element vector.
* @note Performs the operation:

View File

@ -14,7 +14,7 @@
#include <string>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <math.h>
#include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
@ -38,6 +38,8 @@
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#define PI 3.14159265
namespace msckf_vio {
/*
* @brief MsckfVio Implements the algorithm in
@ -195,9 +197,9 @@ class MsckfVio {
// for a single feature observed at a single camera frame.
void measurementJacobian(const StateIDType& cam_state_id,
const FeatureIDType& feature_id,
Eigen::Matrix<double, 2, 6>& H_x,
Eigen::Matrix<double, 2, 3>& H_f,
Eigen::Vector2d& r);
Eigen::Matrix<double, 4, 6>& H_x,
Eigen::Matrix<double, 4, 3>& H_f,
Eigen::Vector4d& r);
// This function computes the Jacobian of all measurements viewed
// in the given camera states of this feature.
void featureJacobian(const FeatureIDType& feature_id,

View File

@ -22,7 +22,7 @@
<param name="PHOTOMETRIC" value="true"/>
<!-- Debugging Flaggs -->
<param name="PrintImages" value="true"/>
<param name="PrintImages" value="false"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="7"/>

View File

@ -24,7 +24,7 @@
<param name="PrintImages" value="true"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="1"/>
<param name="patch_size_n" value="7"/>
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>

View File

@ -14,7 +14,6 @@ namespace msckf_vio {
namespace image_handler {
void undistortPoint(
const cv::Point2f& pt_in,
const cv::Vec4d& intrinsics,

View File

@ -404,18 +404,8 @@ void MsckfVio::imageCallback(
const sensor_msgs::ImageConstPtr& cam1_img,
const CameraMeasurementConstPtr& feature_msg)
{
if(PRINTIMAGES)
{
std::cout << "stopped playpack" << std::endl;
nh.setParam("/play_bag", false);
}
// Return if the gravity vector has not been set.
if (!is_gravity_set)
{
nh.setParam("/play_bag", true);
return;
}
if (!is_gravity_set) return;
// Start the system if the first image is received.
// The frame where the first image is received will be
@ -438,9 +428,7 @@ void MsckfVio::imageCallback(
// save true state in separate state vector
//if(ErrState)
//{
//batchTruthProcessing(feature_msg->header.stamp.toSec());
batchTruthProcessing(feature_msg->header.stamp.toSec());
if(GROUNDTRUTH)
{
@ -452,7 +440,6 @@ void MsckfVio::imageCallback(
state_server.imu_state.R_imu_cam0 = true_state_server.imu_state.R_imu_cam0;
state_server.imu_state.t_cam0_imu = true_state_server.imu_state.t_cam0_imu;
}
//}
double imu_processing_time = (
ros::Time::now()-start_time).toSec();
@ -464,7 +451,7 @@ void MsckfVio::imageCallback(
PhotometricStateAugmentation(feature_msg->header.stamp.toSec());
}
else
PhotometricStateAugmentation(feature_msg->header.stamp.toSec());
stateAugmentation(feature_msg->header.stamp.toSec());
double state_augmentation_time = (
ros::Time::now()-start_time).toSec();
@ -522,13 +509,6 @@ void MsckfVio::imageCallback(
publish_time, publish_time/processing_time);
}
if(PRINTIMAGES)
{
std::cout << "stopped playpack" << std::endl;
nh.setParam("/play_bag", true);
}
return;
}
@ -1192,7 +1172,7 @@ void MsckfVio::PhotometricStateAugmentation(const double& time)
J * P11 * J.transpose();
// Add photometry P_eta and surrounding Zeros
state_server.state_cov(old_rows+6, old_cols+6) = 0;
state_server.state_cov(old_rows+6, old_cols+6) = irradiance_frame_bias;
// Fix the covariance to be symmetric
MatrixXd state_cov_fixed = (state_server.state_cov +
@ -1245,116 +1225,176 @@ void MsckfVio::PhotometricMeasurementJacobian(
const CAMState& cam_state = state_server.cam_states[cam_state_id];
const Feature& feature = map_server[feature_id];
const StateIDType anchor_state_id = feature.observations.begin()->first;
const CAMState anchor_state = state_server.cam_states[anchor_state_id];
// Cam0 pose.
Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
const Vector3d& t_c0_w = cam_state.position;
//photometric observation
std::vector<double> photo_z;
// individual Jacobians
Matrix<double, 2, 3> dh_dCpij = Matrix<double, 2, 3>::Zero();
Matrix<double, 2, 3> dh_dGpij = Matrix<double, 2, 3>::Zero();
Matrix<double, 2, 6> dh_dXplj = Matrix<double, 2, 6>::Zero();
Matrix<double, 3, 1> dGpj_drhoj = Matrix<double, 3, 1>::Zero();
Matrix<double, 3, 6> dGpj_XpAj = Matrix<double, 3, 6>::Zero();
Matrix<double, 3, 3> dCpij_dGpij = Matrix<double, 3, 3>::Zero();
Matrix<double, 3, 3> dCpij_dCGtheta = Matrix<double, 3, 3>::Zero();
Matrix<double, 3, 3> dCpij_dGpC = Matrix<double, 3, 3>::Zero();
// one line of the NxN Jacobians
Eigen::Matrix<double, 2, 1> H_rho;
Eigen::Matrix<double, 2, 6> H_plj;
Eigen::Matrix<double, 2, 6> H_pAj;
Matrix<double, 1, 1> H_rho;
Matrix<double, 1, 6> H_xi;
Matrix<double, 1, 6> H_xA;
/*
MatrixXd H_xl = MatrixXd::Zero(feature.anchorPatch_3d.size(), 6);
MatrixXd H_xAl = MatrixXd::Zero(feature.anchorPatch_3d.size(), 6);
MatrixXd H_rhol = MatrixXd::Zero(feature.anchorPatch_3d.size(), 1);
*/
MatrixXd H_xl = MatrixXd::Zero(1, 6);
MatrixXd H_xAl = MatrixXd::Zero(1, 6);
MatrixXd H_rhol = MatrixXd::Zero(1, 1);
auto frame = cam0.moving_window.find(cam_state_id)->second.image;
int count = 0;
double dx, dy;
auto point = feature.anchorPatch_3d[0];
std::vector<float> z_irr_est;
// gradient visualization parameters
cv::Point2f gradientVector(0,0);
// residual change visualization
cv::Point2f residualVector(0,0);
double res_sum = 0;
// get patch around feature in current image
cv::Point2f p_f(feature.observations.find(cam_state_id)->second(0),feature.observations.find(cam_state_id)->second(1));
// move to real pixels
p_f = image_handler::distortPoint(p_f, cam0.intrinsics, cam0.distortion_model, cam0.distortion_coeffs);
std::vector<double> cur_image_irr;
std::vector<cv::Point2f> cur_image_p;
for(int i = 0; i<N; i++)
{
for(int j = 0; j<N ; j++)
{
cur_image_p.push_back(cv::Point2f(p_f.x + (i-(N-1)/2), p_f.y + (j-(N-1)/2)));
cur_image_irr.push_back(feature.PixelIrradiance(cv::Point2f(p_f.x + (i-(N-1)/2), p_f.y + (j-(N-1)/2)), frame));
}
}
for (auto point : feature.anchorPatch_3d)
{
Eigen::Vector3d p_c0 = R_w_c0 * (point-t_c0_w);
// add jacobian
//dh / d{}^Cp_{ij}
dh_dCpij(0, 0) = 1 / p_c0(2);
dh_dCpij(1, 1) = 1 / p_c0(2);
dh_dCpij(0, 2) = -(p_c0(0))/(p_c0(2)*p_c0(2));
dh_dCpij(1, 2) = -(p_c0(1))/(p_c0(2)*p_c0(2));
// add jacobians
cv::Point2f pixelDistance = feature.pixelDistanceAt(cam_state, cam_state_id, cam0, point);
dCpij_dGpij = quaternionToRotation(cam_state.orientation);
// calculate derivation for anchor frame, use position for derivation calculation
// frame derivative calculated convoluting with kernel [-1, 0, 1]
Matrix<double, 1, 2> dI_dhj = Matrix<double, 1, 2>::Zero();
dx = feature.PixelIrradiance(cv::Point2f(cur_image_p[count].x+1, cur_image_p[count].y), frame) - feature.PixelIrradiance(cv::Point2f(cur_image_p[count].x-1, cur_image_p[count].y), frame);
dy = feature.PixelIrradiance(cv::Point2f(cur_image_p[count].x, cur_image_p[count].y+1), frame) - feature.PixelIrradiance(cv::Point2f(cur_image_p[count].x, cur_image_p[count].y-1), frame);
dI_dhj(0, 0) = (double)dx/pixelDistance.x;
dI_dhj(0, 1) = (double)dy/pixelDistance.y;
//orientation takes camera frame to world frame
dh_dGpij = dh_dCpij * dCpij_dGpij;
// Compute the Jacobians.
Matrix<double, 2, 3> dz_dpc0 = Matrix<double, 2, 3>::Zero();
dz_dpc0(0, 0) = 1 / p_c0(2);
dz_dpc0(1, 1) = 1 / p_c0(2);
dz_dpc0(0, 2) = -p_c0(0) / (p_c0(2)*p_c0(2));
dz_dpc0(1, 2) = -p_c0(1) / (p_c0(2)*p_c0(2));
//dh / d X_{pl}
dCpij_dCGtheta = skewSymmetric(p_c0);
dCpij_dGpC = -quaternionToRotation(cam_state.orientation);
dh_dXplj.block<2, 3>(0, 0) = dh_dCpij * dCpij_dCGtheta;
dh_dXplj.block<2, 3>(0, 3) = dh_dCpij * dCpij_dGpC;
Matrix<double, 3, 6> dpc0_dxc = Matrix<double, 3, 6>::Zero();
// original jacobi
dpc0_dxc.leftCols(3) = skewSymmetric(p_c0);
dpc0_dxc.rightCols(3) = -R_w_c0;
Matrix3d dCpij_dGpij = quaternionToRotation(cam_state.orientation);
//orientation takes camera frame to world frame, we wa
Matrix<double, 2, 3> dh_dGpij = dz_dpc0 * dCpij_dGpij;
//d{}^Gp_P{ij} / \rho_i
double rho = feature.anchor_rho;
// Isometry T_anchor_w takes a vector in anchor frame to world frame
dGpj_drhoj = -feature.T_anchor_w.linear() * Eigen::Vector3d(feature.anchorPatch_ideal[count].x/(rho*rho), feature.anchorPatch_ideal[count].y/(rho*rho), 1/(rho*rho));
Matrix<double, 3, 6> dGpj_XpAj = Matrix<double, 3, 6>::Zero();
// alternative derivation towards feature
Matrix3d dCpc0_dpg = R_w_c0;
dGpj_XpAj.block<3, 3>(0, 0) = - feature.T_anchor_w.linear()
* skewSymmetric(Eigen::Vector3d(feature.anchorPatch_ideal[count].x/(rho),
feature.anchorPatch_ideal[count].y/(rho),
1/(rho)));
dGpj_XpAj.block<3, 3>(0, 3) = Matrix<double, 3, 3>::Identity();
// Intermediate Jacobians
H_rho = dh_dGpij * dGpj_drhoj; // 2 x 1
H_plj = dh_dXplj; // 2 x 6
H_pAj = dh_dGpij * dGpj_XpAj; // 2 x 6
// calculate residual
// Isometry T_anchor_w takes a vector in anchor frame to world frame
Matrix<double, 3, 1> dGpj_drhoj = Matrix<double, 3, 1>::Zero();
dGpj_drhoj = -feature.T_anchor_w.linear() * Eigen::Vector3d(feature.anchorPatch_ideal[count].x/(rho*rho), feature.anchorPatch_ideal[count].y/(rho*rho), 1/(rho*rho));
//observation
const Vector4d& total_z = feature.observations.find(cam_state_id)->second;
const Vector2d z = Vector2d(total_z[0], total_z[1]);
H_xi = dI_dhj * dz_dpc0 * dpc0_dxc;
H_xA = dI_dhj * dh_dGpij * dGpj_XpAj;
H_rho = dI_dhj * dh_dGpij * dGpj_drhoj; // 1 x 1
VectorXd r_i = VectorXd::Zero(2);
/*
H_xl.block<1, 6>(count, 0) = H_xi;
H_xAl.block<1, 6>(count, 0) = H_xA;
H_rhol.block<1, 1>(count, 0) = H_rho;
*/
H_xl += H_xi;
H_xAl += H_xA;
H_rhol += H_rho;
//calculate residual
count++;
}
r_i[0] = z[0] - p_c0(0)/p_c0(2);
r_i[1] = z[1] - p_c0(1)/p_c0(2);
// calculate projected irradiance
std::vector<double> projectionPatch;
for(auto point : feature.anchorPatch_3d)
{
cv::Point2f p_in_c0 = feature.projectPositionToCamera(cam_state, cam_state_id, cam0, point);
projectionPatch.push_back(feature.PixelIrradiance(p_in_c0, frame));
}
MatrixXd H_xl = MatrixXd::Zero(2, 21+state_server.cam_states.size()*7);
//Eigen::VectorXd r_l = Eigen::VectorXd::Zero(count);
Eigen::VectorXd r_l = Eigen::VectorXd::Zero(1);
std::vector<double> residual_v;
double residual_v_sum = 0;
for(int i = 0; i < N*N; i++)
{
residual_v_sum += cur_image_irr[i] - projectionPatch[i];
residual_v.push_back(cur_image_irr[i] - projectionPatch[i]);
//r_l(i) = cur_image_irr[i] - projectionPatch[i];
}
r_l(0) = residual_v_sum/(N*N);
H_xl /=(N*N);
H_xAl /= (N*N);
H_rhol /= (N*N);
cout << "dI/dxl\n" << H_xl << endl;
cout << "dI/dAl\n" << H_xAl << endl;
cout << "dI/drhol\n" << H_rhol << endl;
r = r_l;
MatrixXd H_xt = MatrixXd::Zero(1, 21+state_server.cam_states.size()*7);
// set anchor Jakobi
// get position of anchor in cam states
auto cam_state_anchor = state_server.cam_states.find(feature.observations.begin()->first);
int cam_state_cntr_anchor = std::distance(state_server.cam_states.begin(), cam_state_anchor);
H_xl.block(0, 21+cam_state_cntr_anchor*7, 2, 6) = H_pAj;
//H_xt.block(0, 21+cam_state_cntr_anchor*7, N*N, 6) = H_xAl;
H_xt.block(0, 21+cam_state_cntr_anchor*7, 1, 6) = H_xAl;
// set frame Jakobi
//get position of current frame in cam states
auto cam_state_iter = state_server.cam_states.find(cam_state_id);
int cam_state_cntr = std::distance(state_server.cam_states.begin(), cam_state_iter);
//H_xt.block(0, 21+cam_state_cntr*7, N*N, 6) = H_xl;
H_xt.block(0, 21+cam_state_cntr*7, 1, 6) = H_xl;
// set jakobi of state
H_xl.block(0, 21+cam_state_cntr*7, 2, 6) = H_plj;
H_x = H_xt;
H_y = H_rhol;
H_x = H_xl;
H_y = H_rho;
r = r_i;
//TODO make this more fluent as well
std::stringstream ss;
ss << "INFO:"; // << " anchor: " << cam_state_cntr_anchor << " frame: " << cam_state_cntr;
if(PRINTIMAGES)
{
//std::stringstream ss;
//ss << "INFO:" << " anchor: " << cam_state_cntr_anchor << " frame: " << cam_state_cntr;
//feature.MarkerGeneration(marker_pub, state_server.cam_states);
//feature.VisualizePatch(cam_state, cam_state_id, cam0, photo_r, ss);
feature.MarkerGeneration(marker_pub, state_server.cam_states);
feature.VisualizePatch(cam_state, cam_state_id, cam0, residual_v, ss);
}
return;
@ -1367,6 +1407,28 @@ void MsckfVio::PhotometricFeatureJacobian(
{
// stop playing bagfile if printing images
if(PRINTIMAGES)
{
std::cout << "stopped playpack" << std::endl;
nh.setParam("/play_bag", false);
}
// Errstate
calcErrorState();
/*
std::cout << "--- error state: ---\n " << std::endl;
std::cout << "imu orientation:\n " << err_state_server.imu_state.orientation << std::endl;
std::cout << "imu position\n" << err_state_server.imu_state.position << std::endl;
int count = 0;
for(auto cam_state : err_state_server.cam_states)
{
std::cout << " - cam " << count++ << " - \n" << std::endl;
std::cout << "orientation: " << cam_state.second.orientation(0) << cam_state.second.orientation(1) << " " << cam_state.second.orientation(2) << " " << std::endl;
std::cout << "position:" << cam_state.second.position(0) << " " << cam_state.second.position(1) << " " << cam_state.second.position(2) << std::endl;
}
*/
const auto& feature = map_server[feature_id];
@ -1378,13 +1440,14 @@ void MsckfVio::PhotometricFeatureJacobian(
if (feature.observations.find(cam_id) ==
feature.observations.end()) continue;
if (feature.observations.find(cam_id) ==
feature.observations.begin()) continue;
// if not anchor frame
if (cam_id != feature.observations.begin()->first)
valid_cam_state_ids.push_back(cam_id);
}
int jacobian_row_size = 0;
jacobian_row_size = 2 * valid_cam_state_ids.size();
//jacobian_row_size = N * N * valid_cam_state_ids.size();
jacobian_row_size = valid_cam_state_ids.size();
MatrixXd H_xi = MatrixXd::Zero(jacobian_row_size,
21+state_server.cam_states.size()*7);
@ -1396,68 +1459,52 @@ void MsckfVio::PhotometricFeatureJacobian(
MatrixXd H_xl;
MatrixXd H_yl;
Eigen::VectorXd r_l = VectorXd::Zero(2);
Eigen::VectorXd r_l = VectorXd::Zero(N*N);
PhotometricMeasurementJacobian(cam_id, feature.id, H_xl, H_yl, r_l);
auto cam_state_iter = state_server.cam_states.find(cam_id);
int cam_state_cntr = std::distance(
state_server.cam_states.begin(), cam_state_iter);
auto cam_state_anchor = state_server.cam_states.find(feature.observations.begin()->first);
int cam_state_cntr_anchor = std::distance(state_server.cam_states.begin(), cam_state_anchor);
// Stack the Jacobians.
H_xi.block(stack_cntr, 0, H_xl.rows(), H_xl.cols()) = H_xl;
H_yi.block(stack_cntr, 0, H_yl.rows(), H_yl.cols()) = H_yl;
r_i.segment(stack_cntr, 2) = r_l;
stack_cntr += 2;
H_yi.block(stack_cntr,0, H_yl.rows(), H_yl.cols()) = H_yl;
//r_i.segment(stack_cntr, N*N) = r_l;
//stack_cntr += N*N;
r_i.segment(stack_cntr, 1) = r_l;
stack_cntr += 1;
}
// Project the residual and Jacobians onto the nullspace
// of H_yj.
// get Nullspace
FullPivLU<MatrixXd> lu(H_yi.transpose());
MatrixXd A_null_space = lu.kernel();
MatrixXd A = lu.kernel();
H_x = A.transpose() * H_xi;
r = A.transpose() * r_i;
H_x = A_null_space.transpose() * H_xi;
r = A_null_space.transpose() * r_i;
/*
if(PRINTIMAGES)
{
ofstream myfile;
myfile.open("/home/raphael/dev/octave/log2octave");
myfile << "# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST <raphael@raphael-desktop>\n"
<< "# name: Hx\n"
<< "# type: matrix\n"
<< "# rows: " << H_xi.rows() << "\n"
<< "# columns: " << H_xi.cols() << "\n"
<< H_xi << endl;
myfile << "# name: Hy\n"
<< "# type: matrix\n"
<< "# rows: " << H_yi.rows() << "\n"
<< "# columns: " << H_yi.cols() << "\n"
<< H_yi << endl;
myfile << "# name: r\n"
<< "# type: matrix\n"
<< "# rows: " << r_i.rows() << "\n"
<< "# columns: " << 1 << "\n"
<< r_i << endl;
myfile.open ("/home/raphael/dev/MSCKF_ws/log.txt");
myfile << "Hyi\n" << H_yi << "Hxi\n" << H_xi << "Hx\n" << H_x << "r\n" << r << "\n x\n" << H_x.colPivHouseholderQr().solve(r) << endl;
myfile.close();
cout << "---------- LOGGED -------- " << endl;
std::cout << "resume playback" << std::endl;
nh.setParam("/play_bag", true);
}*/
}
return;
}
void MsckfVio::measurementJacobian(
const StateIDType& cam_state_id,
const FeatureIDType& feature_id,
Matrix<double, 2, 6>& H_x, Matrix<double, 2, 3>& H_f, Vector2d& r)
{
Matrix<double, 4, 6>& H_x, Matrix<double, 4, 3>& H_f, Vector4d& r) {
// Prepare all the required data.
const CAMState& cam_state = state_server.cam_states[cam_state_id];
@ -1475,51 +1522,66 @@ void MsckfVio::measurementJacobian(
// 3d feature position in the world frame.
// And its observation with the stereo cameras.
const Vector3d& p_w = feature.position;
const Vector2d& z = feature.observations.find(cam_state_id)->second.topRows(2);
const Vector4d& z = feature.observations.find(cam_state_id)->second;
// Convert the feature position from the world frame to
// the cam0 and cam1 frame.
Vector3d p_c0 = R_w_c0 * (p_w-t_c0_w);
//Vector3d p_c1 = R_w_c1 * (p_w-t_c1_w);
Vector3d p_c1 = R_w_c1 * (p_w-t_c1_w);
// Compute the Jacobians.
Matrix<double, 2, 3> dz_dpc0 = Matrix<double, 2, 3>::Zero();
Matrix<double, 4, 3> dz_dpc0 = Matrix<double, 4, 3>::Zero();
dz_dpc0(0, 0) = 1 / p_c0(2);
dz_dpc0(1, 1) = 1 / p_c0(2);
dz_dpc0(0, 2) = -p_c0(0) / (p_c0(2)*p_c0(2));
dz_dpc0(1, 2) = -p_c0(1) / (p_c0(2)*p_c0(2));
/*
Matrix<double, 4, 3> dz_dpc1 = Matrix<double, 4, 3>::Zero();
dz_dpc1(2, 0) = 1 / p_c1(2);
dz_dpc1(3, 1) = 1 / p_c1(2);
dz_dpc1(2, 2) = -p_c1(0) / (p_c1(2)*p_c1(2));
dz_dpc1(3, 2) = -p_c1(1) / (p_c1(2)*p_c1(2));
*/
Matrix<double, 3, 6> dpc0_dxc = Matrix<double, 3, 6>::Zero();
// original jacobi
Matrix<double, 3, 6> dpc0_dxc = Matrix<double, 3, 6>::Zero();
dpc0_dxc.leftCols(3) = skewSymmetric(p_c0);
dpc0_dxc.rightCols(3) = -R_w_c0;
Matrix<double, 3, 6> dpc1_dxc = Matrix<double, 3, 6>::Zero();
dpc1_dxc.leftCols(3) = R_c0_c1 * skewSymmetric(p_c0);
dpc1_dxc.rightCols(3) = -R_w_c1;
Matrix3d dpc0_dpg = R_w_c0;
Matrix3d dpc1_dpg = R_w_c1;
H_x = dz_dpc0*dpc0_dxc; //+ dz_dpc1*dpc1_dxc;
H_f = dz_dpc0*dpc0_dpg; // + dz_dpc1*dpc1_dpg;
H_x = dz_dpc0*dpc0_dxc + dz_dpc1*dpc1_dxc;
H_f = dz_dpc0*dpc0_dpg + dz_dpc1*dpc1_dpg;
// Modifty the measurement Jacobian to ensure
// observability constrain.
Matrix<double, 4, 6> A = H_x;
Matrix<double, 6, 1> u = Matrix<double, 6, 1>::Zero();
u.block<3, 1>(0, 0) = quaternionToRotation(
cam_state.orientation_null) * IMUState::gravity;
u.block<3, 1>(3, 0) = skewSymmetric(
p_w-cam_state.position_null) * IMUState::gravity;
H_x = A - A*u*(u.transpose()*u).inverse()*u.transpose();
H_f = -H_x.block<4, 3>(0, 3);
// Compute the residual.
r = z - Vector2d(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));//,
//p_c1(0)/p_c1(2), p_c1(1)/p_c1(2));
r = z - Vector4d(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2),
p_c1(0)/p_c1(2), p_c1(1)/p_c1(2));
return;
}
void MsckfVio::featureJacobian(
const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
MatrixXd& H_x, VectorXd& r)
{
if(PRINTIMAGES)
nh.setParam("/play_bag", false);
const auto& feature = map_server[feature_id];
@ -1534,19 +1596,19 @@ void MsckfVio::featureJacobian(
}
int jacobian_row_size = 0;
jacobian_row_size = 2 * valid_cam_state_ids.size();
jacobian_row_size = 4 * valid_cam_state_ids.size();
MatrixXd H_xj = MatrixXd::Zero(jacobian_row_size,
21+state_server.cam_states.size()*7);
21+state_server.cam_states.size()*6);
MatrixXd H_fj = MatrixXd::Zero(jacobian_row_size, 3);
VectorXd r_j = VectorXd::Zero(jacobian_row_size);
int stack_cntr = 0;
for (const auto& cam_id : valid_cam_state_ids) {
Matrix<double, 2, 6> H_xi = Matrix<double, 2, 6>::Zero();
Matrix<double, 2, 3> H_fi = Matrix<double, 2, 3>::Zero();
Vector2d r_i = Vector2d::Zero();
Matrix<double, 4, 6> H_xi = Matrix<double, 4, 6>::Zero();
Matrix<double, 4, 3> H_fi = Matrix<double, 4, 3>::Zero();
Vector4d r_i = Vector4d::Zero();
measurementJacobian(cam_id, feature.id, H_xi, H_fi, r_i);
auto cam_state_iter = state_server.cam_states.find(cam_id);
@ -1554,10 +1616,10 @@ void MsckfVio::featureJacobian(
state_server.cam_states.begin(), cam_state_iter);
// Stack the Jacobians.
H_xj.block<2, 6>(stack_cntr, 21+7*cam_state_cntr) = H_xi;
H_fj.block<2, 3>(stack_cntr, 0) = H_fi;
r_j.segment<2>(stack_cntr) = r_i;
stack_cntr += 2;
H_xj.block<4, 6>(stack_cntr, 21+6*cam_state_cntr) = H_xi;
H_fj.block<4, 3>(stack_cntr, 0) = H_fi;
r_j.segment<4>(stack_cntr) = r_i;
stack_cntr += 4;
}
// Project the residual and Jacobians onto the nullspace
@ -1581,16 +1643,20 @@ void MsckfVio::featureJacobian(
H_x = A.transpose() * H_xj;
r = A.transpose() * r_j;
/*
if(PRINTIMAGES)
{
ofstream myfile;
myfile.open ("/home/raphael/dev/MSCKF_ws/log.txt");
myfile << "Hx\n" << H_x << "r\n" << r << "from residual estimated error state: " << H_x.colPivHouseholderQr().solve(r) << endl;
myfile << H_x.colPivHouseholderQr().solve(r) << endl;
myfile.close();
cout << "---------- LOGGED -------- " << endl;
nh.setParam("/play_bag", false);
*/
std::cout << "resume playback" << std::endl;
nh.setParam("/play_bag", true);
}
return;
}
void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
@ -1601,7 +1667,11 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
// complexity as in Equation (28), (29).
MatrixXd H_thin;
VectorXd r_thin;
/*
int augmentationSize = 6;
if(PHOTOMETRIC)
augmentationSize = 7;
/*
if (H.rows() > H.cols()) {
// Convert H to a sparse matrix.
SparseMatrix<double> H_sparse = H.sparseView();
@ -1616,8 +1686,8 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
(spqr_helper.matrixQ().transpose() * H).evalTo(H_temp);
(spqr_helper.matrixQ().transpose() * r).evalTo(r_temp);
H_thin = H_temp.topRows(21+state_server.cam_states.size()*7);
r_thin = r_temp.head(21+state_server.cam_states.size()*7);
H_thin = H_temp.topRows(21+state_server.cam_states.size()*augmentationSize);
r_thin = r_temp.head(21+state_server.cam_states.size()*augmentationSize);
//HouseholderQR<MatrixXd> qr_helper(H);
//MatrixXd Q = qr_helper.householderQ();
@ -1625,22 +1695,23 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
//H_thin = Q1.transpose() * H;
//r_thin = Q1.transpose() * r;
} else {*/
} else {
H_thin = H;
r_thin = r;
//}
}
*/
// Compute the Kalman gain.
const MatrixXd& P = state_server.state_cov;
MatrixXd S = H_thin*P*H_thin.transpose() +
MatrixXd S = H*P*H.transpose() +
Feature::observation_noise*MatrixXd::Identity(
H_thin.rows(), H_thin.rows());
//MatrixXd K_transpose = S.fullPivHouseholderQr().solve(H_thin*P);
MatrixXd K_transpose = S.ldlt().solve(H_thin*P);
H.rows(), H.rows());
//MatrixXd K_transpose = S.fullPivHouseholderQr().solve(H*P);
MatrixXd K_transpose = S.ldlt().solve(H*P);
MatrixXd K = K_transpose.transpose();
// Compute the error of the state.
VectorXd delta_x = K * r_thin;
VectorXd delta_x = K * r;
// Update the IMU state.
const VectorXd& delta_x_imu = delta_x.head<21>();
@ -1675,7 +1746,7 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
auto cam_state_iter = state_server.cam_states.begin();
for (int i = 0; i < state_server.cam_states.size();
++i, ++cam_state_iter) {
const VectorXd& delta_x_cam = delta_x.segment<6>(21+i*7);
const VectorXd& delta_x_cam = delta_x.segment(21+i*augmentationSize, augmentationSize);
const Vector4d dq_cam = smallAngleQuaternion(delta_x_cam.head<3>());
cam_state_iter->second.orientation = quaternionMultiplication(
dq_cam, cam_state_iter->second.orientation);
@ -1683,7 +1754,7 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
}
// Update state covariance.
MatrixXd I_KH = MatrixXd::Identity(K.rows(), H_thin.cols()) - K*H_thin;
MatrixXd I_KH = MatrixXd::Identity(K.rows(), H.cols()) - K*H;
//state_server.state_cov = I_KH*state_server.state_cov*I_KH.transpose() +
// K*K.transpose()*Feature::observation_noise;
state_server.state_cov = I_KH*state_server.state_cov;
@ -1697,7 +1768,6 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
}
bool MsckfVio::gatingTest(const MatrixXd& H, const VectorXd& r, const int& dof) {
MatrixXd P1 = H * state_server.state_cov * H.transpose();
@ -1750,7 +1820,7 @@ void MsckfVio::removeLostFeatures() {
invalid_feature_ids.push_back(feature.id);
continue;
} else {
if(!feature.initializeRho(state_server.cam_states)) {
if(!feature.initializePosition(state_server.cam_states)) {
invalid_feature_ids.push_back(feature.id);
continue;
}
@ -1767,9 +1837,9 @@ void MsckfVio::removeLostFeatures() {
if(PHOTOMETRIC)
//just use max. size, as gets shrunken down after anyway
jacobian_row_size += 2*feature.observations.size();
jacobian_row_size += N*N*feature.observations.size();
else
jacobian_row_size += 2*feature.observations.size() - 3;
jacobian_row_size += 4*feature.observations.size() - 3;
processed_feature_ids.push_back(feature.id);
}
@ -1786,7 +1856,7 @@ void MsckfVio::removeLostFeatures() {
if (processed_feature_ids.size() == 0) return;
MatrixXd H_x = MatrixXd::Zero(jacobian_row_size,
21+7*state_server.cam_states.size());
21+augmentationSize*state_server.cam_states.size());
VectorXd r = VectorXd::Zero(jacobian_row_size);
int stack_cntr = 0;
@ -1806,7 +1876,8 @@ void MsckfVio::removeLostFeatures() {
else
featureJacobian(feature.id, cam_state_ids, H_xj, r_j);
if (gatingTest(H_xj, r_j, cam_state_ids.size()-1)) {
if (gatingTest(H_xj, r_j, state_server.cam_states.size() - 1)){ //cam_state_ids.size()-1)) {
H_x.block(stack_cntr, 0, H_xj.rows(), H_xj.cols()) = H_xj;
r.segment(stack_cntr, r_j.rows()) = r_j;
stack_cntr += H_xj.rows();
@ -1920,7 +1991,7 @@ void MsckfVio::pruneCamStateBuffer() {
feature.observations.erase(cam_id);
continue;
} else {
if(!feature.initializeRho(state_server.cam_states)) {
if(!feature.initializePosition(state_server.cam_states)) {
for (const auto& cam_id : involved_cam_state_ids)
feature.observations.erase(cam_id);
continue;
@ -1937,9 +2008,9 @@ void MsckfVio::pruneCamStateBuffer() {
}
}
if(PHOTOMETRIC)
jacobian_row_size += 2*involved_cam_state_ids.size();
jacobian_row_size += N*N*involved_cam_state_ids.size();
else
jacobian_row_size += 2*involved_cam_state_ids.size() - 3;
jacobian_row_size += 4*involved_cam_state_ids.size() - 3;
}
//cout << "jacobian row #: " << jacobian_row_size << endl;
@ -1947,7 +2018,7 @@ void MsckfVio::pruneCamStateBuffer() {
// Compute the Jacobian and residual.
MatrixXd H_xj;
VectorXd r_j;
MatrixXd H_x = MatrixXd::Zero(jacobian_row_size, 21+7*state_server.cam_states.size());
MatrixXd H_x = MatrixXd::Zero(jacobian_row_size, 21+augmentationSize*state_server.cam_states.size());
VectorXd r = VectorXd::Zero(jacobian_row_size);
int stack_cntr = 0;
for (auto& item : map_server) {
@ -1968,7 +2039,7 @@ void MsckfVio::pruneCamStateBuffer() {
else
featureJacobian(feature.id, involved_cam_state_ids, H_xj, r_j);
if (gatingTest(H_xj, r_j, involved_cam_state_ids.size())) {
if (gatingTest(H_xj, r_j, involved_cam_state_ids.size() - 1)) { //involved_cam_state_ids.size())) {
H_x.block(stack_cntr, 0, H_xj.rows(), H_xj.cols()) = H_xj;
r.segment(stack_cntr, r_j.rows()) = r_j;
stack_cntr += H_xj.rows();
@ -1992,8 +2063,8 @@ void MsckfVio::pruneCamStateBuffer() {
for (const auto& cam_id : rm_cam_state_ids) {
int cam_sequence = std::distance(state_server.cam_states.begin(),
state_server.cam_states.find(cam_id));
int cam_state_start = 21 + 7*cam_sequence;
int cam_state_end = cam_state_start + 7;
int cam_state_start = 21 + augmentationSize*cam_sequence;
int cam_state_end = cam_state_start + augmentationSize;
// Remove the corresponding rows and columns in the state
@ -2014,10 +2085,10 @@ void MsckfVio::pruneCamStateBuffer() {
state_server.state_cov.cols()-cam_state_end);
state_server.state_cov.conservativeResize(
state_server.state_cov.rows()-7, state_server.state_cov.cols()-7);
state_server.state_cov.rows()-augmentationSize, state_server.state_cov.cols()-augmentationSize);
} else {
state_server.state_cov.conservativeResize(
state_server.state_cov.rows()-7, state_server.state_cov.cols()-7);
state_server.state_cov.rows()-augmentationSize, state_server.state_cov.cols()-augmentationSize);
}
// Remove this camera state in the state vector.