added position calculation

Raphael Maenle 2019-04-12 17:37:01 +02:00
parent a85a4745f2
commit 8227a8e48d
8 changed files with 241 additions and 20 deletions

View File

@@ -79,6 +79,7 @@ include_directories(
 add_library(msckf_vio
   src/msckf_vio.cpp
   src/utils.cpp
+  src/image_handler.cpp
 )
 add_dependencies(msckf_vio
   ${${PROJECT_NAME}_EXPORTED_TARGETS}
@@ -106,6 +107,7 @@ target_link_libraries(msckf_vio_nodelet
 add_library(image_processor
   src/image_processor.cpp
   src/utils.cpp
+  src/image_handler.cpp
 )
 add_dependencies(image_processor
   ${${PROJECT_NAME}_EXPORTED_TARGETS}

View File

@@ -16,6 +16,8 @@
 #include <Eigen/Geometry>
 #include <Eigen/StdVector>
+#include "image_handler.h"
 #include "math_utils.hpp"
 #include "imu_state.h"
 #include "cam_state.h"
@@ -123,8 +125,11 @@ struct Feature {
    * @return True if the Anchor can be estimated
    */
-  inline bool initializeAnchor(
-      const movingWindow& cam0_moving_window);
+  bool initializeAnchor(
+      const movingWindow& cam0_moving_window,
+      const cv::Vec4d& intrinsics,
+      const std::string& distortion_model,
+      const cv::Vec4d& distortion_coeffs);

   /*
@@ -141,6 +146,15 @@ struct Feature {
   inline bool initializePosition(
       const CamStateServer& cam_states);

+  /*
+   * @brief projectPixelToPosition uses the calculated pixels
+   *        of the anchor patch to generate 3D positions of all of them
+   */
+  bool projectPixelToPosition(cv::Point2f in_p,
+      Eigen::Vector3d& out_p,
+      const cv::Vec4d& intrinsics,
+      const std::string& distortion_model,
+      const cv::Vec4d& distortion_coeffs);
   // An unique identifier for the feature.
   // In case of long time running, the variable
@@ -159,9 +173,16 @@ struct Feature {
   // NxN Patch of Anchor Image
   std::vector<double> anchorPatch;

+  // Position of NxN Patch in 3D space
+  std::vector<Eigen::Vector3d> anchorPatch_3d;

   // 3d postion of the feature in the world frame.
   Eigen::Vector3d position;

+  // inverse depth representation
+  double rho;

   // A indicator to show if the 3d postion of the feature
   // has been initialized or not.
   bool is_initialized;
@@ -305,8 +326,24 @@ bool Feature::checkMotion(
   else return false;
 }

+bool Feature::projectPixelToPosition(cv::Point2f in_p,
+    Eigen::Vector3d& out_p,
+    const cv::Vec4d& intrinsics,
+    const std::string& distortion_model,
+    const cv::Vec4d& distortion_coeffs)
+{
+  // use undistorted position of point of interest
+  // project it back into 3D space using pinhole model
+  // save resulting NxN positions for this feature
+  anchorPatch_3d.push_back(Eigen::Vector3d(in_p.x/rho, in_p.y/rho, 1/rho));
+  printf("%f, %f, %f\n",in_p.x/rho, in_p.y/rho, 1/rho);
+}

 bool Feature::initializeAnchor(
-    const movingWindow& cam0_moving_window)
+    const movingWindow& cam0_moving_window,
+    const cv::Vec4d& intrinsics,
+    const std::string& distortion_model,
+    const cv::Vec4d& distortion_coeffs)
 {
   int N = 5;
@@ -317,14 +354,24 @@ bool Feature::initializeAnchor(
     return false;

   cv::Mat anchorImage = cam0_moving_window.find(anchor->first)->second;
-  auto u = anchor->second(0)*anchorImage.rows/2 + anchorImage.rows/2;
-  auto v = anchor->second(1)*anchorImage.cols/2 + anchorImage.cols/2;
+  auto u = anchor->second(0)*intrinsics[0] + intrinsics[2];
+  auto v = anchor->second(1)*intrinsics[1] + intrinsics[3];

   int count = 0;
-  for(int u_run = (int)u - n; u_run <= (int)u + n; u_run++)
-    for(int v_run = v - n; v_run <= v + n; v_run++)
-      anchorPatch.push_back(anchorImage.at<uint8_t>(u_run,v_run));
+  printf("estimated NxN position: \n");
+  for(double u_run = u - n; u_run <= u + n; u_run = u_run + 1)
+  {
+    for(double v_run = v - n; v_run <= v + n; v_run = v_run + 1)
+    {
+      anchorPatch.push_back(anchorImage.at<uint8_t>((int)u_run,(int)v_run));
+      Eigen::Vector3d Npose;
+      projectPixelToPosition(cv::Point2f((u_run-intrinsics[2])/intrinsics[0], (v_run-intrinsics[1])/intrinsics[3]),
+                             Npose,
+                             intrinsics,
+                             distortion_model,
+                             distortion_coeffs);
+    }
+  }

   return true;
 }
@@ -465,6 +512,9 @@ bool Feature::initializePosition(
     }
   }

+  // save inverse depth distance from camera
+  rho = solution(2);

   // Convert the feature position to the world frame.
   position = T_c0_w.linear()*final_position + T_c0_w.translation();

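For reference, a minimal sketch (not part of this commit) of the back-projection that projectPixelToPosition is meant to perform, based on the in-code comments: undistort the anchor-patch pixel into normalized camera coordinates with the new image_handler helper, then scale by the inverse depth rho saved in initializePosition(). The helper name backProjectPixel and the standalone form are assumptions for illustration.

// Sketch only (not part of this commit): back-project one anchor-patch pixel
// into the anchor camera frame using the inverse depth rho that
// initializePosition() stores via "rho = solution(2)". The helper name
// backProjectPixel is made up; rho is assumed to be non-zero.
#include <string>
#include <vector>
#include <Eigen/Dense>
#include <msckf_vio/image_handler.h>

Eigen::Vector3d backProjectPixel(const cv::Point2f& px,           // raw pixel (u, v)
                                 const cv::Vec4d& intrinsics,     // fx, fy, cx, cy
                                 const std::string& distortion_model,
                                 const cv::Vec4d& distortion_coeffs,
                                 double rho)                      // inverse depth 1/z
{
  // Undistort into normalized camera coordinates; the default new_intrinsics
  // of (1, 1, 0, 0) means the output is already (x/z, y/z).
  std::vector<cv::Point2f> in{px}, out;
  msckf_vio::image_handler::undistortPoints(
      in, intrinsics, distortion_model, distortion_coeffs, out);

  // Pinhole back-projection: p_c = (1/rho) * (x_n, y_n, 1).
  return Eigen::Vector3d(out[0].x / rho, out[0].y / rho, 1.0 / rho);
}

The committed projectPixelToPosition() performs the same 1/rho scaling on coordinates that are assumed to already be normalized; the sketch only adds the undistortion step that the in-code comment describes.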
View File

@@ -0,0 +1,34 @@
#ifndef MSCKF_VIO_IMAGE_HANDLER_H
#define MSCKF_VIO_IMAGE_HANDLER_H

#include <ros/ros.h>
#include <string>
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
#include <cv_bridge/cv_bridge.h>
#include <Eigen/Geometry>
#include <vector>

namespace msckf_vio {
/*
 * @brief utilities for msckf_vio
 */
namespace image_handler {

void undistortPoints(
    const std::vector<cv::Point2f>& pts_in,
    const cv::Vec4d& intrinsics,
    const std::string& distortion_model,
    const cv::Vec4d& distortion_coeffs,
    std::vector<cv::Point2f>& pts_out,
    const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(),
    const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0));

std::vector<cv::Point2f> distortPoints(
    const std::vector<cv::Point2f>& pts_in,
    const cv::Vec4d& intrinsics,
    const std::string& distortion_model,
    const cv::Vec4d& distortion_coeffs);

}
}

#endif

View File

@@ -205,6 +205,17 @@ class MsckfVio {
     movingWindow cam0_moving_window;
     movingWindow cam1_moving_window;

+    // Camera calibration parameters
+    std::string cam0_distortion_model;
+    cv::Vec2i cam0_resolution;
+    cv::Vec4d cam0_intrinsics;
+    cv::Vec4d cam0_distortion_coeffs;

+    std::string cam1_distortion_model;
+    cv::Vec2i cam1_resolution;
+    cv::Vec4d cam1_intrinsics;
+    cv::Vec4d cam1_distortion_coeffs;

     // Indicate if the gravity vector is set.
     bool is_gravity_set;

View File

@@ -5,7 +5,7 @@
   <version>0.0.1</version>
   <description>Multi-State Constraint Kalman Filter - Photometric expansion</description>

-  <maintainer email="sunke.polyu@gmail.com">Raphael Maenle</maintainer>
+  <maintainer email="raphael@maenle.net">Raphael Maenle</maintainer>

   <license>Penn Software License</license>
   <author email="raphael@maenle.net">Raphael Maenle</author>

src/image_handler.cpp (new file, 75 lines)
View File

@@ -0,0 +1,75 @@
#include <msckf_vio/image_handler.h>

namespace msckf_vio {
namespace image_handler {

void undistortPoints(
    const std::vector<cv::Point2f>& pts_in,
    const cv::Vec4d& intrinsics,
    const std::string& distortion_model,
    const cv::Vec4d& distortion_coeffs,
    std::vector<cv::Point2f>& pts_out,
    const cv::Matx33d &rectification_matrix,
    const cv::Vec4d &new_intrinsics) {

  if (pts_in.size() == 0) return;

  const cv::Matx33d K(
      intrinsics[0], 0.0, intrinsics[2],
      0.0, intrinsics[1], intrinsics[3],
      0.0, 0.0, 1.0);

  const cv::Matx33d K_new(
      new_intrinsics[0], 0.0, new_intrinsics[2],
      0.0, new_intrinsics[1], new_intrinsics[3],
      0.0, 0.0, 1.0);

  if (distortion_model == "radtan") {
    cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                        rectification_matrix, K_new);
  } else if (distortion_model == "equidistant") {
    cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                                 rectification_matrix, K_new);
  } else {
    ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
                  distortion_model.c_str());
    cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                        rectification_matrix, K_new);
  }

  return;
}

std::vector<cv::Point2f> distortPoints(
    const std::vector<cv::Point2f>& pts_in,
    const cv::Vec4d& intrinsics,
    const std::string& distortion_model,
    const cv::Vec4d& distortion_coeffs) {

  const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
                      0.0, intrinsics[1], intrinsics[3],
                      0.0, 0.0, 1.0);

  std::vector<cv::Point2f> pts_out;
  if (distortion_model == "radtan") {
    std::vector<cv::Point3f> homogenous_pts;
    cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
    cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
                      distortion_coeffs, pts_out);
  } else if (distortion_model == "equidistant") {
    cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
  } else {
    ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
                  distortion_model.c_str());
    std::vector<cv::Point3f> homogenous_pts;
    cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
    cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
                      distortion_coeffs, pts_out);
  }

  return pts_out;
}

} // namespace image_handler
} // namespace msckf_vio
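A small usage sketch (not part of the commit) of the two helpers above: distortPoints maps normalized coordinates to raw pixel coordinates, and undistortPoints with the default new_intrinsics maps raw pixels back to normalized coordinates. The calibration values below are made up for illustration.

#include <cstdio>
#include <string>
#include <vector>
#include <msckf_vio/image_handler.h>

int main() {
  using namespace msckf_vio;

  // Hypothetical radtan calibration: fx, fy, cx, cy and k1, k2, p1, p2.
  const cv::Vec4d intrinsics(458.0, 457.0, 367.0, 248.0);
  const cv::Vec4d distortion_coeffs(-0.28, 0.07, 0.0002, 0.00002);
  const std::string distortion_model("radtan");

  // One normalized image point (x/z, y/z).
  std::vector<cv::Point2f> normalized{cv::Point2f(0.1f, -0.05f)};

  // Normalized -> distorted pixel coordinates.
  std::vector<cv::Point2f> pixels = image_handler::distortPoints(
      normalized, intrinsics, distortion_model, distortion_coeffs);

  // Distorted pixel -> normalized again (new_intrinsics defaults to (1,1,0,0)).
  std::vector<cv::Point2f> roundtrip;
  image_handler::undistortPoints(pixels, intrinsics, distortion_model,
                                 distortion_coeffs, roundtrip);

  std::printf("normalized (%.4f, %.4f) -> pixel (%.1f, %.1f) -> (%.4f, %.4f)\n",
              normalized[0].x, normalized[0].y,
              pixels[0].x, pixels[0].y,
              roundtrip[0].x, roundtrip[0].y);
  return 0;
}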

View File

@@ -17,6 +17,7 @@
 #include <msckf_vio/TrackingInfo.h>
 #include <msckf_vio/image_processor.h>
 #include <msckf_vio/utils.h>
+#include <msckf_vio/image_handler.h>

 using namespace std;
 using namespace cv;
@@ -618,10 +619,10 @@ void ImageProcessor::stereoMatch(
   // rotation from stereo extrinsics
   const cv::Matx33d R_cam0_cam1 = R_cam1_imu.t() * R_cam0_imu;
   vector<cv::Point2f> cam0_points_undistorted;
-  undistortPoints(cam0_points, cam0_intrinsics, cam0_distortion_model,
+  image_handler::undistortPoints(cam0_points, cam0_intrinsics, cam0_distortion_model,
                   cam0_distortion_coeffs, cam0_points_undistorted,
                   R_cam0_cam1);
-  cam1_points = distortPoints(cam0_points_undistorted, cam1_intrinsics,
+  cam1_points = image_handler::distortPoints(cam0_points_undistorted, cam1_intrinsics,
                 cam1_distortion_model, cam1_distortion_coeffs);
 }
@@ -662,10 +663,10 @@ void ImageProcessor::stereoMatch(
   // essential matrix.
   vector<cv::Point2f> cam0_points_undistorted(0);
   vector<cv::Point2f> cam1_points_undistorted(0);
-  undistortPoints(
+  image_handler::undistortPoints(
       cam0_points, cam0_intrinsics, cam0_distortion_model,
       cam0_distortion_coeffs, cam0_points_undistorted);
-  undistortPoints(
+  image_handler::undistortPoints(
       cam1_points, cam1_intrinsics, cam1_distortion_model,
       cam1_distortion_coeffs, cam1_points_undistorted);
@@ -1009,10 +1010,10 @@ void ImageProcessor::twoPointRansac(
   // Undistort all the points.
   vector<Point2f> pts1_undistorted(pts1.size());
   vector<Point2f> pts2_undistorted(pts2.size());
-  undistortPoints(
+  image_handler::undistortPoints(
       pts1, intrinsics, distortion_model,
       distortion_coeffs, pts1_undistorted);
-  undistortPoints(
+  image_handler::undistortPoints(
      pts2, intrinsics, distortion_model,
      distortion_coeffs, pts2_undistorted);
@@ -1250,10 +1251,10 @@ void ImageProcessor::publish() {
   vector<Point2f> curr_cam0_points_undistorted(0);
   vector<Point2f> curr_cam1_points_undistorted(0);

-  undistortPoints(
+  image_handler::undistortPoints(
      curr_cam0_points, cam0_intrinsics, cam0_distortion_model,
      cam0_distortion_coeffs, curr_cam0_points_undistorted);
-  undistortPoints(
+  image_handler::undistortPoints(
      curr_cam1_points, cam1_intrinsics, cam1_distortion_model,
      cam1_distortion_coeffs, curr_cam1_points_undistorted);

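The first hunk above seeds the cam1 points from the cam0 detections before LK tracking refines them. A standalone sketch of that projection chain (not part of the commit; the function and variable names are illustrative):

// Sketch (not part of this commit): how the undistort/distort pair above maps
// cam0 detections into expected cam1 pixel locations.
#include <string>
#include <vector>
#include <msckf_vio/image_handler.h>

std::vector<cv::Point2f> predictCam1Points(
    const std::vector<cv::Point2f>& cam0_points,
    const cv::Vec4d& cam0_intrinsics, const std::string& cam0_distortion_model,
    const cv::Vec4d& cam0_distortion_coeffs,
    const cv::Vec4d& cam1_intrinsics, const std::string& cam1_distortion_model,
    const cv::Vec4d& cam1_distortion_coeffs,
    const cv::Matx33d& R_cam0_cam1)   // rotation cam0 -> cam1 from the extrinsics
{
  using namespace msckf_vio;

  // Undistort to normalized coordinates and rotate them into the cam1 frame:
  // passing R_cam0_cam1 as the "rectification" matrix applies the rotation
  // before the (identity) new camera matrix. Translation is ignored, which is
  // acceptable for a rough initial guess.
  std::vector<cv::Point2f> cam0_points_undistorted;
  image_handler::undistortPoints(cam0_points, cam0_intrinsics,
      cam0_distortion_model, cam0_distortion_coeffs,
      cam0_points_undistorted, R_cam0_cam1);

  // Re-apply cam1's intrinsics and distortion to get pixel predictions.
  return image_handler::distortPoints(cam0_points_undistorted, cam1_intrinsics,
      cam1_distortion_model, cam1_distortion_coeffs);
}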
View File

@@ -117,6 +117,54 @@ bool MsckfVio::loadParameters() {
   nh.param<double>("initial_covariance/extrinsic_translation_cov",
       extrinsic_translation_cov, 1e-4);

+  // get camera information (used for back projection)
+  nh.param<string>("cam0/distortion_model",
+      cam0_distortion_model, string("radtan"));
+  nh.param<string>("cam1/distortion_model",
+      cam1_distortion_model, string("radtan"));

+  vector<int> cam0_resolution_temp(2);
+  nh.getParam("cam0/resolution", cam0_resolution_temp);
+  cam0_resolution[0] = cam0_resolution_temp[0];
+  cam0_resolution[1] = cam0_resolution_temp[1];

+  vector<int> cam1_resolution_temp(2);
+  nh.getParam("cam1/resolution", cam1_resolution_temp);
+  cam1_resolution[0] = cam1_resolution_temp[0];
+  cam1_resolution[1] = cam1_resolution_temp[1];

+  vector<double> cam0_intrinsics_temp(4);
+  nh.getParam("cam0/intrinsics", cam0_intrinsics_temp);
+  cam0_intrinsics[0] = cam0_intrinsics_temp[0];
+  cam0_intrinsics[1] = cam0_intrinsics_temp[1];
+  cam0_intrinsics[2] = cam0_intrinsics_temp[2];
+  cam0_intrinsics[3] = cam0_intrinsics_temp[3];

+  vector<double> cam1_intrinsics_temp(4);
+  nh.getParam("cam1/intrinsics", cam1_intrinsics_temp);
+  cam1_intrinsics[0] = cam1_intrinsics_temp[0];
+  cam1_intrinsics[1] = cam1_intrinsics_temp[1];
+  cam1_intrinsics[2] = cam1_intrinsics_temp[2];
+  cam1_intrinsics[3] = cam1_intrinsics_temp[3];

+  vector<double> cam0_distortion_coeffs_temp(4);
+  nh.getParam("cam0/distortion_coeffs",
+      cam0_distortion_coeffs_temp);
+  cam0_distortion_coeffs[0] = cam0_distortion_coeffs_temp[0];
+  cam0_distortion_coeffs[1] = cam0_distortion_coeffs_temp[1];
+  cam0_distortion_coeffs[2] = cam0_distortion_coeffs_temp[2];
+  cam0_distortion_coeffs[3] = cam0_distortion_coeffs_temp[3];

+  vector<double> cam1_distortion_coeffs_temp(4);
+  nh.getParam("cam1/distortion_coeffs",
+      cam1_distortion_coeffs_temp);
+  cam1_distortion_coeffs[0] = cam1_distortion_coeffs_temp[0];
+  cam1_distortion_coeffs[1] = cam1_distortion_coeffs_temp[1];
+  cam1_distortion_coeffs[2] = cam1_distortion_coeffs_temp[2];
+  cam1_distortion_coeffs[3] = cam1_distortion_coeffs_temp[3];

   state_server.state_cov = MatrixXd::Zero(21, 21);
   for (int i = 3; i < 6; ++i)
     state_server.state_cov(i, i) = gyro_bias_cov;
@@ -1114,7 +1162,7 @@ void MsckfVio::removeLostFeatures() {
       }
     }

-    if(!feature.initializeAnchor(cam0_moving_window))
+    if(!feature.initializeAnchor(cam0_moving_window, cam0_intrinsics, cam0_distortion_model, cam0_distortion_coeffs))
     {
       invalid_feature_ids.push_back(feature.id);
       continue;