First commit of the code

Ke Sun
2018-01-08 14:41:37 -05:00
parent 37dd131d33
commit 700a3c625a
32 changed files with 5921 additions and 1 deletion

cam_state.h

@@ -0,0 +1,67 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_CAM_STATE_H
#define MSCKF_VIO_CAM_STATE_H
#include <map>
#include <vector>
#include <Eigen/Dense>
#include "imu_state.h"
namespace msckf_vio {
/*
* @brief CAMState Stores the camera state needed to
* form the measurement model.
*/
struct CAMState {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
// A unique identifier for the CAM state.
StateIDType id;
// Time when the state is recorded
double time;
// Orientation
// Takes a vector from the world frame to the camera frame.
Eigen::Vector4d orientation;
// Position of the camera frame in the world frame.
Eigen::Vector3d position;
// These two variables should have the same physical
// interpretation as `orientation` and `position`.
// These two variables are used to modify the measurement
// Jacobian matrices so that the observability matrix
// has the proper null space.
Eigen::Vector4d orientation_null;
Eigen::Vector3d position_null;
// Takes a vector from the cam0 frame to the cam1 frame.
static Eigen::Isometry3d T_cam0_cam1;
CAMState(): id(0), time(0),
orientation(Eigen::Vector4d(0, 0, 0, 1)),
position(Eigen::Vector3d::Zero()),
orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
position_null(Eigen::Vector3d(0, 0, 0)) {}
CAMState(const StateIDType& new_id): id(new_id), time(0),
orientation(Eigen::Vector4d(0, 0, 0, 1)),
position(Eigen::Vector3d::Zero()),
orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
position_null(Eigen::Vector3d::Zero()) {}
};
typedef std::map<StateIDType, CAMState, std::less<StateIDType>,
Eigen::aligned_allocator<
std::pair<const StateIDType, CAMState> > > CamStateServer;
} // namespace msckf_vio
#endif // MSCKF_VIO_CAM_STATE_H
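A minimal usage sketch (illustrative, not part of this commit) showing how a camera state might be created and stored in a CamStateServer; all values below are placeholders:

#include "cam_state.h"

void example() {
  msckf_vio::CamStateServer cam_states;
  msckf_vio::CAMState cam_state(1);                     // placeholder id
  cam_state.time = 0.05;                                // placeholder timestamp
  cam_state.orientation = Eigen::Vector4d(0, 0, 0, 1);  // identity quaternion
  cam_state.position = Eigen::Vector3d(0.1, 0.0, 0.0);  // placeholder position
  cam_states[cam_state.id] = cam_state;
}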

feature.hpp

@@ -0,0 +1,440 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_FEATURE_H
#define MSCKF_VIO_FEATURE_H
#include <iostream>
#include <map>
#include <vector>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <Eigen/StdVector>
#include "math_utils.hpp"
#include "imu_state.h"
#include "cam_state.h"
namespace msckf_vio {
/*
* @brief Feature Salient part of an image. Please refer
* to the Appendix of "A Multi-State Constraint Kalman
* Filter for Vision-aided Inertial Navigation" for how
* the 3d position of a feature is initialized.
*/
struct Feature {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
typedef long long int FeatureIDType;
/*
* @brief OptimizationConfig Configuration parameters
* for 3d feature position optimization.
*/
struct OptimizationConfig {
double translation_threshold;
double huber_epsilon;
double estimation_precision;
double initial_damping;
int outer_loop_max_iteration;
int inner_loop_max_iteration;
OptimizationConfig():
translation_threshold(0.2),
huber_epsilon(0.01),
estimation_precision(5e-7),
initial_damping(1e-3),
outer_loop_max_iteration(10),
inner_loop_max_iteration(10) {
return;
}
};
// Constructors for the struct.
Feature(): id(0), position(Eigen::Vector3d::Zero()),
is_initialized(false) {}
Feature(const FeatureIDType& new_id): id(new_id),
position(Eigen::Vector3d::Zero()),
is_initialized(false) {}
/*
* @brief cost Compute the cost of a camera observation.
* @param T_c0_ci A rigid body transformation that takes
* a vector from the c0 frame to the ci frame.
* @param x The current estimate of the feature position.
* @param z The ith measurement of the feature in the ci frame.
* @return e The cost of this observation.
*/
inline void cost(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
double& e) const;
/*
* @brief jacobian Compute the Jacobian of a camera observation.
* @param T_c0_ci A rigid body transformation that takes
* a vector from the c0 frame to the ci frame.
* @param x The current estimate of the feature position.
* @param z The actual measurement of the feature in the ci frame.
* @return J The computed Jacobian.
* @return r The computed residual.
* @return w Weight induced by the Huber kernel.
*/
inline void jacobian(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
double& w) const;
/*
* @brief generateInitialGuess Compute an initial guess of
* the feature's 3d position using only two views.
* @param T_c1_c2: A rigid body transformation taking
* a vector from the c1 frame to the c2 frame.
* @param z1: feature observation in the c1 frame.
* @param z2: feature observation in the c2 frame.
* @return p: Computed feature position in the c1 frame.
*/
inline void generateInitialGuess(
const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
const Eigen::Vector2d& z2, Eigen::Vector3d& p) const;
/*
* @brief checkMotion Check the input camera poses to ensure
* there is enough translation to triangulate the feature
* position.
* @param cam_states: input camera poses.
* @return True if the translation between the input camera
* poses is sufficient.
*/
inline bool checkMotion(
const CamStateServer& cam_states) const;
/*
* @brief initializePosition Initialize the feature position
* based on all currently available measurements.
* @param cam_states: A map containing the camera poses with
* their IDs as the associated keys.
* @return The computed 3d position is used to set the position
* member variable. Note the resulting position is in the world
* frame.
* @return True if the estimated 3d position of the feature
* is valid.
*/
inline bool initializePosition(
const CamStateServer& cam_states);
// A unique identifier for the feature.
// The id is of type FeatureIDType (a 64-bit
// integer) so that it does not overflow and
// produce duplicates during long runs.
FeatureIDType id;
// id for next feature
static FeatureIDType next_id;
// Store the observations of the feature as a map from
// state id (key) to stereo image coordinates (value).
std::map<StateIDType, Eigen::Vector4d, std::less<StateIDType>,
Eigen::aligned_allocator<
std::pair<const StateIDType, Eigen::Vector4d> > > observations;
// 3d position of the feature in the world frame.
Eigen::Vector3d position;
// An indicator showing whether the 3d position of
// the feature has been initialized or not.
bool is_initialized;
// Noise for a normalized feature measurement.
static double observation_noise;
// Optimization configuration for solving the 3d position.
static OptimizationConfig optimization_config;
};
typedef Feature::FeatureIDType FeatureIDType;
typedef std::map<FeatureIDType, Feature, std::less<FeatureIDType>,
Eigen::aligned_allocator<
std::pair<const FeatureIDType, Feature> > > MapServer;
void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
double& e) const {
// Compute hi1, hi2, and hi3 as Equation (37).
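// x = (alpha, beta, rho) is the inverse-depth parametrization of the
// feature in the c0 frame, i.e. p_c0 = (1/rho)*(alpha, beta, 1)^T, so
// the h computed below is rho times the feature position in the ci frame.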
const double& alpha = x(0);
const double& beta = x(1);
const double& rho = x(2);
Eigen::Vector3d h = T_c0_ci.linear()*
Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
double& h1 = h(0);
double& h2 = h(1);
double& h3 = h(2);
// Predict the feature observation in ci frame.
Eigen::Vector2d z_hat(h1/h3, h2/h3);
// Compute the residual.
e = (z_hat-z).squaredNorm();
return;
}
void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
double& w) const {
// Compute hi1, hi2, and hi3 as Equation (37).
const double& alpha = x(0);
const double& beta = x(1);
const double& rho = x(2);
Eigen::Vector3d h = T_c0_ci.linear()*
Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
double& h1 = h(0);
double& h2 = h(1);
double& h3 = h(2);
// Compute the Jacobian.
Eigen::Matrix3d W;
W.leftCols<2>() = T_c0_ci.linear().leftCols<2>();
W.rightCols<1>() = T_c0_ci.translation();
J.row(0) = 1/h3*W.row(0) - h1/(h3*h3)*W.row(2);
J.row(1) = 1/h3*W.row(1) - h2/(h3*h3)*W.row(2);
// Compute the residual.
Eigen::Vector2d z_hat(h1/h3, h2/h3);
r = z_hat - z;
// Compute the weight based on the residual.
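// Huber-style reweighting: residuals within huber_epsilon keep full
// weight, while larger residuals are down-weighted so that outliers
// contribute roughly linearly, instead of quadratically, to the cost.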
double e = r.norm();
if (e <= optimization_config.huber_epsilon)
w = 1.0;
else
w = optimization_config.huber_epsilon / (2*e);
return;
}
void Feature::generateInitialGuess(
const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
const Eigen::Vector2d& z2, Eigen::Vector3d& p) const {
// Construct a least square problem to solve the depth.
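// Let d be the depth of the feature along the z1 ray in the c1 frame.
// Its position in the c2 frame is then m*d + t, where t is
// T_c1_c2.translation(). The projection constraint
// z2 = ((m*d+t)(0), (m*d+t)(1)) / (m*d+t)(2) rearranges into the
// linear system A*d = b, which is solved below in the least-squares
// sense: d = (A^T A)^{-1} A^T b.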
Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);
Eigen::Vector2d A(0.0, 0.0);
A(0) = m(0) - z2(0)*m(2);
A(1) = m(1) - z2(1)*m(2);
Eigen::Vector2d b(0.0, 0.0);
b(0) = z2(0)*T_c1_c2.translation()(2) - T_c1_c2.translation()(0);
b(1) = z2(1)*T_c1_c2.translation()(2) - T_c1_c2.translation()(1);
// Solve for the depth.
double depth = (A.transpose() * A).inverse() * A.transpose() * b;
p(0) = z1(0) * depth;
p(1) = z1(1) * depth;
p(2) = depth;
return;
}
bool Feature::checkMotion(
const CamStateServer& cam_states) const {
const StateIDType& first_cam_id = observations.begin()->first;
const StateIDType& last_cam_id = (--observations.end())->first;
Eigen::Isometry3d first_cam_pose;
first_cam_pose.linear() = quaternionToRotation(
cam_states.find(first_cam_id)->second.orientation).transpose();
first_cam_pose.translation() =
cam_states.find(first_cam_id)->second.position;
Eigen::Isometry3d last_cam_pose;
last_cam_pose.linear() = quaternionToRotation(
cam_states.find(last_cam_id)->second.orientation).transpose();
last_cam_pose.translation() =
cam_states.find(last_cam_id)->second.position;
// Get the direction of the feature when it is first observed.
// This direction is represented in the world frame.
Eigen::Vector3d feature_direction(
observations.begin()->second(0),
observations.begin()->second(1), 1.0);
feature_direction = feature_direction / feature_direction.norm();
feature_direction = first_cam_pose.linear()*feature_direction;
// Compute the translation between the first frame
// and the last frame. We assume the first frame and
// the last frame will provide the largest motion to
// speed up the checking process.
Eigen::Vector3d translation = last_cam_pose.translation() -
first_cam_pose.translation();
double parallel_translation =
translation.transpose()*feature_direction;
Eigen::Vector3d orthogonal_translation = translation -
parallel_translation*feature_direction;
if (orthogonal_translation.norm() >
optimization_config.translation_threshold)
return true;
else return false;
}
bool Feature::initializePosition(
const CamStateServer& cam_states) {
// Organize camera poses and feature observations properly.
std::vector<Eigen::Isometry3d,
Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
std::vector<Eigen::Vector2d,
Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
for (auto& m : observations) {
// TODO: This should be handled properly. Normally, the
// required camera states should all be available in
// the input cam_states buffer.
auto cam_state_iter = cam_states.find(m.first);
if (cam_state_iter == cam_states.end()) continue;
// Add the measurement.
measurements.push_back(m.second.head<2>());
measurements.push_back(m.second.tail<2>());
// This camera pose will take a vector from this camera frame
// to the world frame.
Eigen::Isometry3d cam0_pose;
cam0_pose.linear() = quaternionToRotation(
cam_state_iter->second.orientation).transpose();
cam0_pose.translation() = cam_state_iter->second.position;
Eigen::Isometry3d cam1_pose;
cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
cam_poses.push_back(cam0_pose);
cam_poses.push_back(cam1_pose);
}
// All camera poses should be modified such that each takes
// a vector from the first camera frame in the buffer to that
// camera frame.
Eigen::Isometry3d T_c0_w = cam_poses[0];
for (auto& pose : cam_poses)
pose = pose.inverse() * T_c0_w;
// Generate initial guess
Eigen::Vector3d initial_position(0.0, 0.0, 0.0);
generateInitialGuess(cam_poses[cam_poses.size()-1], measurements[0],
measurements[measurements.size()-1], initial_position);
Eigen::Vector3d solution(
initial_position(0)/initial_position(2),
initial_position(1)/initial_position(2),
1.0/initial_position(2));
// Apply the Levenberg-Marquardt method to solve for the 3d position.
double lambda = optimization_config.initial_damping;
int inner_loop_cntr = 0;
int outer_loop_cntr = 0;
bool is_cost_reduced = false;
double delta_norm = 0;
// Compute the initial cost.
double total_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
cost(cam_poses[i], solution, measurements[i], this_cost);
total_cost += this_cost;
}
// Outer loop.
do {
Eigen::Matrix3d A = Eigen::Matrix3d::Zero();
Eigen::Vector3d b = Eigen::Vector3d::Zero();
for (int i = 0; i < cam_poses.size(); ++i) {
Eigen::Matrix<double, 2, 3> J;
Eigen::Vector2d r;
double w;
jacobian(cam_poses[i], solution, measurements[i], J, r, w);
if (w == 1) {
A += J.transpose() * J;
b += J.transpose() * r;
} else {
double w_square = w * w;
A += w_square * J.transpose() * J;
b += w_square * J.transpose() * r;
}
}
// Inner loop.
// Solve for the delta that can reduce the total cost.
do {
Eigen::Matrix3d damper = lambda * Eigen::Matrix3d::Identity();
Eigen::Vector3d delta = (A+damper).ldlt().solve(b);
Eigen::Vector3d new_solution = solution - delta;
delta_norm = delta.norm();
double new_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
cost(cam_poses[i], new_solution, measurements[i], this_cost);
new_cost += this_cost;
}
if (new_cost < total_cost) {
is_cost_reduced = true;
solution = new_solution;
total_cost = new_cost;
lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
} else {
is_cost_reduced = false;
lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
}
} while (inner_loop_cntr++ <
optimization_config.inner_loop_max_iteration && !is_cost_reduced);
inner_loop_cntr = 0;
} while (outer_loop_cntr++ <
optimization_config.outer_loop_max_iteration &&
delta_norm > optimization_config.estimation_precision);
// Convert the feature position from the inverse depth
// representation to its 3d coordinate.
Eigen::Vector3d final_position(solution(0)/solution(2),
solution(1)/solution(2), 1.0/solution(2));
// Check if the solution is valid. Make sure the feature
// is in front of every camera frame observing it.
bool is_valid_solution = true;
for (const auto& pose : cam_poses) {
Eigen::Vector3d position =
pose.linear()*final_position + pose.translation();
if (position(2) <= 0) {
is_valid_solution = false;
break;
}
}
// Convert the feature position to the world frame.
position = T_c0_w.linear()*final_position + T_c0_w.translation();
if (is_valid_solution)
is_initialized = true;
return is_valid_solution;
}
} // namespace msckf_vio
#endif // MSCKF_VIO_FEATURE_H
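A short usage sketch (illustrative, not part of this commit) of the triangulation interface; cam_states is assumed to be maintained by the filter:

#include "feature.hpp"

bool triangulate(msckf_vio::Feature& feature,
    const msckf_vio::CamStateServer& cam_states) {
  // Only attempt triangulation when the baseline is large enough.
  if (!feature.checkMotion(cam_states)) return false;
  // On success, feature.position holds the 3d point in the world
  // frame and feature.is_initialized is set to true.
  return feature.initializePosition(cam_states);
}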

image_processor.h

@@ -0,0 +1,398 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_IMAGE_PROCESSOR_H
#define MSCKF_VIO_IMAGE_PROCESSOR_H
#include <vector>
#include <map>
#include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/Image.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
namespace msckf_vio {
/*
* @brief ImageProcessor Detects and tracks features
* in image sequences.
*/
class ImageProcessor {
public:
// Constructor
ImageProcessor(ros::NodeHandle& n);
// Disable the copy constructor and assignment operator.
ImageProcessor(const ImageProcessor&) = delete;
ImageProcessor& operator=(const ImageProcessor&) = delete;
// Destructor
~ImageProcessor();
// Initialize the object.
bool initialize();
typedef boost::shared_ptr<ImageProcessor> Ptr;
typedef boost::shared_ptr<const ImageProcessor> ConstPtr;
private:
/*
* @brief ProcessorConfig Configuration parameters for
* feature detection and tracking.
*/
struct ProcessorConfig {
int grid_row;
int grid_col;
int grid_min_feature_num;
int grid_max_feature_num;
int pyramid_levels;
int patch_size;
int fast_threshold;
int max_iteration;
double track_precision;
double ransac_threshold;
double stereo_threshold;
};
/*
* @brief FeatureIDType An alias for unsigned long long int.
*/
typedef unsigned long long int FeatureIDType;
/*
* @brief FeatureMetaData Contains necessary information
* of a feature for easy access.
*/
struct FeatureMetaData {
FeatureIDType id;
float response;
int lifetime;
cv::Point2f cam0_point;
cv::Point2f cam1_point;
};
/*
* @brief GridFeatures Organize features based on the grid
* they belong to. Note that the key is encoded by the
* grid index.
*/
typedef std::map<int, std::vector<FeatureMetaData> > GridFeatures;
/*
* @brief keyPointCompareByResponse
* Compare two keypoints based on the response.
*/
static bool keyPointCompareByResponse(
const cv::KeyPoint& pt1,
const cv::KeyPoint& pt2) {
// Keypoint with higher response will be at the
// beginning of the vector.
return pt1.response > pt2.response;
}
/*
* @brief featureCompareByResponse
* Compare two features based on the response.
*/
static bool featureCompareByResponse(
const FeatureMetaData& f1,
const FeatureMetaData& f2) {
// Features with higher response will be at the
// beginning of the vector.
return f1.response > f2.response;
}
/*
* @brief featureCompareByLifetime
* Compare two features based on the lifetime.
*/
static bool featureCompareByLifetime(
const FeatureMetaData& f1,
const FeatureMetaData& f2) {
// Features with longer lifetime will be at the
// beginning of the vector.
return f1.lifetime > f2.lifetime;
}
/*
* @brief loadParameters
* Load parameters from the parameter server.
*/
bool loadParameters();
/*
* @brief createRosIO
* Create ros publishers and subscribers.
*/
bool createRosIO();
/*
* @brief stereoCallback
* Callback function for the stereo images.
* @param cam0_img left image.
* @param cam1_img right image.
*/
void stereoCallback(
const sensor_msgs::ImageConstPtr& cam0_img,
const sensor_msgs::ImageConstPtr& cam1_img);
/*
* @brief imuCallback
* Callback function for the imu message.
* @param msg IMU msg.
*/
void imuCallback(const sensor_msgs::ImuConstPtr& msg);
/*
* @brief initializeFirstFrame
* Initialize the image processing sequence, which
* basically detects new features on the first set of
* stereo images.
*/
void initializeFirstFrame();
/*
* @brief trackFeatures
* Track features on the newly received stereo images.
*/
void trackFeatures();
/*
* @brief addNewFeatures
* Detect new features on the image to ensure that the
* features are uniformly distributed on the image.
*/
void addNewFeatures();
/*
* @brief pruneGridFeatures
* Remove some of the features of a grid in case there are
* too many features inside of that grid, which ensures the
* number of features within each grid is bounded.
*/
void pruneGridFeatures();
/*
* @brief publish
* Publish the features on the current image including
* both the tracked and newly detected ones.
*/
void publish();
/*
* @brief drawFeaturesMono
* Draw tracked and newly detected features on the left
* image only.
*/
void drawFeaturesMono();
/*
* @brief drawFeaturesStereo
* Draw tracked and newly detected features on the
* stereo images.
*/
void drawFeaturesStereo();
/*
* @brief createImagePyramids
* Create image pyramids used for KLT tracking.
*/
void createImagePyramids();
/*
* @brief integrateImuData Integrates the IMU gyro readings
* between the two consecutive images, which is used for
* both tracking prediction and 2-point RANSAC.
* @return cam0_R_p_c: a rotation matrix which takes a vector
* from previous cam0 frame to current cam0 frame.
* @return cam1_R_p_c: a rotation matrix which takes a vector
* from previous cam1 frame to current cam1 frame.
*/
void integrateImuData(cv::Matx33f& cam0_R_p_c,
cv::Matx33f& cam1_R_p_c);
/*
* @brief predictFeatureTracking Compensates the rotation
* between consecutive camera frames so that feature
* tracking is more robust and faster.
* @param input_pts: features in the previous image to be tracked.
* @param R_p_c: a rotation matrix taking a vector in the previous
* camera frame to the current camera frame.
* @param intrinsics: intrinsic matrix of the camera.
* @return compensated_pts: predicted locations of the features
* in the current image based on the provided rotation.
*
* Note that the input and output points are in pixel coordinates.
*/
void predictFeatureTracking(
const std::vector<cv::Point2f>& input_pts,
const cv::Matx33f& R_p_c,
const cv::Vec4d& intrinsics,
std::vector<cv::Point2f>& compensated_pts);
/*
* @brief twoPointRansac Applies the two-point RANSAC algorithm
* to mark the inliers in the input set.
* @param pts1: first set of points.
* @param pts2: second set of points.
* @param R_p_c: a rotation matrix taking a vector in the previous
* camera frame to the current camera frame.
* @param intrinsics: intrinsics of the camera.
* @param distortion_model: distortion model of the camera.
* @param distortion_coeffs: distortion coefficients.
* @param inlier_error: acceptable error to be considered as an inlier.
* @param success_probability: the required probability of success.
* @return inlier_markers: 1 for inliers and 0 for outliers.
*/
void twoPointRansac(
const std::vector<cv::Point2f>& pts1,
const std::vector<cv::Point2f>& pts2,
const cv::Matx33f& R_p_c,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
const double& inlier_error,
const double& success_probability,
std::vector<int>& inlier_markers);
void undistortPoints(
const std::vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
std::vector<cv::Point2f>& pts_out,
const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(),
const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0));
void rescalePoints(
std::vector<cv::Point2f>& pts1,
std::vector<cv::Point2f>& pts2,
float& scaling_factor);
std::vector<cv::Point2f> distortPoints(
const std::vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs);
/*
* @brief stereoMatch Matches features with stereo image pairs.
* @param cam0_points: points in the primary image.
* @return cam1_points: points in the secondary image.
* @return inlier_markers: 1 if the match is valid, 0 otherwise.
*/
void stereoMatch(
const std::vector<cv::Point2f>& cam0_points,
std::vector<cv::Point2f>& cam1_points,
std::vector<unsigned char>& inlier_markers);
/*
* @brief removeUnmarkedElements Remove the unmarked elements
* within a vector.
* @param raw_vec: vector with outliers.
* @param markers: 0 represents an outlier, 1 an inlier.
* @return refined_vec: a vector without outliers.
*
* Note that the order of the inliers in the raw_vec is preserved
* in the refined_vec.
*/
template <typename T>
void removeUnmarkedElements(
const std::vector<T>& raw_vec,
const std::vector<unsigned char>& markers,
std::vector<T>& refined_vec) {
if (raw_vec.size() != markers.size()) {
ROS_WARN("The input size of raw_vec(%lu) and markers(%lu) does not match...",
raw_vec.size(), markers.size());
}
for (int i = 0; i < markers.size(); ++i) {
if (markers[i] == 0) continue;
refined_vec.push_back(raw_vec[i]);
}
return;
}
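// Typical usage (illustrative): after stereo matching,
//   std::vector<unsigned char> inlier_markers;
//   stereoMatch(cam0_points, cam1_points, inlier_markers);
//   std::vector<cv::Point2f> cam0_inliers;
//   removeUnmarkedElements(cam0_points, inlier_markers, cam0_inliers);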
// Indicate if this is the first image message.
bool is_first_img;
// ID for the next new feature.
FeatureIDType next_feature_id;
// Feature detector
ProcessorConfig processor_config;
cv::Ptr<cv::Feature2D> detector_ptr;
// IMU message buffer.
std::vector<sensor_msgs::Imu> imu_msg_buffer;
// Camera calibration parameters
std::string cam0_distortion_model;
cv::Vec2i cam0_resolution;
cv::Vec4d cam0_intrinsics;
cv::Vec4d cam0_distortion_coeffs;
std::string cam1_distortion_model;
cv::Vec2i cam1_resolution;
cv::Vec4d cam1_intrinsics;
cv::Vec4d cam1_distortion_coeffs;
// Takes a vector from the cam0 frame to the IMU frame.
cv::Matx33d R_cam0_imu;
cv::Vec3d t_cam0_imu;
// Takes a vector from the cam1 frame to the IMU frame.
cv::Matx33d R_cam1_imu;
cv::Vec3d t_cam1_imu;
// Previous and current images
cv_bridge::CvImageConstPtr cam0_prev_img_ptr;
cv_bridge::CvImageConstPtr cam0_curr_img_ptr;
cv_bridge::CvImageConstPtr cam1_curr_img_ptr;
// Pyramids for previous and current image
std::vector<cv::Mat> prev_cam0_pyramid_;
std::vector<cv::Mat> curr_cam0_pyramid_;
std::vector<cv::Mat> curr_cam1_pyramid_;
// Features in the previous and current image.
boost::shared_ptr<GridFeatures> prev_features_ptr;
boost::shared_ptr<GridFeatures> curr_features_ptr;
// Number of features after each outlier removal step.
int before_tracking;
int after_tracking;
int after_matching;
int after_ransac;
// Ros node handle
ros::NodeHandle nh;
// Subscribers and publishers.
message_filters::Subscriber<
sensor_msgs::Image> cam0_img_sub;
message_filters::Subscriber<
sensor_msgs::Image> cam1_img_sub;
message_filters::TimeSynchronizer<
sensor_msgs::Image, sensor_msgs::Image> stereo_sub;
ros::Subscriber imu_sub;
ros::Publisher feature_pub;
ros::Publisher tracking_info_pub;
image_transport::Publisher debug_stereo_pub;
// Debugging
std::map<FeatureIDType, int> feature_lifetime;
void updateFeatureLifetime();
void featureLifetimeStatistics();
};
typedef ImageProcessor::Ptr ImageProcessorPtr;
typedef ImageProcessor::ConstPtr ImageProcessorConstPtr;
} // end namespace msckf_vio
#endif
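A minimal standalone-node sketch (hypothetical; the commit ships a nodelet wrapper, shown next) that instantiates the processor:

#include <ros/ros.h>
#include <msckf_vio/image_processor.h>

int main(int argc, char** argv) {
  ros::init(argc, argv, "image_processor");
  ros::NodeHandle pnh("~");
  msckf_vio::ImageProcessorPtr processor(
      new msckf_vio::ImageProcessor(pnh));
  if (!processor->initialize()) {
    ROS_ERROR("Cannot initialize the image processor.");
    return -1;
  }
  ros::spin();
  return 0;
}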

image_processor_nodelet.h

@@ -0,0 +1,28 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef IMAGE_PROCESSOR_NODELET_H
#define IMAGE_PROCESSOR_NODELET_H
#include <nodelet/nodelet.h>
#include <pluginlib/class_list_macros.h>
#include <msckf_vio/image_processor.h>
namespace msckf_vio {
class ImageProcessorNodelet : public nodelet::Nodelet {
public:
ImageProcessorNodelet() { return; }
~ImageProcessorNodelet() { return; }
private:
virtual void onInit();
ImageProcessorPtr img_processor_ptr;
};
} // end namespace msckf_vio
#endif

imu_state.h

@@ -0,0 +1,110 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_IMU_STATE_H
#define MSCKF_VIO_IMU_STATE_H
#include <map>
#include <vector>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#define GRAVITY_ACCELERATION 9.81
namespace msckf_vio {
/*
* @brief IMUState State for IMU
*/
struct IMUState {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
typedef long long int StateIDType;
// A unique identifier for the IMU state.
StateIDType id;
// id for next IMU state
static StateIDType next_id;
// Time when the state is recorded
double time;
// Orientation
// Takes a vector from the world frame to
// the IMU (body) frame.
Eigen::Vector4d orientation;
// Position of the IMU (body) frame
// in the world frame.
Eigen::Vector3d position;
// Velocity of the IMU (body) frame
// in the world frame.
Eigen::Vector3d velocity;
// Bias for measured angular velocity
// and acceleration.
Eigen::Vector3d gyro_bias;
Eigen::Vector3d acc_bias;
// Transformation between the IMU and the
// left camera (cam0)
Eigen::Matrix3d R_imu_cam0;
Eigen::Vector3d t_cam0_imu;
// These three variables should have the same physical
// interpretation as `orientation`, `position`, and
// `velocity`. These three variables are used to modify
// the transition matrices so that the observability matrix
// has the proper null space.
Eigen::Vector4d orientation_null;
Eigen::Vector3d position_null;
Eigen::Vector3d velocity_null;
// Process noise
static double gyro_noise;
static double acc_noise;
static double gyro_bias_noise;
static double acc_bias_noise;
// Gravity vector in the world frame
static Eigen::Vector3d gravity;
// Transformation offset from the IMU frame to
// the body frame. The transformation takes a
// vector from the IMU frame to the body frame.
// The z axis of the body frame should point upwards.
// Normally, this transform should be identity.
static Eigen::Isometry3d T_imu_body;
IMUState(): id(0), time(0),
orientation(Eigen::Vector4d(0, 0, 0, 1)),
position(Eigen::Vector3d::Zero()),
velocity(Eigen::Vector3d::Zero()),
gyro_bias(Eigen::Vector3d::Zero()),
acc_bias(Eigen::Vector3d::Zero()),
orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
position_null(Eigen::Vector3d::Zero()),
velocity_null(Eigen::Vector3d::Zero()) {}
IMUState(const StateIDType& new_id): id(new_id), time(0),
orientation(Eigen::Vector4d(0, 0, 0, 1)),
position(Eigen::Vector3d::Zero()),
velocity(Eigen::Vector3d::Zero()),
gyro_bias(Eigen::Vector3d::Zero()),
acc_bias(Eigen::Vector3d::Zero()),
orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
position_null(Eigen::Vector3d::Zero()),
velocity_null(Eigen::Vector3d::Zero()) {}
};
typedef IMUState::StateIDType StateIDType;
} // namespace msckf_vio
#endif // MSCKF_VIO_IMU_STATE_H
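The static members declared above must be defined exactly once in a translation unit. A sketch of such definitions (the noise values are placeholders, not the ones shipped in this commit):

// In a single .cpp file of the package:
#include <msckf_vio/imu_state.h>

namespace msckf_vio {
StateIDType IMUState::next_id = 0;
double IMUState::gyro_noise = 0.001;        // placeholder
double IMUState::acc_noise = 0.01;          // placeholder
double IMUState::gyro_bias_noise = 0.001;   // placeholder
double IMUState::acc_bias_noise = 0.01;     // placeholder
Eigen::Vector3d IMUState::gravity =
    Eigen::Vector3d(0, 0, -GRAVITY_ACCELERATION);
Eigen::Isometry3d IMUState::T_imu_body = Eigen::Isometry3d::Identity();
} // namespace msckf_vio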

math_utils.hpp

@@ -0,0 +1,163 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_MATH_UTILS_HPP
#define MSCKF_VIO_MATH_UTILS_HPP
#include <cmath>
#include <Eigen/Dense>
namespace msckf_vio {
/*
* @brief Create a skew-symmetric matrix from a 3-element vector.
* @note Performs the operation:
* w -> [ 0 -w3 w2]
* [ w3 0 -w1]
* [-w2 w1 0]
*/
inline Eigen::Matrix3d skewSymmetric(const Eigen::Vector3d& w) {
Eigen::Matrix3d w_hat;
w_hat(0, 0) = 0;
w_hat(0, 1) = -w(2);
w_hat(0, 2) = w(1);
w_hat(1, 0) = w(2);
w_hat(1, 1) = 0;
w_hat(1, 2) = -w(0);
w_hat(2, 0) = -w(1);
w_hat(2, 1) = w(0);
w_hat(2, 2) = 0;
return w_hat;
}
/*
* @brief Normalize the given quaternion to unit quaternion.
*/
inline void quaternionNormalize(Eigen::Vector4d& q) {
double norm = q.norm();
q = q / norm;
return;
}
/*
* @brief Perform the quaternion multiplication q1 * q2
* (JPL convention, scalar-last).
*/
inline Eigen::Vector4d quaternionMultiplication(
const Eigen::Vector4d& q1,
const Eigen::Vector4d& q2) {
Eigen::Matrix4d L;
L(0, 0) = q1(3); L(0, 1) = q1(2); L(0, 2) = -q1(1); L(0, 3) = q1(0);
L(1, 0) = -q1(2); L(1, 1) = q1(3); L(1, 2) = q1(0); L(1, 3) = q1(1);
L(2, 0) = q1(1); L(2, 1) = -q1(0); L(2, 2) = q1(3); L(2, 3) = q1(2);
L(3, 0) = -q1(0); L(3, 1) = -q1(1); L(3, 2) = -q1(2); L(3, 3) = q1(3);
Eigen::Vector4d q = L * q2;
quaternionNormalize(q);
return q;
}
/*
* @brief Convert the vector part of a quaternion to a
* full quaternion.
* @note This function is useful for converting a delta
* quaternion, which is usually a 3x1 vector, to a full
* quaternion.
* For more details, check Equations (238) and (239) in
* "Indirect Kalman Filter for 3D Attitude Estimation:
* A Tutorial for Quaternion Algebra".
*/
inline Eigen::Vector4d smallAngleQuaternion(
const Eigen::Vector3d& dtheta) {
Eigen::Vector3d dq = dtheta / 2.0;
Eigen::Vector4d q;
double dq_square_norm = dq.squaredNorm();
if (dq_square_norm <= 1) {
q.head<3>() = dq;
q(3) = std::sqrt(1-dq_square_norm);
} else {
q.head<3>() = dq;
q(3) = 1;
q = q / std::sqrt(1+dq_square_norm);
}
return q;
}
/*
* @brief Convert a quaternion to the corresponding rotation matrix
* @note Pay attention to the convention used. The function follows the
* conversion in "Indirect Kalman Filter for 3D Attitude Estimation:
* A Tutorial for Quaternion Algebra", Equation (78).
*
* The input quaternion should be in the form
* [q1, q2, q3, q4(scalar)]^T
*/
inline Eigen::Matrix3d quaternionToRotation(
const Eigen::Vector4d& q) {
const Eigen::Vector3d& q_vec = q.block(0, 0, 3, 1);
const double& q4 = q(3);
Eigen::Matrix3d R =
(2*q4*q4-1)*Eigen::Matrix3d::Identity() -
2*q4*skewSymmetric(q_vec) +
2*q_vec*q_vec.transpose();
//TODO: Is it necessary to use the approximation equation
// (Equation (87)) when the rotation angle is small?
return R;
}
/*
* @brief Convert a rotation matrix to a quaternion.
* @note Pay attention to the convention used. The function follows the
* conversion in "Indirect Kalman Filter for 3D Attitude Estimation:
* A Tutorial for Quaternion Algebra", Equation (78).
*
* The output quaternion is of the form
* [q1, q2, q3, q4(scalar)]^T
*/
inline Eigen::Vector4d rotationToQuaternion(
const Eigen::Matrix3d& R) {
Eigen::Vector4d score;
score(0) = R(0, 0);
score(1) = R(1, 1);
score(2) = R(2, 2);
score(3) = R.trace();
int max_row = 0, max_col = 0;
score.maxCoeff(&max_row, &max_col);
Eigen::Vector4d q = Eigen::Vector4d::Zero();
if (max_row == 0) {
q(0) = std::sqrt(1+2*R(0, 0)-R.trace()) / 2.0;
q(1) = (R(0, 1)+R(1, 0)) / (4*q(0));
q(2) = (R(0, 2)+R(2, 0)) / (4*q(0));
q(3) = (R(1, 2)-R(2, 1)) / (4*q(0));
} else if (max_row == 1) {
q(1) = std::sqrt(1+2*R(1, 1)-R.trace()) / 2.0;
q(0) = (R(0, 1)+R(1, 0)) / (4*q(1));
q(2) = (R(1, 2)+R(2, 1)) / (4*q(1));
q(3) = (R(2, 0)-R(0, 2)) / (4*q(1));
} else if (max_row == 2) {
q(2) = std::sqrt(1+2*R(2, 2)-R.trace()) / 2.0;
q(0) = (R(0, 2)+R(2, 0)) / (4*q(2));
q(1) = (R(1, 2)+R(2, 1)) / (4*q(2));
q(3) = (R(0, 1)-R(1, 0)) / (4*q(2));
} else {
q(3) = std::sqrt(1+R.trace()) / 2.0;
q(0) = (R(1, 2)-R(2, 1)) / (4*q(3));
q(1) = (R(2, 0)-R(0, 2)) / (4*q(3));
q(2) = (R(0, 1)-R(1, 0)) / (4*q(3));
}
if (q(3) < 0) q = -q;
quaternionNormalize(q);
return q;
}
} // end namespace msckf_vio
#endif // MSCKF_VIO_MATH_UTILS_HPP
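A quick self-consistency sketch (illustrative, not part of this commit): under the JPL convention used here, R(q1*q2) = R(q1)*R(q2), and the conversions round-trip up to the sign fixed by rotationToQuaternion; q1 and q2 are assumed to be unit quaternions:

#include <cassert>
#include "math_utils.hpp"

void sanityCheck(const Eigen::Vector4d& q1, const Eigen::Vector4d& q2) {
  using namespace msckf_vio;
  // Composition property of the JPL convention.
  Eigen::Matrix3d lhs =
      quaternionToRotation(quaternionMultiplication(q1, q2));
  Eigen::Matrix3d rhs =
      quaternionToRotation(q1) * quaternionToRotation(q2);
  assert((lhs - rhs).norm() < 1e-10);
  // Round trip: rotationToQuaternion enforces a nonnegative scalar
  // part, so the result equals q1 up to sign.
  Eigen::Vector4d q = rotationToQuaternion(quaternionToRotation(q1));
  assert((q - q1).norm() < 1e-10 || (q + q1).norm() < 1e-10);
}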

msckf_vio.h

@@ -0,0 +1,242 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_H
#define MSCKF_VIO_H
#include <map>
#include <set>
#include <vector>
#include <string>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <boost/shared_ptr.hpp>
#include <ros/ros.h>
#include <sensor_msgs/Imu.h>
#include <nav_msgs/Odometry.h>
#include <tf/transform_broadcaster.h>
#include <std_srvs/Trigger.h>
#include "imu_state.h"
#include "cam_state.h"
#include "feature.hpp"
#include <msckf_vio/CameraMeasurement.h>
namespace msckf_vio {
/*
* @brief MsckfVio Implements the algorithm in
* Anastasios I. Mourikis and Stergios I. Roumeliotis,
* "A Multi-State Constraint Kalman Filter for Vision-aided
* Inertial Navigation",
* http://www.ee.ucr.edu/~mourikis/tech_reports/TR_MSCKF.pdf
*/
class MsckfVio {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
// Constructor
MsckfVio(ros::NodeHandle& pnh);
// Disable the copy constructor and assignment operator.
MsckfVio(const MsckfVio&) = delete;
MsckfVio& operator=(const MsckfVio&) = delete;
// Destructor
~MsckfVio() {}
/*
* @brief initialize Initialize the VIO.
*/
bool initialize();
/*
* @brief reset Resets the VIO to initial status.
*/
void reset();
typedef boost::shared_ptr<MsckfVio> Ptr;
typedef boost::shared_ptr<const MsckfVio> ConstPtr;
private:
/*
* @brief StateServer Stores one IMU state and several
* camera states for constructing the measurement
* model.
*/
struct StateServer {
IMUState imu_state;
CamStateServer cam_states;
// State covariance matrix
Eigen::MatrixXd state_cov;
Eigen::Matrix<double, 12, 12> continuous_noise_cov;
};
/*
* @brief loadParameters
* Load parameters from the parameter server.
*/
bool loadParameters();
/*
* @brief createRosIO
* Create ros publishers and subscribers.
*/
bool createRosIO();
/*
* @brief imuCallback
* Callback function for the imu message.
* @param msg IMU msg.
*/
void imuCallback(const sensor_msgs::ImuConstPtr& msg);
/*
* @brief featureCallback
* Callback function for feature measurements.
* @param msg Stereo feature measurements.
*/
void featureCallback(const CameraMeasurementConstPtr& msg);
/*
* @brief publish Publish the results of VIO.
* @param time The time stamp of output msgs.
*/
void publish(const ros::Time& time);
/*
* @brief initializeGravityAndBias
* Initialize the IMU bias and initial orientation
* based on the first few IMU readings.
*/
void initializeGravityAndBias();
/*
* @brief resetCallback
* Callback function for the reset service.
* Note that this is NOT an anytime reset. This function should
* only be called before the sensor suite starts moving,
* e.g. while the robot is still on the ground.
*/
bool resetCallback(std_srvs::Trigger::Request& req,
std_srvs::Trigger::Response& res);
// Filter related functions
// Propagate the state
void batchImuProcessing(
const double& time_bound);
void processModel(const double& time,
const Eigen::Vector3d& m_gyro,
const Eigen::Vector3d& m_acc);
void predictNewState(const double& dt,
const Eigen::Vector3d& gyro,
const Eigen::Vector3d& acc);
// Measurement update
void stateAugmentation(const double& time);
void addFeatureObservations(const CameraMeasurementConstPtr& msg);
// This function is used to compute the measurement Jacobian
// for a single feature observed at a single camera frame.
void measurementJacobian(const StateIDType& cam_state_id,
const FeatureIDType& feature_id,
Eigen::Matrix<double, 4, 6>& H_x,
Eigen::Matrix<double, 4, 3>& H_f,
Eigen::Vector4d& r);
// This function computes the Jacobian of all measurements viewed
// in the given camera states of this feature.
void featureJacobian(const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
void measurementUpdate(const Eigen::MatrixXd& H,
const Eigen::VectorXd& r);
bool gatingTest(const Eigen::MatrixXd& H,
const Eigen::VectorXd& r, const int& dof);
void removeLostFeatures();
void findRedundantCamStates(
std::vector<StateIDType>& rm_cam_state_ids);
void pruneCamStateBuffer();
// Reset the system online if the uncertainty is too large.
void onlineReset();
// Chi squared test table.
static std::map<int, double> chi_squared_test_table;
// State vector
StateServer state_server;
// Maximum number of camera states
int max_cam_state_size;
// Features used
MapServer map_server;
// IMU data buffer
// This buffer is used to handle the asynchrony or
// transfer delay between the IMU and image messages.
std::vector<sensor_msgs::Imu> imu_msg_buffer;
// Indicate if the gravity vector is set.
bool is_gravity_set;
// Indicate if the received image is the first one. The
// system will start after receiving the first image.
bool is_first_img;
// The position uncertainty threshold is used to determine
// when to reset the system online. Otherwise, the ever-
// increasing uncertainty will make the estimation unstable.
// Note that after an online reset the estimate is effectively
// dead-reckoned. Set this threshold to a nonpositive value to
// disable online reset.
double position_std_threshold;
// Tracking rate
double tracking_rate;
// Thresholds for determining keyframes
double translation_threshold;
double rotation_threshold;
double tracking_rate_threshold;
// Ros node handle
ros::NodeHandle nh;
// Subscribers and publishers
ros::Subscriber imu_sub;
ros::Subscriber feature_sub;
ros::Publisher odom_pub;
ros::Publisher feature_pub;
tf::TransformBroadcaster tf_pub;
ros::ServiceServer reset_srv;
// Frame id
std::string fixed_frame_id;
std::string child_frame_id;
// Whether to publish tf or not.
bool publish_tf;
// Frame rate of the stereo images. This variable is
// only used to determine the timing threshold of
// each iteration of the filter.
double frame_rate;
// Debugging variables and functions
void mocapOdomCallback(
const nav_msgs::OdometryConstPtr& msg);
ros::Subscriber mocap_odom_sub;
ros::Publisher mocap_odom_pub;
geometry_msgs::TransformStamped raw_mocap_odom_msg;
Eigen::Isometry3d mocap_initial_frame;
};
typedef MsckfVio::Ptr MsckfVioPtr;
typedef MsckfVio::ConstPtr MsckfVioConstPtr;
} // namespace msckf_vio
#endif
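The private interface above suggests the per-image processing pipeline. A hedged sketch of what featureCallback presumably does with these pieces (assuming CameraMeasurement carries a std_msgs/Header; the exact order in the shipped implementation may differ):

void MsckfVio::featureCallback(const CameraMeasurementConstPtr& msg) {
  if (!is_gravity_set) return;  // wait until gravity/bias are initialized
  // 1. Propagate the IMU state up to the image timestamp.
  batchImuProcessing(msg->header.stamp.toSec());
  // 2. Augment the state vector with a new camera state.
  stateAugmentation(msg->header.stamp.toSec());
  // 3. Register the new feature observations.
  addFeatureObservations(msg);
  // 4. Update the filter with features that are no longer tracked.
  removeLostFeatures();
  // 5. Bound the number of camera states.
  pruneCamStateBuffer();
  // 6. Reset online if the position uncertainty grew too large.
  onlineReset();
  publish(msg->header.stamp);
}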

msckf_vio_nodelet.h

@@ -0,0 +1,28 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_NODELET_H
#define MSCKF_VIO_NODELET_H
#include <nodelet/nodelet.h>
#include <pluginlib/class_list_macros.h>
#include <msckf_vio/msckf_vio.h>
namespace msckf_vio {
class MsckfVioNodelet : public nodelet::Nodelet {
public:
MsckfVioNodelet() { return; }
~MsckfVioNodelet() { return; }
private:
virtual void onInit();
MsckfVioPtr msckf_vio_ptr;
};
} // end namespace msckf_vio
#endif
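For completeness, a typical implementation of this nodelet wrapper might look like the following sketch (not the shipped .cpp):

#include <msckf_vio/msckf_vio_nodelet.h>

namespace msckf_vio {
void MsckfVioNodelet::onInit() {
  msckf_vio_ptr.reset(new MsckfVio(getPrivateNodeHandle()));
  if (!msckf_vio_ptr->initialize())
    NODELET_ERROR("Cannot initialize MSCKF VIO...");
  return;
}
} // end namespace msckf_vio

PLUGINLIB_EXPORT_CLASS(msckf_vio::MsckfVioNodelet, nodelet::Nodelet);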