First commit of the code

Ke Sun 2018-01-08 14:41:37 -05:00
parent 37dd131d33
commit 21e3f9865c
31 changed files with 5940 additions and 0 deletions

147
CMakeLists.txt Normal file

@ -0,0 +1,147 @@
cmake_minimum_required(VERSION 2.8.3)
project(msckf_vio)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
# Modify cmake module path if new .cmake files are required
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_LIST_DIR}/cmake")
find_package(catkin REQUIRED COMPONENTS
roscpp
std_msgs
tf
nav_msgs
sensor_msgs
geometry_msgs
eigen_conversions
tf_conversions
random_numbers
message_generation
image_transport
cv_bridge
message_filters
pcl_conversions
pcl_ros
)
## System dependencies are found with CMake's conventions
find_package(Boost REQUIRED)
find_package(Eigen3 REQUIRED)
find_package(OpenCV REQUIRED)
find_package(Cholmod REQUIRED)
find_package(SPQR REQUIRED)
add_message_files(
FILES
FeatureMeasurement.msg
CameraMeasurement.msg
TrackingInfo.msg
#DebugImuState.msg
#DebugCamState.msg
#DebugState.msg
#DebugMsckfInfo.msg
)
generate_messages(
DEPENDENCIES
std_msgs
)
catkin_package(
INCLUDE_DIRS include
# LIBRARIES msckf_vio
CATKIN_DEPENDS
roscpp std_msgs tf nav_msgs sensor_msgs geometry_msgs
eigen_conversions tf_conversions message_runtime
image_transport cv_bridge message_filters pcl_conversions
pcl_ros std_srvs
DEPENDS
Boost EIGEN3 OpenCV
)
include_directories(
include
${catkin_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIR}
${Boost_INCLUDE_DIR}
${OpenCV_INCLUDE_DIRS}
${CHOLMOD_INCLUDES}
${SPQR_INCLUDES}
)
link_directories(
${catkin_LIBRARY_DIRS}
)
# Msckf Vio
add_library(msckf_vio
src/msckf_vio.cpp
)
add_dependencies(msckf_vio
${${PROJECT_NAME}_EXPORTED_TARGETS}
${catkin_EXPORTED_TARGETS}
)
target_link_libraries(msckf_vio
${catkin_LIBRARIES}
${CHOLMOD_LIBRARIES}
${SPQR_LIBRARIES}
)
# Msckf Vio nodelet
add_library(msckf_vio_nodelet
src/msckf_vio_nodelet.cpp
)
add_dependencies(msckf_vio_nodelet
${${PROJECT_NAME}_EXPORTED_TARGETS}
${catkin_EXPORTED_TARGETS}
)
target_link_libraries(msckf_vio_nodelet
msckf_vio
${catkin_LIBRARIES}
)
# Image processor
add_library(image_processor
src/image_processor.cpp
)
add_dependencies(image_processor
${${PROJECT_NAME}_EXPORTED_TARGETS}
${catkin_EXPORTED_TARGETS}
)
target_link_libraries(image_processor
${catkin_LIBRARIES}
${OpenCV_LIBRARIES}
)
# Image processor nodelet
add_library(image_processor_nodelet
src/image_processor_nodelet.cpp
)
add_dependencies(image_processor_nodelet
${${PROJECT_NAME}_EXPORTED_TARGETS}
${catkin_EXPORTED_TARGETS}
)
target_link_libraries(image_processor_nodelet
image_processor
${catkin_LIBRARIES}
)
# Feature initialization test
catkin_add_gtest(test_feature_init
test/feature_initialization_test.cpp
)
add_dependencies(test_feature_init
${${PROJECT_NAME}_EXPORTED_TARGETS}
${catkin_EXPORTED_TARGETS}
)
target_link_libraries(test_feature_init
${catkin_LIBRARIES}
)
# Math utils test
catkin_add_gtest(test_math_utils
test/math_utils_test.cpp
)

89
cmake/FindCholmod.cmake Normal file

@ -0,0 +1,89 @@
# Cholmod lib usually requires linking to a blas and lapack library.
# It is up to the user of this module to find a BLAS and link to it.
if (CHOLMOD_INCLUDES AND CHOLMOD_LIBRARIES)
set(CHOLMOD_FIND_QUIETLY TRUE)
endif (CHOLMOD_INCLUDES AND CHOLMOD_LIBRARIES)
find_path(CHOLMOD_INCLUDES
NAMES
cholmod.h
PATHS
$ENV{CHOLMODDIR}
${INCLUDE_INSTALL_DIR}
PATH_SUFFIXES
suitesparse
ufsparse
)
find_library(CHOLMOD_LIBRARIES cholmod PATHS $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
if(CHOLMOD_LIBRARIES)
get_filename_component(CHOLMOD_LIBDIR ${CHOLMOD_LIBRARIES} PATH)
find_library(AMD_LIBRARY amd PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
if (AMD_LIBRARY)
set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${AMD_LIBRARY})
else ()
set(CHOLMOD_LIBRARIES FALSE)
endif ()
endif(CHOLMOD_LIBRARIES)
if(CHOLMOD_LIBRARIES)
find_library(COLAMD_LIBRARY colamd PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
if (COLAMD_LIBRARY)
set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${COLAMD_LIBRARY})
else ()
set(CHOLMOD_LIBRARIES FALSE)
endif ()
endif(CHOLMOD_LIBRARIES)
if(CHOLMOD_LIBRARIES)
find_library(CAMD_LIBRARY camd PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
if (CAMD_LIBRARY)
set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${CAMD_LIBRARY})
else ()
set(CHOLMOD_LIBRARIES FALSE)
endif ()
endif(CHOLMOD_LIBRARIES)
if(CHOLMOD_LIBRARIES)
find_library(CCOLAMD_LIBRARY ccolamd PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
if (CCOLAMD_LIBRARY)
set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${CCOLAMD_LIBRARY})
else ()
set(CHOLMOD_LIBRARIES FALSE)
endif ()
endif(CHOLMOD_LIBRARIES)
if(CHOLMOD_LIBRARIES)
find_library(CHOLMOD_METIS_LIBRARY metis PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
if (CHOLMOD_METIS_LIBRARY)
set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${CHOLMOD_METIS_LIBRARY})
endif ()
endif(CHOLMOD_LIBRARIES)
if(CHOLMOD_LIBRARIES)
find_library(SUITESPARSE_LIBRARY SuiteSparse PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
if (SUITESPARSE_LIBRARY)
set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${SUITESPARSE_LIBRARY})
endif (SUITESPARSE_LIBRARY)
endif(CHOLMOD_LIBRARIES)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(CHOLMOD DEFAULT_MSG
CHOLMOD_INCLUDES CHOLMOD_LIBRARIES)
mark_as_advanced(CHOLMOD_INCLUDES CHOLMOD_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY SUITESPARSE_LIBRARY)

36
cmake/FindSPQR.cmake Normal file

@ -0,0 +1,36 @@
# SPQR lib usually requires linking to a blas and lapack library.
# It is up to the user of this module to find a BLAS and link to it.
# SPQR lib requires Cholmod, colamd and amd as well.
# FindCholmod.cmake can be used to find those packages before finding spqr
if (SPQR_INCLUDES AND SPQR_LIBRARIES)
set(SPQR_FIND_QUIETLY TRUE)
endif (SPQR_INCLUDES AND SPQR_LIBRARIES)
find_path(SPQR_INCLUDES
NAMES
SuiteSparseQR.hpp
PATHS
$ENV{SPQRDIR}
${INCLUDE_INSTALL_DIR}
PATH_SUFFIXES
suitesparse
ufsparse
)
find_library(SPQR_LIBRARIES spqr $ENV{SPQRDIR} ${LIB_INSTALL_DIR})
if(SPQR_LIBRARIES)
find_library(SUITESPARSE_LIBRARY SuiteSparse PATHS $ENV{SPQRDIR} ${LIB_INSTALL_DIR})
if (SUITESPARSE_LIBRARY)
set(SPQR_LIBRARIES ${SPQR_LIBRARIES} ${SUITESPARSE_LIBRARY})
endif (SUITESPARSE_LIBRARY)
endif(SPQR_LIBRARIES)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(SPQR DEFAULT_MSG SPQR_INCLUDES SPQR_LIBRARIES)
mark_as_advanced(SPQR_INCLUDES SPQR_LIBRARIES)

33
config/camchain-imucam-euroc.yaml Normal file

@ -0,0 +1,33 @@
# Modifications to the output file from Kalibr:
# 1. For each matrix (e.g. cam0/T_cam_imu), remove the per-row brackets and the leading dash on each row, and wrap the whole matrix in a single pair of brackets.
# 2. Add T_imu_body at the end of the calibration file (usually set to identity).
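# For reference, Kalibr typically writes each matrix as a YAML list of rows, e.g.
#   T_cam_imu:
#   - [0.0148655, 0.9995572, -0.0257744, 0.0652229]
#   - [ ... ]
# Each row's leading dash and brackets are removed and the whole matrix is wrapped
# in a single pair of brackets, as in the entries below.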
cam0:
T_cam_imu:
[0.014865542981794, 0.999557249008346, -0.025774436697440, 0.065222909535531,
-0.999880929698575, 0.014967213324719, 0.003756188357967, -0.020706385492719,
0.004140296794224, 0.025715529947966, 0.999660727177902, -0.008054602460030,
0, 0, 0, 1.000000000000000]
camera_model: pinhole
distortion_coeffs: [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05]
distortion_model: radtan
intrinsics: [458.654, 457.296, 367.215, 248.375]
resolution: [752, 480]
timeshift_cam_imu: 0.0
cam1:
T_cam_imu:
[0.012555267089103, 0.999598781151433, -0.025389800891747, -0.044901980682509,
-0.999755099723116, 0.013011905181504, 0.017900583825251, -0.020569771258915,
0.018223771455443, 0.025158836311552, 0.999517347077547, -0.008638135126028,
0, 0, 0, 1.000000000000000]
camera_model: pinhole
distortion_coeffs: [-0.28368365, 0.07451284, -0.00010473, -3.55590700e-05]
distortion_model: radtan
intrinsics: [457.587, 456.134, 379.999, 255.238]
resolution: [752, 480]
timeshift_cam_imu: 0.0
T_imu_body:
[1.0000, 0.0000, 0.0000, 0.0000,
0.0000, 1.0000, 0.0000, 0.0000,
0.0000, 0.0000, 1.0000, 0.0000,
0.0000, 0.0000, 0.0000, 1.0000]

37
config/camchain-imucam-fla.yaml Normal file

@ -0,0 +1,37 @@
# Modifications to the output file from Kalibr:
# 1. For each matrix (e.g. cam0/T_cam_imu), remove the per-row brackets and the leading dash on each row, and wrap the whole matrix in a single pair of brackets.
# 2. Add T_imu_body at the end of the calibration file (usually set to identity).
cam0:
T_cam_imu:
[0.9999354187625251, -0.008299416281612066, 0.007763890364879687, 0.1058254769257812,
-0.008323202950480819, -0.9999607512746784, 0.003036478688450292, -0.017679034754829417,
0.0077383846414136375, -0.0031009030240912025, -0.9999652502980172, -0.008890355463642424,
0.0, 0.0, 0.0, 1.0]
camera_model: pinhole
distortion_coeffs: [-0.0147, -0.00584, 0.00715, -0.00464]
distortion_model: equidistant
intrinsics: [606.57934, 606.73233, 474.93081, 402.27722]
resolution: [960, 800]
rostopic: /sync/cam0/image_raw
cam1:
T_cam_imu:
[0.9999415499962613, -0.003314180239269491, 0.010291394483555276, -0.09412457355264209,
-0.0029741561536762054, -0.9994548489877669, -0.032880985843089204, -0.016816910884051604,
0.010394757632964142, 0.03284845573511056, -0.9994062877376598, -0.008908185988981819,
0.0, 0.0, 0.0, 1.0]
T_cn_cnm1:
[0.9999843700393054, -0.004977416649937181, -0.0025428275521419763, -0.200059,
0.005065643350062822, 0.9993405242433947, 0.03595604029590122, 0.000634053,
0.002362182447858024, -0.03596835970409878, 0.9993501279159637, -0.000909473,
0.0, 0.0, 0.0, 1.0]
camera_model: pinhole
distortion_coeffs: [-0.01565, -0.00026, -0.00454, 0.00311]
distortion_model: equidistant
intrinsics: [605.7278, 605.75661, 481.98865, 417.83476]
resolution: [960, 800]
rostopic: /sync/cam1/image_raw
T_imu_body:
[1.0000, 0.0000, 0.0000, 0.0000,
0.0000, 1.0000, 0.0000, 0.0000,
0.0000, 0.0000, 1.0000, 0.0000,
0.0000, 0.0000, 0.0000, 1.0000]

67
include/msckf_vio/cam_state.h Normal file

@ -0,0 +1,67 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_CAM_STATE_H
#define MSCKF_VIO_CAM_STATE_H
#include <map>
#include <vector>
#include <Eigen/Dense>
#include "imu_state.h"
namespace msckf_vio {
/*
* @brief CAMState Stores a camera state used to
* form the measurement model.
*/
struct CAMState {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
// A unique identifier for the CAM state.
StateIDType id;
// Time when the state is recorded
double time;
// Orientation
// Takes a vector from the world frame to the camera frame.
Eigen::Vector4d orientation;
// Position of the camera frame in the world frame.
Eigen::Vector3d position;
// These two variables should have the same physical
// interpretation as `orientation` and `position`.
// They are used to modify the measurement
// Jacobian matrices so that the observability matrix
// has the proper null space.
Eigen::Vector4d orientation_null;
Eigen::Vector3d position_null;
// Takes a vector from the cam0 frame to the cam1 frame.
static Eigen::Isometry3d T_cam0_cam1;
CAMState(): id(0), time(0),
orientation(Eigen::Vector4d(0, 0, 0, 1)),
position(Eigen::Vector3d::Zero()),
orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
position_null(Eigen::Vector3d(0, 0, 0)) {}
CAMState(const StateIDType& new_id ): id(new_id), time(0),
orientation(Eigen::Vector4d(0, 0, 0, 1)),
position(Eigen::Vector3d::Zero()),
orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
position_null(Eigen::Vector3d::Zero()) {}
};
typedef std::map<StateIDType, CAMState, std::less<int>,
Eigen::aligned_allocator<
std::pair<const StateIDType, CAMState> > > CamStateServer;
} // namespace msckf_vio
#endif // MSCKF_VIO_CAM_STATE_H
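As a small illustration of how these types are meant to be used (this snippet is not part of the commit; it assumes the package's include directory and Eigen are on the include path), a camera state can be filled in, converted to a rotation matrix with the helper from math_utils.hpp, and stored in a CamStateServer:

#include <Eigen/Dense>
#include <msckf_vio/cam_state.h>
#include <msckf_vio/math_utils.hpp>

int main() {
  msckf_vio::CAMState cam_state(1);                     // camera state with id 1
  cam_state.time = 0.05;                                // seconds
  cam_state.orientation = Eigen::Vector4d(0, 0, 0, 1);  // identity rotation, scalar last
  cam_state.position = Eigen::Vector3d(1.0, 0.0, 0.5);  // camera position in the world frame

  // Rotation taking a vector from the world frame to the camera frame.
  Eigen::Matrix3d R_w_c =
      msckf_vio::quaternionToRotation(cam_state.orientation);
  (void)R_w_c;

  msckf_vio::CamStateServer cam_states;
  cam_states[cam_state.id] = cam_state;
  return 0;
}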

440
include/msckf_vio/feature.hpp Normal file

@ -0,0 +1,440 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_FEATURE_H
#define MSCKF_VIO_FEATURE_H
#include <iostream>
#include <map>
#include <vector>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <Eigen/StdVector>
#include "math_utils.hpp"
#include "imu_state.h"
#include "cam_state.h"
namespace msckf_vio {
/*
* @brief Feature Salient part of an image. Please refer
* to the Appendix of "A Multi-State Constraint Kalman
* Filter for Vision-aided Inertial Navigation" for how
* the 3d position of a feature is initialized.
*/
struct Feature {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
typedef long long int FeatureIDType;
/*
* @brief OptimizationConfig Configuration parameters
* for 3d feature position optimization.
*/
struct OptimizationConfig {
double translation_threshold;
double huber_epsilon;
double estimation_precision;
double initial_damping;
int outer_loop_max_iteration;
int inner_loop_max_iteration;
OptimizationConfig():
translation_threshold(0.2),
huber_epsilon(0.01),
estimation_precision(5e-7),
initial_damping(1e-3),
outer_loop_max_iteration(10),
inner_loop_max_iteration(10) {
return;
}
};
// Constructors for the struct.
Feature(): id(0), position(Eigen::Vector3d::Zero()),
is_initialized(false) {}
Feature(const FeatureIDType& new_id): id(new_id),
position(Eigen::Vector3d::Zero()),
is_initialized(false) {}
/*
* @brief cost Compute the cost of the camera observations
* @param T_c0_ci A rigid body transformation that takes
* a vector from the c0 frame to the ci frame.
* @param x The current estimation.
* @param z The ith measurement of the feature j in ci frame.
* @return e The cost of this observation.
*/
inline void cost(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
double& e) const;
/*
* @brief jacobian Compute the Jacobian of the camera observation
* @param T_c0_ci A rigid body transformation that takes
* a vector from the c0 frame to the ci frame.
* @param x The current estimation.
* @param z The actual measurement of the feature in ci frame.
* @return J The computed Jacobian.
* @return r The computed residual.
* @return w Weight induced by the Huber kernel.
*/
inline void jacobian(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
double& w) const;
/*
* @brief generateInitialGuess Compute the initial guess of
* the feature's 3d position using only two views.
* @param T_c1_c2: A rigid body transformation taking
* a vector from c2 frame to c1 frame.
* @param z1: feature observation in c1 frame.
* @param z2: feature observation in c2 frame.
* @return p: Computed feature position in c1 frame.
*/
inline void generateInitialGuess(
const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
const Eigen::Vector2d& z2, Eigen::Vector3d& p) const;
/*
* @brief checkMotion Check the input camera poses to ensure
* there is enough translation to triangulate the feature
* position.
* @param cam_states : input camera poses.
* @return True if the translation between the input camera
* poses is sufficient.
*/
inline bool checkMotion(
const CamStateServer& cam_states) const;
/*
* @brief initializePosition Initialize the feature position
* based on all currently available measurements.
* @param cam_states: A map containing the camera poses with its
* ID as the associated key value.
* @return The computed 3d position is used to set the position
* member variable. Note the resulting position is in the world
* frame.
* @return True if the estimated 3d position of the feature
* is valid.
*/
inline bool initializePosition(
const CamStateServer& cam_states);
// A unique identifier for the feature.
// The id is of type FeatureIDType (64-bit) so that
// it does not wrap around and get duplicated during
// long runs.
FeatureIDType id;
// id for next feature
static FeatureIDType next_id;
// Store the observations of the features in the
// state_id(key)-image_coordinates(value) manner.
std::map<StateIDType, Eigen::Vector4d, std::less<StateIDType>,
Eigen::aligned_allocator<
std::pair<const StateIDType, Eigen::Vector4d> > > observations;
// 3d position of the feature in the world frame.
Eigen::Vector3d position;
// An indicator showing whether the 3d position of the
// feature has been initialized.
bool is_initialized;
// Noise for a normalized feature measurement.
static double observation_noise;
// Optimization configuration for solving the 3d position.
static OptimizationConfig optimization_config;
};
typedef Feature::FeatureIDType FeatureIDType;
typedef std::map<FeatureIDType, Feature, std::less<int>,
Eigen::aligned_allocator<
std::pair<const FeatureIDType, Feature> > > MapServer;
void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
double& e) const {
// Compute hi1, hi2, and hi3 as Equation (37).
const double& alpha = x(0);
const double& beta = x(1);
const double& rho = x(2);
Eigen::Vector3d h = T_c0_ci.linear()*
Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
double& h1 = h(0);
double& h2 = h(1);
double& h3 = h(2);
// Predict the feature observation in ci frame.
Eigen::Vector2d z_hat(h1/h3, h2/h3);
// Compute the residual.
e = (z_hat-z).squaredNorm();
return;
}
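// In short, with x = (alpha, beta, rho) the inverse-depth parameters of the
// feature in the c0 frame and (R, t) the given transformation from c0 to ci:
//   h = R * [alpha, beta, 1]^T + rho * t,
//   z_hat = [h1/h3, h2/h3]^T,  e = ||z_hat - z||^2.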
void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
const Eigen::Vector3d& x, const Eigen::Vector2d& z,
Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
double& w) const {
// Compute hi1, hi2, and hi3 as Equation (37).
const double& alpha = x(0);
const double& beta = x(1);
const double& rho = x(2);
Eigen::Vector3d h = T_c0_ci.linear()*
Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
double& h1 = h(0);
double& h2 = h(1);
double& h3 = h(2);
// Compute the Jacobian.
Eigen::Matrix3d W;
W.leftCols<2>() = T_c0_ci.linear().leftCols<2>();
W.rightCols<1>() = T_c0_ci.translation();
J.row(0) = 1/h3*W.row(0) - h1/(h3*h3)*W.row(2);
J.row(1) = 1/h3*W.row(1) - h2/(h3*h3)*W.row(2);
// Compute the residual.
Eigen::Vector2d z_hat(h1/h3, h2/h3);
r = z_hat - z;
// Compute the weight based on the residual.
double e = r.norm();
if (e <= optimization_config.huber_epsilon)
w = 1.0;
else
w = optimization_config.huber_epsilon / (2*e);
return;
}
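// Equivalently, since h = W * [alpha, beta, rho]^T + R(:,2) with
// W = [R(:,0), R(:,1), t] (zero-based columns), the Jacobian of z_hat with
// respect to x = (alpha, beta, rho) is
//   J = [ W(0,:)/h3 - h1*W(2,:)/h3^2 ;
//         W(1,:)/h3 - h2*W(2,:)/h3^2 ],
// and the Huber weight is w = 1 for ||r|| <= huber_epsilon and
// w = huber_epsilon / (2*||r||) otherwise.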
void Feature::generateInitialGuess(
const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
const Eigen::Vector2d& z2, Eigen::Vector3d& p) const {
// Construct a least square problem to solve the depth.
Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);
Eigen::Vector2d A(0.0, 0.0);
A(0) = m(0) - z2(0)*m(2);
A(1) = m(1) - z2(1)*m(2);
Eigen::Vector2d b(0.0, 0.0);
b(0) = z2(0)*T_c1_c2.translation()(2) - T_c1_c2.translation()(0);
b(1) = z2(1)*T_c1_c2.translation()(2) - T_c1_c2.translation()(1);
// Solve for the depth.
double depth = (A.transpose() * A).inverse() * A.transpose() * b;
p(0) = z1(0) * depth;
p(1) = z1(1) * depth;
p(2) = depth;
return;
}
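// This is the least-squares solution of the two scalar equations obtained by
// requiring that the point p = d * [z1(0), z1(1), 1]^T, which maps to
// d*m + t under the given transformation (with m = R*[z1(0), z1(1), 1]^T),
// projects onto z2:
//   d * (m(i) - z2(i)*m(2)) = z2(i)*t(2) - t(i),  i = 0, 1,
// i.e. A*d = b, solved as d = (A^T A)^{-1} A^T b.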
bool Feature::checkMotion(
const CamStateServer& cam_states) const {
const StateIDType& first_cam_id = observations.begin()->first;
const StateIDType& last_cam_id = (--observations.end())->first;
Eigen::Isometry3d first_cam_pose;
first_cam_pose.linear() = quaternionToRotation(
cam_states.find(first_cam_id)->second.orientation).transpose();
first_cam_pose.translation() =
cam_states.find(first_cam_id)->second.position;
Eigen::Isometry3d last_cam_pose;
last_cam_pose.linear() = quaternionToRotation(
cam_states.find(last_cam_id)->second.orientation).transpose();
last_cam_pose.translation() =
cam_states.find(last_cam_id)->second.position;
// Get the direction of the feature when it is first observed.
// This direction is represented in the world frame.
Eigen::Vector3d feature_direction(
observations.begin()->second(0),
observations.begin()->second(1), 1.0);
feature_direction = feature_direction / feature_direction.norm();
feature_direction = first_cam_pose.linear()*feature_direction;
// Compute the translation between the first frame
// and the last frame. We assume the first frame and
// the last frame will provide the largest motion to
// speed up the checking process.
Eigen::Vector3d translation = last_cam_pose.translation() -
first_cam_pose.translation();
double parallel_translation =
translation.transpose()*feature_direction;
Eigen::Vector3d orthogonal_translation = translation -
parallel_translation*feature_direction;
if (orthogonal_translation.norm() >
optimization_config.translation_threshold)
return true;
else return false;
}
bool Feature::initializePosition(
const CamStateServer& cam_states) {
// Organize camera poses and feature observations properly.
std::vector<Eigen::Isometry3d,
Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
std::vector<Eigen::Vector2d,
Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
for (auto& m : observations) {
// TODO: This should be handled properly. Normally, the
// required camera states should all be available in
// the input cam_states buffer.
auto cam_state_iter = cam_states.find(m.first);
if (cam_state_iter == cam_states.end()) continue;
// Add the measurement.
measurements.push_back(m.second.head<2>());
measurements.push_back(m.second.tail<2>());
// This camera pose will take a vector from this camera frame
// to the world frame.
Eigen::Isometry3d cam0_pose;
cam0_pose.linear() = quaternionToRotation(
cam_state_iter->second.orientation).transpose();
cam0_pose.translation() = cam_state_iter->second.position;
Eigen::Isometry3d cam1_pose;
cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
cam_poses.push_back(cam0_pose);
cam_poses.push_back(cam1_pose);
}
// All camera poses should be modified such that each takes
// a vector from the first camera frame in the buffer to this
// camera frame.
Eigen::Isometry3d T_c0_w = cam_poses[0];
for (auto& pose : cam_poses)
pose = pose.inverse() * T_c0_w;
// Generate initial guess
Eigen::Vector3d initial_position(0.0, 0.0, 0.0);
generateInitialGuess(cam_poses[cam_poses.size()-1], measurements[0],
measurements[measurements.size()-1], initial_position);
Eigen::Vector3d solution(
initial_position(0)/initial_position(2),
initial_position(1)/initial_position(2),
1.0/initial_position(2));
// Apply the Levenberg-Marquardt method to solve for the 3d position.
double lambda = optimization_config.initial_damping;
int inner_loop_cntr = 0;
int outer_loop_cntr = 0;
bool is_cost_reduced = false;
double delta_norm = 0;
// Compute the initial cost.
double total_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
cost(cam_poses[i], solution, measurements[i], this_cost);
total_cost += this_cost;
}
// Outer loop.
do {
Eigen::Matrix3d A = Eigen::Matrix3d::Zero();
Eigen::Vector3d b = Eigen::Vector3d::Zero();
for (int i = 0; i < cam_poses.size(); ++i) {
Eigen::Matrix<double, 2, 3> J;
Eigen::Vector2d r;
double w;
jacobian(cam_poses[i], solution, measurements[i], J, r, w);
if (w == 1) {
A += J.transpose() * J;
b += J.transpose() * r;
} else {
double w_square = w * w;
A += w_square * J.transpose() * J;
b += w_square * J.transpose() * r;
}
}
// Inner loop.
// Solve for the delta that can reduce the total cost.
do {
Eigen::Matrix3d damper = lambda * Eigen::Matrix3d::Identity();
Eigen::Vector3d delta = (A+damper).ldlt().solve(b);
Eigen::Vector3d new_solution = solution - delta;
delta_norm = delta.norm();
double new_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
cost(cam_poses[i], new_solution, measurements[i], this_cost);
new_cost += this_cost;
}
if (new_cost < total_cost) {
is_cost_reduced = true;
solution = new_solution;
total_cost = new_cost;
lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
} else {
is_cost_reduced = false;
lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
}
} while (inner_loop_cntr++ <
optimization_config.inner_loop_max_iteration && !is_cost_reduced);
inner_loop_cntr = 0;
} while (outer_loop_cntr++ <
optimization_config.outer_loop_max_iteration &&
delta_norm > optimization_config.estimation_precision);
// Convert the feature position from the inverse depth
// representation to its 3d coordinate.
Eigen::Vector3d final_position(solution(0)/solution(2),
solution(1)/solution(2), 1.0/solution(2));
// Check if the solution is valid. Make sure the feature
// is in front of every camera frame observing it.
bool is_valid_solution = true;
for (const auto& pose : cam_poses) {
Eigen::Vector3d position =
pose.linear()*final_position + pose.translation();
if (position(2) <= 0) {
is_valid_solution = false;
break;
}
}
// Convert the feature position to the world frame.
position = T_c0_w.linear()*final_position + T_c0_w.translation();
if (is_valid_solution)
is_initialized = true;
return is_valid_solution;
}
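// To summarize the loop above: each outer iteration accumulates the weighted
// normal equations A = sum_i w_i^2 * J_i^T * J_i and b = sum_i w_i^2 * J_i^T * r_i
// (w_i = 1 for inliers of the Huber kernel), and the inner loop solves the
// damped system (A + lambda*I) * delta = b, accepting
// solution <- solution - delta only if the total cost decreases; lambda is
// divided by 10 on success and multiplied by 10 otherwise, clamped to
// [1e-10, 1e12].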
} // namespace msckf_vio
#endif // MSCKF_VIO_FEATURE_H

398
include/msckf_vio/image_processor.h Normal file

@ -0,0 +1,398 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_IMAGE_PROCESSOR_H
#define MSCKF_VIO_IMAGE_PROCESSOR_H
#include <vector>
#include <map>
#include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/Image.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
namespace msckf_vio {
/*
* @brief ImageProcessor Detects and tracks features
* in image sequences.
*/
class ImageProcessor {
public:
// Constructor
ImageProcessor(ros::NodeHandle& n);
// Disable copy and assign constructors.
ImageProcessor(const ImageProcessor&) = delete;
ImageProcessor operator=(const ImageProcessor&) = delete;
// Destructor
~ImageProcessor();
// Initialize the object.
bool initialize();
typedef boost::shared_ptr<ImageProcessor> Ptr;
typedef boost::shared_ptr<const ImageProcessor> ConstPtr;
private:
/*
* @brief ProcessorConfig Configuration parameters for
* feature detection and tracking.
*/
struct ProcessorConfig {
int grid_row;
int grid_col;
int grid_min_feature_num;
int grid_max_feature_num;
int pyramid_levels;
int patch_size;
int fast_threshold;
int max_iteration;
double track_precision;
double ransac_threshold;
double stereo_threshold;
};
/*
* @brief FeatureIDType An alias for unsigned long long int.
*/
typedef unsigned long long int FeatureIDType;
/*
* @brief FeatureMetaData Contains necessary information
* of a feature for easy access.
*/
struct FeatureMetaData {
FeatureIDType id;
float response;
int lifetime;
cv::Point2f cam0_point;
cv::Point2f cam1_point;
};
/*
* @brief GridFeatures Organize features based on the grid
* they belong to. Note that the key is encoded by the
* grid index.
*/
typedef std::map<int, std::vector<FeatureMetaData> > GridFeatures;
/*
* @brief keyPointCompareByResponse
* Compare two keypoints based on the response.
*/
static bool keyPointCompareByResponse(
const cv::KeyPoint& pt1,
const cv::KeyPoint& pt2) {
// Keypoint with higher response will be at the
// beginning of the vector.
return pt1.response > pt2.response;
}
/*
* @brief featureCompareByResponse
* Compare two features based on the response.
*/
static bool featureCompareByResponse(
const FeatureMetaData& f1,
const FeatureMetaData& f2) {
// Features with higher response will be at the
// beginning of the vector.
return f1.response > f2.response;
}
/*
* @brief featureCompareByLifetime
* Compare two features based on the lifetime.
*/
static bool featureCompareByLifetime(
const FeatureMetaData& f1,
const FeatureMetaData& f2) {
// Features with longer lifetime will be at the
// beginning of the vector.
return f1.lifetime > f2.lifetime;
}
/*
* @brief loadParameters
* Load parameters from the parameter server.
*/
bool loadParameters();
/*
* @brief createRosIO
* Create ROS publishers and subscribers.
*/
bool createRosIO();
/*
* @brief stereoCallback
* Callback function for the stereo images.
* @param cam0_img left image.
* @param cam1_img right image.
*/
void stereoCallback(
const sensor_msgs::ImageConstPtr& cam0_img,
const sensor_msgs::ImageConstPtr& cam1_img);
/*
* @brief imuCallback
* Callback function for the imu message.
* @param msg IMU msg.
*/
void imuCallback(const sensor_msgs::ImuConstPtr& msg);
/*
* @brief initializeFirstFrame
* Initialize the image processing sequence, which
* basically detects new features on the first set of
* stereo images.
*/
void initializeFirstFrame();
/*
* @brief trackFeatures
* Track features on the newly received stereo images.
*/
void trackFeatures();
/*
* @brief addNewFeatures
* Detect new features on the image to ensure that the
* features are uniformly distributed on the image.
*/
void addNewFeatures();
/*
* @brief pruneGridFeatures
* Remove some of the features of a grid in case there are
* too many features inside of that grid, which ensures the
* number of features within each grid is bounded.
*/
void pruneGridFeatures();
/*
* @brief publish
* Publish the features on the current image including
* both the tracked and newly detected ones.
*/
void publish();
/*
* @brief drawFeaturesMono
* Draw tracked and newly detected features on the left
* image only.
*/
void drawFeaturesMono();
/*
* @brief drawFeaturesStereo
* Draw tracked and newly detected features on the
* stereo images.
*/
void drawFeaturesStereo();
/*
* @brief createImagePyramids
* Create image pyramids used for klt tracking.
*/
void createImagePyramids();
/*
* @brief integrateImuData Integrates the IMU gyro readings
* between the two consecutive images, which is used for
* both tracking prediction and 2-point RANSAC.
* @return cam0_R_p_c: a rotation matrix which takes a vector
* from previous cam0 frame to current cam0 frame.
* @return cam1_R_p_c: a rotation matrix which takes a vector
* from previous cam1 frame to current cam1 frame.
*/
void integrateImuData(cv::Matx33f& cam0_R_p_c,
cv::Matx33f& cam1_R_p_c);
/*
* @brief predictFeatureTracking Compensates the rotation
* between consecutive camera frames so that feature
* tracking is more robust and faster.
* @param input_pts: features in the previous image to be tracked.
* @param R_p_c: a rotation matrix takes a vector in the previous
* camera frame to the current camera frame.
* @param intrinsics: intrinsic matrix of the camera.
* @return compensated_pts: predicted locations of the features
* in the current image based on the provided rotation.
*
* Note that the input and output points are of pixel coordinates.
*/
void predictFeatureTracking(
const std::vector<cv::Point2f>& input_pts,
const cv::Matx33f& R_p_c,
const cv::Vec4d& intrinsics,
std::vector<cv::Point2f>& compensated_pts);
/*
* @brief twoPointRansac Applies the two-point RANSAC algorithm
* to mark the inliers in the input set.
* @param pts1: first set of points.
* @param pts2: second set of points.
* @param R_p_c: a rotation matrix takes a vector in the previous
* camera frame to the current camera frame.
* @param intrinsics: intrinsics of the camera.
* @param distortion_model: distortion model of the camera.
* @param distortion_coeffs: distortion coefficients.
* @param inlier_error: acceptable error to be considered as an inlier.
* @param success_probability: the required probability of success.
* @return inlier_flag: 1 for inliers and 0 for outliers.
*/
void twoPointRansac(
const std::vector<cv::Point2f>& pts1,
const std::vector<cv::Point2f>& pts2,
const cv::Matx33f& R_p_c,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
const double& inlier_error,
const double& success_probability,
std::vector<int>& inlier_markers);
void undistortPoints(
const std::vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
std::vector<cv::Point2f>& pts_out,
const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(),
const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0));
void rescalePoints(
std::vector<cv::Point2f>& pts1,
std::vector<cv::Point2f>& pts2,
float& scaling_factor);
std::vector<cv::Point2f> distortPoints(
const std::vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs);
/*
* @brief stereoMatch Matches features with stereo image pairs.
* @param cam0_points: points in the primary image.
* @return cam1_points: points in the secondary image.
* @return inlier_markers: 1 if the match is valid, 0 otherwise.
*/
void stereoMatch(
const std::vector<cv::Point2f>& cam0_points,
std::vector<cv::Point2f>& cam1_points,
std::vector<unsigned char>& inlier_markers);
/*
* @brief removeUnmarkedElements Remove the unmarked elements
* within a vector.
* @param raw_vec: vector with outliers.
* @param markers: 0 represents an outlier, 1 represents an inlier.
* @return refined_vec: a vector without outliers.
*
* Note that the order of the inliers in the raw_vec is preserved
* in the refined_vec.
*/
template <typename T>
void removeUnmarkedElements(
const std::vector<T>& raw_vec,
const std::vector<unsigned char>& markers,
std::vector<T>& refined_vec) {
if (raw_vec.size() != markers.size()) {
ROS_WARN("The input size of raw_vec(%lu) and markers(%lu) does not match...",
raw_vec.size(), markers.size());
}
for (int i = 0; i < markers.size(); ++i) {
if (markers[i] == 0) continue;
refined_vec.push_back(raw_vec[i]);
}
return;
}
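// For example, raw_vec = {p0, p1, p2} with markers = {1, 0, 1} yields
// refined_vec = {p0, p2}.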
// Indicate if this is the first image message.
bool is_first_img;
// ID for the next new feature.
FeatureIDType next_feature_id;
// Feature detector
ProcessorConfig processor_config;
cv::Ptr<cv::Feature2D> detector_ptr;
// IMU message buffer.
std::vector<sensor_msgs::Imu> imu_msg_buffer;
// Camera calibration parameters
std::string cam0_distortion_model;
cv::Vec2i cam0_resolution;
cv::Vec4d cam0_intrinsics;
cv::Vec4d cam0_distortion_coeffs;
std::string cam1_distortion_model;
cv::Vec2i cam1_resolution;
cv::Vec4d cam1_intrinsics;
cv::Vec4d cam1_distortion_coeffs;
// Take a vector from cam0 frame to the IMU frame.
cv::Matx33d R_cam0_imu;
cv::Vec3d t_cam0_imu;
// Take a vector from cam1 frame to the IMU frame.
cv::Matx33d R_cam1_imu;
cv::Vec3d t_cam1_imu;
// Previous and current images
cv_bridge::CvImageConstPtr cam0_prev_img_ptr;
cv_bridge::CvImageConstPtr cam0_curr_img_ptr;
cv_bridge::CvImageConstPtr cam1_curr_img_ptr;
// Pyramids for previous and current image
std::vector<cv::Mat> prev_cam0_pyramid_;
std::vector<cv::Mat> curr_cam0_pyramid_;
std::vector<cv::Mat> curr_cam1_pyramid_;
// Features in the previous and current image.
boost::shared_ptr<GridFeatures> prev_features_ptr;
boost::shared_ptr<GridFeatures> curr_features_ptr;
// Number of features after each outlier removal step.
int before_tracking;
int after_tracking;
int after_matching;
int after_ransac;
// Ros node handle
ros::NodeHandle nh;
// Subscribers and publishers.
message_filters::Subscriber<
sensor_msgs::Image> cam0_img_sub;
message_filters::Subscriber<
sensor_msgs::Image> cam1_img_sub;
message_filters::TimeSynchronizer<
sensor_msgs::Image, sensor_msgs::Image> stereo_sub;
ros::Subscriber imu_sub;
ros::Publisher feature_pub;
ros::Publisher tracking_info_pub;
image_transport::Publisher debug_stereo_pub;
// Debugging
std::map<FeatureIDType, int> feature_lifetime;
void updateFeatureLifetime();
void featureLifetimeStatistics();
};
typedef ImageProcessor::Ptr ImageProcessorPtr;
typedef ImageProcessor::ConstPtr ImageProcessorConstPtr;
} // end namespace msckf_vio
#endif

28
include/msckf_vio/image_processor_nodelet.h Normal file

@ -0,0 +1,28 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef IMAGE_PROCESSOR_NODELET_H
#define IMAGE_PROCESSOR_NODELET_H
#include <nodelet/nodelet.h>
#include <pluginlib/class_list_macros.h>
#include <msckf_vio/image_processor.h>
namespace msckf_vio {
class ImageProcessorNodelet : public nodelet::Nodelet {
public:
ImageProcessorNodelet() { return; }
~ImageProcessorNodelet() { return; }
private:
virtual void onInit();
ImageProcessorPtr img_processor_ptr;
};
} // end namespace msckf_vio
#endif

110
include/msckf_vio/imu_state.h Normal file

@ -0,0 +1,110 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_IMU_STATE_H
#define MSCKF_VIO_IMU_STATE_H
#include <map>
#include <vector>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#define GRAVITY_ACCELERATION 9.81
namespace msckf_vio {
/*
* @brief IMUState State for IMU
*/
struct IMUState {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
typedef long long int StateIDType;
// A unique identifier for the IMU state.
StateIDType id;
// id for next IMU state
static StateIDType next_id;
// Time when the state is recorded
double time;
// Orientation
// Takes a vector from the world frame to
// the IMU (body) frame.
Eigen::Vector4d orientation;
// Position of the IMU (body) frame
// in the world frame.
Eigen::Vector3d position;
// Velocity of the IMU (body) frame
// in the world frame.
Eigen::Vector3d velocity;
// Bias for measured angular velocity
// and acceleration.
Eigen::Vector3d gyro_bias;
Eigen::Vector3d acc_bias;
// Transformation between the IMU and the
// left camera (cam0)
Eigen::Matrix3d R_imu_cam0;
Eigen::Vector3d t_cam0_imu;
// These three variables should have the same physical
// interpretation as `orientation`, `position`, and
// `velocity`. They are used to modify
// the transition matrices so that the observability matrix
// has the proper null space.
Eigen::Vector4d orientation_null;
Eigen::Vector3d position_null;
Eigen::Vector3d velocity_null;
// Process noise
static double gyro_noise;
static double acc_noise;
static double gyro_bias_noise;
static double acc_bias_noise;
// Gravity vector in the world frame
static Eigen::Vector3d gravity;
// Transformation offset from the IMU frame to
// the body frame. The transformation takes a
// vector from the IMU frame to the body frame.
// The z axis of the body frame should point upwards.
// Normally, this transform should be identity.
static Eigen::Isometry3d T_imu_body;
IMUState(): id(0), time(0),
orientation(Eigen::Vector4d(0, 0, 0, 1)),
position(Eigen::Vector3d::Zero()),
velocity(Eigen::Vector3d::Zero()),
gyro_bias(Eigen::Vector3d::Zero()),
acc_bias(Eigen::Vector3d::Zero()),
orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
position_null(Eigen::Vector3d::Zero()),
velocity_null(Eigen::Vector3d::Zero()) {}
IMUState(const StateIDType& new_id): id(new_id), time(0),
orientation(Eigen::Vector4d(0, 0, 0, 1)),
position(Eigen::Vector3d::Zero()),
velocity(Eigen::Vector3d::Zero()),
gyro_bias(Eigen::Vector3d::Zero()),
acc_bias(Eigen::Vector3d::Zero()),
orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
position_null(Eigen::Vector3d::Zero()),
velocity_null(Eigen::Vector3d::Zero()) {}
};
typedef IMUState::StateIDType StateIDType;
} // namespace msckf_vio
#endif // MSCKF_VIO_IMU_STATE_H

163
include/msckf_vio/math_utils.hpp Normal file

@ -0,0 +1,163 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_MATH_UTILS_HPP
#define MSCKF_VIO_MATH_UTILS_HPP
#include <cmath>
#include <Eigen/Dense>
namespace msckf_vio {
/*
* @brief Create a skew-symmetric matrix from a 3-element vector.
* @note Performs the operation:
* w -> [ 0 -w3 w2]
* [ w3 0 -w1]
* [-w2 w1 0]
*/
inline Eigen::Matrix3d skewSymmetric(const Eigen::Vector3d& w) {
Eigen::Matrix3d w_hat;
w_hat(0, 0) = 0;
w_hat(0, 1) = -w(2);
w_hat(0, 2) = w(1);
w_hat(1, 0) = w(2);
w_hat(1, 1) = 0;
w_hat(1, 2) = -w(0);
w_hat(2, 0) = -w(1);
w_hat(2, 1) = w(0);
w_hat(2, 2) = 0;
return w_hat;
}
/*
* @brief Normalize the given quaternion to unit quaternion.
*/
inline void quaternionNormalize(Eigen::Vector4d& q) {
double norm = q.norm();
q = q / norm;
return;
}
/*
* @brief Perform q1 * q2
*/
inline Eigen::Vector4d quaternionMultiplication(
const Eigen::Vector4d& q1,
const Eigen::Vector4d& q2) {
Eigen::Matrix4d L;
L(0, 0) = q1(3); L(0, 1) = q1(2); L(0, 2) = -q1(1); L(0, 3) = q1(0);
L(1, 0) = -q1(2); L(1, 1) = q1(3); L(1, 2) = q1(0); L(1, 3) = q1(1);
L(2, 0) = q1(1); L(2, 1) = -q1(0); L(2, 2) = q1(3); L(2, 3) = q1(2);
L(3, 0) = -q1(0); L(3, 1) = -q1(1); L(3, 2) = -q1(2); L(3, 3) = q1(3);
Eigen::Vector4d q = L * q2;
quaternionNormalize(q);
return q;
}
/*
* @brief Convert the vector part of a quaternion to a
* full quaternion.
* @note This function is useful for converting a delta quaternion,
* which is usually a 3x1 vector, to a full quaternion.
* For more details, check Equations (238) and (239) in
* "Indirect Kalman Filter for 3D Attitude Estimation:
* A Tutorial for Quaternion Algebra".
*/
inline Eigen::Vector4d smallAngleQuaternion(
const Eigen::Vector3d& dtheta) {
Eigen::Vector3d dq = dtheta / 2.0;
Eigen::Vector4d q;
double dq_square_norm = dq.squaredNorm();
if (dq_square_norm <= 1) {
q.head<3>() = dq;
q(3) = std::sqrt(1-dq_square_norm);
} else {
q.head<3>() = dq;
q(3) = 1;
q = q / std::sqrt(1+dq_square_norm);
}
return q;
}
/*
* @brief Convert a quaternion to the corresponding rotation matrix
* @note Pay attention to the convention used. The function follows the
* conversion in "Indirect Kalman Filter for 3D Attitude Estimation:
* A Tutorial for Quaternion Algebra", Equation (78).
*
* The input quaternion should be in the form
* [q1, q2, q3, q4(scalar)]^T
*/
inline Eigen::Matrix3d quaternionToRotation(
const Eigen::Vector4d& q) {
const Eigen::Vector3d& q_vec = q.block(0, 0, 3, 1);
const double& q4 = q(3);
Eigen::Matrix3d R =
(2*q4*q4-1)*Eigen::Matrix3d::Identity() -
2*q4*skewSymmetric(q_vec) +
2*q_vec*q_vec.transpose();
//TODO: Is it necessary to use the approximation equation
// (Equation (87)) when the rotation angle is small?
return R;
}
/*
* @brief Convert a rotation matrix to a quaternion.
* @note Pay attention to the convention used. The function follows the
* conversion in "Indirect Kalman Filter for 3D Attitude Estimation:
* A Tutorial for Quaternion Algebra", Equation (78).
*
* The returned quaternion is in the form
* [q1, q2, q3, q4(scalar)]^T
*/
inline Eigen::Vector4d rotationToQuaternion(
const Eigen::Matrix3d& R) {
Eigen::Vector4d score;
score(0) = R(0, 0);
score(1) = R(1, 1);
score(2) = R(2, 2);
score(3) = R.trace();
int max_row = 0, max_col = 0;
score.maxCoeff(&max_row, &max_col);
Eigen::Vector4d q = Eigen::Vector4d::Zero();
if (max_row == 0) {
q(0) = std::sqrt(1+2*R(0, 0)-R.trace()) / 2.0;
q(1) = (R(0, 1)+R(1, 0)) / (4*q(0));
q(2) = (R(0, 2)+R(2, 0)) / (4*q(0));
q(3) = (R(1, 2)-R(2, 1)) / (4*q(0));
} else if (max_row == 1) {
q(1) = std::sqrt(1+2*R(1, 1)-R.trace()) / 2.0;
q(0) = (R(0, 1)+R(1, 0)) / (4*q(1));
q(2) = (R(1, 2)+R(2, 1)) / (4*q(1));
q(3) = (R(2, 0)-R(0, 2)) / (4*q(1));
} else if (max_row == 2) {
q(2) = std::sqrt(1+2*R(2, 2)-R.trace()) / 2.0;
q(0) = (R(0, 2)+R(2, 0)) / (4*q(2));
q(1) = (R(1, 2)+R(2, 1)) / (4*q(2));
q(3) = (R(0, 1)-R(1, 0)) / (4*q(2));
} else {
q(3) = std::sqrt(1+R.trace()) / 2.0;
q(0) = (R(1, 2)-R(2, 1)) / (4*q(3));
q(1) = (R(2, 0)-R(0, 2)) / (4*q(3));
q(2) = (R(0, 1)-R(1, 0)) / (4*q(3));
}
if (q(3) < 0) q = -q;
quaternionNormalize(q);
return q;
}
} // end namespace msckf_vio
#endif // MSCKF_VIO_MATH_UTILS_HPP
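A minimal, self-contained sanity check of the conventions above (not part of the commit; it only assumes Eigen and this header are on the include path) round-trips a quaternion through quaternionToRotation / rotationToQuaternion and composes a small-angle increment with quaternionMultiplication:

#include <iostream>
#include <Eigen/Dense>
#include <msckf_vio/math_utils.hpp>

int main() {
  using namespace msckf_vio;

  // A scalar-last quaternion; normalize it before use.
  Eigen::Vector4d q(0.1, -0.2, 0.3, 0.9);
  quaternionNormalize(q);

  // Round trip q -> R -> q_back should reproduce q (the sign is fixed internally).
  Eigen::Matrix3d R = quaternionToRotation(q);
  Eigen::Vector4d q_back = rotationToQuaternion(R);
  std::cout << "round-trip error: " << (q - q_back).norm() << std::endl;

  // Build a small error-state rotation and compose it with q.
  Eigen::Vector3d dtheta(0.001, -0.002, 0.0005);   // radians
  Eigen::Vector4d dq = smallAngleQuaternion(dtheta);
  Eigen::Vector4d q_new = quaternionMultiplication(dq, q);
  std::cout << "updated quaternion: " << q_new.transpose() << std::endl;
  return 0;
}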

242
include/msckf_vio/msckf_vio.h Normal file

@ -0,0 +1,242 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_H
#define MSCKF_VIO_H
#include <map>
#include <set>
#include <vector>
#include <string>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <boost/shared_ptr.hpp>
#include <ros/ros.h>
#include <sensor_msgs/Imu.h>
#include <nav_msgs/Odometry.h>
#include <tf/transform_broadcaster.h>
#include <std_srvs/Trigger.h>
#include "imu_state.h"
#include "cam_state.h"
#include "feature.hpp"
#include <msckf_vio/CameraMeasurement.h>
namespace msckf_vio {
/*
* @brief MsckfVio Implements the algorithm in
* Anastasios I. Mourikis and Stergios I. Roumeliotis,
* "A Multi-State Constraint Kalman Filter for Vision-aided
* Inertial Navigation",
* http://www.ee.ucr.edu/~mourikis/tech_reports/TR_MSCKF.pdf
*/
class MsckfVio {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
// Constructor
MsckfVio(ros::NodeHandle& pnh);
// Disable copy and assign constructor
MsckfVio(const MsckfVio&) = delete;
MsckfVio operator=(const MsckfVio&) = delete;
// Destructor
~MsckfVio() {}
/*
* @brief initialize Initialize the VIO.
*/
bool initialize();
/*
* @brief reset Resets the VIO to initial status.
*/
void reset();
typedef boost::shared_ptr<MsckfVio> Ptr;
typedef boost::shared_ptr<const MsckfVio> ConstPtr;
private:
/*
* @brief StateServer Stores one IMU state and several
* camera states for constructing the measurement
* model.
*/
struct StateServer {
IMUState imu_state;
CamStateServer cam_states;
// State covariance matrix
Eigen::MatrixXd state_cov;
Eigen::Matrix<double, 12, 12> continuous_noise_cov;
};
/*
* @brief loadParameters
* Load parameters from the parameter server.
*/
bool loadParameters();
/*
* @brief createRosIO
* Create ROS publishers and subscribers.
*/
bool createRosIO();
/*
* @brief imuCallback
* Callback function for the imu message.
* @param msg IMU msg.
*/
void imuCallback(const sensor_msgs::ImuConstPtr& msg);
/*
* @brief featureCallback
* Callback function for feature measurements.
* @param msg Stereo feature measurements.
*/
void featureCallback(const CameraMeasurementConstPtr& msg);
/*
* @brief publish Publish the results of VIO.
* @param time The time stamp of output msgs.
*/
void publish(const ros::Time& time);
/*
* @brief initializeGravityAndBias
* Initialize the IMU bias and initial orientation
* based on the first few IMU readings.
*/
void initializeGravityAndBias();
/*
* @brief resetCallback
* Callback function for the reset service.
* Note that this is NOT anytime-reset. This function should
* only be called before the sensor suite starts moving.
* e.g. while the robot is still on the ground.
*/
bool resetCallback(std_srvs::Trigger::Request& req,
std_srvs::Trigger::Response& res);
// Filter related functions
// Propagate the state
void batchImuProcessing(
const double& time_bound);
void processModel(const double& time,
const Eigen::Vector3d& m_gyro,
const Eigen::Vector3d& m_acc);
void predictNewState(const double& dt,
const Eigen::Vector3d& gyro,
const Eigen::Vector3d& acc);
// Measurement update
void stateAugmentation(const double& time);
void addFeatureObservations(const CameraMeasurementConstPtr& msg);
// This function is used to compute the measurement Jacobian
// for a single feature observed at a single camera frame.
void measurementJacobian(const StateIDType& cam_state_id,
const FeatureIDType& feature_id,
Eigen::Matrix<double, 4, 6>& H_x,
Eigen::Matrix<double, 4, 3>& H_f,
Eigen::Vector4d& r);
// This function computes the Jacobian of all measurements viewed
// in the given camera states of this feature.
void featureJacobian(const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
void measurementUpdate(const Eigen::MatrixXd& H,
const Eigen::VectorXd& r);
bool gatingTest(const Eigen::MatrixXd& H,
const Eigen::VectorXd&r, const int& dof);
void removeLostFeatures();
void findRedundantCamStates(
std::vector<StateIDType>& rm_cam_state_ids);
void pruneCamStateBuffer();
// Reset the system online if the uncertainty is too large.
void onlineReset();
// Chi squared test table.
static std::map<int, double> chi_squared_test_table;
// State vector
StateServer state_server;
// Maximum number of camera states
int max_cam_state_size;
// Features used
MapServer map_server;
// IMU data buffer
// This buffer is used to handle the lack of synchronization, or
// transfer delay, between the IMU and image messages.
std::vector<sensor_msgs::Imu> imu_msg_buffer;
// Indicate if the gravity vector is set.
bool is_gravity_set;
// Indicate if the received image is the first one. The
// system will start after receiving the first image.
bool is_first_img;
// The position uncertainty threshold is used to determine
// when to reset the system online. Otherwise, the ever-
// increasing uncertainty will make the estimation unstable.
// Note this online reset amounts to dead reckoning afterwards.
// Set this threshold to nonpositive to disable online reset.
double position_std_threshold;
// Tracking rate
double tracking_rate;
// Thresholds for determining keyframes
double translation_threshold;
double rotation_threshold;
double tracking_rate_threshold;
// Ros node handle
ros::NodeHandle nh;
// Subscribers and publishers
ros::Subscriber imu_sub;
ros::Subscriber feature_sub;
ros::Publisher odom_pub;
ros::Publisher feature_pub;
tf::TransformBroadcaster tf_pub;
ros::ServiceServer reset_srv;
// Frame id
std::string fixed_frame_id;
std::string child_frame_id;
// Whether to publish tf or not.
bool publish_tf;
// Frame rate of the stereo images. This variable is
// only used to determine the timing threshold of
// each iteration of the filter.
double frame_rate;
// Debugging variables and functions
void mocapOdomCallback(
const nav_msgs::OdometryConstPtr& msg);
ros::Subscriber mocap_odom_sub;
ros::Publisher mocap_odom_pub;
geometry_msgs::TransformStamped raw_mocap_odom_msg;
Eigen::Isometry3d mocap_initial_frame;
};
typedef MsckfVio::Ptr MsckfVioPtr;
typedef MsckfVio::ConstPtr MsckfVioConstPtr;
} // namespace msckf_vio
#endif
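For orientation only, measurementUpdate(H, r) above receives a stacked measurement Jacobian H and residual r; the generic EKF update an MSCKF applies to the error state is (a sketch of the standard form, assuming the measurement noise covariance R_n is built from the feature observation noise; the exact form used in the accompanying implementation file is not shown in this commit excerpt):

$$
K = P H^{\top} \left(H P H^{\top} + R_n\right)^{-1}, \qquad
\delta x = K\, r, \qquad
P \leftarrow \left(I - K H\right) P \left(I - K H\right)^{\top} + K R_n K^{\top},
$$

where $P$ is the state covariance (`state_cov`), $\delta x$ corrects the IMU state and all camera states, and the last expression is the Joseph-form covariance update.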

28
include/msckf_vio/msckf_vio_nodelet.h Normal file

@ -0,0 +1,28 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#ifndef MSCKF_VIO_NODELET_H
#define MSCKF_VIO_NODELET_H
#include <nodelet/nodelet.h>
#include <pluginlib/class_list_macros.h>
#include <msckf_vio/msckf_vio.h>
namespace msckf_vio {
class MsckfVioNodelet : public nodelet::Nodelet {
public:
MsckfVioNodelet() { return; }
~MsckfVioNodelet() { return; }
private:
virtual void onInit();
MsckfVioPtr msckf_vio_ptr;
};
} // end namespace msckf_vio
#endif

33
launch/image_processor_euroc.launch Normal file

@ -0,0 +1,33 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-euroc.yaml"/>
<!-- Image Processor Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="image_processor"
args="standalone msckf_vio/ImageProcessorNodelet"
output="screen">
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="grid_row" value="4"/>
<param name="grid_col" value="5"/>
<param name="grid_min_feature_num" value="3"/>
<param name="grid_max_feature_num" value="4"/>
<param name="pyramid_levels" value="3"/>
<param name="patch_size" value="15"/>
<param name="fast_threshold" value="10"/>
<param name="max_iteration" value="30"/>
<param name="track_precision" value="0.01"/>
<param name="ransac_threshold" value="3"/>
<param name="stereo_threshold" value="5"/>
<remap from="~imu" to="/imu0"/>
<remap from="~cam0_image" to="/cam0/image_raw"/>
<remap from="~cam1_image" to="/cam1/image_raw"/>
</node>
</group>
</launch>

33
launch/image_processor_fla.launch Normal file

@ -0,0 +1,33 @@
<launch>
<arg name="robot" default="fla"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-fla.yaml"/>
<!-- Image Processor Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="image_processor"
args="standalone msckf_vio/ImageProcessorNodelet"
output="screen">
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="grid_row" value="4"/>
<param name="grid_col" value="5"/>
<param name="grid_min_feature_num" value="2"/>
<param name="grid_max_feature_num" value="3"/>
<param name="pyramid_levels" value="3"/>
<param name="patch_size" value="15"/>
<param name="fast_threshold" value="10"/>
<param name="max_iteration" value="30"/>
<param name="track_precision" value="0.01"/>
<param name="ransac_threshold" value="3"/>
<param name="stereo_threshold" value="5"/>
<remap from="~imu" to="sync/imu/imu"/>
<remap from="~cam0_image" to="sync/cam0/image_raw"/>
<remap from="~cam1_image" to="sync/cam1/image_raw"/>
</node>
</group>
</launch>

61
launch/msckf_vio_euroc.launch Normal file

@ -0,0 +1,61 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="fixed_frame_id" default="world"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-euroc.yaml"/>
<!-- Image Processor Nodelet -->
<include file="$(find msckf_vio)/launch/image_processor_euroc.launch">
<arg name="robot" value="$(arg robot)"/>
<arg name="calibration_file" value="$(arg calibration_file)"/>
</include>
<!-- Msckf Vio Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="vio"
args='standalone msckf_vio/MsckfVioNodelet'
output="screen">
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="publish_tf" value="true"/>
<param name="frame_rate" value="20"/>
<param name="fixed_frame_id" value="$(arg fixed_frame_id)"/>
<param name="child_frame_id" value="odom"/>
<param name="max_cam_state_size" value="20"/>
<param name="position_std_threshold" value="8.0"/>
<param name="rotation_threshold" value="0.2618"/>
<param name="translation_threshold" value="0.4"/>
<param name="tracking_rate_threshold" value="0.5"/>
<!-- Feature optimization config -->
<param name="feature/config/translation_threshold" value="-1.0"/>
<!-- These values should be standard deviation -->
<param name="noise/gyro" value="0.005"/>
<param name="noise/acc" value="0.05"/>
<param name="noise/gyro_bias" value="0.001"/>
<param name="noise/acc_bias" value="0.01"/>
<param name="noise/feature" value="0.035"/>
<param name="initial_state/velocity/x" value="0.0"/>
<param name="initial_state/velocity/y" value="0.0"/>
<param name="initial_state/velocity/z" value="0.0"/>
<!-- These values should be covariance -->
<param name="initial_covariance/velocity" value="0.25"/>
<param name="initial_covariance/gyro_bias" value="0.01"/>
<param name="initial_covariance/acc_bias" value="0.01"/>
<param name="initial_covariance/extrinsic_rotation_cov" value="3.0462e-4"/>
<param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/>
<remap from="~imu" to="/imu0"/>
<remap from="~features" to="image_processor/features"/>
</node>
</group>
</launch>

61
launch/msckf_vio_fla.launch Normal file

@ -0,0 +1,61 @@
<launch>
<arg name="robot" default="fla"/>
<arg name="fixed_frame_id" default="vision"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-fla.yaml"/>
<!-- Image Processor Nodelet -->
<include file="$(find msckf_vio)/launch/image_processor_fla.launch">
<arg name="robot" value="$(arg robot)"/>
<arg name="calibration_file" value="$(arg calibration_file)"/>
</include>
<!-- Msckf Vio Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="vio"
args='standalone msckf_vio/MsckfVioNodelet'
output="screen">
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="publish_tf" value="true"/>
<param name="frame_rate" value="40"/>
<param name="fixed_frame_id" value="$(arg fixed_frame_id)"/>
<param name="child_frame_id" value="odom"/>
<param name="max_cam_state_size" value="20"/>
<param name="position_std_threshold" value="8.0"/>
<param name="rotation_threshold" value="0.2618"/>
<param name="translation_threshold" value="0.4"/>
<param name="tracking_rate_threshold" value="0.5"/>
<!-- Feature optimization config -->
<param name="feature/config/translation_threshold" value="-1.0"/>
<!-- These values should be standard deviation -->
<param name="noise/gyro" value="0.01"/>
<param name="noise/acc" value="0.1"/>
<param name="noise/gyro_bias" value="0.005"/>
<param name="noise/acc_bias" value="0.05"/>
<param name="noise/feature" value="0.03"/>
<param name="initial_state/velocity/x" value="0.0"/>
<param name="initial_state/velocity/y" value="0.0"/>
<param name="initial_state/velocity/z" value="0.0"/>
<!-- These values should be covariance -->
<param name="initial_covariance/velocity" value="0.25"/>
<param name="initial_covariance/gyro_bias" value="0.0001"/>
<param name="initial_covariance/acc_bias" value="0.01"/>
<param name="initial_covariance/extrinsic_rotation_cov" value="3.0462e-4"/>
<param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/>
<remap from="~imu" to="sync/imu/imu"/>
<remap from="~features" to="image_processor/features"/>
</node>
</group>
</launch>

7
launch/reset.launch Normal file

@ -0,0 +1,7 @@
<launch>
<arg name="robot" default="fla1"/>
<group ns="$(arg robot)">
<node pkg="rosservice" type="rosservice" name="reset_vio"
args="call --wait vio/reset"/>
</group>
</launch>

4
msg/CameraMeasurement.msg Normal file

@ -0,0 +1,4 @@
std_msgs/Header header
# All features on the current image,
# including tracked ones and newly detected ones.
FeatureMeasurement[] features

6
msg/FeatureMeasurement.msg Normal file

@ -0,0 +1,6 @@
uint64 id
# Normalized feature coordinates (with identity intrinsic matrix)
float64 u0 # horizontal coordinate in cam0
float64 v0 # vertical coordinate in cam0
float64 u1 # horizontal coordinate in cam1
float64 v1 # vertical coordinate in cam1
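# Here "normalized" means the (undistorted) pixel coordinates mapped through
# the inverse of the camera intrinsics, e.g. roughly u0 = (x0 - cx0) / fx0 and
# v0 = (y0 - cy0) / fy0 for cam0.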

7
msg/TrackingInfo.msg Normal file

@ -0,0 +1,7 @@
std_msgs/Header header
# Number of features after each outlier removal step.
int16 before_tracking
int16 after_tracking
int16 after_matching
int16 after_ransac

20
nodelets.xml Normal file

@ -0,0 +1,20 @@
<library path="lib/libmsckf_vio_nodelet">
<class name="msckf_vio/MsckfVioNodelet"
type="msckf_vio::MsckfVioNodelet"
base_class_type="nodelet::Nodelet">
<description>
Multi-State Constraint Kalman Filter for vision-
aided inertial navigation with observability constraints.
</description>
</class>
</library>
<library path="lib/libimage_processor_nodelet">
<class name="msckf_vio/ImageProcessorNodelet"
type="msckf_vio::ImageProcessorNodelet"
base_class_type="nodelet::Nodelet">
<description>
Detect and track features in image sequence.
</description>
</class>
</library>

43
package.xml Normal file

@ -0,0 +1,43 @@
<?xml version="1.0"?>
<package format="2">
<name>msckf_vio</name>
<version>0.0.1</version>
<description>Multi-State Constraint Kalman Filter for Vision-aided Inertial Navigation</description>
<maintainer email="sunke.polyu@gmail.com">Ke Sun</maintainer>
<license>Penn Software License</license>
<author email="sunke.polyu@gmail.com">Ke Sun</author>
<author email="kartikmohta@gmail.com">Kartik Mohta</author>
<buildtool_depend>catkin</buildtool_depend>
<depend>roscpp</depend>
<depend>std_msgs</depend>
<depend>tf</depend>
<depend>nav_msgs</depend>
<depend>sensor_msgs</depend>
<depend>geometry_msgs</depend>
<depend>eigen_conversions</depend>
<depend>tf_conversions</depend>
<depend>random_numbers</depend>
<depend>message_generation</depend>
<depend>message_runtime</depend>
<depend>nodelet</depend>
<depend>image_transport</depend>
<depend>cv_bridge</depend>
<depend>message_filters</depend>
<depend>pcl_conversions</depend>
<depend>pcl_ros</depend>
<depend>std_srvs</depend>
<depend>libpcl-all-dev</depend>
<depend>libpcl-all</depend>
<depend>suitesparse</depend>
<export>
<nodelet plugin="${prefix}/nodelets.xml"/>
</export>
</package>

319
rviz/rviz_euroc_config.rviz Normal file

@ -0,0 +1,319 @@
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded:
- /TF1/Frames1
- /VIO1/Shape1
- /Ground Truth1
- /Ground Truth1/Shape1
Splitter Ratio: 0.5
Tree Height: 843
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.588679016
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: VIO Points
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.100000001
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.0299999993
Value: Lines
Name: 1m Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 20
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 0.699999988
Cell Size: 5
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.0299999993
Value: Lines
Name: 5m Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 4
Reference Frame: <Fixed Frame>
Value: true
- Class: rviz/TF
Enabled: true
Frame Timeout: 1000
Frames:
All Enabled: false
odom:
Value: true
world:
Value: true
Marker Scale: 4
Name: TF
Show Arrows: false
Show Axes: true
Show Names: true
Tree:
world:
{}
Update Interval: 0
Value: true
- Angle Tolerance: 0
Class: rviz/Odometry
Covariance:
Orientation:
Alpha: 0.5
Color: 255; 255; 127
Color Style: Unique
Frame: Local
Offset: 1
Scale: 1
Value: false
Position:
Alpha: 0.300000012
Color: 204; 51; 204
Scale: 1
Value: true
Value: false
Enabled: false
Keep: 200
Name: VIO
Position Tolerance: 0
Shape:
Alpha: 1
Axes Length: 1
Axes Radius: 0.100000001
Color: 255; 25; 0
Head Length: 0.100000001
Head Radius: 0.100000001
Shaft Length: 0.5
Shaft Radius: 0.0500000007
Value: Arrow
Topic: /firefly_sbx/vio/odom
Unreliable: false
Value: false
- Angle Tolerance: 0
Class: rviz/Odometry
Covariance:
Orientation:
Alpha: 0.5
Color: 255; 255; 127
Color Style: Unique
Frame: Local
Offset: 1
Scale: 1
Value: false
Position:
Alpha: 0.300000012
Color: 204; 51; 204
Scale: 1
Value: true
Value: true
Enabled: true
Keep: 1
Name: Covariance
Position Tolerance: 0
Shape:
Alpha: 1
Axes Length: 1
Axes Radius: 0.100000001
Color: 255; 25; 0
Head Length: 0.200000003
Head Radius: 0.200000003
Shaft Length: 1
Shaft Radius: 0.100000001
Value: Arrow
Topic: /firefly_sbx/vio/odom
Unreliable: false
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 3
Min Value: -2
Value: false
Axis: Z
Channel Name: z
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: AxisColor
Decay Time: 30
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 26.7646065
Min Color: 0; 0; 0
Min Intensity: -4.65562725
Name: VIO Points
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 1
Style: Points
Topic: /firefly_sbx/vio/feature_point_cloud
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Class: rviz/Image
Enabled: true
Image Topic: /firefly_sbx/image_processor/debug_stereo_image
Max Value: 1
Median window: 5
Min Value: 0
Name: Stereo Features
Normalize Range: true
Queue Size: 2
Transport Hint: raw
Unreliable: false
Value: true
- Angle Tolerance: 0
Class: rviz/Odometry
Covariance:
Orientation:
Alpha: 0.5
Color: 255; 255; 127
Color Style: Unique
Frame: Local
Offset: 1
Scale: 1
Value: true
Position:
Alpha: 0.300000012
Color: 204; 51; 204
Scale: 1
Value: true
Value: false
Enabled: true
Keep: 1
Name: Ground Truth
Position Tolerance: 0
Shape:
Alpha: 1
Axes Length: 1
Axes Radius: 0.100000001
Color: 0; 170; 255
Head Length: 0.200000003
Head Radius: 0.200000003
Shaft Length: 1
Shaft Radius: 0.0500000007
Value: Arrow
Topic: /benchmark/odometry
Unreliable: false
Value: true
- Alpha: 1
Buffer Length: 1
Class: rviz/Path
Color: 170; 255; 0
Enabled: false
Head Diameter: 0.300000012
Head Length: 0.200000003
Length: 0.300000012
Line Style: Lines
Line Width: 0.0299999993
Name: Ground Truth Path
Offset:
X: 0
Y: 0
Z: 0
Pose Color: 255; 85; 255
Pose Style: None
Radius: 0.0299999993
Shaft Diameter: 0.100000001
Shaft Length: 0.100000001
Topic: /benchmark/path
Unreliable: false
Value: false
Enabled: true
Global Options:
Background Color: 48; 48; 48
Fixed Frame: world
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Topic: /initialpose
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 15.6398182
Enable Stereo Rendering:
Stereo Eye Separation: 0.0599999987
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: 0.80464071
Y: -1.53352785
Z: 0.741859734
Focal Shape Fixed Size: true
Focal Shape Size: 0.0500000007
Name: Current View
Near Clip Distance: 0.00999999978
Pitch: 1.55979633
Target Frame: odom
Value: Orbit (rviz)
Yaw: 6.27799845
Saved: ~
Window Geometry:
Displays:
collapsed: true
Height: 1056
Hide Left Dock: true
Hide Right Dock: true
QMainWindow State: 000000ff00000000fd0000000400000000000001b3000003dafc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c0061007900730000000028000003da000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001a2000003dafc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000028000003da000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000053d0000016dfc0100000004fb0000001e00530074006500720065006f002000460065006100740075007200650073030000000a000002dc0000038a0000013cfb0000001e00530074006500720065006f0020004600650061007400750072006500730100000000000005510000000000000000fb0000000800540069006d00650000000000000005560000030000fffffffb0000000800540069006d0065010000000000000450000000000000000000000780000003da00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Stereo Features:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1920
X: 0
Y: 24

263
rviz/rviz_fla_config.rviz Normal file

@ -0,0 +1,263 @@
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded:
- /TF1/Frames1
- /VIO1/Shape1
- /VIO Points1
- /VIO Points1/Autocompute Value Bounds1
Splitter Ratio: 0.5
Tree Height: 808
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.588679016
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: Stereo Features
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.100000001
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.0299999993
Value: Lines
Name: 1m Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 700
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 0.699999988
Cell Size: 10
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.0299999993
Value: Lines
Name: 10m Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 70
Reference Frame: <Fixed Frame>
Value: true
- Class: rviz/TF
Enabled: true
Frame Timeout: 1000
Frames:
All Enabled: false
odom:
Value: true
vision:
Value: true
Marker Scale: 20
Name: TF
Show Arrows: false
Show Axes: true
Show Names: true
Tree:
vision:
odom:
{}
Update Interval: 0
Value: true
- Angle Tolerance: 0
Class: rviz/Odometry
Covariance:
Orientation:
Alpha: 0.5
Color: 255; 255; 127
Color Style: Unique
Frame: Local
Offset: 1
Scale: 1
Value: true
Position:
Alpha: 0.300000012
Color: 204; 51; 204
Scale: 1
Value: true
Value: false
Enabled: true
Keep: 400
Name: VIO
Position Tolerance: 0
Shape:
Alpha: 1
Axes Length: 1
Axes Radius: 0.100000001
Color: 255; 25; 0
Head Length: 0.300000012
Head Radius: 0.300000012
Shaft Length: 3.5
Shaft Radius: 0.0500000007
Value: Arrow
Topic: /fla/vio/odom
Unreliable: false
Value: true
- Angle Tolerance: 0
Class: rviz/Odometry
Covariance:
Orientation:
Alpha: 0.5
Color: 255; 255; 127
Color Style: Unique
Frame: Local
Offset: 1
Scale: 1
Value: false
Position:
Alpha: 0.5
Color: 204; 51; 204
Scale: 1
Value: true
Value: true
Enabled: true
Keep: 1
Name: Covariance
Position Tolerance: 0
Shape:
Alpha: 1
Axes Length: 1
Axes Radius: 0.100000001
Color: 255; 25; 0
Head Length: 0.300000012
Head Radius: 0.100000001
Shaft Length: 1
Shaft Radius: 0.0500000007
Value: Arrow
Topic: /fla/vio/odom
Unreliable: false
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -5
Value: false
Axis: Z
Channel Name: z
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: AxisColor
Decay Time: 30
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 26.7646065
Min Color: 0; 0; 0
Min Intensity: -4.65562725
Name: VIO Points
Position Transformer: XYZ
Queue Size: 1
Selectable: true
Size (Pixels): 3
Size (m): 1
Style: Points
Topic: /fla/vio/feature_point_cloud
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Class: rviz/Image
Enabled: true
Image Topic: /fla/image_processor/debug_stereo_image
Max Value: 1
Median window: 5
Min Value: 0
Name: Stereo Features
Normalize Range: true
Queue Size: 2
Transport Hint: raw
Unreliable: false
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Fixed Frame: vision
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Topic: /initialpose
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 98.0585938
Enable Stereo Rendering:
Stereo Eye Separation: 0.0599999987
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -26.1133022
Y: -6.50123549
Z: -17.9633904
Focal Shape Fixed Size: true
Focal Shape Size: 0.0500000007
Name: Current View
Near Clip Distance: 0.00999999978
Pitch: 1.19979477
Target Frame: odom
Value: Orbit (rviz)
Yaw: 4.71076918
Saved: ~
Window Geometry:
Displays:
collapsed: true
Height: 1056
Hide Left Dock: true
Hide Right Dock: true
QMainWindow State: 000000ff00000000fd0000000400000000000001b3000003b7fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c0061007900730000000028000003b7000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001a2000001fcfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000002b000001fc000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005b7000001cffc0100000004fb0000001e00530074006500720065006f002000460065006100740075007200650073030000000c000002400000043f000001d9fb0000001e00530074006500720065006f0020004600650061007400750072006500730100000000000005510000000000000000fb0000000800540069006d00650000000000000005560000030000fffffffb0000000800540069006d0065010000000000000450000000000000000000000780000003da00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Stereo Features:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1920
X: 0
Y: 24

1510
src/image_processor.cpp Normal file

File diff suppressed because it is too large

24
src/image_processor_nodelet.cpp Normal file

@ -0,0 +1,24 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#include <msckf_vio/image_processor_nodelet.h>
#include <pluginlib/class_list_macros.h>
namespace msckf_vio {
void ImageProcessorNodelet::onInit() {
img_processor_ptr.reset(new ImageProcessor(getPrivateNodeHandle()));
if (!img_processor_ptr->initialize()) {
ROS_ERROR("Cannot initialize Image Processor...");
return;
}
return;
}
PLUGINLIB_DECLARE_CLASS(msckf_vio, ImageProcessorNodelet,
msckf_vio::ImageProcessorNodelet, nodelet::Nodelet);
} // end namespace msckf_vio
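PLUGINLIB_DECLARE_CLASS is the legacy pluginlib registration macro; on newer pluginlib releases the equivalent export would be (a sketch, not part of this commit):

#include <pluginlib/class_list_macros.h>
PLUGINLIB_EXPORT_CLASS(msckf_vio::ImageProcessorNodelet, nodelet::Nodelet);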

1492
src/msckf_vio.cpp Normal file

File diff suppressed because it is too large

24
src/msckf_vio_nodelet.cpp Normal file

@ -0,0 +1,24 @@
/*
* COPYRIGHT AND PERMISSION NOTICE
* Penn Software MSCKF_VIO
* Copyright (C) 2017 The Trustees of the University of Pennsylvania
* All rights reserved.
*/
#include <msckf_vio/msckf_vio_nodelet.h>
#include <pluginlib/class_list_macros.h>
namespace msckf_vio {
void MsckfVioNodelet::onInit() {
msckf_vio_ptr.reset(new MsckfVio(getPrivateNodeHandle()));
if (!msckf_vio_ptr->initialize()) {
ROS_ERROR("Cannot initialize MSCKF VIO...");
return;
}
return;
}
PLUGINLIB_DECLARE_CLASS(msckf_vio, MsckfVioNodelet,
msckf_vio::MsckfVioNodelet, nodelet::Nodelet);
} // end namespace msckf_vio

128
test/feature_initialization_test.cpp Normal file

@ -0,0 +1,128 @@
/*
*This file is part of msckf_vio
*
* msckf_vio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* msckf_vio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with msckf_vio. If not, see <http://www.gnu.org/licenses/>.
*/
#include <iostream>
#include <vector>
#include <map>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <Eigen/StdVector>
#include <gtest/gtest.h>
#include <random_numbers/random_numbers.h>
#include <msckf_vio/cam_state.h>
#include <msckf_vio/feature.hpp>
using namespace std;
using namespace Eigen;
using namespace msckf_vio;
TEST(FeatureInitializeTest, sphereDistribution) {
// Set the true feature position in the world frame.
Vector3d feature(0.5, 0.0, 0.0);
// Add 6 camera poses, all of which are able to see the
// feature. For simplicity, the six camera views are located
// at the six intersections between a unit sphere and the
// coordinate axes, with the z-axis of each camera frame
// pointing towards the origin.
vector<Isometry3d, aligned_allocator<Isometry3d> > cam_poses(6, Isometry3d::Identity());
// Positive x axis.
cam_poses[0].linear() << 0.0, 0.0, -1.0,
1.0, 0.0, 0.0, 0.0, -1.0, 0.0;
cam_poses[0].translation() << 1.0, 0.0, 0.0;
// Positive y axis.
cam_poses[1].linear() << -1.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0, -1.0, 0.0;
cam_poses[1].translation() << 0.0, 1.0, 0.0;
// Negative x axis.
cam_poses[2].linear() << 0.0, 0.0, 1.0,
-1.0, 0.0, 0.0, 0.0, -1.0, 0.0;
cam_poses[2].translation() << -1.0, 0.0, 0.0;
// Negative y axis.
cam_poses[3].linear() << 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0, -1.0, 0.0;
cam_poses[3].translation() << 0.0, -1.0, 0.0;
// Positive z axis.
cam_poses[4].linear() << 0.0, -1.0, 0.0,
-1.0, 0.0, 0.0, 0.0, 0.0, -1.0;
cam_poses[4].translation() << 0.0, 0.0, 1.0;
// Negative z axis.
cam_poses[5].linear() << 1.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, 0.0, 1.0;
cam_poses[5].translation() << 0.0, 0.0, -1.0;
// Set the camera states
CamStateServer cam_states;
for (int i = 0; i < 6; ++i) {
CAMState new_cam_state;
new_cam_state.id = i;
new_cam_state.time = static_cast<double>(i);
new_cam_state.orientation = rotationToQuaternion(
Matrix3d(cam_poses[i].linear().transpose()));
new_cam_state.position = cam_poses[i].translation();
cam_states[new_cam_state.id] = new_cam_state;
}
// Compute measurements.
random_numbers::RandomNumberGenerator noise_generator;
vector<Vector2d, aligned_allocator<Vector2d> > measurements(6);
for (int i = 0; i < 6; ++i) {
Isometry3d cam_pose_inv = cam_poses[i].inverse();
Vector3d p = cam_pose_inv.linear()*feature + cam_pose_inv.translation();
double u = p(0) / p(2) + noise_generator.gaussian(0.0, 0.01);
double v = p(1) / p(2) + noise_generator.gaussian(0.0, 0.01);
//double u = p(0) / p(2);
//double v = p(1) / p(2);
measurements[i] = Vector2d(u, v);
}
for (int i = 0; i < 6; ++i) {
cout << "pose " << i << ":" << endl;
cout << "orientation: " << endl;
cout << cam_poses[i].linear() << endl;
cout << "translation: " << endl;
cout << cam_poses[i].translation().transpose() << endl;
cout << "measurement: " << endl;
cout << measurements[i].transpose() << endl;
cout << endl;
}
// Initialize a feature object.
Feature feature_object;
for (int i = 0; i < 6; ++i)
feature_object.observations[i] = measurements[i];
// Compute the 3d position of the feature.
feature_object.initializePosition(cam_states);
// Check the difference between the computed 3d
// feature position and the ground truth.
cout << "ground truth position: " << feature.transpose() << endl;
cout << "estimated position: " << feature_object.position.transpose() << endl;
Eigen::Vector3d error = feature_object.position - feature;
EXPECT_NEAR(error.norm(), 0, 0.05);
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
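What initializePosition is asked to do here, written out explicitly (notation is mine, not taken from the source): recover the feature position p that best explains the noisy normalized measurements z_i, i.e. approximately solve

\hat{p} = \arg\min_{p} \sum_{i=1}^{6} \left\| z_i - \pi\!\big( R_i\,(p - t_i) \big) \right\|^2,
\qquad
\pi\big([x,\,y,\,z]^\top\big) = [\,x/z,\;\; y/z\,]^\top,

where R_i = cam_poses[i].linear()^T is the world-to-camera rotation, t_i = cam_poses[i].translation() is the camera position in the world frame, and the EXPECT_NEAR check bounds the norm of the position error by 0.05.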

87
test/math_utils_test.cpp Normal file

@ -0,0 +1,87 @@
/*
*This file is part of msckf_vio
*
* msckf_vio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* msckf_vio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with msckf_vio. If not, see <http://www.gnu.org/licenses/>.
*/
#include <iostream>
#include <Eigen/Dense>
#include <gtest/gtest.h>
#include <msckf_vio/math_utils.hpp>
using namespace std;
using namespace Eigen;
using namespace msckf_vio;
TEST(MathUtilsTest, skewSymmetric) {
Vector3d w(1.0, 2.0, 3.0);
Matrix3d w_hat = skewSymmetric(w);
Vector3d zero_vector = w_hat * w;
FullPivLU<Matrix3d> lu_helper(w_hat);
EXPECT_EQ(lu_helper.rank(), 2);
EXPECT_DOUBLE_EQ(zero_vector.norm(), 0.0);
return;
}
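// Note on the skewSymmetric test above: skewSymmetric(w) builds the
// cross-product matrix
//   [w]_x = [  0  -w3   w2 ]
//           [  w3   0  -w1 ]
//           [ -w2   w1   0 ],
// so that [w]_x * v = w x v. Because w x w = 0, w_hat * w is exactly the
// zero vector, and a nonzero [w]_x always has rank 2, which is what the two
// expectations verify.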
TEST(MathUtilsTest, quaternionNormalize) {
Vector4d q(1.0, 1.0, 1.0, 1.0);
quaternionNormalize(q);
EXPECT_DOUBLE_EQ(q.norm(), 1.0);
return;
}
TEST(MathUtilsTest, quaternionToRotation) {
Vector4d q(0.0, 0.0, 0.0, 1.0);
Matrix3d R = quaternionToRotation(q);
Matrix3d zero_matrix = R - Matrix3d::Identity();
FullPivLU<Matrix3d> lu_helper(zero_matrix);
EXPECT_EQ(lu_helper.rank(), 0);
return;
}
TEST(MathUtilsTest, rotationToQuaternion) {
Vector4d q1(0.0, 0.0, 0.0, 1.0);
Matrix3d I = Matrix3d::Identity();
Vector4d q2 = rotationToQuaternion(I);
Vector4d zero_vector = q1 - q2;
EXPECT_DOUBLE_EQ(zero_vector.norm(), 0.0);
return;
}
TEST(MathUtilsTest, quaternionMultiplication) {
Vector4d q1(2.0, 2.0, 1.0, 1.0);
Vector4d q2(1.0, 2.0, 3.0, 1.0);
q1 = q1 / q1.norm();
q2 = q2 / q2.norm();
Vector4d q_prod = quaternionMultiplication(q1, q2);
Matrix3d R1 = quaternionToRotation(q1);
Matrix3d R2 = quaternionToRotation(q2);
Matrix3d R_prod = R1 * R2;
Matrix3d R_prod_cp = quaternionToRotation(q_prod);
Matrix3d zero_matrix = R_prod - R_prod_cp;
EXPECT_NEAR(zero_matrix.sum(), 0.0, 1e-10);
return;
}
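// Note on the quaternionMultiplication test above: it checks that the
// quaternion product composes in the same order as the corresponding
// rotation matrices, i.e. quaternionToRotation(quaternionMultiplication(q1, q2))
// equals quaternionToRotation(q1) * quaternionToRotation(q2), by comparing
// the two 3x3 products entrywise within 1e-10.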
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}