From 21e3f9865c85d41b172d7bdddfd889480f69e11d Mon Sep 17 00:00:00 2001
From: Ke Sun
Date: Mon, 8 Jan 2018 14:41:37 -0500
Subject: [PATCH] First commit of the code

---
 CMakeLists.txt                              |  147 ++
 cmake/FindCholmod.cmake                     |   89 ++
 cmake/FindSPQR.cmake                        |   36 +
 config/camchain-imucam-euroc.yaml           |   33 +
 config/camchain-imucam-fla.yaml             |   37 +
 include/msckf_vio/cam_state.h               |   67 +
 include/msckf_vio/feature.hpp               |  440 ++++++
 include/msckf_vio/image_processor.h         |  398 +++++
 include/msckf_vio/image_processor_nodelet.h |   28 +
 include/msckf_vio/imu_state.h               |  110 ++
 include/msckf_vio/math_utils.hpp            |  163 ++
 include/msckf_vio/msckf_vio.h               |  242 +++
 include/msckf_vio/msckf_vio_nodelet.h       |   28 +
 launch/image_processor_euroc.launch         |   33 +
 launch/image_processor_fla.launch           |   33 +
 launch/msckf_vio_euroc.launch               |   61 +
 launch/msckf_vio_fla.launch                 |   61 +
 launch/reset.launch                         |    7 +
 msg/CameraMeasurement.msg                   |    4 +
 msg/FeatureMeasurement.msg                  |    6 +
 msg/TrackingInfo.msg                        |    7 +
 nodelets.xml                                |   20 +
 package.xml                                 |   43 +
 rviz/rviz_euroc_config.rviz                 |  319 ++++
 rviz/rviz_fla_config.rviz                   |  263 ++++
 src/image_processor.cpp                     | 1510 +++++++++++++++++++
 src/image_processor_nodelet.cpp             |   24 +
 src/msckf_vio.cpp                           | 1492 ++++++++++++++++++
 src/msckf_vio_nodelet.cpp                   |   24 +
 test/feature_initialization_test.cpp        |  128 ++
 test/math_utils_test.cpp                    |   87 ++
 31 files changed, 5940 insertions(+)
 create mode 100644 CMakeLists.txt
 create mode 100644 cmake/FindCholmod.cmake
 create mode 100644 cmake/FindSPQR.cmake
 create mode 100644 config/camchain-imucam-euroc.yaml
 create mode 100644 config/camchain-imucam-fla.yaml
 create mode 100644 include/msckf_vio/cam_state.h
 create mode 100644 include/msckf_vio/feature.hpp
 create mode 100644 include/msckf_vio/image_processor.h
 create mode 100644 include/msckf_vio/image_processor_nodelet.h
 create mode 100644 include/msckf_vio/imu_state.h
 create mode 100644 include/msckf_vio/math_utils.hpp
 create mode 100644 include/msckf_vio/msckf_vio.h
 create mode 100644 include/msckf_vio/msckf_vio_nodelet.h
 create mode 100644 launch/image_processor_euroc.launch
 create mode 100644 launch/image_processor_fla.launch
 create mode 100644 launch/msckf_vio_euroc.launch
 create mode 100644 launch/msckf_vio_fla.launch
 create mode 100644 launch/reset.launch
 create mode 100644 msg/CameraMeasurement.msg
 create mode 100644 msg/FeatureMeasurement.msg
 create mode 100644 msg/TrackingInfo.msg
 create mode 100644 nodelets.xml
 create mode 100644 package.xml
 create mode 100644 rviz/rviz_euroc_config.rviz
 create mode 100644 rviz/rviz_fla_config.rviz
 create mode 100644 src/image_processor.cpp
 create mode 100644 src/image_processor_nodelet.cpp
 create mode 100644 src/msckf_vio.cpp
 create mode 100644 src/msckf_vio_nodelet.cpp
 create mode 100644 test/feature_initialization_test.cpp
 create mode 100644 test/math_utils_test.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..3ad1df1
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,147 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(msckf_vio)
+
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+
+# Modify cmake module path if new .cmake files are required
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_LIST_DIR}/cmake")
+
+find_package(catkin REQUIRED COMPONENTS
+  roscpp
+  std_msgs
+  tf
+  nav_msgs
+  sensor_msgs
+  geometry_msgs
+  eigen_conversions
+  tf_conversions
+  random_numbers
+  message_generation
+  image_transport
+  cv_bridge
+  message_filters
+  pcl_conversions
+  pcl_ros
+)
+
+## System dependencies are found with CMake's conventions
+find_package(Boost REQUIRED)
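+# Cholmod and SPQR below are resolved through this package's own
+# cmake/FindCholmod.cmake and cmake/FindSPQR.cmake modules, located
+# via the CMAKE_MODULE_PATH set above.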
+find_package(Eigen3 REQUIRED)
+find_package(OpenCV REQUIRED)
+find_package(Cholmod REQUIRED)
+find_package(SPQR REQUIRED)
+
+add_message_files(
+  FILES
+
+  FeatureMeasurement.msg
+  CameraMeasurement.msg
+  TrackingInfo.msg
+
+  #DebugImuState.msg
+  #DebugCamState.msg
+  #DebugState.msg
+  #DebugMsckfInfo.msg
+)
+
+generate_messages(
+  DEPENDENCIES
+  std_msgs
+)
+
+catkin_package(
+  INCLUDE_DIRS include
+#  LIBRARIES msckf_vio
+  CATKIN_DEPENDS
+    roscpp std_msgs tf nav_msgs sensor_msgs geometry_msgs
+    eigen_conversions tf_conversions message_runtime
+    image_transport cv_bridge message_filters pcl_conversions
+    pcl_ros std_srvs
+  DEPENDS
+    Boost EIGEN3 OpenCV
+)
+
+include_directories(
+  include
+  ${catkin_INCLUDE_DIRS}
+  ${EIGEN3_INCLUDE_DIR}
+  ${Boost_INCLUDE_DIR}
+  ${OpenCV_INCLUDE_DIRS}
+  ${CHOLMOD_INCLUDES}
+  ${SPQR_INCLUDES}
+)
+
+link_directories(
+  ${catkin_LIBRARY_DIRS}
+)
+
+# Msckf Vio
+add_library(msckf_vio
+  src/msckf_vio.cpp
+)
+add_dependencies(msckf_vio
+  ${${PROJECT_NAME}_EXPORTED_TARGETS}
+  ${catkin_EXPORTED_TARGETS}
+)
+target_link_libraries(msckf_vio
+  ${catkin_LIBRARIES}
+  ${CHOLMOD_LIBRARIES}
+  ${SPQR_LIBRARIES}
+)
+
+# Msckf Vio nodelet
+add_library(msckf_vio_nodelet
+  src/msckf_vio_nodelet.cpp
+)
+add_dependencies(msckf_vio_nodelet
+  ${${PROJECT_NAME}_EXPORTED_TARGETS}
+  ${catkin_EXPORTED_TARGETS}
+)
+target_link_libraries(msckf_vio_nodelet
+  msckf_vio
+  ${catkin_LIBRARIES}
+)
+
+# Image processor
+add_library(image_processor
+  src/image_processor.cpp
+)
+add_dependencies(image_processor
+  ${${PROJECT_NAME}_EXPORTED_TARGETS}
+  ${catkin_EXPORTED_TARGETS}
+)
+target_link_libraries(image_processor
+  ${catkin_LIBRARIES}
+  ${OpenCV_LIBRARIES}
+)
+
+# Image processor nodelet
+add_library(image_processor_nodelet
+  src/image_processor_nodelet.cpp
+)
+add_dependencies(image_processor_nodelet
+  ${${PROJECT_NAME}_EXPORTED_TARGETS}
+  ${catkin_EXPORTED_TARGETS}
+)
+target_link_libraries(image_processor_nodelet
+  image_processor
+  ${catkin_LIBRARIES}
+)
+
+# Feature initialization test
+catkin_add_gtest(test_feature_init
+  test/feature_initialization_test.cpp
+)
+add_dependencies(test_feature_init
+  ${${PROJECT_NAME}_EXPORTED_TARGETS}
+  ${catkin_EXPORTED_TARGETS}
+)
+target_link_libraries(test_feature_init
+  ${catkin_LIBRARIES}
+)
+
+# Math utils test
+catkin_add_gtest(test_math_utils
+  test/math_utils_test.cpp
+)
diff --git a/cmake/FindCholmod.cmake b/cmake/FindCholmod.cmake
new file mode 100644
index 0000000..7b3046d
--- /dev/null
+++ b/cmake/FindCholmod.cmake
@@ -0,0 +1,89 @@
+# Cholmod lib usually requires linking to a blas and lapack library.
+# It is up to the user of this module to find a BLAS and link to it.
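+# For example (not done here): call find_package(BLAS REQUIRED) and
+# find_package(LAPACK REQUIRED), then link ${BLAS_LIBRARIES} and
+# ${LAPACK_LIBRARIES} alongside ${CHOLMOD_LIBRARIES}.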
+
+if (CHOLMOD_INCLUDES AND CHOLMOD_LIBRARIES)
+  set(CHOLMOD_FIND_QUIETLY TRUE)
+endif (CHOLMOD_INCLUDES AND CHOLMOD_LIBRARIES)
+
+find_path(CHOLMOD_INCLUDES
+  NAMES
+  cholmod.h
+  PATHS
+  $ENV{CHOLMODDIR}
+  ${INCLUDE_INSTALL_DIR}
+  PATH_SUFFIXES
+  suitesparse
+  ufsparse
+)
+
+find_library(CHOLMOD_LIBRARIES cholmod PATHS $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
+
+if(CHOLMOD_LIBRARIES)
+
+  get_filename_component(CHOLMOD_LIBDIR ${CHOLMOD_LIBRARIES} PATH)
+
+  find_library(AMD_LIBRARY amd PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
+  if (AMD_LIBRARY)
+    set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${AMD_LIBRARY})
+  else ()
+    set(CHOLMOD_LIBRARIES FALSE)
+  endif ()
+
+endif(CHOLMOD_LIBRARIES)
+
+if(CHOLMOD_LIBRARIES)
+
+  find_library(COLAMD_LIBRARY colamd PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
+  if (COLAMD_LIBRARY)
+    set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${COLAMD_LIBRARY})
+  else ()
+    set(CHOLMOD_LIBRARIES FALSE)
+  endif ()
+
+endif(CHOLMOD_LIBRARIES)
+
+if(CHOLMOD_LIBRARIES)
+
+  find_library(CAMD_LIBRARY camd PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
+  if (CAMD_LIBRARY)
+    set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${CAMD_LIBRARY})
+  else ()
+    set(CHOLMOD_LIBRARIES FALSE)
+  endif ()
+
+endif(CHOLMOD_LIBRARIES)
+
+if(CHOLMOD_LIBRARIES)
+
+  find_library(CCOLAMD_LIBRARY ccolamd PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
+  if (CCOLAMD_LIBRARY)
+    set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${CCOLAMD_LIBRARY})
+  else ()
+    set(CHOLMOD_LIBRARIES FALSE)
+  endif ()
+
+endif(CHOLMOD_LIBRARIES)
+
+if(CHOLMOD_LIBRARIES)
+
+  find_library(CHOLMOD_METIS_LIBRARY metis PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
+  if (CHOLMOD_METIS_LIBRARY)
+    set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${CHOLMOD_METIS_LIBRARY})
+  endif ()
+
+endif(CHOLMOD_LIBRARIES)
+
+if(CHOLMOD_LIBRARIES)
+
+  find_library(SUITESPARSE_LIBRARY SuiteSparse PATHS ${CHOLMOD_LIBDIR} $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR})
+  if (SUITESPARSE_LIBRARY)
+    set(CHOLMOD_LIBRARIES ${CHOLMOD_LIBRARIES} ${SUITESPARSE_LIBRARY})
+  endif (SUITESPARSE_LIBRARY)
+
+endif(CHOLMOD_LIBRARIES)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(CHOLMOD DEFAULT_MSG
+  CHOLMOD_INCLUDES CHOLMOD_LIBRARIES)
+
+mark_as_advanced(CHOLMOD_INCLUDES CHOLMOD_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY SUITESPARSE_LIBRARY)
diff --git a/cmake/FindSPQR.cmake b/cmake/FindSPQR.cmake
new file mode 100644
index 0000000..40e8807
--- /dev/null
+++ b/cmake/FindSPQR.cmake
@@ -0,0 +1,36 @@
+# SPQR lib usually requires linking to a blas and lapack library.
+# It is up to the user of this module to find a BLAS and link to it.
+
+# SPQR lib requires Cholmod, colamd and amd as well.
+# FindCholmod.cmake can be used to find those packages before finding spqr.
+
+if (SPQR_INCLUDES AND SPQR_LIBRARIES)
+  set(SPQR_FIND_QUIETLY TRUE)
+endif (SPQR_INCLUDES AND SPQR_LIBRARIES)
+
+find_path(SPQR_INCLUDES
+  NAMES
+  SuiteSparseQR.hpp
+  PATHS
+  $ENV{SPQRDIR}
+  ${INCLUDE_INSTALL_DIR}
+  PATH_SUFFIXES
+  suitesparse
+  ufsparse
+)
+
+find_library(SPQR_LIBRARIES spqr $ENV{SPQRDIR} ${LIB_INSTALL_DIR})
+
+if(SPQR_LIBRARIES)
+
+  find_library(SUITESPARSE_LIBRARY SuiteSparse PATHS $ENV{SPQRDIR} ${LIB_INSTALL_DIR})
+  if (SUITESPARSE_LIBRARY)
+    set(SPQR_LIBRARIES ${SPQR_LIBRARIES} ${SUITESPARSE_LIBRARY})
+  endif (SUITESPARSE_LIBRARY)
+
+endif(SPQR_LIBRARIES)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(SPQR DEFAULT_MSG SPQR_INCLUDES SPQR_LIBRARIES)
+
+mark_as_advanced(SPQR_INCLUDES SPQR_LIBRARIES)
diff --git a/config/camchain-imucam-euroc.yaml b/config/camchain-imucam-euroc.yaml
new file mode 100644
index 0000000..dcbd9e5
--- /dev/null
+++ b/config/camchain-imucam-euroc.yaml
@@ -0,0 +1,33 @@
+# Modifications to the output file from Kalibr:
+# 1. For each matrix (e.g. cam0/T_cam_imu), remove the brackets and the
+#    leading dash of each row. Use one pair of brackets for the whole matrix.
+# 2. Add the T_imu_body at the end of the calibration file (usually set to identity).
+cam0:
+  T_cam_imu:
+    [0.014865542981794, 0.999557249008346, -0.025774436697440, 0.065222909535531,
+    -0.999880929698575, 0.014967213324719, 0.003756188357967, -0.020706385492719,
+    0.004140296794224, 0.025715529947966, 0.999660727177902, -0.008054602460030,
+    0, 0, 0, 1.000000000000000]
+  camera_model: pinhole
+  distortion_coeffs: [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05]
+  distortion_model: radtan
+  intrinsics: [458.654, 457.296, 367.215, 248.375]
+  resolution: [752, 480]
+  timeshift_cam_imu: 0.0
+cam1:
+  T_cam_imu:
+    [0.012555267089103, 0.999598781151433, -0.025389800891747, -0.044901980682509,
+    -0.999755099723116, 0.013011905181504, 0.017900583825251, -0.020569771258915,
+    0.018223771455443, 0.025158836311552, 0.999517347077547, -0.008638135126028,
+    0, 0, 0, 1.000000000000000]
+  camera_model: pinhole
+  distortion_coeffs: [-0.28368365, 0.07451284, -0.00010473, -3.55590700e-05]
+  distortion_model: radtan
+  intrinsics: [457.587, 456.134, 379.999, 255.238]
+  resolution: [752, 480]
+  timeshift_cam_imu: 0.0
+T_imu_body:
+  [1.0000, 0.0000, 0.0000, 0.0000,
+  0.0000, 1.0000, 0.0000, 0.0000,
+  0.0000, 0.0000, 1.0000, 0.0000,
+  0.0000, 0.0000, 0.0000, 1.0000]
+
diff --git a/config/camchain-imucam-fla.yaml b/config/camchain-imucam-fla.yaml
new file mode 100644
index 0000000..bb2109b
--- /dev/null
+++ b/config/camchain-imucam-fla.yaml
@@ -0,0 +1,37 @@
+# Modifications to the output file from Kalibr:
+# 1. For each matrix (e.g. cam0/T_cam_imu), remove the brackets and the
+#    leading dash of each row. Use one pair of brackets for the whole matrix.
+# 2. Add the T_imu_body at the end of the calibration file (usually set to identity).
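+# For example, a raw Kalibr block written as (values hypothetical):
+#   T_cam_imu:
+#   - [0.99, 0.01, 0.00, 0.10]
+#   - [0.01, 0.99, 0.00, 0.02]
+# becomes a single bracketed matrix:
+#   T_cam_imu:
+#     [0.99, 0.01, 0.00, 0.10,
+#     0.01, 0.99, 0.00, 0.02]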
+cam0:
+  T_cam_imu:
+    [0.9999354187625251, -0.008299416281612066, 0.007763890364879687, 0.1058254769257812,
+    -0.008323202950480819, -0.9999607512746784, 0.003036478688450292, -0.017679034754829417,
+    0.0077383846414136375, -0.0031009030240912025, -0.9999652502980172, -0.008890355463642424,
+    0.0, 0.0, 0.0, 1.0]
+  camera_model: pinhole
+  distortion_coeffs: [-0.0147, -0.00584, 0.00715, -0.00464]
+  distortion_model: equidistant
+  intrinsics: [606.57934, 606.73233, 474.93081, 402.27722]
+  resolution: [960, 800]
+  rostopic: /sync/cam0/image_raw
+cam1:
+  T_cam_imu:
+    [0.9999415499962613, -0.003314180239269491, 0.010291394483555276, -0.09412457355264209,
+    -0.0029741561536762054, -0.9994548489877669, -0.032880985843089204, -0.016816910884051604,
+    0.010394757632964142, 0.03284845573511056, -0.9994062877376598, -0.008908185988981819,
+    0.0, 0.0, 0.0, 1.0]
+  T_cn_cnm1:
+    [0.9999843700393054, -0.004977416649937181, -0.0025428275521419763, -0.200059,
+    0.005065643350062822, 0.9993405242433947, 0.03595604029590122, 0.000634053,
+    0.002362182447858024, -0.03596835970409878, 0.9993501279159637, -0.000909473,
+    0.0, 0.0, 0.0, 1.0]
+  camera_model: pinhole
+  distortion_coeffs: [-0.01565, -0.00026, -0.00454, 0.00311]
+  distortion_model: equidistant
+  intrinsics: [605.7278, 605.75661, 481.98865, 417.83476]
+  resolution: [960, 800]
+  rostopic: /sync/cam1/image_raw
+T_imu_body:
+  [1.0000, 0.0000, 0.0000, 0.0000,
+  0.0000, 1.0000, 0.0000, 0.0000,
+  0.0000, 0.0000, 1.0000, 0.0000,
+  0.0000, 0.0000, 0.0000, 1.0000]
diff --git a/include/msckf_vio/cam_state.h b/include/msckf_vio/cam_state.h
new file mode 100644
index 0000000..8d528a1
--- /dev/null
+++ b/include/msckf_vio/cam_state.h
@@ -0,0 +1,67 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#ifndef MSCKF_VIO_CAM_STATE_H
+#define MSCKF_VIO_CAM_STATE_H
+
+#include <map>
+#include <vector>
+#include <Eigen/Dense>
+
+#include "imu_state.h"
+
+namespace msckf_vio {
+/*
+ * @brief CAMState Stores a camera state in order to
+ *    form the measurement model.
+ */
+struct CAMState {
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+  // A unique identifier for the CAM state.
+  StateIDType id;
+
+  // Time when the state is recorded
+  double time;
+
+  // Orientation
+  // Takes a vector from the world frame to the camera frame.
+  Eigen::Vector4d orientation;
+
+  // Position of the camera frame in the world frame.
+  Eigen::Vector3d position;
+
+  // These two variables should have the same physical
+  // interpretation as `orientation` and `position`.
+  // These two variables are used to modify the measurement
+  // Jacobian matrices to make the observability matrix
+  // have the proper null space.
+  Eigen::Vector4d orientation_null;
+  Eigen::Vector3d position_null;
+
+  // Takes a vector from the cam0 frame to the cam1 frame.
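+  // (i.e., for a point observed in both cameras,
+  // p_cam1 = T_cam0_cam1.linear()*p_cam0 + T_cam0_cam1.translation().)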
+  static Eigen::Isometry3d T_cam0_cam1;
+
+  CAMState(): id(0), time(0),
+    orientation(Eigen::Vector4d(0, 0, 0, 1)),
+    position(Eigen::Vector3d::Zero()),
+    orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
+    position_null(Eigen::Vector3d(0, 0, 0)) {}
+
+  CAMState(const StateIDType& new_id ): id(new_id), time(0),
+    orientation(Eigen::Vector4d(0, 0, 0, 1)),
+    position(Eigen::Vector3d::Zero()),
+    orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
+    position_null(Eigen::Vector3d::Zero()) {}
+};
+
+typedef std::map<StateIDType, CAMState, std::less<int>,
+        Eigen::aligned_allocator<
+        std::pair<const StateIDType, CAMState> > > CamStateServer;
+} // namespace msckf_vio
+
+#endif // MSCKF_VIO_CAM_STATE_H
diff --git a/include/msckf_vio/feature.hpp b/include/msckf_vio/feature.hpp
new file mode 100644
index 0000000..0ee766e
--- /dev/null
+++ b/include/msckf_vio/feature.hpp
@@ -0,0 +1,440 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#ifndef MSCKF_VIO_FEATURE_H
+#define MSCKF_VIO_FEATURE_H
+
+#include <iostream>
+#include <map>
+#include <vector>
+
+#include <Eigen/Dense>
+#include <Eigen/Geometry>
+#include <Eigen/StdVector>
+
+#include "math_utils.hpp"
+#include "imu_state.h"
+#include "cam_state.h"
+
+namespace msckf_vio {
+
+/*
+ * @brief Feature Salient part of an image. Please refer
+ *    to the Appendix of "A Multi-State Constraint Kalman
+ *    Filter for Vision-aided Inertial Navigation" for how
+ *    the 3d position of a feature is initialized.
+ */
+struct Feature {
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+  typedef long long int FeatureIDType;
+
+  /*
+   * @brief OptimizationConfig Configuration parameters
+   *    for 3d feature position optimization.
+   */
+  struct OptimizationConfig {
+    double translation_threshold;
+    double huber_epsilon;
+    double estimation_precision;
+    double initial_damping;
+    int outer_loop_max_iteration;
+    int inner_loop_max_iteration;
+
+    OptimizationConfig():
+      translation_threshold(0.2),
+      huber_epsilon(0.01),
+      estimation_precision(5e-7),
+      initial_damping(1e-3),
+      outer_loop_max_iteration(10),
+      inner_loop_max_iteration(10) {
+      return;
+    }
+  };
+
+  // Constructors for the struct.
+  Feature(): id(0), position(Eigen::Vector3d::Zero()),
+    is_initialized(false) {}
+
+  Feature(const FeatureIDType& new_id): id(new_id),
+    position(Eigen::Vector3d::Zero()),
+    is_initialized(false) {}
+
+  /*
+   * @brief cost Compute the cost of the camera observations
+   * @param T_c0_ci A rigid body transformation that takes
+   *    a vector from the c0 frame to the ci frame.
+   * @param x The current estimation.
+   * @param z The ith measurement of the feature j in the ci frame.
+   * @return e The cost of this observation.
+   */
+  inline void cost(const Eigen::Isometry3d& T_c0_ci,
+      const Eigen::Vector3d& x, const Eigen::Vector2d& z,
+      double& e) const;
+
+  /*
+   * @brief jacobian Compute the Jacobian of the camera observation
+   * @param T_c0_ci A rigid body transformation that takes
+   *    a vector from the c0 frame to the ci frame.
+   * @param x The current estimation.
+   * @param z The actual measurement of the feature in the ci frame.
+   * @return J The computed Jacobian.
+   * @return r The computed residual.
+   * @return w Weight induced by the Huber kernel.
+   */
+  inline void jacobian(const Eigen::Isometry3d& T_c0_ci,
+      const Eigen::Vector3d& x, const Eigen::Vector2d& z,
+      Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
+      double& w) const;
+
+  /*
+   * @brief generateInitialGuess Compute the initial guess of
+   *    the feature's 3d position using only two views.
+   * @param T_c1_c2: A rigid body transformation taking
+   *    a vector from the c2 frame to the c1 frame.
+   * @param z1: feature observation in the c1 frame.
+   * @param z2: feature observation in the c2 frame.
+   * @return p: Computed feature position in the c1 frame.
+   */
+  inline void generateInitialGuess(
+      const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
+      const Eigen::Vector2d& z2, Eigen::Vector3d& p) const;
+
+  /*
+   * @brief checkMotion Check the input camera poses to ensure
+   *    there is enough translation to triangulate the feature
+   *    position.
+   * @param cam_states: input camera poses.
+   * @return True if the translation between the input camera
+   *    poses is sufficient.
+   */
+  inline bool checkMotion(
+      const CamStateServer& cam_states) const;
+
+  /*
+   * @brief initializePosition Initialize the feature position
+   *    based on all currently available measurements.
+   * @param cam_states: A map containing the camera poses, keyed
+   *    by the associated camera state ID.
+   * @return The computed 3d position is used to set the position
+   *    member variable. Note the resulting position is in the
+   *    world frame.
+   * @return True if the estimated 3d position of the feature
+   *    is valid.
+   */
+  inline bool initializePosition(
+      const CamStateServer& cam_states);
+
+
+  // A unique identifier for the feature.
+  // The variable type of id is set to FeatureIDType
+  // so that ids do not repeat during long-term operation.
+  FeatureIDType id;
+
+  // id for next feature
+  static FeatureIDType next_id;
+
+  // Store the observations of the features in the
+  // state_id(key)-image_coordinates(value) manner.
+  std::map<StateIDType, Eigen::Vector4d, std::less<StateIDType>,
+      Eigen::aligned_allocator<
+      std::pair<const StateIDType, Eigen::Vector4d> > > observations;
+
+  // 3d position of the feature in the world frame.
+  Eigen::Vector3d position;
+
+  // An indicator to show if the 3d position of the feature
+  // has been initialized or not.
+  bool is_initialized;
+
+  // Noise for a normalized feature measurement.
+  static double observation_noise;
+
+  // Optimization configuration for solving the 3d position.
+  static OptimizationConfig optimization_config;
+
+};
+
+typedef Feature::FeatureIDType FeatureIDType;
+typedef std::map<FeatureIDType, Feature, std::less<int>,
+    Eigen::aligned_allocator<
+    std::pair<const FeatureIDType, Feature> > > MapServer;
+
+
+void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
+    const Eigen::Vector3d& x, const Eigen::Vector2d& z,
+    double& e) const {
+  // Compute hi1, hi2, and hi3 as Equation (37).
+  const double& alpha = x(0);
+  const double& beta = x(1);
+  const double& rho = x(2);
+
+  Eigen::Vector3d h = T_c0_ci.linear()*
+    Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
+  double& h1 = h(0);
+  double& h2 = h(1);
+  double& h3 = h(2);
+
+  // Predict the feature observation in the ci frame.
+  Eigen::Vector2d z_hat(h1/h3, h2/h3);
+
+  // Compute the residual.
+  e = (z_hat-z).squaredNorm();
+  return;
+}
+
+void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
+    const Eigen::Vector3d& x, const Eigen::Vector2d& z,
+    Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
+    double& w) const {
+
+  // Compute hi1, hi2, and hi3 as Equation (37).
+  const double& alpha = x(0);
+  const double& beta = x(1);
+  const double& rho = x(2);
+
+  Eigen::Vector3d h = T_c0_ci.linear()*
+    Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
+  double& h1 = h(0);
+  double& h2 = h(1);
+  double& h3 = h(2);
+
+  // Compute the Jacobian.
+  Eigen::Matrix3d W;
+  W.leftCols<2>() = T_c0_ci.linear().leftCols<2>();
+  W.rightCols<1>() = T_c0_ci.translation();
+
+  J.row(0) = 1/h3*W.row(0) - h1/(h3*h3)*W.row(2);
+  J.row(1) = 1/h3*W.row(1) - h2/(h3*h3)*W.row(2);
+
+  // Compute the residual.
+  Eigen::Vector2d z_hat(h1/h3, h2/h3);
+  r = z_hat - z;
+
+  // Compute the weight based on the residual.
+  double e = r.norm();
+  if (e <= optimization_config.huber_epsilon)
+    w = 1.0;
+  else
+    w = optimization_config.huber_epsilon / (2*e);
+
+  return;
+}
+
+void Feature::generateInitialGuess(
+    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
+    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const {
+  // Construct a least-squares problem to solve the depth.
+  Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);
+
+  Eigen::Vector2d A(0.0, 0.0);
+  A(0) = m(0) - z2(0)*m(2);
+  A(1) = m(1) - z2(1)*m(2);
+
+  Eigen::Vector2d b(0.0, 0.0);
+  b(0) = z2(0)*T_c1_c2.translation()(2) - T_c1_c2.translation()(0);
+  b(1) = z2(1)*T_c1_c2.translation()(2) - T_c1_c2.translation()(1);
+
+  // Solve for the depth.
+  double depth = (A.transpose() * A).inverse() * A.transpose() * b;
+  p(0) = z1(0) * depth;
+  p(1) = z1(1) * depth;
+  p(2) = depth;
+  return;
+}
+
+bool Feature::checkMotion(
+    const CamStateServer& cam_states) const {
+
+  const StateIDType& first_cam_id = observations.begin()->first;
+  const StateIDType& last_cam_id = (--observations.end())->first;
+
+  Eigen::Isometry3d first_cam_pose;
+  first_cam_pose.linear() = quaternionToRotation(
+      cam_states.find(first_cam_id)->second.orientation).transpose();
+  first_cam_pose.translation() =
+    cam_states.find(first_cam_id)->second.position;
+
+  Eigen::Isometry3d last_cam_pose;
+  last_cam_pose.linear() = quaternionToRotation(
+      cam_states.find(last_cam_id)->second.orientation).transpose();
+  last_cam_pose.translation() =
+    cam_states.find(last_cam_id)->second.position;
+
+  // Get the direction of the feature when it is first observed.
+  // This direction is represented in the world frame.
+  Eigen::Vector3d feature_direction(
+      observations.begin()->second(0),
+      observations.begin()->second(1), 1.0);
+  feature_direction = feature_direction / feature_direction.norm();
+  feature_direction = first_cam_pose.linear()*feature_direction;
+
+  // Compute the translation between the first frame
+  // and the last frame. We assume the first frame and
+  // the last frame will provide the largest motion to
+  // speed up the checking process.
+  Eigen::Vector3d translation = last_cam_pose.translation() -
+    first_cam_pose.translation();
+  double parallel_translation =
+    translation.transpose()*feature_direction;
+  Eigen::Vector3d orthogonal_translation = translation -
+    parallel_translation*feature_direction;
+
+  if (orthogonal_translation.norm() >
+      optimization_config.translation_threshold)
+    return true;
+  else return false;
+}
+
+bool Feature::initializePosition(
+    const CamStateServer& cam_states) {
+  // Organize camera poses and feature observations properly.
+  std::vector<Eigen::Isometry3d,
+    Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
+  std::vector<Eigen::Vector2d,
+    Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
+
+  for (auto& m : observations) {
+    // TODO: This should be handled properly. Normally, the
+    //    required camera states should all be available in
+    //    the input cam_states buffer.
+    auto cam_state_iter = cam_states.find(m.first);
+    if (cam_state_iter == cam_states.end()) continue;
+
+    // Add the measurement.
+    measurements.push_back(m.second.head<2>());
+    measurements.push_back(m.second.tail<2>());
+
+    // This camera pose will take a vector from this camera frame
+    // to the world frame.
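+    // (i.e., cam0_pose.linear() is the camera-to-world rotation and
+    // cam0_pose.translation() is the camera position in the world,
+    // so p_world = cam0_pose * p_cam0.)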
+    Eigen::Isometry3d cam0_pose;
+    cam0_pose.linear() = quaternionToRotation(
+        cam_state_iter->second.orientation).transpose();
+    cam0_pose.translation() = cam_state_iter->second.position;
+
+    Eigen::Isometry3d cam1_pose;
+    cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
+
+    cam_poses.push_back(cam0_pose);
+    cam_poses.push_back(cam1_pose);
+  }
+
+  // All camera poses should be modified such that each takes a
+  // vector from the first camera frame in the buffer to this
+  // camera frame.
+  Eigen::Isometry3d T_c0_w = cam_poses[0];
+  for (auto& pose : cam_poses)
+    pose = pose.inverse() * T_c0_w;
+
+  // Generate the initial guess.
+  Eigen::Vector3d initial_position(0.0, 0.0, 0.0);
+  generateInitialGuess(cam_poses[cam_poses.size()-1], measurements[0],
+      measurements[measurements.size()-1], initial_position);
+  Eigen::Vector3d solution(
+      initial_position(0)/initial_position(2),
+      initial_position(1)/initial_position(2),
+      1.0/initial_position(2));
+
+  // Apply the Levenberg-Marquardt method to solve for the 3d position.
+  double lambda = optimization_config.initial_damping;
+  int inner_loop_cntr = 0;
+  int outer_loop_cntr = 0;
+  bool is_cost_reduced = false;
+  double delta_norm = 0;
+
+  // Compute the initial cost.
+  double total_cost = 0.0;
+  for (int i = 0; i < cam_poses.size(); ++i) {
+    double this_cost = 0.0;
+    cost(cam_poses[i], solution, measurements[i], this_cost);
+    total_cost += this_cost;
+  }
+
+  // Outer loop.
+  do {
+    Eigen::Matrix3d A = Eigen::Matrix3d::Zero();
+    Eigen::Vector3d b = Eigen::Vector3d::Zero();
+
+    for (int i = 0; i < cam_poses.size(); ++i) {
+      Eigen::Matrix<double, 2, 3> J;
+      Eigen::Vector2d r;
+      double w;
+
+      jacobian(cam_poses[i], solution, measurements[i], J, r, w);
+
+      if (w == 1) {
+        A += J.transpose() * J;
+        b += J.transpose() * r;
+      } else {
+        double w_square = w * w;
+        A += w_square * J.transpose() * J;
+        b += w_square * J.transpose() * r;
+      }
+    }
+
+    // Inner loop.
+    // Solve for the delta that can reduce the total cost.
+    do {
+      Eigen::Matrix3d damper = lambda * Eigen::Matrix3d::Identity();
+      Eigen::Vector3d delta = (A+damper).ldlt().solve(b);
+      Eigen::Vector3d new_solution = solution - delta;
+      delta_norm = delta.norm();
+
+      double new_cost = 0.0;
+      for (int i = 0; i < cam_poses.size(); ++i) {
+        double this_cost = 0.0;
+        cost(cam_poses[i], new_solution, measurements[i], this_cost);
+        new_cost += this_cost;
+      }
+
+      if (new_cost < total_cost) {
+        is_cost_reduced = true;
+        solution = new_solution;
+        total_cost = new_cost;
+        lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
+      } else {
+        is_cost_reduced = false;
+        lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
+      }
+
+    } while (inner_loop_cntr++ <
+        optimization_config.inner_loop_max_iteration && !is_cost_reduced);
+
+    inner_loop_cntr = 0;
+
+  } while (outer_loop_cntr++ <
+      optimization_config.outer_loop_max_iteration &&
+      delta_norm > optimization_config.estimation_precision);
+
+  // Convert the feature position from inverse depth
+  // representation to its 3d coordinate.
+  Eigen::Vector3d final_position(solution(0)/solution(2),
+      solution(1)/solution(2), 1.0/solution(2));
+
+  // Check if the solution is valid. Make sure the feature
+  // is in front of every camera frame observing it.
+  bool is_valid_solution = true;
+  for (const auto& pose : cam_poses) {
+    Eigen::Vector3d position =
+      pose.linear()*final_position + pose.translation();
+    if (position(2) <= 0) {
+      is_valid_solution = false;
+      break;
+    }
+  }
+
+  // Convert the feature position to the world frame.
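+  // (T_c0_w is the pose of the first camera in the world frame,
+  // so p_world = T_c0_w * p_c0.)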
+  position = T_c0_w.linear()*final_position + T_c0_w.translation();
+
+  if (is_valid_solution)
+    is_initialized = true;
+
+  return is_valid_solution;
+}
+} // namespace msckf_vio
+
+#endif // MSCKF_VIO_FEATURE_H
diff --git a/include/msckf_vio/image_processor.h b/include/msckf_vio/image_processor.h
new file mode 100644
index 0000000..086c5a9
--- /dev/null
+++ b/include/msckf_vio/image_processor.h
@@ -0,0 +1,398 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#ifndef MSCKF_VIO_IMAGE_PROCESSOR_H
+#define MSCKF_VIO_IMAGE_PROCESSOR_H
+
+#include <vector>
+#include <map>
+#include <boost/shared_ptr.hpp>
+#include <opencv2/opencv.hpp>
+#include <opencv2/video.hpp>
+
+#include <ros/ros.h>
+#include <cv_bridge/cv_bridge.h>
+#include <image_transport/image_transport.h>
+#include <sensor_msgs/Image.h>
+#include <sensor_msgs/Imu.h>
+#include <message_filters/subscriber.h>
+#include <message_filters/time_synchronizer.h>
+
+namespace msckf_vio {
+
+/*
+ * @brief ImageProcessor Detects and tracks features
+ *    in image sequences.
+ */
+class ImageProcessor {
+public:
+  // Constructor
+  ImageProcessor(ros::NodeHandle& n);
+  // Disable copy and assign constructors.
+  ImageProcessor(const ImageProcessor&) = delete;
+  ImageProcessor operator=(const ImageProcessor&) = delete;
+
+  // Destructor
+  ~ImageProcessor();
+
+  // Initialize the object.
+  bool initialize();
+
+  typedef boost::shared_ptr<ImageProcessor> Ptr;
+  typedef boost::shared_ptr<const ImageProcessor> ConstPtr;
+
+private:
+
+  /*
+   * @brief ProcessorConfig Configuration parameters for
+   *    feature detection and tracking.
+   */
+  struct ProcessorConfig {
+    int grid_row;
+    int grid_col;
+    int grid_min_feature_num;
+    int grid_max_feature_num;
+
+    int pyramid_levels;
+    int patch_size;
+    int fast_threshold;
+    int max_iteration;
+    double track_precision;
+    double ransac_threshold;
+    double stereo_threshold;
+  };
+
+  /*
+   * @brief FeatureIDType An alias for unsigned long long int.
+   */
+  typedef unsigned long long int FeatureIDType;
+
+  /*
+   * @brief FeatureMetaData Contains necessary information
+   *    of a feature for easy access.
+   */
+  struct FeatureMetaData {
+    FeatureIDType id;
+    float response;
+    int lifetime;
+    cv::Point2f cam0_point;
+    cv::Point2f cam1_point;
+  };
+
+  /*
+   * @brief GridFeatures Organize features based on the grid
+   *    they belong to. Note that the key is encoded by the
+   *    grid index.
+   */
+  typedef std::map<int, std::vector<FeatureMetaData> > GridFeatures;
+
+  /*
+   * @brief keyPointCompareByResponse
+   *    Compare two keypoints based on the response.
+   */
+  static bool keyPointCompareByResponse(
+      const cv::KeyPoint& pt1,
+      const cv::KeyPoint& pt2) {
+    // Keypoints with higher response will be at the
+    // beginning of the vector.
+    return pt1.response > pt2.response;
+  }
+  /*
+   * @brief featureCompareByResponse
+   *    Compare two features based on the response.
+   */
+  static bool featureCompareByResponse(
+      const FeatureMetaData& f1,
+      const FeatureMetaData& f2) {
+    // Features with higher response will be at the
+    // beginning of the vector.
+    return f1.response > f2.response;
+  }
+  /*
+   * @brief featureCompareByLifetime
+   *    Compare two features based on the lifetime.
+   */
+  static bool featureCompareByLifetime(
+      const FeatureMetaData& f1,
+      const FeatureMetaData& f2) {
+    // Features with longer lifetime will be at the
+    // beginning of the vector.
+    return f1.lifetime > f2.lifetime;
+  }
+
+  /*
+   * @brief loadParameters
+   *    Load parameters from the parameter server.
+   */
+  bool loadParameters();
+
+  /*
+   * @brief createRosIO
+   *    Create ros publishers and subscribers.
+   */
+  bool createRosIO();
+
+  /*
+   * @brief stereoCallback
+   *    Callback function for the stereo images.
+   * @param cam0_img left image.
+   * @param cam1_img right image.
+ */ + void stereoCallback( + const sensor_msgs::ImageConstPtr& cam0_img, + const sensor_msgs::ImageConstPtr& cam1_img); + + /* + * @brief imuCallback + * Callback function for the imu message. + * @param msg IMU msg. + */ + void imuCallback(const sensor_msgs::ImuConstPtr& msg); + + /* + * @initializeFirstFrame + * Initialize the image processing sequence, which is + * bascially detect new features on the first set of + * stereo images. + */ + void initializeFirstFrame(); + + /* + * @brief trackFeatures + * Tracker features on the newly received stereo images. + */ + void trackFeatures(); + + /* + * @addNewFeatures + * Detect new features on the image to ensure that the + * features are uniformly distributed on the image. + */ + void addNewFeatures(); + + /* + * @brief pruneGridFeatures + * Remove some of the features of a grid in case there are + * too many features inside of that grid, which ensures the + * number of features within each grid is bounded. + */ + void pruneGridFeatures(); + + /* + * @brief publish + * Publish the features on the current image including + * both the tracked and newly detected ones. + */ + void publish(); + + /* + * @brief drawFeaturesMono + * Draw tracked and newly detected features on the left + * image only. + */ + void drawFeaturesMono(); + /* + * @brief drawFeaturesStereo + * Draw tracked and newly detected features on the + * stereo images. + */ + void drawFeaturesStereo(); + + /* + * @brief createImagePyramids + * Create image pyramids used for klt tracking. + */ + void createImagePyramids(); + + /* + * @brief integrateImuData Integrates the IMU gyro readings + * between the two consecutive images, which is used for + * both tracking prediction and 2-point RANSAC. + * @return cam0_R_p_c: a rotation matrix which takes a vector + * from previous cam0 frame to current cam0 frame. + * @return cam1_R_p_c: a rotation matrix which takes a vector + * from previous cam1 frame to current cam1 frame. + */ + void integrateImuData(cv::Matx33f& cam0_R_p_c, + cv::Matx33f& cam1_R_p_c); + + /* + * @brief predictFeatureTracking Compensates the rotation + * between consecutive camera frames so that feature + * tracking would be more robust and fast. + * @param input_pts: features in the previous image to be tracked. + * @param R_p_c: a rotation matrix takes a vector in the previous + * camera frame to the current camera frame. + * @param intrinsics: intrinsic matrix of the camera. + * @return compensated_pts: predicted locations of the features + * in the current image based on the provided rotation. + * + * Note that the input and output points are of pixel coordinates. + */ + void predictFeatureTracking( + const std::vector& input_pts, + const cv::Matx33f& R_p_c, + const cv::Vec4d& intrinsics, + std::vector& compenstated_pts); + + /* + * @brief twoPointRansac Applies two point ransac algorithm + * to mark the inliers in the input set. + * @param pts1: first set of points. + * @param pts2: second set of points. + * @param R_p_c: a rotation matrix takes a vector in the previous + * camera frame to the current camera frame. + * @param intrinsics: intrinsics of the camera. + * @param distortion_model: distortion model of the camera. + * @param distortion_coeffs: distortion coefficients. + * @param inlier_error: acceptable error to be considered as an inlier. + * @param success_probability: the required probability of success. + * @return inlier_flag: 1 for inliers and 0 for outliers. 
+ */ + void twoPointRansac( + const std::vector& pts1, + const std::vector& pts2, + const cv::Matx33f& R_p_c, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs, + const double& inlier_error, + const double& success_probability, + std::vector& inlier_markers); + void undistortPoints( + const std::vector& pts_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs, + std::vector& pts_out, + const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(), + const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0)); + void rescalePoints( + std::vector& pts1, + std::vector& pts2, + float& scaling_factor); + std::vector distortPoints( + const std::vector& pts_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs); + + /* + * @brief stereoMatch Matches features with stereo image pairs. + * @param cam0_points: points in the primary image. + * @return cam1_points: points in the secondary image. + * @return inlier_markers: 1 if the match is valid, 0 otherwise. + */ + void stereoMatch( + const std::vector& cam0_points, + std::vector& cam1_points, + std::vector& inlier_markers); + + /* + * @brief removeUnmarkedElements Remove the unmarked elements + * within a vector. + * @param raw_vec: vector with outliers. + * @param markers: 0 will represent a outlier, 1 will be an inlier. + * @return refined_vec: a vector without outliers. + * + * Note that the order of the inliers in the raw_vec is perserved + * in the refined_vec. + */ + template + void removeUnmarkedElements( + const std::vector& raw_vec, + const std::vector& markers, + std::vector& refined_vec) { + if (raw_vec.size() != markers.size()) { + ROS_WARN("The input size of raw_vec(%lu) and markers(%lu) does not match...", + raw_vec.size(), markers.size()); + } + for (int i = 0; i < markers.size(); ++i) { + if (markers[i] == 0) continue; + refined_vec.push_back(raw_vec[i]); + } + return; + } + + // Indicate if this is the first image message. + bool is_first_img; + + // ID for the next new feature. + FeatureIDType next_feature_id; + + // Feature detector + ProcessorConfig processor_config; + cv::Ptr detector_ptr; + + // IMU message buffer. + std::vector imu_msg_buffer; + + // Camera calibration parameters + std::string cam0_distortion_model; + cv::Vec2i cam0_resolution; + cv::Vec4d cam0_intrinsics; + cv::Vec4d cam0_distortion_coeffs; + + std::string cam1_distortion_model; + cv::Vec2i cam1_resolution; + cv::Vec4d cam1_intrinsics; + cv::Vec4d cam1_distortion_coeffs; + + // Take a vector from cam0 frame to the IMU frame. + cv::Matx33d R_cam0_imu; + cv::Vec3d t_cam0_imu; + // Take a vector from cam1 frame to the IMU frame. + cv::Matx33d R_cam1_imu; + cv::Vec3d t_cam1_imu; + + // Previous and current images + cv_bridge::CvImageConstPtr cam0_prev_img_ptr; + cv_bridge::CvImageConstPtr cam0_curr_img_ptr; + cv_bridge::CvImageConstPtr cam1_curr_img_ptr; + + // Pyramids for previous and current image + std::vector prev_cam0_pyramid_; + std::vector curr_cam0_pyramid_; + std::vector curr_cam1_pyramid_; + + // Features in the previous and current image. + boost::shared_ptr prev_features_ptr; + boost::shared_ptr curr_features_ptr; + + // Number of features after each outlier removal step. + int before_tracking; + int after_tracking; + int after_matching; + int after_ransac; + + // Ros node handle + ros::NodeHandle nh; + + // Subscribers and publishers. 
+  message_filters::Subscriber<
+    sensor_msgs::Image> cam0_img_sub;
+  message_filters::Subscriber<
+    sensor_msgs::Image> cam1_img_sub;
+  message_filters::TimeSynchronizer<
+    sensor_msgs::Image, sensor_msgs::Image> stereo_sub;
+  ros::Subscriber imu_sub;
+  ros::Publisher feature_pub;
+  ros::Publisher tracking_info_pub;
+  image_transport::Publisher debug_stereo_pub;
+
+  // Debugging
+  std::map<FeatureIDType, int> feature_lifetime;
+  void updateFeatureLifetime();
+  void featureLifetimeStatistics();
+};
+
+typedef ImageProcessor::Ptr ImageProcessorPtr;
+typedef ImageProcessor::ConstPtr ImageProcessorConstPtr;
+
+} // end namespace msckf_vio
+
+#endif
diff --git a/include/msckf_vio/image_processor_nodelet.h b/include/msckf_vio/image_processor_nodelet.h
new file mode 100644
index 0000000..ebe1c91
--- /dev/null
+++ b/include/msckf_vio/image_processor_nodelet.h
@@ -0,0 +1,28 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#ifndef IMAGE_PROCESSOR_NODELET_H
+#define IMAGE_PROCESSOR_NODELET_H
+
+#include <nodelet/nodelet.h>
+#include <pluginlib/class_list_macros.h>
+#include <msckf_vio/image_processor.h>
+
+namespace msckf_vio {
+class ImageProcessorNodelet : public nodelet::Nodelet {
+public:
+  ImageProcessorNodelet() { return; }
+  ~ImageProcessorNodelet() { return; }
+
+private:
+  virtual void onInit();
+  ImageProcessorPtr img_processor_ptr;
+};
+} // end namespace msckf_vio
+
+#endif
+
diff --git a/include/msckf_vio/imu_state.h b/include/msckf_vio/imu_state.h
new file mode 100644
index 0000000..7dd148b
--- /dev/null
+++ b/include/msckf_vio/imu_state.h
@@ -0,0 +1,110 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#ifndef MSCKF_VIO_IMU_STATE_H
+#define MSCKF_VIO_IMU_STATE_H
+
+#include <map>
+#include <vector>
+#include <Eigen/Dense>
+#include <Eigen/Geometry>
+
+#define GRAVITY_ACCELERATION 9.81
+
+namespace msckf_vio {
+
+/*
+ * @brief IMUState State for IMU
+ */
+struct IMUState {
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+  typedef long long int StateIDType;
+
+  // A unique identifier for the IMU state.
+  StateIDType id;
+
+  // id for next IMU state
+  static StateIDType next_id;
+
+  // Time when the state is recorded
+  double time;
+
+  // Orientation
+  // Takes a vector from the world frame to
+  // the IMU (body) frame.
+  Eigen::Vector4d orientation;
+
+  // Position of the IMU (body) frame
+  // in the world frame.
+  Eigen::Vector3d position;
+
+  // Velocity of the IMU (body) frame
+  // in the world frame.
+  Eigen::Vector3d velocity;
+
+  // Bias for measured angular velocity
+  // and acceleration.
+  Eigen::Vector3d gyro_bias;
+  Eigen::Vector3d acc_bias;
+
+  // Transformation between the IMU and the
+  // left camera (cam0)
+  Eigen::Matrix3d R_imu_cam0;
+  Eigen::Vector3d t_cam0_imu;
+
+  // These three variables should have the same physical
+  // interpretation as `orientation`, `position`, and
+  // `velocity`. These three variables are used to modify
+  // the transition matrices to make the observability matrix
+  // have the proper null space.
+  Eigen::Vector4d orientation_null;
+  Eigen::Vector3d position_null;
+  Eigen::Vector3d velocity_null;
+
+  // Process noise
+  static double gyro_noise;
+  static double acc_noise;
+  static double gyro_bias_noise;
+  static double acc_bias_noise;
+
+  // Gravity vector in the world frame
+  static Eigen::Vector3d gravity;
+
+  // Transformation offset from the IMU frame to
+  // the body frame. The transformation takes a
+  // vector from the IMU frame to the body frame.
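+  // (i.e., p_body = T_imu_body * p_imu.)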
+  // The z axis of the body frame should point upwards.
+  // Normally, this transform should be identity.
+  static Eigen::Isometry3d T_imu_body;
+
+  IMUState(): id(0), time(0),
+    orientation(Eigen::Vector4d(0, 0, 0, 1)),
+    position(Eigen::Vector3d::Zero()),
+    velocity(Eigen::Vector3d::Zero()),
+    gyro_bias(Eigen::Vector3d::Zero()),
+    acc_bias(Eigen::Vector3d::Zero()),
+    orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
+    position_null(Eigen::Vector3d::Zero()),
+    velocity_null(Eigen::Vector3d::Zero()) {}
+
+  IMUState(const StateIDType& new_id): id(new_id), time(0),
+    orientation(Eigen::Vector4d(0, 0, 0, 1)),
+    position(Eigen::Vector3d::Zero()),
+    velocity(Eigen::Vector3d::Zero()),
+    gyro_bias(Eigen::Vector3d::Zero()),
+    acc_bias(Eigen::Vector3d::Zero()),
+    orientation_null(Eigen::Vector4d(0, 0, 0, 1)),
+    position_null(Eigen::Vector3d::Zero()),
+    velocity_null(Eigen::Vector3d::Zero()) {}
+
+};
+
+typedef IMUState::StateIDType StateIDType;
+
+} // namespace msckf_vio
+
+#endif // MSCKF_VIO_IMU_STATE_H
diff --git a/include/msckf_vio/math_utils.hpp b/include/msckf_vio/math_utils.hpp
new file mode 100644
index 0000000..163761e
--- /dev/null
+++ b/include/msckf_vio/math_utils.hpp
@@ -0,0 +1,163 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#ifndef MSCKF_VIO_MATH_UTILS_HPP
+#define MSCKF_VIO_MATH_UTILS_HPP
+
+#include <cmath>
+#include <Eigen/Dense>
+
+namespace msckf_vio {
+
+/*
+ * @brief Create a skew-symmetric matrix from a 3-element vector.
+ * @note Performs the operation:
+ *    w  ->  [  0 -w3  w2]
+ *           [ w3   0 -w1]
+ *           [-w2  w1   0]
+ */
+inline Eigen::Matrix3d skewSymmetric(const Eigen::Vector3d& w) {
+  Eigen::Matrix3d w_hat;
+  w_hat(0, 0) = 0;
+  w_hat(0, 1) = -w(2);
+  w_hat(0, 2) = w(1);
+  w_hat(1, 0) = w(2);
+  w_hat(1, 1) = 0;
+  w_hat(1, 2) = -w(0);
+  w_hat(2, 0) = -w(1);
+  w_hat(2, 1) = w(0);
+  w_hat(2, 2) = 0;
+  return w_hat;
+}
+
+/*
+ * @brief Normalize the given quaternion to a unit quaternion.
+ */
+inline void quaternionNormalize(Eigen::Vector4d& q) {
+  double norm = q.norm();
+  q = q / norm;
+  return;
+}
+
+/*
+ * @brief Perform q1 * q2
+ */
+inline Eigen::Vector4d quaternionMultiplication(
+    const Eigen::Vector4d& q1,
+    const Eigen::Vector4d& q2) {
+  Eigen::Matrix4d L;
+  L(0, 0) = q1(3); L(0, 1) = q1(2); L(0, 2) = -q1(1); L(0, 3) = q1(0);
+  L(1, 0) = -q1(2); L(1, 1) = q1(3); L(1, 2) = q1(0); L(1, 3) = q1(1);
+  L(2, 0) = q1(1); L(2, 1) = -q1(0); L(2, 2) = q1(3); L(2, 3) = q1(2);
+  L(3, 0) = -q1(0); L(3, 1) = -q1(1); L(3, 2) = -q1(2); L(3, 3) = q1(3);
+
+  Eigen::Vector4d q = L * q2;
+  quaternionNormalize(q);
+  return q;
+}
+
+/*
+ * @brief Convert the vector part of a quaternion to a
+ *    full quaternion.
+ * @note This function is useful to convert a delta quaternion,
+ *    which is usually a 3x1 vector, to a full quaternion.
+ *    For more details, check Equations (238) and (239) in
+ *    "Indirect Kalman Filter for 3D Attitude Estimation:
+ *    A Tutorial for Quaternion Algebra".
+ */
+inline Eigen::Vector4d smallAngleQuaternion(
+    const Eigen::Vector3d& dtheta) {
+
+  Eigen::Vector3d dq = dtheta / 2.0;
+  Eigen::Vector4d q;
+  double dq_square_norm = dq.squaredNorm();
+
+  if (dq_square_norm <= 1) {
+    q.head<3>() = dq;
+    q(3) = std::sqrt(1-dq_square_norm);
+  } else {
+    q.head<3>() = dq;
+    q(3) = 1;
+    q = q / std::sqrt(1+dq_square_norm);
+  }
+
+  return q;
+}
+
+/*
+ * @brief Convert a quaternion to the corresponding rotation matrix
+ * @note Pay attention to the convention used. The function follows the
+ *    conversion in "Indirect Kalman Filter for 3D Attitude Estimation:
+ *    A Tutorial for Quaternion Algebra", Equation (78).
+ *
+ *    The input quaternion should be in the form
+ *    [q1, q2, q3, q4(scalar)]^T
+ */
+inline Eigen::Matrix3d quaternionToRotation(
+    const Eigen::Vector4d& q) {
+  const Eigen::Vector3d& q_vec = q.block(0, 0, 3, 1);
+  const double& q4 = q(3);
+  Eigen::Matrix3d R =
+    (2*q4*q4-1)*Eigen::Matrix3d::Identity() -
+    2*q4*skewSymmetric(q_vec) +
+    2*q_vec*q_vec.transpose();
+  //TODO: Is it necessary to use the approximation equation
+  //    (Equation (87)) when the rotation angle is small?
+  return R;
+}
+
+/*
+ * @brief Convert a rotation matrix to a quaternion.
+ * @note Pay attention to the convention used. The function follows the
+ *    conversion in "Indirect Kalman Filter for 3D Attitude Estimation:
+ *    A Tutorial for Quaternion Algebra", Equation (78).
+ *
+ *    The output quaternion is in the form
+ *    [q1, q2, q3, q4(scalar)]^T
+ */
+inline Eigen::Vector4d rotationToQuaternion(
+    const Eigen::Matrix3d& R) {
+  Eigen::Vector4d score;
+  score(0) = R(0, 0);
+  score(1) = R(1, 1);
+  score(2) = R(2, 2);
+  score(3) = R.trace();
+
+  int max_row = 0, max_col = 0;
+  score.maxCoeff(&max_row, &max_col);
+
+  Eigen::Vector4d q = Eigen::Vector4d::Zero();
+  if (max_row == 0) {
+    q(0) = std::sqrt(1+2*R(0, 0)-R.trace()) / 2.0;
+    q(1) = (R(0, 1)+R(1, 0)) / (4*q(0));
+    q(2) = (R(0, 2)+R(2, 0)) / (4*q(0));
+    q(3) = (R(1, 2)-R(2, 1)) / (4*q(0));
+  } else if (max_row == 1) {
+    q(1) = std::sqrt(1+2*R(1, 1)-R.trace()) / 2.0;
+    q(0) = (R(0, 1)+R(1, 0)) / (4*q(1));
+    q(2) = (R(1, 2)+R(2, 1)) / (4*q(1));
+    q(3) = (R(2, 0)-R(0, 2)) / (4*q(1));
+  } else if (max_row == 2) {
+    q(2) = std::sqrt(1+2*R(2, 2)-R.trace()) / 2.0;
+    q(0) = (R(0, 2)+R(2, 0)) / (4*q(2));
+    q(1) = (R(1, 2)+R(2, 1)) / (4*q(2));
+    q(3) = (R(0, 1)-R(1, 0)) / (4*q(2));
+  } else {
+    q(3) = std::sqrt(1+R.trace()) / 2.0;
+    q(0) = (R(1, 2)-R(2, 1)) / (4*q(3));
+    q(1) = (R(2, 0)-R(0, 2)) / (4*q(3));
+    q(2) = (R(0, 1)-R(1, 0)) / (4*q(3));
+  }
+
+  if (q(3) < 0) q = -q;
+  quaternionNormalize(q);
+  return q;
+}
+
+} // end namespace msckf_vio
+
+#endif // MSCKF_VIO_MATH_UTILS_HPP
diff --git a/include/msckf_vio/msckf_vio.h b/include/msckf_vio/msckf_vio.h
new file mode 100644
index 0000000..8077acf
--- /dev/null
+++ b/include/msckf_vio/msckf_vio.h
@@ -0,0 +1,242 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#ifndef MSCKF_VIO_H
+#define MSCKF_VIO_H
+
+#include <map>
+#include <set>
+#include <vector>
+#include <string>
+#include <Eigen/Dense>
+#include <Eigen/Geometry>
+#include <boost/shared_ptr.hpp>
+
+#include <ros/ros.h>
+#include <sensor_msgs/Imu.h>
+#include <nav_msgs/Odometry.h>
+#include <tf/transform_broadcaster.h>
+#include <std_srvs/Trigger.h>
+
+#include "imu_state.h"
+#include "cam_state.h"
+#include "feature.hpp"
+#include <msckf_vio/CameraMeasurement.h>
+
+namespace msckf_vio {
+/*
+ * @brief MsckfVio Implements the algorithm in
+ *    Anastasios I. Mourikis, and Stergios I. Roumeliotis,
+ *    "A Multi-State Constraint Kalman Filter for Vision-aided
+ *    Inertial Navigation",
+ *    http://www.ee.ucr.edu/~mourikis/tech_reports/TR_MSCKF.pdf
+ */
+class MsckfVio {
+  public:
+    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+    // Constructor
+    MsckfVio(ros::NodeHandle& pnh);
+    // Disable copy and assign constructor
+    MsckfVio(const MsckfVio&) = delete;
+    MsckfVio operator=(const MsckfVio&) = delete;
+
+    // Destructor
+    ~MsckfVio() {}
+
+    /*
+     * @brief initialize Initialize the VIO.
+     */
+    bool initialize();
+
+    /*
+     * @brief reset Resets the VIO to initial status.
+     */
+    void reset();
+
+    typedef boost::shared_ptr<MsckfVio> Ptr;
+    typedef boost::shared_ptr<const MsckfVio> ConstPtr;
+
+  private:
+    /*
+     * @brief StateServer Stores one IMU state and several
+     *    camera states for constructing the measurement
+     *    model.
+     */
+    struct StateServer {
+      IMUState imu_state;
+      CamStateServer cam_states;
+
+      // State covariance matrix
+      Eigen::MatrixXd state_cov;
+      Eigen::Matrix<double, 12, 12> continuous_noise_cov;
+    };
+
+
+    /*
+     * @brief loadParameters
+     *    Load parameters from the parameter server.
+     */
+    bool loadParameters();
+
+    /*
+     * @brief createRosIO
+     *    Create ros publishers and subscribers.
+     */
+    bool createRosIO();
+
+    /*
+     * @brief imuCallback
+     *    Callback function for the imu message.
+     * @param msg IMU msg.
+     */
+    void imuCallback(const sensor_msgs::ImuConstPtr& msg);
+
+    /*
+     * @brief featureCallback
+     *    Callback function for feature measurements.
+     * @param msg Stereo feature measurements.
+     */
+    void featureCallback(const CameraMeasurementConstPtr& msg);
+
+    /*
+     * @brief publish Publish the results of VIO.
+     * @param time The time stamp of output msgs.
+     */
+    void publish(const ros::Time& time);
+
+    /*
+     * @brief initializeGravityAndBias
+     *    Initialize the IMU bias and initial orientation
+     *    based on the first few IMU readings.
+     */
+    void initializeGravityAndBias();
+
+    /*
+     * @brief resetCallback
+     *    Callback function for the reset service.
+     *    Note that this is NOT an anytime reset. This function should
+     *    only be called before the sensor suite starts moving,
+     *    e.g. while the robot is still on the ground.
+     */
+    bool resetCallback(std_srvs::Trigger::Request& req,
+        std_srvs::Trigger::Response& res);
+
+    // Filter related functions
+    // Propagate the state
+    void batchImuProcessing(
+        const double& time_bound);
+    void processModel(const double& time,
+        const Eigen::Vector3d& m_gyro,
+        const Eigen::Vector3d& m_acc);
+    void predictNewState(const double& dt,
+        const Eigen::Vector3d& gyro,
+        const Eigen::Vector3d& acc);
+
+    // Measurement update
+    void stateAugmentation(const double& time);
+    void addFeatureObservations(const CameraMeasurementConstPtr& msg);
+    // This function is used to compute the measurement Jacobian
+    // for a single feature observed at a single camera frame.
+    void measurementJacobian(const StateIDType& cam_state_id,
+        const FeatureIDType& feature_id,
+        Eigen::Matrix<double, 4, 6>& H_x,
+        Eigen::Matrix<double, 4, 3>& H_f,
+        Eigen::Vector4d& r);
+    // This function computes the Jacobian of all measurements viewed
+    // in the given camera states of this feature.
+    void featureJacobian(const FeatureIDType& feature_id,
+        const std::vector<StateIDType>& cam_state_ids,
+        Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
+    void measurementUpdate(const Eigen::MatrixXd& H,
+        const Eigen::VectorXd& r);
+    bool gatingTest(const Eigen::MatrixXd& H,
+        const Eigen::VectorXd& r, const int& dof);
+    void removeLostFeatures();
+    void findRedundantCamStates(
+        std::vector<StateIDType>& rm_cam_state_ids);
+    void pruneCamStateBuffer();
+    // Reset the system online if the uncertainty is too large.
+    void onlineReset();
+
+    // Chi squared test table.
+    static std::map<int, double> chi_squared_test_table;
+
+    // State vector
+    StateServer state_server;
+    // Maximum number of camera states
+    int max_cam_state_size;
+
+    // Features used
+    MapServer map_server;
+
+    // IMU data buffer
+    // This buffer is used to handle the asynchrony or transfer
+    // delay between the IMU and image messages.
+    std::vector<sensor_msgs::Imu> imu_msg_buffer;
+
+    // Indicate if the gravity vector is set.
+    bool is_gravity_set;
+
+    // Indicate if the received image is the first one. The
+    // system will start after receiving the first image.
+    bool is_first_img;
+
+    // The position uncertainty threshold is used to determine
+    // when to reset the system online. Otherwise, the ever-
+    // increasing uncertainty will make the estimation unstable.
+    // Note that this online reset amounts to dead-reckoning.
+    // Set this threshold to nonpositive to disable online reset.
+    double position_std_threshold;
+
+    // Tracking rate
+    double tracking_rate;
+
+    // Thresholds for determining keyframes
+    double translation_threshold;
+    double rotation_threshold;
+    double tracking_rate_threshold;
+
+    // Ros node handle
+    ros::NodeHandle nh;
+
+    // Subscribers and publishers
+    ros::Subscriber imu_sub;
+    ros::Subscriber feature_sub;
+    ros::Publisher odom_pub;
+    ros::Publisher feature_pub;
+    tf::TransformBroadcaster tf_pub;
+    ros::ServiceServer reset_srv;
+
+    // Frame id
+    std::string fixed_frame_id;
+    std::string child_frame_id;
+
+    // Whether to publish tf or not.
+    bool publish_tf;
+
+    // Frame rate of the stereo images. This variable is
+    // only used to determine the timing threshold of
+    // each iteration of the filter.
+    double frame_rate;
+
+    // Debugging variables and functions
+    void mocapOdomCallback(
+        const nav_msgs::OdometryConstPtr& msg);
+
+    ros::Subscriber mocap_odom_sub;
+    ros::Publisher mocap_odom_pub;
+    geometry_msgs::TransformStamped raw_mocap_odom_msg;
+    Eigen::Isometry3d mocap_initial_frame;
+};
+
+typedef MsckfVio::Ptr MsckfVioPtr;
+typedef MsckfVio::ConstPtr MsckfVioConstPtr;
+
+} // namespace msckf_vio
+
+#endif
diff --git a/include/msckf_vio/msckf_vio_nodelet.h b/include/msckf_vio/msckf_vio_nodelet.h
new file mode 100644
index 0000000..75d9ca2
--- /dev/null
+++ b/include/msckf_vio/msckf_vio_nodelet.h
@@ -0,0 +1,28 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#ifndef MSCKF_VIO_NODELET_H
+#define MSCKF_VIO_NODELET_H
+
+#include <nodelet/nodelet.h>
+#include <pluginlib/class_list_macros.h>
+#include <msckf_vio/msckf_vio.h>
+
+namespace msckf_vio {
+class MsckfVioNodelet : public nodelet::Nodelet {
+public:
+  MsckfVioNodelet() { return; }
+  ~MsckfVioNodelet() { return; }
+
+private:
+  virtual void onInit();
+  MsckfVioPtr msckf_vio_ptr;
+};
+} // end namespace msckf_vio
+
+#endif
+
diff --git a/launch/image_processor_euroc.launch b/launch/image_processor_euroc.launch
new file mode 100644
index 0000000..f728112
--- /dev/null
+++ b/launch/image_processor_euroc.launch
@@ -0,0 +1,33 @@
+<!-- XML body (33 lines) not recoverable from this extraction -->
diff --git a/launch/image_processor_fla.launch b/launch/image_processor_fla.launch
new file mode 100644
index 0000000..75c634e
--- /dev/null
+++ b/launch/image_processor_fla.launch
@@ -0,0 +1,33 @@
+<!-- XML body (33 lines) not recoverable from this extraction -->
diff --git a/launch/msckf_vio_euroc.launch b/launch/msckf_vio_euroc.launch
new file mode 100644
index 0000000..bfbf7eb
--- /dev/null
+++ b/launch/msckf_vio_euroc.launch
@@ -0,0 +1,61 @@
+<!-- XML body (61 lines) not recoverable from this extraction -->
diff --git a/launch/msckf_vio_fla.launch b/launch/msckf_vio_fla.launch
new file mode 100644
index 0000000..5c5c873
--- /dev/null
+++ b/launch/msckf_vio_fla.launch
@@ -0,0 +1,61 @@
+<!-- XML body (61 lines) not recoverable from this extraction -->
diff --git a/launch/reset.launch b/launch/reset.launch
new file mode 100644
index 0000000..87dfba8
--- /dev/null
+++ b/launch/reset.launch
@@ -0,0 +1,7 @@
+<!-- XML body (7 lines) not recoverable from this extraction -->
diff --git a/msg/CameraMeasurement.msg b/msg/CameraMeasurement.msg
new file mode 100644
index 0000000..29a69bf
--- /dev/null
+++ b/msg/CameraMeasurement.msg
@@ -0,0 +1,4 @@
+std_msgs/Header header
+# All features on the current image,
+# including tracked ones and newly detected ones.
+FeatureMeasurement[] features
diff --git a/msg/FeatureMeasurement.msg b/msg/FeatureMeasurement.msg
new file mode 100644
index 0000000..b688a64
--- /dev/null
+++ b/msg/FeatureMeasurement.msg
@@ -0,0 +1,6 @@
+uint64 id
+# Normalized feature coordinates (with identity intrinsic matrix)
+float64 u0 # horizontal coordinate in cam0
+float64 v0 # vertical coordinate in cam0
+float64 u1 # horizontal coordinate in cam1
+float64 v1 # vertical coordinate in cam1
diff --git a/msg/TrackingInfo.msg b/msg/TrackingInfo.msg
new file mode 100644
index 0000000..24674ca
--- /dev/null
+++ b/msg/TrackingInfo.msg
@@ -0,0 +1,7 @@
+std_msgs/Header header
+
+# Number of features after each outlier removal step.
+int16 before_tracking
+int16 after_tracking
+int16 after_matching
+int16 after_ransac
diff --git a/nodelets.xml b/nodelets.xml
new file mode 100644
index 0000000..9f1025a
--- /dev/null
+++ b/nodelets.xml
@@ -0,0 +1,20 @@
+<library path="lib/libmsckf_vio_nodelet">
+  <class name="msckf_vio/MsckfVioNodelet"
+      type="msckf_vio::MsckfVioNodelet"
+      base_class_type="nodelet::Nodelet">
+    <description>
+      Multi-State Constraint Kalman filter for vision-
+      aided inertial navigation with observability constraint.
+    </description>
+  </class>
+</library>
+
+<library path="lib/libimage_processor_nodelet">
+  <class name="msckf_vio/ImageProcessorNodelet"
+      type="msckf_vio::ImageProcessorNodelet"
+      base_class_type="nodelet::Nodelet">
+    <description>
+      Detect and track features in image sequence.
+ + + diff --git a/package.xml b/package.xml new file mode 100644 index 0000000..9200202 --- /dev/null +++ b/package.xml @@ -0,0 +1,43 @@ + + + + msckf_vio + 0.0.1 + Multi-State Constraint Kalman Filter for Vision-aided Inertial Navigation + + Ke Sun + Penn Software License + + Ke Sun + Kartik Mohta + + catkin + + roscpp + std_msgs + tf + nav_msgs + sensor_msgs + geometry_msgs + eigen_conversions + tf_conversions + random_numbers + message_generation + message_runtime + nodelet + image_transport + cv_bridge + message_filters + pcl_conversions + pcl_ros + std_srvs + + libpcl-all-dev + libpcl-all + suitesparse + + + + + + diff --git a/rviz/rviz_euroc_config.rviz b/rviz/rviz_euroc_config.rviz new file mode 100644 index 0000000..8fb216d --- /dev/null +++ b/rviz/rviz_euroc_config.rviz @@ -0,0 +1,319 @@ +Panels: + - Class: rviz/Displays + Help Height: 78 + Name: Displays + Property Tree Widget: + Expanded: + - /TF1/Frames1 + - /VIO1/Shape1 + - /Ground Truth1 + - /Ground Truth1/Shape1 + Splitter Ratio: 0.5 + Tree Height: 843 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.588679016 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: VIO Points +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.100000001 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.0299999993 + Value: Lines + Name: 1m Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 20 + Reference Frame: + Value: true + - Alpha: 0.699999988 + Cell Size: 5 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.0299999993 + Value: Lines + Name: 5m Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 4 + Reference Frame: + Value: true + - Class: rviz/TF + Enabled: true + Frame Timeout: 1000 + Frames: + All Enabled: false + odom: + Value: true + world: + Value: true + Marker Scale: 4 + Name: TF + Show Arrows: false + Show Axes: true + Show Names: true + Tree: + world: + {} + Update Interval: 0 + Value: true + - Angle Tolerance: 0 + Class: rviz/Odometry + Covariance: + Orientation: + Alpha: 0.5 + Color: 255; 255; 127 + Color Style: Unique + Frame: Local + Offset: 1 + Scale: 1 + Value: false + Position: + Alpha: 0.300000012 + Color: 204; 51; 204 + Scale: 1 + Value: true + Value: false + Enabled: false + Keep: 200 + Name: VIO + Position Tolerance: 0 + Shape: + Alpha: 1 + Axes Length: 1 + Axes Radius: 0.100000001 + Color: 255; 25; 0 + Head Length: 0.100000001 + Head Radius: 0.100000001 + Shaft Length: 0.5 + Shaft Radius: 0.0500000007 + Value: Arrow + Topic: /firefly_sbx/vio/odom + Unreliable: false + Value: false + - Angle Tolerance: 0 + Class: rviz/Odometry + Covariance: + Orientation: + Alpha: 0.5 + Color: 255; 255; 127 + Color Style: Unique + Frame: Local + Offset: 1 + Scale: 1 + Value: false + Position: + Alpha: 0.300000012 + Color: 204; 51; 204 + Scale: 1 + Value: true + Value: true + Enabled: true + Keep: 1 + Name: Covariance + Position Tolerance: 0 + Shape: + Alpha: 1 + Axes Length: 1 + Axes Radius: 0.100000001 + Color: 255; 25; 0 + Head Length: 0.200000003 + Head Radius: 0.200000003 + Shaft Length: 1 + Shaft Radius: 0.100000001 + Value: Arrow + Topic: /firefly_sbx/vio/odom + Unreliable: false + Value: true + 
- Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 3 + Min Value: -2 + Value: false + Axis: Z + Channel Name: z + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: AxisColor + Decay Time: 30 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 26.7646065 + Min Color: 0; 0; 0 + Min Intensity: -4.65562725 + Name: VIO Points + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 1 + Style: Points + Topic: /firefly_sbx/vio/feature_point_cloud + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /firefly_sbx/image_processor/debug_stereo_image + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Stereo Features + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + - Angle Tolerance: 0 + Class: rviz/Odometry + Covariance: + Orientation: + Alpha: 0.5 + Color: 255; 255; 127 + Color Style: Unique + Frame: Local + Offset: 1 + Scale: 1 + Value: true + Position: + Alpha: 0.300000012 + Color: 204; 51; 204 + Scale: 1 + Value: true + Value: false + Enabled: true + Keep: 1 + Name: Ground Truth + Position Tolerance: 0 + Shape: + Alpha: 1 + Axes Length: 1 + Axes Radius: 0.100000001 + Color: 0; 170; 255 + Head Length: 0.200000003 + Head Radius: 0.200000003 + Shaft Length: 1 + Shaft Radius: 0.0500000007 + Value: Arrow + Topic: /benchmark/odometry + Unreliable: false + Value: true + - Alpha: 1 + Buffer Length: 1 + Class: rviz/Path + Color: 170; 255; 0 + Enabled: false + Head Diameter: 0.300000012 + Head Length: 0.200000003 + Length: 0.300000012 + Line Style: Lines + Line Width: 0.0299999993 + Name: Ground Truth Path + Offset: + X: 0 + Y: 0 + Z: 0 + Pose Color: 255; 85; 255 + Pose Style: None + Radius: 0.0299999993 + Shaft Diameter: 0.100000001 + Shaft Length: 0.100000001 + Topic: /benchmark/path + Unreliable: false + Value: false + Enabled: true + Global Options: + Background Color: 48; 48; 48 + Fixed Frame: world + Frame Rate: 30 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Topic: /initialpose + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 15.6398182 + Enable Stereo Rendering: + Stereo Eye Separation: 0.0599999987 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: 0.80464071 + Y: -1.53352785 + Z: 0.741859734 + Focal Shape Fixed Size: true + Focal Shape Size: 0.0500000007 + Name: Current View + Near Clip Distance: 0.00999999978 + Pitch: 1.55979633 + Target Frame: odom + Value: Orbit (rviz) + Yaw: 6.27799845 + Saved: ~ +Window Geometry: + Displays: + collapsed: true + Height: 1056 + Hide Left Dock: true + Hide Right Dock: true + QMainWindow State: 
000000ff00000000fd0000000400000000000001b3000003dafc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c0061007900730000000028000003da000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001a2000003dafc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000028000003da000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000053d0000016dfc0100000004fb0000001e00530074006500720065006f002000460065006100740075007200650073030000000a000002dc0000038a0000013cfb0000001e00530074006500720065006f0020004600650061007400750072006500730100000000000005510000000000000000fb0000000800540069006d00650000000000000005560000030000fffffffb0000000800540069006d0065010000000000000450000000000000000000000780000003da00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Stereo Features: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1920 + X: 0 + Y: 24 diff --git a/rviz/rviz_fla_config.rviz b/rviz/rviz_fla_config.rviz new file mode 100644 index 0000000..ca45dd5 --- /dev/null +++ b/rviz/rviz_fla_config.rviz @@ -0,0 +1,263 @@ +Panels: + - Class: rviz/Displays + Help Height: 78 + Name: Displays + Property Tree Widget: + Expanded: + - /TF1/Frames1 + - /VIO1/Shape1 + - /VIO Points1 + - /VIO Points1/Autocompute Value Bounds1 + Splitter Ratio: 0.5 + Tree Height: 808 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.588679016 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: Stereo Features +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.100000001 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.0299999993 + Value: Lines + Name: 1m Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 700 + Reference Frame: + Value: true + - Alpha: 0.699999988 + Cell Size: 10 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.0299999993 + Value: Lines + Name: 10m Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 70 + Reference Frame: + Value: true + - Class: rviz/TF + Enabled: true + Frame Timeout: 1000 + Frames: + All Enabled: false + odom: + Value: true + vision: + Value: true + Marker Scale: 20 + Name: TF + Show Arrows: false + Show Axes: true + Show Names: true + Tree: + vision: + odom: + {} + Update 
Interval: 0 + Value: true + - Angle Tolerance: 0 + Class: rviz/Odometry + Covariance: + Orientation: + Alpha: 0.5 + Color: 255; 255; 127 + Color Style: Unique + Frame: Local + Offset: 1 + Scale: 1 + Value: true + Position: + Alpha: 0.300000012 + Color: 204; 51; 204 + Scale: 1 + Value: true + Value: false + Enabled: true + Keep: 400 + Name: VIO + Position Tolerance: 0 + Shape: + Alpha: 1 + Axes Length: 1 + Axes Radius: 0.100000001 + Color: 255; 25; 0 + Head Length: 0.300000012 + Head Radius: 0.300000012 + Shaft Length: 3.5 + Shaft Radius: 0.0500000007 + Value: Arrow + Topic: /fla/vio/odom + Unreliable: false + Value: true + - Angle Tolerance: 0 + Class: rviz/Odometry + Covariance: + Orientation: + Alpha: 0.5 + Color: 255; 255; 127 + Color Style: Unique + Frame: Local + Offset: 1 + Scale: 1 + Value: false + Position: + Alpha: 0.5 + Color: 204; 51; 204 + Scale: 1 + Value: true + Value: true + Enabled: true + Keep: 1 + Name: Covariance + Position Tolerance: 0 + Shape: + Alpha: 1 + Axes Length: 1 + Axes Radius: 0.100000001 + Color: 255; 25; 0 + Head Length: 0.300000012 + Head Radius: 0.100000001 + Shaft Length: 1 + Shaft Radius: 0.0500000007 + Value: Arrow + Topic: /fla/vio/odom + Unreliable: false + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -5 + Value: false + Axis: Z + Channel Name: z + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: AxisColor + Decay Time: 30 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 26.7646065 + Min Color: 0; 0; 0 + Min Intensity: -4.65562725 + Name: VIO Points + Position Transformer: XYZ + Queue Size: 1 + Selectable: true + Size (Pixels): 3 + Size (m): 1 + Style: Points + Topic: /fla/vio/feature_point_cloud + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /fla/image_processor/debug_stereo_image + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Stereo Features + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + Enabled: true + Global Options: + Background Color: 48; 48; 48 + Fixed Frame: vision + Frame Rate: 30 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Topic: /initialpose + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 98.0585938 + Enable Stereo Rendering: + Stereo Eye Separation: 0.0599999987 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: -26.1133022 + Y: -6.50123549 + Z: -17.9633904 + Focal Shape Fixed Size: true + Focal Shape Size: 0.0500000007 + Name: Current View + Near Clip Distance: 0.00999999978 + Pitch: 1.19979477 + Target Frame: odom + Value: Orbit (rviz) + Yaw: 4.71076918 + Saved: ~ +Window Geometry: + Displays: + collapsed: true + Height: 1056 + Hide Left Dock: true + Hide Right Dock: true + QMainWindow State: 
000000ff00000000fd0000000400000000000001b3000003b7fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c0061007900730000000028000003b7000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001a2000001fcfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000002b000001fc000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005b7000001cffc0100000004fb0000001e00530074006500720065006f002000460065006100740075007200650073030000000c000002400000043f000001d9fb0000001e00530074006500720065006f0020004600650061007400750072006500730100000000000005510000000000000000fb0000000800540069006d00650000000000000005560000030000fffffffb0000000800540069006d0065010000000000000450000000000000000000000780000003da00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Stereo Features: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1920 + X: 0 + Y: 24 diff --git a/src/image_processor.cpp b/src/image_processor.cpp new file mode 100644 index 0000000..fcee98b --- /dev/null +++ b/src/image_processor.cpp @@ -0,0 +1,1510 @@ +/* + * COPYRIGHT AND PERMISSION NOTICE + * Penn Software MSCKF_VIO + * Copyright (C) 2017 The Trustees of the University of Pennsylvania + * All rights reserved. 
+ */
+
+#include <iostream>
+#include <algorithm>
+#include <set>
+#include <Eigen/Dense>
+
+#include <sensor_msgs/image_encodings.h>
+#include <random_numbers/random_numbers.h>
+
+#include <msckf_vio/CameraMeasurement.h>
+#include <msckf_vio/TrackingInfo.h>
+#include <msckf_vio/image_processor.h>
+
+using namespace std;
+using namespace cv;
+using namespace Eigen;
+
+namespace msckf_vio {
+ImageProcessor::ImageProcessor(ros::NodeHandle& n) :
+  nh(n),
+  is_first_img(true),
+  //img_transport(n),
+  stereo_sub(10),
+  prev_features_ptr(new GridFeatures()),
+  curr_features_ptr(new GridFeatures()) {
+  return;
+}
+
+ImageProcessor::~ImageProcessor() {
+  destroyAllWindows();
+  //ROS_INFO("Feature lifetime statistics:");
+  //featureLifetimeStatistics();
+  return;
+}
+
+bool ImageProcessor::loadParameters() {
+  // Camera calibration parameters
+  nh.param<string>("cam0/distortion_model",
+      cam0_distortion_model, string("radtan"));
+  nh.param<string>("cam1/distortion_model",
+      cam1_distortion_model, string("radtan"));
+
+  vector<int> cam0_resolution_temp(2);
+  nh.getParam("cam0/resolution", cam0_resolution_temp);
+  cam0_resolution[0] = cam0_resolution_temp[0];
+  cam0_resolution[1] = cam0_resolution_temp[1];
+
+  vector<int> cam1_resolution_temp(2);
+  nh.getParam("cam1/resolution", cam1_resolution_temp);
+  cam1_resolution[0] = cam1_resolution_temp[0];
+  cam1_resolution[1] = cam1_resolution_temp[1];
+
+  vector<double> cam0_intrinsics_temp(4);
+  nh.getParam("cam0/intrinsics", cam0_intrinsics_temp);
+  cam0_intrinsics[0] = cam0_intrinsics_temp[0];
+  cam0_intrinsics[1] = cam0_intrinsics_temp[1];
+  cam0_intrinsics[2] = cam0_intrinsics_temp[2];
+  cam0_intrinsics[3] = cam0_intrinsics_temp[3];
+
+  vector<double> cam1_intrinsics_temp(4);
+  nh.getParam("cam1/intrinsics", cam1_intrinsics_temp);
+  cam1_intrinsics[0] = cam1_intrinsics_temp[0];
+  cam1_intrinsics[1] = cam1_intrinsics_temp[1];
+  cam1_intrinsics[2] = cam1_intrinsics_temp[2];
+  cam1_intrinsics[3] = cam1_intrinsics_temp[3];
+
+  vector<double> cam0_distortion_coeffs_temp(4);
+  nh.getParam("cam0/distortion_coeffs",
+      cam0_distortion_coeffs_temp);
+  cam0_distortion_coeffs[0] = cam0_distortion_coeffs_temp[0];
+  cam0_distortion_coeffs[1] = cam0_distortion_coeffs_temp[1];
+  cam0_distortion_coeffs[2] = cam0_distortion_coeffs_temp[2];
+  cam0_distortion_coeffs[3] = cam0_distortion_coeffs_temp[3];
+
+  vector<double> cam1_distortion_coeffs_temp(4);
+  nh.getParam("cam1/distortion_coeffs",
+      cam1_distortion_coeffs_temp);
+  cam1_distortion_coeffs[0] = cam1_distortion_coeffs_temp[0];
+  cam1_distortion_coeffs[1] = cam1_distortion_coeffs_temp[1];
+  cam1_distortion_coeffs[2] = cam1_distortion_coeffs_temp[2];
+  cam1_distortion_coeffs[3] = cam1_distortion_coeffs_temp[3];
+
+  vector<double> cam0_extrinsics(16);
+  cv::Matx33d R_imu_cam0;
+  cv::Vec3d t_imu_cam0;
+  nh.getParam("cam0/T_cam_imu", cam0_extrinsics);
+  R_imu_cam0(0, 0) = cam0_extrinsics[0];
+  R_imu_cam0(0, 1) = cam0_extrinsics[1];
+  R_imu_cam0(0, 2) = cam0_extrinsics[2];
+  R_imu_cam0(1, 0) = cam0_extrinsics[4];
+  R_imu_cam0(1, 1) = cam0_extrinsics[5];
+  R_imu_cam0(1, 2) = cam0_extrinsics[6];
+  R_imu_cam0(2, 0) = cam0_extrinsics[8];
+  R_imu_cam0(2, 1) = cam0_extrinsics[9];
+  R_imu_cam0(2, 2) = cam0_extrinsics[10];
+  t_imu_cam0(0) = cam0_extrinsics[3];
+  t_imu_cam0(1) = cam0_extrinsics[7];
+  t_imu_cam0(2) = cam0_extrinsics[11];
+
+  R_cam0_imu = R_imu_cam0.t();
+  t_cam0_imu = -R_imu_cam0.t() * t_imu_cam0;
+
+  vector<double> cam1_extrinsics(16);
+  cv::Matx33d R_imu_cam1;
+  cv::Vec3d t_imu_cam1;
+  nh.getParam("cam1/T_cam_imu", cam1_extrinsics);
+  R_imu_cam1(0, 0) = cam1_extrinsics[0];
+  R_imu_cam1(0, 1) = cam1_extrinsics[1];
+  R_imu_cam1(0, 2) = cam1_extrinsics[2];
+  R_imu_cam1(1, 0) = cam1_extrinsics[4];
+  R_imu_cam1(1, 1) = cam1_extrinsics[5];
+  R_imu_cam1(1, 2) = cam1_extrinsics[6];
R_imu_cam1(2, 0) = cam1_extrinsics[8];
+  R_imu_cam1(2, 1) = cam1_extrinsics[9];
+  R_imu_cam1(2, 2) = cam1_extrinsics[10];
+  t_imu_cam1(0) = cam1_extrinsics[3];
+  t_imu_cam1(1) = cam1_extrinsics[7];
+  t_imu_cam1(2) = cam1_extrinsics[11];
+
+  R_cam1_imu = R_imu_cam1.t();
+  t_cam1_imu = -R_imu_cam1.t() * t_imu_cam1;
+
+  // Processor parameters
+  nh.param<int>("grid_row", processor_config.grid_row, 4);
+  nh.param<int>("grid_col", processor_config.grid_col, 4);
+  nh.param<int>("grid_min_feature_num",
+      processor_config.grid_min_feature_num, 2);
+  nh.param<int>("grid_max_feature_num",
+      processor_config.grid_max_feature_num, 4);
+  nh.param<int>("pyramid_levels",
+      processor_config.pyramid_levels, 3);
+  nh.param<int>("patch_size",
+      processor_config.patch_size, 31);
+  nh.param<int>("fast_threshold",
+      processor_config.fast_threshold, 20);
+  nh.param<int>("max_iteration",
+      processor_config.max_iteration, 30);
+  nh.param<double>("track_precision",
+      processor_config.track_precision, 0.01);
+  nh.param<double>("ransac_threshold",
+      processor_config.ransac_threshold, 3);
+  nh.param<double>("stereo_threshold",
+      processor_config.stereo_threshold, 3);
+
+  ROS_INFO("===========================================");
+  ROS_INFO("cam0_resolution: %d, %d",
+      cam0_resolution[0], cam0_resolution[1]);
+  ROS_INFO("cam0_intrinsics: %f, %f, %f, %f",
+      cam0_intrinsics[0], cam0_intrinsics[1],
+      cam0_intrinsics[2], cam0_intrinsics[3]);
+  ROS_INFO("cam0_distortion_model: %s",
+      cam0_distortion_model.c_str());
+  ROS_INFO("cam0_distortion_coefficients: %f, %f, %f, %f",
+      cam0_distortion_coeffs[0], cam0_distortion_coeffs[1],
+      cam0_distortion_coeffs[2], cam0_distortion_coeffs[3]);
+
+  ROS_INFO("cam1_resolution: %d, %d",
+      cam1_resolution[0], cam1_resolution[1]);
+  ROS_INFO("cam1_intrinsics: %f, %f, %f, %f",
+      cam1_intrinsics[0], cam1_intrinsics[1],
+      cam1_intrinsics[2], cam1_intrinsics[3]);
+  ROS_INFO("cam1_distortion_model: %s",
+      cam1_distortion_model.c_str());
+  ROS_INFO("cam1_distortion_coefficients: %f, %f, %f, %f",
+      cam1_distortion_coeffs[0], cam1_distortion_coeffs[1],
+      cam1_distortion_coeffs[2], cam1_distortion_coeffs[3]);
+
+  cout << R_imu_cam0 << endl;
+  cout << t_imu_cam0.t() << endl;
+
+  ROS_INFO("grid_row: %d",
+      processor_config.grid_row);
+  ROS_INFO("grid_col: %d",
+      processor_config.grid_col);
+  ROS_INFO("grid_min_feature_num: %d",
+      processor_config.grid_min_feature_num);
+  ROS_INFO("grid_max_feature_num: %d",
+      processor_config.grid_max_feature_num);
+  ROS_INFO("pyramid_levels: %d",
+      processor_config.pyramid_levels);
+  ROS_INFO("patch_size: %d",
+      processor_config.patch_size);
+  ROS_INFO("fast_threshold: %d",
+      processor_config.fast_threshold);
+  ROS_INFO("max_iteration: %d",
+      processor_config.max_iteration);
+  ROS_INFO("track_precision: %f",
+      processor_config.track_precision);
+  ROS_INFO("ransac_threshold: %f",
+      processor_config.ransac_threshold);
+  ROS_INFO("stereo_threshold: %f",
+      processor_config.stereo_threshold);
+  ROS_INFO("===========================================");
+  return true;
+}
+
+bool ImageProcessor::createRosIO() {
+  feature_pub = nh.advertise<CameraMeasurement>(
+      "features", 3);
+  tracking_info_pub = nh.advertise<TrackingInfo>(
+      "tracking_info", 1);
+  image_transport::ImageTransport it(nh);
+  debug_stereo_pub = it.advertise("debug_stereo_image", 1);
+
+  cam0_img_sub.subscribe(nh, "cam0_image", 10);
+  cam1_img_sub.subscribe(nh, "cam1_image", 10);
+  stereo_sub.connectInput(cam0_img_sub, cam1_img_sub);
+  stereo_sub.registerCallback(&ImageProcessor::stereoCallback, this);
+  imu_sub = nh.subscribe("imu", 50,
+      &ImageProcessor::imuCallback, this);
+
+  return true;
+}
+
+bool ImageProcessor::initialize() {
+  if (!loadParameters()) return false;
+  ROS_INFO("Finish loading ROS parameters...");
+
+  // Create feature detector.
+  detector_ptr = FastFeatureDetector::create(
+      processor_config.fast_threshold);
+
+  if (!createRosIO()) return false;
+  ROS_INFO("Finish creating ROS IO...");
+
+  return true;
+}
+
+void ImageProcessor::stereoCallback(
+    const sensor_msgs::ImageConstPtr& cam0_img,
+    const sensor_msgs::ImageConstPtr& cam1_img) {
+
+  //cout << "==================================" << endl;
+
+  // Get the current image.
+  cam0_curr_img_ptr = cv_bridge::toCvShare(cam0_img,
+      sensor_msgs::image_encodings::MONO8);
+  cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img,
+      sensor_msgs::image_encodings::MONO8);
+
+  // Build the image pyramids once since they're used at multiple places.
+  createImagePyramids();
+
+  // Detect features in the first frame.
+  if (is_first_img) {
+    ros::Time start_time = ros::Time::now();
+    initializeFirstFrame();
+    //ROS_INFO("Detection time: %f",
+    //    (ros::Time::now()-start_time).toSec());
+    is_first_img = false;
+
+    // Draw results.
+    start_time = ros::Time::now();
+    drawFeaturesStereo();
+    //ROS_INFO("Draw features: %f",
+    //    (ros::Time::now()-start_time).toSec());
+  } else {
+    // Track the features from the previous image.
+    ros::Time start_time = ros::Time::now();
+    trackFeatures();
+    //ROS_INFO("Tracking time: %f",
+    //    (ros::Time::now()-start_time).toSec());
+
+    // Add new features into the current image.
+    start_time = ros::Time::now();
+    addNewFeatures();
+    //ROS_INFO("Addition time: %f",
+    //    (ros::Time::now()-start_time).toSec());
+
+    // Prune the grids so that no grid exceeds its feature number bound.
+    start_time = ros::Time::now();
+    pruneGridFeatures();
+    //ROS_INFO("Prune grid features: %f",
+    //    (ros::Time::now()-start_time).toSec());
+
+    // Draw results.
+    start_time = ros::Time::now();
+    drawFeaturesStereo();
+    //ROS_INFO("Draw features: %f",
+    //    (ros::Time::now()-start_time).toSec());
+  }
+
+  //ros::Time start_time = ros::Time::now();
+  //updateFeatureLifetime();
+  //ROS_INFO("Statistics: %f",
+  //    (ros::Time::now()-start_time).toSec());
+
+  // Publish features in the current image.
+  ros::Time start_time = ros::Time::now();
+  publish();
+  //ROS_INFO("Publishing: %f",
+  //    (ros::Time::now()-start_time).toSec());
+
+  // Update the previous image and previous features.
+  cam0_prev_img_ptr = cam0_curr_img_ptr;
+  prev_features_ptr = curr_features_ptr;
+  std::swap(prev_cam0_pyramid_, curr_cam0_pyramid_);
+
+  // Initialize the current features to empty vectors.
+  curr_features_ptr.reset(new GridFeatures());
+  for (int code = 0; code <
+      processor_config.grid_row*processor_config.grid_col; ++code) {
+    (*curr_features_ptr)[code] = vector<FeatureMetaData>(0);
+  }
+
+  return;
+}
+
+void ImageProcessor::imuCallback(
+    const sensor_msgs::ImuConstPtr& msg) {
+  // Wait for the first image to be set.
+  if (is_first_img) return;
+  imu_msg_buffer.push_back(*msg);
+  return;
+}
+
+void ImageProcessor::createImagePyramids() {
+  const Mat& curr_cam0_img = cam0_curr_img_ptr->image;
+  buildOpticalFlowPyramid(
+      curr_cam0_img, curr_cam0_pyramid_,
+      Size(processor_config.patch_size, processor_config.patch_size),
+      processor_config.pyramid_levels, true, BORDER_REFLECT_101,
+      BORDER_CONSTANT, false);
+
+  const Mat& curr_cam1_img = cam1_curr_img_ptr->image;
+  buildOpticalFlowPyramid(
+      curr_cam1_img, curr_cam1_pyramid_,
+      Size(processor_config.patch_size, processor_config.patch_size),
+      processor_config.pyramid_levels, true, BORDER_REFLECT_101,
+      BORDER_CONSTANT, false);
+}
+
+void ImageProcessor::initializeFirstFrame() {
+  // Size of each grid.
+  const Mat& img = cam0_curr_img_ptr->image;
+  static int grid_height = img.rows / processor_config.grid_row;
+  static int grid_width = img.cols / processor_config.grid_col;
+
+  // Detect new features on the first image.
+  vector<KeyPoint> new_features(0);
+  detector_ptr->detect(img, new_features);
+
+  // Find the stereo matched points for the newly
+  // detected features.
+  vector<cv::Point2f> cam0_points(new_features.size());
+  for (int i = 0; i < new_features.size(); ++i)
+    cam0_points[i] = new_features[i].pt;
+
+  vector<cv::Point2f> cam1_points(0);
+  vector<unsigned char> inlier_markers(0);
+  stereoMatch(cam0_points, cam1_points, inlier_markers);
+
+  vector<cv::Point2f> cam0_inliers(0);
+  vector<cv::Point2f> cam1_inliers(0);
+  vector<float> response_inliers(0);
+  for (int i = 0; i < inlier_markers.size(); ++i) {
+    if (inlier_markers[i] == 0) continue;
+    cam0_inliers.push_back(cam0_points[i]);
+    cam1_inliers.push_back(cam1_points[i]);
+    response_inliers.push_back(new_features[i].response);
+  }
+
+  // Group the features into grids.
+  GridFeatures grid_new_features;
+  for (int code = 0; code <
+      processor_config.grid_row*processor_config.grid_col; ++code)
+    grid_new_features[code] = vector<FeatureMetaData>(0);
+
+  for (int i = 0; i < cam0_inliers.size(); ++i) {
+    const cv::Point2f& cam0_point = cam0_inliers[i];
+    const cv::Point2f& cam1_point = cam1_inliers[i];
+    const float& response = response_inliers[i];
+
+    int row = static_cast<int>(cam0_point.y / grid_height);
+    int col = static_cast<int>(cam0_point.x / grid_width);
+    int code = row*processor_config.grid_col + col;
+
+    FeatureMetaData new_feature;
+    new_feature.response = response;
+    new_feature.cam0_point = cam0_point;
+    new_feature.cam1_point = cam1_point;
+    grid_new_features[code].push_back(new_feature);
+  }
+
+  // Sort the new features in each grid based on their responses.
+  for (auto& item : grid_new_features)
+    std::sort(item.second.begin(), item.second.end(),
+        &ImageProcessor::featureCompareByResponse);
+
+  // Collect new features within each grid with high response.
+  for (int code = 0; code <
+      processor_config.grid_row*processor_config.grid_col; ++code) {
+    vector<FeatureMetaData>& features_this_grid = (*curr_features_ptr)[code];
+    vector<FeatureMetaData>& new_features_this_grid = grid_new_features[code];
+
+    for (int k = 0; k < processor_config.grid_min_feature_num &&
+        k < new_features_this_grid.size(); ++k) {
+      features_this_grid.push_back(new_features_this_grid[k]);
+      features_this_grid.back().id = next_feature_id++;
+      features_this_grid.back().lifetime = 1;
+    }
+  }
+
+  return;
+}
+
+void ImageProcessor::predictFeatureTracking(
+    const vector<cv::Point2f>& input_pts,
+    const cv::Matx33f& R_p_c,
+    const cv::Vec4d& intrinsics,
+    vector<cv::Point2f>& compensated_pts) {
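+
+  // Note: the prediction below warps each point by the "infinite homography"
+  // H = K * R_p_c * K^{-1}, which is exact for a purely rotating camera and a
+  // good initial guess for distant points. A minimal sketch of the same warp
+  // for one point (u, v) -- hypothetical variables, for illustration only:
+  //
+  //   cv::Matx33f H = K * R_p_c * K.inv();
+  //   cv::Vec3f p = H * cv::Vec3f(u, v, 1.0f);
+  //   cv::Point2f predicted(p[0]/p[2], p[1]/p[2]);
+  //
+  // Seeding calcOpticalFlowPyrLK with these predictions (via
+  // OPTFLOW_USE_INITIAL_FLOW) shrinks the search region after fast rotations.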
+  // Return directly if there are no input features.
+  if (input_pts.size() == 0) {
+    compensated_pts.clear();
+    return;
+  }
+  compensated_pts.resize(input_pts.size());
+
+  // Intrinsic matrix.
+  cv::Matx33f K(
+      intrinsics[0], 0.0, intrinsics[2],
+      0.0, intrinsics[1], intrinsics[3],
+      0.0, 0.0, 1.0);
+  cv::Matx33f H = K * R_p_c * K.inv();
+
+  for (int i = 0; i < input_pts.size(); ++i) {
+    cv::Vec3f p1(input_pts[i].x, input_pts[i].y, 1.0f);
+    cv::Vec3f p2 = H * p1;
+    compensated_pts[i].x = p2[0] / p2[2];
+    compensated_pts[i].y = p2[1] / p2[2];
+  }
+
+  return;
+}
+
+void ImageProcessor::trackFeatures() {
+  // Size of each grid.
+  static int grid_height =
+      cam0_curr_img_ptr->image.rows / processor_config.grid_row;
+  static int grid_width =
+      cam0_curr_img_ptr->image.cols / processor_config.grid_col;
+
+  // Compute a rough relative rotation which takes a vector
+  // from the previous frame to the current frame.
+  Matx33f cam0_R_p_c;
+  Matx33f cam1_R_p_c;
+  integrateImuData(cam0_R_p_c, cam1_R_p_c);
+
+  // Organize the features in the previous image.
+  vector<FeatureIDType> prev_ids(0);
+  vector<int> prev_lifetime(0);
+  vector<Point2f> prev_cam0_points(0);
+  vector<Point2f> prev_cam1_points(0);
+
+  for (const auto& item : *prev_features_ptr) {
+    for (const auto& prev_feature : item.second) {
+      prev_ids.push_back(prev_feature.id);
+      prev_lifetime.push_back(prev_feature.lifetime);
+      prev_cam0_points.push_back(prev_feature.cam0_point);
+      prev_cam1_points.push_back(prev_feature.cam1_point);
+    }
+  }
+
+  // Number of features before tracking.
+  before_tracking = prev_cam0_points.size();
+
+  // Abort tracking if there are no features in
+  // the previous frame.
+  if (prev_ids.size() == 0) return;
+
+  // Track features using the LK optical flow method.
+  vector<Point2f> curr_cam0_points(0);
+  vector<unsigned char> track_inliers(0);
+
+  predictFeatureTracking(prev_cam0_points,
+      cam0_R_p_c, cam0_intrinsics, curr_cam0_points);
+
+  calcOpticalFlowPyrLK(
+      prev_cam0_pyramid_, curr_cam0_pyramid_,
+      prev_cam0_points, curr_cam0_points,
+      track_inliers, noArray(),
+      Size(processor_config.patch_size, processor_config.patch_size),
+      processor_config.pyramid_levels,
+      TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
+          processor_config.max_iteration,
+          processor_config.track_precision),
+      cv::OPTFLOW_USE_INITIAL_FLOW);
+
+  // Mark those tracked points out of the image region
+  // as untracked.
+  for (int i = 0; i < curr_cam0_points.size(); ++i) {
+    if (track_inliers[i] == 0) continue;
+    if (curr_cam0_points[i].y < 0 ||
+        curr_cam0_points[i].y > cam0_curr_img_ptr->image.rows-1 ||
+        curr_cam0_points[i].x < 0 ||
+        curr_cam0_points[i].x > cam0_curr_img_ptr->image.cols-1)
+      track_inliers[i] = 0;
+  }
+
+  // Collect the tracked points.
+  vector<FeatureIDType> prev_tracked_ids(0);
+  vector<int> prev_tracked_lifetime(0);
+  vector<Point2f> prev_tracked_cam0_points(0);
+  vector<Point2f> prev_tracked_cam1_points(0);
+  vector<Point2f> curr_tracked_cam0_points(0);
+
+  removeUnmarkedElements(
+      prev_ids, track_inliers, prev_tracked_ids);
+  removeUnmarkedElements(
+      prev_lifetime, track_inliers, prev_tracked_lifetime);
+  removeUnmarkedElements(
+      prev_cam0_points, track_inliers, prev_tracked_cam0_points);
+  removeUnmarkedElements(
+      prev_cam1_points, track_inliers, prev_tracked_cam1_points);
+  removeUnmarkedElements(
+      curr_cam0_points, track_inliers, curr_tracked_cam0_points);
+
+  // Number of features left after tracking.
+  after_tracking = curr_tracked_cam0_points.size();
+
+  // Outlier removal involves three steps, which form a closed
+  // loop between the previous and current frames of cam0 (left)
+  // and cam1 (right).
Assuming the stereo matching between the
+  // previous cam0 and cam1 images is correct, the three steps are:
+  //
+  //        prev frames  cam0 ----------> cam1
+  //                       |                |
+  //                       | ransac         | ransac
+  //                       |  stereo match  |
+  //        curr frames  cam0 ----------> cam1
+  //
+  // 1) Stereo matching between the current images of cam0 and cam1.
+  // 2) RANSAC between the previous and current images of cam0.
+  // 3) RANSAC between the previous and current images of cam1.
+  //
+  // For Step 3, tracking between the images is no longer needed.
+  // The stereo matching results are directly used in the RANSAC.
+
+  // Step 1: stereo matching.
+  vector<Point2f> curr_cam1_points(0);
+  vector<unsigned char> match_inliers(0);
+  stereoMatch(curr_tracked_cam0_points, curr_cam1_points, match_inliers);
+
+  vector<FeatureIDType> prev_matched_ids(0);
+  vector<int> prev_matched_lifetime(0);
+  vector<Point2f> prev_matched_cam0_points(0);
+  vector<Point2f> prev_matched_cam1_points(0);
+  vector<Point2f> curr_matched_cam0_points(0);
+  vector<Point2f> curr_matched_cam1_points(0);
+
+  removeUnmarkedElements(
+      prev_tracked_ids, match_inliers, prev_matched_ids);
+  removeUnmarkedElements(
+      prev_tracked_lifetime, match_inliers, prev_matched_lifetime);
+  removeUnmarkedElements(
+      prev_tracked_cam0_points, match_inliers, prev_matched_cam0_points);
+  removeUnmarkedElements(
+      prev_tracked_cam1_points, match_inliers, prev_matched_cam1_points);
+  removeUnmarkedElements(
+      curr_tracked_cam0_points, match_inliers, curr_matched_cam0_points);
+  removeUnmarkedElements(
+      curr_cam1_points, match_inliers, curr_matched_cam1_points);
+
+  // Number of features left after stereo matching.
+  after_matching = curr_matched_cam0_points.size();
+
+  // Steps 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.
+  vector<int> cam0_ransac_inliers(0);
+  twoPointRansac(prev_matched_cam0_points, curr_matched_cam0_points,
+      cam0_R_p_c, cam0_intrinsics, cam0_distortion_model,
+      cam0_distortion_coeffs, processor_config.ransac_threshold,
+      0.99, cam0_ransac_inliers);
+
+  vector<int> cam1_ransac_inliers(0);
+  twoPointRansac(prev_matched_cam1_points, curr_matched_cam1_points,
+      cam1_R_p_c, cam1_intrinsics, cam1_distortion_model,
+      cam1_distortion_coeffs, processor_config.ransac_threshold,
+      0.99, cam1_ransac_inliers);
+
+  // Number of features after ransac.
+  after_ransac = 0;
+
+  for (int i = 0; i < cam0_ransac_inliers.size(); ++i) {
+    if (cam0_ransac_inliers[i] == 0 ||
+        cam1_ransac_inliers[i] == 0) continue;
+    int row = static_cast<int>(
+        curr_matched_cam0_points[i].y / grid_height);
+    int col = static_cast<int>(
+        curr_matched_cam0_points[i].x / grid_width);
+    int code = row*processor_config.grid_col + col;
+    (*curr_features_ptr)[code].push_back(FeatureMetaData());
+
+    FeatureMetaData& grid_new_feature = (*curr_features_ptr)[code].back();
+    grid_new_feature.id = prev_matched_ids[i];
+    grid_new_feature.lifetime = ++prev_matched_lifetime[i];
+    grid_new_feature.cam0_point = curr_matched_cam0_points[i];
+    grid_new_feature.cam1_point = curr_matched_cam1_points[i];
+
+    ++after_ransac;
+  }
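+
+  // Note: the tracking rate reported below is the fraction of previous-frame
+  // features that survive the full pipeline,
+  //
+  //   tracking_rate = curr_feature_num / (prev_feature_num + 1e-5);
+  //
+  // the 1e-5 term only guards against division by zero on an empty frame.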
+  // Compute the tracking rate.
+  int prev_feature_num = 0;
+  for (const auto& item : *prev_features_ptr)
+    prev_feature_num += item.second.size();
+
+  int curr_feature_num = 0;
+  for (const auto& item : *curr_features_ptr)
+    curr_feature_num += item.second.size();
+
+  ROS_INFO_THROTTLE(0.5,
+      "\033[0;32m candidates: %d; track: %d; match: %d; ransac: %d/%d=%f\033[0m",
+      before_tracking, after_tracking, after_matching,
+      curr_feature_num, prev_feature_num,
+      static_cast<double>(curr_feature_num)/
+      (static_cast<double>(prev_feature_num)+1e-5));
+  //printf(
+  //    "\033[0;32m candidates: %d; raw track: %d; stereo match: %d; ransac: %d/%d=%f\033[0m\n",
+  //    before_tracking, after_tracking, after_matching,
+  //    curr_feature_num, prev_feature_num,
+  //    static_cast<double>(curr_feature_num)/
+  //    (static_cast<double>(prev_feature_num)+1e-5));
+
+  return;
+}
+
+void ImageProcessor::stereoMatch(
+    const vector<cv::Point2f>& cam0_points,
+    vector<cv::Point2f>& cam1_points,
+    vector<unsigned char>& inlier_markers) {
+
+  if (cam0_points.size() == 0) return;
+
+  if (cam1_points.size() == 0) {
+    // Initialize cam1_points by projecting cam0_points to cam1 using the
+    // rotation from the stereo extrinsics.
+    const cv::Matx33d R_cam0_cam1 = R_cam1_imu.t() * R_cam0_imu;
+    vector<cv::Point2f> cam0_points_undistorted;
+    undistortPoints(cam0_points, cam0_intrinsics, cam0_distortion_model,
+        cam0_distortion_coeffs, cam0_points_undistorted,
+        R_cam0_cam1);
+    cam1_points = distortPoints(cam0_points_undistorted, cam1_intrinsics,
+        cam1_distortion_model, cam1_distortion_coeffs);
+  }
+
+  // Track features using the LK optical flow method.
+  calcOpticalFlowPyrLK(curr_cam0_pyramid_, curr_cam1_pyramid_,
+      cam0_points, cam1_points,
+      inlier_markers, noArray(),
+      Size(processor_config.patch_size, processor_config.patch_size),
+      processor_config.pyramid_levels,
+      TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
+          processor_config.max_iteration,
+          processor_config.track_precision),
+      cv::OPTFLOW_USE_INITIAL_FLOW);
+
+  // Mark those tracked points out of the image region
+  // as untracked.
+  for (int i = 0; i < cam1_points.size(); ++i) {
+    if (inlier_markers[i] == 0) continue;
+    if (cam1_points[i].y < 0 ||
+        cam1_points[i].y > cam1_curr_img_ptr->image.rows-1 ||
+        cam1_points[i].x < 0 ||
+        cam1_points[i].x > cam1_curr_img_ptr->image.cols-1)
+      inlier_markers[i] = 0;
+  }
+
+  // Compute the relative rotation between the cam0
+  // frame and the cam1 frame.
+  const cv::Matx33d R_cam0_cam1 = R_cam1_imu.t() * R_cam0_imu;
+  const cv::Vec3d t_cam0_cam1 = R_cam1_imu.t() * (t_cam0_imu-t_cam1_imu);
+  // Compute the essential matrix.
+  const cv::Matx33d t_cam0_cam1_hat(
+      0.0, -t_cam0_cam1[2], t_cam0_cam1[1],
+      t_cam0_cam1[2], 0.0, -t_cam0_cam1[0],
+      -t_cam0_cam1[1], t_cam0_cam1[0], 0.0);
+  const cv::Matx33d E = t_cam0_cam1_hat * R_cam0_cam1;
+
+  // Further remove outliers based on the known
+  // essential matrix.
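+  // Note: an exact stereo match must satisfy the epipolar constraint
+  // p1^T * E * p0 = 0. Each candidate below is therefore scored by its
+  // distance to the epipolar line l = E * p0,
+  //
+  //   error = |p1 . l| / sqrt(l_x^2 + l_y^2),
+  //
+  // and rejected when that distance exceeds stereo_threshold (a pixel
+  // threshold) times norm_pixel_unit, which expresses one pixel in the
+  // normalized image plane using the average focal length.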
+  vector<cv::Point2f> cam0_points_undistorted(0);
+  vector<cv::Point2f> cam1_points_undistorted(0);
+  undistortPoints(
+      cam0_points, cam0_intrinsics, cam0_distortion_model,
+      cam0_distortion_coeffs, cam0_points_undistorted);
+  undistortPoints(
+      cam1_points, cam1_intrinsics, cam1_distortion_model,
+      cam1_distortion_coeffs, cam1_points_undistorted);
+
+  double norm_pixel_unit = 4.0 / (
+      cam0_intrinsics[0]+cam0_intrinsics[1]+
+      cam1_intrinsics[0]+cam1_intrinsics[1]);
+
+  for (int i = 0; i < cam0_points_undistorted.size(); ++i) {
+    if (inlier_markers[i] == 0) continue;
+    cv::Vec3d pt0(cam0_points_undistorted[i].x,
+        cam0_points_undistorted[i].y, 1.0);
+    cv::Vec3d pt1(cam1_points_undistorted[i].x,
+        cam1_points_undistorted[i].y, 1.0);
+    cv::Vec3d epipolar_line = E * pt0;
+    double error = fabs((pt1.t() * epipolar_line)[0]) / sqrt(
+        epipolar_line[0]*epipolar_line[0]+
+        epipolar_line[1]*epipolar_line[1]);
+    if (error > processor_config.stereo_threshold*norm_pixel_unit)
+      inlier_markers[i] = 0;
+  }
+
+  return;
+}
+
+void ImageProcessor::addNewFeatures() {
+  const Mat& curr_img = cam0_curr_img_ptr->image;
+
+  // Size of each grid.
+  static int grid_height =
+      cam0_curr_img_ptr->image.rows / processor_config.grid_row;
+  static int grid_width =
+      cam0_curr_img_ptr->image.cols / processor_config.grid_col;
+
+  // Create a mask to avoid redetecting existing features.
+  Mat mask(curr_img.rows, curr_img.cols, CV_8U, Scalar(1));
+
+  for (const auto& features : *curr_features_ptr) {
+    for (const auto& feature : features.second) {
+      const int y = static_cast<int>(feature.cam0_point.y);
+      const int x = static_cast<int>(feature.cam0_point.x);
+
+      int up_lim = y-2, bottom_lim = y+3,
+          left_lim = x-2, right_lim = x+3;
+      if (up_lim < 0) up_lim = 0;
+      if (bottom_lim > curr_img.rows) bottom_lim = curr_img.rows;
+      if (left_lim < 0) left_lim = 0;
+      if (right_lim > curr_img.cols) right_lim = curr_img.cols;
+
+      Range row_range(up_lim, bottom_lim);
+      Range col_range(left_lim, right_lim);
+      mask(row_range, col_range) = 0;
+    }
+  }
+
+  // Detect new features.
+  vector<KeyPoint> new_features(0);
+  detector_ptr->detect(curr_img, new_features, mask);
+
+  // Collect the newly detected features based on the grid.
+  // Select the ones with the top response within each grid afterwards.
+  vector<vector<KeyPoint> > new_feature_sieve(
+      processor_config.grid_row*processor_config.grid_col);
+  for (const auto& feature : new_features) {
+    int row = static_cast<int>(feature.pt.y / grid_height);
+    int col = static_cast<int>(feature.pt.x / grid_width);
+    new_feature_sieve[
+        row*processor_config.grid_col+col].push_back(feature);
+  }
+
+  new_features.clear();
+  for (auto& item : new_feature_sieve) {
+    if (item.size() > processor_config.grid_max_feature_num) {
+      std::sort(item.begin(), item.end(),
+          &ImageProcessor::keyPointCompareByResponse);
+      item.erase(
+          item.begin()+processor_config.grid_max_feature_num, item.end());
+    }
+    new_features.insert(new_features.end(), item.begin(), item.end());
+  }
+
+  int detected_new_features = new_features.size();
+
+  // Find the stereo matched points for the newly
+  // detected features.
+  vector<cv::Point2f> cam0_points(new_features.size());
+  for (int i = 0; i < new_features.size(); ++i)
+    cam0_points[i] = new_features[i].pt;
+
+  vector<cv::Point2f> cam1_points(0);
+  vector<unsigned char> inlier_markers(0);
+  stereoMatch(cam0_points, cam1_points, inlier_markers);
+
+  vector<cv::Point2f> cam0_inliers(0);
+  vector<cv::Point2f> cam1_inliers(0);
+  vector<float> response_inliers(0);
+  for (int i = 0; i < inlier_markers.size(); ++i) {
+    if (inlier_markers[i] == 0) continue;
+    cam0_inliers.push_back(cam0_points[i]);
+    cam1_inliers.push_back(cam1_points[i]);
+    response_inliers.push_back(new_features[i].response);
+  }
+
+  int matched_new_features = cam0_inliers.size();
+
+  if (matched_new_features < 5 &&
+      static_cast<double>(matched_new_features)/
+      static_cast<double>(detected_new_features) < 0.1)
+    ROS_WARN("Images at [%f] seem unsynced...",
+        cam0_curr_img_ptr->header.stamp.toSec());
+
+  // Group the features into grids.
+  GridFeatures grid_new_features;
+  for (int code = 0; code <
+      processor_config.grid_row*processor_config.grid_col; ++code)
+    grid_new_features[code] = vector<FeatureMetaData>(0);
+
+  for (int i = 0; i < cam0_inliers.size(); ++i) {
+    const cv::Point2f& cam0_point = cam0_inliers[i];
+    const cv::Point2f& cam1_point = cam1_inliers[i];
+    const float& response = response_inliers[i];
+
+    int row = static_cast<int>(cam0_point.y / grid_height);
+    int col = static_cast<int>(cam0_point.x / grid_width);
+    int code = row*processor_config.grid_col + col;
+
+    FeatureMetaData new_feature;
+    new_feature.response = response;
+    new_feature.cam0_point = cam0_point;
+    new_feature.cam1_point = cam1_point;
+    grid_new_features[code].push_back(new_feature);
+  }
+
+  // Sort the new features in each grid based on their responses.
+  for (auto& item : grid_new_features)
+    std::sort(item.second.begin(), item.second.end(),
+        &ImageProcessor::featureCompareByResponse);
+
+  int new_added_feature_num = 0;
+  // Collect new features within each grid with high response.
+  for (int code = 0; code <
+      processor_config.grid_row*processor_config.grid_col; ++code) {
+    vector<FeatureMetaData>& features_this_grid = (*curr_features_ptr)[code];
+    vector<FeatureMetaData>& new_features_this_grid = grid_new_features[code];
+
+    if (features_this_grid.size() >=
+        processor_config.grid_min_feature_num) continue;
+
+    int vacancy_num = processor_config.grid_min_feature_num -
+        features_this_grid.size();
+    for (int k = 0;
+        k < vacancy_num && k < new_features_this_grid.size(); ++k) {
+      features_this_grid.push_back(new_features_this_grid[k]);
+      features_this_grid.back().id = next_feature_id++;
+      features_this_grid.back().lifetime = 1;
+
+      ++new_added_feature_num;
+    }
+  }
+
+  //printf("\033[0;33m detected: %d; matched: %d; new added feature: %d\033[0m\n",
+  //    detected_new_features, matched_new_features, new_added_feature_num);
+
+  return;
+}
+
+void ImageProcessor::pruneGridFeatures() {
+  for (auto& item : *curr_features_ptr) {
+    auto& grid_features = item.second;
+    // Continue if the number of features in this grid does
+    // not exceed the upper bound.
+    if (grid_features.size() <=
+        processor_config.grid_max_feature_num) continue;
+    std::sort(grid_features.begin(), grid_features.end(),
+        &ImageProcessor::featureCompareByLifetime);
+    grid_features.erase(grid_features.begin()+
+        processor_config.grid_max_feature_num,
+        grid_features.end());
+  }
+  return;
+}
+
+void ImageProcessor::undistortPoints(
+    const vector<cv::Point2f>& pts_in,
+    const cv::Vec4d& intrinsics,
+    const string& distortion_model,
+    const cv::Vec4d& distortion_coeffs,
+    vector<cv::Point2f>& pts_out,
+    const cv::Matx33d& rectification_matrix,
+    const cv::Vec4d& new_intrinsics) {
+
+  if (pts_in.size() == 0) return;
+
+  const cv::Matx33d K(
+      intrinsics[0], 0.0, intrinsics[2],
+      0.0, intrinsics[1], intrinsics[3],
+      0.0, 0.0, 1.0);
+
+  const cv::Matx33d K_new(
+      new_intrinsics[0], 0.0, new_intrinsics[2],
+      0.0, new_intrinsics[1], new_intrinsics[3],
+      0.0, 0.0, 1.0);
+
+  if (distortion_model == "radtan") {
+    cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
+        rectification_matrix, K_new);
+  } else if (distortion_model == "equidistant") {
+    cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
+        rectification_matrix, K_new);
+  } else {
+    ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
+        distortion_model.c_str());
+    cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
+        rectification_matrix, K_new);
+  }
+
+  return;
+}
+
+vector<cv::Point2f> ImageProcessor::distortPoints(
+    const vector<cv::Point2f>& pts_in,
+    const cv::Vec4d& intrinsics,
+    const string& distortion_model,
+    const cv::Vec4d& distortion_coeffs) {
+
+  const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
+      0.0, intrinsics[1], intrinsics[3],
+      0.0, 0.0, 1.0);
+
+  vector<cv::Point2f> pts_out;
+  if (distortion_model == "radtan") {
+    vector<cv::Point3f> homogenous_pts;
+    cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
+    cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
+        distortion_coeffs, pts_out);
+  } else if (distortion_model == "equidistant") {
+    cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
+  } else {
+    ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
+        distortion_model.c_str());
+    vector<cv::Point3f> homogenous_pts;
+    cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
+    cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
+        distortion_coeffs, pts_out);
+  }
+
+  return pts_out;
+}
+
+void ImageProcessor::integrateImuData(
+    Matx33f& cam0_R_p_c, Matx33f& cam1_R_p_c) {
+  // Find the start and the end limits within the imu msg buffer.
+  auto begin_iter = imu_msg_buffer.begin();
+  while (begin_iter != imu_msg_buffer.end()) {
+    if ((begin_iter->header.stamp-
+          cam0_prev_img_ptr->header.stamp).toSec() < -0.01)
+      ++begin_iter;
+    else
+      break;
+  }
+
+  auto end_iter = begin_iter;
+  while (end_iter != imu_msg_buffer.end()) {
+    if ((end_iter->header.stamp-
+          cam0_curr_img_ptr->header.stamp).toSec() < 0.005)
+      ++end_iter;
+    else
+      break;
+  }
+
+  // Compute the mean angular velocity in the IMU frame.
+  Vec3f mean_ang_vel(0.0, 0.0, 0.0);
+  for (auto iter = begin_iter; iter < end_iter; ++iter)
+    mean_ang_vel += Vec3f(iter->angular_velocity.x,
+        iter->angular_velocity.y, iter->angular_velocity.z);
+
+  if (end_iter-begin_iter > 0)
+    mean_ang_vel *= 1.0f / (end_iter-begin_iter);
+
+  // Transform the mean angular velocity from the IMU
+  // frame to the cam0 and cam1 frames.
+  Vec3f cam0_mean_ang_vel = R_cam0_imu.t() * mean_ang_vel;
+  Vec3f cam1_mean_ang_vel = R_cam1_imu.t() * mean_ang_vel;
+
+  // Compute the relative rotation.
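+  // Note: assuming a piecewise-constant angular velocity over the frame gap,
+  // the inter-frame rotation is the exponential map of the axis-angle vector
+  // w*dt, which cv::Rodrigues computes. A minimal sketch with hypothetical
+  // values:
+  //
+  //   cv::Vec3f w(0.0f, 0.0f, 0.1f);   // rad/s about the optical axis
+  //   cv::Matx33f R;
+  //   cv::Rodrigues(w * 0.05f, R);     // rotation accumulated over 50 ms
+  //
+  // The transposes below flip the result into R_p_c, the rotation that maps
+  // a vector expressed in the previous camera frame into the current one.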
+  double dtime = (cam0_curr_img_ptr->header.stamp-
+      cam0_prev_img_ptr->header.stamp).toSec();
+  Rodrigues(cam0_mean_ang_vel*dtime, cam0_R_p_c);
+  Rodrigues(cam1_mean_ang_vel*dtime, cam1_R_p_c);
+  cam0_R_p_c = cam0_R_p_c.t();
+  cam1_R_p_c = cam1_R_p_c.t();
+
+  // Delete the used and stale imu messages.
+  imu_msg_buffer.erase(imu_msg_buffer.begin(), end_iter);
+  return;
+}
+
+void ImageProcessor::rescalePoints(
+    vector<Point2f>& pts1, vector<Point2f>& pts2,
+    float& scaling_factor) {
+
+  scaling_factor = 0.0f;
+
+  for (int i = 0; i < pts1.size(); ++i) {
+    scaling_factor += sqrt(pts1[i].dot(pts1[i]));
+    scaling_factor += sqrt(pts2[i].dot(pts2[i]));
+  }
+
+  scaling_factor = (pts1.size()+pts2.size()) /
+      scaling_factor * sqrt(2.0f);
+
+  for (int i = 0; i < pts1.size(); ++i) {
+    pts1[i] *= scaling_factor;
+    pts2[i] *= scaling_factor;
+  }
+
+  return;
+}
+
+void ImageProcessor::twoPointRansac(
+    const vector<Point2f>& pts1, const vector<Point2f>& pts2,
+    const cv::Matx33f& R_p_c, const cv::Vec4d& intrinsics,
+    const std::string& distortion_model,
+    const cv::Vec4d& distortion_coeffs,
+    const double& inlier_error,
+    const double& success_probability,
+    vector<int>& inlier_markers) {
+
+  // Check that the two input point sets have the same size.
+  if (pts1.size() != pts2.size())
+    ROS_ERROR("Sets of different size (%lu and %lu) are used...",
+        pts1.size(), pts2.size());
+
+  double norm_pixel_unit = 2.0 / (intrinsics[0]+intrinsics[1]);
+  int iter_num = static_cast<int>(
+      ceil(log(1-success_probability) / log(1-0.7*0.7)));
+
+  // Initially, mark all points as inliers.
+  inlier_markers.clear();
+  inlier_markers.resize(pts1.size(), 1);
+
+  // Undistort all the points.
+  vector<Point2f> pts1_undistorted(pts1.size());
+  vector<Point2f> pts2_undistorted(pts2.size());
+  undistortPoints(
+      pts1, intrinsics, distortion_model,
+      distortion_coeffs, pts1_undistorted);
+  undistortPoints(
+      pts2, intrinsics, distortion_model,
+      distortion_coeffs, pts2_undistorted);
+
+  // Compensate the points in the previous image with
+  // the relative rotation.
+  for (auto& pt : pts1_undistorted) {
+    Vec3f pt_h(pt.x, pt.y, 1.0f);
+    //Vec3f pt_hc = dR * pt_h;
+    Vec3f pt_hc = R_p_c * pt_h;
+    pt.x = pt_hc[0];
+    pt.y = pt_hc[1];
+  }
+
+  // Normalize the points to gain numerical stability.
+  float scaling_factor = 0.0f;
+  rescalePoints(pts1_undistorted, pts2_undistorted, scaling_factor);
+  norm_pixel_unit *= scaling_factor;
+
+  // Compute the difference between previous and current points,
+  // which will be used frequently later.
+  vector<Point2f> pts_diff(pts1_undistorted.size());
+  for (int i = 0; i < pts1_undistorted.size(); ++i)
+    pts_diff[i] = pts1_undistorted[i] - pts2_undistorted[i];
+
+  // Mark the point pairs with a large difference directly;
+  // the mean distance of the remaining point pairs is
+  // computed along the way.
+  double mean_pt_distance = 0.0;
+  int raw_inlier_cntr = 0;
+  for (int i = 0; i < pts_diff.size(); ++i) {
+    double distance = sqrt(pts_diff[i].dot(pts_diff[i]));
+    // A 50-pixel distance is a pretty large tolerance for normal motion.
+    // To be used with aggressive motion, this tolerance should be
+    // increased significantly to match the usage.
+    if (distance > 50.0*norm_pixel_unit) {
+      inlier_markers[i] = 0;
+    } else {
+      mean_pt_distance += distance;
+      ++raw_inlier_cntr;
+    }
+  }
+  mean_pt_distance /= raw_inlier_cntr;
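+
+  // Note: the RANSAC model used below comes from the epipolar constraint
+  // after rotation compensation, p2^T * [t]x * p1 = 0, i.e.
+  // t . (p1 x p2) = 0. With p1 = (x1, y1, 1) and p2 = (x2, y2, 1) this
+  // expands to the linear equation
+  //
+  //   t_x*(y1 - y2) - t_y*(x1 - x2) + t_z*(x1*y2 - y1*x2) = 0,
+  //
+  // exactly the three columns stacked into coeff_t below. Since t is only
+  // recoverable up to scale it has two degrees of freedom, so two point
+  // pairs determine a model -- hence "2-point" RANSAC.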
+  // If the current number of inliers is less than 3, just mark
+  // all input as outliers. This case can happen with fast
+  // rotation where very few features are tracked.
+  if (raw_inlier_cntr < 3) {
+    for (auto& marker : inlier_markers) marker = 0;
+    return;
+  }
+
+  // Before doing 2-point RANSAC, we have to check if the motion
+  // is degenerate, meaning that there is no translation between
+  // the frames, in which case the RANSAC model does not work.
+  // If so, the distance between the matched points will
+  // be almost 0.
+  //if (mean_pt_distance < inlier_error*norm_pixel_unit) {
+  if (mean_pt_distance < norm_pixel_unit) {
+    //ROS_WARN_THROTTLE(1.0, "Degenerated motion...");
+    for (int i = 0; i < pts_diff.size(); ++i) {
+      if (inlier_markers[i] == 0) continue;
+      if (sqrt(pts_diff[i].dot(pts_diff[i])) >
+          inlier_error*norm_pixel_unit)
+        inlier_markers[i] = 0;
+    }
+    return;
+  }
+
+  // In the case of general motion, the RANSAC model can be applied.
+  // The three columns correspond to tx, ty, and tz respectively.
+  MatrixXd coeff_t(pts_diff.size(), 3);
+  for (int i = 0; i < pts_diff.size(); ++i) {
+    coeff_t(i, 0) = pts_diff[i].y;
+    coeff_t(i, 1) = -pts_diff[i].x;
+    coeff_t(i, 2) = pts1_undistorted[i].x*pts2_undistorted[i].y -
+        pts1_undistorted[i].y*pts2_undistorted[i].x;
+  }
+
+  vector<int> raw_inlier_idx;
+  for (int i = 0; i < inlier_markers.size(); ++i) {
+    if (inlier_markers[i] != 0)
+      raw_inlier_idx.push_back(i);
+  }
+
+  vector<int> best_inlier_set;
+  double best_error = 1e10;
+  random_numbers::RandomNumberGenerator random_gen;
+
+  for (int iter_idx = 0; iter_idx < iter_num; ++iter_idx) {
+    // Randomly select two point pairs. Although this looks like a
+    // roundabout way of selecting two pairs, it efficiently avoids
+    // selecting the same pair twice.
+    int pair_idx1 = raw_inlier_idx[random_gen.uniformInteger(
+        0, raw_inlier_idx.size()-1)];
+    int idx_diff = random_gen.uniformInteger(
+        1, raw_inlier_idx.size()-1);
+    int pair_idx2 = pair_idx1+idx_diff < raw_inlier_idx.size() ?
+        pair_idx1+idx_diff : pair_idx1+idx_diff-raw_inlier_idx.size();
+
+    // Construct the model.
+    Vector2d coeff_tx(coeff_t(pair_idx1, 0), coeff_t(pair_idx2, 0));
+    Vector2d coeff_ty(coeff_t(pair_idx1, 1), coeff_t(pair_idx2, 1));
+    Vector2d coeff_tz(coeff_t(pair_idx1, 2), coeff_t(pair_idx2, 2));
+    vector<double> coeff_l1_norm(3);
+    coeff_l1_norm[0] = coeff_tx.lpNorm<1>();
+    coeff_l1_norm[1] = coeff_ty.lpNorm<1>();
+    coeff_l1_norm[2] = coeff_tz.lpNorm<1>();
+    int base_indicator = min_element(coeff_l1_norm.begin(),
+        coeff_l1_norm.end())-coeff_l1_norm.begin();
+
+    Vector3d model(0.0, 0.0, 0.0);
+    if (base_indicator == 0) {
+      Matrix2d A;
+      A << coeff_ty, coeff_tz;
+      Vector2d solution = A.inverse() * (-coeff_tx);
+      model(0) = 1.0;
+      model(1) = solution(0);
+      model(2) = solution(1);
+    } else if (base_indicator == 1) {
+      Matrix2d A;
+      A << coeff_tx, coeff_tz;
+      Vector2d solution = A.inverse() * (-coeff_ty);
+      model(0) = solution(0);
+      model(1) = 1.0;
+      model(2) = solution(1);
+    } else {
+      Matrix2d A;
+      A << coeff_tx, coeff_ty;
+      Vector2d solution = A.inverse() * (-coeff_tz);
+      model(0) = solution(0);
+      model(1) = solution(1);
+      model(2) = 1.0;
+    }
+
+    // Find all the inliers among the point pairs.
+    VectorXd error = coeff_t * model;
+
+    vector<int> inlier_set;
+    for (int i = 0; i < error.rows(); ++i) {
+      if (inlier_markers[i] == 0) continue;
+      if (std::abs(error(i)) < inlier_error*norm_pixel_unit)
+        inlier_set.push_back(i);
+    }
+
+    // If the number of inliers is small, the current
+    // model is probably wrong.
+    if (inlier_set.size() < 0.2*pts1_undistorted.size())
+      continue;
+
+    // Refit the model using all of the possible inliers.
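+    // Note: with one component of t pinned to 1 (the base chosen above),
+    // the remaining two components follow from solving the stacked
+    // constraints A*x = b in the least-squares sense via the normal
+    // equations,
+    //
+    //   x = (A^T A)^{-1} A^T b,
+    //
+    // which the branches below write out explicitly for each choice of base.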
+    VectorXd coeff_tx_better(inlier_set.size());
+    VectorXd coeff_ty_better(inlier_set.size());
+    VectorXd coeff_tz_better(inlier_set.size());
+    for (int i = 0; i < inlier_set.size(); ++i) {
+      coeff_tx_better(i) = coeff_t(inlier_set[i], 0);
+      coeff_ty_better(i) = coeff_t(inlier_set[i], 1);
+      coeff_tz_better(i) = coeff_t(inlier_set[i], 2);
+    }
+
+    Vector3d model_better(0.0, 0.0, 0.0);
+    if (base_indicator == 0) {
+      MatrixXd A(inlier_set.size(), 2);
+      A << coeff_ty_better, coeff_tz_better;
+      Vector2d solution =
+          (A.transpose() * A).inverse() * A.transpose() * (-coeff_tx_better);
+      model_better(0) = 1.0;
+      model_better(1) = solution(0);
+      model_better(2) = solution(1);
+    } else if (base_indicator == 1) {
+      MatrixXd A(inlier_set.size(), 2);
+      A << coeff_tx_better, coeff_tz_better;
+      Vector2d solution =
+          (A.transpose() * A).inverse() * A.transpose() * (-coeff_ty_better);
+      model_better(0) = solution(0);
+      model_better(1) = 1.0;
+      model_better(2) = solution(1);
+    } else {
+      MatrixXd A(inlier_set.size(), 2);
+      A << coeff_tx_better, coeff_ty_better;
+      Vector2d solution =
+          (A.transpose() * A).inverse() * A.transpose() * (-coeff_tz_better);
+      model_better(0) = solution(0);
+      model_better(1) = solution(1);
+      model_better(2) = 1.0;
+    }
+
+    // Compute the error and update the best model if possible.
+    VectorXd new_error = coeff_t * model_better;
+
+    double this_error = 0.0;
+    for (const auto& inlier_idx : inlier_set)
+      this_error += std::abs(new_error(inlier_idx));
+    this_error /= inlier_set.size();
+
+    if (inlier_set.size() > best_inlier_set.size()) {
+      best_error = this_error;
+      best_inlier_set = inlier_set;
+    }
+  }
+
+  // Fill in the markers.
+  inlier_markers.clear();
+  inlier_markers.resize(pts1.size(), 0);
+  for (const auto& inlier_idx : best_inlier_set)
+    inlier_markers[inlier_idx] = 1;
+
+  //printf("inlier ratio: %lu/%lu\n",
+  //    best_inlier_set.size(), inlier_markers.size());
+
+  return;
+}
+
+void ImageProcessor::publish() {
+
+  // Publish features.
+  CameraMeasurementPtr feature_msg_ptr(new CameraMeasurement);
+  feature_msg_ptr->header.stamp = cam0_curr_img_ptr->header.stamp;
+
+  vector<FeatureIDType> curr_ids(0);
+  vector<Point2f> curr_cam0_points(0);
+  vector<Point2f> curr_cam1_points(0);
+
+  for (const auto& grid_features : (*curr_features_ptr)) {
+    for (const auto& feature : grid_features.second) {
+      curr_ids.push_back(feature.id);
+      curr_cam0_points.push_back(feature.cam0_point);
+      curr_cam1_points.push_back(feature.cam1_point);
+    }
+  }
+
+  vector<Point2f> curr_cam0_points_undistorted(0);
+  vector<Point2f> curr_cam1_points_undistorted(0);
+
+  undistortPoints(
+      curr_cam0_points, cam0_intrinsics, cam0_distortion_model,
+      cam0_distortion_coeffs, curr_cam0_points_undistorted);
+  undistortPoints(
+      curr_cam1_points, cam1_intrinsics, cam1_distortion_model,
+      cam1_distortion_coeffs, curr_cam1_points_undistorted);
+
+  for (int i = 0; i < curr_ids.size(); ++i) {
+    feature_msg_ptr->features.push_back(FeatureMeasurement());
+    feature_msg_ptr->features[i].id = curr_ids[i];
+    feature_msg_ptr->features[i].u0 = curr_cam0_points_undistorted[i].x;
+    feature_msg_ptr->features[i].v0 = curr_cam0_points_undistorted[i].y;
+    feature_msg_ptr->features[i].u1 = curr_cam1_points_undistorted[i].x;
+    feature_msg_ptr->features[i].v1 = curr_cam1_points_undistorted[i].y;
+  }
+
+  feature_pub.publish(feature_msg_ptr);
+
+  // Publish tracking info.
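+  // Note: the four counters below mirror the fields of TrackingInfo.msg and
+  // record the feature count after each stage of the pipeline
+  // (before_tracking -> after_tracking -> after_matching -> after_ransac),
+  // so per-stage attrition can be inspected offline from a recorded bag.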
+  TrackingInfoPtr tracking_info_msg_ptr(new TrackingInfo());
+  tracking_info_msg_ptr->header.stamp = cam0_curr_img_ptr->header.stamp;
+  tracking_info_msg_ptr->before_tracking = before_tracking;
+  tracking_info_msg_ptr->after_tracking = after_tracking;
+  tracking_info_msg_ptr->after_matching = after_matching;
+  tracking_info_msg_ptr->after_ransac = after_ransac;
+  tracking_info_pub.publish(tracking_info_msg_ptr);
+
+  return;
+}
+
+void ImageProcessor::drawFeaturesMono() {
+  // Colors for different features.
+  Scalar tracked(0, 255, 0);
+  Scalar new_feature(0, 255, 255);
+
+  static int grid_height =
+    cam0_curr_img_ptr->image.rows / processor_config.grid_row;
+  static int grid_width =
+    cam0_curr_img_ptr->image.cols / processor_config.grid_col;
+
+  // Create an output image.
+  int img_height = cam0_curr_img_ptr->image.rows;
+  int img_width = cam0_curr_img_ptr->image.cols;
+  Mat out_img(img_height, img_width, CV_8UC3);
+  cvtColor(cam0_curr_img_ptr->image, out_img, CV_GRAY2RGB);
+
+  // Draw grids on the image.
+  for (int i = 1; i < processor_config.grid_row; ++i) {
+    Point pt1(0, i*grid_height);
+    Point pt2(img_width, i*grid_height);
+    line(out_img, pt1, pt2, Scalar(255, 0, 0));
+  }
+  for (int i = 1; i < processor_config.grid_col; ++i) {
+    Point pt1(i*grid_width, 0);
+    Point pt2(i*grid_width, img_height);
+    line(out_img, pt1, pt2, Scalar(255, 0, 0));
+  }
+
+  // Collect feature ids in the previous frame.
+  vector<FeatureIDType> prev_ids(0);
+  for (const auto& grid_features : *prev_features_ptr)
+    for (const auto& feature : grid_features.second)
+      prev_ids.push_back(feature.id);
+
+  // Collect feature points in the previous frame.
+  map<FeatureIDType, Point2f> prev_points;
+  for (const auto& grid_features : *prev_features_ptr)
+    for (const auto& feature : grid_features.second)
+      prev_points[feature.id] = feature.cam0_point;
+
+  // Collect feature points in the current frame.
+  map<FeatureIDType, Point2f> curr_points;
+  for (const auto& grid_features : *curr_features_ptr)
+    for (const auto& feature : grid_features.second)
+      curr_points[feature.id] = feature.cam0_point;
+
+  // Draw tracked features.
+  for (const auto& id : prev_ids) {
+    if (prev_points.find(id) != prev_points.end() &&
+        curr_points.find(id) != curr_points.end()) {
+      cv::Point2f prev_pt = prev_points[id];
+      cv::Point2f curr_pt = curr_points[id];
+      circle(out_img, curr_pt, 3, tracked);
+      line(out_img, prev_pt, curr_pt, tracked, 1);
+
+      prev_points.erase(id);
+      curr_points.erase(id);
+    }
+  }
+
+  // Draw new features.
+  for (const auto& new_curr_point : curr_points) {
+    cv::Point2f pt = new_curr_point.second;
+    circle(out_img, pt, 3, new_feature, -1);
+  }
+
+  imshow("Feature", out_img);
+  waitKey(5);
+}
+
+void ImageProcessor::drawFeaturesStereo() {
+
+  if (debug_stereo_pub.getNumSubscribers() > 0) {
+    // Colors for different features.
+    Scalar tracked(0, 255, 0);
+    Scalar new_feature(0, 255, 255);
+
+    static int grid_height =
+      cam0_curr_img_ptr->image.rows / processor_config.grid_row;
+    static int grid_width =
+      cam0_curr_img_ptr->image.cols / processor_config.grid_col;
+
+    // Create an output image.
+    int img_height = cam0_curr_img_ptr->image.rows;
+    int img_width = cam0_curr_img_ptr->image.cols;
+    Mat out_img(img_height, img_width*2, CV_8UC3);
+    cvtColor(cam0_curr_img_ptr->image,
+        out_img.colRange(0, img_width), CV_GRAY2RGB);
+    cvtColor(cam1_curr_img_ptr->image,
+        out_img.colRange(img_width, img_width*2), CV_GRAY2RGB);
+
+    // Draw grids on the image.
+    for (int i = 1; i < processor_config.grid_row; ++i) {
+      Point pt1(0, i*grid_height);
+      Point pt2(img_width*2, i*grid_height);
+      line(out_img, pt1, pt2, Scalar(255, 0, 0));
+    }
+    for (int i = 1; i < processor_config.grid_col; ++i) {
+      Point pt1(i*grid_width, 0);
+      Point pt2(i*grid_width, img_height);
+      line(out_img, pt1, pt2, Scalar(255, 0, 0));
+    }
+    for (int i = 1; i < processor_config.grid_col; ++i) {
+      Point pt1(i*grid_width+img_width, 0);
+      Point pt2(i*grid_width+img_width, img_height);
+      line(out_img, pt1, pt2, Scalar(255, 0, 0));
+    }
+
+    // Collect feature ids in the previous frame.
+    vector<FeatureIDType> prev_ids(0);
+    for (const auto& grid_features : *prev_features_ptr)
+      for (const auto& feature : grid_features.second)
+        prev_ids.push_back(feature.id);
+
+    // Collect feature points in the previous frame.
+    map<FeatureIDType, Point2f> prev_cam0_points;
+    map<FeatureIDType, Point2f> prev_cam1_points;
+    for (const auto& grid_features : *prev_features_ptr)
+      for (const auto& feature : grid_features.second) {
+        prev_cam0_points[feature.id] = feature.cam0_point;
+        prev_cam1_points[feature.id] = feature.cam1_point;
+      }
+
+    // Collect feature points in the current frame.
+    map<FeatureIDType, Point2f> curr_cam0_points;
+    map<FeatureIDType, Point2f> curr_cam1_points;
+    for (const auto& grid_features : *curr_features_ptr)
+      for (const auto& feature : grid_features.second) {
+        curr_cam0_points[feature.id] = feature.cam0_point;
+        curr_cam1_points[feature.id] = feature.cam1_point;
+      }
+
+    // Draw tracked features.
+    for (const auto& id : prev_ids) {
+      if (prev_cam0_points.find(id) != prev_cam0_points.end() &&
+          curr_cam0_points.find(id) != curr_cam0_points.end()) {
+        cv::Point2f prev_pt0 = prev_cam0_points[id];
+        cv::Point2f prev_pt1 = prev_cam1_points[id] + Point2f(img_width, 0.0);
+        cv::Point2f curr_pt0 = curr_cam0_points[id];
+        cv::Point2f curr_pt1 = curr_cam1_points[id] + Point2f(img_width, 0.0);
+
+        circle(out_img, curr_pt0, 3, tracked, -1);
+        circle(out_img, curr_pt1, 3, tracked, -1);
+        line(out_img, prev_pt0, curr_pt0, tracked, 1);
+        line(out_img, prev_pt1, curr_pt1, tracked, 1);
+
+        prev_cam0_points.erase(id);
+        prev_cam1_points.erase(id);
+        curr_cam0_points.erase(id);
+        curr_cam1_points.erase(id);
+      }
+    }
+
+    // Draw new features.
+    for (const auto& new_cam0_point : curr_cam0_points) {
+      cv::Point2f pt0 = new_cam0_point.second;
+      cv::Point2f pt1 = curr_cam1_points[new_cam0_point.first] +
+        Point2f(img_width, 0.0);
+
+      circle(out_img, pt0, 3, new_feature, -1);
+      circle(out_img, pt1, 3, new_feature, -1);
+    }
+
+    cv_bridge::CvImage debug_image(cam0_curr_img_ptr->header, "bgr8", out_img);
+    debug_stereo_pub.publish(debug_image.toImageMsg());
+  }
+  //imshow("Feature", out_img);
+  //waitKey(5);
+
+  return;
+}
+
+void ImageProcessor::updateFeatureLifetime() {
+  for (int code = 0; code <
+      processor_config.grid_row*processor_config.grid_col; ++code) {
+    vector<FeatureMetaData>& features = (*curr_features_ptr)[code];
+    for (const auto& feature : features) {
+      if (feature_lifetime.find(feature.id) == feature_lifetime.end())
+        feature_lifetime[feature.id] = 1;
+      else
+        ++feature_lifetime[feature.id];
+    }
+  }
+
+  return;
+}
+
+void ImageProcessor::featureLifetimeStatistics() {
+
+  map<int, int> lifetime_statistics;
+  for (const auto& data : feature_lifetime) {
+    if (lifetime_statistics.find(data.second) ==
+        lifetime_statistics.end())
+      lifetime_statistics[data.second] = 1;
+    else
+      ++lifetime_statistics[data.second];
+  }
+
+  for (const auto& data : lifetime_statistics)
+    cout << data.first << " : " << data.second << endl;
+
+  return;
+}
+
+} // end namespace msckf_vio
diff --git a/src/image_processor_nodelet.cpp b/src/image_processor_nodelet.cpp
new file mode 100644
index 0000000..e172693
--- /dev/null
+++ b/src/image_processor_nodelet.cpp
@@ -0,0 +1,24 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#include <msckf_vio/image_processor_nodelet.h>
+
+namespace msckf_vio {
+void ImageProcessorNodelet::onInit() {
+  img_processor_ptr.reset(new ImageProcessor(getPrivateNodeHandle()));
+  if (!img_processor_ptr->initialize()) {
+    ROS_ERROR("Cannot initialize Image Processor...");
+    return;
+  }
+  return;
+}
+
+PLUGINLIB_DECLARE_CLASS(msckf_vio, ImageProcessorNodelet,
+    msckf_vio::ImageProcessorNodelet, nodelet::Nodelet);
+
+} // end namespace msckf_vio
+
diff --git a/src/msckf_vio.cpp b/src/msckf_vio.cpp
new file mode 100644
index 0000000..e0b0b01
--- /dev/null
+++ b/src/msckf_vio.cpp
@@ -0,0 +1,1492 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#include <iostream>
+#include <iomanip>
+#include <cmath>
+#include <iterator>
+#include <algorithm>
+
+#include <Eigen/SVD>
+#include <Eigen/QR>
+#include <Eigen/SparseCore>
+#include <Eigen/SPQRSupport>
+#include <boost/math/distributions/chi_squared.hpp>
+
+#include <eigen_conversions/eigen_msg.h>
+#include <tf_conversions/tf_eigen.h>
+#include <sensor_msgs/PointCloud2.h>
+#include <pcl_conversions/pcl_conversions.h>
+#include <pcl/point_types.h>
+
+#include <msckf_vio/msckf_vio.h>
+#include <msckf_vio/math_utils.hpp>
+
+using namespace std;
+using namespace Eigen;
+
+namespace msckf_vio{
+// Static member variables in IMUState class.
+StateIDType IMUState::next_id = 0;
+double IMUState::gyro_noise = 0.001;
+double IMUState::acc_noise = 0.01;
+double IMUState::gyro_bias_noise = 0.001;
+double IMUState::acc_bias_noise = 0.01;
+Vector3d IMUState::gravity = Vector3d(0, 0, -GRAVITY_ACCELERATION);
+Isometry3d IMUState::T_imu_body = Isometry3d::Identity();
+
+// Static member variables in CAMState class.
+Isometry3d CAMState::T_cam0_cam1 = Isometry3d::Identity();
+
+// Static member variables in Feature class.
+FeatureIDType Feature::next_id = 0;
+double Feature::observation_noise = 0.01;
+Feature::OptimizationConfig Feature::optimization_config;
+
+map<int, double> MsckfVio::chi_squared_test_table;
+
+MsckfVio::MsckfVio(ros::NodeHandle& pnh):
+  is_gravity_set(false),
+  is_first_img(true),
+  nh(pnh) {
+  return;
+}
+
+bool MsckfVio::loadParameters() {
+  // Frame id
+  nh.param<string>("fixed_frame_id", fixed_frame_id, "world");
+  nh.param<string>("child_frame_id", child_frame_id, "robot");
+  nh.param("publish_tf", publish_tf, true);
+  nh.param("frame_rate", frame_rate, 40.0);
+  nh.param("position_std_threshold", position_std_threshold, 8.0);
+
+  nh.param("rotation_threshold", rotation_threshold, 0.2618);
+  nh.param("translation_threshold", translation_threshold, 0.4);
+  nh.param("tracking_rate_threshold", tracking_rate_threshold, 0.5);
+
+  // Feature optimization parameters
+  nh.param("feature/config/translation_threshold",
+      Feature::optimization_config.translation_threshold, 0.2);
+
+  // Noise related parameters
+  nh.param("noise/gyro", IMUState::gyro_noise, 0.001);
+  nh.param("noise/acc", IMUState::acc_noise, 0.01);
+  nh.param("noise/gyro_bias", IMUState::gyro_bias_noise, 0.001);
+  nh.param("noise/acc_bias", IMUState::acc_bias_noise, 0.01);
+  nh.param("noise/feature", Feature::observation_noise, 0.01);
+
+  // Use variance instead of standard deviation.
+  IMUState::gyro_noise *= IMUState::gyro_noise;
+  IMUState::acc_noise *= IMUState::acc_noise;
+  IMUState::gyro_bias_noise *= IMUState::gyro_bias_noise;
+  IMUState::acc_bias_noise *= IMUState::acc_bias_noise;
+  Feature::observation_noise *= Feature::observation_noise;
+
+  // Set the initial IMU state.
+  // The initial orientation and position will be set to the origin
+  // implicitly. But the initial velocity and bias can be
+  // set by parameters.
+  // TODO: is it reasonable to set the initial bias to 0?
+  nh.param("initial_state/velocity/x",
+      state_server.imu_state.velocity(0), 0.0);
+  nh.param("initial_state/velocity/y",
+      state_server.imu_state.velocity(1), 0.0);
+  nh.param("initial_state/velocity/z",
+      state_server.imu_state.velocity(2), 0.0);
+
+  // The initial covariance of orientation and position can be
+  // set to 0. But for velocity, bias and extrinsic parameters,
+  // there should be nontrivial uncertainty.
+  double gyro_bias_cov, acc_bias_cov, velocity_cov;
+  nh.param("initial_covariance/velocity",
+      velocity_cov, 0.25);
+  nh.param("initial_covariance/gyro_bias",
+      gyro_bias_cov, 1e-4);
+  nh.param("initial_covariance/acc_bias",
+      acc_bias_cov, 1e-2);
+
+  double extrinsic_rotation_cov, extrinsic_translation_cov;
+  nh.param("initial_covariance/extrinsic_rotation_cov",
+      extrinsic_rotation_cov, 3.0462e-4);
+  nh.param("initial_covariance/extrinsic_translation_cov",
+      extrinsic_translation_cov, 1e-4);
+
+  state_server.state_cov = MatrixXd::Zero(21, 21);
+  for (int i = 3; i < 6; ++i)
+    state_server.state_cov(i, i) = gyro_bias_cov;
+  for (int i = 6; i < 9; ++i)
+    state_server.state_cov(i, i) = velocity_cov;
+  for (int i = 9; i < 12; ++i)
+    state_server.state_cov(i, i) = acc_bias_cov;
+  for (int i = 15; i < 18; ++i)
+    state_server.state_cov(i, i) = extrinsic_rotation_cov;
+  for (int i = 18; i < 21; ++i)
+    state_server.state_cov(i, i) = extrinsic_translation_cov;
+
+  // Transformation offsets between the frames involved.
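+  // The extrinsics below arrive as flattened row-major 4x4 homogeneous
+  // transforms (e.g. from a Kalibr camchain yaml): rotation entry (r, c)
+  // sits at index 4*r+c and the translation at 4*r+3. A small helper along
+  // these lines (hypothetical sketch, not part of this commit) could
+  // replace the three repeated parses below:
+  //   Isometry3d parseIsometry(const vector<double>& T) {
+  //     Isometry3d iso = Isometry3d::Identity();
+  //     for (int r = 0; r < 3; ++r) {
+  //       for (int c = 0; c < 3; ++c) iso.linear()(r, c) = T[4*r+c];
+  //       iso.translation()(r) = T[4*r+3];
+  //     }
+  //     return iso;
+  //   }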
+  Isometry3d T_imu_cam0;
+  vector<double> cam0_extrinsics(16);
+  nh.getParam("cam0/T_cam_imu", cam0_extrinsics);
+  T_imu_cam0.linear()(0, 0) = cam0_extrinsics[0];
+  T_imu_cam0.linear()(0, 1) = cam0_extrinsics[1];
+  T_imu_cam0.linear()(0, 2) = cam0_extrinsics[2];
+  T_imu_cam0.linear()(1, 0) = cam0_extrinsics[4];
+  T_imu_cam0.linear()(1, 1) = cam0_extrinsics[5];
+  T_imu_cam0.linear()(1, 2) = cam0_extrinsics[6];
+  T_imu_cam0.linear()(2, 0) = cam0_extrinsics[8];
+  T_imu_cam0.linear()(2, 1) = cam0_extrinsics[9];
+  T_imu_cam0.linear()(2, 2) = cam0_extrinsics[10];
+  T_imu_cam0.translation()(0) = cam0_extrinsics[3];
+  T_imu_cam0.translation()(1) = cam0_extrinsics[7];
+  T_imu_cam0.translation()(2) = cam0_extrinsics[11];
+  Isometry3d T_cam0_imu = T_imu_cam0.inverse();
+
+  Isometry3d T_imu_cam1;
+  vector<double> cam1_extrinsics(16);
+  nh.getParam("cam1/T_cam_imu", cam1_extrinsics);
+  T_imu_cam1.linear()(0, 0) = cam1_extrinsics[0];
+  T_imu_cam1.linear()(0, 1) = cam1_extrinsics[1];
+  T_imu_cam1.linear()(0, 2) = cam1_extrinsics[2];
+  T_imu_cam1.linear()(1, 0) = cam1_extrinsics[4];
+  T_imu_cam1.linear()(1, 1) = cam1_extrinsics[5];
+  T_imu_cam1.linear()(1, 2) = cam1_extrinsics[6];
+  T_imu_cam1.linear()(2, 0) = cam1_extrinsics[8];
+  T_imu_cam1.linear()(2, 1) = cam1_extrinsics[9];
+  T_imu_cam1.linear()(2, 2) = cam1_extrinsics[10];
+  T_imu_cam1.translation()(0) = cam1_extrinsics[3];
+  T_imu_cam1.translation()(1) = cam1_extrinsics[7];
+  T_imu_cam1.translation()(2) = cam1_extrinsics[11];
+  Isometry3d T_cam1_imu = T_imu_cam1.inverse();
+
+  Isometry3d T_body_imu;
+  vector<double> imu_extrinsics(16);
+  nh.getParam("T_imu_body", imu_extrinsics);
+  T_body_imu.linear()(0, 0) = imu_extrinsics[0];
+  T_body_imu.linear()(0, 1) = imu_extrinsics[1];
+  T_body_imu.linear()(0, 2) = imu_extrinsics[2];
+  T_body_imu.linear()(1, 0) = imu_extrinsics[4];
+  T_body_imu.linear()(1, 1) = imu_extrinsics[5];
+  T_body_imu.linear()(1, 2) = imu_extrinsics[6];
+  T_body_imu.linear()(2, 0) = imu_extrinsics[8];
+  T_body_imu.linear()(2, 1) = imu_extrinsics[9];
+  T_body_imu.linear()(2, 2) = imu_extrinsics[10];
+  T_body_imu.translation()(0) = imu_extrinsics[3];
+  T_body_imu.translation()(1) = imu_extrinsics[7];
+  T_body_imu.translation()(2) = imu_extrinsics[11];
+
+  //CAMState::T_imu_cam0 = T_cam0_imu.inverse();
+  state_server.imu_state.R_imu_cam0 = T_cam0_imu.linear().transpose();
+  state_server.imu_state.t_cam0_imu = T_cam0_imu.translation();
+  CAMState::T_cam0_cam1 = T_imu_cam1 * T_cam0_imu;
+  IMUState::T_imu_body = T_body_imu.inverse();
+
+  // Maximum number of camera states to be stored
+  nh.param("max_cam_state_size", max_cam_state_size, 30);
+
+  ROS_INFO("===========================================");
+  ROS_INFO("fixed frame id: %s", fixed_frame_id.c_str());
+  ROS_INFO("child frame id: %s", child_frame_id.c_str());
+  ROS_INFO("publish tf: %d", publish_tf);
+  ROS_INFO("frame rate: %f", frame_rate);
+  ROS_INFO("position std threshold: %f", position_std_threshold);
+  ROS_INFO("Keyframe rotation threshold: %f", rotation_threshold);
+  ROS_INFO("Keyframe translation threshold: %f", translation_threshold);
+  ROS_INFO("Keyframe tracking rate threshold: %f", tracking_rate_threshold);
+  ROS_INFO("gyro noise: %.10f", IMUState::gyro_noise);
+  ROS_INFO("gyro bias noise: %.10f", IMUState::gyro_bias_noise);
+  ROS_INFO("acc noise: %.10f", IMUState::acc_noise);
+  ROS_INFO("acc bias noise: %.10f", IMUState::acc_bias_noise);
+  ROS_INFO("observation noise: %.10f", Feature::observation_noise);
+  ROS_INFO("initial velocity: %f, %f, %f",
+      state_server.imu_state.velocity(0),
+      state_server.imu_state.velocity(1),
+      state_server.imu_state.velocity(2));
+  ROS_INFO("initial gyro bias cov: %f", gyro_bias_cov);
+  ROS_INFO("initial acc bias cov: %f", acc_bias_cov);
+  ROS_INFO("initial velocity cov: %f", velocity_cov);
+  ROS_INFO("initial extrinsic rotation cov: %f",
+      extrinsic_rotation_cov);
+  ROS_INFO("initial extrinsic translation cov: %f",
+      extrinsic_translation_cov);
+
+  cout << T_imu_cam1.linear() << endl;
+  cout << T_imu_cam1.translation().transpose() << endl;
+
+  ROS_INFO("max camera state #: %d", max_cam_state_size);
+  ROS_INFO("===========================================");
+  return true;
+}
+
+bool MsckfVio::createRosIO() {
+  odom_pub = nh.advertise<nav_msgs::Odometry>("odom", 10);
+  feature_pub = nh.advertise<sensor_msgs::PointCloud2>(
+      "feature_point_cloud", 10);
+
+  reset_srv = nh.advertiseService("reset",
+      &MsckfVio::resetCallback, this);
+
+  imu_sub = nh.subscribe("imu", 100,
+      &MsckfVio::imuCallback, this);
+  feature_sub = nh.subscribe("features", 40,
+      &MsckfVio::featureCallback, this);
+
+  mocap_odom_sub = nh.subscribe("mocap_odom", 10,
+      &MsckfVio::mocapOdomCallback, this);
+  mocap_odom_pub = nh.advertise<nav_msgs::Odometry>("gt_odom", 1);
+
+  return true;
+}
+
+bool MsckfVio::initialize() {
+  if (!loadParameters()) return false;
+  ROS_INFO("Finished loading ROS parameters...");
+
+  // Initialize state server
+  state_server.continuous_noise_cov =
+    Matrix<double, 12, 12>::Zero();
+  state_server.continuous_noise_cov.block<3, 3>(0, 0) =
+    Matrix3d::Identity()*IMUState::gyro_noise;
+  state_server.continuous_noise_cov.block<3, 3>(3, 3) =
+    Matrix3d::Identity()*IMUState::gyro_bias_noise;
+  state_server.continuous_noise_cov.block<3, 3>(6, 6) =
+    Matrix3d::Identity()*IMUState::acc_noise;
+  state_server.continuous_noise_cov.block<3, 3>(9, 9) =
+    Matrix3d::Identity()*IMUState::acc_bias_noise;
+
+  // Initialize the chi squared test table with confidence
+  // level 0.95.
+  for (int i = 1; i < 100; ++i) {
+    boost::math::chi_squared chi_squared_dist(i);
+    chi_squared_test_table[i] =
+      boost::math::quantile(chi_squared_dist, 0.05);
+  }
+
+  if (!createRosIO()) return false;
+  ROS_INFO("Finished creating ROS IO...");
+
+  return true;
+}
+
+void MsckfVio::imuCallback(
+    const sensor_msgs::ImuConstPtr& msg) {
+
+  // IMU msgs are pushed back into a buffer instead of
+  // being processed immediately. They are processed when
+  // the next image is available, which makes it easy to
+  // handle the transfer delay.
+  imu_msg_buffer.push_back(*msg);
+
+  if (!is_gravity_set) {
+    if (imu_msg_buffer.size() < 200) return;
+    //if (imu_msg_buffer.size() < 10) return;
+    initializeGravityAndBias();
+    is_gravity_set = true;
+  }
+
+  return;
+}
+
+void MsckfVio::initializeGravityAndBias() {
+
+  // Initialize gravity and gyro bias.
+  Vector3d sum_angular_vel = Vector3d::Zero();
+  Vector3d sum_linear_acc = Vector3d::Zero();
+
+  for (const auto& imu_msg : imu_msg_buffer) {
+    Vector3d angular_vel = Vector3d::Zero();
+    Vector3d linear_acc = Vector3d::Zero();
+
+    tf::vectorMsgToEigen(imu_msg.angular_velocity, angular_vel);
+    tf::vectorMsgToEigen(imu_msg.linear_acceleration, linear_acc);
+
+    sum_angular_vel += angular_vel;
+    sum_linear_acc += linear_acc;
+  }
+
+  state_server.imu_state.gyro_bias =
+    sum_angular_vel / imu_msg_buffer.size();
+  //IMUState::gravity =
+  //  -sum_linear_acc / imu_msg_buffer.size();
+  // This is the gravity in the IMU frame.
+  Vector3d gravity_imu =
+    sum_linear_acc / imu_msg_buffer.size();
+
+  // Initialize the initial orientation, so that the estimation
+  // is consistent with the inertial frame.
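+  // Note: when the platform is static, the averaged accelerometer reading
+  // measures the specific force, i.e. -gravity expressed in the IMU frame.
+  // FromTwoVectors(a, b) yields the quaternion rotating a onto b, so
+  // q0_i_w below aligns that reading with the world +z*g direction (the
+  // negative of the stored gravity vector); the state keeps the transposed,
+  // world-to-IMU, rotation.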
+  double gravity_norm = gravity_imu.norm();
+  IMUState::gravity = Vector3d(0.0, 0.0, -gravity_norm);
+
+  Quaterniond q0_i_w = Quaterniond::FromTwoVectors(
+      gravity_imu, -IMUState::gravity);
+  state_server.imu_state.orientation =
+    rotationToQuaternion(q0_i_w.toRotationMatrix().transpose());
+
+  return;
+}
+
+bool MsckfVio::resetCallback(
+    std_srvs::Trigger::Request& req,
+    std_srvs::Trigger::Response& res) {
+
+  ROS_WARN("Start resetting msckf vio...");
+  // Temporarily shutdown the subscribers to prevent the
+  // state from updating.
+  feature_sub.shutdown();
+  imu_sub.shutdown();
+
+  // Reset the IMU state.
+  IMUState& imu_state = state_server.imu_state;
+  imu_state.time = 0.0;
+  imu_state.orientation = Vector4d(0.0, 0.0, 0.0, 1.0);
+  imu_state.position = Vector3d::Zero();
+  imu_state.velocity = Vector3d::Zero();
+  imu_state.gyro_bias = Vector3d::Zero();
+  imu_state.acc_bias = Vector3d::Zero();
+  imu_state.orientation_null = Vector4d(0.0, 0.0, 0.0, 1.0);
+  imu_state.position_null = Vector3d::Zero();
+  imu_state.velocity_null = Vector3d::Zero();
+
+  // Remove all existing camera states.
+  state_server.cam_states.clear();
+
+  // Reset the state covariance.
+  double gyro_bias_cov, acc_bias_cov, velocity_cov;
+  nh.param("initial_covariance/velocity",
+      velocity_cov, 0.25);
+  nh.param("initial_covariance/gyro_bias",
+      gyro_bias_cov, 1e-4);
+  nh.param("initial_covariance/acc_bias",
+      acc_bias_cov, 1e-2);
+
+  double extrinsic_rotation_cov, extrinsic_translation_cov;
+  nh.param("initial_covariance/extrinsic_rotation_cov",
+      extrinsic_rotation_cov, 3.0462e-4);
+  nh.param("initial_covariance/extrinsic_translation_cov",
+      extrinsic_translation_cov, 1e-4);
+
+  state_server.state_cov = MatrixXd::Zero(21, 21);
+  for (int i = 3; i < 6; ++i)
+    state_server.state_cov(i, i) = gyro_bias_cov;
+  for (int i = 6; i < 9; ++i)
+    state_server.state_cov(i, i) = velocity_cov;
+  for (int i = 9; i < 12; ++i)
+    state_server.state_cov(i, i) = acc_bias_cov;
+  for (int i = 15; i < 18; ++i)
+    state_server.state_cov(i, i) = extrinsic_rotation_cov;
+  for (int i = 18; i < 21; ++i)
+    state_server.state_cov(i, i) = extrinsic_translation_cov;
+
+  // Clear all existing features in the map.
+  map_server.clear();
+
+  // Clear the IMU msg buffer.
+  imu_msg_buffer.clear();
+
+  // Reset the starting flags.
+  is_gravity_set = false;
+  is_first_img = true;
+
+  // Restart the subscribers.
+  imu_sub = nh.subscribe("imu", 100,
+      &MsckfVio::imuCallback, this);
+  feature_sub = nh.subscribe("features", 40,
+      &MsckfVio::featureCallback, this);
+
+  // TODO: When can the reset fail?
+  res.success = true;
+  ROS_WARN("Resetting msckf vio completed...");
+  return true;
+}
+
+void MsckfVio::featureCallback(
+    const CameraMeasurementConstPtr& msg) {
+
+  // Return if the gravity vector has not been set.
+  if (!is_gravity_set) return;
+
+  // Start the system if the first image is received.
+  // The frame where the first image is received will be
+  // the origin.
+  if (is_first_img) {
+    is_first_img = false;
+    state_server.imu_state.time = msg->header.stamp.toSec();
+  }
+
+  static double max_processing_time = 0.0;
+  static int critical_time_cntr = 0;
+  double processing_start_time = ros::Time::now().toSec();
+
+  // Propagate the IMU state using the IMU msgs
+  // that are received before the image msg.
+  ros::Time start_time = ros::Time::now();
+  batchImuProcessing(msg->header.stamp.toSec());
+  double imu_processing_time = (
+      ros::Time::now()-start_time).toSec();
+
+  // Augment the state vector.
+  start_time = ros::Time::now();
+  stateAugmentation(msg->header.stamp.toSec());
+  double state_augmentation_time = (
+      ros::Time::now()-start_time).toSec();
+
+  // Add new observations for existing features or new
+  // features in the map server.
+  start_time = ros::Time::now();
+  addFeatureObservations(msg);
+  double add_observations_time = (
+      ros::Time::now()-start_time).toSec();
+
+  // Perform measurement update if necessary.
+  start_time = ros::Time::now();
+  removeLostFeatures();
+  double remove_lost_features_time = (
+      ros::Time::now()-start_time).toSec();
+
+  start_time = ros::Time::now();
+  pruneCamStateBuffer();
+  double prune_cam_states_time = (
+      ros::Time::now()-start_time).toSec();
+
+  // Publish the odometry.
+  start_time = ros::Time::now();
+  publish(msg->header.stamp);
+  double publish_time = (
+      ros::Time::now()-start_time).toSec();
+
+  // Reset the system if necessary.
+  onlineReset();
+
+  double processing_end_time = ros::Time::now().toSec();
+  double processing_time =
+    processing_end_time - processing_start_time;
+  if (processing_time > 1.0/frame_rate) {
+    ++critical_time_cntr;
+    ROS_INFO("\033[1;31mTotal processing time %f/%d...\033[0m",
+        processing_time, critical_time_cntr);
+    //printf("IMU processing time: %f/%f\n",
+    //    imu_processing_time, imu_processing_time/processing_time);
+    //printf("State augmentation time: %f/%f\n",
+    //    state_augmentation_time, state_augmentation_time/processing_time);
+    //printf("Add observations time: %f/%f\n",
+    //    add_observations_time, add_observations_time/processing_time);
+    printf("Remove lost features time: %f/%f\n",
+        remove_lost_features_time, remove_lost_features_time/processing_time);
+    printf("Remove camera states time: %f/%f\n",
+        prune_cam_states_time, prune_cam_states_time/processing_time);
+    //printf("Publish time: %f/%f\n",
+    //    publish_time, publish_time/processing_time);
+  }
+
+  return;
+}
+
+void MsckfVio::mocapOdomCallback(
+    const nav_msgs::OdometryConstPtr& msg) {
+  static bool first_mocap_odom_msg = true;
+
+  // If this is the first mocap odometry message, set
+  // the initial frame.
+  if (first_mocap_odom_msg) {
+    Quaterniond orientation;
+    Vector3d translation;
+    tf::pointMsgToEigen(
+        msg->pose.pose.position, translation);
+    tf::quaternionMsgToEigen(
+        msg->pose.pose.orientation, orientation);
+    //tf::vectorMsgToEigen(
+    //    msg->transform.translation, translation);
+    //tf::quaternionMsgToEigen(
+    //    msg->transform.rotation, orientation);
+    mocap_initial_frame.linear() = orientation.toRotationMatrix();
+    mocap_initial_frame.translation() = translation;
+    first_mocap_odom_msg = false;
+  }
+
+  // Transform the ground truth.
+  Quaterniond orientation;
+  Vector3d translation;
+  //tf::vectorMsgToEigen(
+  //    msg->transform.translation, translation);
+  //tf::quaternionMsgToEigen(
+  //    msg->transform.rotation, orientation);
+  tf::pointMsgToEigen(
+      msg->pose.pose.position, translation);
+  tf::quaternionMsgToEigen(
+      msg->pose.pose.orientation, orientation);
+
+  Eigen::Isometry3d T_b_v_gt;
+  T_b_v_gt.linear() = orientation.toRotationMatrix();
+  T_b_v_gt.translation() = translation;
+  Eigen::Isometry3d T_b_w_gt = mocap_initial_frame.inverse() * T_b_v_gt;
+
+  //Eigen::Vector3d body_velocity_gt;
+  //tf::vectorMsgToEigen(msg->twist.twist.linear, body_velocity_gt);
+  //body_velocity_gt = mocap_initial_frame.linear().transpose() *
+  //  body_velocity_gt;
+
+  // Ground truth tf.
+  if (publish_tf) {
+    tf::Transform T_b_w_gt_tf;
+    tf::transformEigenToTF(T_b_w_gt, T_b_w_gt_tf);
+    tf_pub.sendTransform(tf::StampedTransform(
+        T_b_w_gt_tf, msg->header.stamp, fixed_frame_id, child_frame_id+"_mocap"));
+  }
+
+  // Ground truth odometry.
+  nav_msgs::Odometry mocap_odom_msg;
+  mocap_odom_msg.header.stamp = msg->header.stamp;
+  mocap_odom_msg.header.frame_id = fixed_frame_id;
+  mocap_odom_msg.child_frame_id = child_frame_id+"_mocap";
+
+  tf::poseEigenToMsg(T_b_w_gt, mocap_odom_msg.pose.pose);
+  //tf::vectorEigenToMsg(body_velocity_gt,
+  //    mocap_odom_msg.twist.twist.linear);
+
+  mocap_odom_pub.publish(mocap_odom_msg);
+  return;
+}
+
+void MsckfVio::batchImuProcessing(const double& time_bound) {
+  // Count how many IMU msgs in the buffer are used.
+  int used_imu_msg_cntr = 0;
+
+  for (const auto& imu_msg : imu_msg_buffer) {
+    double imu_time = imu_msg.header.stamp.toSec();
+    if (imu_time < state_server.imu_state.time) {
+      ++used_imu_msg_cntr;
+      continue;
+    }
+    if (imu_time > time_bound) break;
+
+    // Convert the msgs.
+    Vector3d m_gyro, m_acc;
+    tf::vectorMsgToEigen(imu_msg.angular_velocity, m_gyro);
+    tf::vectorMsgToEigen(imu_msg.linear_acceleration, m_acc);
+
+    // Execute process model.
+    processModel(imu_time, m_gyro, m_acc);
+    ++used_imu_msg_cntr;
+  }
+
+  // Set the state ID for the new IMU state.
+  state_server.imu_state.id = IMUState::next_id++;
+
+  // Remove all used IMU msgs.
+  imu_msg_buffer.erase(imu_msg_buffer.begin(),
+      imu_msg_buffer.begin()+used_imu_msg_cntr);
+
+  return;
+}
+
+void MsckfVio::processModel(const double& time,
+    const Vector3d& m_gyro,
+    const Vector3d& m_acc) {
+
+  // Remove the bias from the measured gyro and acceleration
+  IMUState& imu_state = state_server.imu_state;
+  Vector3d gyro = m_gyro - imu_state.gyro_bias;
+  Vector3d acc = m_acc - imu_state.acc_bias;
+  double dtime = time - imu_state.time;
+
+  // Compute discrete transition and noise covariance matrix
+  Matrix<double, 21, 21> F = Matrix<double, 21, 21>::Zero();
+  Matrix<double, 21, 12> G = Matrix<double, 21, 12>::Zero();
+
+  F.block<3, 3>(0, 0) = -skewSymmetric(gyro);
+  F.block<3, 3>(0, 3) = -Matrix3d::Identity();
+  F.block<3, 3>(6, 0) = -quaternionToRotation(
+      imu_state.orientation).transpose()*skewSymmetric(acc);
+  F.block<3, 3>(6, 9) = -quaternionToRotation(
+      imu_state.orientation).transpose();
+  F.block<3, 3>(12, 6) = Matrix3d::Identity();
+
+  G.block<3, 3>(0, 0) = -Matrix3d::Identity();
+  G.block<3, 3>(3, 3) = Matrix3d::Identity();
+  G.block<3, 3>(6, 6) = -quaternionToRotation(
+      imu_state.orientation).transpose();
+  G.block<3, 3>(9, 9) = Matrix3d::Identity();
+
+  // Approximate matrix exponential to the 3rd order,
+  // which can be considered to be accurate enough assuming
+  // dtime is within 0.01s.
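+  // Note: the truncated Taylor series below approximates
+  //   Phi = expm(F*dt) ~= I + F*dt + (F*dt)^2/2 + (F*dt)^3/6,
+  // with O(||F*dt||^4) truncation error, which is negligible at IMU
+  // sampling intervals on the order of 0.01 s.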
+  Matrix<double, 21, 21> Fdt = F * dtime;
+  Matrix<double, 21, 21> Fdt_square = Fdt * Fdt;
+  Matrix<double, 21, 21> Fdt_cube = Fdt_square * Fdt;
+  Matrix<double, 21, 21> Phi = Matrix<double, 21, 21>::Identity() +
+    Fdt + 0.5*Fdt_square + (1.0/6.0)*Fdt_cube;
+
+  // Propagate the state using 4th order Runge-Kutta
+  predictNewState(dtime, gyro, acc);
+
+  // Modify the transition matrix
+  Matrix3d R_kk_1 = quaternionToRotation(imu_state.orientation_null);
+  Phi.block<3, 3>(0, 0) =
+    quaternionToRotation(imu_state.orientation) * R_kk_1.transpose();
+
+  Vector3d u = R_kk_1 * IMUState::gravity;
+  RowVector3d s = (u.transpose()*u).inverse() * u.transpose();
+
+  Matrix3d A1 = Phi.block<3, 3>(6, 0);
+  Vector3d w1 = skewSymmetric(
+      imu_state.velocity_null-imu_state.velocity) * IMUState::gravity;
+  Phi.block<3, 3>(6, 0) = A1 - (A1*u-w1)*s;
+
+  Matrix3d A2 = Phi.block<3, 3>(12, 0);
+  Vector3d w2 = skewSymmetric(
+      dtime*imu_state.velocity_null+imu_state.position_null-
+      imu_state.position) * IMUState::gravity;
+  Phi.block<3, 3>(12, 0) = A2 - (A2*u-w2)*s;
+
+  // Propagate the state covariance matrix.
+  Matrix<double, 21, 21> Q = Phi*G*state_server.continuous_noise_cov*
+    G.transpose()*Phi.transpose()*dtime;
+  state_server.state_cov.block<21, 21>(0, 0) =
+    Phi*state_server.state_cov.block<21, 21>(0, 0)*Phi.transpose() + Q;
+
+  if (state_server.cam_states.size() > 0) {
+    state_server.state_cov.block(
+        0, 21, 21, state_server.state_cov.cols()-21) =
+      Phi * state_server.state_cov.block(
+          0, 21, 21, state_server.state_cov.cols()-21);
+    state_server.state_cov.block(
+        21, 0, state_server.state_cov.rows()-21, 21) =
+      state_server.state_cov.block(
+          21, 0, state_server.state_cov.rows()-21, 21) * Phi.transpose();
+  }
+
+  MatrixXd state_cov_fixed = (state_server.state_cov +
+      state_server.state_cov.transpose()) / 2.0;
+  state_server.state_cov = state_cov_fixed;
+
+  // Update the state corresponding to the null space.
+  imu_state.orientation_null = imu_state.orientation;
+  imu_state.position_null = imu_state.position;
+  imu_state.velocity_null = imu_state.velocity;
+
+  // Update the state info
+  state_server.imu_state.time = time;
+  return;
+}
+
+void MsckfVio::predictNewState(const double& dt,
+    const Vector3d& gyro,
+    const Vector3d& acc) {
+
+  // TODO: Will performing the forward integration using
+  // the inverse of the quaternion give better accuracy?
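+  // Note: Omega(gyro) below is the 4x4 matrix of the quaternion
+  // kinematics q_dot = 0.5 * Omega(gyro) * q; for a constant rate w the
+  // closed-form solution over dt is
+  //   q(t+dt) = (cos(|w|dt/2)*I + sin(|w|dt/2)/|w| * Omega(w)) * q(t),
+  // and dq_dt2 evaluates the same expression at the half step dt/2, as
+  // needed by the k2/k3 stages of the RK4 integration further down.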
+  double gyro_norm = gyro.norm();
+  Matrix4d Omega = Matrix4d::Zero();
+  Omega.block<3, 3>(0, 0) = -skewSymmetric(gyro);
+  Omega.block<3, 1>(0, 3) = gyro;
+  Omega.block<1, 3>(3, 0) = -gyro;
+
+  Vector4d& q = state_server.imu_state.orientation;
+  Vector3d& v = state_server.imu_state.velocity;
+  Vector3d& p = state_server.imu_state.position;
+
+  // Some pre-calculation
+  Vector4d dq_dt, dq_dt2;
+  if (gyro_norm > 1e-5) {
+    dq_dt = (cos(gyro_norm*dt*0.5)*Matrix4d::Identity() +
+        1/gyro_norm*sin(gyro_norm*dt*0.5)*Omega) * q;
+    dq_dt2 = (cos(gyro_norm*dt*0.25)*Matrix4d::Identity() +
+        1/gyro_norm*sin(gyro_norm*dt*0.25)*Omega) * q;
+  }
+  else {
+    dq_dt = (Matrix4d::Identity()+0.5*dt*Omega) *
+      cos(gyro_norm*dt*0.5) * q;
+    dq_dt2 = (Matrix4d::Identity()+0.25*dt*Omega) *
+      cos(gyro_norm*dt*0.25) * q;
+  }
+  Matrix3d dR_dt_transpose = quaternionToRotation(dq_dt).transpose();
+  Matrix3d dR_dt2_transpose = quaternionToRotation(dq_dt2).transpose();
+
+  // k1 = f(tn, yn)
+  Vector3d k1_v_dot = quaternionToRotation(q).transpose()*acc +
+    IMUState::gravity;
+  Vector3d k1_p_dot = v;
+
+  // k2 = f(tn+dt/2, yn+k1*dt/2)
+  Vector3d k1_v = v + k1_v_dot*dt/2;
+  Vector3d k2_v_dot = dR_dt2_transpose*acc +
+    IMUState::gravity;
+  Vector3d k2_p_dot = k1_v;
+
+  // k3 = f(tn+dt/2, yn+k2*dt/2)
+  Vector3d k2_v = v + k2_v_dot*dt/2;
+  Vector3d k3_v_dot = dR_dt2_transpose*acc +
+    IMUState::gravity;
+  Vector3d k3_p_dot = k2_v;
+
+  // k4 = f(tn+dt, yn+k3*dt)
+  Vector3d k3_v = v + k3_v_dot*dt;
+  Vector3d k4_v_dot = dR_dt_transpose*acc +
+    IMUState::gravity;
+  Vector3d k4_p_dot = k3_v;
+
+  // yn+1 = yn + dt/6*(k1+2*k2+2*k3+k4)
+  q = dq_dt;
+  quaternionNormalize(q);
+  v = v + dt/6*(k1_v_dot+2*k2_v_dot+2*k3_v_dot+k4_v_dot);
+  p = p + dt/6*(k1_p_dot+2*k2_p_dot+2*k3_p_dot+k4_p_dot);
+
+  return;
+}
+
+void MsckfVio::stateAugmentation(const double& time) {
+
+  const Matrix3d& R_i_c = state_server.imu_state.R_imu_cam0;
+  const Vector3d& t_c_i = state_server.imu_state.t_cam0_imu;
+
+  // Add a new camera state to the state server.
+  Matrix3d R_w_i = quaternionToRotation(
+      state_server.imu_state.orientation);
+  Matrix3d R_w_c = R_i_c * R_w_i;
+  Vector3d t_c_w = state_server.imu_state.position +
+    R_w_i.transpose()*t_c_i;
+
+  state_server.cam_states[state_server.imu_state.id] =
+    CAMState(state_server.imu_state.id);
+  CAMState& cam_state = state_server.cam_states[
+    state_server.imu_state.id];
+
+  cam_state.time = time;
+  cam_state.orientation = rotationToQuaternion(R_w_c);
+  cam_state.position = t_c_w;
+
+  cam_state.orientation_null = cam_state.orientation;
+  cam_state.position_null = cam_state.position;
+
+  // Update the covariance matrix of the state.
+  // To simplify computation, the matrix J below is the nontrivial block
+  // in Equation (16) in "A Multi-State Constraint Kalman Filter for Vision
+  // -aided Inertial Navigation".
+  Matrix<double, 6, 21> J = Matrix<double, 6, 21>::Zero();
+  J.block<3, 3>(0, 0) = R_i_c;
+  J.block<3, 3>(0, 15) = Matrix3d::Identity();
+  J.block<3, 3>(3, 0) = skewSymmetric(R_w_i.transpose()*t_c_i);
+  //J.block<3, 3>(3, 0) = -R_w_i.transpose()*skewSymmetric(t_c_i);
+  J.block<3, 3>(3, 12) = Matrix3d::Identity();
+  J.block<3, 3>(3, 18) = Matrix3d::Identity();
+
+  // Resize the state covariance matrix.
+  size_t old_rows = state_server.state_cov.rows();
+  size_t old_cols = state_server.state_cov.cols();
+  state_server.state_cov.conservativeResize(old_rows+6, old_cols+6);
+
+  // Rename some matrix blocks for convenience.
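+  // Note: with the augmented error state [x; J*x], the new covariance has
+  // the block form
+  //   [ P      P*J^T  ]
+  //   [ J*P  J*P*J^T ],
+  // which is exactly what the three block assignments below fill in.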
+  const Matrix<double, 21, 21>& P11 =
+    state_server.state_cov.block<21, 21>(0, 0);
+  const MatrixXd& P12 =
+    state_server.state_cov.block(0, 21, 21, old_cols-21);
+
+  // Fill in the augmented state covariance.
+  state_server.state_cov.block(old_rows, 0, 6, old_cols) << J*P11, J*P12;
+  state_server.state_cov.block(0, old_cols, old_rows, 6) =
+    state_server.state_cov.block(old_rows, 0, 6, old_cols).transpose();
+  state_server.state_cov.block<6, 6>(old_rows, old_cols) =
+    J * P11 * J.transpose();
+
+  // Fix the covariance to be symmetric
+  MatrixXd state_cov_fixed = (state_server.state_cov +
+      state_server.state_cov.transpose()) / 2.0;
+  state_server.state_cov = state_cov_fixed;
+
+  return;
+}
+
+void MsckfVio::addFeatureObservations(
+    const CameraMeasurementConstPtr& msg) {
+
+  StateIDType state_id = state_server.imu_state.id;
+  int curr_feature_num = map_server.size();
+  int tracked_feature_num = 0;
+
+  // Add new observations for existing features or new
+  // features in the map server.
+  for (const auto& feature : msg->features) {
+    if (map_server.find(feature.id) == map_server.end()) {
+      // This is a new feature.
+      map_server[feature.id] = Feature(feature.id);
+      map_server[feature.id].observations[state_id] =
+        Vector4d(feature.u0, feature.v0,
+            feature.u1, feature.v1);
+    } else {
+      // This is an old feature.
+      map_server[feature.id].observations[state_id] =
+        Vector4d(feature.u0, feature.v0,
+            feature.u1, feature.v1);
+      ++tracked_feature_num;
+    }
+  }
+
+  tracking_rate =
+    static_cast<double>(tracked_feature_num) /
+    static_cast<double>(curr_feature_num);
+
+  return;
+}
+
+void MsckfVio::measurementJacobian(
+    const StateIDType& cam_state_id,
+    const FeatureIDType& feature_id,
+    Matrix<double, 4, 6>& H_x, Matrix<double, 4, 3>& H_f, Vector4d& r) {
+
+  // Prepare all the required data.
+  const CAMState& cam_state = state_server.cam_states[cam_state_id];
+  const Feature& feature = map_server[feature_id];
+
+  // Cam0 pose.
+  Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
+  const Vector3d& t_c0_w = cam_state.position;
+
+  // Cam1 pose.
+  Matrix3d R_c0_c1 = CAMState::T_cam0_cam1.linear();
+  Matrix3d R_w_c1 = CAMState::T_cam0_cam1.linear() * R_w_c0;
+  Vector3d t_c1_w = t_c0_w - R_w_c1.transpose()*CAMState::T_cam0_cam1.translation();
+
+  // 3d feature position in the world frame.
+  // And its observation with the stereo cameras.
+  const Vector3d& p_w = feature.position;
+  const Vector4d& z = feature.observations.find(cam_state_id)->second;
+
+  // Convert the feature position from the world frame to
+  // the cam0 and cam1 frame.
+  Vector3d p_c0 = R_w_c0 * (p_w-t_c0_w);
+  Vector3d p_c1 = R_w_c1 * (p_w-t_c1_w);
+
+  // Compute the Jacobians.
+  Matrix<double, 4, 3> dz_dpc0 = Matrix<double, 4, 3>::Zero();
+  dz_dpc0(0, 0) = 1 / p_c0(2);
+  dz_dpc0(1, 1) = 1 / p_c0(2);
+  dz_dpc0(0, 2) = -p_c0(0) / (p_c0(2)*p_c0(2));
+  dz_dpc0(1, 2) = -p_c0(1) / (p_c0(2)*p_c0(2));
+
+  Matrix<double, 4, 3> dz_dpc1 = Matrix<double, 4, 3>::Zero();
+  dz_dpc1(2, 0) = 1 / p_c1(2);
+  dz_dpc1(3, 1) = 1 / p_c1(2);
+  dz_dpc1(2, 2) = -p_c1(0) / (p_c1(2)*p_c1(2));
+  dz_dpc1(3, 2) = -p_c1(1) / (p_c1(2)*p_c1(2));
+
+  Matrix<double, 3, 6> dpc0_dxc = Matrix<double, 3, 6>::Zero();
+  dpc0_dxc.leftCols(3) = skewSymmetric(p_c0);
+  dpc0_dxc.rightCols(3) = -R_w_c0;
+
+  Matrix<double, 3, 6> dpc1_dxc = Matrix<double, 3, 6>::Zero();
+  dpc1_dxc.leftCols(3) = R_c0_c1 * skewSymmetric(p_c0);
+  dpc1_dxc.rightCols(3) = -R_w_c1;
+
+  Matrix3d dpc0_dpg = R_w_c0;
+  Matrix3d dpc1_dpg = R_w_c1;
+
+  H_x = dz_dpc0*dpc0_dxc + dz_dpc1*dpc1_dxc;
+  H_f = dz_dpc0*dpc0_dpg + dz_dpc1*dpc1_dpg;
+
+  // Modify the measurement Jacobian to ensure the
+  // observability constraint.
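+  // Note: the projection below removes the Jacobian components along the
+  // unobservable directions u (rotation about gravity and global
+  // translation), enforcing H_x * u = 0 as in the observability-constrained
+  // EKF construction:
+  //   H_x <- A - A*u*(u^T*u)^{-1}*u^T.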
+  Matrix<double, 4, 6> A = H_x;
+  Matrix<double, 6, 1> u = Matrix<double, 6, 1>::Zero();
+  u.block<3, 1>(0, 0) = quaternionToRotation(
+      cam_state.orientation_null) * IMUState::gravity;
+  u.block<3, 1>(3, 0) = skewSymmetric(
+      p_w-cam_state.position_null) * IMUState::gravity;
+  H_x = A - A*u*(u.transpose()*u).inverse()*u.transpose();
+  H_f = -H_x.block<4, 3>(0, 3);
+
+  // Compute the residual.
+  r = z - Vector4d(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2),
+      p_c1(0)/p_c1(2), p_c1(1)/p_c1(2));
+
+  return;
+}
+
+void MsckfVio::featureJacobian(
+    const FeatureIDType& feature_id,
+    const std::vector<StateIDType>& cam_state_ids,
+    MatrixXd& H_x, VectorXd& r) {
+
+  const auto& feature = map_server[feature_id];
+
+  // Check how many camera states in the provided list
+  // have actually seen this feature.
+  vector<StateIDType> valid_cam_state_ids(0);
+  for (const auto& cam_id : cam_state_ids) {
+    if (feature.observations.find(cam_id) ==
+        feature.observations.end()) continue;
+
+    valid_cam_state_ids.push_back(cam_id);
+  }
+
+  int jacobian_row_size = 0;
+  jacobian_row_size = 4 * valid_cam_state_ids.size();
+
+  MatrixXd H_xj = MatrixXd::Zero(jacobian_row_size,
+      21+state_server.cam_states.size()*6);
+  MatrixXd H_fj = MatrixXd::Zero(jacobian_row_size, 3);
+  VectorXd r_j = VectorXd::Zero(jacobian_row_size);
+  int stack_cntr = 0;
+
+  for (const auto& cam_id : valid_cam_state_ids) {
+
+    Matrix<double, 4, 6> H_xi = Matrix<double, 4, 6>::Zero();
+    Matrix<double, 4, 3> H_fi = Matrix<double, 4, 3>::Zero();
+    Vector4d r_i = Vector4d::Zero();
+    measurementJacobian(cam_id, feature.id, H_xi, H_fi, r_i);
+
+    auto cam_state_iter = state_server.cam_states.find(cam_id);
+    int cam_state_cntr = std::distance(
+        state_server.cam_states.begin(), cam_state_iter);
+
+    // Stack the Jacobians.
+    H_xj.block<4, 6>(stack_cntr, 21+6*cam_state_cntr) = H_xi;
+    H_fj.block<4, 3>(stack_cntr, 0) = H_fi;
+    r_j.segment<4>(stack_cntr) = r_i;
+    stack_cntr += 4;
+  }
+
+  // Project the residual and Jacobians onto the nullspace
+  // of H_fj.
+  JacobiSVD<MatrixXd> svd_helper(H_fj, ComputeFullU | ComputeThinV);
+  MatrixXd A = svd_helper.matrixU().rightCols(
+      jacobian_row_size - 3);
+
+  H_x = A.transpose() * H_xj;
+  r = A.transpose() * r_j;
+
+  return;
+}
+
+void MsckfVio::measurementUpdate(
+    const MatrixXd& H, const VectorXd& r) {
+
+  if (H.rows() == 0 || r.rows() == 0) return;
+
+  // Decompose the final Jacobian matrix to reduce computational
+  // complexity as in Equation (28), (29).
+  MatrixXd H_thin;
+  VectorXd r_thin;
+
+  if (H.rows() > H.cols()) {
+    // Convert H to a sparse matrix.
+    SparseMatrix<double> H_sparse = H.sparseView();
+
+    // Perform QR decomposition on H_sparse.
+    SPQR<SparseMatrix<double> > spqr_helper;
+    spqr_helper.setSPQROrdering(SPQR_ORDERING_NATURAL);
+    spqr_helper.compute(H_sparse);
+
+    MatrixXd H_temp;
+    VectorXd r_temp;
+    (spqr_helper.matrixQ().transpose() * H).evalTo(H_temp);
+    (spqr_helper.matrixQ().transpose() * r).evalTo(r_temp);
+
+    H_thin = H_temp.topRows(21+state_server.cam_states.size()*6);
+    r_thin = r_temp.head(21+state_server.cam_states.size()*6);
+
+    //HouseholderQR<MatrixXd> qr_helper(H);
+    //MatrixXd Q = qr_helper.householderQ();
+    //MatrixXd Q1 = Q.leftCols(21+state_server.cam_states.size()*6);
+
+    //H_thin = Q1.transpose() * H;
+    //r_thin = Q1.transpose() * r;
+  } else {
+    H_thin = H;
+    r_thin = r;
+  }
+
+  // Compute the Kalman gain.
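+  // Note: below, K = P*H^T*(H*P*H^T + R)^{-1} with R = observation_noise*I;
+  // rather than inverting the innovation covariance S explicitly, the
+  // linear system S*K^T = H*P is solved via an LDLT factorization.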
+  const MatrixXd& P = state_server.state_cov;
+  MatrixXd S = H_thin*P*H_thin.transpose() +
+    Feature::observation_noise*MatrixXd::Identity(
+        H_thin.rows(), H_thin.rows());
+  //MatrixXd K_transpose = S.fullPivHouseholderQr().solve(H_thin*P);
+  MatrixXd K_transpose = S.ldlt().solve(H_thin*P);
+  MatrixXd K = K_transpose.transpose();
+
+  // Compute the error of the state.
+  VectorXd delta_x = K * r_thin;
+
+  // Update the IMU state.
+  const VectorXd& delta_x_imu = delta_x.head<21>();
+
+  if (//delta_x_imu.segment<3>(0).norm() > 0.15 ||
+      //delta_x_imu.segment<3>(3).norm() > 0.15 ||
+      delta_x_imu.segment<3>(6).norm() > 0.5 ||
+      //delta_x_imu.segment<3>(9).norm() > 0.5 ||
+      delta_x_imu.segment<3>(12).norm() > 1.0) {
+    printf("delta velocity: %f\n", delta_x_imu.segment<3>(6).norm());
+    printf("delta position: %f\n", delta_x_imu.segment<3>(12).norm());
+    ROS_WARN("Update change is too large.");
+    //return;
+  }
+
+  const Vector4d dq_imu =
+    smallAngleQuaternion(delta_x_imu.head<3>());
+  state_server.imu_state.orientation = quaternionMultiplication(
+      dq_imu, state_server.imu_state.orientation);
+  state_server.imu_state.gyro_bias += delta_x_imu.segment<3>(3);
+  state_server.imu_state.velocity += delta_x_imu.segment<3>(6);
+  state_server.imu_state.acc_bias += delta_x_imu.segment<3>(9);
+  state_server.imu_state.position += delta_x_imu.segment<3>(12);
+
+  const Vector4d dq_extrinsic =
+    smallAngleQuaternion(delta_x_imu.segment<3>(15));
+  state_server.imu_state.R_imu_cam0 = quaternionToRotation(
+      dq_extrinsic) * state_server.imu_state.R_imu_cam0;
+  state_server.imu_state.t_cam0_imu += delta_x_imu.segment<3>(18);
+
+  // Update the camera states.
+  auto cam_state_iter = state_server.cam_states.begin();
+  for (int i = 0; i < state_server.cam_states.size();
+      ++i, ++cam_state_iter) {
+    const VectorXd& delta_x_cam = delta_x.segment<6>(21+i*6);
+    const Vector4d dq_cam = smallAngleQuaternion(delta_x_cam.head<3>());
+    cam_state_iter->second.orientation = quaternionMultiplication(
+        dq_cam, cam_state_iter->second.orientation);
+    cam_state_iter->second.position += delta_x_cam.tail<3>();
+  }
+
+  // Update state covariance.
+  MatrixXd I_KH = MatrixXd::Identity(K.rows(), H_thin.cols()) - K*H_thin;
+  //state_server.state_cov = I_KH*state_server.state_cov*I_KH.transpose() +
+  //  K*K.transpose()*Feature::observation_noise;
+  state_server.state_cov = I_KH*state_server.state_cov;
+
+  // Fix the covariance to be symmetric
+  MatrixXd state_cov_fixed = (state_server.state_cov +
+      state_server.state_cov.transpose()) / 2.0;
+  state_server.state_cov = state_cov_fixed;
+
+  return;
+}
+
+bool MsckfVio::gatingTest(
+    const MatrixXd& H, const VectorXd& r, const int& dof) {
+
+  MatrixXd P1 = H * state_server.state_cov * H.transpose();
+  MatrixXd P2 = Feature::observation_noise *
+    MatrixXd::Identity(H.rows(), H.rows());
+  double gamma = r.transpose() * (P1+P2).ldlt().solve(r);
+
+  //cout << dof << " " << gamma << " " <<
+  //  chi_squared_test_table[dof] << " ";
+
+  if (gamma < chi_squared_test_table[dof]) {
+    //cout << "passed" << endl;
+    return true;
+  } else {
+    //cout << "failed" << endl;
+    return false;
+  }
+}
+
+void MsckfVio::removeLostFeatures() {
+
+  // Remove the features that lost track. Also determine the size
+  // of the final Jacobian matrix and residual vector.
+  int jacobian_row_size = 0;
+  vector<FeatureIDType> invalid_feature_ids(0);
+  vector<FeatureIDType> processed_feature_ids(0);
+
+  for (auto iter = map_server.begin();
+      iter != map_server.end(); ++iter) {
+    // Rename the feature to be checked.
+    auto& feature = iter->second;
+
+    // Skip the features that are still being tracked.
+    if (feature.observations.find(state_server.imu_state.id) !=
+        feature.observations.end()) continue;
+    if (feature.observations.size() < 3) {
+      invalid_feature_ids.push_back(feature.id);
+      continue;
+    }
+
+    // Check if the feature can be initialized if it
+    // has not been.
+    if (!feature.is_initialized) {
+      if (!feature.checkMotion(state_server.cam_states)) {
+        invalid_feature_ids.push_back(feature.id);
+        continue;
+      } else {
+        if(!feature.initializePosition(state_server.cam_states)) {
+          invalid_feature_ids.push_back(feature.id);
+          continue;
+        }
+      }
+    }
+
+    jacobian_row_size += 4*feature.observations.size() - 3;
+    processed_feature_ids.push_back(feature.id);
+  }
+
+  //cout << "invalid/processed feature #: " <<
+  //  invalid_feature_ids.size() << "/" <<
+  //  processed_feature_ids.size() << endl;
+  //cout << "jacobian row #: " << jacobian_row_size << endl;
+
+  // Remove the features that do not have enough measurements.
+  for (const auto& feature_id : invalid_feature_ids)
+    map_server.erase(feature_id);
+
+  // Return if there is no lost feature to be processed.
+  if (processed_feature_ids.size() == 0) return;
+
+  MatrixXd H_x = MatrixXd::Zero(jacobian_row_size,
+      21+6*state_server.cam_states.size());
+  VectorXd r = VectorXd::Zero(jacobian_row_size);
+  int stack_cntr = 0;
+
+  // Process the features that have lost track.
+  for (const auto& feature_id : processed_feature_ids) {
+    auto& feature = map_server[feature_id];
+
+    vector<StateIDType> cam_state_ids(0);
+    for (const auto& measurement : feature.observations)
+      cam_state_ids.push_back(measurement.first);
+
+    MatrixXd H_xj;
+    VectorXd r_j;
+    featureJacobian(feature.id, cam_state_ids, H_xj, r_j);
+
+    if (gatingTest(H_xj, r_j, cam_state_ids.size()-1)) {
+      H_x.block(stack_cntr, 0, H_xj.rows(), H_xj.cols()) = H_xj;
+      r.segment(stack_cntr, r_j.rows()) = r_j;
+      stack_cntr += H_xj.rows();
+    }
+
+    // Put an upper bound on the row size of measurement Jacobian,
+    // which helps guarantee the execution time.
+    if (stack_cntr > 1500) break;
+  }
+
+  H_x.conservativeResize(stack_cntr, H_x.cols());
+  r.conservativeResize(stack_cntr);
+
+  // Perform the measurement update step.
+  measurementUpdate(H_x, r);
+
+  // Remove all processed features from the map.
+  for (const auto& feature_id : processed_feature_ids)
+    map_server.erase(feature_id);
+
+  return;
+}
+
+void MsckfVio::findRedundantCamStates(
+    vector<StateIDType>& rm_cam_state_ids) {
+
+  // Move the iterator to the key position.
+  auto key_cam_state_iter = state_server.cam_states.end();
+  for (int i = 0; i < 4; ++i)
+    --key_cam_state_iter;
+  auto cam_state_iter = key_cam_state_iter;
+  ++cam_state_iter;
+  auto first_cam_state_iter = state_server.cam_states.begin();
+
+  // Pose of the key camera state.
+  const Vector3d key_position =
+    key_cam_state_iter->second.position;
+  const Matrix3d key_rotation = quaternionToRotation(
+      key_cam_state_iter->second.orientation);
+
+  // Mark the camera states to be removed based on the
+  // motion between states.
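+  // Note: each of the two iterations below removes one state. A state
+  // whose motion relative to the key state is small (little rotation and
+  // translation while the tracking rate is healthy) is considered
+  // redundant and dropped; otherwise the oldest state is dropped instead.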
+  for (int i = 0; i < 2; ++i) {
+    const Vector3d position =
+      cam_state_iter->second.position;
+    const Matrix3d rotation = quaternionToRotation(
+        cam_state_iter->second.orientation);
+
+    double distance = (position-key_position).norm();
+    double angle = AngleAxisd(
+        rotation*key_rotation.transpose()).angle();
+
+    //if (angle < 0.1745 && distance < 0.2 && tracking_rate > 0.5) {
+    if (angle < 0.2618 && distance < 0.4 && tracking_rate > 0.5) {
+      rm_cam_state_ids.push_back(cam_state_iter->first);
+      ++cam_state_iter;
+    } else {
+      rm_cam_state_ids.push_back(first_cam_state_iter->first);
+      ++first_cam_state_iter;
+    }
+  }
+
+  // Sort the elements in the output vector.
+  sort(rm_cam_state_ids.begin(), rm_cam_state_ids.end());
+
+  return;
+}
+
+void MsckfVio::pruneCamStateBuffer() {
+
+  if (state_server.cam_states.size() < max_cam_state_size)
+    return;
+
+  // Find two camera states to be removed.
+  vector<StateIDType> rm_cam_state_ids(0);
+  findRedundantCamStates(rm_cam_state_ids);
+
+  // Find the size of the Jacobian matrix.
+  int jacobian_row_size = 0;
+  for (auto& item : map_server) {
+    auto& feature = item.second;
+    // Check how many camera states to be removed are associated
+    // with this feature.
+    vector<StateIDType> involved_cam_state_ids(0);
+    for (const auto& cam_id : rm_cam_state_ids) {
+      if (feature.observations.find(cam_id) !=
+          feature.observations.end())
+        involved_cam_state_ids.push_back(cam_id);
+    }
+
+    if (involved_cam_state_ids.size() == 0) continue;
+    if (involved_cam_state_ids.size() == 1) {
+      feature.observations.erase(involved_cam_state_ids[0]);
+      continue;
+    }
+
+    if (!feature.is_initialized) {
+      // Check if the feature can be initialized.
+      if (!feature.checkMotion(state_server.cam_states)) {
+        // If the feature cannot be initialized, just remove
+        // the observations associated with the camera states
+        // to be removed.
+        for (const auto& cam_id : involved_cam_state_ids)
+          feature.observations.erase(cam_id);
+        continue;
+      } else {
+        if(!feature.initializePosition(state_server.cam_states)) {
+          for (const auto& cam_id : involved_cam_state_ids)
+            feature.observations.erase(cam_id);
+          continue;
+        }
+      }
+    }
+
+    jacobian_row_size += 4*involved_cam_state_ids.size() - 3;
+  }
+
+  //cout << "jacobian row #: " << jacobian_row_size << endl;
+
+  // Compute the Jacobian and residual.
+  MatrixXd H_x = MatrixXd::Zero(jacobian_row_size,
+      21+6*state_server.cam_states.size());
+  VectorXd r = VectorXd::Zero(jacobian_row_size);
+  int stack_cntr = 0;
+
+  for (auto& item : map_server) {
+    auto& feature = item.second;
+    // Check how many camera states to be removed are associated
+    // with this feature.
+    vector<StateIDType> involved_cam_state_ids(0);
+    for (const auto& cam_id : rm_cam_state_ids) {
+      if (feature.observations.find(cam_id) !=
+          feature.observations.end())
+        involved_cam_state_ids.push_back(cam_id);
+    }
+
+    if (involved_cam_state_ids.size() == 0) continue;
+
+    MatrixXd H_xj;
+    VectorXd r_j;
+    featureJacobian(feature.id, involved_cam_state_ids, H_xj, r_j);
+
+    if (gatingTest(H_xj, r_j, involved_cam_state_ids.size())) {
+      H_x.block(stack_cntr, 0, H_xj.rows(), H_xj.cols()) = H_xj;
+      r.segment(stack_cntr, r_j.rows()) = r_j;
+      stack_cntr += H_xj.rows();
+    }
+
+    for (const auto& cam_id : involved_cam_state_ids)
+      feature.observations.erase(cam_id);
+  }
+
+  H_x.conservativeResize(stack_cntr, H_x.cols());
+  r.conservativeResize(stack_cntr);
+
+  // Perform measurement update.
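+  // Note: after the update, each removed camera state's 6x6 block is
+  // deleted from the covariance by shifting the trailing rows and columns
+  // up/left and shrinking the matrix by 6 in each dimension.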
+  measurementUpdate(H_x, r);
+
+  for (const auto& cam_id : rm_cam_state_ids) {
+    int cam_sequence = std::distance(state_server.cam_states.begin(),
+        state_server.cam_states.find(cam_id));
+    int cam_state_start = 21 + 6*cam_sequence;
+    int cam_state_end = cam_state_start + 6;
+
+    // Remove the corresponding rows and columns in the state
+    // covariance matrix.
+    if (cam_state_end < state_server.state_cov.rows()) {
+      state_server.state_cov.block(cam_state_start, 0,
+          state_server.state_cov.rows()-cam_state_end,
+          state_server.state_cov.cols()) =
+        state_server.state_cov.block(cam_state_end, 0,
+            state_server.state_cov.rows()-cam_state_end,
+            state_server.state_cov.cols());
+
+      state_server.state_cov.block(0, cam_state_start,
+          state_server.state_cov.rows(),
+          state_server.state_cov.cols()-cam_state_end) =
+        state_server.state_cov.block(0, cam_state_end,
+            state_server.state_cov.rows(),
+            state_server.state_cov.cols()-cam_state_end);
+
+      state_server.state_cov.conservativeResize(
+          state_server.state_cov.rows()-6, state_server.state_cov.cols()-6);
+    } else {
+      state_server.state_cov.conservativeResize(
+          state_server.state_cov.rows()-6, state_server.state_cov.cols()-6);
+    }
+
+    // Remove this camera state in the state vector.
+    state_server.cam_states.erase(cam_id);
+  }
+
+  return;
+}
+
+void MsckfVio::onlineReset() {
+
+  // Never perform online reset if position std threshold
+  // is non-positive.
+  if (position_std_threshold <= 0) return;
+  static long long int online_reset_counter = 0;
+
+  // Check the uncertainty of positions to determine if
+  // the system can be reset.
+  double position_x_std = std::sqrt(state_server.state_cov(12, 12));
+  double position_y_std = std::sqrt(state_server.state_cov(13, 13));
+  double position_z_std = std::sqrt(state_server.state_cov(14, 14));
+
+  if (position_x_std < position_std_threshold &&
+      position_y_std < position_std_threshold &&
+      position_z_std < position_std_threshold) return;
+
+  ROS_WARN("Start %lld online reset procedure...",
+      ++online_reset_counter);
+  ROS_INFO("Standard deviation in xyz: %f, %f, %f",
+      position_x_std, position_y_std, position_z_std);
+
+  // Remove all existing camera states.
+  state_server.cam_states.clear();
+
+  // Clear all existing features in the map.
+  map_server.clear();
+
+  // Reset the state covariance.
+  double gyro_bias_cov, acc_bias_cov, velocity_cov;
+  nh.param("initial_covariance/velocity",
+      velocity_cov, 0.25);
+  nh.param("initial_covariance/gyro_bias",
+      gyro_bias_cov, 1e-4);
+  nh.param("initial_covariance/acc_bias",
+      acc_bias_cov, 1e-2);
+
+  double extrinsic_rotation_cov, extrinsic_translation_cov;
+  nh.param("initial_covariance/extrinsic_rotation_cov",
+      extrinsic_rotation_cov, 3.0462e-4);
+  nh.param("initial_covariance/extrinsic_translation_cov",
+      extrinsic_translation_cov, 1e-4);
+
+  state_server.state_cov = MatrixXd::Zero(21, 21);
+  for (int i = 3; i < 6; ++i)
+    state_server.state_cov(i, i) = gyro_bias_cov;
+  for (int i = 6; i < 9; ++i)
+    state_server.state_cov(i, i) = velocity_cov;
+  for (int i = 9; i < 12; ++i)
+    state_server.state_cov(i, i) = acc_bias_cov;
+  for (int i = 15; i < 18; ++i)
+    state_server.state_cov(i, i) = extrinsic_rotation_cov;
+  for (int i = 18; i < 21; ++i)
+    state_server.state_cov(i, i) = extrinsic_translation_cov;
+
+  ROS_WARN("%lld online reset complete...", online_reset_counter);
+  return;
+}
+
+void MsckfVio::publish(const ros::Time& time) {
+
+  // Convert the IMU frame to the body frame.
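+  // Note: the state stores the world-to-IMU rotation, so T_i_w below is
+  // the IMU pose in the world frame; conjugating with T_imu_body expresses
+  // that pose (and, further down, its covariance) in the body frame that
+  // gets published.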
+  const IMUState& imu_state = state_server.imu_state;
+  Eigen::Isometry3d T_i_w = Eigen::Isometry3d::Identity();
+  T_i_w.linear() = quaternionToRotation(
+      imu_state.orientation).transpose();
+  T_i_w.translation() = imu_state.position;
+
+  Eigen::Isometry3d T_b_w = IMUState::T_imu_body * T_i_w *
+    IMUState::T_imu_body.inverse();
+  Eigen::Vector3d body_velocity =
+    IMUState::T_imu_body.linear() * imu_state.velocity;
+
+  // Publish tf
+  if (publish_tf) {
+    tf::Transform T_b_w_tf;
+    tf::transformEigenToTF(T_b_w, T_b_w_tf);
+    tf_pub.sendTransform(tf::StampedTransform(
+        T_b_w_tf, time, fixed_frame_id, child_frame_id));
+  }
+
+  // Publish the odometry
+  nav_msgs::Odometry odom_msg;
+  odom_msg.header.stamp = time;
+  odom_msg.header.frame_id = fixed_frame_id;
+  odom_msg.child_frame_id = child_frame_id;
+
+  tf::poseEigenToMsg(T_b_w, odom_msg.pose.pose);
+  tf::vectorEigenToMsg(body_velocity, odom_msg.twist.twist.linear);
+
+  // Convert the covariance.
+  Matrix3d P_oo = state_server.state_cov.block<3, 3>(0, 0);
+  Matrix3d P_op = state_server.state_cov.block<3, 3>(0, 12);
+  Matrix3d P_po = state_server.state_cov.block<3, 3>(12, 0);
+  Matrix3d P_pp = state_server.state_cov.block<3, 3>(12, 12);
+  Matrix<double, 6, 6> P_imu_pose = Matrix<double, 6, 6>::Zero();
+  P_imu_pose << P_pp, P_po, P_op, P_oo;
+
+  Matrix<double, 6, 6> H_pose = Matrix<double, 6, 6>::Zero();
+  H_pose.block<3, 3>(0, 0) = IMUState::T_imu_body.linear();
+  H_pose.block<3, 3>(3, 3) = IMUState::T_imu_body.linear();
+  Matrix<double, 6, 6> P_body_pose = H_pose *
+    P_imu_pose * H_pose.transpose();
+
+  for (int i = 0; i < 6; ++i)
+    for (int j = 0; j < 6; ++j)
+      odom_msg.pose.covariance[6*i+j] = P_body_pose(i, j);
+
+  // Construct the covariance for the velocity.
+  Matrix3d P_imu_vel = state_server.state_cov.block<3, 3>(6, 6);
+  Matrix3d H_vel = IMUState::T_imu_body.linear();
+  Matrix3d P_body_vel = H_vel * P_imu_vel * H_vel.transpose();
+  for (int i = 0; i < 3; ++i)
+    for (int j = 0; j < 3; ++j)
+      odom_msg.twist.covariance[i*6+j] = P_body_vel(i, j);
+
+  odom_pub.publish(odom_msg);
+
+  // Publish the 3D positions of the features that
+  // have been initialized.
+  pcl::PointCloud<pcl::PointXYZ>::Ptr feature_msg_ptr(
+      new pcl::PointCloud<pcl::PointXYZ>());
+  feature_msg_ptr->header.frame_id = fixed_frame_id;
+  feature_msg_ptr->height = 1;
+  for (const auto& item : map_server) {
+    const auto& feature = item.second;
+    if (feature.is_initialized) {
+      Vector3d feature_position =
+        IMUState::T_imu_body.linear() * feature.position;
+      feature_msg_ptr->points.push_back(pcl::PointXYZ(
+            feature_position(0), feature_position(1), feature_position(2)));
+    }
+  }
+  feature_msg_ptr->width = feature_msg_ptr->points.size();
+
+  feature_pub.publish(feature_msg_ptr);
+
+  return;
+}
+
+} // namespace msckf_vio
+
diff --git a/src/msckf_vio_nodelet.cpp b/src/msckf_vio_nodelet.cpp
new file mode 100644
index 0000000..dfbb54c
--- /dev/null
+++ b/src/msckf_vio_nodelet.cpp
@@ -0,0 +1,24 @@
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ * Penn Software MSCKF_VIO
+ * Copyright (C) 2017 The Trustees of the University of Pennsylvania
+ * All rights reserved.
+ */
+
+#include <msckf_vio/msckf_vio_nodelet.h>
+
+namespace msckf_vio {
+void MsckfVioNodelet::onInit() {
+  msckf_vio_ptr.reset(new MsckfVio(getPrivateNodeHandle()));
+  if (!msckf_vio_ptr->initialize()) {
+    ROS_ERROR("Cannot initialize MSCKF VIO...");
+    return;
+  }
+  return;
+}
+
+PLUGINLIB_DECLARE_CLASS(msckf_vio, MsckfVioNodelet,
+    msckf_vio::MsckfVioNodelet, nodelet::Nodelet);
+
+} // end namespace msckf_vio
+
diff --git a/test/feature_initialization_test.cpp b/test/feature_initialization_test.cpp
new file mode 100644
index 0000000..51e9e02
--- /dev/null
+++ b/test/feature_initialization_test.cpp
@@ -0,0 +1,128 @@
+/*
+ * This file is part of msckf_vio
+ *
+ * msckf_vio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * msckf_vio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with msckf_vio. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <iostream>
+#include <algorithm>
+#include <set>
+
+#include <Eigen/Dense>
+#include <Eigen/Geometry>
+#include <Eigen/StdVector>
+
+#include <gtest/gtest.h>
+#include <random_numbers/random_numbers.h>
+
+#include <msckf_vio/cam_state.h>
+#include <msckf_vio/feature.hpp>
+
+
+using namespace std;
+using namespace Eigen;
+using namespace msckf_vio;
+
+TEST(FeatureInitializeTest, sphereDistribution) {
+  // Set the real feature close to the origin of the world frame.
+  Vector3d feature(0.5, 0.0, 0.0);
+
+  // Add 6 camera poses, all of which are able to see the feature.
+  // For simplicity, the six camera views are located at the six
+  // intersections between a unit sphere and the coordinate axes,
+  // with the z axis of each camera frame pointing at the origin.
+  vector<Isometry3d, aligned_allocator<Isometry3d> > cam_poses(6);
+  // Positive x axis.
+  cam_poses[0].linear() << 0.0, 0.0, -1.0,
+    1.0, 0.0, 0.0, 0.0, -1.0, 0.0;
+  cam_poses[0].translation() << 1.0, 0.0, 0.0;
+  // Positive y axis.
+  cam_poses[1].linear() << -1.0, 0.0, 0.0,
+    0.0, 0.0, -1.0, 0.0, -1.0, 0.0;
+  cam_poses[1].translation() << 0.0, 1.0, 0.0;
+  // Negative x axis.
+  cam_poses[2].linear() << 0.0, 0.0, 1.0,
+    -1.0, 0.0, 0.0, 0.0, -1.0, 0.0;
+  cam_poses[2].translation() << -1.0, 0.0, 0.0;
+  // Negative y axis.
+  cam_poses[3].linear() << 1.0, 0.0, 0.0,
+    0.0, 0.0, 1.0, 0.0, -1.0, 0.0;
+  cam_poses[3].translation() << 0.0, -1.0, 0.0;
+  // Positive z axis.
+  cam_poses[4].linear() << 0.0, -1.0, 0.0,
+    -1.0, 0.0, 0.0, 0.0, 0.0, -1.0;
+  cam_poses[4].translation() << 0.0, 0.0, 1.0;
+  // Negative z axis.
+  cam_poses[5].linear() << 1.0, 0.0, 0.0,
+    0.0, 1.0, 0.0, 0.0, 0.0, 1.0;
+  cam_poses[5].translation() << 0.0, 0.0, -1.0;
+
+  // Set the camera states.
+  CamStateServer cam_states;
+  for (int i = 0; i < 6; ++i) {
+    CAMState new_cam_state;
+    new_cam_state.id = i;
+    new_cam_state.time = static_cast<double>(i);
+    new_cam_state.orientation = rotationToQuaternion(
+        Matrix3d(cam_poses[i].linear().transpose()));
+    new_cam_state.position = cam_poses[i].translation();
+    cam_states[new_cam_state.id] = new_cam_state;
+  }
+
+  // Compute measurements.
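+  // Each observation is the feature projected onto the normalized
+  // image plane of the corresponding camera, u = x/z and v = y/z in
+  // camera coordinates, perturbed with zero-mean Gaussian noise
+  // (sigma = 0.01 in normalized image coordinates).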
+  random_numbers::RandomNumberGenerator noise_generator;
+  vector<Vector2d, aligned_allocator<Vector2d> > measurements(6);
+  for (int i = 0; i < 6; ++i) {
+    Isometry3d cam_pose_inv = cam_poses[i].inverse();
+    Vector3d p = cam_pose_inv.linear()*feature + cam_pose_inv.translation();
+    double u = p(0) / p(2) + noise_generator.gaussian(0.0, 0.01);
+    double v = p(1) / p(2) + noise_generator.gaussian(0.0, 0.01);
+    //double u = p(0) / p(2);
+    //double v = p(1) / p(2);
+    measurements[i] = Vector2d(u, v);
+  }
+
+  for (int i = 0; i < 6; ++i) {
+    cout << "pose " << i << ":" << endl;
+    cout << "orientation: " << endl;
+    cout << cam_poses[i].linear() << endl;
+    cout << "translation: " << endl;
+    cout << cam_poses[i].translation().transpose() << endl;
+    cout << "measurement: " << endl;
+    cout << measurements[i].transpose() << endl;
+    cout << endl;
+  }
+
+  // Initialize a feature object.
+  Feature feature_object;
+  for (int i = 0; i < 6; ++i)
+    feature_object.observations[i] = measurements[i];
+
+  // Compute the 3d position of the feature.
+  feature_object.initializePosition(cam_states);
+
+  // Check the difference between the computed 3d
+  // feature position and the ground truth.
+  cout << "ground truth position: " << feature.transpose() << endl;
+  cout << "estimated position: " << feature_object.position.transpose() << endl;
+  Eigen::Vector3d error = feature_object.position - feature;
+  EXPECT_NEAR(error.norm(), 0, 0.05);
+}
+
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
+
diff --git a/test/math_utils_test.cpp b/test/math_utils_test.cpp
new file mode 100644
index 0000000..4f44ada
--- /dev/null
+++ b/test/math_utils_test.cpp
@@ -0,0 +1,87 @@
+/*
+ * This file is part of msckf_vio
+ *
+ * msckf_vio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * msckf_vio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with msckf_vio. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <iostream>
+#include <Eigen/Dense>
+#include <gtest/gtest.h>
+#include <msckf_vio/math_utils.hpp>
+
+using namespace std;
+using namespace Eigen;
+using namespace msckf_vio;
+
+TEST(MathUtilsTest, skewSymmetric) {
+  Vector3d w(1.0, 2.0, 3.0);
+  Matrix3d w_hat = skewSymmetric(w);
+  Vector3d zero_vector = w_hat * w;
+
+  // The skew-symmetric matrix of a nonzero vector has rank 2,
+  // and its null space is spanned by the vector itself.
+  FullPivLU<Matrix3d> lu_helper(w_hat);
+  EXPECT_EQ(lu_helper.rank(), 2);
+  EXPECT_DOUBLE_EQ(zero_vector.norm(), 0.0);
+  return;
+}
+
+TEST(MathUtilsTest, quaternionNormalize) {
+  Vector4d q(1.0, 1.0, 1.0, 1.0);
+  quaternionNormalize(q);
+
+  EXPECT_DOUBLE_EQ(q.norm(), 1.0);
+  return;
+}
+
+TEST(MathUtilsTest, quaternionToRotation) {
+  Vector4d q(0.0, 0.0, 0.0, 1.0);
+  Matrix3d R = quaternionToRotation(q);
+  Matrix3d zero_matrix = R - Matrix3d::Identity();
+
+  // The identity quaternion must map to the identity rotation.
+  FullPivLU<Matrix3d> lu_helper(zero_matrix);
+  EXPECT_EQ(lu_helper.rank(), 0);
+  return;
+}
+
+TEST(MathUtilsTest, rotationToQuaternion) {
+  Vector4d q1(0.0, 0.0, 0.0, 1.0);
+  Matrix3d I = Matrix3d::Identity();
+  Vector4d q2 = rotationToQuaternion(I);
+  Vector4d zero_vector = q1 - q2;
+
+  EXPECT_DOUBLE_EQ(zero_vector.norm(), 0.0);
+  return;
+}
+
+TEST(MathUtilsTest, quaternionMultiplication) {
+  Vector4d q1(2.0, 2.0, 1.0, 1.0);
+  Vector4d q2(1.0, 2.0, 3.0, 1.0);
+  q1 = q1 / q1.norm();
+  q2 = q2 / q2.norm();
+  Vector4d q_prod = quaternionMultiplication(q1, q2);
+
+  // Composing the quaternions should match composing the
+  // corresponding rotation matrices in the same order.
+  Matrix3d R1 = quaternionToRotation(q1);
+  Matrix3d R2 = quaternionToRotation(q2);
+  Matrix3d R_prod = R1 * R2;
+  Matrix3d R_prod_cp = quaternionToRotation(q_prod);
+
+  Matrix3d zero_matrix = R_prod - R_prod_cp;
+
+  // Sum the element-wise absolute residuals so that positive and
+  // negative entries cannot cancel each other out.
+  EXPECT_NEAR(zero_matrix.cwiseAbs().sum(), 0.0, 1e-10);
+  return;
+}
+
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
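
For reference, the quaternion utilities exercised by math_utils_test.cpp can also be sanity-checked outside of gtest. The snippet below is a minimal standalone sketch, not part of the patch; it assumes only the math_utils.hpp interface used by the tests above and that the package's include directory and Eigen are visible to the compiler.

#include <iostream>
#include <Eigen/Dense>
#include <msckf_vio/math_utils.hpp>

int main() {
  // Two arbitrary unit quaternions in (x, y, z, w) order, matching
  // the convention of the tests above (identity is (0, 0, 0, 1)).
  Eigen::Vector4d q1(2.0, 2.0, 1.0, 1.0);
  Eigen::Vector4d q2(1.0, 2.0, 3.0, 1.0);
  msckf_vio::quaternionNormalize(q1);
  msckf_vio::quaternionNormalize(q2);

  // quaternionMultiplication composes rotations in the same order as
  // the corresponding rotation-matrix product, so the residual below
  // should be on the order of machine precision.
  Eigen::Matrix3d diff =
      msckf_vio::quaternionToRotation(
          msckf_vio::quaternionMultiplication(q1, q2)) -
      msckf_vio::quaternionToRotation(q1) *
      msckf_vio::quaternionToRotation(q2);

  std::cout << "max abs residual: "
            << diff.cwiseAbs().maxCoeff() << std::endl;
  return 0;
}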