Compare commits


1 Commit

Author SHA1 Message Date
c6b8a2c2fc added branch 2019-04-03 06:51:18 +00:00
42 changed files with 478 additions and 4899 deletions

View File

@ -1,16 +0,0 @@
{
"configurations": [
{
"name": "Linux",
"includePath": [
"${workspaceFolder}/**"
],
"defines": [],
"compilerPath": "/usr/bin/gcc",
"cStandard": "c11",
"cppStandard": "c++14",
"intelliSenseMode": "clang-x64"
}
],
"version": 4
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,6 +0,0 @@
{
"files.associations": {
"core": "cpp",
"sparsecore": "cpp"
}
}

View File

@ -24,7 +24,6 @@ find_package(catkin REQUIRED COMPONENTS
pcl_conversions pcl_conversions
pcl_ros pcl_ros
std_srvs std_srvs
visualization_msgs
) )
## System dependencies are found with CMake's conventions ## System dependencies are found with CMake's conventions
@ -80,7 +79,6 @@ include_directories(
add_library(msckf_vio add_library(msckf_vio
src/msckf_vio.cpp src/msckf_vio.cpp
src/utils.cpp src/utils.cpp
src/image_handler.cpp
) )
add_dependencies(msckf_vio add_dependencies(msckf_vio
${${PROJECT_NAME}_EXPORTED_TARGETS} ${${PROJECT_NAME}_EXPORTED_TARGETS}
@ -89,7 +87,6 @@ add_dependencies(msckf_vio
target_link_libraries(msckf_vio target_link_libraries(msckf_vio
${catkin_LIBRARIES} ${catkin_LIBRARIES}
${SUITESPARSE_LIBRARIES} ${SUITESPARSE_LIBRARIES}
${OpenCV_LIBRARIES}
) )
# Msckf Vio nodelet # Msckf Vio nodelet
@ -109,7 +106,6 @@ target_link_libraries(msckf_vio_nodelet
add_library(image_processor add_library(image_processor
src/image_processor.cpp src/image_processor.cpp
src/utils.cpp src/utils.cpp
src/image_handler.cpp
) )
add_dependencies(image_processor add_dependencies(image_processor
${${PROJECT_NAME}_EXPORTED_TARGETS} ${${PROJECT_NAME}_EXPORTED_TARGETS}

BIN
GPATH

Binary file not shown.

BIN
GRTAGS

Binary file not shown.

BIN
GSYMS

Binary file not shown.

BIN
GTAGS

Binary file not shown.

View File

@ -1,22 +1,12 @@
# MSCKF\_VIO # MSCKF\_VIO
The `MSCKF_VIO` package is a stereo-photometric version of MSCKF. The software takes in synchronized stereo images and IMU messages and generates real-time 6DOF pose estimation of the IMU frame. The `MSCKF_VIO` package is a stereo version of MSCKF. The software takes in synchronized stereo images and IMU messages and generates real-time 6DOF pose estimation of the IMU frame.
This approach is based on the paper written by Ke Sun et al. The software is tested on Ubuntu 16.04 with ROS Kinetic.
[https://arxiv.org/abs/1712.00036](https://arxiv.org/abs/1712.00036) and their Stereo MSCKF implementation, which tightly fuses the matched feature information of a stereo image pair into a 6DOF pose estimate.
The approach implemented in this repository follows the semi-dense MSCKF approach, tightly fusing the photometric
information around the matched features into the covariance matrix, as described and derived in the master thesis [Pose Estimation using a Stereo-Photometric Multi-State Constraint Kalman Filter](http://raphael.maenle.net/resources/sp-msckf/maenle_master_thesis.pdf).
Its positioning accuracy is comparable to the approach from Ke Sun et al., though the photometric approach carries a higher
computational load, especially with larger image patches around each feature. A video explaining the approach can be
found at [https://youtu.be/HrqQywAnenQ](https://youtu.be/HrqQywAnenQ):
<br/>
[![Stereo-Photometric MSCKF](https://img.youtube.com/vi/HrqQywAnenQ/0.jpg)](https://www.youtube.com/watch?v=HrqQywAnenQ)
<br/>
This software should be deployed using ROS Kinetic on Ubuntu 16.04 or 18.04.
Video: [https://www.youtube.com/watch?v=jxfJFgzmNSw&t](https://www.youtube.com/watch?v=jxfJFgzmNSw&t=3s)<br/>
Paper Draft: [https://arxiv.org/abs/1712.00036](https://arxiv.org/abs/1712.00036)
## License ## License
@ -38,6 +28,16 @@ cd your_work_space
catkin_make --pkg msckf_vio --cmake-args -DCMAKE_BUILD_TYPE=Release catkin_make --pkg msckf_vio --cmake-args -DCMAKE_BUILD_TYPE=Release
``` ```
## Calibration
An accurate calibration is crucial for successfully running the software. To get the best performance of the software, the stereo cameras and IMU should be hardware synchronized. Note that for the stereo calibration, which includes the camera intrinsics, distortion, and extrinsics between the two cameras, you have to use a calibration software. **Manually setting these parameters will not be accurate enough.** [Kalibr](https://github.com/ethz-asl/kalibr) can be used for the stereo calibration and also to get the transformation between the stereo cameras and IMU. The yaml file generated by Kalibr can be directly used in this software. See calibration files in the `config` folder for details. The two calibration files in the `config` folder should work directly with the EuRoC and [fast flight](https://github.com/KumarRobotics/msckf_vio/wiki) datasets. The convention of the calibration file is as follows:
`camx/T_cam_imu`: takes a vector from the IMU frame to the camx frame.
`cam1/T_cn_cnm1`: takes a vector from the cam0 frame to the cam1 frame.
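As a minimal sketch of this convention (illustrative Eigen code; `T_cam0_imu` and `T_cam1_cam0` stand for the 4x4 matrices loaded from the yaml file, and the point values are made up):

```cpp
#include <Eigen/Dense>

int main() {
  // p_imu is a point expressed in the IMU frame (homogeneous coordinates).
  Eigen::Vector4d p_imu(0.1, 0.0, 0.0, 1.0);

  Eigen::Matrix4d T_cam0_imu;   // cam0/T_cam_imu from the Kalibr yaml
  Eigen::Matrix4d T_cam1_cam0;  // cam1/T_cn_cnm1 from the Kalibr yaml
  T_cam0_imu.setIdentity();     // placeholders; real values come from the yaml
  T_cam1_cam0.setIdentity();

  // T_cam_imu takes IMU-frame vectors into the camera frame...
  Eigen::Vector4d p_cam0 = T_cam0_imu * p_imu;
  // ...and T_cn_cnm1 chains cam0-frame vectors into the cam1 frame.
  Eigen::Vector4d p_cam1 = T_cam1_cam0 * p_cam0;
  return 0;
}
```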
The filter uses the first 200 IMU messages to initialize the gyro bias, acc bias, and initial orientation. Therefore, the robot is required to start from a stationary state in order to initialize the VIO successfully.
## EuRoC and UPenn Fast flight dataset example usage ## EuRoC and UPenn Fast flight dataset example usage
First obtain either the [EuRoC](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) or the [UPenn fast flight](https://github.com/KumarRobotics/msckf_vio/wiki/Dataset) dataset. First obtain either the [EuRoC](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) or the [UPenn fast flight](https://github.com/KumarRobotics/msckf_vio/wiki/Dataset) dataset.
@ -75,8 +75,6 @@ To visualize the pose and feature estimates you can use the provided rviz config
## ROS Nodes ## ROS Nodes
The general structure is similar to the structure of the MSCKF implementation this repository is derived from.
### `image_processor` node ### `image_processor` node
**Subscribed Topics** **Subscribed Topics**

View File

@ -9,7 +9,7 @@ cam0:
0, 0, 0, 1.000000000000000] 0, 0, 0, 1.000000000000000]
camera_model: pinhole camera_model: pinhole
distortion_coeffs: [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05] distortion_coeffs: [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05]
distortion_model: pre-radtan distortion_model: radtan
intrinsics: [458.654, 457.296, 367.215, 248.375] intrinsics: [458.654, 457.296, 367.215, 248.375]
resolution: [752, 480] resolution: [752, 480]
timeshift_cam_imu: 0.0 timeshift_cam_imu: 0.0
@ -26,7 +26,7 @@ cam1:
0, 0, 0, 1.000000000000000] 0, 0, 0, 1.000000000000000]
camera_model: pinhole camera_model: pinhole
distortion_coeffs: [-0.28368365, 0.07451284, -0.00010473, -3.55590700e-05] distortion_coeffs: [-0.28368365, 0.07451284, -0.00010473, -3.55590700e-05]
distortion_model: pre-radtan distortion_model: radtan
intrinsics: [457.587, 456.134, 379.999, 255.238] intrinsics: [457.587, 456.134, 379.999, 255.238]
resolution: [752, 480] resolution: [752, 480]
timeshift_cam_imu: 0.0 timeshift_cam_imu: 0.0

View File

@ -1,36 +0,0 @@
cam0:
T_cam_imu:
[-0.9995378259923383, 0.02917807204183088, -0.008530798463872679, 0.047094247958417004,
0.007526588843243184, -0.03435493139706542, -0.9993813532126198, -0.04788273017221637,
-0.029453096117288798, -0.9989836729399656, 0.034119442089241274, -0.0697294754693238,
0.0, 0.0, 0.0, 1.0]
camera_model: pinhole
distortion_coeffs: [0.0034823894022493434, 0.0007150348452162257, -0.0020532361418706202,
0.00020293673591811182]
distortion_model: pre-equidistant
intrinsics: [190.97847715128717, 190.9733070521226, 254.93170605935475, 256.8974428996504]
resolution: [512, 512]
rostopic: /cam0/image_raw
cam1:
T_cam_imu:
[-0.9995240747493029, 0.02986739485347808, -0.007717688852024281, -0.05374086123613335,
0.008095979457928231, 0.01256553460985914, -0.9998882749870535, -0.04648588412432889,
-0.02976708103202316, -0.9994748851595197, -0.0128013601698453, -0.07333210787623645,
0.0, 0.0, 0.0, 1.0]
T_cn_cnm1:
[0.9999994317488622, -0.0008361847221513937, -0.0006612844045898121, -0.10092123225528335,
0.0008042457277382264, 0.9988989443471681, -0.04690684567228134, -0.001964540595211977,
0.0006997790813734836, 0.04690628718225568, 0.9988990492196964, -0.0014663556043866572,
0.0, 0.0, 0.0, 1.0]
camera_model: pinhole
distortion_coeffs: [0.0034003170790442797, 0.001766278153469831, -0.00266312569781606,
0.0003299517423931039]
distortion_model: pre-equidistant
intrinsics: [190.44236969414825, 190.4344384721956, 252.59949716835982, 254.91723064636983]
resolution: [512, 512]
rostopic: /cam1/image_raw
T_imu_body:
[1.0000, 0.0000, 0.0000, 0.0000,
0.0000, 1.0000, 0.0000, 0.0000,
0.0000, 0.0000, 1.0000, 0.0000,
0.0000, 0.0000, 0.0000, 1.0000]

View File

@ -1,36 +0,0 @@
cam0:
T_cam_imu:
[-0.9995378259923383, 0.02917807204183088, -0.008530798463872679, 0.047094247958417004,
0.007526588843243184, -0.03435493139706542, -0.9993813532126198, -0.04788273017221637,
-0.029453096117288798, -0.9989836729399656, 0.034119442089241274, -0.0697294754693238,
0.0, 0.0, 0.0, 1.0]
camera_model: pinhole
distortion_coeffs: [0.010171079892421483, -0.010816440029919381, 0.005942781769412756,
-0.001662284667857643]
distortion_model: equidistant
intrinsics: [380.81042871360756, 380.81194179427075, 510.29465304840727, 514.3304630538506]
resolution: [1024, 1024]
rostopic: /cam0/image_raw
cam1:
T_cam_imu:
[-0.9995240747493029, 0.02986739485347808, -0.007717688852024281, -0.05374086123613335,
0.008095979457928231, 0.01256553460985914, -0.9998882749870535, -0.04648588412432889,
-0.02976708103202316, -0.9994748851595197, -0.0128013601698453, -0.07333210787623645,
0.0, 0.0, 0.0, 1.0]
T_cn_cnm1:
[0.9999994317488622, -0.0008361847221513937, -0.0006612844045898121, -0.10092123225528335,
0.0008042457277382264, 0.9988989443471681, -0.04690684567228134, -0.001964540595211977,
0.0006997790813734836, 0.04690628718225568, 0.9988990492196964, -0.0014663556043866572,
0.0, 0.0, 0.0, 1.0]
camera_model: pinhole
distortion_coeffs: [0.01371679169245271, -0.015567360615942622, 0.00905043103315326,
-0.002347858896562788]
distortion_model: equidistant
intrinsics: [379.2869884263036, 379.26583742214524, 505.5666703237407, 510.2840961765407]
resolution: [1024, 1024]
rostopic: /cam1/image_raw
T_imu_body:
[1.0000, 0.0000, 0.0000, 0.0000,
0.0000, 1.0000, 0.0000, 0.0000,
0.0000, 0.0000, 1.0000, 0.0000,
0.0000, 0.0000, 0.0000, 1.0000]

View File

@ -15,37 +15,6 @@
#include "imu_state.h" #include "imu_state.h"
namespace msckf_vio { namespace msckf_vio {
struct Frame{
cv::Mat image;
cv::Mat dximage;
cv::Mat dyimage;
double exposureTime_ms;
};
typedef std::map<StateIDType, Frame, std::less<StateIDType>,
Eigen::aligned_allocator<
std::pair<StateIDType, Frame> > > movingWindow;
struct IlluminationParameter{
double frame_bias;
double frame_gain;
double feature_bias;
double feature_gain;
};
struct CameraCalibration{
std::string distortion_model;
cv::Vec2i resolution;
cv::Vec4d intrinsics;
cv::Vec4d distortion_coeffs;
movingWindow moving_window;
cv::Mat featureVisu;
int id;
};
/* /*
* @brief CAMState Stored camera state in order to * @brief CAMState Stored camera state in order to
* form measurement model. * form measurement model.
@ -66,9 +35,6 @@ struct CAMState {
// Position of the camera frame in the world frame. // Position of the camera frame in the world frame.
Eigen::Vector3d position; Eigen::Vector3d position;
// Illumination Information of the frame
IlluminationParameter illumination;
// These two variables should have the same physical // These two variables should have the same physical
// interpretation with `orientation` and `position`. // interpretation with `orientation` and `position`.
// These two variables are used to modify the measurement // These two variables are used to modify the measurement

File diff suppressed because it is too large Load Diff

View File

@ -1,61 +0,0 @@
#ifndef MSCKF_VIO_IMAGE_HANDLER_H
#define MSCKF_VIO_IMAGE_HANDLER_H
#include <vector>
#include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
namespace msckf_vio {
/*
* @brief utilities for msckf_vio
*/
namespace image_handler {
cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics);
cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics);
void undistortImage(
cv::InputArray src,
cv::OutputArray dst,
const std::string& distortion_model,
const cv::Vec4d& intrinsics,
const cv::Vec4d& distortion_coeffs);
void undistortPoints(
const std::vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
std::vector<cv::Point2f>& pts_out,
const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(),
const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0));
std::vector<cv::Point2f> distortPoints(
const std::vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs);
cv::Point2f distortPoint(
const cv::Point2f& pt_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs);
void undistortPoint(
const cv::Point2f& pt_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
cv::Point2f& pt_out,
const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(),
const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0));
}
}
#endif
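For orientation, a hedged usage sketch of the declared point API (the intrinsics and distortion values are copied from the EuRoC cam0 calibration shown earlier in this compare; the pixel coordinate and include path are made up):

```cpp
#include <opencv2/opencv.hpp>
#include "msckf_vio/image_handler.h"  // hypothetical include path for this header

int main() {
  // fx, fy, cx, cy and radtan coefficients from the EuRoC cam0 calibration
  cv::Vec4d intrinsics(458.654, 457.296, 367.215, 248.375);
  cv::Vec4d dist_coeffs(-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05);

  cv::Point2f raw(400.0f, 240.0f);  // a distorted pixel measurement
  cv::Point2f undistorted;
  // with the default rectification (identity) and new_intrinsics (1,1,0,0),
  // the result comes back in normalized image coordinates
  msckf_vio::image_handler::undistortPoint(
      raw, intrinsics, "radtan", dist_coeffs, undistorted);
  return 0;
}
```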

View File

@ -14,6 +14,10 @@
#include <opencv2/opencv.hpp> #include <opencv2/opencv.hpp>
#include <opencv2/video.hpp> #include <opencv2/video.hpp>
#include <opencv2/cudaoptflow.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudaarithm.hpp>
#include <ros/ros.h> #include <ros/ros.h>
#include <cv_bridge/cv_bridge.h> #include <cv_bridge/cv_bridge.h>
#include <image_transport/image_transport.h> #include <image_transport/image_transport.h>
@ -22,8 +26,6 @@
#include <message_filters/subscriber.h> #include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h> #include <message_filters/time_synchronizer.h>
#include "cam_state.h"
namespace msckf_vio { namespace msckf_vio {
/* /*
@ -310,7 +312,7 @@ private:
const std::vector<unsigned char>& markers, const std::vector<unsigned char>& markers,
std::vector<T>& refined_vec) { std::vector<T>& refined_vec) {
if (raw_vec.size() != markers.size()) { if (raw_vec.size() != markers.size()) {
ROS_WARN("The input size of raw_vec(%lu) and markers(%lu) does not match...", ROS_WARN("The input size of raw_vec(%i) and markers(%i) does not match...",
raw_vec.size(), markers.size()); raw_vec.size(), markers.size());
} }
for (int i = 0; i < markers.size(); ++i) { for (int i = 0; i < markers.size(); ++i) {
@ -320,8 +322,6 @@ private:
return; return;
} }
bool STREAMPAUSE;
// Indicate if this is the first image message. // Indicate if this is the first image message.
bool is_first_img; bool is_first_img;
@ -336,8 +336,15 @@ private:
std::vector<sensor_msgs::Imu> imu_msg_buffer; std::vector<sensor_msgs::Imu> imu_msg_buffer;
// Camera calibration parameters // Camera calibration parameters
CameraCalibration cam0; std::string cam0_distortion_model;
CameraCalibration cam1; cv::Vec2i cam0_resolution;
cv::Vec4d cam0_intrinsics;
cv::Vec4d cam0_distortion_coeffs;
std::string cam1_distortion_model;
cv::Vec2i cam1_resolution;
cv::Vec4d cam1_intrinsics;
cv::Vec4d cam1_distortion_coeffs;
// Take a vector from cam0 frame to the IMU frame. // Take a vector from cam0 frame to the IMU frame.
cv::Matx33d R_cam0_imu; cv::Matx33d R_cam0_imu;
@ -360,6 +367,13 @@ private:
boost::shared_ptr<GridFeatures> prev_features_ptr; boost::shared_ptr<GridFeatures> prev_features_ptr;
boost::shared_ptr<GridFeatures> curr_features_ptr; boost::shared_ptr<GridFeatures> curr_features_ptr;
cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> d_pyrLK_sparse;
cv::cuda::GpuMat cam0_curr_img;
cv::cuda::GpuMat cam1_curr_img;
cv::cuda::GpuMat cam0_points_gpu;
cv::cuda::GpuMat cam1_points_gpu;
// Number of features after each outlier removal step. // Number of features after each outlier removal step.
int before_tracking; int before_tracking;
int after_tracking; int after_tracking;

View File

@ -43,50 +43,6 @@ inline void quaternionNormalize(Eigen::Vector4d& q) {
return; return;
} }
/*
* @brief invert the rotation of the passed quaternion through
* conjugation and return the conjugate
*/
inline Eigen::Vector4d quaternionConjugate(Eigen::Vector4d& q)
{
Eigen::Vector4d p;
p(0) = -q(0);
p(1) = -q(1);
p(2) = -q(2);
p(3) = q(3);
quaternionNormalize(p);
return p;
}
/*
* @brief converts a Vector4d to a Vector3d by dropping element (3);
* this is typically used to get the vector part of a quaternion,
* e.g. for a small-angle approximation
*/
inline Eigen::Vector3d v4tov3(const Eigen::Vector4d& q)
{
Eigen::Vector3d p;
p(0) = q(0);
p(1) = q(1);
p(2) = q(2);
return p;
}
/*
* @brief convert an Eigen::Quaterniond to an Eigen::Vector4d (x, y, z, w)
*/
inline Eigen::Vector4d QtoV(const Eigen::Quaterniond& q)
{
Eigen::Vector4d p;
p(0) = q.x();
p(1) = q.y();
p(2) = q.z();
p(3) = q.w();
return p;
}
/* /*
* @brief Perform q1 * q2 * @brief Perform q1 * q2
*/ */

View File

@ -14,17 +14,11 @@
#include <string> #include <string>
#include <Eigen/Dense> #include <Eigen/Dense>
#include <Eigen/Geometry> #include <Eigen/Geometry>
#include <math.h>
#include <boost/shared_ptr.hpp> #include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
#include <ros/ros.h> #include <ros/ros.h>
#include <sensor_msgs/Imu.h> #include <sensor_msgs/Imu.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/PointCloud.h>
#include <nav_msgs/Odometry.h> #include <nav_msgs/Odometry.h>
#include <std_msgs/Float64.h>
#include <tf/transform_broadcaster.h> #include <tf/transform_broadcaster.h>
#include <std_srvs/Trigger.h> #include <std_srvs/Trigger.h>
@ -33,13 +27,6 @@
#include "feature.hpp" #include "feature.hpp"
#include <msckf_vio/CameraMeasurement.h> #include <msckf_vio/CameraMeasurement.h>
#include <cv_bridge/cv_bridge.h>
#include <image_transport/image_transport.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#define PI 3.14159265
namespace msckf_vio { namespace msckf_vio {
/* /*
* @brief MsckfVio Implements the algorithm in * @brief MsckfVio Implements the algorithm in
@ -110,27 +97,11 @@ class MsckfVio {
void imuCallback(const sensor_msgs::ImuConstPtr& msg); void imuCallback(const sensor_msgs::ImuConstPtr& msg);
/* /*
* @brief truthCallback * @brief featureCallback
* Callback function for ground truth navigation information * Callback function for feature measurements.
* @param TransformStamped msg * @param msg Stereo feature measurements.
*/ */
void truthCallback( void featureCallback(const CameraMeasurementConstPtr& msg);
const geometry_msgs::TransformStampedPtr& msg);
/*
* @brief imageCallback
* Callback function for feature measurements
* Triggers measurement update
* @param msg
* Camera 0 Image
* Camera 1 Image
* Stereo feature measurements.
*/
void imageCallback (
const sensor_msgs::ImageConstPtr& cam0_img,
const sensor_msgs::ImageConstPtr& cam1_img,
const CameraMeasurementConstPtr& feature_msg);
/* /*
* @brief publish Publish the results of VIO. * @brief publish Publish the results of VIO.
@ -155,26 +126,6 @@ class MsckfVio {
bool resetCallback(std_srvs::Trigger::Request& req, bool resetCallback(std_srvs::Trigger::Request& req,
std_srvs::Trigger::Response& res); std_srvs::Trigger::Response& res);
// Saves the exposure time and the camera measurements
void manageMovingWindow(
const sensor_msgs::ImageConstPtr& cam0_img,
const sensor_msgs::ImageConstPtr& cam1_img,
const CameraMeasurementConstPtr& feature_msg);
void calcErrorState();
// Debug related Functions
// Propagate the true state
void batchTruthProcessing(
const double& time_bound);
void processTruthtoIMU(const double& time,
const Eigen::Vector4d& m_rot,
const Eigen::Vector3d& m_trans);
// Filter related functions // Filter related functions
// Propagate the state // Propagate the state
void batchImuProcessing( void batchImuProcessing(
@ -186,12 +137,8 @@ class MsckfVio {
const Eigen::Vector3d& gyro, const Eigen::Vector3d& gyro,
const Eigen::Vector3d& acc); const Eigen::Vector3d& acc);
// groundtruth state augmentation
void truePhotometricStateAugmentation(const double& time);
// Measurement update // Measurement update
void stateAugmentation(const double& time); void stateAugmentation(const double& time);
void PhotometricStateAugmentation(const double& time);
void addFeatureObservations(const CameraMeasurementConstPtr& msg); void addFeatureObservations(const CameraMeasurementConstPtr& msg);
// This function is used to compute the measurement Jacobian // This function is used to compute the measurement Jacobian
// for a single feature observed at a single camera frame. // for a single feature observed at a single camera frame.
@ -202,118 +149,25 @@ class MsckfVio {
Eigen::Vector4d& r); Eigen::Vector4d& r);
// This function computes the Jacobian of all measurements viewed // This function computes the Jacobian of all measurements viewed
// in the given camera states of this feature. // in the given camera states of this feature.
bool featureJacobian( void featureJacobian(const FeatureIDType& feature_id,
const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids, const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r); Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
void twodotMeasurementJacobian(
const StateIDType& cam_state_id,
const FeatureIDType& feature_id,
Eigen::MatrixXd& H_x, Eigen::MatrixXd& H_y, Eigen::VectorXd& r);
bool ConstructJacobians(
Eigen::MatrixXd& H_rho,
Eigen::MatrixXd& H_pl,
Eigen::MatrixXd& H_pA,
const Feature& feature,
const StateIDType& cam_state_id,
Eigen::MatrixXd& H_xl,
Eigen::MatrixXd& H_yl);
bool PhotometricPatchPointResidual(
const StateIDType& cam_state_id,
const Feature& feature,
Eigen::VectorXd& r);
bool PhotometricPatchPointJacobian(
const CAMState& cam_state,
const StateIDType& cam_state_id,
const Feature& feature,
Eigen::Vector3d point,
int count,
Eigen::Matrix<double, 2, 1>& H_rhoj,
Eigen::Matrix<double, 2, 6>& H_plj,
Eigen::Matrix<double, 2, 6>& H_pAj,
Eigen::Matrix<double, 2, 4>& dI_dhj);
bool PhotometricMeasurementJacobian(
const StateIDType& cam_state_id,
const FeatureIDType& feature_id,
Eigen::MatrixXd& H_x,
Eigen::MatrixXd& H_y,
Eigen::VectorXd& r);
bool twodotFeatureJacobian(
const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
bool PhotometricFeatureJacobian(
const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
void photometricMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r);
void measurementUpdate(const Eigen::MatrixXd& H, void measurementUpdate(const Eigen::MatrixXd& H,
const Eigen::VectorXd& r); const Eigen::VectorXd& r);
void twoMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r);
bool gatingTest(const Eigen::MatrixXd& H, bool gatingTest(const Eigen::MatrixXd& H,
const Eigen::VectorXd&r, const int& dof, int filter=0); const Eigen::VectorXd&r, const int& dof);
void removeLostFeatures(); void removeLostFeatures();
void findRedundantCamStates( void findRedundantCamStates(
std::vector<StateIDType>& rm_cam_state_ids); std::vector<StateIDType>& rm_cam_state_ids);
void pruneLastCamStateBuffer();
void pruneCamStateBuffer(); void pruneCamStateBuffer();
// Reset the system online if the uncertainty is too large. // Reset the system online if the uncertainty is too large.
void onlineReset(); void onlineReset();
// Photometry flag
int FILTER;
// debug flag
bool STREAMPAUSE;
bool PRINTIMAGES;
bool GROUNDTRUTH;
bool nan_flag;
bool play;
double last_time_bound;
double time_offset;
// Patch size for Photometry
int N;
// Image rescale
int SCALE;
// Chi squared test table. // Chi squared test table.
static std::map<int, double> chi_squared_test_table; static std::map<int, double> chi_squared_test_table;
double eval_time;
IMUState timed_old_imu_state;
IMUState timed_old_true_state;
IMUState old_imu_state;
IMUState old_true_state;
// change in position
Eigen::Vector3d delta_position;
Eigen::Vector3d delta_orientation;
// State vector // State vector
StateServer state_server; StateServer state_server;
StateServer photometric_state_server;
// Ground truth state vector
StateServer true_state_server;
// error state based on ground truth
StateServer err_state_server;
// Maximum number of camera states // Maximum number of camera states
int max_cam_state_size; int max_cam_state_size;
@ -325,22 +179,6 @@ class MsckfVio {
// transfer delay between IMU and Image messages. // transfer delay between IMU and Image messages.
std::vector<sensor_msgs::Imu> imu_msg_buffer; std::vector<sensor_msgs::Imu> imu_msg_buffer;
// Ground Truth message data
std::vector<geometry_msgs::TransformStamped> truth_msg_buffer;
// Moving Window buffer
movingWindow cam0_moving_window;
movingWindow cam1_moving_window;
// Camera calibration parameters
CameraCalibration cam0;
CameraCalibration cam1;
// covariance data
double irradiance_frame_bias;
ros::Time image_save_time;
// Indicate if the gravity vector is set. // Indicate if the gravity vector is set.
bool is_gravity_set; bool is_gravity_set;
@ -368,21 +206,12 @@ class MsckfVio {
// Subscribers and publishers // Subscribers and publishers
ros::Subscriber imu_sub; ros::Subscriber imu_sub;
ros::Subscriber truth_sub; ros::Subscriber feature_sub;
ros::Publisher truth_odom_pub;
ros::Publisher odom_pub; ros::Publisher odom_pub;
ros::Publisher marker_pub;
ros::Publisher feature_pub; ros::Publisher feature_pub;
tf::TransformBroadcaster tf_pub; tf::TransformBroadcaster tf_pub;
ros::ServiceServer reset_srv; ros::ServiceServer reset_srv;
message_filters::Subscriber<sensor_msgs::Image> cam0_img_sub;
message_filters::Subscriber<sensor_msgs::Image> cam1_img_sub;
message_filters::Subscriber<CameraMeasurement> feature_sub;
message_filters::TimeSynchronizer<sensor_msgs::Image, sensor_msgs::Image, CameraMeasurement> image_sub;
// Frame id // Frame id
std::string fixed_frame_id; std::string fixed_frame_id;
std::string child_frame_id; std::string child_frame_id;
@ -403,9 +232,6 @@ class MsckfVio {
ros::Publisher mocap_odom_pub; ros::Publisher mocap_odom_pub;
geometry_msgs::TransformStamped raw_mocap_odom_msg; geometry_msgs::TransformStamped raw_mocap_odom_msg;
Eigen::Isometry3d mocap_initial_frame; Eigen::Isometry3d mocap_initial_frame;
Eigen::Vector4d mocap_initial_orientation;
Eigen::Vector3d mocap_initial_position;
}; };
typedef MsckfVio::Ptr MsckfVioPtr; typedef MsckfVio::Ptr MsckfVioPtr;

View File

@ -11,6 +11,9 @@
#include <ros/ros.h> #include <ros/ros.h>
#include <string> #include <string>
#include <opencv2/core/core.hpp> #include <opencv2/core/core.hpp>
#include <opencv2/cudaoptflow.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudaarithm.hpp>
#include <Eigen/Geometry> #include <Eigen/Geometry>
namespace msckf_vio { namespace msckf_vio {
@ -18,6 +21,10 @@ namespace msckf_vio {
* @brief utilities for msckf_vio * @brief utilities for msckf_vio
*/ */
namespace utils { namespace utils {
void download(const cv::cuda::GpuMat& d_mat, std::vector<uchar>& vec);
void download(const cv::cuda::GpuMat& d_mat, std::vector<cv::Point2f>& vec);
Eigen::Isometry3d getTransformEigen(const ros::NodeHandle &nh, Eigen::Isometry3d getTransformEigen(const ros::NodeHandle &nh,
const std::string &field); const std::string &field);

View File

@ -8,8 +8,7 @@
<group ns="$(arg robot)"> <group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="image_processor" <node pkg="nodelet" type="nodelet" name="image_processor"
args="standalone msckf_vio/ImageProcessorNodelet" args="standalone msckf_vio/ImageProcessorNodelet"
output="screen" output="screen">
>
<rosparam command="load" file="$(arg calibration_file)"/> <rosparam command="load" file="$(arg calibration_file)"/>
<param name="grid_row" value="4"/> <param name="grid_row" value="4"/>

View File

@ -1,33 +0,0 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-mynt.yaml"/>
<!-- Image Processor Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="image_processor"
args="standalone msckf_vio/ImageProcessorNodelet"
output="screen">
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="grid_row" value="4"/>
<param name="grid_col" value="5"/>
<param name="grid_min_feature_num" value="3"/>
<param name="grid_max_feature_num" value="4"/>
<param name="pyramid_levels" value="3"/>
<param name="patch_size" value="15"/>
<param name="fast_threshold" value="10"/>
<param name="max_iteration" value="30"/>
<param name="track_precision" value="0.01"/>
<param name="ransac_threshold" value="3"/>
<param name="stereo_threshold" value="5"/>
<remap from="~imu" to="/imu0"/>
<remap from="~cam0_image" to="/mynteye/left/image_raw"/>
<remap from="~cam1_image" to="/mynteye/right/image_raw"/>
</node>
</group>
</launch>

View File

@ -1,38 +0,0 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-tum-scaled.yaml"/>
<!-- Image Processor Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="image_processor"
args="standalone msckf_vio/ImageProcessorNodelet"
output="screen"
>
<!-- Debugging Flags -->
<param name="StreamPause" value="true"/>
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="grid_row" value="4"/>
<param name="grid_col" value="4"/>
<param name="grid_min_feature_num" value="3"/>
<param name="grid_max_feature_num" value="5"/>
<param name="pyramid_levels" value="3"/>
<param name="patch_size" value="15"/>
<param name="fast_threshold" value="10"/>
<param name="max_iteration" value="30"/>
<param name="track_precision" value="0.01"/>
<param name="ransac_threshold" value="3"/>
<param name="stereo_threshold" value="5"/>
<remap from="~imu" to="/imu0"/>
<remap from="~cam0_image" to="/cam0/image_raw"/>
<remap from="~cam1_image" to="/cam1/image_raw"/>
</node>
</group>
</launch>

View File

@ -1,37 +0,0 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-tum.yaml"/>
<!-- Image Processor Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="image_processor"
args="standalone msckf_vio/ImageProcessorNodelet"
output="screen"
>
<!-- Debugging Flags -->
<param name="StreamPause" value="true"/>
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="grid_row" value="4"/>
<param name="grid_col" value="4"/>
<param name="grid_min_feature_num" value="3"/>
<param name="grid_max_feature_num" value="4"/>
<param name="pyramid_levels" value="3"/>
<param name="patch_size" value="15"/>
<param name="fast_threshold" value="10"/>
<param name="max_iteration" value="30"/>
<param name="track_precision" value="0.01"/>
<param name="ransac_threshold" value="3"/>
<param name="stereo_threshold" value="5"/>
<remap from="~imu" to="/imu0"/>
<remap from="~cam0_image" to="/cam0/image_raw"/>
<remap from="~cam1_image" to="/cam1/image_raw"/>
</node>
</group>
</launch>

View File

@ -1,75 +0,0 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="fixed_frame_id" default="world"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-tum.yaml"/>
<!-- Image Processor Nodelet -->
<include file="$(find msckf_vio)/launch/image_processor_tum.launch">
<arg name="robot" value="$(arg robot)"/>
<arg name="calibration_file" value="$(arg calibration_file)"/>
</include>
<!-- Msckf Vio Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="vio"
args='standalone msckf_vio/MsckfVioNodelet'
output="screen"
launch-prefix="xterm -e gdb --args">
<!-- Photometry Flag-->
<param name="PHOTOMETRIC" value="true"/>
<!-- Debugging Flags -->
<param name="PrintImages" value="true"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="3"/>
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="publish_tf" value="true"/>
<param name="frame_rate" value="20"/>
<param name="fixed_frame_id" value="$(arg fixed_frame_id)"/>
<param name="child_frame_id" value="odom"/>
<param name="max_cam_state_size" value="20"/>
<param name="position_std_threshold" value="8.0"/>
<param name="rotation_threshold" value="0.2618"/>
<param name="translation_threshold" value="0.4"/>
<param name="tracking_rate_threshold" value="0.5"/>
<!-- Feature optimization config -->
<param name="feature/config/translation_threshold" value="-1.0"/>
<!-- These values should be standard deviation -->
<param name="noise/gyro" value="0.005"/>
<param name="noise/acc" value="0.05"/>
<param name="noise/gyro_bias" value="0.001"/>
<param name="noise/acc_bias" value="0.01"/>
<param name="noise/feature" value="0.035"/>
<param name="initial_state/velocity/x" value="0.0"/>
<param name="initial_state/velocity/y" value="0.0"/>
<param name="initial_state/velocity/z" value="0.0"/>
<!-- These values should be covariance -->
<param name="initial_covariance/velocity" value="0.25"/>
<param name="initial_covariance/gyro_bias" value="0.01"/>
<param name="initial_covariance/acc_bias" value="0.01"/>
<param name="initial_covariance/extrinsic_rotation_cov" value="3.0462e-4"/>
<param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/>
<param name="initial_covariance/irradiance_frame_bias" value="0.1"/>
<remap from="~imu" to="/imu0"/>
<remap from="~cam0_image" to="/cam0/image_raw"/>
<remap from="~cam1_image" to="/cam1/image_raw"/>
<remap from="~features" to="image_processor/features"/>
</node>
</group>
</launch>

View File

@ -17,18 +17,6 @@
args='standalone msckf_vio/MsckfVioNodelet' args='standalone msckf_vio/MsckfVioNodelet'
output="screen"> output="screen">
<!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
<param name="FILTER" value="1"/>
<!-- Debugging Flags -->
<param name="StreamPause" value="true"/>
<param name="PrintImages" value="true"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="5"/>
<param name="image_scale" value ="1"/>
<!-- Calibration parameters --> <!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/> <rosparam command="load" file="$(arg calibration_file)"/>
@ -65,9 +53,6 @@
<param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/> <param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/>
<remap from="~imu" to="/imu0"/> <remap from="~imu" to="/imu0"/>
<remap from="~cam0_image" to="/cam0/image_raw"/>
<remap from="~cam1_image" to="/cam1/image_raw"/>
<remap from="~features" to="image_processor/features"/> <remap from="~features" to="image_processor/features"/>
</node> </node>

View File

@ -1,61 +0,0 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="fixed_frame_id" default="world"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-mynt.yaml"/>
<!-- Image Processor Nodelet -->
<include file="$(find msckf_vio)/launch/image_processor_mynt.launch">
<arg name="robot" value="$(arg robot)"/>
<arg name="calibration_file" value="$(arg calibration_file)"/>
</include>
<!-- Msckf Vio Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="vio"
args='standalone msckf_vio/MsckfVioNodelet'
output="screen">
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="publish_tf" value="true"/>
<param name="frame_rate" value="20"/>
<param name="fixed_frame_id" value="$(arg fixed_frame_id)"/>
<param name="child_frame_id" value="odom"/>
<param name="max_cam_state_size" value="20"/>
<param name="position_std_threshold" value="8.0"/>
<param name="rotation_threshold" value="0.2618"/>
<param name="translation_threshold" value="0.4"/>
<param name="tracking_rate_threshold" value="0.5"/>
<!-- Feature optimization config -->
<param name="feature/config/translation_threshold" value="-1.0"/>
<!-- These values should be standard deviation -->
<param name="noise/gyro" value="0.005"/>
<param name="noise/acc" value="0.05"/>
<param name="noise/gyro_bias" value="0.001"/>
<param name="noise/acc_bias" value="0.01"/>
<param name="noise/feature" value="0.035"/>
<param name="initial_state/velocity/x" value="0.0"/>
<param name="initial_state/velocity/y" value="0.0"/>
<param name="initial_state/velocity/z" value="0.0"/>
<!-- These values should be covariance -->
<param name="initial_covariance/velocity" value="0.25"/>
<param name="initial_covariance/gyro_bias" value="0.01"/>
<param name="initial_covariance/acc_bias" value="0.01"/>
<param name="initial_covariance/extrinsic_rotation_cov" value="3.0462e-4"/>
<param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/>
<remap from="~imu" to="/mynteye/imu/data_raw"/>
<remap from="~features" to="image_processor/features"/>
</node>
</group>
</launch>

View File

@ -1,77 +0,0 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="fixed_frame_id" default="world"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-tum-scaled.yaml"/>
<!-- Image Processor Nodelet -->
<include file="$(find msckf_vio)/launch/image_processor_tinytum.launch">
<arg name="robot" value="$(arg robot)"/>
<arg name="calibration_file" value="$(arg calibration_file)"/>
</include>
<!-- Msckf Vio Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="vio"
args='standalone msckf_vio/MsckfVioNodelet'
output="screen">
<param name="FILTER" value="0"/>
<!-- Debugging Flags -->
<param name="StreamPause" value="true"/>
<param name="PrintImages" value="false"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="3"/>
<param name="image_scale" value ="1"/>
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="publish_tf" value="true"/>
<param name="frame_rate" value="20"/>
<param name="fixed_frame_id" value="$(arg fixed_frame_id)"/>
<param name="child_frame_id" value="odom"/>
<param name="max_cam_state_size" value="12"/>
<param name="position_std_threshold" value="8.0"/>
<param name="rotation_threshold" value="0.2618"/>
<param name="translation_threshold" value="0.4"/>
<param name="tracking_rate_threshold" value="0.5"/>
<!-- Feature optimization config -->
<param name="feature/config/translation_threshold" value="-1.0"/>
<!-- These values should be standard deviation -->
<param name="noise/gyro" value="0.005"/>
<param name="noise/acc" value="0.05"/>
<param name="noise/gyro_bias" value="0.001"/>
<param name="noise/acc_bias" value="0.01"/>
<param name="noise/feature" value="0.035"/>
<param name="initial_state/velocity/x" value="0.0"/>
<param name="initial_state/velocity/y" value="0.0"/>
<param name="initial_state/velocity/z" value="0.0"/>
<!-- These values should be covariance -->
<param name="initial_covariance/velocity" value="0.25"/>
<param name="initial_covariance/gyro_bias" value="0.01"/>
<param name="initial_covariance/acc_bias" value="0.01"/>
<param name="initial_covariance/extrinsic_rotation_cov" value="3.0462e-4"/>
<param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/>
<param name="initial_covariance/irradiance_frame_bias" value="0.1"/>
<remap from="~imu" to="/imu0"/>
<remap from="~ground_truth" to="/vrpn_client/raw_transform"/>
<remap from="~cam0_image" to="/cam0/image_raw"/>
<remap from="~cam1_image" to="/cam1/image_raw"/>
<remap from="~features" to="image_processor/features"/>
</node>
</group>
</launch>

View File

@ -1,78 +0,0 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="fixed_frame_id" default="world"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-tum.yaml"/>
<!-- Image Processor Nodelet -->
<include file="$(find msckf_vio)/launch/image_processor_tum.launch">
<arg name="robot" value="$(arg robot)"/>
<arg name="calibration_file" value="$(arg calibration_file)"/>
</include>
<!-- Msckf Vio Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="vio"
args='standalone msckf_vio/MsckfVioNodelet'
output="screen">
<!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
<param name="FILTER" value="1"/>
<!-- Debugging Flags -->
<param name="StreamPause" value="true"/>
<param name="PrintImages" value="false"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="3"/>
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="publish_tf" value="true"/>
<param name="frame_rate" value="20"/>
<param name="fixed_frame_id" value="$(arg fixed_frame_id)"/>
<param name="child_frame_id" value="odom"/>
<param name="max_cam_state_size" value="20"/>
<param name="position_std_threshold" value="8.0"/>
<param name="rotation_threshold" value="0.2618"/>
<param name="translation_threshold" value="0.4"/>
<param name="tracking_rate_threshold" value="0.5"/>
<!-- Feature optimization config -->
<param name="feature/config/translation_threshold" value="-1.0"/>
<!-- These values should be standard deviation -->
<param name="noise/gyro" value="0.005"/>
<param name="noise/acc" value="0.05"/>
<param name="noise/gyro_bias" value="0.001"/>
<param name="noise/acc_bias" value="0.01"/>
<param name="noise/feature" value="0.035"/>
<param name="initial_state/velocity/x" value="0.0"/>
<param name="initial_state/velocity/y" value="0.0"/>
<param name="initial_state/velocity/z" value="0.0"/>
<!-- These values should be covariance -->
<param name="initial_covariance/velocity" value="0.25"/>
<param name="initial_covariance/gyro_bias" value="0.01"/>
<param name="initial_covariance/acc_bias" value="0.01"/>
<param name="initial_covariance/extrinsic_rotation_cov" value="3.0462e-4"/>
<param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/>
<param name="initial_covariance/irradiance_frame_bias" value="0.1"/>
<remap from="~imu" to="/imu0"/>
<remap from="~ground_truth" to="/vrpn_client/raw_transform"/>
<remap from="~cam0_image" to="/cam0/image_raw"/>
<remap from="~cam1_image" to="/cam1/image_raw"/>
<remap from="~features" to="image_processor/features"/>
</node>
</group>
<!--node name="player" pkg="bagcontrol" type="control.py" /-->
</launch>

73
log
View File

@ -1,73 +0,0 @@
# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST <raphael@raphael-desktop>
# name: Hx
# type: matrix
# rows: 18
# columns: 49
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 313.795 -58.7912 139.778 -46.7616 144.055 86.9644 0 -314.123 55.6434 -140.648 46.7616 -144.055 -86.9644 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 441.06 -94.50069999999999 174.424 -53.7653 204.822 120.248 0 -441.685 90.1101 -175.657 53.7653 -204.822 -120.248 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 225.35 -54.5629 77.60599999999999 -21.1425 105.886 60.3706 0 -225.756 52.3373 -78.2406 21.1425 -105.886 -60.3706 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 175.128 20.6203 175.127 -79.63939999999999 73.245 62.1868 0 -174.573 -22.5235 -175.576 79.63939999999999 -73.245 -62.1868 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 296.962 43.5469 311.307 -143.667 123.399 108.355 0 -295.905 -46.7952 -312.063 143.667 -123.399 -108.355 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 126.283 117.889 311.864 -161.264 38.8521 71.8019 0 -124.464 -119.541 -312.118 161.264 -38.8521 -71.8019 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 49.2502 63.7166 155.071 -80.81950000000001 12.7732 32.1826 0 -48.2934 -64.4113 -155.157 80.81950000000001 -12.7732 -32.1826 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 69.59699999999999 154.579 335.384 -179.355 9.212580000000001 62.0364 0 -67.35599999999999 -155.735 -335.462 179.355 -9.212580000000001 -62.0364 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -66.6965 304.947 500.218 -285.589 -71.31010000000001 55.5058 0 70.8009 -305.077 -499.831 285.589 71.31010000000001 -55.5058 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 323.404 -62.2043 141.092 -46.6015 148.737 89.1108 0 0 0 0 0 0 0 0 -324.336 57.8552 -141.991 46.6015 -148.737 -89.1108 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 454.208 -99.3095 175.986 -53.4094 211.276 123.174 0 0 0 0 0 0 0 0 -455.779 93.0992 -177.158 53.4094 -211.276 -123.174 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 231.884 -57.0266 78.2559 -20.8926 109.118 61.8183 0 0 0 0 0 0 0 0 -232.824 53.8025 -78.80719999999999 20.8926 -109.118 -61.8183 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 181.715 18.8525 177.045 -80.08499999999999 76.3716 63.8254 0 0 0 0 0 0 0 0 -181.07 -20.839 -177.959 80.08499999999999 -76.3716 -63.8254 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 308.249 40.5812 314.711 -144.494 128.766 111.207 0 0 0 0 0 0 0 0 -306.972 -43.8825 -316.328 144.494 -128.766 -111.207 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 133.309 116.865 315.474 -162.598 42.0763 73.8353 0 0 0 0 0 0 0 0 -130.603 -117.454 -316.931 162.598 -42.0763 -73.8353 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 52.4426 63.3607 156.902 -81.5288 14.2139 33.1222 0 0 0 0 0 0 0 0 -50.9976 -63.4393 -157.607 81.5288 -14.2139 -33.1222 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 75.5508 154.234 339.369 -180.995 11.859 63.8956 0 0 0 0 0 0 0 0 -72.1041 -153.816 -340.865 180.995 -11.859 -63.8956 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -62.0551 306.357 506.351 -288.542 -69.50409999999999 57.4562 0 0 0 0 0 0 0 0 68.6262 -303.028 -508.423 288.542 69.50409999999999 -57.4562 1 0 0 0 0 0 0 0
# name: Hy
# type: matrix
# rows: 18
# columns: 14
1.56267 0 0 0 0 0 0 0 0 0 0.252873 0 0 0.110387
0 1.56267 0 0 0 0 0 0 0 0 0.453168 0 0 0.162961
0 0 1.56267 0 0 0 0 0 0 0 0.638441 0 0 0.0873758
0 0 0 1.56267 0 0 0 0 0 0 0.21031 0 0 0.0321517
0 0 0 0 1.56267 0 0 0 0 0 0.375554 0 0 0.0505151
0 0 0 0 0 1.56267 0 0 0 0 0.638441 0 0 -0.0336034
0 0 0 0 0 0 1.56267 0 0 0 0.157733 0 0 -0.0232704
0 0 0 0 0 0 0 1.56267 0 0 0.212814 0 0 -0.0688343
0 0 0 0 0 0 0 0 1.56267 0 0.360532 0 0 -0.187745
1.56267 0 0 0 0 0 0 0 0 0 0 0.252873 0 0.322811
0 1.56267 0 0 0 0 0 0 0 0 0 0.453168 0 0.467319
0 0 1.56267 0 0 0 0 0 0 0 0 0.638441 0 0.245907
0 0 0 1.56267 0 0 0 0 0 0 0 0.21031 0 0.135725
0 0 0 0 1.56267 0 0 0 0 0 0 0.375554 0 0.225277
0 0 0 0 0 1.56267 0 0 0 0 0 0.638441 0 0.012926
0 0 0 0 0 0 1.56267 0 0 0 0 0.157733 0 -0.0108962
0 0 0 0 0 0 0 1.56267 0 0 0 0.212814 0 -0.06974909999999999
0 0 0 0 0 0 0 0 1.56267 0 0 0.360532 0 -0.318846
# name: r
# type: matrix
# rows: 18
# columns: 1
0.0354809
0.0153183
0.0570191
-0.0372801
0.0878601
0.06811780000000001
-0.00426164
5.162985999041026e-321
6.927779999999998e-310
0
2.121999999910509e-314
0
0
0
3.6073900000086e-313
4.446590812571219e-323
3.952525166729972e-323
3.952525166729972e-323

View File

@ -1,4 +1,4 @@
Header header std_msgs/Header header
# All features on the current image, # All features on the current image,
# including tracked ones and newly detected ones. # including tracked ones and newly detected ones.
FeatureMeasurement[] features FeatureMeasurement[] features

View File

@ -3,12 +3,13 @@
<name>msckf_vio</name> <name>msckf_vio</name>
<version>0.0.1</version> <version>0.0.1</version>
<description>Multi-State Constraint Kalman Filter - Photometric expansion</description> <description>Multi-State Constraint Kalman Filter for Vision-aided Inertial Navigation</description>
<maintainer email="raphael@maenle.net">Raphael Maenle</maintainer> <maintainer email="sunke.polyu@gmail.com">Ke Sun</maintainer>
<license>Penn Software License</license> <license>Penn Software License</license>
<author email="raphael@maenle.net">Raphael Maenle</author> <author email="sunke.polyu@gmail.com">Ke Sun</author>
<author email="kartikmohta@gmail.com">Kartik Mohta</author>
<buildtool_depend>catkin</buildtool_depend> <buildtool_depend>catkin</buildtool_depend>
@ -18,7 +19,6 @@
<depend>nav_msgs</depend> <depend>nav_msgs</depend>
<depend>sensor_msgs</depend> <depend>sensor_msgs</depend>
<depend>geometry_msgs</depend> <depend>geometry_msgs</depend>
<depend>visualization_msgs</depend>
<depend>eigen_conversions</depend> <depend>eigen_conversions</depend>
<depend>tf_conversions</depend> <depend>tf_conversions</depend>
<depend>random_numbers</depend> <depend>random_numbers</depend>

View File

@ -1,97 +0,0 @@
stereo callback()
create image pyramids
_Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK._
if first Frame:
*initialize first Frame ()
else:
*track Features ()
*addnewFeatures ()
*pruneGridFeatures()
_removes worst features from any overflowing grid_
publish features (u1, v1, u2, v2)
_undistorts them beforehand_
addnewFeatures()
*mask existing features
*detect new fast features
*collect in a grid, keep only best n per grid
*stereomatch()
*save inliers into a new feature with u,v on cam0 and cam1
track Features()
*integrateIMUData ()
_uses existing IMU data between two frames to calc. rotation between the two frames_
*predictFeatureTracking()
_compensates the rotation between consecutive frames - rotates previous camera frame features to current camera rotation_
*calcOpticalFlowPyrLK()
_measures the change between the features in the previous frame and in the current frame (using the predicted features)_
*remove points outside of image region
_how does this even happen?_
*stereo match()
_find tracked features from optical flow in the camera 1 image_
_remove all features that could not be matched_
*twoPointRansac(cam0)
*twoPointRansac(cam1)
_remove any features outside best found ransac model_
twoPointRansac()
*mark all points as inliers
*compensate rotation between frames
*normalize points
*calculate difference between previous and current points
*mark large distances (over 50 pixels currently)
*calculate mean points distance
*return if inliers (non marked) < 3
*return if motion smaller than norm pixel unit
*ransac
*optimize with found inlier after random sample
*set inlier markers
initialize first Frame()
features = FastFeatureDetector detect ()
*stereo match ()
group features into grid
- according to position in the image
- sorting them by response
- save the top responses
stereo match ()
*undistort cam0 Points
*project cam0 Points to cam1 to initialize points in cam1
*calculate lk optical flow
_used because the camera calibrations are not perfect enough_
_also, calculation more efficient, because LK calculated anyway_
*compute relative trans/rot between cam0 and cam1
*remove outliers based on essential matrix
_essential matrix relates points in stereo image (pinhole model)_
for every point:
- calculate epipolar line of point in cam0
- calculate error of cam1 to epipolar line
- remove if too big
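A minimal sketch of that epipolar check (assuming normalized image coordinates and an essential matrix E = [t]x * R built from the known cam0-to-cam1 extrinsics; names are illustrative):

```cpp
#include <opencv2/core.hpp>
#include <cmath>

// distance of the cam1 match p1 from the epipolar line of the cam0 point p0,
// both given in normalized image coordinates
double epipolarError(const cv::Matx33d& E,
                     const cv::Point2f& p0, const cv::Point2f& p1) {
  cv::Vec3d h0(p0.x, p0.y, 1.0), h1(p1.x, p1.y, 1.0);
  cv::Vec3d line = E * h0;  // epipolar line of p0 in the cam1 image
  return std::abs(h1.dot(line)) /
         std::sqrt(line[0] * line[0] + line[1] * line[1]);
}
// a stereo match is kept only while epipolarError(...) stays below a threshold
```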

View File

@ -1,82 +0,0 @@
featureCallback
propagate IMU state()
state Augmentation()
add Feature Observations()
# the following possibly trigger the EKF update step:
remove Lost Features ()
prune Camera State Buffer ()
remove Lost Features()
every feature that does not have a current observation:
*just delete if not enough features
check Motion of Feature ()
_calculation here makes no sense - he uses pixel position as direction vector for feature?_
*initialize Position ()
calculate feature Jacobian and residual ()
*for every observation in this feature
- calculate u and v in camera frames, based on estimated feature position
- input results into Jacobian d(measurement)/d(camera 0/1)
- input results into Jacobian d(camera 0/1)/d(state) and Jacobian d(camera 0/1)/d(feature position)
- project both Jacobians onto the nullspace of the feature position Jacobian
- calculate residual: measurement - u and v of camera frames
- project residual onto the nullspace of the feature position Jacobian
- stack residual and Jacobians
gating Test()
*measurementUpdate()
_use calculated residuals and Jacobians to calculate change in error_
measurementUpdate():
- QR-reduce the stacked measurement Jacobians
- calculate Kalman gain based on measurement Jacobian, error-state covariance and noise
_does some fancy shit here_
- calculate estimated error after observation: delta_x = KalmanGain * residual
- add estimated error to state (imu states and cam states)
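A minimal Eigen sketch of that update step (illustrative; the "fancy" part in the real code is the QR reduction of the stacked Jacobian, which is omitted here):

```cpp
#include <Eigen/Dense>

// standard EKF update: H is the stacked measurement Jacobian, r the stacked
// residual, R the measurement noise covariance, x the error state, P its cov.
void measurementUpdate(Eigen::VectorXd& x, Eigen::MatrixXd& P,
                       const Eigen::MatrixXd& H, const Eigen::VectorXd& r,
                       const Eigen::MatrixXd& R) {
  const Eigen::MatrixXd S = H * P * H.transpose() + R;        // innovation cov.
  const Eigen::MatrixXd K = P * H.transpose() * S.inverse();  // Kalman gain
  x += K * r;  // delta_x = KalmanGain * residual, added to the state
  const Eigen::MatrixXd I = Eigen::MatrixXd::Identity(P.rows(), P.cols());
  P = (I - K * H) * P;  // covariance shrinks with the new information
}
```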
initialize Position ():
* create initial guess for global feature position ()
_uses first feature measurement on left camera and last feature measurement of right camera_
- transform first measurement to plane of last measurement
- calculate least-squares point between rays
* get position approximation using measured feature positions
_using Levenberg-Marquardt iterative search_
add Feature Observations()
* if feature not in map, add feature to map
_and add u0, v0, u1, v1 as first observation_
* if feature in map, add new observation u0,v0,u1,v1
state Augmentation()
* Add estimated cam position to state
* Update P with Jacobian of cam position
propagate IMU state ()
_uses IMU process model for every saved IMU state_
for every buffered imu state:
*remove bias
*Compute F and G matrix (continuous transition and noise cov.)
_using current orientation, gyro and acc. reading_
* approximate phi: phi = 1 + Fdt + ...
* calculate new state propagating through runge kutta
* modify transition matrix to have a proper null space?
* calculate Q = Phi*G*Q_noise*GT*PhiT
* calculate P = Phi*P*PhiT + Q
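The covariance propagation in the last three steps, as a compressed Eigen sketch (the series truncation and dt handling are assumptions, not the repository's exact discretization):

```cpp
#include <Eigen/Dense>

// one propagation step: F, G are the continuous-time transition and noise
// Jacobians, Q_noise the continuous noise covariance, dt the IMU interval
void propagateCovariance(Eigen::MatrixXd& P, const Eigen::MatrixXd& F,
                         const Eigen::MatrixXd& G,
                         const Eigen::MatrixXd& Q_noise, double dt) {
  const Eigen::MatrixXd I = Eigen::MatrixXd::Identity(F.rows(), F.cols());
  const Eigen::MatrixXd Fdt = F * dt;
  const Eigen::MatrixXd Phi = I + Fdt + 0.5 * Fdt * Fdt;  // phi = 1 + F*dt + ...
  const Eigen::MatrixXd Q =
      Phi * G * Q_noise * G.transpose() * Phi.transpose() * dt;  // Q = Phi*G*Q_noise*GT*PhiT
  P = Phi * P * Phi.transpose() + Q;  // P = Phi*P*PhiT + Q
}
```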
stateAugmentation ()
_save current IMU state as camera position_

View File

@ -1,64 +0,0 @@
#!/usr/bin/env python
import rosbag
import rospy
from sensor_msgs.msg import Imu, Image
from geometry_msgs.msg import TransformStamped
import time
import signal
import sys
def signal_handler(sig, frame):
    print('gracefully exiting the program.')
    bag.close()
    sys.exit(0)

def main():
    global bag
    cam0_topic = '/cam0/image_raw'
    cam1_topic = '/cam1/image_raw'
    imu0_topic = '/imu0'
    grnd_topic = '/vrpn_client/raw_transform'

    rospy.init_node('controlbag')
    rospy.set_param('play_bag', False)

    cam0_pub = rospy.Publisher(cam0_topic, Image, queue_size=10)
    cam1_pub = rospy.Publisher(cam1_topic, Image, queue_size=10)
    imu0_pub = rospy.Publisher(imu0_topic, Imu, queue_size=10)
    grnd_pub = rospy.Publisher(grnd_topic, TransformStamped, queue_size=10)

    signal.signal(signal.SIGINT, signal_handler)

    bag = rosbag.Bag('/home/raphael/dev/MSCKF_ws/bag/TUM/dataset-corridor1_1024_16.bag')
    for topic, msg, t in bag.read_messages(topics=[cam0_topic, cam1_topic, imu0_topic, grnd_topic]):
        # pause while the play_bag parameter is set to false
        flag = False
        while not rospy.get_param('/play_bag'):
            time.sleep(0.01)
            if not flag:
                print("stopped playback")
                flag = not flag
        if flag:
            print("resume playback")

        if topic == cam0_topic:
            cam0_pub.publish(msg)
        elif topic == cam1_topic:
            cam1_pub.publish(msg)
        elif topic == imu0_topic:
            imu0_pub.publish(msg)
        elif topic == grnd_topic:
            grnd_pub.publish(msg)
        # print msg

    bag.close()

if __name__ == "__main__":
    main()

View File

@ -1,64 +0,0 @@
#!/usr/bin/env python
import rosbag
import rospy
from sensor_msgs.msg import Imu, Image
from geometry_msgs.msg import TransformStamped
import time
import signal
import sys
def signal_handler(sig, frame):
    print('gracefully exiting the program.')
    bag.close()
    sys.exit(0)

def main():
    global bag
    cam0_topic = '/cam0/image_raw'
    cam1_topic = '/cam1/image_raw'
    imu0_topic = '/imu0'
    grnd_topic = '/vrpn_client/raw_transform'

    rospy.init_node('controlbag')
    rospy.set_param('play_bag', False)

    cam0_pub = rospy.Publisher(cam0_topic, Image, queue_size=10)
    cam1_pub = rospy.Publisher(cam1_topic, Image, queue_size=10)
    imu0_pub = rospy.Publisher(imu0_topic, Imu, queue_size=10)
    grnd_pub = rospy.Publisher(grnd_topic, TransformStamped, queue_size=10)

    signal.signal(signal.SIGINT, signal_handler)

    bag = rosbag.Bag('/home/raphael/dev/MSCKF_ws/bag/TUM/dataset-corridor1_1024_16.bag')
    for topic, msg, t in bag.read_messages(topics=[cam0_topic, cam1_topic, imu0_topic, grnd_topic]):
        # pause while the play_bag parameter is set to false
        flag = False
        while not rospy.get_param('/play_bag'):
            time.sleep(0.01)
            if not flag:
                print("stopped playback")
                flag = not flag
        if flag:
            print("resume playback")

        if topic == cam0_topic:
            cam0_pub.publish(msg)
        elif topic == cam1_topic:
            cam1_pub.publish(msg)
        elif topic == imu0_topic:
            imu0_pub.publish(msg)
        elif topic == grnd_topic:
            grnd_pub.publish(msg)
        # print msg

    bag.close()

if __name__ == "__main__":
    main()

View File

@ -1,293 +0,0 @@
#include <iostream>
#include <algorithm>
#include <set>
#include <Eigen/Dense>
#include <sensor_msgs/image_encodings.h>
#include <random_numbers/random_numbers.h>
#include <msckf_vio/CameraMeasurement.h>
#include <msckf_vio/TrackingInfo.h>
#include <msckf_vio/image_processor.h>
namespace msckf_vio {
namespace image_handler {
cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics)
{
return cv::Point2f(p_in.x * intrinsics[0] + intrinsics[2], p_in.y * intrinsics[1] + intrinsics[3]);
}
cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics)
{
return cv::Point2f((p_in.x - intrinsics[2])/intrinsics[0], (p_in.y - intrinsics[3])/intrinsics[1]);
}
void undistortImage(
cv::InputArray src,
cv::OutputArray dst,
const std::string& distortion_model,
const cv::Vec4d& intrinsics,
const cv::Vec4d& distortion_coeffs)
{
const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
0.0, intrinsics[1], intrinsics[3],
0.0, 0.0, 1.0);
if (distortion_model == "pre-equidistant")
cv::fisheye::undistortImage(src, dst, K, distortion_coeffs, K);
else if (distortion_model == "equidistant")
src.copyTo(dst);
else if (distortion_model == "pre-radtan")
cv::undistort(src, dst, K, distortion_coeffs, K);
else if (distortion_model == "radtan")
src.copyTo(dst);
}
void undistortPoint(
const cv::Point2f& pt_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
cv::Point2f& pt_out,
const cv::Matx33d &rectification_matrix,
const cv::Vec4d &new_intrinsics) {
std::vector<cv::Point2f> pts_in;
std::vector<cv::Point2f> pts_out;
pts_in.push_back(pt_in);
if (pts_in.size() == 0) return;
const cv::Matx33d K(
intrinsics[0], 0.0, intrinsics[2],
0.0, intrinsics[1], intrinsics[3],
0.0, 0.0, 1.0);
const cv::Matx33d K_new(
new_intrinsics[0], 0.0, new_intrinsics[2],
0.0, new_intrinsics[1], new_intrinsics[3],
0.0, 0.0, 1.0);
if (distortion_model == "radtan") {
cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
}
// equidistant
else if (distortion_model == "equidistant") {
cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
}
// fov
else if (distortion_model == "fov") {
for(int i = 0; i < pts_in.size(); i++)
{
float omega = distortion_coeffs[0];
float rd = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
float ru = tan(rd * omega)/(2 * tan(omega / 2));
cv::Point2f newPoint(
((pts_in[i].x - intrinsics[2]) / intrinsics[0]) * (ru / rd),
((pts_in[i].y - intrinsics[3]) / intrinsics[1]) * (ru / rd));
pts_out.push_back(newPoint);
}
}
else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
{
std::vector<cv::Point2f> temp_pts_out;
for(int i = 0; i < pts_in.size(); i++)
temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics));
pts_out = temp_pts_out;
}
else {
ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
distortion_model.c_str());
cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
}
pt_out = pts_out[0];
return;
}
void undistortPoints(
const std::vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
std::vector<cv::Point2f>& pts_out,
const cv::Matx33d &rectification_matrix,
const cv::Vec4d &new_intrinsics) {
if (pts_in.size() == 0) return;
const cv::Matx33d K(
intrinsics[0], 0.0, intrinsics[2],
0.0, intrinsics[1], intrinsics[3],
0.0, 0.0, 1.0);
const cv::Matx33d K_new(
new_intrinsics[0], 0.0, new_intrinsics[2],
0.0, new_intrinsics[1], new_intrinsics[3],
0.0, 0.0, 1.0);
if (distortion_model == "radtan") {
cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
}
else if (distortion_model == "equidistant") {
cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
}
else if (distortion_model == "fov") {
for(int i = 0; i < pts_in.size(); i++)
{
float omega = distortion_coeffs[0];
float rd = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
float ru = tan(rd * omega)/(2 * tan(omega / 2));
cv::Point2f newPoint(
((pts_in[i].x - intrinsics[2]) / intrinsics[0]) * (ru / rd),
((pts_in[i].y - intrinsics[3]) / intrinsics[1]) * (ru / rd));
pts_out.push_back(newPoint);
}
}
else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
{
std::vector<cv::Point2f> temp_pts_out;
for(int i = 0; i < pts_in.size(); i++)
temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics));
pts_out = temp_pts_out;
}
else {
ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
distortion_model.c_str());
cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
}
return;
}
std::vector<cv::Point2f> distortPoints(
const std::vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs) {
const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
0.0, intrinsics[1], intrinsics[3],
0.0, 0.0, 1.0);
std::vector<cv::Point2f> pts_out;
if (distortion_model == "radtan") {
std::vector<cv::Point3f> homogenous_pts;
cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
distortion_coeffs, pts_out);
} else if (distortion_model == "equidistant") {
cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
}
else if (distortion_model == "fov") {
for(int i = 0; i < pts_in.size(); i++)
{
// based on 'straight lines have to be straight'
float ru = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
float omega = distortion_coeffs[0];
float rd = 1 / (omega)*atan(2*ru*tan(omega / 2));
cv::Point2f newPoint(
pts_in[i].x * (rd/ru) * intrinsics[0] + intrinsics[2],
pts_in[i].y * (rd/ru) * intrinsics[1] + intrinsics[3]);
pts_out.push_back(newPoint);
}
}
else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
{
std::vector<cv::Point2f> temp_pts_out;
for(int i = 0; i < pts_in.size(); i++)
temp_pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics));
pts_out = temp_pts_out;
}
else {
ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
distortion_model.c_str());
std::vector<cv::Point3f> homogenous_pts;
cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
distortion_coeffs, pts_out);
}
return pts_out;
}
cv::Point2f distortPoint(
const cv::Point2f& pt_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs) {
std::vector<cv::Point2f> pts_in;
pts_in.push_back(pt_in);
const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
0.0, intrinsics[1], intrinsics[3],
0.0, 0.0, 1.0);
std::vector<cv::Point2f> pts_out;
if (distortion_model == "radtan") {
std::vector<cv::Point3f> homogenous_pts;
cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
distortion_coeffs, pts_out);
} else if (distortion_model == "equidistant") {
cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
}
else if (distortion_model == "fov") {
for(int i = 0; i < pts_in.size(); i++)
{
// based on 'straight lines have to be straight'
float ru = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
float omega = distortion_coeffs[0];
float rd = 1 / (omega)*atan(2*ru*tan(omega / 2));
cv::Point2f newPoint(
pts_in[i].x * (rd/ru) * intrinsics[0] + intrinsics[2],
pts_in[i].y * (rd/ru) * intrinsics[1] + intrinsics[3]);
pts_out.push_back(newPoint);
}
}
else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
{
std::vector<cv::Point2f> temp_pts_out;
for(int i = 0; i < pts_in.size(); i++)
temp_pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics));
pts_out = temp_pts_out;
}
else {
ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
distortion_model.c_str());
std::vector<cv::Point3f> homogenous_pts;
cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
distortion_coeffs, pts_out);
}
return pts_out[0];
}
} // namespace image_handler
} // namespace msckf_vio
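
As a sanity check on the `fov` branches above, the distort and undistort radius maps are exact inverses of each other. A minimal sketch in normalized (intrinsics-free) coordinates; `fovRoundTrip` is a hypothetical helper, not part of the file:

#include <cmath>
#include <opencv2/core.hpp>

// Sketch: FOV-model ("straight lines have to be straight") round trip.
// p is a normalized point with p != (0,0); omega is distortion_coeffs[0].
cv::Point2f fovRoundTrip(const cv::Point2f& p, float omega)
{
    float ru = std::sqrt(p.x * p.x + p.y * p.y);              // undistorted radius
    float rd = (1.0f / omega) * std::atan(2.0f * ru * std::tan(omega / 2.0f)); // distort
    cv::Point2f pd(p.x * rd / ru, p.y * rd / ru);
    float ru2 = std::tan(rd * omega) / (2.0f * std::tan(omega / 2.0f)); // undistort: ru2 == ru
    return cv::Point2f(pd.x * ru2 / rd, pd.y * ru2 / rd);     // recovers p
}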

View File

@ -17,7 +17,6 @@
#include <msckf_vio/TrackingInfo.h> #include <msckf_vio/TrackingInfo.h>
#include <msckf_vio/image_processor.h> #include <msckf_vio/image_processor.h>
#include <msckf_vio/utils.h> #include <msckf_vio/utils.h>
#include <msckf_vio/image_handler.h>
using namespace std; using namespace std;
using namespace cv; using namespace cv;
@ -42,54 +41,51 @@ ImageProcessor::~ImageProcessor() {
} }
bool ImageProcessor::loadParameters() { bool ImageProcessor::loadParameters() {
// debug parameters
nh.param<bool>("StreamPause", STREAMPAUSE, false);
// Camera calibration parameters // Camera calibration parameters
nh.param<string>("cam0/distortion_model", nh.param<string>("cam0/distortion_model",
cam0.distortion_model, string("radtan")); cam0_distortion_model, string("radtan"));
nh.param<string>("cam1/distortion_model", nh.param<string>("cam1/distortion_model",
cam1.distortion_model, string("radtan")); cam1_distortion_model, string("radtan"));
vector<int> cam0_resolution_temp(2); vector<int> cam0_resolution_temp(2);
nh.getParam("cam0/resolution", cam0_resolution_temp); nh.getParam("cam0/resolution", cam0_resolution_temp);
cam0.resolution[0] = cam0_resolution_temp[0]; cam0_resolution[0] = cam0_resolution_temp[0];
cam0.resolution[1] = cam0_resolution_temp[1]; cam0_resolution[1] = cam0_resolution_temp[1];
vector<int> cam1_resolution_temp(2); vector<int> cam1_resolution_temp(2);
nh.getParam("cam1/resolution", cam1_resolution_temp); nh.getParam("cam1/resolution", cam1_resolution_temp);
cam1.resolution[0] = cam1_resolution_temp[0]; cam1_resolution[0] = cam1_resolution_temp[0];
cam1.resolution[1] = cam1_resolution_temp[1]; cam1_resolution[1] = cam1_resolution_temp[1];
vector<double> cam0_intrinsics_temp(4); vector<double> cam0_intrinsics_temp(4);
nh.getParam("cam0/intrinsics", cam0_intrinsics_temp); nh.getParam("cam0/intrinsics", cam0_intrinsics_temp);
cam0.intrinsics[0] = cam0_intrinsics_temp[0]; cam0_intrinsics[0] = cam0_intrinsics_temp[0];
cam0.intrinsics[1] = cam0_intrinsics_temp[1]; cam0_intrinsics[1] = cam0_intrinsics_temp[1];
cam0.intrinsics[2] = cam0_intrinsics_temp[2]; cam0_intrinsics[2] = cam0_intrinsics_temp[2];
cam0.intrinsics[3] = cam0_intrinsics_temp[3]; cam0_intrinsics[3] = cam0_intrinsics_temp[3];
vector<double> cam1_intrinsics_temp(4); vector<double> cam1_intrinsics_temp(4);
nh.getParam("cam1/intrinsics", cam1_intrinsics_temp); nh.getParam("cam1/intrinsics", cam1_intrinsics_temp);
cam1.intrinsics[0] = cam1_intrinsics_temp[0]; cam1_intrinsics[0] = cam1_intrinsics_temp[0];
cam1.intrinsics[1] = cam1_intrinsics_temp[1]; cam1_intrinsics[1] = cam1_intrinsics_temp[1];
cam1.intrinsics[2] = cam1_intrinsics_temp[2]; cam1_intrinsics[2] = cam1_intrinsics_temp[2];
cam1.intrinsics[3] = cam1_intrinsics_temp[3]; cam1_intrinsics[3] = cam1_intrinsics_temp[3];
vector<double> cam0_distortion_coeffs_temp(4); vector<double> cam0_distortion_coeffs_temp(4);
nh.getParam("cam0/distortion_coeffs", nh.getParam("cam0/distortion_coeffs",
cam0_distortion_coeffs_temp); cam0_distortion_coeffs_temp);
cam0.distortion_coeffs[0] = cam0_distortion_coeffs_temp[0]; cam0_distortion_coeffs[0] = cam0_distortion_coeffs_temp[0];
cam0.distortion_coeffs[1] = cam0_distortion_coeffs_temp[1]; cam0_distortion_coeffs[1] = cam0_distortion_coeffs_temp[1];
cam0.distortion_coeffs[2] = cam0_distortion_coeffs_temp[2]; cam0_distortion_coeffs[2] = cam0_distortion_coeffs_temp[2];
cam0.distortion_coeffs[3] = cam0_distortion_coeffs_temp[3]; cam0_distortion_coeffs[3] = cam0_distortion_coeffs_temp[3];
vector<double> cam1_distortion_coeffs_temp(4); vector<double> cam1_distortion_coeffs_temp(4);
nh.getParam("cam1/distortion_coeffs", nh.getParam("cam1/distortion_coeffs",
cam1_distortion_coeffs_temp); cam1_distortion_coeffs_temp);
cam1.distortion_coeffs[0] = cam1_distortion_coeffs_temp[0]; cam1_distortion_coeffs[0] = cam1_distortion_coeffs_temp[0];
cam1.distortion_coeffs[1] = cam1_distortion_coeffs_temp[1]; cam1_distortion_coeffs[1] = cam1_distortion_coeffs_temp[1];
cam1.distortion_coeffs[2] = cam1_distortion_coeffs_temp[2]; cam1_distortion_coeffs[2] = cam1_distortion_coeffs_temp[2];
cam1.distortion_coeffs[3] = cam1_distortion_coeffs_temp[3]; cam1_distortion_coeffs[3] = cam1_distortion_coeffs_temp[3];
cv::Mat T_imu_cam0 = utils::getTransformCV(nh, "cam0/T_cam_imu"); cv::Mat T_imu_cam0 = utils::getTransformCV(nh, "cam0/T_cam_imu");
cv::Matx33d R_imu_cam0(T_imu_cam0(cv::Rect(0,0,3,3))); cv::Matx33d R_imu_cam0(T_imu_cam0(cv::Rect(0,0,3,3)));
@ -127,27 +123,27 @@ bool ImageProcessor::loadParameters() {
processor_config.stereo_threshold, 3); processor_config.stereo_threshold, 3);
ROS_INFO("==========================================="); ROS_INFO("===========================================");
ROS_INFO("cam0.resolution: %d, %d", ROS_INFO("cam0_resolution: %d, %d",
cam0.resolution[0], cam0.resolution[1]); cam0_resolution[0], cam0_resolution[1]);
ROS_INFO("cam0_intrinscs: %f, %f, %f, %f", ROS_INFO("cam0_intrinscs: %f, %f, %f, %f",
cam0.intrinsics[0], cam0.intrinsics[1], cam0_intrinsics[0], cam0_intrinsics[1],
cam0.intrinsics[2], cam0.intrinsics[3]); cam0_intrinsics[2], cam0_intrinsics[3]);
ROS_INFO("cam0.distortion_model: %s", ROS_INFO("cam0_distortion_model: %s",
cam0.distortion_model.c_str()); cam0_distortion_model.c_str());
ROS_INFO("cam0_distortion_coefficients: %f, %f, %f, %f", ROS_INFO("cam0_distortion_coefficients: %f, %f, %f, %f",
cam0.distortion_coeffs[0], cam0.distortion_coeffs[1], cam0_distortion_coeffs[0], cam0_distortion_coeffs[1],
cam0.distortion_coeffs[2], cam0.distortion_coeffs[3]); cam0_distortion_coeffs[2], cam0_distortion_coeffs[3]);
ROS_INFO("cam1.resolution: %d, %d", ROS_INFO("cam1_resolution: %d, %d",
cam1.resolution[0], cam1.resolution[1]); cam1_resolution[0], cam1_resolution[1]);
ROS_INFO("cam1_intrinscs: %f, %f, %f, %f", ROS_INFO("cam1_intrinscs: %f, %f, %f, %f",
cam1.intrinsics[0], cam1.intrinsics[1], cam1_intrinsics[0], cam1_intrinsics[1],
cam1.intrinsics[2], cam1.intrinsics[3]); cam1_intrinsics[2], cam1_intrinsics[3]);
ROS_INFO("cam1.distortion_model: %s", ROS_INFO("cam1_distortion_model: %s",
cam1.distortion_model.c_str()); cam1_distortion_model.c_str());
ROS_INFO("cam1_distortion_coefficients: %f, %f, %f, %f", ROS_INFO("cam1_distortion_coefficients: %f, %f, %f, %f",
cam1.distortion_coeffs[0], cam1.distortion_coeffs[1], cam1_distortion_coeffs[0], cam1_distortion_coeffs[1],
cam1.distortion_coeffs[2], cam1.distortion_coeffs[3]); cam1_distortion_coeffs[2], cam1_distortion_coeffs[3]);
cout << R_imu_cam0 << endl; cout << R_imu_cam0 << endl;
cout << t_imu_cam0.t() << endl; cout << t_imu_cam0.t() << endl;
@ -174,6 +170,10 @@ bool ImageProcessor::loadParameters() {
processor_config.ransac_threshold); processor_config.ransac_threshold);
ROS_INFO("stereo_threshold: %f", ROS_INFO("stereo_threshold: %f",
processor_config.stereo_threshold); processor_config.stereo_threshold);
ROS_INFO("OpenCV Major Version: %d",
CV_MAJOR_VERSION);
ROS_INFO("OpenCV Minor Version: %d",
CV_MINOR_VERSION);
ROS_INFO("==========================================="); ROS_INFO("===========================================");
return true; return true;
} }
@ -204,6 +204,13 @@ bool ImageProcessor::initialize() {
detector_ptr = FastFeatureDetector::create( detector_ptr = FastFeatureDetector::create(
processor_config.fast_threshold); processor_config.fast_threshold);
//create gpu optical flow lk
d_pyrLK_sparse = cuda::SparsePyrLKOpticalFlow::create(
Size(processor_config.patch_size, processor_config.patch_size),
processor_config.pyramid_levels,
processor_config.max_iteration,
true);
if (!createRosIO()) return false; if (!createRosIO()) return false;
ROS_INFO("Finish creating ROS IO..."); ROS_INFO("Finish creating ROS IO...");
@ -214,9 +221,7 @@ void ImageProcessor::stereoCallback(
const sensor_msgs::ImageConstPtr& cam0_img, const sensor_msgs::ImageConstPtr& cam0_img,
const sensor_msgs::ImageConstPtr& cam1_img) { const sensor_msgs::ImageConstPtr& cam1_img) {
// stop playing bagfile if printing images //cout << "==================================" << endl;
//if(STREAMPAUSE)
// nh.setParam("/play_bag_image", false);
// Get the current image. // Get the current image.
cam0_curr_img_ptr = cv_bridge::toCvShare(cam0_img, cam0_curr_img_ptr = cv_bridge::toCvShare(cam0_img,
@ -224,27 +229,14 @@ void ImageProcessor::stereoCallback(
cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img, cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img,
sensor_msgs::image_encodings::MONO8); sensor_msgs::image_encodings::MONO8);
ros::Time start_time = ros::Time::now();
cv::Mat new_cam0;
cv::Mat new_cam1;
image_handler::undistortImage(cam0_curr_img_ptr->image, new_cam0, cam0.distortion_model, cam0.intrinsics, cam0.distortion_coeffs);
image_handler::undistortImage(cam1_curr_img_ptr->image, new_cam1, cam1.distortion_model, cam1.intrinsics, cam1.distortion_coeffs);
new_cam0.copyTo(cam0_curr_img_ptr->image);
new_cam1.copyTo(cam1_curr_img_ptr->image);
//ROS_INFO("Publishing: %f",
// (ros::Time::now()-start_time).toSec());
// Build the image pyramids once since they're used at multiple places // Build the image pyramids once since they're used at multiple places
createImagePyramids();
// removed due to alternate cuda construct
//createImagePyramids();
// Detect features in the first frame. // Detect features in the first frame.
if (is_first_img) { if (is_first_img) {
start_time = ros::Time::now(); ros::Time start_time = ros::Time::now();
initializeFirstFrame(); initializeFirstFrame();
//ROS_INFO("Detection time: %f", //ROS_INFO("Detection time: %f",
// (ros::Time::now()-start_time).toSec()); // (ros::Time::now()-start_time).toSec());
@ -257,7 +249,7 @@ void ImageProcessor::stereoCallback(
// (ros::Time::now()-start_time).toSec()); // (ros::Time::now()-start_time).toSec());
} else { } else {
// Track the feature in the previous image. // Track the feature in the previous image.
start_time = ros::Time::now(); ros::Time start_time = ros::Time::now();
trackFeatures(); trackFeatures();
//ROS_INFO("Tracking time: %f", //ROS_INFO("Tracking time: %f",
// (ros::Time::now()-start_time).toSec()); // (ros::Time::now()-start_time).toSec());
@ -265,7 +257,6 @@ void ImageProcessor::stereoCallback(
// Add new features into the current image. // Add new features into the current image.
start_time = ros::Time::now(); start_time = ros::Time::now();
addNewFeatures(); addNewFeatures();
//ROS_INFO("Addition time: %f", //ROS_INFO("Addition time: %f",
// (ros::Time::now()-start_time).toSec()); // (ros::Time::now()-start_time).toSec());
@ -288,18 +279,16 @@ void ImageProcessor::stereoCallback(
// (ros::Time::now()-start_time).toSec()); // (ros::Time::now()-start_time).toSec());
// Publish features in the current image. // Publish features in the current image.
start_time = ros::Time::now(); ros::Time start_time = ros::Time::now();
publish(); publish();
//ROS_INFO("Publishing: %f", //ROS_INFO("Publishing: %f",
// (ros::Time::now()-start_time).toSec()); // (ros::Time::now()-start_time).toSec());
// Update the previous image and previous features. // Update the previous image and previous features.
cam0_prev_img_ptr = cam0_curr_img_ptr; cam0_prev_img_ptr = cam0_curr_img_ptr;
prev_features_ptr = curr_features_ptr; prev_features_ptr = curr_features_ptr;
std::swap(prev_cam0_pyramid_, curr_cam0_pyramid_); std::swap(prev_cam0_pyramid_, curr_cam0_pyramid_);
// Initialize the current features to empty vectors. // Initialize the current features to empty vectors.
curr_features_ptr.reset(new GridFeatures()); curr_features_ptr.reset(new GridFeatures());
for (int code = 0; code < for (int code = 0; code <
@ -307,10 +296,6 @@ void ImageProcessor::stereoCallback(
(*curr_features_ptr)[code] = vector<FeatureMetaData>(0); (*curr_features_ptr)[code] = vector<FeatureMetaData>(0);
} }
// stop playing bagfile if printing images
//if(STREAMPAUSE)
// nh.setParam("/play_bag_image", true);
return; return;
} }
@ -324,6 +309,7 @@ void ImageProcessor::imuCallback(
void ImageProcessor::createImagePyramids() { void ImageProcessor::createImagePyramids() {
const Mat& curr_cam0_img = cam0_curr_img_ptr->image; const Mat& curr_cam0_img = cam0_curr_img_ptr->image;
// TODO: build cuda optical flow
buildOpticalFlowPyramid( buildOpticalFlowPyramid(
curr_cam0_img, curr_cam0_pyramid_, curr_cam0_img, curr_cam0_pyramid_,
Size(processor_config.patch_size, processor_config.patch_size), Size(processor_config.patch_size, processor_config.patch_size),
@ -331,6 +317,7 @@ void ImageProcessor::createImagePyramids() {
BORDER_CONSTANT, false); BORDER_CONSTANT, false);
const Mat& curr_cam1_img = cam1_curr_img_ptr->image; const Mat& curr_cam1_img = cam1_curr_img_ptr->image;
// TODO: build cuda optical flow
buildOpticalFlowPyramid( buildOpticalFlowPyramid(
curr_cam1_img, curr_cam1_pyramid_, curr_cam1_img, curr_cam1_pyramid_,
Size(processor_config.patch_size, processor_config.patch_size), Size(processor_config.patch_size, processor_config.patch_size),
@ -417,6 +404,7 @@ void ImageProcessor::predictFeatureTracking(
const cv::Matx33f& R_p_c, const cv::Matx33f& R_p_c,
const cv::Vec4d& intrinsics, const cv::Vec4d& intrinsics,
vector<cv::Point2f>& compensated_pts) { vector<cv::Point2f>& compensated_pts) {
// Return directly if there are no input features. // Return directly if there are no input features.
if (input_pts.size() == 0) { if (input_pts.size() == 0) {
compensated_pts.clear(); compensated_pts.clear();
@ -447,6 +435,7 @@ void ImageProcessor::trackFeatures() {
cam0_curr_img_ptr->image.rows / processor_config.grid_row; cam0_curr_img_ptr->image.rows / processor_config.grid_row;
static int grid_width = static int grid_width =
cam0_curr_img_ptr->image.cols / processor_config.grid_col; cam0_curr_img_ptr->image.cols / processor_config.grid_col;
// Compute a rough relative rotation which takes a vector // Compute a rough relative rotation which takes a vector
// from the previous frame to the current frame. // from the previous frame to the current frame.
Matx33f cam0_R_p_c; Matx33f cam0_R_p_c;
@ -480,8 +469,10 @@ void ImageProcessor::trackFeatures() {
vector<unsigned char> track_inliers(0); vector<unsigned char> track_inliers(0);
predictFeatureTracking(prev_cam0_points, predictFeatureTracking(prev_cam0_points,
cam0_R_p_c, cam0.intrinsics, curr_cam0_points); cam0_R_p_c, cam0_intrinsics, curr_cam0_points);
//TODO: test change to sparse
/*
calcOpticalFlowPyrLK( calcOpticalFlowPyrLK(
prev_cam0_pyramid_, curr_cam0_pyramid_, prev_cam0_pyramid_, curr_cam0_pyramid_,
prev_cam0_points, curr_cam0_points, prev_cam0_points, curr_cam0_points,
@ -492,6 +483,25 @@ void ImageProcessor::trackFeatures() {
processor_config.max_iteration, processor_config.max_iteration,
processor_config.track_precision), processor_config.track_precision),
cv::OPTFLOW_USE_INITIAL_FLOW); cv::OPTFLOW_USE_INITIAL_FLOW);
*/
cam0_curr_img = cv::cuda::GpuMat(cam0_curr_img_ptr->image);
cam1_curr_img = cv::cuda::GpuMat(cam1_curr_img_ptr->image);
cam0_points_gpu = cv::cuda::GpuMat(prev_cam0_points);
cam1_points_gpu = cv::cuda::GpuMat(curr_cam0_points);
cv::cuda::GpuMat inlier_markers_gpu;
d_pyrLK_sparse->calc(cam0_curr_img,
cam1_curr_img,
cam0_points_gpu,
cam1_points_gpu,
inlier_markers_gpu,
noArray());
utils::download(cam1_points_gpu, curr_cam0_points);
utils::download(inlier_markers_gpu, track_inliers);
// Mark those tracked points out of the image region // Mark those tracked points out of the image region
// as untracked. // as untracked.
@ -575,14 +585,14 @@ void ImageProcessor::trackFeatures() {
// Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1. // Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.
vector<int> cam0_ransac_inliers(0); vector<int> cam0_ransac_inliers(0);
twoPointRansac(prev_matched_cam0_points, curr_matched_cam0_points, twoPointRansac(prev_matched_cam0_points, curr_matched_cam0_points,
cam0_R_p_c, cam0.intrinsics, cam0.distortion_model, cam0_R_p_c, cam0_intrinsics, cam0_distortion_model,
cam0.distortion_coeffs, processor_config.ransac_threshold, cam0_distortion_coeffs, processor_config.ransac_threshold,
0.99, cam0_ransac_inliers); 0.99, cam0_ransac_inliers);
vector<int> cam1_ransac_inliers(0); vector<int> cam1_ransac_inliers(0);
twoPointRansac(prev_matched_cam1_points, curr_matched_cam1_points, twoPointRansac(prev_matched_cam1_points, curr_matched_cam1_points,
cam1_R_p_c, cam1.intrinsics, cam1.distortion_model, cam1_R_p_c, cam1_intrinsics, cam1_distortion_model,
cam1.distortion_coeffs, processor_config.ransac_threshold, cam1_distortion_coeffs, processor_config.ransac_threshold,
0.99, cam1_ransac_inliers); 0.99, cam1_ransac_inliers);
// Number of features after ransac. // Number of features after ransac.
@ -607,7 +617,6 @@ void ImageProcessor::trackFeatures() {
++after_ransac; ++after_ransac;
} }
// Compute the tracking rate. // Compute the tracking rate.
int prev_feature_num = 0; int prev_feature_num = 0;
for (const auto& item : *prev_features_ptr) for (const auto& item : *prev_features_ptr)
@ -637,6 +646,7 @@ void ImageProcessor::stereoMatch(
const vector<cv::Point2f>& cam0_points, const vector<cv::Point2f>& cam0_points,
vector<cv::Point2f>& cam1_points, vector<cv::Point2f>& cam1_points,
vector<unsigned char>& inlier_markers) { vector<unsigned char>& inlier_markers) {
if (cam0_points.size() == 0) return; if (cam0_points.size() == 0) return;
if(cam1_points.size() == 0) { if(cam1_points.size() == 0) {
@ -644,20 +654,31 @@ void ImageProcessor::stereoMatch(
// rotation from stereo extrinsics // rotation from stereo extrinsics
const cv::Matx33d R_cam0_cam1 = R_cam1_imu.t() * R_cam0_imu; const cv::Matx33d R_cam0_cam1 = R_cam1_imu.t() * R_cam0_imu;
vector<cv::Point2f> cam0_points_undistorted; vector<cv::Point2f> cam0_points_undistorted;
image_handler::undistortPoints(cam0_points, cam0.intrinsics, cam0.distortion_model, undistortPoints(cam0_points, cam0_intrinsics, cam0_distortion_model,
cam0.distortion_coeffs, cam0_points_undistorted, cam0_distortion_coeffs, cam0_points_undistorted,
R_cam0_cam1); R_cam0_cam1);
<<<<<<< HEAD
cam1_points = distortPoints(cam0_points_undistorted, cam1_intrinsics, cam1_points = distortPoints(cam0_points_undistorted, cam1_intrinsics,
cam1_distortion_model, cam1_distortion_coeffs); cam1_distortion_model, cam1_distortion_coeffs);
=======
cam1_points = image_handler::distortPoints(cam0_points_undistorted, cam1.intrinsics,
cam1.distortion_model, cam1.distortion_coeffs);
>>>>>>> photometry-jakobi
} }
cam0_curr_img = cv::cuda::GpuMat(cam0_curr_img_ptr->image);
cam1_curr_img = cv::cuda::GpuMat(cam1_curr_img_ptr->image);
cam0_points_gpu = cv::cuda::GpuMat(cam0_points);
cam1_points_gpu = cv::cuda::GpuMat(cam1_points);
cv::cuda::GpuMat inlier_markers_gpu;
d_pyrLK_sparse->calc(cam0_curr_img,
cam1_curr_img,
cam0_points_gpu,
cam1_points_gpu,
inlier_markers_gpu,
noArray());
utils::download(cam1_points_gpu, cam1_points);
utils::download(inlier_markers_gpu, inlier_markers);
// Track features using LK optical flow method. // Track features using LK optical flow method.
/*
calcOpticalFlowPyrLK(curr_cam0_pyramid_, curr_cam1_pyramid_, calcOpticalFlowPyrLK(curr_cam0_pyramid_, curr_cam1_pyramid_,
cam0_points, cam1_points, cam0_points, cam1_points,
inlier_markers, noArray(), inlier_markers, noArray(),
@ -667,7 +688,7 @@ void ImageProcessor::stereoMatch(
processor_config.max_iteration, processor_config.max_iteration,
processor_config.track_precision), processor_config.track_precision),
cv::OPTFLOW_USE_INITIAL_FLOW); cv::OPTFLOW_USE_INITIAL_FLOW);
*/
// Mark those tracked points out of the image region // Mark those tracked points out of the image region
// as untracked. // as untracked.
for (int i = 0; i < cam1_points.size(); ++i) { for (int i = 0; i < cam1_points.size(); ++i) {
@ -692,21 +713,18 @@ void ImageProcessor::stereoMatch(
// Further remove outliers based on the known // Further remove outliers based on the known
// essential matrix. // essential matrix.
vector<cv::Point2f> cam0_points_undistorted(0); vector<cv::Point2f> cam0_points_undistorted(0);
vector<cv::Point2f> cam1_points_undistorted(0); vector<cv::Point2f> cam1_points_undistorted(0);
image_handler::undistortPoints( undistortPoints(
cam0_points, cam0.intrinsics, cam0.distortion_model, cam0_points, cam0_intrinsics, cam0_distortion_model,
cam0.distortion_coeffs, cam0_points_undistorted); cam0_distortion_coeffs, cam0_points_undistorted);
image_handler::undistortPoints( undistortPoints(
cam1_points, cam1.intrinsics, cam1.distortion_model, cam1_points, cam1_intrinsics, cam1_distortion_model,
cam1.distortion_coeffs, cam1_points_undistorted); cam1_distortion_coeffs, cam1_points_undistorted);
double norm_pixel_unit = 4.0 / ( double norm_pixel_unit = 4.0 / (
cam0.intrinsics[0]+cam0.intrinsics[1]+ cam0_intrinsics[0]+cam0_intrinsics[1]+
cam1.intrinsics[0]+cam1.intrinsics[1]); cam1_intrinsics[0]+cam1_intrinsics[1]);
for (int i = 0; i < cam0_points_undistorted.size(); ++i) { for (int i = 0; i < cam0_points_undistorted.size(); ++i) {
if (inlier_markers[i] == 0) continue; if (inlier_markers[i] == 0) continue;
@ -733,8 +751,8 @@ void ImageProcessor::addNewFeatures() {
cam0_curr_img_ptr->image.rows / processor_config.grid_row; cam0_curr_img_ptr->image.rows / processor_config.grid_row;
static int grid_width = static int grid_width =
cam0_curr_img_ptr->image.cols / processor_config.grid_col; cam0_curr_img_ptr->image.cols / processor_config.grid_col;
// Create a mask to avoid redetecting existing features.
// Create a mask to avoid redetecting existing features.
Mat mask(curr_img.rows, curr_img.cols, CV_8U, Scalar(1)); Mat mask(curr_img.rows, curr_img.cols, CV_8U, Scalar(1));
for (const auto& features : *curr_features_ptr) { for (const auto& features : *curr_features_ptr) {
@ -754,6 +772,7 @@ void ImageProcessor::addNewFeatures() {
mask(row_range, col_range) = 0; mask(row_range, col_range) = 0;
} }
} }
// Detect new features. // Detect new features.
vector<KeyPoint> new_features(0); vector<KeyPoint> new_features(0);
detector_ptr->detect(curr_img, new_features, mask); detector_ptr->detect(curr_img, new_features, mask);
@ -768,6 +787,7 @@ void ImageProcessor::addNewFeatures() {
new_feature_sieve[ new_feature_sieve[
row*processor_config.grid_col+col].push_back(feature); row*processor_config.grid_col+col].push_back(feature);
} }
new_features.clear(); new_features.clear();
for (auto& item : new_feature_sieve) { for (auto& item : new_feature_sieve) {
if (item.size() > processor_config.grid_max_feature_num) { if (item.size() > processor_config.grid_max_feature_num) {
@ -780,6 +800,7 @@ void ImageProcessor::addNewFeatures() {
} }
int detected_new_features = new_features.size(); int detected_new_features = new_features.size();
// Find the stereo matched points for the newly // Find the stereo matched points for the newly
// detected features. // detected features.
vector<cv::Point2f> cam0_points(new_features.size()); vector<cv::Point2f> cam0_points(new_features.size());
@ -807,6 +828,7 @@ void ImageProcessor::addNewFeatures() {
static_cast<double>(detected_new_features) < 0.1) static_cast<double>(detected_new_features) < 0.1)
ROS_WARN("Images at [%f] seems unsynced...", ROS_WARN("Images at [%f] seems unsynced...",
cam0_curr_img_ptr->header.stamp.toSec()); cam0_curr_img_ptr->header.stamp.toSec());
// Group the features into grids // Group the features into grids
GridFeatures grid_new_features; GridFeatures grid_new_features;
for (int code = 0; code < for (int code = 0; code <
@ -828,6 +850,7 @@ void ImageProcessor::addNewFeatures() {
new_feature.cam1_point = cam1_point; new_feature.cam1_point = cam1_point;
grid_new_features[code].push_back(new_feature); grid_new_features[code].push_back(new_feature);
} }
// Sort the new features in each grid based on its response. // Sort the new features in each grid based on its response.
for (auto& item : grid_new_features) for (auto& item : grid_new_features)
std::sort(item.second.begin(), item.second.end(), std::sort(item.second.begin(), item.second.end(),
@ -877,6 +900,73 @@ void ImageProcessor::pruneGridFeatures() {
return; return;
} }
void ImageProcessor::undistortPoints(
const vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const string& distortion_model,
const cv::Vec4d& distortion_coeffs,
vector<cv::Point2f>& pts_out,
const cv::Matx33d &rectification_matrix,
const cv::Vec4d &new_intrinsics) {
if (pts_in.size() == 0) return;
const cv::Matx33d K(
intrinsics[0], 0.0, intrinsics[2],
0.0, intrinsics[1], intrinsics[3],
0.0, 0.0, 1.0);
const cv::Matx33d K_new(
new_intrinsics[0], 0.0, new_intrinsics[2],
0.0, new_intrinsics[1], new_intrinsics[3],
0.0, 0.0, 1.0);
if (distortion_model == "radtan") {
cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
} else if (distortion_model == "equidistant") {
cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
} else {
ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
distortion_model.c_str());
cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
}
return;
}
vector<cv::Point2f> ImageProcessor::distortPoints(
const vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,
const string& distortion_model,
const cv::Vec4d& distortion_coeffs) {
const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
0.0, intrinsics[1], intrinsics[3],
0.0, 0.0, 1.0);
vector<cv::Point2f> pts_out;
if (distortion_model == "radtan") {
vector<cv::Point3f> homogenous_pts;
cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
distortion_coeffs, pts_out);
} else if (distortion_model == "equidistant") {
cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
} else {
ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
distortion_model.c_str());
vector<cv::Point3f> homogenous_pts;
cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
distortion_coeffs, pts_out);
}
return pts_out;
}
void ImageProcessor::integrateImuData( void ImageProcessor::integrateImuData(
Matx33f& cam0_R_p_c, Matx33f& cam1_R_p_c) { Matx33f& cam0_R_p_c, Matx33f& cam1_R_p_c) {
// Find the start and the end limit within the imu msg buffer. // Find the start and the end limit within the imu msg buffer.
@ -928,6 +1018,7 @@ void ImageProcessor::integrateImuData(
void ImageProcessor::rescalePoints( void ImageProcessor::rescalePoints(
vector<Point2f>& pts1, vector<Point2f>& pts2, vector<Point2f>& pts1, vector<Point2f>& pts2,
float& scaling_factor) { float& scaling_factor) {
scaling_factor = 0.0f; scaling_factor = 0.0f;
for (int i = 0; i < pts1.size(); ++i) { for (int i = 0; i < pts1.size(); ++i) {
@ -957,7 +1048,7 @@ void ImageProcessor::twoPointRansac(
// Check the size of input point size. // Check the size of input point size.
if (pts1.size() != pts2.size()) if (pts1.size() != pts2.size())
ROS_ERROR("Sets of different size (%lu and %lu) are used...", ROS_ERROR("Sets of different size (%i and %i) are used...",
pts1.size(), pts2.size()); pts1.size(), pts2.size());
double norm_pixel_unit = 2.0 / (intrinsics[0]+intrinsics[1]); double norm_pixel_unit = 2.0 / (intrinsics[0]+intrinsics[1]);
@ -971,10 +1062,10 @@ void ImageProcessor::twoPointRansac(
// Undistort all the points. // Undistort all the points.
vector<Point2f> pts1_undistorted(pts1.size()); vector<Point2f> pts1_undistorted(pts1.size());
vector<Point2f> pts2_undistorted(pts2.size()); vector<Point2f> pts2_undistorted(pts2.size());
image_handler::undistortPoints( undistortPoints(
pts1, intrinsics, distortion_model, pts1, intrinsics, distortion_model,
distortion_coeffs, pts1_undistorted); distortion_coeffs, pts1_undistorted);
image_handler::undistortPoints( undistortPoints(
pts2, intrinsics, distortion_model, pts2, intrinsics, distortion_model,
distortion_coeffs, pts2_undistorted); distortion_coeffs, pts2_undistorted);
@ -1192,6 +1283,7 @@ void ImageProcessor::twoPointRansac(
} }
void ImageProcessor::publish() { void ImageProcessor::publish() {
// Publish features. // Publish features.
CameraMeasurementPtr feature_msg_ptr(new CameraMeasurement); CameraMeasurementPtr feature_msg_ptr(new CameraMeasurement);
feature_msg_ptr->header.stamp = cam0_curr_img_ptr->header.stamp; feature_msg_ptr->header.stamp = cam0_curr_img_ptr->header.stamp;
@ -1211,12 +1303,12 @@ void ImageProcessor::publish() {
vector<Point2f> curr_cam0_points_undistorted(0); vector<Point2f> curr_cam0_points_undistorted(0);
vector<Point2f> curr_cam1_points_undistorted(0); vector<Point2f> curr_cam1_points_undistorted(0);
image_handler::undistortPoints( undistortPoints(
curr_cam0_points, cam0.intrinsics, cam0.distortion_model, curr_cam0_points, cam0_intrinsics, cam0_distortion_model,
cam0.distortion_coeffs, curr_cam0_points_undistorted); cam0_distortion_coeffs, curr_cam0_points_undistorted);
image_handler::undistortPoints( undistortPoints(
curr_cam1_points, cam1.intrinsics, cam1.distortion_model, curr_cam1_points, cam1_intrinsics, cam1_distortion_model,
cam1.distortion_coeffs, curr_cam1_points_undistorted); cam1_distortion_coeffs, curr_cam1_points_undistorted);
for (int i = 0; i < curr_ids.size(); ++i) { for (int i = 0; i < curr_ids.size(); ++i) {
feature_msg_ptr->features.push_back(FeatureMeasurement()); feature_msg_ptr->features.push_back(FeatureMeasurement());

File diff suppressed because it is too large

View File

@ -1,75 +0,0 @@
#!/usr/bin/env python
from __future__ import print_function

import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

class image_converter:

    def __init__(self):
        self.image0_pub = rospy.Publisher("/cam0/new_image_raw", Image, queue_size=10)
        self.image1_pub = rospy.Publisher("/cam1/new_image_raw", Image, queue_size=10)
        self.bridge = CvBridge()
        self.image0_sub = rospy.Subscriber("/cam0/image_raw", Image, self.callback_cam0)
        self.image1_sub = rospy.Subscriber("/cam1/image_raw", Image, self.callback_cam1)

    def callback_cam0(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return
        imgScale = 0.25
        newX, newY = cv_image.shape[1]*imgScale, cv_image.shape[0]*imgScale
        newimg = cv2.resize(cv_image, (int(newX), int(newY)))
        newpub = self.bridge.cv2_to_imgmsg(newimg, "bgr8")
        # reuse the original message header, adopt the resized geometry
        newdata = data
        newdata.height = newpub.height
        newdata.width = newpub.width
        newdata.step = newpub.step
        newdata.data = newpub.data
        try:
            self.image0_pub.publish(newdata)
        except CvBridgeError as e:
            print(e)

    def callback_cam1(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return
        imgScale = 0.25
        newX, newY = cv_image.shape[1]*imgScale, cv_image.shape[0]*imgScale
        newimg = cv2.resize(cv_image, (int(newX), int(newY)))
        newpub = self.bridge.cv2_to_imgmsg(newimg, "bgr8")
        newdata = data
        newdata.height = newpub.height
        newdata.width = newpub.width
        newdata.step = newpub.step
        newdata.data = newpub.data
        try:
            self.image1_pub.publish(newdata)
        except CvBridgeError as e:
            print(e)

def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)

View File

@ -11,6 +11,20 @@
namespace msckf_vio { namespace msckf_vio {
namespace utils { namespace utils {
void download(const cv::cuda::GpuMat& d_mat, std::vector<cv::Point2f>& vec)
{
vec.resize(d_mat.cols);
cv::Mat mat(1, d_mat.cols, CV_32FC2, (void*)&vec[0]);
d_mat.download(mat);
}
void download(const cv::cuda::GpuMat& d_mat, std::vector<uchar>& vec)
{
vec.resize(d_mat.cols);
cv::Mat mat(1, d_mat.cols, CV_8UC1, (void*)&vec[0]);
d_mat.download(mat);
}
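
These helpers copy the single-row GPU results back into host vectors. A minimal usage sketch; `d_pyrLK_sparse`, `prev_img_gpu` and `curr_img_gpu` are assumed to exist as in the tracking code above, and `prev_pts` is a `std::vector<cv::Point2f>`:

// Sketch: run the sparse LK tracker on the GPU and download the results.
cv::cuda::GpuMat prev_pts_gpu(prev_pts);   // upload the host points
cv::cuda::GpuMat curr_pts_gpu, status_gpu;
d_pyrLK_sparse->calc(prev_img_gpu, curr_img_gpu,
                     prev_pts_gpu, curr_pts_gpu, status_gpu, cv::noArray());
std::vector<cv::Point2f> curr_pts;
std::vector<unsigned char> status;
utils::download(curr_pts_gpu, curr_pts);   // tracked points back on the host
utils::download(status_gpu, status);       // per-point success flags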
Eigen::Isometry3d getTransformEigen(const ros::NodeHandle &nh, Eigen::Isometry3d getTransformEigen(const ros::NodeHandle &nh,
const std::string &field) { const std::string &field) {
Eigen::Isometry3d T; Eigen::Isometry3d T;