diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json new file mode 100644 index 0000000..3d22904 --- /dev/null +++ b/.vscode/c_cpp_properties.json @@ -0,0 +1,16 @@ +{ + "configurations": [ + { + "name": "Linux", + "includePath": [ + "${workspaceFolder}/**" + ], + "defines": [], + "compilerPath": "/usr/bin/gcc", + "cStandard": "c11", + "cppStandard": "c++14", + "intelliSenseMode": "clang-x64" + } + ], + "version": 4 +} \ No newline at end of file diff --git a/.vscode/ipch/778a17e566a4909e/mmap_address.bin b/.vscode/ipch/778a17e566a4909e/mmap_address.bin new file mode 100644 index 0000000..42269a4 Binary files /dev/null and b/.vscode/ipch/778a17e566a4909e/mmap_address.bin differ diff --git a/.vscode/ipch/ccf983af1f87ec2b/mmap_address.bin b/.vscode/ipch/ccf983af1f87ec2b/mmap_address.bin new file mode 100644 index 0000000..bea14df Binary files /dev/null and b/.vscode/ipch/ccf983af1f87ec2b/mmap_address.bin differ diff --git a/.vscode/ipch/e40aedd19a224f8d/mmap_address.bin b/.vscode/ipch/e40aedd19a224f8d/mmap_address.bin new file mode 100644 index 0000000..3e2f12a Binary files /dev/null and b/.vscode/ipch/e40aedd19a224f8d/mmap_address.bin differ diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..2086c47 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "files.associations": { + "core": "cpp", + "sparsecore": "cpp" + } +} \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index d81aed6..99992f6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -24,6 +24,7 @@ find_package(catkin REQUIRED COMPONENTS pcl_conversions pcl_ros std_srvs + visualization_msgs ) ## System dependencies are found with CMake's conventions @@ -79,6 +80,7 @@ include_directories( add_library(msckf_vio src/msckf_vio.cpp src/utils.cpp + src/image_handler.cpp ) add_dependencies(msckf_vio ${${PROJECT_NAME}_EXPORTED_TARGETS} @@ -87,6 +89,7 @@ add_dependencies(msckf_vio target_link_libraries(msckf_vio 
${catkin_LIBRARIES} ${SUITESPARSE_LIBRARIES} + ${OpenCV_LIBRARIES} ) # Msckf Vio nodelet @@ -106,6 +109,7 @@ target_link_libraries(msckf_vio_nodelet add_library(image_processor src/image_processor.cpp src/utils.cpp + src/image_handler.cpp ) add_dependencies(image_processor ${${PROJECT_NAME}_EXPORTED_TARGETS} diff --git a/GPATH b/GPATH new file mode 100644 index 0000000..de48055 Binary files /dev/null and b/GPATH differ diff --git a/GRTAGS b/GRTAGS new file mode 100644 index 0000000..6894142 Binary files /dev/null and b/GRTAGS differ diff --git a/GSYMS b/GSYMS new file mode 100644 index 0000000..65789a0 Binary files /dev/null and b/GSYMS differ diff --git a/GTAGS b/GTAGS new file mode 100644 index 0000000..d31c9ce Binary files /dev/null and b/GTAGS differ diff --git a/config/camchain-imucam-euroc.yaml b/config/camchain-imucam-euroc.yaml index de4c321..ad2572c 100644 --- a/config/camchain-imucam-euroc.yaml +++ b/config/camchain-imucam-euroc.yaml @@ -9,7 +9,7 @@ cam0: 0, 0, 0, 1.000000000000000] camera_model: pinhole distortion_coeffs: [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05] - distortion_model: radtan + distortion_model: pre-radtan intrinsics: [458.654, 457.296, 367.215, 248.375] resolution: [752, 480] timeshift_cam_imu: 0.0 @@ -26,7 +26,7 @@ cam1: 0, 0, 0, 1.000000000000000] camera_model: pinhole distortion_coeffs: [-0.28368365, 0.07451284, -0.00010473, -3.55590700e-05] - distortion_model: radtan + distortion_model: pre-radtan intrinsics: [457.587, 456.134, 379.999, 255.238] resolution: [752, 480] timeshift_cam_imu: 0.0 diff --git a/config/camchain-imucam-tum-scaled.yaml b/config/camchain-imucam-tum-scaled.yaml new file mode 100644 index 0000000..f5ea7e8 --- /dev/null +++ b/config/camchain-imucam-tum-scaled.yaml @@ -0,0 +1,36 @@ +cam0: + T_cam_imu: + [-0.9995378259923383, 0.02917807204183088, -0.008530798463872679, 0.047094247958417004, + 0.007526588843243184, -0.03435493139706542, -0.9993813532126198, -0.04788273017221637, + -0.029453096117288798, 
-0.9989836729399656, 0.034119442089241274, -0.0697294754693238, + 0.0, 0.0, 0.0, 1.0] + camera_model: pinhole + distortion_coeffs: [0.0034823894022493434, 0.0007150348452162257, -0.0020532361418706202, + 0.00020293673591811182] + distortion_model: pre-equidistant + intrinsics: [190.97847715128717, 190.9733070521226, 254.93170605935475, 256.8974428996504] + resolution: [512, 512] + rostopic: /cam0/image_raw +cam1: + T_cam_imu: + [-0.9995240747493029, 0.02986739485347808, -0.007717688852024281, -0.05374086123613335, + 0.008095979457928231, 0.01256553460985914, -0.9998882749870535, -0.04648588412432889, + -0.02976708103202316, -0.9994748851595197, -0.0128013601698453, -0.07333210787623645, + 0.0, 0.0, 0.0, 1.0] + T_cn_cnm1: + [0.9999994317488622, -0.0008361847221513937, -0.0006612844045898121, -0.10092123225528335, + 0.0008042457277382264, 0.9988989443471681, -0.04690684567228134, -0.001964540595211977, + 0.0006997790813734836, 0.04690628718225568, 0.9988990492196964, -0.0014663556043866572, + 0.0, 0.0, 0.0, 1.0] + camera_model: pinhole + distortion_coeffs: [0.0034003170790442797, 0.001766278153469831, -0.00266312569781606, + 0.0003299517423931039] + distortion_model: pre-equidistant + intrinsics: [190.44236969414825, 190.4344384721956, 252.59949716835982, 254.91723064636983] + resolution: [512, 512] + rostopic: /cam1/image_raw +T_imu_body: + [1.0000, 0.0000, 0.0000, 0.0000, + 0.0000, 1.0000, 0.0000, 0.0000, + 0.0000, 0.0000, 1.0000, 0.0000, + 0.0000, 0.0000, 0.0000, 1.0000] diff --git a/config/camchain-imucam-tum.yaml b/config/camchain-imucam-tum.yaml new file mode 100644 index 0000000..7edbd87 --- /dev/null +++ b/config/camchain-imucam-tum.yaml @@ -0,0 +1,36 @@ +cam0: + T_cam_imu: + [-0.9995378259923383, 0.02917807204183088, -0.008530798463872679, 0.047094247958417004, + 0.007526588843243184, -0.03435493139706542, -0.9993813532126198, -0.04788273017221637, + -0.029453096117288798, -0.9989836729399656, 0.034119442089241274, -0.0697294754693238, + 0.0, 0.0, 0.0, 1.0] 
+ camera_model: pinhole + distortion_coeffs: [0.010171079892421483, -0.010816440029919381, 0.005942781769412756, + -0.001662284667857643] + distortion_model: equidistant + intrinsics: [380.81042871360756, 380.81194179427075, 510.29465304840727, 514.3304630538506] + resolution: [1024, 1024] + rostopic: /cam0/image_raw +cam1: + T_cam_imu: + [-0.9995240747493029, 0.02986739485347808, -0.007717688852024281, -0.05374086123613335, + 0.008095979457928231, 0.01256553460985914, -0.9998882749870535, -0.04648588412432889, + -0.02976708103202316, -0.9994748851595197, -0.0128013601698453, -0.07333210787623645, + 0.0, 0.0, 0.0, 1.0] + T_cn_cnm1: + [0.9999994317488622, -0.0008361847221513937, -0.0006612844045898121, -0.10092123225528335, + 0.0008042457277382264, 0.9988989443471681, -0.04690684567228134, -0.001964540595211977, + 0.0006997790813734836, 0.04690628718225568, 0.9988990492196964, -0.0014663556043866572, + 0.0, 0.0, 0.0, 1.0] + camera_model: pinhole + distortion_coeffs: [0.01371679169245271, -0.015567360615942622, 0.00905043103315326, + -0.002347858896562788] + distortion_model: equidistant + intrinsics: [379.2869884263036, 379.26583742214524, 505.5666703237407, 510.2840961765407] + resolution: [1024, 1024] + rostopic: /cam1/image_raw +T_imu_body: + [1.0000, 0.0000, 0.0000, 0.0000, + 0.0000, 1.0000, 0.0000, 0.0000, + 0.0000, 0.0000, 1.0000, 0.0000, + 0.0000, 0.0000, 0.0000, 1.0000] \ No newline at end of file diff --git a/include/msckf_vio/cam_state.h b/include/msckf_vio/cam_state.h index 8d528a1..9eda6f5 100644 --- a/include/msckf_vio/cam_state.h +++ b/include/msckf_vio/cam_state.h @@ -15,6 +15,37 @@ #include "imu_state.h" namespace msckf_vio { + +struct Frame{ + cv::Mat image; + cv::Mat dximage; + cv::Mat dyimage; + double exposureTime_ms; +}; + +typedef std::map, + Eigen::aligned_allocator< + std::pair > > movingWindow; + +struct IlluminationParameter{ + double frame_bias; + double frame_gain; + double feature_bias; + double feature_gain; +}; + +struct 
CameraCalibration{ + std::string distortion_model; + cv::Vec2i resolution; + cv::Vec4d intrinsics; + cv::Vec4d distortion_coeffs; + movingWindow moving_window; + cv::Mat featureVisu; + int id; +}; + + + /* * @brief CAMState Stored camera state in order to * form measurement model. @@ -35,6 +66,9 @@ struct CAMState { // Position of the camera frame in the world frame. Eigen::Vector3d position; + // Illumination Information of the frame + IlluminationParameter illumination; + // These two variables should have the same physical // interpretation with `orientation` and `position`. // There two variables are used to modify the measurement diff --git a/include/msckf_vio/feature.hpp b/include/msckf_vio/feature.hpp index f9f08c5..0014809 100644 --- a/include/msckf_vio/feature.hpp +++ b/include/msckf_vio/feature.hpp @@ -15,11 +15,18 @@ #include #include #include +#include +#include +#include +#include + +#include "image_handler.h" #include "math_utils.hpp" #include "imu_state.h" #include "cam_state.h" + namespace msckf_vio { /* @@ -57,11 +64,16 @@ struct Feature { // Constructors for the struct. 
Feature(): id(0), position(Eigen::Vector3d::Zero()), - is_initialized(false) {} + is_initialized(false), is_anchored(false) {} Feature(const FeatureIDType& new_id): id(new_id), position(Eigen::Vector3d::Zero()), - is_initialized(false) {} + is_initialized(false), is_anchored(false) {} + + +void Rhocost(const Eigen::Isometry3d& T_c0_ci, + const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2, + double& e) const; /* * @brief cost Compute the cost of the camera observations @@ -75,6 +87,13 @@ struct Feature { const Eigen::Vector3d& x, const Eigen::Vector2d& z, double& e) const; + bool initializeRho(const CamStateServer& cam_states); + + inline void RhoJacobian(const Eigen::Isometry3d& T_c0_ci, + const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2, + Eigen::Matrix& J, Eigen::Vector2d& r, + double& w) const; + /* * @brief jacobian Compute the Jacobian of the camera observation * @param T_c0_c1 A rigid body transformation takes @@ -90,6 +109,10 @@ struct Feature { Eigen::Matrix& J, Eigen::Vector2d& r, double& w) const; + inline double generateInitialDepth( + const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1, + const Eigen::Vector2d& z2) const; + /* * @brief generateInitialGuess Compute the initial guess of * the feature's 3d position using only two views. @@ -114,6 +137,20 @@ struct Feature { inline bool checkMotion( const CamStateServer& cam_states) const; + + /* + * @brief InitializeAnchor generates the NxN patch around the + * feature in the Anchor image + * @param cam_states: A map containing all recorded images + * currently presented in the camera state vector + * @return the irradiance of the Anchor NxN Patch + * @return True if the Anchor can be estimated + */ + + bool initializeAnchor( + const CameraCalibration& cam, int N); + + /* * @brief InitializePosition Intialize the feature position * based on all current available measurements. 
@@ -128,6 +165,80 @@ struct Feature { inline bool initializePosition( const CamStateServer& cam_states); + cv::Point2f pixelDistanceAt( + const CAMState& cam_state, + const StateIDType& cam_state_id, + const CameraCalibration& cam, + Eigen::Vector3d& in_p) const; + + +/* +* @brief project PositionToCamera Takes a 3d position in a world frame +* and projects it into the passed camera frame using pinhole projection +* then distorts it using camera information to get +* the resulting distorted pixel position +*/ + inline cv::Point2f projectPositionToCamera( + const CAMState& cam_state, + const StateIDType& cam_state_id, + const CameraCalibration& cam, + Eigen::Vector3d& in_p) const; + + double CompleteCvKernel( + const cv::Point2f pose, + const StateIDType& cam_state_id, + CameraCalibration& cam, + std::string type) const; + + double cvKernel( + const cv::Point2f pose, + std::string type) const; + + double Kernel( + const cv::Point2f pose, + const cv::Mat frame, + std::string type) const; + + /* + * @brief IrradianceAnchorPatch_Camera returns irradiance values + * of the Anchor Patch position in a camera frame + * + */ + + bool estimate_FrameIrradiance( + const CAMState& cam_state, + const StateIDType& cam_state_id, + CameraCalibration& cam, + std::vector& anchorPatch_estimate, + IlluminationParameter& estimatedIllumination) const; + +bool MarkerGeneration( + ros::Publisher& marker_pub, + const CamStateServer& cam_states) const; + +bool VisualizeKernel( + const CAMState& cam_state, + const StateIDType& cam_state_id, + CameraCalibration& cam0) const; + + bool VisualizePatch( + const CAMState& cam_state, + const StateIDType& cam_state_id, + CameraCalibration& cam, + const Eigen::VectorXd& photo_r, + std::stringstream& ss) const; + /* + * @brief AnchorPixelToPosition uses the calcualted pixels + * of the anchor patch to generate 3D positions of all of em + */ +inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p, + const CameraCalibration& cam); + + /* + * 
@brief Irradiance returns irradiance value of a pixel + */ + + inline float PixelIrradiance(cv::Point2f pose, cv::Mat image) const; // An unique identifier for the feature. // In case of long time running, the variable @@ -144,13 +255,38 @@ struct Feature { Eigen::aligned_allocator< std::pair > > observations; + // NxN Patch of Anchor Image + std::vector anchorPatch; + std::vector anchorPatch_ideal; + std::vector anchorPatch_real; + + // Position of NxN Patch in 3D space in anchor camera frame + std::vector anchorPatch_3d; + + // Anchor Isometry + Eigen::Isometry3d T_anchor_w; + // 3d postion of the feature in the world frame. Eigen::Vector3d position; + // inverse depth representation + double anchor_rho; + // A indicator to show if the 3d postion of the feature // has been initialized or not. bool is_initialized; + bool is_anchored; + + cv::Mat abs_xderImage; + cv::Mat abs_yderImage; + + cv::Mat xderImage; + cv::Mat yderImage; + + cv::Mat anchorImage_blurred; + cv::Point2f anchor_center_pos; + cv::Point2f undist_anchor_center_pos; // Noise for a normalized feature measurement. static double observation_noise; @@ -164,10 +300,31 @@ typedef std::map, Eigen::aligned_allocator< std::pair > > MapServer; +void Feature::Rhocost(const Eigen::Isometry3d& T_c0_ci, + const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2, + double& e) const +{ + // Compute hi1, hi2, and hi3 as Equation (37). + const double& rho = x; + + Eigen::Vector3d h = T_c0_ci.linear()* + Eigen::Vector3d(z1(0), z1(1), 1.0) + rho*T_c0_ci.translation(); + double& h1 = h(0); + double& h2 = h(1); + double& h3 = h(2); + + // Predict the feature observation in ci frame. + Eigen::Vector2d z_hat(h1/h3, h2/h3); + + // Compute the residual. + e = (z_hat-z2).squaredNorm(); + return; +} void Feature::cost(const Eigen::Isometry3d& T_c0_ci, const Eigen::Vector3d& x, const Eigen::Vector2d& z, - double& e) const { + double& e) const +{ // Compute hi1, hi2, and hi3 as Equation (37). 
const double& alpha = x(0); const double& beta = x(1); @@ -175,9 +332,9 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci, Eigen::Vector3d h = T_c0_ci.linear()* Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation(); - double& h1 = h(0); - double& h2 = h(1); - double& h3 = h(2); + double h1 = h(0); + double h2 = h(1); + double h3 = h(2); // Predict the feature observation in ci frame. Eigen::Vector2d z_hat(h1/h3, h2/h3); @@ -187,10 +344,47 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci, return; } +void Feature::RhoJacobian(const Eigen::Isometry3d& T_c0_ci, + const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2, + Eigen::Matrix& J, Eigen::Vector2d& r, + double& w) const +{ + + const double& rho = x; + + Eigen::Vector3d h = T_c0_ci.linear()* + Eigen::Vector3d(z1(0), z2(1), 1.0) + rho*T_c0_ci.translation(); + double& h1 = h(0); + double& h2 = h(1); + double& h3 = h(2); + + // Compute the Jacobian. + Eigen::Matrix3d W; + W.leftCols<2>() = T_c0_ci.linear().leftCols<2>(); + W.rightCols<1>() = T_c0_ci.translation(); + + J(0,0) = -h1/(h3*h3); + J(1,0) = -h2/(h3*h3); + + // Compute the residual. + Eigen::Vector2d z_hat(h1/h3, h2/h3); + r = z_hat - z2; + + // Compute the weight based on the residual. + double e = r.norm(); + if (e <= optimization_config.huber_epsilon) + w = 1.0; + else + w = optimization_config.huber_epsilon / (2*e); + + return; +} + void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci, const Eigen::Vector3d& x, const Eigen::Vector2d& z, Eigen::Matrix& J, Eigen::Vector2d& r, - double& w) const { + double& w) const +{ // Compute hi1, hi2, and hi3 as Equation (37). 
const double& alpha = x(0); @@ -225,9 +419,10 @@ void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci, return; } -void Feature::generateInitialGuess( +double Feature::generateInitialDepth( const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1, - const Eigen::Vector2d& z2, Eigen::Vector3d& p) const { + const Eigen::Vector2d& z2) const +{ // Construct a least square problem to solve the depth. Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0); @@ -241,14 +436,23 @@ void Feature::generateInitialGuess( // Solve for the depth. double depth = (A.transpose() * A).inverse() * A.transpose() * b; + return depth; +} + + +void Feature::generateInitialGuess( + const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1, + const Eigen::Vector2d& z2, Eigen::Vector3d& p) const +{ + double depth = generateInitialDepth(T_c1_c2, z1, z2); p(0) = z1(0) * depth; p(1) = z1(1) * depth; p(2) = depth; return; } -bool Feature::checkMotion( - const CamStateServer& cam_states) const { +bool Feature::checkMotion(const CamStateServer& cam_states) const +{ const StateIDType& first_cam_id = observations.begin()->first; const StateIDType& last_cam_id = (--observations.end())->first; @@ -290,9 +494,792 @@ bool Feature::checkMotion( else return false; } -bool Feature::initializePosition( - const CamStateServer& cam_states) { +double Feature::CompleteCvKernel( + const cv::Point2f pose, + const StateIDType& cam_state_id, + CameraCalibration& cam, + std::string type) const +{ + + double delta = 0; + + if(type == "Sobel_x") + delta = ((double)cam.moving_window.find(cam_state_id)->second.dximage.at(pose.y, pose.x))/255.; + else if (type == "Sobel_y") + delta = ((double)cam.moving_window.find(cam_state_id)->second.dyimage.at(pose.y, pose.x))/255.; + + return delta; +} + +double Feature::cvKernel( + const cv::Point2f pose, + std::string type) const +{ + + double delta = 0; + + if(type == "Sobel_x") + delta = ((double)xderImage.at(pose.y, pose.x))/255.; + else if (type == 
"Sobel_y") + delta = ((double)yderImage.at(pose.y, pose.x))/255.; + return delta; + +} + +double Feature::Kernel( + const cv::Point2f pose, + const cv::Mat frame, + std::string type) const +{ +Eigen::Matrix kernel = Eigen::Matrix::Zero(); +if(type == "Sobel_x") + kernel << -1., 0., 1.,-2., 0., 2. , -1., 0., 1.; +else if(type == "Sobel_y") + kernel << -1., -2., -1., 0., 0., 0., 1., 2., 1.; + +double delta = 0; +int offs = (int)(kernel.rows()-1)/2; + +for(int i = 0; i < kernel.rows(); i++) + for(int j = 0; j < kernel.cols(); j++) + delta += ((float)frame.at(pose.y+j-offs , pose.x+i-offs))/255. * (float)kernel(j,i); + +return delta; +} +bool Feature::estimate_FrameIrradiance( + const CAMState& cam_state, + const StateIDType& cam_state_id, + CameraCalibration& cam, + std::vector& anchorPatch_estimate, + IlluminationParameter& estimated_illumination) const +{ + // get irradiance of patch in anchor frame + // subtract estimated b and divide by a of anchor frame + // muliply by a and add b of this frame + + auto anchor = observations.begin(); + if(cam.moving_window.find(anchor->first) == cam.moving_window.end()) + return false; + + double anchorExposureTime_ms = cam.moving_window.find(anchor->first)->second.exposureTime_ms; + double frameExposureTime_ms = cam.moving_window.find(cam_state_id)->second.exposureTime_ms; + + + double a_A = anchorExposureTime_ms; + double b_A = 0; + double a_l = frameExposureTime_ms; + double b_l = 0; + + estimated_illumination.frame_gain = a_l; + estimated_illumination.frame_bias = b_l; + estimated_illumination.feature_gain = 1; + estimated_illumination.feature_bias = 0; + //printf("frames: %lld, %lld\n", anchor->first, cam_state_id); + //printf("exposure: %f, %f\n", a_A, a_l); + + for (double anchorPixel : anchorPatch) + { + float irradiance = (anchorPixel - b_A) / a_A ; + anchorPatch_estimate.push_back(irradiance); + } +} + +// generates markers for every camera position/observation +// and estimated feature/path position +bool 
Feature::MarkerGeneration( + ros::Publisher& marker_pub, + const CamStateServer& cam_states) const +{ + visualization_msgs::MarkerArray ma; + + // add all camera states used for estimation + int count = 0; + + for(auto observation : observations) + { + visualization_msgs::Marker marker; + marker.header.frame_id = "world"; + marker.header.stamp = ros::Time::now(); + + marker.ns = "cameras"; + marker.id = count++; + marker.type = visualization_msgs::Marker::ARROW; + marker.action = visualization_msgs::Marker::ADD; + + marker.pose.position.x = cam_states.find(observation.first)->second.position(0); + marker.pose.position.y = cam_states.find(observation.first)->second.position(1); + marker.pose.position.z = cam_states.find(observation.first)->second.position(2); + + // rotate form x to z axis + Eigen::Vector4d q = quaternionMultiplication(Eigen::Vector4d(0, -0.707, 0, 0.707), cam_states.find(observation.first)->second.orientation); + + marker.pose.orientation.x = q(0); + marker.pose.orientation.y = q(1); + marker.pose.orientation.z = q(2); + marker.pose.orientation.w = q(3); + + marker.scale.x = 0.15; + marker.scale.y = 0.05; + marker.scale.z = 0.05; + + if(count == 1) + { + marker.color.r = 0.0f; + marker.color.g = 0.0f; + marker.color.b = 1.0f; + } + else + { + marker.color.r = 0.0f; + marker.color.g = 1.0f; + marker.color.b = 0.0f; + } + marker.color.a = 1.0; + + marker.lifetime = ros::Duration(0); + + ma.markers.push_back(marker); + } + + // 'delete' any existing cameras (make invisible) + for(int i = count; i < 20; i++) + { + visualization_msgs::Marker marker; + + marker.header.frame_id = "world"; + marker.header.stamp = ros::Time::now(); + + marker.ns = "cameras"; + marker.id = i; + marker.type = visualization_msgs::Marker::ARROW; + marker.action = visualization_msgs::Marker::ADD; + + marker.pose.orientation.w = 1; + + marker.color.a = 0.0; + + marker.lifetime = ros::Duration(1); + + ma.markers.push_back(marker); + } + + //generate feature patch points position + 
visualization_msgs::Marker marker; + + marker.header.frame_id = "world"; + marker.header.stamp = ros::Time::now(); + + marker.ns = "patch"; + marker.id = 0; + marker.type = visualization_msgs::Marker::POINTS; + marker.action = visualization_msgs::Marker::ADD; + + marker.pose.orientation.w = 1; + + marker.scale.x = 0.02; + marker.scale.y = 0.02; + + marker.color.r = 1.0f; + marker.color.g = 0.0f; + marker.color.b = 0.0f; + marker.color.a = 1.0; + + for(auto point : anchorPatch_3d) + { + geometry_msgs::Point p; + p.x = point(0); + p.y = point(1); + p.z = point(2); + marker.points.push_back(p); + } + ma.markers.push_back(marker); + + marker_pub.publish(ma); +} + + +bool Feature::VisualizeKernel( + const CAMState& cam_state, + const StateIDType& cam_state_id, + CameraCalibration& cam0) const +{ + auto anchor = observations.begin(); + cv::Mat anchorImage = cam0.moving_window.find(anchor->first)->second.image; + + //cv::Mat xderImage; + //cv::Mat yderImage; + + //cv::Sobel(anchorImage, xderImage, CV_8UC1, 1, 0, 3); + //cv::Sobel(anchorImage, yderImage, CV_8UC1, 0, 1, 3); + + + // cv::Mat xderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type()); + // cv::Mat yderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type()); + + /* + for(int i = 1; i < anchorImage.rows-1; i++) + for(int j = 1; j < anchorImage.cols-1; j++) + xderImage2.at(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_x")); + + + for(int i = 1; i < anchorImage.rows-1; i++) + for(int j = 1; j < anchorImage.cols-1; j++) + yderImage2.at(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_y")); + */ + //cv::imshow("anchor", anchorImage); + cv::imshow("xder2", xderImage); + cv::imshow("yder2", yderImage); + + cvWaitKey(0); +} + +bool Feature::VisualizePatch( + const CAMState& cam_state, + const StateIDType& cam_state_id, + CameraCalibration& cam, + const Eigen::VectorXd& photo_r, + std::stringstream& ss) const +{ + + double rescale = 1; + + 
//visu - anchor + auto anchor = observations.begin(); + cv::Mat anchorImage = cam.moving_window.find(anchor->first)->second.image; + cv::Mat dottedFrame(anchorImage.size(), CV_8UC3); + cv::cvtColor(anchorImage, dottedFrame, CV_GRAY2RGB); + + // visualize the true anchor points (the surrounding of the original measurements) + for(auto point : anchorPatch_real) + { + // visu - feature + cv::Point xs(point.x, point.y); + cv::Point ys(point.x, point.y); + cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,255)); + } + cam.featureVisu = dottedFrame.clone(); + + // visu - feature + cv::Mat current_image = cam.moving_window.find(cam_state_id)->second.image; + cv::cvtColor(current_image, dottedFrame, CV_GRAY2RGB); + + // set position in frame + // save irradiance of projection + std::vector projectionPatch; + for(auto point : anchorPatch_3d) + { + cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam, point); + projectionPatch.push_back(PixelIrradiance(p_in_c0, current_image)); + // visu - feature + cv::Point xs(p_in_c0.x, p_in_c0.y); + cv::Point ys(p_in_c0.x, p_in_c0.y); + cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,0)); + } + + cv::hconcat(cam.featureVisu, dottedFrame, cam.featureVisu); + + + // patches visualization + int N = sqrt(anchorPatch_3d.size()); + int scale = 30; + cv::Mat irradianceFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255)); + cv::resize(irradianceFrame, irradianceFrame, cv::Size(), rescale, rescale); + + // irradiance grid anchor + std::stringstream namer; + namer << "anchor"; + cv::putText(irradianceFrame, namer.str() , cvPoint(30, 25), + cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA); + + + for(int i = 0; isecond(0),observations.find(cam_state_id)->second(1)); + else if(cam.id == 1) + p_f = cv::Point2f(observations.find(cam_state_id)->second(2),observations.find(cam_state_id)->second(3)); + // move to real pixels + + p_f = image_handler::distortPoint(p_f, cam.intrinsics, cam.distortion_model, 
cam.distortion_coeffs); + for(int i = 0; i0) + cv::rectangle(irradianceFrame, + cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)), + cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)), + cv::Scalar(255 - photo_r(2*(i*N+j)+1)*255, 255 - photo_r(2*(i*N+j)+1)*255, 255), + CV_FILLED); + else + cv::rectangle(irradianceFrame, + cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)), + cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)), + cv::Scalar(255, 255 + photo_r(2*(i*N+j)+1)*255, 255 + photo_r(2*(i*N+j)+1)*255), + CV_FILLED); + + // gradient arrow + /* + cv::arrowedLine(irradianceFrame, + cv::Point(30+scale*(N/2 +0.5), 50+scale*(N+(N/2)+0.5)), + cv::Point(30+scale*(N/2+0.5)+scale*gradientVector.x, 50+scale*(N+(N/2)+0.5)+scale*gradientVector.y), + cv::Scalar(100, 0, 255), + 1); + */ + + // residual gradient direction + /* + cv::arrowedLine(irradianceFrame, + cv::Point(40+scale*(N+N/2+0.5), 15+scale*((N-0.5))), + cv::Point(40+scale*(N+N/2+0.5)+scale*residualVector.x, 15+scale*(N-0.5)+scale*residualVector.y), + cv::Scalar(0, 255, 175), + 3); + */ + + cv::hconcat(cam.featureVisu, irradianceFrame, cam.featureVisu); + +/* + // visualize position of used observations and resulting feature position + cv::Mat positionFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255)); + cv::resize(positionFrame, positionFrame, cv::Size(), rescale, rescale); + + + // draw world zero + cv::line(positionFrame, + cv::Point(20,20), + cv::Point(20,30), + cv::Scalar(0,0,255), + CV_FILLED); + + cv::line(positionFrame, + cv::Point(20,20), + cv::Point(30,20), + cv::Scalar(255,0,0), + CV_FILLED); + // for every observation, get cam state + for(auto obs : observations) + { + cv::line(positionFrame, + cv::Point(20,20), + cv::Point(30,20), + cv::Scalar(255,0,0), + CV_FILLED); + } + + // draw, x y position and arrow with direction - write z next to it + + cv::resize(cam.featureVisu, cam.featureVisu, cv::Size(), rescale, rescale); + + + cv::hconcat(cam.featureVisu, positionFrame, cam.featureVisu); +*/ + // write 
feature position + std::stringstream pos_s; + pos_s << "u: " << observations.begin()->second(0) << " v: " << observations.begin()->second(1); + cv::putText(cam.featureVisu, ss.str() , cvPoint(anchorImage.rows + 100, anchorImage.cols - 30), + cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA); + // create line? + + //save image + + std::stringstream loc; + // loc << "/home/raphael/dev/MSCKF_ws/img/feature_" << std::to_string(ros::Time::now().toSec()) << ".jpg"; + //cv::imwrite(loc.str(), cam.featureVisu); + + cv::imshow("patch", cam.featureVisu); + cvWaitKey(1); +} + +float Feature::PixelIrradiance(cv::Point2f pose, cv::Mat image) const +{ + + return ((float)image.at(pose.y, pose.x))/255.; +} + +cv::Point2f Feature::pixelDistanceAt( + const CAMState& cam_state, + const StateIDType& cam_state_id, + const CameraCalibration& cam, + Eigen::Vector3d& in_p) const +{ + + cv::Point2f cam_p = projectPositionToCamera(cam_state, cam_state_id, cam, in_p); + + // create vector of patch in pixel plane + std::vector surroundingPoints; + surroundingPoints.push_back(cv::Point2f(cam_p.x+1, cam_p.y)); + surroundingPoints.push_back(cv::Point2f(cam_p.x-1, cam_p.y)); + surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y+1)); + surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y-1)); + + std::vector pure; + image_handler::undistortPoints(surroundingPoints, + cam.intrinsics, + cam.distortion_model, + cam.distortion_coeffs, + pure); + + // transfrom position to camera frame + // to get distance multiplier + Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation); + const Eigen::Vector3d& t_c0_w = cam_state.position; + Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w); + + // returns the distance between the pixel points in space + cv::Point2f distance(fabs(pure[0].x - pure[1].x), fabs(pure[2].y - pure[3].y)); + + return distance; +} + +cv::Point2f Feature::projectPositionToCamera( + const CAMState& cam_state, + const StateIDType& cam_state_id, + const 
CameraCalibration& cam, + Eigen::Vector3d& in_p) const +{ + Eigen::Isometry3d T_c0_w; + + cv::Point2f out_p; + cv::Point2f my_p; + // transfrom position to camera frame + + // cam0 position + Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation); + const Eigen::Vector3d& t_c0_w = cam_state.position; + + // project point according to model + if(cam.id == 0) + { + Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w); + out_p = cv::Point2f(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2)); + + } + // if camera is one, calcualte the cam1 position from cam0 position first + else if(cam.id == 1) + { + // cam1 position + Eigen::Matrix3d R_c0_c1 = CAMState::T_cam0_cam1.linear(); + Eigen::Matrix3d R_w_c1 = R_c0_c1 * R_w_c0; + Eigen::Vector3d t_c1_w = t_c0_w - R_w_c1.transpose()*CAMState::T_cam0_cam1.translation(); + + Eigen::Vector3d p_c1 = R_w_c1 * (in_p-t_c1_w); + out_p = cv::Point2f(p_c1(0)/p_c1(2), p_c1(1)/p_c1(2)); + } + + // undistort point according to camera model + if (cam.distortion_model.substr(0,3) == "pre-") + my_p = cv::Point2f(out_p.x * cam.intrinsics[0] + cam.intrinsics[2], out_p.y * cam.intrinsics[1] + cam.intrinsics[3]); + else + my_p = image_handler::distortPoint(out_p, + cam.intrinsics, + cam.distortion_model, + cam.distortion_coeffs); + return my_p; +} + +Eigen::Vector3d Feature::AnchorPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam) +{ + // use undistorted position of point of interest + // project it back into 3D space using pinhole model + // save resulting NxN positions for this feature + + Eigen::Vector3d PositionInCamera(in_p.x/anchor_rho, in_p.y/anchor_rho, 1/anchor_rho); + Eigen::Vector3d PositionInWorld = T_anchor_w.linear()*PositionInCamera + T_anchor_w.translation(); + return PositionInWorld; + //printf("%f, %f, %f\n",PositionInWorld[0], PositionInWorld[1], PositionInWorld[2]); +} + +//@test center projection must always be initial feature projection +bool Feature::initializeAnchor(const CameraCalibration& cam, int N) +{ + + 
//initialize patch Size + int n = (int)(N-1)/2; + + auto anchor = observations.begin(); + if(cam.moving_window.find(anchor->first) == cam.moving_window.end()) + { + return false; + } + cv::Mat anchorImage = cam.moving_window.find(anchor->first)->second.image; + cv::Mat anchorImage_deeper; + anchorImage.convertTo(anchorImage_deeper,CV_16S); + //TODO remove this? + + + cv::Sobel(anchorImage_deeper, xderImage, -1, 1, 0, 3); + cv::Sobel(anchorImage_deeper, yderImage, -1, 0, 1, 3); + + xderImage/=8.; + yderImage/=8.; + + cv::convertScaleAbs(xderImage, abs_xderImage); + cv::convertScaleAbs(yderImage, abs_yderImage); + + cv::GaussianBlur(anchorImage, anchorImage_blurred, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT); + + auto u = anchor->second(0);//*cam.intrinsics[0] + cam.intrinsics[2]; + auto v = anchor->second(1);//*cam.intrinsics[1] + cam.intrinsics[3]; + + + // check if image has been pre-undistorted + if(cam.distortion_model.substr(0,3) == "pre") + { + //project onto pixel plane + undist_anchor_center_pos = cv::Point2f(u * cam.intrinsics[0] + cam.intrinsics[2], v * cam.intrinsics[1] + cam.intrinsics[3]); + // create vector of patch in pixel plane + for(double u_run = -n; u_run <= n; u_run++) + for(double v_run = -n; v_run <= n; v_run++) + anchorPatch_real.push_back(cv::Point2f(undist_anchor_center_pos.x+u_run, undist_anchor_center_pos.y+v_run)); + + + //project back into u,v + for(int i = 0; i < N*N; i++) + anchorPatch_ideal.push_back(cv::Point2f((anchorPatch_real[i].x-cam.intrinsics[2])/cam.intrinsics[0], (anchorPatch_real[i].y-cam.intrinsics[3])/cam.intrinsics[1])); + } + + else + { + // get feature in undistorted pixel space + // this only reverts from 'pure' space into undistorted pixel space using camera matrix + cv::Point2f und_pix_p = image_handler::distortPoint(cv::Point2f(u, v), + cam.intrinsics, + cam.distortion_model, + cam.distortion_coeffs); + // create vector of patch in pixel plane + for(double u_run = -n; u_run <= n; u_run++) + for(double v_run = -n; 
v_run <= n; v_run++) + anchorPatch_real.push_back(cv::Point2f(und_pix_p.x+u_run, und_pix_p.y+v_run)); + + + //create undistorted pure points + image_handler::undistortPoints(anchorPatch_real, + cam.intrinsics, + cam.distortion_model, + cam.distortion_coeffs, + anchorPatch_ideal); + + } + + // save anchor position for later visualisaztion + anchor_center_pos = anchorPatch_real[(N*N-1)/2]; + + // save true pixel Patch position + for(auto point : anchorPatch_real) + if(point.x - n < 0 || point.x + n >= cam.resolution(0)-1 || point.y - n < 0 || point.y + n >= cam.resolution(1)-1) + return false; + + for(auto point : anchorPatch_real) + anchorPatch.push_back(PixelIrradiance(point, anchorImage)); + + // project patch pixel to 3D space in camera coordinate system + for(auto point : anchorPatch_ideal) + anchorPatch_3d.push_back(AnchorPixelToPosition(point, cam)); + + is_anchored = true; + + return true; +} + + +bool Feature::initializeRho(const CamStateServer& cam_states) { + // Organize camera poses and feature observations properly. + std::vector > cam_poses(0); + std::vector > measurements(0); + + for (auto& m : observations) { + auto cam_state_iter = cam_states.find(m.first); + if (cam_state_iter == cam_states.end()) continue; + + // Add the measurement. + measurements.push_back(m.second.head<2>()); + measurements.push_back(m.second.tail<2>()); + + // This camera pose will take a vector from this camera frame + // to the world frame. + Eigen::Isometry3d cam0_pose; + cam0_pose.linear() = quaternionToRotation( + cam_state_iter->second.orientation).transpose(); + cam0_pose.translation() = cam_state_iter->second.position; + + Eigen::Isometry3d cam1_pose; + cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse(); + + cam_poses.push_back(cam0_pose); + cam_poses.push_back(cam1_pose); + } + + // All camera poses should be modified such that it takes a + // vector from the first camera frame in the buffer to this + // camera frame. 
+ Eigen::Isometry3d T_c0_w = cam_poses[0]; + T_anchor_w = T_c0_w; + for (auto& pose : cam_poses) + pose = pose.inverse() * T_c0_w; + + // Generate initial guess + double initial_depth = 0; + initial_depth = generateInitialDepth(cam_poses[cam_poses.size()-1], measurements[0], + measurements[measurements.size()-1]); + + double solution = 1.0/initial_depth; + + // Apply Levenberg-Marquart method to solve for the 3d position. + double lambda = optimization_config.initial_damping; + int inner_loop_cntr = 0; + int outer_loop_cntr = 0; + bool is_cost_reduced = false; + double delta_norm = 0; + + // Compute the initial cost. + double total_cost = 0.0; + for (int i = 0; i < cam_poses.size(); ++i) { + double this_cost = 0.0; + Rhocost(cam_poses[i], solution, measurements[0], measurements[i], this_cost); + total_cost += this_cost; + } + + // Outer loop. + do { + Eigen::Matrix A = Eigen::Matrix::Zero(); + Eigen::Matrix b = Eigen::Matrix::Zero(); + + for (int i = 0; i < cam_poses.size(); ++i) { + Eigen::Matrix J; + Eigen::Vector2d r; + double w; + + RhoJacobian(cam_poses[i], solution, measurements[0], measurements[i], J, r, w); + + if (w == 1) { + A += J.transpose() * J; + b += J.transpose() * r; + } else { + double w_square = w * w; + A += w_square * J.transpose() * J; + b += w_square * J.transpose() * r; + } + } + + // Inner loop. + // Solve for the delta that can reduce the total cost. + do { + Eigen::Matrix damper = lambda*Eigen::Matrix::Identity(); + Eigen::Matrix delta = (A+damper).ldlt().solve(b); + double new_solution = solution - delta(0,0); + delta_norm = delta.norm(); + + double new_cost = 0.0; + for (int i = 0; i < cam_poses.size(); ++i) { + double this_cost = 0.0; + Rhocost(cam_poses[i], new_solution, measurements[0], measurements[i], this_cost); + new_cost += this_cost; + } + + if (new_cost < total_cost) { + is_cost_reduced = true; + solution = new_solution; + total_cost = new_cost; + lambda = lambda/10 > 1e-10 ? 
lambda/10 : 1e-10; + } else { + is_cost_reduced = false; + lambda = lambda*10 < 1e12 ? lambda*10 : 1e12; + } + + } while (inner_loop_cntr++ < + optimization_config.inner_loop_max_iteration && !is_cost_reduced); + + inner_loop_cntr = 0; + + } while (outer_loop_cntr++ < + optimization_config.outer_loop_max_iteration && + delta_norm > optimization_config.estimation_precision); + + // Covert the feature position from inverse depth + // representation to its 3d coordinate. + Eigen::Vector3d final_position(measurements[0](0)/solution, + measurements[0](1)/solution, 1.0/solution); + + // Check if the solution is valid. Make sure the feature + // is in front of every camera frame observing it. + bool is_valid_solution = true; + for (const auto& pose : cam_poses) { + Eigen::Vector3d position = + pose.linear()*final_position + pose.translation(); + if (position(2) <= 0) { + is_valid_solution = false; + break; + } + } + + //save inverse depth distance from camera + anchor_rho = solution; + + // Convert the feature position to the world frame. + position = T_c0_w.linear()*final_position + T_c0_w.translation(); + + if (is_valid_solution) + is_initialized = true; + + return is_valid_solution; +} + + +bool Feature::initializePosition(const CamStateServer& cam_states) { + + // Organize camera poses and feature observations properly. 
+ std::vector > cam_poses(0); std::vector +#include +#include +#include + +#include +#include + + +namespace msckf_vio { +/* + * @brief utilities for msckf_vio + */ +namespace image_handler { + +cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics); +cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics); + +void undistortImage( + cv::InputArray src, + cv::OutputArray dst, + const std::string& distortion_model, + const cv::Vec4d& intrinsics, + const cv::Vec4d& distortion_coeffs); + +void undistortPoints( + const std::vector& pts_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs, + std::vector& pts_out, + const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(), + const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0)); + +std::vector distortPoints( + const std::vector& pts_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs); + +cv::Point2f distortPoint( + const cv::Point2f& pt_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs); + +void undistortPoint( + const cv::Point2f& pt_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs, + cv::Point2f& pt_out, + const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(), + const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0)); + +} +} +#endif diff --git a/include/msckf_vio/image_processor.h b/include/msckf_vio/image_processor.h index 086c5a9..7fb3943 100644 --- a/include/msckf_vio/image_processor.h +++ b/include/msckf_vio/image_processor.h @@ -22,6 +22,8 @@ #include #include +#include "cam_state.h" + namespace msckf_vio { /* @@ -318,6 +320,8 @@ private: return; } + bool STREAMPAUSE; + // Indicate if this is the first image message. 
bool is_first_img; @@ -332,15 +336,8 @@ private: std::vector imu_msg_buffer; // Camera calibration parameters - std::string cam0_distortion_model; - cv::Vec2i cam0_resolution; - cv::Vec4d cam0_intrinsics; - cv::Vec4d cam0_distortion_coeffs; - - std::string cam1_distortion_model; - cv::Vec2i cam1_resolution; - cv::Vec4d cam1_intrinsics; - cv::Vec4d cam1_distortion_coeffs; + CameraCalibration cam0; + CameraCalibration cam1; // Take a vector from cam0 frame to the IMU frame. cv::Matx33d R_cam0_imu; diff --git a/include/msckf_vio/math_utils.hpp b/include/msckf_vio/math_utils.hpp index 3000d4c..d6cb57a 100644 --- a/include/msckf_vio/math_utils.hpp +++ b/include/msckf_vio/math_utils.hpp @@ -43,6 +43,50 @@ inline void quaternionNormalize(Eigen::Vector4d& q) { return; } +/* + * @brief invert rotation of passed quaternion through conjugation + * and return conjugation + */ +inline Eigen::Vector4d quaternionConjugate(Eigen::Vector4d& q) +{ + Eigen::Vector4d p; + p(0) = -q(0); + p(1) = -q(1); + p(2) = -q(2); + p(3) = q(3); + quaternionNormalize(p); + return p; +} + +/* + * @brief converts a vector4 to a vector3, dropping (3) + * this is typically used to get the vector part of a quaternion + for eq small angle approximation +*/ +inline Eigen::Vector3d v4tov3(const Eigen::Vector4d& q) +{ + Eigen::Vector3d p; + p(0) = q(0); + p(1) = q(1); + p(2) = q(2); + return p; +} + +/* + * @brief Perform q1 * q2 + */ + +inline Eigen::Vector4d QtoV(const Eigen::Quaterniond& q) +{ + Eigen::Vector4d p; + p(0) = q.x(); + p(1) = q.y(); + p(2) = q.z(); + p(3) = q.w(); + return p; +} + + /* * @brief Perform q1 * q2 */ diff --git a/include/msckf_vio/msckf_vio.h b/include/msckf_vio/msckf_vio.h index 8077acf..4584c57 100644 --- a/include/msckf_vio/msckf_vio.h +++ b/include/msckf_vio/msckf_vio.h @@ -14,11 +14,17 @@ #include #include #include +#include #include +#include +#include #include #include +#include +#include #include +#include #include #include @@ -27,6 +33,13 @@ #include "feature.hpp" 
#include +#include +#include +#include +#include + +#define PI 3.14159265 + namespace msckf_vio { /* * @brief MsckfVio Implements the algorithm in @@ -97,11 +110,27 @@ class MsckfVio { void imuCallback(const sensor_msgs::ImuConstPtr& msg); /* - * @brief featureCallback - * Callback function for feature measurements. - * @param msg Stereo feature measurements. + * @brief truthCallback + * Callback function for ground truth navigation information + * @param TransformStamped msg + */ + void truthCallback( + const geometry_msgs::TransformStampedPtr& msg); + + + /* + * @brief imageCallback + * Callback function for feature measurements + * Triggers measurement update + * @param msg + * Camera 0 Image + * Camera 1 Image + * Stereo feature measurements. */ - void featureCallback(const CameraMeasurementConstPtr& msg); + void imageCallback ( + const sensor_msgs::ImageConstPtr& cam0_img, + const sensor_msgs::ImageConstPtr& cam1_img, + const CameraMeasurementConstPtr& feature_msg); /* * @brief publish Publish the results of VIO. 
@@ -126,6 +155,26 @@ class MsckfVio { bool resetCallback(std_srvs::Trigger::Request& req, std_srvs::Trigger::Response& res); + // Saves the exposure time and the camera measurementes + void manageMovingWindow( + const sensor_msgs::ImageConstPtr& cam0_img, + const sensor_msgs::ImageConstPtr& cam1_img, + const CameraMeasurementConstPtr& feature_msg); + + + void calcErrorState(); + + // Debug related Functions + // Propagate the true state + void batchTruthProcessing( + const double& time_bound); + + + void processTruthtoIMU(const double& time, + const Eigen::Vector4d& m_rot, + const Eigen::Vector3d& m_trans); + + // Filter related functions // Propogate the state void batchImuProcessing( @@ -137,8 +186,12 @@ class MsckfVio { const Eigen::Vector3d& gyro, const Eigen::Vector3d& acc); + // groundtruth state augmentation + void truePhotometricStateAugmentation(const double& time); + // Measurement update void stateAugmentation(const double& time); + void PhotometricStateAugmentation(const double& time); void addFeatureObservations(const CameraMeasurementConstPtr& msg); // This function is used to compute the measurement Jacobian // for a single feature observed at a single camera frame. @@ -149,25 +202,118 @@ class MsckfVio { Eigen::Vector4d& r); // This function computes the Jacobian of all measurements viewed // in the given camera states of this feature. 
- void featureJacobian(const FeatureIDType& feature_id, + bool featureJacobian( + const FeatureIDType& feature_id, const std::vector& cam_state_ids, Eigen::MatrixXd& H_x, Eigen::VectorXd& r); + + + void twodotMeasurementJacobian( + const StateIDType& cam_state_id, + const FeatureIDType& feature_id, + Eigen::MatrixXd& H_x, Eigen::MatrixXd& H_y, Eigen::VectorXd& r); + + bool ConstructJacobians( + Eigen::MatrixXd& H_rho, + Eigen::MatrixXd& H_pl, + Eigen::MatrixXd& H_pA, + const Feature& feature, + const StateIDType& cam_state_id, + Eigen::MatrixXd& H_xl, + Eigen::MatrixXd& H_yl); + + bool PhotometricPatchPointResidual( + const StateIDType& cam_state_id, + const Feature& feature, + Eigen::VectorXd& r); + + bool PhotometricPatchPointJacobian( + const CAMState& cam_state, + const StateIDType& cam_state_id, + const Feature& feature, + Eigen::Vector3d point, + int count, + Eigen::Matrix& H_rhoj, + Eigen::Matrix& H_plj, + Eigen::Matrix& H_pAj, + Eigen::Matrix& dI_dhj); + + bool PhotometricMeasurementJacobian( + const StateIDType& cam_state_id, + const FeatureIDType& feature_id, + Eigen::MatrixXd& H_x, + Eigen::MatrixXd& H_y, + Eigen::VectorXd& r); + + + bool twodotFeatureJacobian( + const FeatureIDType& feature_id, + const std::vector& cam_state_ids, + Eigen::MatrixXd& H_x, Eigen::VectorXd& r); + + bool PhotometricFeatureJacobian( + const FeatureIDType& feature_id, + const std::vector& cam_state_ids, + Eigen::MatrixXd& H_x, Eigen::VectorXd& r); + + void photometricMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r); void measurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r); + void twoMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r); + bool gatingTest(const Eigen::MatrixXd& H, - const Eigen::VectorXd&r, const int& dof); + const Eigen::VectorXd&r, const int& dof, int filter=0); void removeLostFeatures(); void findRedundantCamStates( std::vector& rm_cam_state_ids); + + void pruneLastCamStateBuffer(); void 
pruneCamStateBuffer(); // Reset the system online if the uncertainty is too large. void onlineReset(); + // Photometry flag + int FILTER; + + // debug flag + bool STREAMPAUSE; + bool PRINTIMAGES; + bool GROUNDTRUTH; + + bool nan_flag; + bool play; + double last_time_bound; + double time_offset; + + // Patch size for Photometry + int N; + // Image rescale + int SCALE; // Chi squared test table. static std::map chi_squared_test_table; + double eval_time; + + IMUState timed_old_imu_state; + IMUState timed_old_true_state; + + IMUState old_imu_state; + IMUState old_true_state; + + // change in position + Eigen::Vector3d delta_position; + Eigen::Vector3d delta_orientation; + // State vector StateServer state_server; + StateServer photometric_state_server; + + // Ground truth state vector + StateServer true_state_server; + + // error state based on ground truth + StateServer err_state_server; + // Maximum number of camera states int max_cam_state_size; @@ -179,6 +325,22 @@ class MsckfVio { // transfer delay between IMU and Image messages. std::vector imu_msg_buffer; + // Ground Truth message data + std::vector truth_msg_buffer; + // Moving Window buffer + movingWindow cam0_moving_window; + movingWindow cam1_moving_window; + + // Camera calibration parameters + CameraCalibration cam0; + CameraCalibration cam1; + + // covariance data + double irradiance_frame_bias; + + + ros::Time image_save_time; + // Indicate if the gravity vector is set. 
bool is_gravity_set; @@ -206,12 +368,21 @@ class MsckfVio { // Subscribers and publishers ros::Subscriber imu_sub; - ros::Subscriber feature_sub; + ros::Subscriber truth_sub; + ros::Publisher truth_odom_pub; ros::Publisher odom_pub; + ros::Publisher marker_pub; ros::Publisher feature_pub; tf::TransformBroadcaster tf_pub; ros::ServiceServer reset_srv; + + message_filters::Subscriber cam0_img_sub; + message_filters::Subscriber cam1_img_sub; + message_filters::Subscriber feature_sub; + + message_filters::TimeSynchronizer image_sub; + // Frame id std::string fixed_frame_id; std::string child_frame_id; @@ -232,6 +403,9 @@ class MsckfVio { ros::Publisher mocap_odom_pub; geometry_msgs::TransformStamped raw_mocap_odom_msg; Eigen::Isometry3d mocap_initial_frame; + + Eigen::Vector4d mocap_initial_orientation; + Eigen::Vector3d mocap_initial_position; }; typedef MsckfVio::Ptr MsckfVioPtr; diff --git a/launch/image_processor_euroc.launch b/launch/image_processor_euroc.launch index f728112..f49fc6c 100644 --- a/launch/image_processor_euroc.launch +++ b/launch/image_processor_euroc.launch @@ -8,7 +8,8 @@ + output="screen" + > diff --git a/launch/image_processor_mynt.launch b/launch/image_processor_mynt.launch new file mode 100644 index 0000000..63b058e --- /dev/null +++ b/launch/image_processor_mynt.launch @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/launch/image_processor_tinytum.launch b/launch/image_processor_tinytum.launch new file mode 100644 index 0000000..efe6e7c --- /dev/null +++ b/launch/image_processor_tinytum.launch @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/launch/image_processor_tum.launch b/launch/image_processor_tum.launch new file mode 100644 index 0000000..135af70 --- /dev/null +++ b/launch/image_processor_tum.launch @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/launch/msckf_vio_debug_tum.launch 
b/launch/msckf_vio_debug_tum.launch new file mode 100644 index 0000000..4796014 --- /dev/null +++ b/launch/msckf_vio_debug_tum.launch @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/launch/msckf_vio_euroc.launch b/launch/msckf_vio_euroc.launch index bfbf7eb..0df8102 100644 --- a/launch/msckf_vio_euroc.launch +++ b/launch/msckf_vio_euroc.launch @@ -17,6 +17,18 @@ args='standalone msckf_vio/MsckfVioNodelet' output="screen"> + + + + + + + + + + + + @@ -53,6 +65,9 @@ + + + diff --git a/launch/msckf_vio_mynt.launch b/launch/msckf_vio_mynt.launch new file mode 100644 index 0000000..678787e --- /dev/null +++ b/launch/msckf_vio_mynt.launch @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/launch/msckf_vio_tinytum.launch b/launch/msckf_vio_tinytum.launch new file mode 100644 index 0000000..1e8c9a5 --- /dev/null +++ b/launch/msckf_vio_tinytum.launch @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/launch/msckf_vio_tum.launch b/launch/msckf_vio_tum.launch new file mode 100644 index 0000000..7f239c7 --- /dev/null +++ b/launch/msckf_vio_tum.launch @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/log b/log new file mode 100644 index 0000000..1e2f876 --- /dev/null +++ b/log @@ -0,0 +1,73 @@ +# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST +# name: Hx +# type: matrix +# rows: 18 +# columns: 49 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 313.795 -58.7912 139.778 -46.7616 144.055 86.9644 0 -314.123 55.6434 -140.648 46.7616 -144.055 -86.9644 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 441.06 -94.50069999999999 174.424 -53.7653 204.822 120.248 0 -441.685 90.1101 -175.657 53.7653 -204.822 -120.248 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 225.35 -54.5629 77.60599999999999 -21.1425 105.886 60.3706 0 -225.756 52.3373 -78.2406 21.1425 -105.886 -60.3706 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 175.128 20.6203 175.127 -79.63939999999999 73.245 62.1868 0 -174.573 -22.5235 -175.576 79.63939999999999 -73.245 -62.1868 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 296.962 43.5469 311.307 -143.667 123.399 108.355 0 -295.905 -46.7952 -312.063 143.667 -123.399 -108.355 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 126.283 117.889 311.864 -161.264 38.8521 71.8019 0 -124.464 -119.541 -312.118 161.264 -38.8521 -71.8019 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 49.2502 63.7166 155.071 -80.81950000000001 12.7732 32.1826 0 -48.2934 -64.4113 -155.157 80.81950000000001 -12.7732 -32.1826 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 69.59699999999999 154.579 335.384 -179.355 9.212580000000001 62.0364 0 -67.35599999999999 -155.735 -335.462 179.355 -9.212580000000001 -62.0364 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -66.6965 304.947 500.218 -285.589 -71.31010000000001 55.5058 0 70.8009 -305.077 -499.831 285.589 71.31010000000001 -55.5058 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 323.404 -62.2043 141.092 -46.6015 148.737 89.1108 0 0 0 0 0 0 0 0 -324.336 57.8552 -141.991 46.6015 -148.737 -89.1108 1 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 454.208 -99.3095 175.986 -53.4094 211.276 123.174 0 0 0 0 0 0 0 0 -455.779 93.0992 -177.158 53.4094 -211.276 -123.174 1 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 231.884 -57.0266 78.2559 -20.8926 109.118 61.8183 0 0 0 0 0 0 0 0 -232.824 53.8025 
-78.80719999999999 20.8926 -109.118 -61.8183 1 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 181.715 18.8525 177.045 -80.08499999999999 76.3716 63.8254 0 0 0 0 0 0 0 0 -181.07 -20.839 -177.959 80.08499999999999 -76.3716 -63.8254 1 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 308.249 40.5812 314.711 -144.494 128.766 111.207 0 0 0 0 0 0 0 0 -306.972 -43.8825 -316.328 144.494 -128.766 -111.207 1 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 133.309 116.865 315.474 -162.598 42.0763 73.8353 0 0 0 0 0 0 0 0 -130.603 -117.454 -316.931 162.598 -42.0763 -73.8353 1 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 52.4426 63.3607 156.902 -81.5288 14.2139 33.1222 0 0 0 0 0 0 0 0 -50.9976 -63.4393 -157.607 81.5288 -14.2139 -33.1222 1 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 75.5508 154.234 339.369 -180.995 11.859 63.8956 0 0 0 0 0 0 0 0 -72.1041 -153.816 -340.865 180.995 -11.859 -63.8956 1 0 0 0 0 0 0 0 + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -62.0551 306.357 506.351 -288.542 -69.50409999999999 57.4562 0 0 0 0 0 0 0 0 68.6262 -303.028 -508.423 288.542 69.50409999999999 -57.4562 1 0 0 0 0 0 0 0 + + +# name: Hy +# type: matrix +# rows: 18 +# columns: 14 + 1.56267 0 0 0 0 0 0 0 0 0 0.252873 0 0 0.110387 + 0 1.56267 0 0 0 0 0 0 0 0 0.453168 0 0 0.162961 + 0 0 1.56267 0 0 0 0 0 0 0 0.638441 0 0 0.0873758 + 0 0 0 1.56267 0 0 0 0 0 0 0.21031 0 0 0.0321517 + 0 0 0 0 1.56267 0 0 0 0 0 0.375554 0 0 0.0505151 + 0 0 0 0 0 1.56267 0 0 0 0 0.638441 0 0 -0.0336034 + 0 0 0 0 0 0 1.56267 0 0 0 0.157733 0 0 -0.0232704 + 0 0 0 0 0 0 0 1.56267 0 0 0.212814 0 0 -0.0688343 + 0 0 0 0 0 0 0 0 1.56267 0 0.360532 0 0 -0.187745 + 1.56267 0 0 0 0 0 0 0 0 0 0 0.252873 0 0.322811 + 0 1.56267 0 0 0 0 0 0 0 0 0 0.453168 0 0.467319 + 0 0 1.56267 0 0 0 0 0 0 0 0 0.638441 0 0.245907 + 0 0 0 1.56267 0 0 0 0 0 0 0 0.21031 0 0.135725 + 0 0 0 0 1.56267 0 0 0 0 0 0 0.375554 0 0.225277 + 0 0 0 0 0 1.56267 0 0 0 0 0 0.638441 0 0.012926 + 0 0 0 0 0 0 
1.56267 0 0 0 0 0.157733 0 -0.0108962 + 0 0 0 0 0 0 0 1.56267 0 0 0 0.212814 0 -0.06974909999999999 + 0 0 0 0 0 0 0 0 1.56267 0 0 0.360532 0 -0.318846 + + +# name: r +# type: matrix +# rows: 18 +# columns: 1 + 0.0354809 + 0.0153183 + 0.0570191 + -0.0372801 + 0.0878601 + 0.06811780000000001 + -0.00426164 + 5.162985999041026e-321 + 6.927779999999998e-310 + 0 + 2.121999999910509e-314 + 0 + 0 + 0 + 3.6073900000086e-313 + 4.446590812571219e-323 + 3.952525166729972e-323 + 3.952525166729972e-323 + + diff --git a/msg/CameraMeasurement.msg b/msg/CameraMeasurement.msg index 29a69bf..84e448d 100644 --- a/msg/CameraMeasurement.msg +++ b/msg/CameraMeasurement.msg @@ -1,4 +1,4 @@ -std_msgs/Header header +Header header # All features on the current image, # including tracked ones and newly detected ones. FeatureMeasurement[] features diff --git a/package.xml b/package.xml index 66d6b26..cf1b6f6 100644 --- a/package.xml +++ b/package.xml @@ -3,13 +3,12 @@ msckf_vio 0.0.1 - Multi-State Constraint Kalman Filter for Vision-aided Inertial Navigation + Multi-State Constraint Kalman Filter - Photometric expansion - Ke Sun + Raphael Maenle Penn Software License - Ke Sun - Kartik Mohta + Raphael Maenle catkin @@ -19,6 +18,7 @@ nav_msgs sensor_msgs geometry_msgs + visualization_msgs eigen_conversions tf_conversions random_numbers diff --git a/pseudocode/pseudocode_image_processing b/pseudocode/pseudocode_image_processing new file mode 100644 index 0000000..d1472f0 --- /dev/null +++ b/pseudocode/pseudocode_image_processing @@ -0,0 +1,97 @@ + + +stereo callback() + + create image pyramids + _Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK._ +. 
+ if first Frame: + *initialize first Frame () + + else: + *track Features () + + *addnewFeatures () + + *pruneGridFeatures() + _removes worst features from any overflowing grid_ + + publish features (u1, v1, u2, v2) + _undistorts them beforehand_ + +addnewFeatures() + *mask existing features + *detect new fast features + *collect in a grid, keep only best n per grid + *stereomatch() + *save inliers into a new feature with u,v on cam0 and cam1 + + +track Features() + *integrateIMUData () + _uses existing IMU data between two frames to calc. rotation between the two frames_ + + *predictFeatureTracking() + _compensates the rotation between consecutive frames - rotates previous camera frame features to current camera rotation_ + + *calcOpticalFlowPyrLK() + _measures the change between the features in the previous frame and in the current frame (using the predicted features)_ + + *remove points outside of image region + _how does this even happen?_ + + *stereo match() + _find tracked features from optical flow in the camera 1 image_ + _remove all features that could not be matched_ + + *twoPointRansac(cam0) + *twoPointRansac(cam1) + _remove any features outside best found ransac model_ + + + + +twoPointRansac() + *mark all points as inliers + *compensate rotation between frames + *normalize points + *calculate difference bewteen previous and current points + *mark large distances (over 50 pixels currently) + *calculate mean points distance + *return if inliers (non marked) < 3 + *return if motion smaller than norm pixel unit + *ransac + *optimize with found inlier after random sample + + *set inlier markers + +initialize first Frame() + + features = FastFeatureDetector detect () + + *stereo match () + + group features into grid + - according to position in the image + - sorting them by response + - save the top responses + - save the top responses + + +stereo match () + + *undistort cam0 Points + *project cam0 Points to cam1 to initialize points in cam1 + + *calculate 
lk optical flow + _used because camera calibrations not perfect enough_ + _also, calculation more efficient, because LK calculated anyway_ + + *compute relative trans/rot between cam0 and cam1* + + *remove outliers based on essential matrix + _essential matrix relates points in stereo image (pinhole model)_ + for every point: + - calculate epipolar line of point in cam0 + - calculate error of cam1 to epipolar line + - remove if to big diff --git a/pseudocode/pseudocode_msckf b/pseudocode/pseudocode_msckf new file mode 100644 index 0000000..9dc2ecd --- /dev/null +++ b/pseudocode/pseudocode_msckf @@ -0,0 +1,82 @@ +featureCallback + propagate IMU state() + state Augmentation() + add Feature Observations() + + #the following possibly trigger ekf update step: + remove Lost Features () + prune Camera State Buffer () + + +remove Lost Features() + every feature that does not have a current observation: + *just delete if not enough features + + check Motion of Feature () + _calculation here makes no sense - he uses pixel position as direction vector for feature?_ + *initialize Position () + + caculate feature Jakobian and Residual() + *for every observation in this feature + - calculate u and v in camera frames, based on estimated feature position + - input results into jakobi d(measurement)/d(camera 0/1) + - input results into jakobi d(camera 0/1)/d(state) and jakobi d(camera 0/1)/d(feature position) + - project both jakobis to nullspace of feature position jakobi + - calculate residual: measurement - u and v of camera frames + - project residual onto nullspace of feature position jakobi + + - stack residual and jakobians + + gating Test() + + *measurementUpdate() + _use calculated residuals and jakobians to calculate change in error_ +measurementUpdate(): + - QR reduce the stacked Measurment Jakobis + - calcualte Kalman Gain based on Measurement Jakobian, Error-State Covariance and Noise + _does some fancy shit here_ + - calculate estimated error after observation: 
delta_x = KalmanGain * residual + - add estimated error to state (imu states and cam states) + +initialize Position (): + * create initial guess for global feature position () + _uses first feature measurement on left camera and last feature measurement of right camera_ + - transform first measurement to plane of last measurement + - calcualte least square point between rays + * get position approximation using measured feature positions + _using Levenberg Marqhart iterative search_ + + + +add Feature Observations() + * if feature not in map, add feature to map + _and add u0, v0, u1, v1 as first observation + * if feature in map, add new observation u0,v0,u1,v1 + + + +state Augmentation() + * Add estimated cam position to state + * Update P with Jakobian of cam Position + + +propagate IMU state () + _uses IMU process model for every saved IMU state_ + + for every buffered imu state: + + *remove bias + + *Compute F and G matrix (continuous transition and noise cov.) + _using current orientation, gyro and acc. reading_ + * approximate phi: phi = 1 + Fdt + ... + + * calculate new state propagating through runge kutta + * modify transition matrix to have a propper null space? 
+ + * calculate Q = Phi*G*Q_noise*GT*PhiT + * calculate P = Phi*P*PhiT + Q + + + stateAugmentation () + _save current IMU state as camera position_ diff --git a/src/bagcontrol.py b/src/bagcontrol.py new file mode 100644 index 0000000..3292b5b --- /dev/null +++ b/src/bagcontrol.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +import rosbag +import rospy +from sensor_msgs.msg import Imu, Image +from geometry_msgs.msg import TransformStamped +import time +import signal +import sys + + +def signal_handler(sig, frame): + print('gracefully exiting the program.') + bag.close() + sys.exit(0) + +def main(): + global bag + + cam0_topic = '/cam0/image_raw' + cam1_topic = '/cam1/image_raw' + imu0_topic = '/imu0' + grnd_topic = '/vrpn_client/raw_transform' + + rospy.init_node('controlbag') + rospy.set_param('play_bag', False) + + cam0_pub = rospy.Publisher(cam0_topic, Image, queue_size=10) + cam1_pub = rospy.Publisher(cam1_topic, Image, queue_size=10) + imu0_pub = rospy.Publisher(imu0_topic, Imu, queue_size=10) + grnd_pub = rospy.Publisher(grnd_topic, TransformStamped, queue_size=10) + + signal.signal(signal.SIGINT, signal_handler) + + bag = rosbag.Bag('/home/raphael/dev/MSCKF_ws/bag/TUM/dataset-corridor1_1024_16.bag') + for topic, msg, t in bag.read_messages(topics=[cam0_topic, cam1_topic, imu0_topic, grnd_topic]): + + # pause if parameter set to false + flag = False + while not rospy.get_param('/play_bag'): + time.sleep(0.01) + if not flag: + print("stopped playback") + flag = not flag + + if flag: + print("resume playback") + + if topic == cam0_topic: + cam0_pub.publish(msg) + + elif topic == cam1_topic: + cam1_pub.publish(msg) + + elif topic == imu0_topic: + imu0_pub.publish(msg) + + elif topic ==grnd_topic: + grnd_pub.publish(msg) + + #print msg + bag.close() + +if __name__== "__main__": + main() \ No newline at end of file diff --git a/src/control.py b/src/control.py new file mode 100755 index 0000000..3292b5b --- /dev/null +++ b/src/control.py @@ -0,0 +1,64 @@ +#!/usr/bin/env 
python +import rosbag +import rospy +from sensor_msgs.msg import Imu, Image +from geometry_msgs.msg import TransformStamped +import time +import signal +import sys + + +def signal_handler(sig, frame): + print('gracefully exiting the program.') + bag.close() + sys.exit(0) + +def main(): + global bag + + cam0_topic = '/cam0/image_raw' + cam1_topic = '/cam1/image_raw' + imu0_topic = '/imu0' + grnd_topic = '/vrpn_client/raw_transform' + + rospy.init_node('controlbag') + rospy.set_param('play_bag', False) + + cam0_pub = rospy.Publisher(cam0_topic, Image, queue_size=10) + cam1_pub = rospy.Publisher(cam1_topic, Image, queue_size=10) + imu0_pub = rospy.Publisher(imu0_topic, Imu, queue_size=10) + grnd_pub = rospy.Publisher(grnd_topic, TransformStamped, queue_size=10) + + signal.signal(signal.SIGINT, signal_handler) + + bag = rosbag.Bag('/home/raphael/dev/MSCKF_ws/bag/TUM/dataset-corridor1_1024_16.bag') + for topic, msg, t in bag.read_messages(topics=[cam0_topic, cam1_topic, imu0_topic, grnd_topic]): + + # pause if parameter set to false + flag = False + while not rospy.get_param('/play_bag'): + time.sleep(0.01) + if not flag: + print("stopped playback") + flag = not flag + + if flag: + print("resume playback") + + if topic == cam0_topic: + cam0_pub.publish(msg) + + elif topic == cam1_topic: + cam1_pub.publish(msg) + + elif topic == imu0_topic: + imu0_pub.publish(msg) + + elif topic ==grnd_topic: + grnd_pub.publish(msg) + + #print msg + bag.close() + +if __name__== "__main__": + main() \ No newline at end of file diff --git a/src/image_handler.cpp b/src/image_handler.cpp new file mode 100644 index 0000000..21f7238 --- /dev/null +++ b/src/image_handler.cpp @@ -0,0 +1,293 @@ +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +namespace msckf_vio { +namespace image_handler { + + +cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics) +{ + return cv::Point2f(p_in.x * intrinsics[0] + intrinsics[2], p_in.y 
* intrinsics[1] + intrinsics[3]); +} + +cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics) +{ + return cv::Point2f((p_in.x - intrinsics[2])/intrinsics[0], (p_in.y - intrinsics[3])/intrinsics[1]); +} + +void undistortImage( + cv::InputArray src, + cv::OutputArray dst, + const std::string& distortion_model, + const cv::Vec4d& intrinsics, + const cv::Vec4d& distortion_coeffs) +{ + + const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2], + 0.0, intrinsics[1], intrinsics[3], + 0.0, 0.0, 1.0); + + if (distortion_model == "pre-equidistant") + cv::fisheye::undistortImage(src, dst, K, distortion_coeffs, K); + else if (distortion_model == "equidistant") + src.copyTo(dst); + + else if (distortion_model == "pre-radtan") + cv::undistort(src, dst, K, distortion_coeffs, K); + else if (distortion_model == "radtan") + src.copyTo(dst); + +} + +void undistortPoint( + const cv::Point2f& pt_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs, + cv::Point2f& pt_out, + const cv::Matx33d &rectification_matrix, + const cv::Vec4d &new_intrinsics) { + + + std::vector pts_in; + std::vector pts_out; + pts_in.push_back(pt_in); + if (pts_in.size() == 0) return; + + const cv::Matx33d K( + intrinsics[0], 0.0, intrinsics[2], + 0.0, intrinsics[1], intrinsics[3], + 0.0, 0.0, 1.0); + + const cv::Matx33d K_new( + new_intrinsics[0], 0.0, new_intrinsics[2], + 0.0, new_intrinsics[1], new_intrinsics[3], + 0.0, 0.0, 1.0); + + if (distortion_model == "radtan") { + cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs, + rectification_matrix, K_new); + } + // equidistant + else if (distortion_model == "equidistant") { + cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs, + rectification_matrix, K_new); + } + // fov + else if (distortion_model == "fov") { + for(int i = 0; i < pts_in.size(); i++) + { + float omega = distortion_coeffs[0]; + float rd = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * 
pts_in[i].y); + float ru = tan(rd * omega)/(2 * tan(omega / 2)); + + cv::Point2f newPoint( + ((pts_in[i].x - intrinsics[2]) / intrinsics[0]) * (ru / rd), + ((pts_in[i].y - intrinsics[3]) / intrinsics[1]) * (ru / rd)); + + pts_out.push_back(newPoint); + } + } + + else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan") + { + std::vector temp_pts_out; + for(int i = 0; i < pts_in.size(); i++) + temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics)); + + pts_out = temp_pts_out; + } + else { + ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...", + distortion_model.c_str()); + cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs, + rectification_matrix, K_new); + } + pt_out = pts_out[0]; + return; +} + +void undistortPoints( + const std::vector& pts_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs, + std::vector& pts_out, + const cv::Matx33d &rectification_matrix, + const cv::Vec4d &new_intrinsics) { + + if (pts_in.size() == 0) return; + + const cv::Matx33d K( + intrinsics[0], 0.0, intrinsics[2], + 0.0, intrinsics[1], intrinsics[3], + 0.0, 0.0, 1.0); + + const cv::Matx33d K_new( + new_intrinsics[0], 0.0, new_intrinsics[2], + 0.0, new_intrinsics[1], new_intrinsics[3], + 0.0, 0.0, 1.0); + + if (distortion_model == "radtan") { + cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs, + rectification_matrix, K_new); + } + else if (distortion_model == "equidistant") { + cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs, + rectification_matrix, K_new); + } + else if (distortion_model == "fov") { + for(int i = 0; i < pts_in.size(); i++) + { + float omega = distortion_coeffs[0]; + float rd = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y); + float ru = tan(rd * omega)/(2 * tan(omega / 2)); + + cv::Point2f newPoint( + ((pts_in[i].x - intrinsics[2]) / intrinsics[0]) * (ru / rd), + ((pts_in[i].y - intrinsics[3]) / intrinsics[1]) * 
(ru / rd)); + + pts_out.push_back(newPoint); + } + } + else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan") + { + std::vector temp_pts_out; + for(int i = 0; i < pts_in.size(); i++) + temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics)); + + pts_out = temp_pts_out; + + } + else { + ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...", + distortion_model.c_str()); + cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs, + rectification_matrix, K_new); + } + + return; +} + +std::vector distortPoints( + const std::vector& pts_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs) { + + const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2], + 0.0, intrinsics[1], intrinsics[3], + 0.0, 0.0, 1.0); + + std::vector pts_out; + if (distortion_model == "radtan") { + std::vector homogenous_pts; + cv::convertPointsToHomogeneous(pts_in, homogenous_pts); + cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K, + distortion_coeffs, pts_out); + } else if (distortion_model == "equidistant") { + cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs); + } + else if (distortion_model == "fov") { + for(int i = 0; i < pts_in.size(); i++) + { + // based on 'straight lines have to be straight' + float ru = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y); + float omega = distortion_coeffs[0]; + float rd = 1 / (omega)*atan(2*ru*tan(omega / 2)); + + cv::Point2f newPoint( + pts_in[i].x * (rd/ru) * intrinsics[0] + intrinsics[2], + pts_in[i].y * (rd/ru) * intrinsics[1] + intrinsics[3]); + + pts_out.push_back(newPoint); + } + } + else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan") + { + std::vector temp_pts_out; + for(int i = 0; i < pts_in.size(); i++) + temp_pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics)); + + pts_out = temp_pts_out; + } + else { + ROS_WARN_ONCE("The model %s is unrecognized, 
using radtan instead...", + distortion_model.c_str()); + std::vector homogenous_pts; + cv::convertPointsToHomogeneous(pts_in, homogenous_pts); + cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K, + distortion_coeffs, pts_out); + } + + return pts_out; +} + +cv::Point2f distortPoint( + const cv::Point2f& pt_in, + const cv::Vec4d& intrinsics, + const std::string& distortion_model, + const cv::Vec4d& distortion_coeffs) { + + std::vector pts_in; + pts_in.push_back(pt_in); + + const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2], + 0.0, intrinsics[1], intrinsics[3], + 0.0, 0.0, 1.0); + + std::vector pts_out; + if (distortion_model == "radtan") { + std::vector homogenous_pts; + cv::convertPointsToHomogeneous(pts_in, homogenous_pts); + cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K, + distortion_coeffs, pts_out); + } else if (distortion_model == "equidistant") { + cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs); + } + else if (distortion_model == "fov") { + for(int i = 0; i < pts_in.size(); i++) + { + // based on 'straight lines have to be straight' + float ru = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y); + float omega = distortion_coeffs[0]; + float rd = 1 / (omega)*atan(2*ru*tan(omega / 2)); + + cv::Point2f newPoint( + pts_in[i].x * (rd/ru) * intrinsics[0] + intrinsics[2], + pts_in[i].y * (rd/ru) * intrinsics[1] + intrinsics[3]); + + pts_out.push_back(newPoint); + } + } + else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan") + { + std::vector temp_pts_out; + for(int i = 0; i < pts_in.size(); i++) + pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics)); + + pts_out = temp_pts_out; + } + else { + ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...", + distortion_model.c_str()); + std::vector homogenous_pts; + cv::convertPointsToHomogeneous(pts_in, homogenous_pts); + cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), 
cv::Vec3d::zeros(), K, + distortion_coeffs, pts_out); + } + + return pts_out[0]; +} + + } // namespace image_handler +} // namespace msckf_vio diff --git a/src/image_processor.cpp b/src/image_processor.cpp index a5039f3..39206e1 100644 --- a/src/image_processor.cpp +++ b/src/image_processor.cpp @@ -17,6 +17,7 @@ #include #include #include +#include using namespace std; using namespace cv; @@ -41,51 +42,54 @@ ImageProcessor::~ImageProcessor() { } bool ImageProcessor::loadParameters() { + + // debug parameters + nh.param("StreamPause", STREAMPAUSE, false); // Camera calibration parameters nh.param("cam0/distortion_model", - cam0_distortion_model, string("radtan")); + cam0.distortion_model, string("radtan")); nh.param("cam1/distortion_model", - cam1_distortion_model, string("radtan")); + cam1.distortion_model, string("radtan")); vector cam0_resolution_temp(2); nh.getParam("cam0/resolution", cam0_resolution_temp); - cam0_resolution[0] = cam0_resolution_temp[0]; - cam0_resolution[1] = cam0_resolution_temp[1]; + cam0.resolution[0] = cam0_resolution_temp[0]; + cam0.resolution[1] = cam0_resolution_temp[1]; vector cam1_resolution_temp(2); nh.getParam("cam1/resolution", cam1_resolution_temp); - cam1_resolution[0] = cam1_resolution_temp[0]; - cam1_resolution[1] = cam1_resolution_temp[1]; + cam1.resolution[0] = cam1_resolution_temp[0]; + cam1.resolution[1] = cam1_resolution_temp[1]; vector cam0_intrinsics_temp(4); nh.getParam("cam0/intrinsics", cam0_intrinsics_temp); - cam0_intrinsics[0] = cam0_intrinsics_temp[0]; - cam0_intrinsics[1] = cam0_intrinsics_temp[1]; - cam0_intrinsics[2] = cam0_intrinsics_temp[2]; - cam0_intrinsics[3] = cam0_intrinsics_temp[3]; + cam0.intrinsics[0] = cam0_intrinsics_temp[0]; + cam0.intrinsics[1] = cam0_intrinsics_temp[1]; + cam0.intrinsics[2] = cam0_intrinsics_temp[2]; + cam0.intrinsics[3] = cam0_intrinsics_temp[3]; vector cam1_intrinsics_temp(4); nh.getParam("cam1/intrinsics", cam1_intrinsics_temp); - cam1_intrinsics[0] = cam1_intrinsics_temp[0]; - 
cam1_intrinsics[1] = cam1_intrinsics_temp[1]; - cam1_intrinsics[2] = cam1_intrinsics_temp[2]; - cam1_intrinsics[3] = cam1_intrinsics_temp[3]; + cam1.intrinsics[0] = cam1_intrinsics_temp[0]; + cam1.intrinsics[1] = cam1_intrinsics_temp[1]; + cam1.intrinsics[2] = cam1_intrinsics_temp[2]; + cam1.intrinsics[3] = cam1_intrinsics_temp[3]; vector cam0_distortion_coeffs_temp(4); nh.getParam("cam0/distortion_coeffs", cam0_distortion_coeffs_temp); - cam0_distortion_coeffs[0] = cam0_distortion_coeffs_temp[0]; - cam0_distortion_coeffs[1] = cam0_distortion_coeffs_temp[1]; - cam0_distortion_coeffs[2] = cam0_distortion_coeffs_temp[2]; - cam0_distortion_coeffs[3] = cam0_distortion_coeffs_temp[3]; + cam0.distortion_coeffs[0] = cam0_distortion_coeffs_temp[0]; + cam0.distortion_coeffs[1] = cam0_distortion_coeffs_temp[1]; + cam0.distortion_coeffs[2] = cam0_distortion_coeffs_temp[2]; + cam0.distortion_coeffs[3] = cam0_distortion_coeffs_temp[3]; vector cam1_distortion_coeffs_temp(4); nh.getParam("cam1/distortion_coeffs", cam1_distortion_coeffs_temp); - cam1_distortion_coeffs[0] = cam1_distortion_coeffs_temp[0]; - cam1_distortion_coeffs[1] = cam1_distortion_coeffs_temp[1]; - cam1_distortion_coeffs[2] = cam1_distortion_coeffs_temp[2]; - cam1_distortion_coeffs[3] = cam1_distortion_coeffs_temp[3]; + cam1.distortion_coeffs[0] = cam1_distortion_coeffs_temp[0]; + cam1.distortion_coeffs[1] = cam1_distortion_coeffs_temp[1]; + cam1.distortion_coeffs[2] = cam1_distortion_coeffs_temp[2]; + cam1.distortion_coeffs[3] = cam1_distortion_coeffs_temp[3]; cv::Mat T_imu_cam0 = utils::getTransformCV(nh, "cam0/T_cam_imu"); cv::Matx33d R_imu_cam0(T_imu_cam0(cv::Rect(0,0,3,3))); @@ -123,27 +127,27 @@ bool ImageProcessor::loadParameters() { processor_config.stereo_threshold, 3); ROS_INFO("==========================================="); - ROS_INFO("cam0_resolution: %d, %d", - cam0_resolution[0], cam0_resolution[1]); + ROS_INFO("cam0.resolution: %d, %d", + cam0.resolution[0], cam0.resolution[1]); 
ROS_INFO("cam0_intrinscs: %f, %f, %f, %f", - cam0_intrinsics[0], cam0_intrinsics[1], - cam0_intrinsics[2], cam0_intrinsics[3]); - ROS_INFO("cam0_distortion_model: %s", - cam0_distortion_model.c_str()); + cam0.intrinsics[0], cam0.intrinsics[1], + cam0.intrinsics[2], cam0.intrinsics[3]); + ROS_INFO("cam0.distortion_model: %s", + cam0.distortion_model.c_str()); ROS_INFO("cam0_distortion_coefficients: %f, %f, %f, %f", - cam0_distortion_coeffs[0], cam0_distortion_coeffs[1], - cam0_distortion_coeffs[2], cam0_distortion_coeffs[3]); + cam0.distortion_coeffs[0], cam0.distortion_coeffs[1], + cam0.distortion_coeffs[2], cam0.distortion_coeffs[3]); - ROS_INFO("cam1_resolution: %d, %d", - cam1_resolution[0], cam1_resolution[1]); + ROS_INFO("cam1.resolution: %d, %d", + cam1.resolution[0], cam1.resolution[1]); ROS_INFO("cam1_intrinscs: %f, %f, %f, %f", - cam1_intrinsics[0], cam1_intrinsics[1], - cam1_intrinsics[2], cam1_intrinsics[3]); - ROS_INFO("cam1_distortion_model: %s", - cam1_distortion_model.c_str()); + cam1.intrinsics[0], cam1.intrinsics[1], + cam1.intrinsics[2], cam1.intrinsics[3]); + ROS_INFO("cam1.distortion_model: %s", + cam1.distortion_model.c_str()); ROS_INFO("cam1_distortion_coefficients: %f, %f, %f, %f", - cam1_distortion_coeffs[0], cam1_distortion_coeffs[1], - cam1_distortion_coeffs[2], cam1_distortion_coeffs[3]); + cam1.distortion_coeffs[0], cam1.distortion_coeffs[1], + cam1.distortion_coeffs[2], cam1.distortion_coeffs[3]); cout << R_imu_cam0 << endl; cout << t_imu_cam0.t() << endl; @@ -210,7 +214,9 @@ void ImageProcessor::stereoCallback( const sensor_msgs::ImageConstPtr& cam0_img, const sensor_msgs::ImageConstPtr& cam1_img) { - //cout << "==================================" << endl; + // stop playing bagfile if printing images + //if(STREAMPAUSE) + // nh.setParam("/play_bag_image", false); // Get the current image. 
cam0_curr_img_ptr = cv_bridge::toCvShare(cam0_img, @@ -218,12 +224,27 @@ void ImageProcessor::stereoCallback( cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img, sensor_msgs::image_encodings::MONO8); + ros::Time start_time = ros::Time::now(); + + + cv::Mat new_cam0; + cv::Mat new_cam1; + + image_handler::undistortImage(cam0_curr_img_ptr->image, new_cam0, cam0.distortion_model, cam0.intrinsics, cam0.distortion_coeffs); + image_handler::undistortImage(cam1_curr_img_ptr->image, new_cam1, cam1.distortion_model, cam1.intrinsics, cam1.distortion_coeffs); + + new_cam0.copyTo(cam0_curr_img_ptr->image); + new_cam1.copyTo(cam1_curr_img_ptr->image); + + //ROS_INFO("Publishing: %f", + // (ros::Time::now()-start_time).toSec()); + // Build the image pyramids once since they're used at multiple places createImagePyramids(); // Detect features in the first frame. if (is_first_img) { - ros::Time start_time = ros::Time::now(); + start_time = ros::Time::now(); initializeFirstFrame(); //ROS_INFO("Detection time: %f", // (ros::Time::now()-start_time).toSec()); @@ -236,7 +257,7 @@ void ImageProcessor::stereoCallback( // (ros::Time::now()-start_time).toSec()); } else { // Track the feature in the previous image. - ros::Time start_time = ros::Time::now(); + start_time = ros::Time::now(); trackFeatures(); //ROS_INFO("Tracking time: %f", // (ros::Time::now()-start_time).toSec()); @@ -244,6 +265,7 @@ void ImageProcessor::stereoCallback( // Add new features into the current image. start_time = ros::Time::now(); addNewFeatures(); + //ROS_INFO("Addition time: %f", // (ros::Time::now()-start_time).toSec()); @@ -266,16 +288,18 @@ void ImageProcessor::stereoCallback( // (ros::Time::now()-start_time).toSec()); // Publish features in the current image. - ros::Time start_time = ros::Time::now(); + start_time = ros::Time::now(); publish(); //ROS_INFO("Publishing: %f", // (ros::Time::now()-start_time).toSec()); // Update the previous image and previous features. 
+ cam0_prev_img_ptr = cam0_curr_img_ptr; prev_features_ptr = curr_features_ptr; std::swap(prev_cam0_pyramid_, curr_cam0_pyramid_); + // Initialize the current features to empty vectors. curr_features_ptr.reset(new GridFeatures()); for (int code = 0; code < @@ -283,6 +307,10 @@ void ImageProcessor::stereoCallback( (*curr_features_ptr)[code] = vector(0); } + // stop playing bagfile if printing images + //if(STREAMPAUSE) + // nh.setParam("/play_bag_image", true); + return; } @@ -389,7 +417,6 @@ void ImageProcessor::predictFeatureTracking( const cv::Matx33f& R_p_c, const cv::Vec4d& intrinsics, vector& compensated_pts) { - // Return directly if there are no input features. if (input_pts.size() == 0) { compensated_pts.clear(); @@ -420,7 +447,6 @@ void ImageProcessor::trackFeatures() { cam0_curr_img_ptr->image.rows / processor_config.grid_row; static int grid_width = cam0_curr_img_ptr->image.cols / processor_config.grid_col; - // Compute a rough relative rotation which takes a vector // from the previous frame to the current frame. Matx33f cam0_R_p_c; @@ -454,7 +480,7 @@ void ImageProcessor::trackFeatures() { vector track_inliers(0); predictFeatureTracking(prev_cam0_points, - cam0_R_p_c, cam0_intrinsics, curr_cam0_points); + cam0_R_p_c, cam0.intrinsics, curr_cam0_points); calcOpticalFlowPyrLK( prev_cam0_pyramid_, curr_cam0_pyramid_, @@ -549,14 +575,14 @@ void ImageProcessor::trackFeatures() { // Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1. 
vector cam0_ransac_inliers(0); twoPointRansac(prev_matched_cam0_points, curr_matched_cam0_points, - cam0_R_p_c, cam0_intrinsics, cam0_distortion_model, - cam0_distortion_coeffs, processor_config.ransac_threshold, + cam0_R_p_c, cam0.intrinsics, cam0.distortion_model, + cam0.distortion_coeffs, processor_config.ransac_threshold, 0.99, cam0_ransac_inliers); vector cam1_ransac_inliers(0); twoPointRansac(prev_matched_cam1_points, curr_matched_cam1_points, - cam1_R_p_c, cam1_intrinsics, cam1_distortion_model, - cam1_distortion_coeffs, processor_config.ransac_threshold, + cam1_R_p_c, cam1.intrinsics, cam1.distortion_model, + cam1.distortion_coeffs, processor_config.ransac_threshold, 0.99, cam1_ransac_inliers); // Number of features after ransac. @@ -581,6 +607,7 @@ void ImageProcessor::trackFeatures() { ++after_ransac; } + // Compute the tracking rate. int prev_feature_num = 0; for (const auto& item : *prev_features_ptr) @@ -610,7 +637,6 @@ void ImageProcessor::stereoMatch( const vector& cam0_points, vector& cam1_points, vector& inlier_markers) { - if (cam0_points.size() == 0) return; if(cam1_points.size() == 0) { @@ -618,11 +644,17 @@ void ImageProcessor::stereoMatch( // rotation from stereo extrinsics const cv::Matx33d R_cam0_cam1 = R_cam1_imu.t() * R_cam0_imu; vector cam0_points_undistorted; - undistortPoints(cam0_points, cam0_intrinsics, cam0_distortion_model, - cam0_distortion_coeffs, cam0_points_undistorted, + image_handler::undistortPoints(cam0_points, cam0.intrinsics, cam0.distortion_model, + cam0.distortion_coeffs, cam0_points_undistorted, R_cam0_cam1); +<<<<<<< HEAD cam1_points = distortPoints(cam0_points_undistorted, cam1_intrinsics, cam1_distortion_model, cam1_distortion_coeffs); +======= + + cam1_points = image_handler::distortPoints(cam0_points_undistorted, cam1.intrinsics, + cam1.distortion_model, cam1.distortion_coeffs); +>>>>>>> photometry-jakobi } // Track features using LK optical flow method. 
@@ -660,18 +692,21 @@ void ImageProcessor::stereoMatch( // Further remove outliers based on the known // essential matrix. + + vector cam0_points_undistorted(0); vector cam1_points_undistorted(0); - undistortPoints( - cam0_points, cam0_intrinsics, cam0_distortion_model, - cam0_distortion_coeffs, cam0_points_undistorted); - undistortPoints( - cam1_points, cam1_intrinsics, cam1_distortion_model, - cam1_distortion_coeffs, cam1_points_undistorted); + image_handler::undistortPoints( + cam0_points, cam0.intrinsics, cam0.distortion_model, + cam0.distortion_coeffs, cam0_points_undistorted); + image_handler::undistortPoints( + cam1_points, cam1.intrinsics, cam1.distortion_model, + cam1.distortion_coeffs, cam1_points_undistorted); + double norm_pixel_unit = 4.0 / ( - cam0_intrinsics[0]+cam0_intrinsics[1]+ - cam1_intrinsics[0]+cam1_intrinsics[1]); + cam0.intrinsics[0]+cam0.intrinsics[1]+ + cam1.intrinsics[0]+cam1.intrinsics[1]); for (int i = 0; i < cam0_points_undistorted.size(); ++i) { if (inlier_markers[i] == 0) continue; @@ -698,8 +733,8 @@ void ImageProcessor::addNewFeatures() { cam0_curr_img_ptr->image.rows / processor_config.grid_row; static int grid_width = cam0_curr_img_ptr->image.cols / processor_config.grid_col; - // Create a mask to avoid redetecting existing features. + Mat mask(curr_img.rows, curr_img.cols, CV_8U, Scalar(1)); for (const auto& features : *curr_features_ptr) { @@ -719,7 +754,6 @@ void ImageProcessor::addNewFeatures() { mask(row_range, col_range) = 0; } } - // Detect new features. 
vector new_features(0); detector_ptr->detect(curr_img, new_features, mask); @@ -734,7 +768,6 @@ void ImageProcessor::addNewFeatures() { new_feature_sieve[ row*processor_config.grid_col+col].push_back(feature); } - new_features.clear(); for (auto& item : new_feature_sieve) { if (item.size() > processor_config.grid_max_feature_num) { @@ -747,7 +780,6 @@ void ImageProcessor::addNewFeatures() { } int detected_new_features = new_features.size(); - // Find the stereo matched points for the newly // detected features. vector cam0_points(new_features.size()); @@ -775,7 +807,6 @@ void ImageProcessor::addNewFeatures() { static_cast(detected_new_features) < 0.1) ROS_WARN("Images at [%f] seems unsynced...", cam0_curr_img_ptr->header.stamp.toSec()); - // Group the features into grids GridFeatures grid_new_features; for (int code = 0; code < @@ -797,7 +828,6 @@ void ImageProcessor::addNewFeatures() { new_feature.cam1_point = cam1_point; grid_new_features[code].push_back(new_feature); } - // Sort the new features in each grid based on its response. 
for (auto& item : grid_new_features) std::sort(item.second.begin(), item.second.end(), @@ -847,73 +877,6 @@ void ImageProcessor::pruneGridFeatures() { return; } -void ImageProcessor::undistortPoints( - const vector& pts_in, - const cv::Vec4d& intrinsics, - const string& distortion_model, - const cv::Vec4d& distortion_coeffs, - vector& pts_out, - const cv::Matx33d &rectification_matrix, - const cv::Vec4d &new_intrinsics) { - - if (pts_in.size() == 0) return; - - const cv::Matx33d K( - intrinsics[0], 0.0, intrinsics[2], - 0.0, intrinsics[1], intrinsics[3], - 0.0, 0.0, 1.0); - - const cv::Matx33d K_new( - new_intrinsics[0], 0.0, new_intrinsics[2], - 0.0, new_intrinsics[1], new_intrinsics[3], - 0.0, 0.0, 1.0); - - if (distortion_model == "radtan") { - cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs, - rectification_matrix, K_new); - } else if (distortion_model == "equidistant") { - cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs, - rectification_matrix, K_new); - } else { - ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...", - distortion_model.c_str()); - cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs, - rectification_matrix, K_new); - } - - return; -} - -vector ImageProcessor::distortPoints( - const vector& pts_in, - const cv::Vec4d& intrinsics, - const string& distortion_model, - const cv::Vec4d& distortion_coeffs) { - - const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2], - 0.0, intrinsics[1], intrinsics[3], - 0.0, 0.0, 1.0); - - vector pts_out; - if (distortion_model == "radtan") { - vector homogenous_pts; - cv::convertPointsToHomogeneous(pts_in, homogenous_pts); - cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K, - distortion_coeffs, pts_out); - } else if (distortion_model == "equidistant") { - cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs); - } else { - ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...", - distortion_model.c_str()); - vector 
homogenous_pts; - cv::convertPointsToHomogeneous(pts_in, homogenous_pts); - cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K, - distortion_coeffs, pts_out); - } - - return pts_out; -} - void ImageProcessor::integrateImuData( Matx33f& cam0_R_p_c, Matx33f& cam1_R_p_c) { // Find the start and the end limit within the imu msg buffer. @@ -965,7 +928,6 @@ void ImageProcessor::integrateImuData( void ImageProcessor::rescalePoints( vector& pts1, vector& pts2, float& scaling_factor) { - scaling_factor = 0.0f; for (int i = 0; i < pts1.size(); ++i) { @@ -1009,10 +971,10 @@ void ImageProcessor::twoPointRansac( // Undistort all the points. vector pts1_undistorted(pts1.size()); vector pts2_undistorted(pts2.size()); - undistortPoints( + image_handler::undistortPoints( pts1, intrinsics, distortion_model, distortion_coeffs, pts1_undistorted); - undistortPoints( + image_handler::undistortPoints( pts2, intrinsics, distortion_model, distortion_coeffs, pts2_undistorted); @@ -1230,7 +1192,6 @@ void ImageProcessor::twoPointRansac( } void ImageProcessor::publish() { - // Publish features. 
CameraMeasurementPtr feature_msg_ptr(new CameraMeasurement); feature_msg_ptr->header.stamp = cam0_curr_img_ptr->header.stamp; @@ -1250,12 +1211,12 @@ void ImageProcessor::publish() { vector curr_cam0_points_undistorted(0); vector curr_cam1_points_undistorted(0); - undistortPoints( - curr_cam0_points, cam0_intrinsics, cam0_distortion_model, - cam0_distortion_coeffs, curr_cam0_points_undistorted); - undistortPoints( - curr_cam1_points, cam1_intrinsics, cam1_distortion_model, - cam1_distortion_coeffs, curr_cam1_points_undistorted); + image_handler::undistortPoints( + curr_cam0_points, cam0.intrinsics, cam0.distortion_model, + cam0.distortion_coeffs, curr_cam0_points_undistorted); + image_handler::undistortPoints( + curr_cam1_points, cam1.intrinsics, cam1.distortion_model, + cam1.distortion_coeffs, curr_cam1_points_undistorted); for (int i = 0; i < curr_ids.size(); ++i) { feature_msg_ptr->features.push_back(FeatureMeasurement()); diff --git a/src/msckf_vio.cpp b/src/msckf_vio.cpp index d9c1b8d..437a98c 100644 --- a/src/msckf_vio.cpp +++ b/src/msckf_vio.cpp @@ -27,6 +27,11 @@ #include #include +#include +#include +#include + + using namespace std; using namespace Eigen; @@ -53,11 +58,20 @@ map MsckfVio::chi_squared_test_table; MsckfVio::MsckfVio(ros::NodeHandle& pnh): is_gravity_set(false), is_first_img(true), + image_sub(10), + nan_flag(false), + last_time_bound(0), nh(pnh) { return; } bool MsckfVio::loadParameters() { + //Photometry Flag + nh.param("FILTER", FILTER, 0); + nh.param("PrintImages", PRINTIMAGES, false); + nh.param("StreamPause", STREAMPAUSE, false); + nh.param("GroundTruth", GROUNDTRUTH, false); + // Frame id nh.param("fixed_frame_id", fixed_frame_id, "world"); nh.param("child_frame_id", child_frame_id, "robot"); @@ -116,6 +130,66 @@ bool MsckfVio::loadParameters() { nh.param("initial_covariance/extrinsic_translation_cov", extrinsic_translation_cov, 1e-4); + // Get the initial irradiance covariance + nh.param("initial_covariance/irradiance_frame_bias", + 
irradiance_frame_bias, 0.1); + + // Get the photometric patch size + nh.param("patch_size_n", + N, 3); + // Get rescaling factor for image + nh.param("image_scale", SCALE, 1); + + // get camera information (used for back projection) + nh.param("cam0/distortion_model", + cam0.distortion_model, string("radtan")); + nh.param("cam1/distortion_model", + cam1.distortion_model, string("radtan")); + + vector cam0_resolution_temp(2); + nh.getParam("cam0/resolution", cam0_resolution_temp); + cam0.resolution[0] = cam0_resolution_temp[0]; + cam0.resolution[1] = cam0_resolution_temp[1]; + + vector cam1_resolution_temp(2); + nh.getParam("cam1/resolution", cam1_resolution_temp); + cam1.resolution[0] = cam1_resolution_temp[0]; + cam1.resolution[1] = cam1_resolution_temp[1]; + + vector cam0_intrinsics_temp(4); + nh.getParam("cam0/intrinsics", cam0_intrinsics_temp); + cam0.intrinsics[0] = cam0_intrinsics_temp[0]; + cam0.intrinsics[1] = cam0_intrinsics_temp[1]; + cam0.intrinsics[2] = cam0_intrinsics_temp[2]; + cam0.intrinsics[3] = cam0_intrinsics_temp[3]; + + vector cam1_intrinsics_temp(4); + nh.getParam("cam1/intrinsics", cam1_intrinsics_temp); + cam1.intrinsics[0] = cam1_intrinsics_temp[0]; + cam1.intrinsics[1] = cam1_intrinsics_temp[1]; + cam1.intrinsics[2] = cam1_intrinsics_temp[2]; + cam1.intrinsics[3] = cam1_intrinsics_temp[3]; + + vector cam0_distortion_coeffs_temp(4); + nh.getParam("cam0/distortion_coeffs", + cam0_distortion_coeffs_temp); + cam0.distortion_coeffs[0] = cam0_distortion_coeffs_temp[0]; + cam0.distortion_coeffs[1] = cam0_distortion_coeffs_temp[1]; + cam0.distortion_coeffs[2] = cam0_distortion_coeffs_temp[2]; + cam0.distortion_coeffs[3] = cam0_distortion_coeffs_temp[3]; + + vector cam1_distortion_coeffs_temp(4); + nh.getParam("cam1/distortion_coeffs", + cam1_distortion_coeffs_temp); + cam1.distortion_coeffs[0] = cam1_distortion_coeffs_temp[0]; + cam1.distortion_coeffs[1] = cam1_distortion_coeffs_temp[1]; + cam1.distortion_coeffs[2] = 
cam1_distortion_coeffs_temp[2]; + cam1.distortion_coeffs[3] = cam1_distortion_coeffs_temp[3]; + + cam0.id = 0; + cam1.id = 1; + + state_server.state_cov = MatrixXd::Zero(21, 21); for (int i = 3; i < 6; ++i) state_server.state_cov(i, i) = gyro_bias_cov; @@ -134,14 +208,17 @@ bool MsckfVio::loadParameters() { state_server.imu_state.R_imu_cam0 = T_cam0_imu.linear().transpose(); state_server.imu_state.t_cam0_imu = T_cam0_imu.translation(); + true_state_server.imu_state.R_imu_cam0 = T_cam0_imu.linear().transpose(); + true_state_server.imu_state.t_cam0_imu = T_cam0_imu.translation(); + CAMState::T_cam0_cam1 = utils::getTransformEigen(nh, "cam1/T_cn_cnm1"); IMUState::T_imu_body = utils::getTransformEigen(nh, "T_imu_body").inverse(); // Maximum number of camera states to be stored - nh.param("max_cam_state_size", max_cam_state_size, 30); - + nh.param("max_cam_state_size", max_cam_state_size, 20); + //cam_state_size = max_cam_state_size; ROS_INFO("==========================================="); ROS_INFO("fixed frame id: %s", fixed_frame_id.c_str()); ROS_INFO("child frame id: %s", child_frame_id.c_str()); @@ -171,23 +248,43 @@ bool MsckfVio::loadParameters() { cout << T_imu_cam0.linear() << endl; cout << T_imu_cam0.translation().transpose() << endl; + cout << "OpenCV version : " << CV_VERSION << endl; + cout << "Major version : " << CV_MAJOR_VERSION << endl; + cout << "Minor version : " << CV_MINOR_VERSION << endl; + cout << "Subminor version : " << CV_SUBMINOR_VERSION << endl; + + ROS_INFO("max camera state #: %d", max_cam_state_size); ROS_INFO("==========================================="); return true; } bool MsckfVio::createRosIO() { + + // activating bag playing parameter, for debugging + nh.setParam("/play_bag", true); + + eval_time = 0; odom_pub = nh.advertise("odom", 10); + truth_odom_pub = nh.advertise("true_odom", 10); feature_pub = nh.advertise( "feature_point_cloud", 10); + marker_pub = nh.advertise("/visualization_marker_array", 10); reset_srv = 
nh.advertiseService("reset", &MsckfVio::resetCallback, this); imu_sub = nh.subscribe("imu", 100, &MsckfVio::imuCallback, this); - feature_sub = nh.subscribe("features", 40, - &MsckfVio::featureCallback, this); + truth_sub = nh.subscribe("ground_truth", 100, + &MsckfVio::truthCallback, this); + + cam0_img_sub.subscribe(nh, "cam0_image", 100); + cam1_img_sub.subscribe(nh, "cam1_image", 100); + feature_sub.subscribe(nh, "features", 100); + + image_sub.connectInput(cam0_img_sub, cam1_img_sub, feature_sub); + image_sub.registerCallback(&MsckfVio::imageCallback, this); mocap_odom_sub = nh.subscribe("mocap_odom", 10, &MsckfVio::mocapOdomCallback, this); @@ -214,7 +311,7 @@ bool MsckfVio::initialize() { // Initialize the chi squared test table with confidence // level 0.95. - for (int i = 1; i < 100; ++i) { + for (int i = 1; i < 1000; ++i) { boost::math::chi_squared chi_squared_dist(i); chi_squared_test_table[i] = boost::math::quantile(chi_squared_dist, 0.05); @@ -223,11 +320,23 @@ bool MsckfVio::initialize() { if (!createRosIO()) return false; ROS_INFO("Finish creating ROS IO..."); + /* + rosbag::Bag bag; + bag.open("/home/raphael/dev/MSCKF_ws/bag/TUM/dataset-corridor1_1024_16.bag", rosbag::bagmode::Read); + + for(rosbag::MessageInstance const m: rosbag::View(bag)) + { + std_msgs::Int32::ConstPtr i = m.instantiate(); + if (i != NULL) + std::cout << i->data << std::endl; + } + + bag.close(); + */ return true; } -void MsckfVio::imuCallback( - const sensor_msgs::ImuConstPtr& msg) { +void MsckfVio::imuCallback(const sensor_msgs::ImuConstPtr& msg){ // IMU msgs are pushed backed into a buffer instead of // being processed immediately. The IMU msgs are processed @@ -245,6 +354,254 @@ void MsckfVio::imuCallback( return; } +void MsckfVio::truthCallback(const geometry_msgs::TransformStampedPtr& msg){ + static bool first_truth_odom_msg = true; + + // If this is the first mocap odometry messsage, set + // the initial frame. 
+ if (first_truth_odom_msg) { + Quaterniond orientation; + Vector3d translation; + tf::vectorMsgToEigen( + msg->transform.translation, translation); + tf::quaternionMsgToEigen( + msg->transform.rotation, orientation); + + mocap_initial_orientation = QtoV(orientation); + mocap_initial_position = translation; + + first_truth_odom_msg = false; + } + + // Transform the ground truth. + Quaterniond orientation; + Vector3d translation; + //tf::vectorMsgToEigen( + // msg->transform.translation, translation); + //tf::quaternionMsgToEigen( + // msg->transform.rotation, orientation); + tf::vectorMsgToEigen( + msg->transform.translation, translation); + tf::quaternionMsgToEigen( + msg->transform.rotation, orientation); + + Eigen::Vector4d q = quaternionMultiplication(quaternionConjugate(mocap_initial_orientation), QtoV(orientation)); + + translation -= mocap_initial_position; + + msg->transform.rotation.x = q(0); + msg->transform.rotation.y = q(1); + msg->transform.rotation.z = q(2); + msg->transform.rotation.w = q(3); + + msg->transform.translation.x = translation(0); + msg->transform.translation.y = translation(1); + msg->transform.translation.z = translation(2); + + truth_msg_buffer.push_back(*msg); +} + +void MsckfVio::imageCallback( + const sensor_msgs::ImageConstPtr& cam0_img, + const sensor_msgs::ImageConstPtr& cam1_img, + const CameraMeasurementConstPtr& feature_msg) +{ + // stop playing bagfile if printing images + if(STREAMPAUSE) + nh.setParam("/play_bag", false); + + // Return if the gravity vector has not been set. + if (!is_gravity_set) + { + if(STREAMPAUSE) + nh.setParam("/play_bag", true); + return; + } + + old_imu_state = state_server.imu_state; + old_true_state = true_state_server.imu_state; + + + // Start the system if the first image is received. + // The frame where the first image is received will be + // the origin. 
+ if (is_first_img) { + is_first_img = false; + + time_offset = feature_msg->header.stamp.toSec(); + state_server.imu_state.time = feature_msg->header.stamp.toSec(); + } + static double max_processing_time = 0.0; + static int critical_time_cntr = 0; + double processing_start_time = ros::Time::now().toSec(); + + // Propogate the IMU state. + // that are received before the image feature_msg. + ros::Time start_time = ros::Time::now(); + + nh.param("FILTER", FILTER, 0); + + batchImuProcessing(feature_msg->header.stamp.toSec()); + + double imu_processing_time = ( + ros::Time::now()-start_time).toSec(); + + // Augment the state vector. + start_time = ros::Time::now(); + //truePhotometricStateAugmentation(feature_msg->header.stamp.toSec()); + PhotometricStateAugmentation(feature_msg->header.stamp.toSec()); + double state_augmentation_time = ( + ros::Time::now()-start_time).toSec(); + + + // cout << "2" << endl; + // Add new observations for existing features or new + // features in the map server. + start_time = ros::Time::now(); + addFeatureObservations(feature_msg); + double add_observations_time = ( + ros::Time::now()-start_time).toSec(); + + + // cout << "3" << endl; + // Add new images to moving window + start_time = ros::Time::now(); + manageMovingWindow(cam0_img, cam1_img, feature_msg); + double manage_moving_window_time = ( + ros::Time::now()-start_time).toSec(); + + + // cout << "4" << endl; + + // Perform measurement update if necessary. + start_time = ros::Time::now(); + removeLostFeatures(); + double remove_lost_features_time = ( + ros::Time::now()-start_time).toSec(); + + // cout << "5" << endl; + start_time = ros::Time::now(); + pruneCamStateBuffer(); + double prune_cam_states_time = ( + ros::Time::now()-start_time).toSec(); + + // save true state in seperate state vector and calculate relative error in change + batchTruthProcessing(feature_msg->header.stamp.toSec()); + + + // cout << "6" << endl; + // Publish the odometry. 
+ start_time = ros::Time::now(); + publish(feature_msg->header.stamp); + double publish_time = ( + ros::Time::now()-start_time).toSec(); + + // Reset the system if necessary. + onlineReset(); + + double processing_end_time = ros::Time::now().toSec(); + double processing_time = + processing_end_time - processing_start_time; + if (processing_time > 1.0/frame_rate) { + ++critical_time_cntr; + //ROS_INFO("\033[1;31mTotal processing time %f/%d...\033[0m", + // processing_time, critical_time_cntr); + //printf("IMU processing time: %f/%f\n", + // imu_processing_time, imu_processing_time/processing_time); + //printf("State augmentation time: %f/%f\n", + // state_augmentation_time, state_augmentation_time/processing_time); + //printf("Add observations time: %f/%f\n", + // add_observations_time, add_observations_time/processing_time); + //printf("Remove lost features time: %f/%f\n", + // remove_lost_features_time, remove_lost_features_time/processing_time); + //printf("Remove camera states time: %f/%f\n", + // prune_cam_states_time, prune_cam_states_time/processing_time); + //printf("Publish time: %f/%f\n", + // publish_time, publish_time/processing_time); + } + + if(STREAMPAUSE) + nh.setParam("/play_bag", true); + return; +} + +void MsckfVio::manageMovingWindow( + const sensor_msgs::ImageConstPtr& cam0_img, + const sensor_msgs::ImageConstPtr& cam1_img, + const CameraMeasurementConstPtr& feature_msg) { + + //save exposure Time into moving window + cam0.moving_window[state_server.imu_state.id].exposureTime_ms = strtod(cam0_img->header.frame_id.data(), NULL) / 1000000; + cam1.moving_window[state_server.imu_state.id].exposureTime_ms = strtod(cam1_img->header.frame_id.data(), NULL) / 1000000; + if(cam0.moving_window[state_server.imu_state.id].exposureTime_ms < 1) + cam0.moving_window[state_server.imu_state.id].exposureTime_ms = 1; + if(cam1.moving_window[state_server.imu_state.id].exposureTime_ms < 1) + cam1.moving_window[state_server.imu_state.id].exposureTime_ms = 1; + 
if(cam0.moving_window[state_server.imu_state.id].exposureTime_ms > 100) + cam0.moving_window[state_server.imu_state.id].exposureTime_ms = 100; + if(cam1.moving_window[state_server.imu_state.id].exposureTime_ms > 100) + cam1.moving_window[state_server.imu_state.id].exposureTime_ms = 100; + + // Get the current image. + cv_bridge::CvImageConstPtr cam0_img_ptr = cv_bridge::toCvShare(cam0_img, + sensor_msgs::image_encodings::MONO8); + cv_bridge::CvImageConstPtr cam1_img_ptr = cv_bridge::toCvShare(cam1_img, + sensor_msgs::image_encodings::MONO8); + + cv::Mat new_cam0; + cv::Mat new_cam1; + + image_handler::undistortImage(cam0_img_ptr->image, new_cam0, cam0.distortion_model, cam0.intrinsics, cam0.distortion_coeffs); + image_handler::undistortImage(cam1_img_ptr->image, new_cam1, cam1.distortion_model, cam1.intrinsics, cam1.distortion_coeffs); + + // save image information into moving window + cam0.moving_window[state_server.imu_state.id].image = new_cam0.clone(); + cam1.moving_window[state_server.imu_state.id].image = new_cam1.clone(); + + cv::Mat xder; + cv::Mat yder; + cv::Mat deeper_frame; + + // generate derivative matrix for cam0 + cam0_img_ptr->image.convertTo(deeper_frame,CV_16S); + cv::Sobel(deeper_frame, xder, -1, 1, 0, 5); + cv::Sobel(deeper_frame, yder, -1, 0, 1, 5); + xder/=96.; + yder/=96.; + + // save into moving window + cam0.moving_window[state_server.imu_state.id].dximage = xder.clone(); + cam0.moving_window[state_server.imu_state.id].dyimage = yder.clone(); + + // generate derivative matrix for cam 1 + cam1_img_ptr->image.convertTo(deeper_frame,CV_16S); + cv::Sobel(deeper_frame, xder, -1, 1, 0, 5); + cv::Sobel(deeper_frame, yder, -1, 0, 1, 5); + xder/=96.; + yder/=96.; + + +/* + cv::Mat norm_abs_xderImage; + cv::convertScaleAbs(xder, norm_abs_xderImage); + cv::normalize(norm_abs_xderImage, norm_abs_xderImage, 0, 255, cv::NORM_MINMAX, CV_8UC1); + + cv::imshow("xder", norm_abs_xderImage); + cvWaitKey(0); +*/ + + // save into moving window + 
cam1.moving_window[state_server.imu_state.id].dximage = xder.clone(); + cam1.moving_window[state_server.imu_state.id].dyimage = yder.clone(); + + //TODO handle any massive overflow correctly (should be pruned, before this ever triggers) + while(cam0.moving_window.size() > 100) + { + cam1.moving_window.erase(cam1.moving_window.begin()); + cam0.moving_window.erase(cam0.moving_window.begin()); + } +} + void MsckfVio::initializeGravityAndBias() { // Initialize gravity and gyro bias. @@ -274,23 +631,26 @@ void MsckfVio::initializeGravityAndBias() { // is consistent with the inertial frame. double gravity_norm = gravity_imu.norm(); IMUState::gravity = Vector3d(0.0, 0.0, -gravity_norm); - + Quaterniond q0_i_w = Quaterniond::FromTwoVectors( gravity_imu, -IMUState::gravity); state_server.imu_state.orientation = rotationToQuaternion(q0_i_w.toRotationMatrix().transpose()); - + // printf("gravity Norm %f\n", gravity_norm); + // printf("linear_acc %f, %f, %f\n", gravity_imu(0), gravity_imu(1), gravity_imu(2)); + // printf("quaterniond: %f, %f, %f, %f\n", q0_i_w.w(), q0_i_w.x(), q0_i_w.y(), q0_i_w.z()); return; } bool MsckfVio::resetCallback( std_srvs::Trigger::Request& req, - std_srvs::Trigger::Response& res) { + std_srvs::Trigger::Response& res) +{ + cout << "reset" << endl; ROS_WARN("Start resetting msckf vio..."); // Temporarily shutdown the subscribers to prevent the // state from updating. - feature_sub.shutdown(); imu_sub.shutdown(); // Reset the IMU state. @@ -323,6 +683,10 @@ bool MsckfVio::resetCallback( nh.param("initial_covariance/extrinsic_translation_cov", extrinsic_translation_cov, 1e-4); + // Reset the irradiance covariance + nh.param("initial_covariance/irradiance_frame_bias", + irradiance_frame_bias, 0.1); + state_server.state_cov = MatrixXd::Zero(21, 21); for (int i = 3; i < 6; ++i) state_server.state_cov(i, i) = gyro_bias_cov; @@ -346,10 +710,14 @@ bool MsckfVio::resetCallback( is_first_img = true; // Restart the subscribers. 
- imu_sub = nh.subscribe("imu", 100, + imu_sub = nh.subscribe("imu", 1000, &MsckfVio::imuCallback, this); - feature_sub = nh.subscribe("features", 40, - &MsckfVio::featureCallback, this); + + truth_sub = nh.subscribe("ground_truth", 100, + &MsckfVio::truthCallback, this); + + // feature_sub = nh.subscribe("features", 40, + // &MsckfVio::featureCallback, this); // TODO: When can the reset fail? res.success = true; @@ -357,90 +725,7 @@ bool MsckfVio::resetCallback( return true; } -void MsckfVio::featureCallback( - const CameraMeasurementConstPtr& msg) { - - // Return if the gravity vector has not been set. - if (!is_gravity_set) return; - - // Start the system if the first image is received. - // The frame where the first image is received will be - // the origin. - if (is_first_img) { - is_first_img = false; - state_server.imu_state.time = msg->header.stamp.toSec(); - } - - static double max_processing_time = 0.0; - static int critical_time_cntr = 0; - double processing_start_time = ros::Time::now().toSec(); - - // Propogate the IMU state. - // that are received before the image msg. - ros::Time start_time = ros::Time::now(); - batchImuProcessing(msg->header.stamp.toSec()); - double imu_processing_time = ( - ros::Time::now()-start_time).toSec(); - - // Augment the state vector. - start_time = ros::Time::now(); - stateAugmentation(msg->header.stamp.toSec()); - double state_augmentation_time = ( - ros::Time::now()-start_time).toSec(); - - // Add new observations for existing features or new - // features in the map server. - start_time = ros::Time::now(); - addFeatureObservations(msg); - double add_observations_time = ( - ros::Time::now()-start_time).toSec(); - - // Perform measurement update if necessary. 
- start_time = ros::Time::now(); - removeLostFeatures(); - double remove_lost_features_time = ( - ros::Time::now()-start_time).toSec(); - - start_time = ros::Time::now(); - pruneCamStateBuffer(); - double prune_cam_states_time = ( - ros::Time::now()-start_time).toSec(); - - // Publish the odometry. - start_time = ros::Time::now(); - publish(msg->header.stamp); - double publish_time = ( - ros::Time::now()-start_time).toSec(); - - // Reset the system if necessary. - onlineReset(); - - double processing_end_time = ros::Time::now().toSec(); - double processing_time = - processing_end_time - processing_start_time; - if (processing_time > 1.0/frame_rate) { - ++critical_time_cntr; - ROS_INFO("\033[1;31mTotal processing time %f/%d...\033[0m", - processing_time, critical_time_cntr); - //printf("IMU processing time: %f/%f\n", - // imu_processing_time, imu_processing_time/processing_time); - //printf("State augmentation time: %f/%f\n", - // state_augmentation_time, state_augmentation_time/processing_time); - //printf("Add observations time: %f/%f\n", - // add_observations_time, add_observations_time/processing_time); - printf("Remove lost features time: %f/%f\n", - remove_lost_features_time, remove_lost_features_time/processing_time); - printf("Remove camera states time: %f/%f\n", - prune_cam_states_time, prune_cam_states_time/processing_time); - //printf("Publish time: %f/%f\n", - // publish_time, publish_time/processing_time); - } - - return; -} - -void MsckfVio::mocapOdomCallback( - const nav_msgs::OdometryConstPtr& msg) { +void MsckfVio::mocapOdomCallback(const nav_msgs::OdometryConstPtr& msg) { static bool first_mocap_odom_msg = true; // If this is the first mocap odometry messsage, set @@ -505,6 +790,212 @@ void MsckfVio::mocapOdomCallback( return; } +void MsckfVio::calcErrorState() +{ + + // imu "error" + err_state_server.imu_state.id = state_server.imu_state.id; + err_state_server.imu_state.time = state_server.imu_state.time; + + err_state_server.imu_state.orientation 
 = quaternionMultiplication(true_state_server.imu_state.orientation, quaternionConjugate(state_server.imu_state.orientation)); + + // convert to small angle approximation + err_state_server.imu_state.orientation *= 2; + err_state_server.imu_state.orientation(3) = 0; + + err_state_server.imu_state.position = true_state_server.imu_state.position - state_server.imu_state.position; + err_state_server.imu_state.velocity = true_state_server.imu_state.velocity - state_server.imu_state.velocity; + + err_state_server.imu_state.gyro_bias = true_state_server.imu_state.gyro_bias - state_server.imu_state.gyro_bias; + err_state_server.imu_state.acc_bias = true_state_server.imu_state.acc_bias - state_server.imu_state.acc_bias; + + err_state_server.imu_state.R_imu_cam0 = true_state_server.imu_state.R_imu_cam0 - state_server.imu_state.R_imu_cam0; + err_state_server.imu_state.t_cam0_imu = true_state_server.imu_state.t_cam0_imu - state_server.imu_state.t_cam0_imu; + + err_state_server.imu_state.orientation_null = true_state_server.imu_state.orientation_null - state_server.imu_state.orientation_null; + err_state_server.imu_state.position_null = true_state_server.imu_state.position_null - state_server.imu_state.position_null; + err_state_server.imu_state.velocity_null = true_state_server.imu_state.velocity_null - state_server.imu_state.velocity_null; + + auto cam_state_iter = state_server.cam_states.begin(); + auto true_cam_state_iter = true_state_server.cam_states.begin(); + auto err_cam_state_iter = err_state_server.cam_states.begin(); + + for (int i = 0; i < state_server.cam_states.size(); ++i, ++cam_state_iter, ++err_cam_state_iter, ++true_cam_state_iter) + { + // calculate error in camera rotation + Eigen::Vector4d q = cam_state_iter->second.orientation; + Eigen::Vector4d p = quaternionConjugate(true_cam_state_iter->second.orientation); + err_cam_state_iter->second.orientation = quaternionMultiplication(p, q); + // small angle approximation + 
err_cam_state_iter->second.orientation *= 2; + err_cam_state_iter->second.orientation(3) = 0; + + // calculate error of state position + err_cam_state_iter->second.position = true_cam_state_iter->second.position - cam_state_iter->second.position; + } +} + +void MsckfVio::batchTruthProcessing(const double& time_bound) { + + // Counter how many IMU msgs in the buffer are used. + int used_truth_msg_cntr = 0; + double truth_time; + for (const auto& truth_msg : truth_msg_buffer) { + truth_time = truth_msg.header.stamp.toSec(); + if (truth_time < true_state_server.imu_state.time) { + ++used_truth_msg_cntr; + continue; + } + if (truth_time > time_bound) break; + + // Convert the msgs. + Eigen::Vector3d m_translation; + Eigen::Vector4d m_rotation; + tf::vectorMsgToEigen(truth_msg.transform.translation, m_translation); + + m_rotation[0] = truth_msg.transform.rotation.x; + m_rotation[1] = truth_msg.transform.rotation.y; + m_rotation[2] = truth_msg.transform.rotation.z; + m_rotation[3] = truth_msg.transform.rotation.w; + quaternionNormalize(m_rotation); + // Execute process model. + processTruthtoIMU(truth_time, m_rotation, m_translation); + ++used_truth_msg_cntr; + + } + last_time_bound = time_bound; + + // Set the state ID for the new IMU state. + true_state_server.imu_state.id = IMUState::next_id; + err_state_server.imu_state.id = IMUState::next_id; + // Remove all used Truth msgs. 
+ truth_msg_buffer.erase(truth_msg_buffer.begin(), + truth_msg_buffer.begin()+used_truth_msg_cntr); + + std::ofstream logfile; + + int FileHandler; + char FileBuffer[1024]; + float load = 0; + + FileHandler = open("/proc/loadavg", O_RDONLY); + if( FileHandler >= 0) { + auto file = read(FileHandler, FileBuffer, sizeof(FileBuffer) - 1); + sscanf(FileBuffer, "%f", &load); + close(FileHandler); + } + + auto gt = true_state_server.imu_state.position; + auto gr = true_state_server.imu_state.orientation; + logfile.open("/home/raphael/dev/MSCKF_ws/bag/log.txt", std::ios_base::app); + //ros time, cpu load , ground truth, estimation, ros time + logfile << true_state_server.imu_state.time - time_offset << "; " << load << "; "; + logfile << gt[0] << "; " << gt[1] << "; " << gt[2] << "; " << gr[0] << "; " << gr[1] << "; " << gr[2] << "; " << gr[3] << ";"; + + // estimation + auto et = state_server.imu_state.position; + auto er = state_server.imu_state.orientation; + logfile << et[0] << "; " << et[1] << "; " << et[2] << "; " << er[0] << "; " << er[1] << "; " << er[2] << "; " << er[3] << "; \n" << endl;; + logfile.close(); + /* + // calculate change + delta_position = state_server.imu_state.position - old_imu_state.position; + Eigen::Vector4d delta_orientation_quaternion = quaternionMultiplication(quaternionConjugate(state_server.imu_state.orientation), old_imu_state.orientation); + delta_orientation = Eigen::Vector3d(delta_orientation_quaternion[0]*2, delta_orientation_quaternion[1]*2, delta_orientation_quaternion[2]*2); + // calculate error in position + + // calculate error in change + + Eigen::Vector3d delta_true_position = true_state_server.imu_state.position - old_true_state.position; + Eigen::Vector4d delta_true_orientation = quaternionMultiplication(quaternionConjugate(true_state_server.imu_state.orientation), old_true_state.orientation); + Eigen::Vector3d delta_smallangle_true_orientation = Eigen::Vector3d(delta_true_orientation[0]*2, delta_true_orientation[1]*2, 
delta_true_orientation[2]*2); + Eigen::Vector3d error_delta_position = delta_true_position - delta_position; + Eigen::Vector3d error_delta_orientation = delta_smallangle_true_orientation - delta_orientation; + + Eigen::Vector3d error_position = true_state_server.imu_state.position - state_server.imu_state.position; + Eigen::Vector4d error_orientation_q = quaternionMultiplication(quaternionConjugate(true_state_server.imu_state.orientation), state_server.imu_state.orientation); + Eigen::Vector3d error_orientation = Eigen::Vector3d(error_orientation_q[0]*2, error_orientation_q[1]*2, error_orientation_q[2]*2); + + double relerr = (sqrt(error_delta_position[0]*error_delta_position[0] + error_delta_position[1]*error_delta_position[1] + error_delta_position[2]*error_delta_position[2])/ + sqrt(delta_true_position[0]*delta_true_position[0] + delta_true_position[1]*delta_true_position[1] + delta_true_position[2]*delta_true_position[2])); + + double relOerr = (sqrt(error_delta_orientation[0]*error_delta_orientation[0] + error_delta_orientation[1]*error_delta_orientation[1] + error_delta_orientation[2]*error_delta_orientation[2])/ + sqrt(delta_smallangle_true_orientation[0]*delta_smallangle_true_orientation[0] + delta_smallangle_true_orientation[1]*delta_smallangle_true_orientation[1] + delta_smallangle_true_orientation[2]*delta_smallangle_true_orientation[2])); + + double abserr = (sqrt(error_delta_position[0]*error_delta_position[0] + error_delta_position[1]*error_delta_position[1] + error_delta_position[2]*error_delta_position[2])/ + sqrt(delta_true_position[0]*delta_true_position[0] + delta_true_position[1]*delta_true_position[1] + delta_true_position[2]*delta_true_position[2])); + + + cout << "relative pos error: " << relerr * 100 << "%" << endl; + cout << "relative ori error: " << relOerr * 100 << "%" << endl; + //cout << "absolute pos error: " << + */ + if (eval_time + 1 < ros::Time::now().toSec()) + { + + // calculate change + delta_position = 
state_server.imu_state.position - timed_old_imu_state.position; + Eigen::Vector4d delta_orientation_quaternion = quaternionMultiplication(quaternionConjugate(state_server.imu_state.orientation), timed_old_imu_state.orientation); + delta_orientation = Eigen::Vector3d(delta_orientation_quaternion[0]*2, delta_orientation_quaternion[1]*2, delta_orientation_quaternion[2]*2); + // calculate error in position + + // calculate error in change + + Eigen::Vector3d delta_true_position = true_state_server.imu_state.position - timed_old_true_state.position; + Eigen::Vector4d delta_true_orientation = quaternionMultiplication(quaternionConjugate(true_state_server.imu_state.orientation), timed_old_true_state.orientation); + Eigen::Vector3d delta_smallangle_true_orientation = Eigen::Vector3d(delta_true_orientation[0]*2, delta_true_orientation[1]*2, delta_true_orientation[2]*2); + Eigen::Vector3d error_delta_position = delta_true_position - delta_position; + Eigen::Vector3d error_delta_orientation = delta_smallangle_true_orientation - delta_orientation; + + Eigen::Vector3d error_position = true_state_server.imu_state.position - state_server.imu_state.position; + Eigen::Vector4d error_orientation_q = quaternionMultiplication(quaternionConjugate(true_state_server.imu_state.orientation), state_server.imu_state.orientation); + Eigen::Vector3d error_orientation = Eigen::Vector3d(error_orientation_q[0]*2, error_orientation_q[1]*2, error_orientation_q[2]*2); + + double relerr = (sqrt(error_delta_position[0]*error_delta_position[0] + error_delta_position[1]*error_delta_position[1] + error_delta_position[2]*error_delta_position[2])/ + sqrt(delta_true_position[0]*delta_true_position[0] + delta_true_position[1]*delta_true_position[1] + delta_true_position[2]*delta_true_position[2])); + + double relOerr = (sqrt(error_delta_orientation[0]*error_delta_orientation[0] + error_delta_orientation[1]*error_delta_orientation[1] + error_delta_orientation[2]*error_delta_orientation[2])/ + 
 sqrt(delta_smallangle_true_orientation[0]*delta_smallangle_true_orientation[0] + delta_smallangle_true_orientation[1]*delta_smallangle_true_orientation[1] + delta_smallangle_true_orientation[2]*delta_smallangle_true_orientation[2])); + + double abserr = (sqrt(error_delta_position[0]*error_delta_position[0] + error_delta_position[1]*error_delta_position[1] + error_delta_position[2]*error_delta_position[2])/ + sqrt(delta_true_position[0]*delta_true_position[0] + delta_true_position[1]*delta_true_position[1] + delta_true_position[2]*delta_true_position[2])); + + // cout << "relative pos error: " << relerr * 100 << "%" << endl; + // cout << "relative ori error: " << relOerr * 100 << "%" << endl; + + timed_old_imu_state = state_server.imu_state; + timed_old_true_state = true_state_server.imu_state; + eval_time = ros::Time::now().toSec(); + } +} + +void MsckfVio::processTruthtoIMU(const double& time, + const Vector4d& m_rot, + const Vector3d& m_trans){ + + IMUState& imu_state = true_state_server.imu_state; + double dtime = time - imu_state.time; + + Vector4d& q = imu_state.orientation; + Vector3d& v = imu_state.velocity; + Vector3d& p = imu_state.position; + + double dt = time - imu_state.time; + + v = (m_trans-p)/dt; + p = m_trans; + q = m_rot; + + // Update the state corresponding to null space. + imu_state.orientation_null = imu_state.orientation; + imu_state.position_null = imu_state.position; + imu_state.velocity_null = imu_state.velocity; + + // Update the state info + true_state_server.imu_state.time = time; + +} + void MsckfVio::batchImuProcessing(const double& time_bound) { // Counter how many IMU msgs in the buffer are used. 
int used_imu_msg_cntr = 0; @@ -539,7 +1030,8 @@ void MsckfVio::batchImuProcessing(const double& time_bound) { void MsckfVio::processModel(const double& time, const Vector3d& m_gyro, - const Vector3d& m_acc) { + const Vector3d& m_acc) +{ // Remove the bias from the measured gyro and acceleration IMUState& imu_state = state_server.imu_state; @@ -577,25 +1069,6 @@ void MsckfVio::processModel(const double& time, // Propogate the state using 4th order Runge-Kutta predictNewState(dtime, gyro, acc); - // Modify the transition matrix - Matrix3d R_kk_1 = quaternionToRotation(imu_state.orientation_null); - Phi.block<3, 3>(0, 0) = - quaternionToRotation(imu_state.orientation) * R_kk_1.transpose(); - - Vector3d u = R_kk_1 * IMUState::gravity; - RowVector3d s = (u.transpose()*u).inverse() * u.transpose(); - - Matrix3d A1 = Phi.block<3, 3>(6, 0); - Vector3d w1 = skewSymmetric( - imu_state.velocity_null-imu_state.velocity) * IMUState::gravity; - Phi.block<3, 3>(6, 0) = A1 - (A1*u-w1)*s; - - Matrix3d A2 = Phi.block<3, 3>(12, 0); - Vector3d w2 = skewSymmetric( - dtime*imu_state.velocity_null+imu_state.position_null- - imu_state.position) * IMUState::gravity; - Phi.block<3, 3>(12, 0) = A2 - (A2*u-w2)*s; - // Propogate the state covariance matrix. Matrix Q = Phi*G*state_server.continuous_noise_cov* G.transpose()*Phi.transpose()*dtime; @@ -629,7 +1102,8 @@ void MsckfVio::processModel(const double& time, void MsckfVio::predictNewState(const double& dt, const Vector3d& gyro, - const Vector3d& acc) { + const Vector3d& acc) +{ // TODO: Will performing the forward integration using // the inverse of the quaternion give better accuracy? 
@@ -692,7 +1166,8 @@ void MsckfVio::predictNewState(const double& dt, return; } -void MsckfVio::stateAugmentation(const double& time) { +void MsckfVio::stateAugmentation(const double& time) +{ const Matrix3d& R_i_c = state_server.imu_state.R_imu_cam0; const Vector3d& t_c_i = state_server.imu_state.t_cam0_imu; @@ -731,6 +1206,7 @@ void MsckfVio::stateAugmentation(const double& time) { // Resize the state covariance matrix. size_t old_rows = state_server.state_cov.rows(); size_t old_cols = state_server.state_cov.cols(); + state_server.state_cov.conservativeResize(old_rows+6, old_cols+6); // Rename some matrix blocks for convenience. @@ -746,6 +1222,109 @@ void MsckfVio::stateAugmentation(const double& time) { state_server.state_cov.block<6, 6>(old_rows, old_cols) = J * P11 * J.transpose(); + // Fix the covariance to be symmetric + MatrixXd state_cov_fixed = (state_server.state_cov + + state_server.state_cov.transpose()) / 2.0; + state_server.state_cov = state_cov_fixed; + return; +} + +void MsckfVio::truePhotometricStateAugmentation(const double& time) +{ + const Matrix3d& true_R_i_c = true_state_server.imu_state.R_imu_cam0; + const Vector3d& true_t_c_i = true_state_server.imu_state.t_cam0_imu; + + // Add a new camera state to the state server. 
+ Matrix3d true_R_w_i = quaternionToRotation( + true_state_server.imu_state.orientation); + Matrix3d true_R_w_c = true_R_i_c * true_R_w_i; + Vector3d true_t_c_w = true_state_server.imu_state.position + + true_R_w_i.transpose()*true_t_c_i; + + true_state_server.cam_states[true_state_server.imu_state.id] = + CAMState(true_state_server.imu_state.id); + CAMState& true_cam_state = true_state_server.cam_states[ + true_state_server.imu_state.id]; + + // manage error state size + err_state_server.cam_states[err_state_server.imu_state.id] = + CAMState(err_state_server.imu_state.id); + CAMState& err_cam_state = err_state_server.cam_states[ + err_state_server.imu_state.id]; + + err_cam_state.time = time; + + true_cam_state.time = time; + true_cam_state.orientation = rotationToQuaternion(true_R_w_c); + true_cam_state.position = true_t_c_w; + + true_cam_state.orientation_null = true_cam_state.orientation; + true_cam_state.position_null = true_cam_state.position; + + return; +} + +void MsckfVio::PhotometricStateAugmentation(const double& time) +{ + + const Matrix3d& R_i_c = state_server.imu_state.R_imu_cam0; + const Vector3d& t_c_i = state_server.imu_state.t_cam0_imu; + + // Add a new camera state to the state server. + Matrix3d R_w_i = quaternionToRotation( + state_server.imu_state.orientation); + Matrix3d R_w_c = R_i_c * R_w_i; + Vector3d t_c_w = state_server.imu_state.position + + R_w_i.transpose()*t_c_i; + + state_server.cam_states[state_server.imu_state.id] = + CAMState(state_server.imu_state.id); + CAMState& cam_state = state_server.cam_states[ + state_server.imu_state.id]; + + cam_state.time = time; + cam_state.orientation = rotationToQuaternion(R_w_c); + cam_state.position = t_c_w; + + cam_state.orientation_null = cam_state.orientation; + cam_state.position_null = cam_state.position; + + + // Update the covariance matrix of the state. 
+ // To simplify computation, the matrix J below is the nontrivial block + // in Equation (16) in "A Multi-State Constraint Kalman Filter for Vision + // -aided Inertial Navigation". + Matrix J = Matrix::Zero(); + J.block<3, 3>(0, 0) = R_i_c; + J.block<3, 3>(0, 15) = Matrix3d::Identity(); + J.block<3, 3>(3, 0) = skewSymmetric(R_w_i.transpose()*t_c_i); + //J.block<3, 3>(3, 0) = -R_w_i.transpose()*skewSymmetric(t_c_i); + J.block<3, 3>(3, 12) = Matrix3d::Identity(); + J.block<3, 3>(3, 18) = Matrix3d::Identity(); + + // Resize the state covariance matrix. + size_t old_rows = state_server.state_cov.rows(); + size_t old_cols = state_server.state_cov.cols(); + + // add 7 for camera state + irradiance bias eta = b_l + state_server.state_cov.conservativeResizeLike(Eigen::MatrixXd::Zero(old_rows+7, old_cols+7)); + + // Rename some matrix blocks for convenience. + const Matrix& P11 = + state_server.state_cov.block<21, 21>(0, 0); + const MatrixXd& P12 = + state_server.state_cov.block(0, 21, 21, old_cols-21); + + // Fill in the augmented state covariance. 
+ state_server.state_cov.block(old_rows, 0, 6, old_cols) << J*P11, J*P12; + state_server.state_cov.block(0, old_cols, old_rows, 6) = + state_server.state_cov.block(old_rows, 0, 6, old_cols).transpose(); + state_server.state_cov.block<6, 6>(old_rows, old_cols) = + J * P11 * J.transpose(); + + // Add photometry P_eta and surrounding Zeros + state_server.state_cov(old_rows+6, old_cols+6) = irradiance_frame_bias; + // Fix the covariance to be symmetric MatrixXd state_cov_fixed = (state_server.state_cov + state_server.state_cov.transpose()) / 2.0; @@ -755,7 +1334,8 @@ void MsckfVio::stateAugmentation(const double& time) { } void MsckfVio::addFeatureObservations( - const CameraMeasurementConstPtr& msg) { + const CameraMeasurementConstPtr& msg) +{ StateIDType state_id = state_server.imu_state.id; int curr_feature_num = map_server.size(); @@ -786,10 +1366,717 @@ void MsckfVio::addFeatureObservations( return; } +void MsckfVio::twodotMeasurementJacobian( + const StateIDType& cam_state_id, + const FeatureIDType& feature_id, + MatrixXd& H_x, MatrixXd& H_y, VectorXd& r) +{ + + // Prepare all the required data. 
+ const CAMState& cam_state = state_server.cam_states[cam_state_id]; + const Feature& feature = map_server[feature_id]; + + // Cam0 pose + Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation); + const Vector3d& t_c0_w = cam_state.position; + + // Cam1 pose + Matrix3d R_c0_c1 = CAMState::T_cam0_cam1.linear(); + Matrix3d R_w_c1 = R_c0_c1 * R_w_c0; + Vector3d t_c1_w = t_c0_w - R_w_c1.transpose()*CAMState::T_cam0_cam1.translation(); + + // individual Jacobians + Matrix dh_dC0pij = Matrix::Zero(); + Matrix dh_dC1pij = Matrix::Zero(); + Matrix dh_dGpij = Matrix::Zero(); + Matrix dh_dXplj = Matrix::Zero(); + Matrix dGpj_drhoj = Matrix::Zero(); + Matrix dGpj_XpAj = Matrix::Zero(); + + Matrix dC0pij_dGpij = Matrix::Zero(); + Matrix dC1pij_dGpij = Matrix::Zero(); + Matrix dC0pij_dXplj = Matrix::Zero(); + Matrix dC1pij_dXplj = Matrix::Zero(); + + // one line of the NxN Jacobians + Eigen::Matrix H_rho; + Eigen::Matrix H_plj; + Eigen::Matrix H_pAj; + + auto frame = cam0.moving_window.find(cam_state_id)->second.image; + + auto point = feature.anchorPatch_3d[0]; + + Eigen::Vector3d p_c0 = R_w_c0 * (point-t_c0_w); + Eigen::Vector3d p_c1 = R_w_c1 * (point-t_c1_w); + // add jacobian + + //dh / d{}^Cp_{ij} + dh_dC0pij(0, 0) = 1. / p_c0(2); + dh_dC0pij(1, 1) = 1. / p_c0(2); + dh_dC0pij(0, 2) = -(p_c0(0))/(p_c0(2)*p_c0(2)); + dh_dC0pij(1, 2) = -(p_c0(1))/(p_c0(2)*p_c0(2)); + + //dh / d{}^Cp_{ij} + dh_dC1pij(2, 0) = 1. / p_c1(2); + dh_dC1pij(3, 1) = 1. 
/ p_c1(2); + dh_dC1pij(2, 2) = -(p_c1(0))/(p_c1(2)*p_c1(2)); + dh_dC1pij(3, 2) = -(p_c1(1))/(p_c1(2)*p_c1(2)); + + dC0pij_dGpij = R_w_c0; + dC1pij_dGpij = R_c0_c1 * R_w_c0; + + dC0pij_dXplj.leftCols(3) = skewSymmetric(p_c0); + dC0pij_dXplj.rightCols(3) = -R_w_c0; + + + // d{}^Gp_P{ij} / \rho_i + double rho = feature.anchor_rho; + // Isometry T_anchor_w takes a vector in anchor frame to world frame + dGpj_drhoj = -feature.T_anchor_w.linear() * Eigen::Vector3d(feature.anchorPatch_ideal[(N*N-1)/2].x/(rho*rho), feature.anchorPatch_ideal[(N*N-1)/2].y/(rho*rho), 1/(rho*rho)); + + dGpj_XpAj.block<3, 3>(0, 0) = - feature.T_anchor_w.linear() + * skewSymmetric(Eigen::Vector3d(feature.anchorPatch_ideal[(N*N-1)/2].x/(rho), + feature.anchorPatch_ideal[(N*N-1)/2].y/(rho), + 1/(rho))); + dGpj_XpAj.block<3, 3>(0, 3) = Matrix::Identity(); + + // Intermediate Jakobians + H_rho = dh_dC0pij * dC0pij_dGpij * dGpj_drhoj + dh_dC1pij * dC1pij_dGpij * dGpj_drhoj; // 4 x 1 + H_plj = dh_dC0pij * dC0pij_dXplj + dh_dC1pij * R_c0_c1 * dC0pij_dXplj; // 4 x 6 + H_pAj = dh_dC0pij * dC0pij_dGpij * dGpj_XpAj + dh_dC1pij * dC1pij_dGpij * dGpj_XpAj; // 4 x 6 + + // calculate residual + + //observation + const Vector4d& total_z = feature.observations.find(cam_state_id)->second; + + VectorXd r_i = VectorXd::Zero(4); + + //calculate residual + + r_i[0] = total_z[0] - p_c0(0)/p_c0(2); + r_i[1] = total_z[1] - p_c0(1)/p_c0(2); + r_i[2] = total_z[2] - p_c1(0)/p_c1(2); + r_i[3] = total_z[3] - p_c1(1)/p_c1(2); + + MatrixXd H_xl = MatrixXd::Zero(4, 21+state_server.cam_states.size()*7); + + // set anchor Jakobi + // get position of anchor in cam states + + auto cam_state_anchor = state_server.cam_states.find(feature.observations.begin()->first); + int cam_state_cntr_anchor = std::distance(state_server.cam_states.begin(), cam_state_anchor); + H_xl.block(0, 21+cam_state_cntr_anchor*7, 4, 6) = H_pAj; + + // set frame Jakobi + //get position of current frame in cam states + auto cam_state_iter = 
state_server.cam_states.find(cam_state_id); + int cam_state_cntr = std::distance(state_server.cam_states.begin(), cam_state_iter); + + // set jakobi of state + H_xl.block(0, 21+cam_state_cntr*7, 4, 6) = H_plj; + + H_x = H_xl; + H_y = H_rho; + r = r_i; + + //TODO make this more fluent as well + if(PRINTIMAGES) + { + std::stringstream ss; + ss << "INFO:" << " anchor: " << cam_state_cntr_anchor << " frame: " << cam_state_cntr; + feature.MarkerGeneration(marker_pub, state_server.cam_states); + } + + return; +} + +bool MsckfVio::twodotFeatureJacobian( + const FeatureIDType& feature_id, + const std::vector& cam_state_ids, + MatrixXd& H_x, VectorXd& r) +{ + if(FILTER != 2) + return false; + + const auto& feature = map_server[feature_id]; + + + // Check how many camera states in the provided camera + // id camera has actually seen this feature. + vector valid_cam_state_ids(0); + for (const auto& cam_id : cam_state_ids) { + if (feature.observations.find(cam_id) == + feature.observations.end()) continue; + + if (feature.observations.find(cam_id) == + feature.observations.begin()) continue; + valid_cam_state_ids.push_back(cam_id); + } + + int jacobian_row_size = 0; + jacobian_row_size = 4 * valid_cam_state_ids.size(); + + MatrixXd H_xi = MatrixXd::Zero(jacobian_row_size, + 21+state_server.cam_states.size()*7); + + MatrixXd H_yi = MatrixXd::Zero(jacobian_row_size, 1); + + VectorXd r_i = VectorXd::Zero(jacobian_row_size); + int stack_cntr = 0; + + for (const auto& cam_id : valid_cam_state_ids) { + + MatrixXd H_xl; + MatrixXd H_yl; + + Eigen::VectorXd r_l = VectorXd::Zero(4); + + twodotMeasurementJacobian(cam_id, feature.id, H_xl, H_yl, r_l); + auto cam_state_iter = state_server.cam_states.find(cam_id); + int cam_state_cntr = std::distance( + state_server.cam_states.begin(), cam_state_iter); + + // Stack the Jacobians. 
+ H_xi.block(stack_cntr, 0, H_xl.rows(), H_xl.cols()) = H_xl; + H_yi.block(stack_cntr, 0, H_yl.rows(), H_yl.cols()) = H_yl; + + r_i.segment(stack_cntr, 4) = r_l; + stack_cntr += 4; + } + + + H_xi.conservativeResize(stack_cntr, H_xi.cols()); + H_yi.conservativeResize(stack_cntr, H_yi.cols()); + // Project the residual and Jacobians onto the nullspace + // of H_yj. + + + // get Nullspace + FullPivLU lu(H_yi.transpose()); + MatrixXd A_null_space = lu.kernel(); + + H_x = A_null_space.transpose() * H_xi; + r = A_null_space.transpose() * r_i; + + if(PRINTIMAGES) + { + + ofstream myfile; + myfile.open("/home/raphael/dev/octave/two2octave"); + myfile << "# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST \n" + << "# name: Hx\n" + << "# type: matrix\n" + << "# rows: " << H_xi.rows() << "\n" + << "# columns: " << H_xi.cols() << "\n" + << H_xi << endl; + + myfile << "# name: Hy\n" + << "# type: matrix\n" + << "# rows: " << H_yi.rows() << "\n" + << "# columns: " << H_yi.cols() << "\n" + << H_yi << endl; + + + myfile << "# name: r\n" + << "# type: matrix\n" + << "# rows: " << r_i.rows() << "\n" + << "# columns: " << 1 << "\n" + << r_i << endl; + + + myfile << "# name: A\n" + << "# type: matrix\n" + << "# rows: " << A_null_space.rows() << "\n" + << "# columns: " << 1 << "\n" + << A_null_space << endl; + + myfile.close(); + std::cout << "resume playback" << std::endl; + nh.setParam("/play_bag", true); + } + return true; +} + + +bool MsckfVio::PhotometricPatchPointResidual( + const StateIDType& cam_state_id, + const Feature& feature, + VectorXd& r) +{ + VectorXd r_photo = VectorXd::Zero(2*N*N); + int count = 0; + const CAMState& cam_state = state_server.cam_states[cam_state_id]; + + //estimate photometric measurement + std::vector estimate_irradiance; + IlluminationParameter estimated_illumination; + std::vector estimate_photo_z_c0, estimate_photo_z_c1; + std::vector photo_z_c0, photo_z_c1; + + // estimate irradiance based on anchor frame + + 
feature.estimate_FrameIrradiance(cam_state, cam_state_id, cam0, estimate_irradiance, estimated_illumination); + for (auto& estimate_irradiance_j : estimate_irradiance) + estimate_photo_z_c0.push_back (estimate_irradiance_j);// * + //estimated_illumination.frame_gain * estimated_illumination.feature_gain + + //estimated_illumination.frame_bias + estimated_illumination.feature_bias); + feature.estimate_FrameIrradiance(cam_state, cam_state_id, cam1, estimate_irradiance, estimated_illumination); + for (auto& estimate_irradiance_j : estimate_irradiance) + estimate_photo_z_c1.push_back (estimate_irradiance_j);// * + //estimated_illumination.frame_gain * estimated_illumination.feature_gain + + //estimated_illumination.frame_bias + estimated_illumination.feature_bias); + + // irradiance measurement around feature point in c0 and c1 + std::vector true_irradiance_c0, true_irradiance_c1; + cv::Point2f p_f_c0(feature.observations.find(cam_state_id)->second(0), feature.observations.find(cam_state_id)->second(1)); + cv::Point2f p_f_c1(feature.observations.find(cam_state_id)->second(2), feature.observations.find(cam_state_id)->second(3)); + + p_f_c0 = image_handler::distortPoint(p_f_c0, cam0.intrinsics, cam0.distortion_model, cam0.distortion_coeffs); + p_f_c1 = image_handler::distortPoint(p_f_c1, cam1.intrinsics, cam1.distortion_model, cam1.distortion_coeffs); + + cv::Mat current_image_c0 = cam0.moving_window.find(cam_state_id)->second.image; + cv::Mat current_image_c1 = cam1.moving_window.find(cam_state_id)->second.image; + for(int i = 0; i current_image_c0.cols-1 or p_in_c0.y < 0 or p_in_c0.y > current_image_c0.rows-1) + return false; + if(p_in_c1.x < 0 or p_in_c1.x > current_image_c1.cols-1 or p_in_c1.y < 0 or p_in_c1.y > current_image_c1.rows-1) + return false; + + // add observation + photo_z_c0.push_back(feature.PixelIrradiance(p_in_c0, current_image_c0)); + photo_z_c1.push_back(feature.PixelIrradiance(p_in_c1, current_image_c1)); + + // calculate photom. residual acc. 
to paper + //r_photo(count*2) = photo_z_c0[count] - estimate_photo_z_c0[count]; + //r_photo(count*2+1) = photo_z_c1[count] - estimate_photo_z_c1[count]; + + // calculate photom. residual alternating between frames + r_photo(count*2) = true_irradiance_c0[count] - photo_z_c0[count]; + r_photo(count*2+1) = true_irradiance_c1[count] - photo_z_c1[count]; + count++; + } + r = r_photo; + return true; +} + +// generates the jacobian of one patch point regarding rho, anchor and current frame +bool MsckfVio::PhotometricPatchPointJacobian( + const CAMState& cam_state, + const StateIDType& cam_state_id, + const Feature& feature, + Eigen::Vector3d point, + int count, + Eigen::Matrix& H_rhoj, + Eigen::Matrix& H_plj, + Eigen::Matrix& H_pAj, + Matrix& dI_dhj) +{ + + const StateIDType anchor_state_id = feature.observations.begin()->first; + const CAMState anchor_state = state_server.cam_states[anchor_state_id]; + + // Cam0 pose. + Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation); + const Vector3d& t_c0_w = cam_state.position; + + // Cam1 pose. 
+ Matrix3d R_c0_c1 = CAMState::T_cam0_cam1.linear(); + Matrix3d R_w_c1 = CAMState::T_cam0_cam1.linear() * R_w_c0; + Vector3d t_c1_w = t_c0_w - R_w_c1.transpose()*CAMState::T_cam0_cam1.translation(); + + + // individual Jacobians + /*Matrix */dI_dhj = Matrix::Zero(); + + Matrix dh_dC0pij = Matrix::Zero(); + Matrix dh_dC1pij = Matrix::Zero(); + Matrix dh_dGpij = Matrix::Zero(); + Matrix dh_dXplj = Matrix::Zero(); + Matrix dGpj_drhoj = Matrix::Zero(); + Matrix dGpj_XpAj = Matrix::Zero(); + + Matrix dC0pij_dGpij = Matrix::Zero(); + Matrix dC1pij_dGpij = Matrix::Zero(); + Matrix dC0pij_dXplj = Matrix::Zero(); + Matrix dC1pij_dXplj = Matrix::Zero(); + + // irradiance jacobian + double dx_c0, dy_c0, dx_c1, dy_c1; + + Eigen::Vector3d p_c0 = R_w_c0 * (point-t_c0_w); + Eigen::Vector3d p_c1 = R_w_c1 * (point-t_c1_w); + + cv::Point2f p_in_c0 = feature.projectPositionToCamera(cam_state, cam_state_id, cam0, point); + cv::Point2f p_in_c1 = feature.projectPositionToCamera(cam_state, cam_state_id, cam1, point); + + cv::Point2f p_in_anchor = feature.projectPositionToCamera(anchor_state, anchor_state_id, cam0, point); + + // calculate derivation for anchor frame, use position for derivation calculation + // frame derivative calculated convoluting with kernel [-1, 0, 1] + dx_c0 = feature.CompleteCvKernel(p_in_c0, cam_state_id, cam0, "Sobel_x"); + dy_c0 = feature.CompleteCvKernel(p_in_c0, cam_state_id, cam0, "Sobel_y"); + + dx_c1 = feature.CompleteCvKernel(p_in_c1, cam_state_id, cam1, "Sobel_x"); + dy_c1 = feature.CompleteCvKernel(p_in_c1, cam_state_id, cam1, "Sobel_y"); + + dI_dhj(0, 0) = dx_c0 * cam0.intrinsics[0]; + dI_dhj(0, 1) = dy_c0 * cam0.intrinsics[1]; + dI_dhj(1, 2) = dx_c1 * cam1.intrinsics[0]; + dI_dhj(1, 3) = dy_c1 * cam1.intrinsics[1]; + + // add jacobian + + //dh / d{}^Cp_{ij} + dh_dC0pij(0, 0) = 1. / p_c0(2); + dh_dC0pij(1, 1) = 1. 
/ p_c0(2); + dh_dC0pij(0, 2) = -(p_c0(0))/(p_c0(2)*p_c0(2)); + dh_dC0pij(1, 2) = -(p_c0(1))/(p_c0(2)*p_c0(2)); + + //dh / d{}^Cp_{ij} + dh_dC1pij(2, 0) = 1. / p_c1(2); + dh_dC1pij(3, 1) = 1. / p_c1(2); + dh_dC1pij(2, 2) = -(p_c1(0))/(p_c1(2)*p_c1(2)); + dh_dC1pij(3, 2) = -(p_c1(1))/(p_c1(2)*p_c1(2)); + + dC0pij_dGpij = R_w_c0; + dC1pij_dGpij = R_c0_c1 * R_w_c0; + + dC0pij_dXplj.leftCols(3) = skewSymmetric(p_c0); + dC0pij_dXplj.rightCols(3) = -R_w_c0; + + + // d{}^Gp_P{ij} / \rho_i + double rho = feature.anchor_rho; + // Isometry T_anchor_w takes a vector in anchor frame to world frame + dGpj_drhoj = -feature.T_anchor_w.linear() * Eigen::Vector3d(feature.anchorPatch_ideal[(N*N-1)/2].x/(rho*rho), feature.anchorPatch_ideal[(N*N-1)/2].y/(rho*rho), 1/(rho*rho)); + + dGpj_XpAj.block<3, 3>(0, 0) = - feature.T_anchor_w.linear() + * skewSymmetric(Eigen::Vector3d(feature.anchorPatch_ideal[(N*N-1)/2].x/(rho), + feature.anchorPatch_ideal[(N*N-1)/2].y/(rho), + 1/(rho))); + dGpj_XpAj.block<3, 3>(0, 3) = Matrix::Identity(); + + // Intermediate Jakobians + H_rhoj = dI_dhj * dh_dC0pij * dC0pij_dGpij * dGpj_drhoj + dI_dhj * dh_dC1pij * dC1pij_dGpij * dGpj_drhoj; // 4 x 1 + H_plj = dI_dhj * dh_dC0pij * dC0pij_dXplj + dI_dhj * dh_dC1pij * R_c0_c1 * dC0pij_dXplj; // 4 x 6 + H_pAj = dI_dhj * dh_dC0pij * dC0pij_dGpij * dGpj_XpAj + dI_dhj * dh_dC1pij * dC1pij_dGpij * dGpj_XpAj; // 4 x 6 + + // check if any direction not large enough for eval + /*if((dI_dhj(0, 0) < 0.1 or dI_dhj(0, 1) < 0.1) and (dI_dhj(1, 2) < 0.1 or dI_dhj(1, 3) < 0.1)) + return false; + */ + // check if point nullspaceable + if (H_rhoj(0, 0) != 0) + return true; + + return false; +} + +bool MsckfVio::PhotometricMeasurementJacobian( + const StateIDType& cam_state_id, + const FeatureIDType& feature_id, + MatrixXd& H_x, MatrixXd& H_y, VectorXd& r) +{ + // Prepare all the required data. 
+ const CAMState& cam_state = state_server.cam_states[cam_state_id]; + const Feature& feature = map_server[feature_id]; + + //photometric observation + VectorXd r_photo; + + // one line of the NxN Jacobians + Eigen::Matrix H_rhoj; + Eigen::Matrix H_plj; + Eigen::Matrix H_pAj; + + Eigen::MatrixXd dI_dh(2* N*N, 4); + + // combined Jacobians + Eigen::MatrixXd H_rho(2 * N*N, 1); + Eigen::MatrixXd H_pl(2 * N*N, 6); + Eigen::MatrixXd H_pA(2 * N*N, 6); + + // calcualte residual of patch + if (not PhotometricPatchPointResidual(cam_state_id, feature, r_photo)) + return false; + + //cout << "r\n" << r_photo << endl; + // calculate jacobian for patch + int count = 0; + bool valid = false; + Matrix dI_dhj;// = Matrix::Zero(); + int valid_count = 0; + for (auto point : feature.anchorPatch_3d) + { + // get jacobi of single point in patch + if (PhotometricPatchPointJacobian(cam_state, cam_state_id, feature, point, count, H_rhoj, H_plj, H_pAj, dI_dhj)) + { + valid_count++; + valid = true; + } + + // stack point into entire jacobi + H_rho.block<2, 1>(count*2, 0) = H_rhoj; + H_pl.block<2, 6>(count*2, 0) = H_plj; + H_pA.block<2, 6>(count*2, 0) = H_pAj; + + dI_dh.block<2, 4>(count*2, 0) = dI_dhj; + + count++; + } + // cout << "valid: " << valid_count << "/" << feature.anchorPatch_3d.size() << endl; + // Eigen::Matrix h_photo = (dI_dh.transpose() * dI_dh).inverse() * dI_dh.transpose() * r_photo; + // cout << "h photo: \n" << h_photo << endl; + + // construct the jacobian structure needed for nullspacing + MatrixXd H_xl; + MatrixXd H_yl; + + ConstructJacobians(H_rho, H_pl, H_pA, feature, cam_state_id, H_xl, H_yl); + + // set to return values + H_x = H_xl; + H_y = H_yl; + r = r_photo; + + if(PRINTIMAGES) + { + // pregenerating information for visualization + std::stringstream ss; + auto cam_state_anchor = state_server.cam_states.find(feature.observations.begin()->first); + int cam_state_cntr_anchor = std::distance(state_server.cam_states.begin(), cam_state_anchor); + //get position of 
current frame in cam states + auto cam_state_iter = state_server.cam_states.find(cam_state_id); + int cam_state_cntr = std::distance(state_server.cam_states.begin(), cam_state_iter); + + ss << "INFO:" << " anchor: " << cam_state_cntr_anchor << " frame: " << cam_state_cntr; + + // visualizing functions + feature.MarkerGeneration(marker_pub, state_server.cam_states); + feature.VisualizePatch(cam_state, cam_state_id, cam1, r_photo, ss); + //feature.VisualizeKernel(cam_state, cam_state_id, cam0); + } + + if(valid) + return true; + else + return false; +} + +// uses the calcualted jacobians to construct the final Hx Hy jacobians used for nullspacing +bool MsckfVio::ConstructJacobians(Eigen::MatrixXd& H_rho, + Eigen::MatrixXd& H_pl, + Eigen::MatrixXd& H_pA, + const Feature& feature, + const StateIDType& cam_state_id, + Eigen::MatrixXd& H_xl, + Eigen::MatrixXd& H_yl) +{ + H_xl = MatrixXd::Zero(2*N*N, 21+state_server.cam_states.size()*7); + H_yl = MatrixXd::Zero(2*N*N, 1); + + // get position of anchor in cam states + auto cam_state_anchor = state_server.cam_states.find(feature.observations.begin()->first); + int cam_state_cntr_anchor = std::distance(state_server.cam_states.begin(), cam_state_anchor); + // set anchor Jakobi + H_xl.block(0, 21+cam_state_cntr_anchor*7, 2*N*N, 6) = H_pA; + + //get position of current frame in cam states + auto cam_state_iter = state_server.cam_states.find(cam_state_id); + // set frame Jakobi + int cam_state_cntr = std::distance(state_server.cam_states.begin(), cam_state_iter); + + // set jakobi of state + H_xl.block(0, 21+cam_state_cntr*7, 2*N*N, 6) = H_pl; + + // set ones for irradiance bias + // H_xl.block(0, 21+cam_state_cntr*7+6, N*N, 1) = Eigen::ArrayXd::Ones(N*N); + + // set irradiance error Block + //H_yl.block(0, 0, N*N, N*N) = /* estimated_illumination.feature_gain * estimated_illumination.frame_gain * */ Eigen::MatrixXd::Identity(N*N, N*N); + + + /*for(int i = 0; i< N*N; i++) + H_yl(i, N*N+cam_state_cntr) = estimate_irradiance[i]; + 
*/ + + H_yl.block(0, 0, H_rho.rows(), H_rho.cols()) = H_rho; +} + + +bool MsckfVio::PhotometricFeatureJacobian( + const FeatureIDType& feature_id, + const std::vector& cam_state_ids, + MatrixXd& H_x, VectorXd& r) +{ + if(FILTER != 1) + return false; + + const auto& feature = map_server[feature_id]; + + // Check how many camera states in the provided camera + // id camera has actually seen this feature. + vector valid_cam_state_ids(0); + for (const auto& cam_id : cam_state_ids) { + if (feature.observations.find(cam_id) == + feature.observations.end()) continue; + + if (feature.observations.find(cam_id) == feature.observations.begin()) + continue; + valid_cam_state_ids.push_back(cam_id); + } + + int jacobian_row_size = 0; + + // stacked feature jacobians + MatrixXd H_xi; + MatrixXd H_yi; + VectorXd r_i; + + // temporary measurement jacobians + MatrixXd H_xl; + MatrixXd H_yl; + Eigen::VectorXd r_l; + int stack_cntr = 0; + bool first = true; + + // go through every valid state the feature was observed in + for (const auto& cam_id : valid_cam_state_ids) { + + // skip observation if measurement is not valid + if(not PhotometricMeasurementJacobian(cam_id, feature.id, H_xl, H_yl, r_l)) + { + continue; + } + + // set size of stacking jacobians, once the returned jacobians are known + if(first) + { + first = not first; + jacobian_row_size = r_l.size() * valid_cam_state_ids.size(); + H_xi = MatrixXd::Zero(jacobian_row_size, H_xl.cols()); + H_yi = MatrixXd::Zero(jacobian_row_size, H_yl.cols()); // CHANGED N*N+1 to 1 + r_i = VectorXd::Zero(jacobian_row_size); + } + auto cam_state_iter = state_server.cam_states.find(cam_id); + int cam_state_cntr = std::distance( + state_server.cam_states.begin(), cam_state_iter); + + // Stack the Jacobians. 
+ H_xi.block(stack_cntr, 0, H_xl.rows(), H_xl.cols()) = H_xl; + H_yi.block(stack_cntr, 0, H_yl.rows(), H_yl.cols()) = H_yl; + r_i.segment(stack_cntr, r_l.size()) = r_l; + stack_cntr += r_l.size(); + } + + // if not enough to propper nullspace (in paper implementation) + if(stack_cntr < r_l.size()) + return false; + + // Project the residual and Jacobians onto the nullspace + // of H_yj. + + // get Nullspace + + bool valid = false; + for(int i = 0; i < H_yi.rows(); i++) + if(H_yi(i,0) != 0) + valid = true; + + + FullPivLU lu(H_yi.transpose()); + MatrixXd A_null_space = lu.kernel(); + + H_x = A_null_space.transpose() * H_xi; + r = A_null_space.transpose() * r_i; + //cout << "\nx\n" << H_x.colPivHouseholderQr().solve(r) << endl; + + if(PRINTIMAGES) + { + ofstream myfile; + myfile.open ("/home/raphael/dev/MSCKF_ws/log.txt"); + myfile << "\nHxi" << endl; + for(int i = 0; i < H_xi.rows(); i++) + myfile << H_xi.block(i, 0, 1, H_xi.cols()) << ";"; + + myfile << "\nr" << endl; + for(int i = 0; i < r_i.rows(); i++) + myfile << r.segment(i, 1) << ";"; + + myfile << "\nHyi" << endl; + for(int i = 0; i < H_yi.rows(); i++) + myfile << H_yi.block(i, 0, 1, H_yi.cols()) << ";"; + + myfile << "A_null_space" << endl; + for(int i = 0; i < A_null_space.rows(); i++) + myfile << A_null_space.block(i, 0, 1, A_null_space.cols()) << ";"; + + + myfile.close(); + + //octave + myfile.open("/home/raphael/dev/octave/log2octave"); + myfile << "# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST \n" + << "# name: Hx\n" + << "# type: matrix\n" + << "# rows: " << H_xi.rows() << "\n" + << "# columns: " << H_xi.cols() << "\n" + << H_xi << endl; + + myfile << "# name: Hy\n" + << "# type: matrix\n" + << "# rows: " << H_yi.rows() << "\n" + << "# columns: " << H_yi.cols() << "\n" + << H_yi << endl; + + myfile << "# name: r\n" + << "# type: matrix\n" + << "# rows: " << r_i.rows() << "\n" + << "# columns: " << 1 << "\n" + << r_i << endl; + + myfile << "# name: A\n" + << "# type: matrix\n" + << "# rows: 
" << A_null_space.rows() << "\n" + << "# columns: " << A_null_space.cols() << "\n" + << A_null_space << endl; + + myfile << "# name: P\n" + << "# type: matrix\n" + << "# rows: " << state_server.state_cov.rows() << "\n" + << "# columns: " << state_server.state_cov.rows() << "\n" + << state_server.state_cov << endl; + + + myfile.close(); + cout << "---------- LOGGED -------- " << endl; + } + + if(valid) + return true; + return false; +} + void MsckfVio::measurementJacobian( const StateIDType& cam_state_id, const FeatureIDType& feature_id, - Matrix& H_x, Matrix& H_f, Vector4d& r) { + Matrix& H_x, Matrix& H_f, Vector4d& r) +{ // Prepare all the required data. const CAMState& cam_state = state_server.cam_states[cam_state_id]; @@ -828,6 +2115,8 @@ void MsckfVio::measurementJacobian( dz_dpc1(3, 2) = -p_c1(1) / (p_c1(2)*p_c1(2)); Matrix dpc0_dxc = Matrix::Zero(); + + // original jacobi dpc0_dxc.leftCols(3) = skewSymmetric(p_c0); dpc0_dxc.rightCols(3) = -R_w_c0; @@ -841,28 +2130,23 @@ void MsckfVio::measurementJacobian( H_x = dz_dpc0*dpc0_dxc + dz_dpc1*dpc1_dxc; H_f = dz_dpc0*dpc0_dpg + dz_dpc1*dpc1_dpg; - // Modifty the measurement Jacobian to ensure - // observability constrain. - Matrix A = H_x; - Matrix u = Matrix::Zero(); - u.block<3, 1>(0, 0) = quaternionToRotation( - cam_state.orientation_null) * IMUState::gravity; - u.block<3, 1>(3, 0) = skewSymmetric( - p_w-cam_state.position_null) * IMUState::gravity; - H_x = A - A*u*(u.transpose()*u).inverse()*u.transpose(); - H_f = -H_x.block<4, 3>(0, 3); - // Compute the residual. 
r = z - Vector4d(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2), p_c1(0)/p_c1(2), p_c1(1)/p_c1(2)); + // cout << "h reg: \n" << r(0) << "\n" << r(1) << endl; return; } -void MsckfVio::featureJacobian( +bool MsckfVio::featureJacobian( const FeatureIDType& feature_id, const std::vector& cam_state_ids, - MatrixXd& H_x, VectorXd& r) { + MatrixXd& H_x, VectorXd& r) +{ + + + if(FILTER != 0) + return false; const auto& feature = map_server[feature_id]; @@ -880,7 +2164,7 @@ void MsckfVio::featureJacobian( jacobian_row_size = 4 * valid_cam_state_ids.size(); MatrixXd H_xj = MatrixXd::Zero(jacobian_row_size, - 21+state_server.cam_states.size()*6); + 21+state_server.cam_states.size()*7); MatrixXd H_fj = MatrixXd::Zero(jacobian_row_size, 3); VectorXd r_j = VectorXd::Zero(jacobian_row_size); int stack_cntr = 0; @@ -897,7 +2181,7 @@ void MsckfVio::featureJacobian( state_server.cam_states.begin(), cam_state_iter); // Stack the Jacobians. - H_xj.block<4, 6>(stack_cntr, 21+6*cam_state_cntr) = H_xi; + H_xj.block<4, 6>(stack_cntr, 21+7*cam_state_cntr) = H_xi; H_fj.block<4, 3>(stack_cntr, 0) = H_fi; r_j.segment<4>(stack_cntr) = r_i; stack_cntr += 4; @@ -905,26 +2189,110 @@ void MsckfVio::featureJacobian( // Project the residual and Jacobians onto the nullspace // of H_fj. 
+ + /* JacobiSVD svd_helper(H_fj, ComputeFullU | ComputeThinV); + int sv_size = 0; + Eigen::VectorXd singularValues = svd_helper.singularValues(); + for(int i = 0; i < singularValues.size(); i++) + if(singularValues[i] > 1e-5) + sv_size++; + cout << "sv size: " << sv_size << endl; + MatrixXd A = svd_helper.matrixU().rightCols( - jacobian_row_size - 3); + jacobian_row_size - 3); + */ + FullPivLU lu(H_fj.transpose()); + MatrixXd A = lu.kernel(); H_x = A.transpose() * H_xj; r = A.transpose() * r_j; - return; + Eigen::MatrixXd xres = H_x.colPivHouseholderQr().solve(r); + // stop playing bagfile if printing images + if(PRINTIMAGES) + { + + ofstream myfile; + myfile.open("/home/raphael/dev/octave/org2octave"); + myfile << "# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST \n" + << "# name: Hx\n" + << "# type: matrix\n" + << "# rows: " << H_xj.rows() << "\n" + << "# columns: " << H_xj.cols() << "\n" + << H_xj << endl; + + myfile << "# name: Hy\n" + << "# type: matrix\n" + << "# rows: " << H_fj.rows() << "\n" + << "# columns: " << H_fj.cols() << "\n" + << H_fj << endl; + + myfile << "# name: r\n" + << "# type: matrix\n" + << "# rows: " << r_j.rows() << "\n" + << "# columns: " << 1 << "\n" + << r_j << endl; + + + myfile << "# name: xres\n" + << "# type: matrix\n" + << "# rows: " << xres.rows() << "\n" + << "# columns: " << 1 << "\n" + << xres << endl; + + myfile.close(); + + myfile.open("/home/raphael/dev/octave/org2octave"); + myfile << "# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST \n" + << "# name: Hx\n" + << "# type: matrix\n" + << "# rows: " << H_xj.rows() << "\n" + << "# columns: " << H_xj.cols() << "\n" + << H_xj << endl; + + myfile << "# name: Hy\n" + << "# type: matrix\n" + << "# rows: " << H_fj.rows() << "\n" + << "# columns: " << H_fj.cols() << "\n" + << H_fj << endl; + + myfile << "# name: r\n" + << "# type: matrix\n" + << "# rows: " << r_j.rows() << "\n" + << "# columns: " << 1 << "\n" + << r_j << endl; + + myfile << "# name: A\n" + << "# 
type: matrix\n" + << "# rows: " << A.rows() << "\n" + << "# columns: " << A.cols() << "\n" + << A << endl; + + myfile << "# name: P\n" + << "# type: matrix\n" + << "# rows: " << state_server.state_cov.rows() << "\n" + << "# columns: " << state_server.state_cov.rows() << "\n" + << state_server.state_cov << endl; + + + myfile.close(); + } + + return true; + } -void MsckfVio::measurementUpdate( - const MatrixXd& H, const VectorXd& r) { - - if (H.rows() == 0 || r.rows() == 0) return; +void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) { + if (H.rows() == 0 || r.rows() == 0) + return; // Decompose the final Jacobian matrix to reduce computational // complexity as in Equation (28), (29). MatrixXd H_thin; VectorXd r_thin; - + + // QR decomposition to make stuff faster if (H.rows() > H.cols()) { // Convert H to a sparse matrix. SparseMatrix H_sparse = H.sparseView(); @@ -939,33 +2307,58 @@ void MsckfVio::measurementUpdate( (spqr_helper.matrixQ().transpose() * H).evalTo(H_temp); (spqr_helper.matrixQ().transpose() * r).evalTo(r_temp); - H_thin = H_temp.topRows(21+state_server.cam_states.size()*6); - r_thin = r_temp.head(21+state_server.cam_states.size()*6); + H_thin = H_temp.topRows(21+state_server.cam_states.size()*7); + r_thin = r_temp.head(21+state_server.cam_states.size()*7); - //HouseholderQR qr_helper(H); - //MatrixXd Q = qr_helper.householderQ(); - //MatrixXd Q1 = Q.leftCols(21+state_server.cam_states.size()*6); - - //H_thin = Q1.transpose() * H; - //r_thin = Q1.transpose() * r; } else { H_thin = H; r_thin = r; - } + } // Compute the Kalman gain. 
const MatrixXd& P = state_server.state_cov; + MatrixXd S = H_thin*P*H_thin.transpose() + Feature::observation_noise*MatrixXd::Identity( H_thin.rows(), H_thin.rows()); - //MatrixXd K_transpose = S.fullPivHouseholderQr().solve(H_thin*P); + + + + //MatrixXd K_transpose = S.fullPivHouseholderQr().solve(H*P); MatrixXd K_transpose = S.ldlt().solve(H_thin*P); MatrixXd K = K_transpose.transpose(); - // Compute the error of the state. - VectorXd delta_x = K * r_thin; + VectorXd delta_x = K * r; + + // cout << "reg rotate: " << delta_x[0] << ", " << delta_x[1] << ", " << delta_x[2] << endl; + // cout << "reg: " << delta_x[12] << ", " << delta_x[13] << ", " << delta_x[14] << endl; + + if(FILTER != 0) return; + + if(PRINTIMAGES) + { + //octave + ofstream myfile; + + myfile.open("/home/raphael/dev/octave/measurement2octave"); + myfile << "# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST \n" + << "# name: K\n" + << "# type: matrix\n" + << "# rows: " << K.rows() << "\n" + << "# columns: " << K.cols() << "\n" + << K << endl; + + myfile << "# name: r\n" + << "# type: matrix\n" + << "# rows: " << r.rows() << "\n" + << "# columns: " << r.cols() << "\n" + << r << endl; + + myfile.close(); + } // Update the IMU state. + const VectorXd& delta_x_imu = delta_x.head<21>(); if (//delta_x_imu.segment<3>(0).norm() > 0.15 || @@ -998,11 +2391,12 @@ void MsckfVio::measurementUpdate( auto cam_state_iter = state_server.cam_states.begin(); for (int i = 0; i < state_server.cam_states.size(); ++i, ++cam_state_iter) { - const VectorXd& delta_x_cam = delta_x.segment<6>(21+i*6); + const VectorXd& delta_x_cam = delta_x.segment(21+i*7, 6); const Vector4d dq_cam = smallAngleQuaternion(delta_x_cam.head<3>()); cam_state_iter->second.orientation = quaternionMultiplication( dq_cam, cam_state_iter->second.orientation); cam_state_iter->second.position += delta_x_cam.tail<3>(); + cam_state_iter->second.illumination.frame_bias += delta_x(21+i*7+6); } // Update state covariance. 
@@ -1019,31 +2413,278 @@ void MsckfVio::measurementUpdate( return; } -bool MsckfVio::gatingTest( - const MatrixXd& H, const VectorXd& r, const int& dof) { +void MsckfVio::twoMeasurementUpdate(const MatrixXd& H, const VectorXd& r) { + + + if (H.rows() == 0 || r.rows() == 0) + { + return; + } + // Decompose the final Jacobian matrix to reduce computational + // complexity as in Equation (28), (29). + MatrixXd H_thin; + VectorXd r_thin; + + if (H.rows() > H.cols()) { + // Convert H to a sparse matrix. + SparseMatrix H_sparse = H.sparseView(); + + // Perform QR decompostion on H_sparse. + SPQR > spqr_helper; + spqr_helper.setSPQROrdering(SPQR_ORDERING_NATURAL); + spqr_helper.compute(H_sparse); + + MatrixXd H_temp; + VectorXd r_temp; + (spqr_helper.matrixQ().transpose() * H).evalTo(H_temp); + (spqr_helper.matrixQ().transpose() * r).evalTo(r_temp); + + H_thin = H_temp.topRows(21+state_server.cam_states.size()*7); + r_thin = r_temp.head(21+state_server.cam_states.size()*7); + + //HouseholderQR qr_helper(H); + //MatrixXd Q = qr_helper.householderQ(); + //MatrixXd Q1 = Q.leftCols(21+state_server.cam_states.size()*6); + + //H_thin = Q1.transpose() * H; + //r_thin = Q1.transpose() * r; + } else { + H_thin = H; + r_thin = r; + } + + + + // Compute the Kalman gain. + const MatrixXd& P = state_server.state_cov; + MatrixXd S = H_thin*P*H_thin.transpose() + + Feature::observation_noise*MatrixXd::Identity( + H_thin.rows(), H_thin.rows()); + //MatrixXd K_transpose = S.fullPivHouseholderQr().solve(H*P); + MatrixXd K_transpose = S.ldlt().solve(H_thin*P); + MatrixXd K = K_transpose.transpose(); + // Compute the error of the state. + + VectorXd delta_x = K * r; + // Update the IMU state. 
+ if (FILTER != 2) return; + + //cout << "two: " << delta_x[12] << ", " << delta_x[13] << ", " << delta_x[14] << endl; + + delta_position = Eigen::Vector3d(delta_x[12], delta_x[13], delta_x[14]); + delta_orientation = Eigen::Vector3d(delta_x[0], delta_x[1], delta_x[2]); + + const VectorXd& delta_x_imu = delta_x.head<21>(); + + if (//delta_x_imu.segment<3>(0).norm() > 0.15 || + //delta_x_imu.segment<3>(3).norm() > 0.15 || + delta_x_imu.segment<3>(6).norm() > 0.5 || + //delta_x_imu.segment<3>(9).norm() > 0.5 || + delta_x_imu.segment<3>(12).norm() > 1.0) { + printf("delta velocity: %f\n", delta_x_imu.segment<3>(6).norm()); + printf("delta position: %f\n", delta_x_imu.segment<3>(12).norm()); + ROS_WARN("Update change is too large."); + //return; + } + + const Vector4d dq_imu = + smallAngleQuaternion(delta_x_imu.head<3>()); + state_server.imu_state.orientation = quaternionMultiplication( + dq_imu, state_server.imu_state.orientation); + state_server.imu_state.gyro_bias += delta_x_imu.segment<3>(3); + state_server.imu_state.velocity += delta_x_imu.segment<3>(6); + state_server.imu_state.acc_bias += delta_x_imu.segment<3>(9); + state_server.imu_state.position += delta_x_imu.segment<3>(12); + + const Vector4d dq_extrinsic = + smallAngleQuaternion(delta_x_imu.segment<3>(15)); + state_server.imu_state.R_imu_cam0 = quaternionToRotation( + dq_extrinsic) * state_server.imu_state.R_imu_cam0; + state_server.imu_state.t_cam0_imu += delta_x_imu.segment<3>(18); + + // Update the camera states. 
+ auto cam_state_iter = state_server.cam_states.begin(); + for (int i = 0; i < state_server.cam_states.size(); + ++i, ++cam_state_iter) { + const VectorXd& delta_x_cam = delta_x.segment(21+i*7, 6); + const Vector4d dq_cam = smallAngleQuaternion(delta_x_cam.head<3>()); + cam_state_iter->second.orientation = quaternionMultiplication( + dq_cam, cam_state_iter->second.orientation); + cam_state_iter->second.position += delta_x_cam.tail<3>(); + cam_state_iter->second.illumination.frame_bias += delta_x(21+i*7+6); + } + + // Update state covariance. + MatrixXd I_KH = MatrixXd::Identity(K.rows(), H_thin.cols()) - K*H_thin; + //state_server.state_cov = I_KH*state_server.state_cov*I_KH.transpose() + + // K*K.transpose()*Feature::observation_noise; + state_server.state_cov = I_KH*state_server.state_cov; + + // Fix the covariance to be symmetric + MatrixXd state_cov_fixed = (state_server.state_cov + + state_server.state_cov.transpose()) / 2.0; + state_server.state_cov = state_cov_fixed; + return; +} + + +void MsckfVio::photometricMeasurementUpdate(const MatrixXd& H, const VectorXd& r) { + + if (H.rows() == 0 || r.rows() == 0) + return; + // Decompose the final Jacobian matrix to reduce computational + // complexity as in Equation (28), (29). + MatrixXd H_thin; + VectorXd r_thin; + + // QR decomposition to make stuff faster + if (H.rows() > H.cols()) { + // Convert H to a sparse matrix. + SparseMatrix H_sparse = H.sparseView(); + + // Perform QR decompostion on H_sparse. + SPQR > spqr_helper; + spqr_helper.setSPQROrdering(SPQR_ORDERING_NATURAL); + + spqr_helper.compute(H_sparse); + + MatrixXd H_temp; + VectorXd r_temp; + + (spqr_helper.matrixQ().transpose() * H).evalTo(H_temp); + (spqr_helper.matrixQ().transpose() * r).evalTo(r_temp); + + H_thin = H_temp.topRows(21+state_server.cam_states.size()*7); + r_thin = r_temp.head(21+state_server.cam_states.size()*7); + + } else { + H_thin = H; + r_thin = r; + } + + // Compute the Kalman gain. 
+ const MatrixXd& P = state_server.state_cov; + MatrixXd S = H_thin*P*H_thin.transpose() + + Feature::observation_noise*MatrixXd::Identity( + H_thin.rows(), H_thin.rows()); + //MatrixXd K_transpose = S.fullPivHouseholderQr().solve(H*P); + + MatrixXd K_transpose = S.ldlt().solve(H_thin*P); + MatrixXd K = K_transpose.transpose(); + // Compute the error of the state. + + VectorXd delta_x = K * r; + // Update the IMU state. + cout << "pho: " << delta_x[12] << ", " << delta_x[13] << ", " << delta_x[14] << endl; + + + if (FILTER != 1) return; + + if(PRINTIMAGES) + { + //octave + ofstream myfile; + + myfile.open("/home/raphael/dev/octave/measurement2octave"); + myfile << "# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST \n" + << "# name: K\n" + << "# type: matrix\n" + << "# rows: " << K.rows() << "\n" + << "# columns: " << K.cols() << "\n" + << K << endl; + + myfile << "# name: r\n" + << "# type: matrix\n" + << "# rows: " << r.rows() << "\n" + << "# columns: " << r.cols() << "\n" + << r << endl; + + myfile.close(); + } + const VectorXd& delta_x_imu = delta_x.head<21>(); + + if (//delta_x_imu.segment<3>(0).norm() > 0.15 || + //delta_x_imu.segment<3>(3).norm() > 0.15 || + delta_x_imu.segment<3>(6).norm() > 0.5 || + //delta_x_imu.segment<3>(9).norm() > 0.5 || + delta_x_imu.segment<3>(12).norm() > 1.0) { + printf("delta velocity: %f\n", delta_x_imu.segment<3>(6).norm()); + printf("delta position: %f\n", delta_x_imu.segment<3>(12).norm()); + ROS_WARN("Update change is too large."); + //return; + } + + const Vector4d dq_imu = + smallAngleQuaternion(delta_x_imu.head<3>()); + state_server.imu_state.orientation = quaternionMultiplication( + dq_imu, state_server.imu_state.orientation); + state_server.imu_state.gyro_bias += delta_x_imu.segment<3>(3); + state_server.imu_state.velocity += delta_x_imu.segment<3>(6); + state_server.imu_state.acc_bias += delta_x_imu.segment<3>(9); + state_server.imu_state.position += delta_x_imu.segment<3>(12); + + const Vector4d dq_extrinsic = + 
smallAngleQuaternion(delta_x_imu.segment<3>(15)); + state_server.imu_state.R_imu_cam0 = quaternionToRotation( + dq_extrinsic) * state_server.imu_state.R_imu_cam0; + state_server.imu_state.t_cam0_imu += delta_x_imu.segment<3>(18); + + // Update the camera states. + auto cam_state_iter = state_server.cam_states.begin(); + for (int i = 0; i < state_server.cam_states.size(); + ++i, ++cam_state_iter) { + const VectorXd& delta_x_cam = delta_x.segment(21+i*7, 6); + const Vector4d dq_cam = smallAngleQuaternion(delta_x_cam.head<3>()); + cam_state_iter->second.orientation = quaternionMultiplication( + dq_cam, cam_state_iter->second.orientation); + cam_state_iter->second.position += delta_x_cam.tail<3>(); + cam_state_iter->second.illumination.frame_bias += delta_x(21+i*7+6); + } + + // Update state covariance. + MatrixXd I_KH = MatrixXd::Identity(K.rows(), H_thin.cols()) - K*H_thin; + //state_server.state_cov = I_KH*state_server.state_cov*I_KH.transpose() + + // K*K.transpose()*Feature::observation_noise; + state_server.state_cov = I_KH*state_server.state_cov; + + // Fix the covariance to be symmetric + MatrixXd state_cov_fixed = (state_server.state_cov + + state_server.state_cov.transpose()) / 2.0; + state_server.state_cov = state_cov_fixed; + + return; +} + + +bool MsckfVio::gatingTest(const MatrixXd& H, const VectorXd& r, const int& dof, int filter) { MatrixXd P1 = H * state_server.state_cov * H.transpose(); + MatrixXd P2 = Feature::observation_noise * MatrixXd::Identity(H.rows(), H.rows()); + double gamma = r.transpose() * (P1+P2).ldlt().solve(r); - //cout << dof << " " << gamma << " " << - // chi_squared_test_table[dof] << " "; - + if (chi_squared_test_table[dof] == 0) + return false; if (gamma < chi_squared_test_table[dof]) { - //cout << "passed" << endl; + // cout << "passed" << endl; + cout << "gate: " << dof << " " << gamma << " " << + chi_squared_test_table[dof] << endl; return true; } else { - //cout << "failed" << endl; + // cout << "failed" << endl; return false; 
} } void MsckfVio::removeLostFeatures() { - // Remove the features that lost track. // BTW, find the size the final Jacobian matrix and residual vector. int jacobian_row_size = 0; + int pjacobian_row_size = 0; + int twojacobian_row_size = 0; + vector invalid_feature_ids(0); vector processed_feature_ids(0); @@ -1067,14 +2708,25 @@ void MsckfVio::removeLostFeatures() { invalid_feature_ids.push_back(feature.id); continue; } else { - if(!feature.initializePosition(state_server.cam_states)) { + if(!feature.initializeRho(state_server.cam_states)) { invalid_feature_ids.push_back(feature.id); continue; } } } + if(!feature.is_anchored) + { + if(!feature.initializeAnchor(cam0, N)) + { + invalid_feature_ids.push_back(feature.id); + continue; + } + } + pjacobian_row_size += 2*N*N*feature.observations.size(); + twojacobian_row_size += 4*feature.observations.size(); jacobian_row_size += 4*feature.observations.size() - 3; + processed_feature_ids.push_back(feature.id); } @@ -1091,10 +2743,21 @@ void MsckfVio::removeLostFeatures() { if (processed_feature_ids.size() == 0) return; MatrixXd H_x = MatrixXd::Zero(jacobian_row_size, - 21+6*state_server.cam_states.size()); + 21+7*state_server.cam_states.size()); VectorXd r = VectorXd::Zero(jacobian_row_size); int stack_cntr = 0; + MatrixXd pH_x = MatrixXd::Zero(pjacobian_row_size, + 21+7*state_server.cam_states.size()); + VectorXd pr = VectorXd::Zero(pjacobian_row_size); + int pstack_cntr = 0; + + + MatrixXd twoH_x = MatrixXd::Zero(twojacobian_row_size, + 21+7*state_server.cam_states.size()); + VectorXd twor = VectorXd::Zero(twojacobian_row_size); + int twostack_cntr = 0; + // Process the features which lose track. 
for (const auto& feature_id : processed_feature_ids) { auto& feature = map_server[feature_id]; @@ -1105,24 +2768,66 @@ void MsckfVio::removeLostFeatures() { MatrixXd H_xj; VectorXd r_j; - featureJacobian(feature.id, cam_state_ids, H_xj, r_j); - - if (gatingTest(H_xj, r_j, cam_state_ids.size()-1)) { - H_x.block(stack_cntr, 0, H_xj.rows(), H_xj.cols()) = H_xj; - r.segment(stack_cntr, r_j.rows()) = r_j; - stack_cntr += H_xj.rows(); + MatrixXd pH_xj; + VectorXd pr_j; + MatrixXd twoH_xj; + VectorXd twor_j; + + if(PhotometricFeatureJacobian(feature.id, cam_state_ids, pH_xj, pr_j)) + { + if (gatingTest(pH_xj, pr_j, pr_j.size(), 1)) { //, cam_state_ids.size()-1)) { + pH_x.block(pstack_cntr, 0, pH_xj.rows(), pH_xj.cols()) = pH_xj; + pr.segment(pstack_cntr, pr_j.rows()) = pr_j; + pstack_cntr += pH_xj.rows(); + } + } + + if(featureJacobian(feature.id, cam_state_ids, H_xj, r_j)) + { + if (gatingTest(H_xj, r_j, r_j.size())) { //, cam_state_ids.size()-1)) { + H_x.block(stack_cntr, 0, H_xj.rows(), H_xj.cols()) = H_xj; + r.segment(stack_cntr, r_j.rows()) = r_j; + stack_cntr += H_xj.rows(); + } } + if(twodotFeatureJacobian(feature.id, cam_state_ids, twoH_xj, twor_j)) + { + if (gatingTest(twoH_xj, twor_j, twor_j.size())) { //, cam_state_ids.size()-1)) { + twoH_x.block(twostack_cntr, 0, twoH_xj.rows(), twoH_xj.cols()) = twoH_xj; + twor.segment(twostack_cntr, twor_j.rows()) = twor_j; + twostack_cntr += twoH_xj.rows(); + } + } + + + // Put an upper bound on the row size of measurement Jacobian, // which helps guarantee the executation time. - if (stack_cntr > 1500) break; + //if (stack_cntr > 1500) break; } - H_x.conservativeResize(stack_cntr, H_x.cols()); - r.conservativeResize(stack_cntr); + if(pstack_cntr) + { + pH_x.conservativeResize(pstack_cntr, pH_x.cols()); + pr.conservativeResize(pstack_cntr); + photometricMeasurementUpdate(pH_x, pr); + } - // Perform the measurement update step. 
- measurementUpdate(H_x, r); + if(stack_cntr) + { + H_x.conservativeResize(stack_cntr, H_x.cols()); + r.conservativeResize(stack_cntr); + measurementUpdate(H_x, r); + + } + if(twostack_cntr) + { + twoH_x.conservativeResize(twostack_cntr, twoH_x.cols()); + twor.conservativeResize(twostack_cntr); + twoMeasurementUpdate(twoH_x, twor); + } + // Remove all processed features from the map. for (const auto& feature_id : processed_feature_ids) @@ -1131,8 +2836,7 @@ void MsckfVio::removeLostFeatures() { return; } -void MsckfVio::findRedundantCamStates( - vector& rm_cam_state_ids) { +void MsckfVio::findRedundantCamStates(vector& rm_cam_state_ids) { // Move the iterator to the key position. auto key_cam_state_iter = state_server.cam_states.end(); @@ -1176,6 +2880,184 @@ void MsckfVio::findRedundantCamStates( return; } +void MsckfVio::pruneLastCamStateBuffer() +{ + + if (state_server.cam_states.size() < max_cam_state_size) + return; + + auto rm_cam_state_id = state_server.cam_states.begin()->first; + + // Set the size of the Jacobian matrix. 
+ int jacobian_row_size = 0; + int pjacobian_row_size = 0; + int twojacobian_row_size = 0; + + + //initialize all the features which are going to be removed + for (auto& item : map_server) { + auto& feature = item.second; + + // check if feature is involved, if not continue + if (feature.observations.find(rm_cam_state_id) == + feature.observations.end()) + continue; + + + if (!feature.is_initialized) { + + // Check if the feature can be initialized + if (!feature.checkMotion(state_server.cam_states)) { + + // remove any features that can't be initialized + feature.observations.erase(rm_cam_state_id); + continue; + } else { + if(!feature.initializeRho(state_server.cam_states)) { + feature.observations.erase(rm_cam_state_id); + continue; + } + } + } + if(!feature.is_anchored) + { + if(!feature.initializeAnchor(cam0, N)) + { + feature.observations.erase(rm_cam_state_id); + continue; + } + } + + pjacobian_row_size += 2*N*N*feature.observations.size(); + jacobian_row_size += 4*feature.observations.size() - 3; + twojacobian_row_size += 4*feature.observations.size(); + + } + + MatrixXd H_xj; + VectorXd r_j; + MatrixXd H_x = MatrixXd::Zero(jacobian_row_size, 21+7*state_server.cam_states.size()); + VectorXd r = VectorXd::Zero(jacobian_row_size); + MatrixXd pH_xj; + VectorXd pr_j; + MatrixXd pH_x = MatrixXd::Zero(pjacobian_row_size, 21+7*state_server.cam_states.size()); + VectorXd pr = VectorXd::Zero(pjacobian_row_size); + MatrixXd twoH_xj; + VectorXd twor_j; + MatrixXd twoH_x = MatrixXd::Zero(twojacobian_row_size, 21+7*state_server.cam_states.size()); + VectorXd twor = VectorXd::Zero(twojacobian_row_size); + int stack_cntr = 0; + int pruned_cntr = 0; + int pstack_cntr = 0; + int twostack_cntr = 0; + + for (auto& item : map_server) { + auto& feature = item.second; + + // check if feature is involved, if not continue + if (feature.observations.find(rm_cam_state_id) == + feature.observations.end()) + continue; + + + vector involved_cam_state_ids(0); + for (const auto& 
cam_state : state_server.cam_states) + involved_cam_state_ids.push_back(cam_state.first); + + + if(PhotometricFeatureJacobian(feature.id, involved_cam_state_ids, pH_xj, pr_j) == true) + { + + if (gatingTest(pH_xj, pr_j, pr_j.size(), 1)) { //, cam_state_ids.size()-1)) { + pH_x.block(pstack_cntr, 0, pH_xj.rows(), pH_xj.cols()) = pH_xj; + pr.segment(pstack_cntr, pr_j.rows()) = pr_j; + pstack_cntr += pH_xj.rows(); + } + } + + if(featureJacobian(feature.id, involved_cam_state_ids, H_xj, r_j)) + { + if (gatingTest(H_xj, r_j, r_j.size())) {// involved_cam_state_ids.size())) { + H_x.block(stack_cntr, 0, H_xj.rows(), H_xj.cols()) = H_xj; + r.segment(stack_cntr, r_j.rows()) = r_j; + stack_cntr += H_xj.rows(); + pruned_cntr++; + } + } + + if(twodotFeatureJacobian(feature.id, involved_cam_state_ids, twoH_xj, twor_j)) + { + if (gatingTest(twoH_xj, twor_j, twor_j.size())) {// involved_cam_state_ids.size())) { + twoH_x.block(twostack_cntr, 0, twoH_xj.rows(), twoH_xj.cols()) = twoH_xj; + twor.segment(twostack_cntr, twor_j.rows()) = twor_j; + twostack_cntr += twoH_xj.rows(); + } + } + + for (const auto& cam_id : involved_cam_state_ids) + feature.observations.erase(cam_id); + } + + if(pstack_cntr) + { + pH_x.conservativeResize(pstack_cntr, pH_x.cols()); + pr.conservativeResize(pstack_cntr); + photometricMeasurementUpdate(pH_x, pr); + } + + if(stack_cntr) + { + H_x.conservativeResize(stack_cntr, H_x.cols()); + r.conservativeResize(stack_cntr); + measurementUpdate(H_x, r); + } + + if(twostack_cntr) + { + twoH_x.conservativeResize(twostack_cntr, twoH_x.cols()); + twor.conservativeResize(twostack_cntr); + twoMeasurementUpdate(twoH_x, twor); + } + + //reduction + int cam_sequence = std::distance(state_server.cam_states.begin(), + state_server.cam_states.find(rm_cam_state_id)); + int cam_state_start = 21 + 7*cam_sequence; + int cam_state_end = cam_state_start + 7; + + // Remove the corresponding rows and columns in the state + // covariance matrix. 
+ if (cam_state_end < state_server.state_cov.rows()) { + state_server.state_cov.block(cam_state_start, 0, + state_server.state_cov.rows()-cam_state_end, + state_server.state_cov.cols()) = + state_server.state_cov.block(cam_state_end, 0, + state_server.state_cov.rows()-cam_state_end, + state_server.state_cov.cols()); + + state_server.state_cov.block(0, cam_state_start, + state_server.state_cov.rows(), + state_server.state_cov.cols()-cam_state_end) = + state_server.state_cov.block(0, cam_state_end, + state_server.state_cov.rows(), + state_server.state_cov.cols()-cam_state_end); + + state_server.state_cov.conservativeResize( + state_server.state_cov.rows()-7, state_server.state_cov.cols()-7); + } else { + state_server.state_cov.conservativeResize( + state_server.state_cov.rows()-7, state_server.state_cov.cols()-7); + } + + + // Remove this camera state in the state vector. + state_server.cam_states.erase(rm_cam_state_id); + cam0.moving_window.erase(rm_cam_state_id); + cam1.moving_window.erase(rm_cam_state_id); + + return; +} + void MsckfVio::pruneCamStateBuffer() { if (state_server.cam_states.size() < max_cam_state_size) @@ -1187,6 +3069,9 @@ void MsckfVio::pruneCamStateBuffer() { // Find the size of the Jacobian matrix. int jacobian_row_size = 0; + int pjacobian_row_size = 0; + int twojacobian_row_size = 0; + for (auto& item : map_server) { auto& feature = item.second; // Check how many camera states to be removed are associated @@ -1203,7 +3088,6 @@ void MsckfVio::pruneCamStateBuffer() { feature.observations.erase(involved_cam_state_ids[0]); continue; } - if (!feature.is_initialized) { // Check if the feature can be initialize. 
if (!feature.checkMotion(state_server.cam_states)) { @@ -1214,25 +3098,46 @@ void MsckfVio::pruneCamStateBuffer() { feature.observations.erase(cam_id); continue; } else { - if(!feature.initializePosition(state_server.cam_states)) { + if(!feature.initializeRho(state_server.cam_states)) { for (const auto& cam_id : involved_cam_state_ids) feature.observations.erase(cam_id); continue; } } } - + if(!feature.is_anchored) + { + if(!feature.initializeAnchor(cam0, N)) + { + for (const auto& cam_id : involved_cam_state_ids) + feature.observations.erase(cam_id); + continue; + } + } + + twojacobian_row_size += 4*involved_cam_state_ids.size(); + pjacobian_row_size += 2*N*N*involved_cam_state_ids.size(); jacobian_row_size += 4*involved_cam_state_ids.size() - 3; } //cout << "jacobian row #: " << jacobian_row_size << endl; // Compute the Jacobian and residual. - MatrixXd H_x = MatrixXd::Zero(jacobian_row_size, - 21+6*state_server.cam_states.size()); + MatrixXd H_xj; + VectorXd r_j; + MatrixXd H_x = MatrixXd::Zero(jacobian_row_size, 21+7*state_server.cam_states.size()); VectorXd r = VectorXd::Zero(jacobian_row_size); int stack_cntr = 0; - + MatrixXd pH_xj; + VectorXd pr_j; + MatrixXd pH_x = MatrixXd::Zero(pjacobian_row_size, 21+7*state_server.cam_states.size()); + VectorXd pr = VectorXd::Zero(pjacobian_row_size); + int pstack_cntr = 0; + MatrixXd twoH_xj; + VectorXd twor_j; + MatrixXd twoH_x = MatrixXd::Zero(twojacobian_row_size, 21+7*state_server.cam_states.size()); + VectorXd twor = VectorXd::Zero(twojacobian_row_size); + int twostack_cntr = 0; for (auto& item : map_server) { auto& feature = item.second; // Check how many camera states to be removed are associated @@ -1246,31 +3151,64 @@ void MsckfVio::pruneCamStateBuffer() { if (involved_cam_state_ids.size() == 0) continue; - MatrixXd H_xj; - VectorXd r_j; - featureJacobian(feature.id, involved_cam_state_ids, H_xj, r_j); - - if (gatingTest(H_xj, r_j, involved_cam_state_ids.size())) { - H_x.block(stack_cntr, 0, H_xj.rows(), 
H_xj.cols()) = H_xj; - r.segment(stack_cntr, r_j.rows()) = r_j; - stack_cntr += H_xj.rows(); + if(PhotometricFeatureJacobian(feature.id, involved_cam_state_ids, pH_xj, pr_j) == true) + { + if (gatingTest(pH_xj, pr_j, pr_j.size(), 1)) {// involved_cam_state_ids.size())) { + pH_x.block(pstack_cntr, 0, pH_xj.rows(), pH_xj.cols()) = pH_xj; + pr.segment(pstack_cntr, pr_j.rows()) = pr_j; + pstack_cntr += pH_xj.rows(); + } + } + if(featureJacobian(feature.id, involved_cam_state_ids, H_xj, r_j)) + { + if (gatingTest(H_xj, r_j, involved_cam_state_ids.size())) {// involved_cam_state_ids.size())) { + H_x.block(stack_cntr, 0, H_xj.rows(), H_xj.cols()) = H_xj; + r.segment(stack_cntr, r_j.rows()) = r_j; + stack_cntr += H_xj.rows(); + } + } + if(twodotFeatureJacobian(feature.id, involved_cam_state_ids, twoH_xj, twor_j)) + { + if (gatingTest(twoH_xj, twor_j, twor_j.size())) {// involved_cam_state_ids.size())) { + twoH_x.block(twostack_cntr, 0, twoH_xj.rows(), twoH_xj.cols()) = twoH_xj; + twor.segment(twostack_cntr, twor_j.rows()) = twor_j; + twostack_cntr += twoH_xj.rows(); + } } for (const auto& cam_id : involved_cam_state_ids) feature.observations.erase(cam_id); + + } + + if(pstack_cntr) + { + pH_x.conservativeResize(pstack_cntr, pH_x.cols()); + pr.conservativeResize(pstack_cntr); + photometricMeasurementUpdate(pH_x, pr); } - H_x.conservativeResize(stack_cntr, H_x.cols()); - r.conservativeResize(stack_cntr); + if(stack_cntr) + { + H_x.conservativeResize(stack_cntr, H_x.cols()); + r.conservativeResize(stack_cntr); + measurementUpdate(H_x, r); + } - // Perform measurement update. 
- measurementUpdate(H_x, r); + if(stack_cntr) + { + twoH_x.conservativeResize(twostack_cntr, twoH_x.cols()); + twor.conservativeResize(twostack_cntr); + twoMeasurementUpdate(twoH_x, twor); + } + //reduction for (const auto& cam_id : rm_cam_state_ids) { int cam_sequence = std::distance(state_server.cam_states.begin(), state_server.cam_states.find(cam_id)); - int cam_state_start = 21 + 6*cam_sequence; - int cam_state_end = cam_state_start + 6; + int cam_state_start = 21 + 7*cam_sequence; + int cam_state_end = cam_state_start + 7; + // Remove the corresponding rows and columns in the state // covariance matrix. @@ -1290,14 +3228,25 @@ void MsckfVio::pruneCamStateBuffer() { state_server.state_cov.cols()-cam_state_end); state_server.state_cov.conservativeResize( - state_server.state_cov.rows()-6, state_server.state_cov.cols()-6); + state_server.state_cov.rows()-7, state_server.state_cov.cols()-7); } else { state_server.state_cov.conservativeResize( - state_server.state_cov.rows()-6, state_server.state_cov.cols()-6); + state_server.state_cov.rows()-7, state_server.state_cov.cols()-7); } // Remove this camera state in the state vector. - state_server.cam_states.erase(cam_id); + + + for (const auto& cam_id : rm_cam_state_ids) { + + state_server.cam_states.erase(cam_id); + cam0.moving_window.erase(cam_id); + cam1.moving_window.erase(cam_id); + + // Remove this camera state in the true state vector. 
+ true_state_server.cam_states.erase(cam_id); + err_state_server.cam_states.erase(cam_id); + } } return; @@ -1346,6 +3295,10 @@ void MsckfVio::onlineReset() { nh.param("initial_covariance/extrinsic_translation_cov", extrinsic_translation_cov, 1e-4); + // Reset the irradiance covariance + nh.param("initial_covariance/irradiance_frame_bias", + irradiance_frame_bias, 0.1); + state_server.state_cov = MatrixXd::Zero(21, 21); for (int i = 3; i < 6; ++i) state_server.state_cov(i, i) = gyro_bias_cov; @@ -1376,14 +3329,41 @@ void MsckfVio::publish(const ros::Time& time) { Eigen::Vector3d body_velocity = IMUState::T_imu_body.linear() * imu_state.velocity; + // Generate camera frame form IMU Frame + Eigen::Isometry3d T_i_c = Eigen::Isometry3d::Identity(); + T_i_c.linear() = imu_state.R_imu_cam0; + T_i_c.translation() = imu_state.t_cam0_imu; + // Publish tf if (publish_tf) { tf::Transform T_b_w_tf; tf::transformEigenToTF(T_b_w, T_b_w_tf); tf_pub.sendTransform(tf::StampedTransform( T_b_w_tf, time, fixed_frame_id, child_frame_id)); + + tf::Transform T_i_c_tf; + tf::transformEigenToTF(T_i_c, T_i_c_tf); + tf_pub.sendTransform(tf::StampedTransform( + T_i_c_tf, time, child_frame_id, "camera")); } + // Publish true odometry + // Ground truth odometry. 
+ nav_msgs::Odometry truth_odom_msg; + truth_odom_msg.header.stamp = time; + truth_odom_msg.header.frame_id = fixed_frame_id; + truth_odom_msg.child_frame_id = child_frame_id+"_true"; + + Eigen::Isometry3d true_T_i_w = Eigen::Isometry3d::Identity(); + true_T_i_w.linear() = quaternionToRotation( + true_state_server.imu_state.orientation).transpose(); + true_T_i_w.translation() = true_state_server.imu_state.position; + tf::poseEigenToMsg(true_T_i_w, truth_odom_msg.pose.pose); + Eigen::Isometry3d true_T_b_w = IMUState::T_imu_body * true_T_i_w * + IMUState::T_imu_body.inverse(); + + tf::poseEigenToMsg(true_T_b_w, truth_odom_msg.pose.pose); + truth_odom_pub.publish(truth_odom_msg); // Publish the odometry nav_msgs::Odometry odom_msg; odom_msg.header.stamp = time; @@ -1439,7 +3419,6 @@ void MsckfVio::publish(const ros::Time& time) { feature_msg_ptr->width = feature_msg_ptr->points.size(); feature_pub.publish(feature_msg_ptr); - return; } diff --git a/src/shrinkImage.py b/src/shrinkImage.py new file mode 100755 index 0000000..89ca55d --- /dev/null +++ b/src/shrinkImage.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +from __future__ import print_function + +import sys +import rospy +import cv2 +from std_msgs.msg import String +from sensor_msgs.msg import Image +from cv_bridge import CvBridge, CvBridgeError + +class image_converter: + + def __init__(self): + self.image0_pub = rospy.Publisher("/cam0/new_image_raw",Image, queue_size=10) + self.image1_pub = rospy.Publisher("/cam1/new_image_raw",Image, queue_size=10) + + self.bridge = CvBridge() + self.image0_sub = rospy.Subscriber("/cam0/image_raw",Image,self.callback_cam0) + self.image1_sub = rospy.Subscriber("/cam1/image_raw",Image,self.callback_cam1) + + def callback_cam0(self,data): + try: + cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") + except CvBridgeError as e: + print(e) + + imgScale = 0.25 + newX,newY = cv_image.shape[1]*imgScale, cv_image.shape[0]*imgScale + newimg = cv2.resize(cv_image,(int(newX),int(newY))) + + 
newpub = self.bridge.cv2_to_imgmsg(newimg, "bgr8") + newdata = data + newdata.height = newpub.height + newdata.width = newpub.width + newdata.step = newpub.step + newdata.data = newpub.data + try: + self.image0_pub.publish(newdata) + except CvBridgeError as e: + print(e) + + def callback_cam1(self,data): + try: + cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") + except CvBridgeError as e: + print(e) + + imgScale = 0.25 + newX,newY = cv_image.shape[1]*imgScale, cv_image.shape[0]*imgScale + newimg = cv2.resize(cv_image,(int(newX),int(newY))) + + newpub = self.bridge.cv2_to_imgmsg(newimg, "bgr8") + newdata = data + newdata.height = newpub.height + newdata.width = newpub.width + newdata.step = newpub.step + newdata.data = newpub.data + + try: + self.image1_pub.publish(newdata) + except CvBridgeError as e: + print(e) + + +def main(args): + ic = image_converter() + rospy.init_node('image_converter', anonymous=True) + try: + rospy.spin() + except KeyboardInterrupt: + print("Shutting down") + cv2.destroyAllWindows() + +if __name__ == '__main__': + main(sys.argv) \ No newline at end of file