From ebc73c0c5ec382de12ae22025d4b197acf4f6e82 Mon Sep 17 00:00:00 2001
From: Raphael Maenle
Date: Wed, 26 Jun 2019 19:23:50 +0200
Subject: [PATCH] WIP (not working): add stop-and-go playback to image
 processing and use undistorted images in image processing

---
 config/camchain-imucam-tum-scaled.yaml |   4 +-
 include/msckf_vio/feature.hpp          | 138 ++++++++++++++++++++++---
 include/msckf_vio/image_handler.h      |   3 +
 include/msckf_vio/image_processor.h    |   2 +
 include/msckf_vio/msckf_vio.h          |   2 +-
 launch/image_processor_tinytum.launch  |   3 +
 launch/image_processor_tum.launch      |   3 +
 launch/msckf_vio_tinytum.launch        |   4 +-
 src/image_handler.cpp                  |  56 +++++++++-
 src/image_processor.cpp                |  31 +++++-
 src/msckf_vio.cpp                      |  58 ++++++++---
 11 files changed, 260 insertions(+), 44 deletions(-)

diff --git a/config/camchain-imucam-tum-scaled.yaml b/config/camchain-imucam-tum-scaled.yaml
index 01f1983..40b2dc0 100644
--- a/config/camchain-imucam-tum-scaled.yaml
+++ b/config/camchain-imucam-tum-scaled.yaml
@@ -7,7 +7,7 @@ cam0:
   camera_model: pinhole
   distortion_coeffs: [0.0034823894022493434, 0.0007150348452162257, -0.0020532361418706202, 0.00020293673591811182]
-  distortion_model: equidistant
+  distortion_model: pre-equidistant
   intrinsics: [190.97847715128717, 190.9733070521226, 254.93170605935475, 256.8974428996504]
   resolution: [512, 512]
   rostopic: /cam0/image_raw
@@ -25,7 +25,7 @@ cam1:
   camera_model: pinhole
   distortion_coeffs: [0.0034003170790442797, 0.001766278153469831, -0.00266312569781606, 0.0003299517423931039]
-  distortion_model: equidistant
+  distortion_model: pre-equidistant
   intrinsics: [190.44236969414825, 190.4344384721956, 252.59949716835982, 254.91723064636983]
   resolution: [512, 512]
   rostopic: /cam1/image_raw
diff --git a/include/msckf_vio/feature.hpp b/include/msckf_vio/feature.hpp
index 405f2d4..54157e7 100644
--- a/include/msckf_vio/feature.hpp
+++ b/include/msckf_vio/feature.hpp
@@ -137,6 +137,9 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
   inline bool checkMotion(
     const CamStateServer& cam_states) const;
+
+  bool initializeAnchorUndistort(const CameraCalibration& cam, int N);
+
   /*
    * @brief InitializeAnchor generates the NxN patch around the
    * feature in the Anchor image
@@ -171,6 +174,12 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
     Eigen::Vector3d& in_p) const;
+cv::Point2f projectPositionToCameraUndistorted(
+  const CAMState& cam_state,
+  const StateIDType& cam_state_id,
+  const CameraCalibration& cam,
+  Eigen::Vector3d& in_p) const;
+
   /*
    * @brief project PositionToCamera Takes a 3d position in a world frame
@@ -692,26 +701,22 @@ bool Feature::VisualizeKernel(
   //cv::Sobel(anchorImage, yderImage, CV_8UC1, 0, 1, 3);
-  cv::Mat xderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());
-  cv::Mat yderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());
-
-
-  cv::Mat norm_abs_xderImage;
-  cv::normalize(abs_xderImage, norm_abs_xderImage, 0, 255, cv::NORM_MINMAX, CV_8UC1);
-
-  cv::imshow("xder", norm_abs_xderImage);
-  cv::imshow("yder", abs_yderImage);
+  // cv::Mat xderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());
+  // cv::Mat yderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());
+  /*
   for(int i = 1; i < anchorImage.rows-1; i++)
     for(int j = 1; j < anchorImage.cols-1; j++)
       xderImage2.at(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_x"));
+
   for(int i = 1; i < anchorImage.rows-1; i++)
     for(int j = 1; j < anchorImage.cols-1; j++)
      yderImage2.at(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_y"));
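The commented-out check above rebuilds the derivative images by evaluating Kernel(point, image, "Sobel_x" / "Sobel_y") at every interior pixel and comparing the result against cv::Sobel. Feature::Kernel itself is not part of this patch, so the following is only a minimal, self-contained sketch of such a per-pixel 3x3 Sobel response; the kernel weights, the CV_8UC1 image type and the unnormalized return value are assumptions.

    #include <opencv2/core.hpp>
    #include <string>

    // Sketch only: evaluate a 3x3 Sobel response at one interior pixel of a
    // single-channel 8-bit image. The caller must keep p at least one pixel
    // away from the image border.
    float sobelAt(const cv::Mat& img, const cv::Point2f& p, const std::string& type)
    {
      static const float kx[3][3] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}};
      static const float ky[3][3] = {{-1, -2, -1}, { 0, 0, 0}, { 1, 2, 1}};
      const float (*k)[3] = (type == "Sobel_x") ? kx : ky;

      float sum = 0.f;
      for (int dv = -1; dv <= 1; ++dv)
        for (int du = -1; du <= 1; ++du)
          sum += k[dv + 1][du + 1] * img.at<uchar>((int)p.y + dv, (int)p.x + du);
      return sum;   // no 1/8 normalization applied here
    }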
- cv::imshow("anchor", anchorImage); - cv::imshow("xder2", xderImage2); - cv::imshow("yder2", yderImage2); + */ + //cv::imshow("anchor", anchorImage); + cv::imshow("xder2", xderImage); + cv::imshow("yder2", yderImage); cvWaitKey(0); } @@ -752,7 +757,7 @@ bool Feature::VisualizePatch( std::vector projectionPatch; for(auto point : anchorPatch_3d) { - cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam0, point); + cv::Point2f p_in_c0 = projectPositionToCameraUndistorted(cam_state, cam_state_id, cam0, point); projectionPatch.push_back(PixelIrradiance(p_in_c0, current_image)); // visu - feature cv::Point xs(p_in_c0.x, p_in_c0.y); @@ -807,10 +812,11 @@ bool Feature::VisualizePatch( cv::putText(irradianceFrame, namer.str() , cvPoint(30, 65+scale*2*N), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA); - cv::Point2f p_f(observations.find(cam_state_id)->second(0),observations.find(cam_state_id)->second(1)); + cv::Point2f p_f(observations.find(cam_state_id)->second(0)*cam0.intrinsics[0] + cam0.intrinsics[2],observations.find(cam_state_id)->second(1)*cam0.intrinsics[1] + cam0.intrinsics[3]); // move to real pixels - p_f = image_handler::distortPoint(p_f, cam0.intrinsics, cam0.distortion_model, cam0.distortion_coeffs); - + + // without distort + // p_f = image_handler::distortPoint(p_f, cam0.intrinsics, cam0.distortion_model, cam0.distortion_coeffs); for(int i = 0; ifirst) + //printf("undist:\n \tproj pos: %f, %f\n\ttrue pos: %f, %f\n", out_p.x, out_p.y, undist_anchor_center_pos.x, undist_anchor_center_pos.y); + + cv::Point2f my_p(out_p.x * cam.intrinsics[0] + cam.intrinsics[2], out_p.y * cam.intrinsics[1] + cam.intrinsics[3]); + + // printf("truPosition: %f, %f, %f\n", position.x(), position.y(), position.z()); + // printf("camPosition: %f, %f, %f\n", p_c0(0), p_c0(1), p_c0(2)); + // printf("Photo projection: %f, %f\n", my_p[0].x, my_p[0].y); + + return my_p; +} cv::Point2f Feature::projectPositionToCamera( const CAMState& cam_state, @@ -1080,6 +1113,79 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N) return true; } +//@test center projection must always be initial feature projection +bool Feature::initializeAnchorUndistort(const CameraCalibration& cam, int N) +{ + + //initialize patch Size + int n = (int)(N-1)/2; + + auto anchor = observations.begin(); + if(cam.moving_window.find(anchor->first) == cam.moving_window.end()) + return false; + + cv::Mat anchorImage = cam.moving_window.find(anchor->first)->second.image; + cv::Mat anchorImage_deeper; + anchorImage.convertTo(anchorImage_deeper,CV_16S); + //TODO remove this? 
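For orientation, the projection path used by projectPositionToCameraUndistorted above reduces, for a pre-undistorted ("pre-equidistant") image, to a world-to-camera transform, a pinhole normalization and a scaling by the intrinsics, with no distortion step. A minimal sketch under those assumptions; the function name and the Isometry3d pose convention are placeholders, not the repository's API:

    #include <Eigen/Geometry>
    #include <opencv2/core.hpp>

    // Sketch: project a world-frame point into pixel coordinates of a camera
    // whose image was undistorted beforehand, so no distortion model is applied.
    // T_c_w maps world coordinates into the camera frame (same effect as the
    // R_w_c0 / t_c0_w pair used in the hunks above).
    cv::Point2f projectUndistorted(const Eigen::Vector3d& p_w,
                                   const Eigen::Isometry3d& T_c_w,
                                   const cv::Vec4d& intrinsics /* fx, fy, cx, cy */)
    {
      const Eigen::Vector3d p_c = T_c_w * p_w;   // world -> camera frame
      const double x = p_c.x() / p_c.z();        // pinhole normalization
      const double y = p_c.y() / p_c.z();
      // pure pinhole scaling, equivalent to image_handler::pinholeDownProject
      return cv::Point2f(static_cast<float>(x * intrinsics[0] + intrinsics[2]),
                         static_cast<float>(y * intrinsics[1] + intrinsics[3]));
    }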
+ + + cv::Mat hen = cv::Mat::zeros(cv::Size(3, 3), CV_16S); + hen.at(1,0) = 1; + + //sobel test + /* + cv::Mat newhen; + cv::Sobel(hen, newhen, -1, 1, 0, 3); + std::cout << "newhen" << newhen << std::endl; + */ + + cv::Sobel(anchorImage_deeper, xderImage, -1, 1, 0, 3); + cv::Sobel(anchorImage_deeper, yderImage, -1, 0, 1, 3); + + xderImage/=4; + yderImage/=4; + + cv::convertScaleAbs(xderImage, abs_xderImage); + cv::convertScaleAbs(yderImage, abs_yderImage); + + cvWaitKey(0); + + auto u = anchor->second(0);//*cam.intrinsics[0] + cam.intrinsics[2]; + auto v = anchor->second(1);//*cam.intrinsics[1] + cam.intrinsics[3]; + + //project onto pixel plane + undist_anchor_center_pos = cv::Point2f(u * cam.intrinsics[0] + cam.intrinsics[2], v * cam.intrinsics[1] + cam.intrinsics[3]); + + // create vector of patch in pixel plane + for(double u_run = -n; u_run <= n; u_run++) + for(double v_run = -n; v_run <= n; v_run++) + anchorPatch_real.push_back(cv::Point2f(undist_anchor_center_pos.x+u_run, undist_anchor_center_pos.y+v_run)); + + //project back into u,v + for(int i = 0; i < N*N; i++) + anchorPatch_ideal.push_back(cv::Point2f((anchorPatch_real[i].x-cam.intrinsics[2])/cam.intrinsics[0], (anchorPatch_real[i].y-cam.intrinsics[3])/cam.intrinsics[1])); + + + // save anchor position for later visualisaztion + anchor_center_pos = anchorPatch_real[(N*N-1)/2]; + + // save true pixel Patch position + for(auto point : anchorPatch_real) + if(point.x - n < 0 || point.x + n >= cam.resolution(0)-1 || point.y - n < 0 || point.y + n >= cam.resolution(1)-1) + return false; + + for(auto point : anchorPatch_real) + anchorPatch.push_back(PixelIrradiance(point, anchorImage)); + + // project patch pixel to 3D space in camera coordinate system + for(auto point : anchorPatch_ideal) + anchorPatch_3d.push_back(AnchorPixelToPosition(point, cam)); + + is_anchored = true; + return true; +} + bool Feature::initializeRho(const CamStateServer& cam_states) { // Organize camera poses and feature observations properly. diff --git a/include/msckf_vio/image_handler.h b/include/msckf_vio/image_handler.h index d664be6..eae27c3 100644 --- a/include/msckf_vio/image_handler.h +++ b/include/msckf_vio/image_handler.h @@ -16,6 +16,9 @@ namespace msckf_vio { */ namespace image_handler { +cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics); +cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics); + void undistortPoints( const std::vector& pts_in, const cv::Vec4d& intrinsics, diff --git a/include/msckf_vio/image_processor.h b/include/msckf_vio/image_processor.h index 638fbcd..7fb3943 100644 --- a/include/msckf_vio/image_processor.h +++ b/include/msckf_vio/image_processor.h @@ -320,6 +320,8 @@ private: return; } + bool STREAMPAUSE; + // Indicate if this is the first image message. 
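The patch bookkeeping in initializeAnchorUndistort above can be summarized as: project the already-undistorted anchor observation to pixel coordinates, collect the N x N pixel neighbourhood, and map each pixel back to normalized image coordinates. A compact sketch with placeholder names, assuming N is odd:

    #include <opencv2/core.hpp>
    #include <vector>

    // Sketch: build an N x N pixel patch around an undistorted, normalized anchor
    // observation (u, v) and keep both pixel and normalized coordinates.
    // Assumes N is odd so that the centre pixel is the anchor itself.
    void buildAnchorPatch(double u, double v, int N, const cv::Vec4d& intr,
                          std::vector<cv::Point2f>& patch_px,
                          std::vector<cv::Point2f>& patch_norm)
    {
      const int n = (N - 1) / 2;
      const cv::Point2f center(static_cast<float>(u * intr[0] + intr[2]),
                               static_cast<float>(v * intr[1] + intr[3]));

      for (int du = -n; du <= n; ++du)               // patch in pixel coordinates
        for (int dv = -n; dv <= n; ++dv)
          patch_px.push_back(cv::Point2f(center.x + du, center.y + dv));

      for (const auto& p : patch_px)                 // back to normalized coordinates
        patch_norm.push_back(cv::Point2f(static_cast<float>((p.x - intr[2]) / intr[0]),
                                         static_cast<float>((p.y - intr[3]) / intr[1])));
      // patch_norm[(N*N - 1) / 2] should reproduce the original (u, v) observation.
    }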
bool is_first_img; diff --git a/include/msckf_vio/msckf_vio.h b/include/msckf_vio/msckf_vio.h index 9e9975d..3394a0e 100644 --- a/include/msckf_vio/msckf_vio.h +++ b/include/msckf_vio/msckf_vio.h @@ -207,7 +207,7 @@ class MsckfVio { Eigen::MatrixXd& H_x, Eigen::VectorXd& r); - void PhotometricMeasurementJacobian( + bool PhotometricMeasurementJacobian( const StateIDType& cam_state_id, const FeatureIDType& feature_id, Eigen::MatrixXd& H_x, diff --git a/launch/image_processor_tinytum.launch b/launch/image_processor_tinytum.launch index 53dd99a..7500ff2 100644 --- a/launch/image_processor_tinytum.launch +++ b/launch/image_processor_tinytum.launch @@ -11,6 +11,9 @@ output="screen" > + + + diff --git a/launch/image_processor_tum.launch b/launch/image_processor_tum.launch index d2a02e4..135af70 100644 --- a/launch/image_processor_tum.launch +++ b/launch/image_processor_tum.launch @@ -11,6 +11,9 @@ output="screen" > + + + diff --git a/launch/msckf_vio_tinytum.launch b/launch/msckf_vio_tinytum.launch index 44b3312..beaaef4 100644 --- a/launch/msckf_vio_tinytum.launch +++ b/launch/msckf_vio_tinytum.launch @@ -21,11 +21,11 @@ - + - + diff --git a/src/image_handler.cpp b/src/image_handler.cpp index d5e62ec..bd2a699 100644 --- a/src/image_handler.cpp +++ b/src/image_handler.cpp @@ -14,6 +14,16 @@ namespace msckf_vio { namespace image_handler { +cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics) +{ + return cv::Point2f(p_in.x * intrinsics[0] + intrinsics[2], p_in.y * intrinsics[1] + intrinsics[3]); +} + +cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics) +{ + return cv::Point2f((p_in.x - intrinsics[2])/intrinsics[0], (p_in.y - intrinsics[3])/intrinsics[1]); +} + void undistortPoint( const cv::Point2f& pt_in, const cv::Vec4d& intrinsics, @@ -42,10 +52,13 @@ void undistortPoint( if (distortion_model == "radtan") { cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs, rectification_matrix, K_new); - } else if (distortion_model == "equidistant") { + } + // equidistant + else if (distortion_model == "equidistant") { cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs, rectification_matrix, K_new); } + // fov else if (distortion_model == "fov") { for(int i = 0; i < pts_in.size(); i++) { @@ -59,7 +72,15 @@ void undistortPoint( pts_out.push_back(newPoint); } - } + } + else if (distortion_model == "pre-equidistant") + { + std::vector temp_pts_out; + for(int i = 0; i < pts_in.size(); i++) + temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics)); + + pts_out = temp_pts_out; + } else { ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...", distortion_model.c_str()); @@ -112,7 +133,16 @@ void undistortPoints( pts_out.push_back(newPoint); } - } + } + else if (distortion_model == "pre-equidistant") + { + std::vector temp_pts_out; + for(int i = 0; i < pts_in.size(); i++) + temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics)); + + pts_out = temp_pts_out; + + } else { ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...", distortion_model.c_str()); @@ -156,7 +186,15 @@ std::vector distortPoints( pts_out.push_back(newPoint); } - } + } + else if (distortion_model == "pre-equidistant") + { + std::vector temp_pts_out; + for(int i = 0; i < pts_in.size(); i++) + temp_pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics)); + + pts_out = temp_pts_out; + } else { ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...", distortion_model.c_str()); @@ -205,7 +243,15 @@ cv::Point2f 
distortPoint( pts_out.push_back(newPoint); } - } + } + else if (distortion_model == "pre-equidistant") + { + std::vector temp_pts_out; + for(int i = 0; i < pts_in.size(); i++) + pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics)); + + pts_out = temp_pts_out; + } else { ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...", distortion_model.c_str()); diff --git a/src/image_processor.cpp b/src/image_processor.cpp index d35bb16..da8494e 100644 --- a/src/image_processor.cpp +++ b/src/image_processor.cpp @@ -42,6 +42,9 @@ ImageProcessor::~ImageProcessor() { } bool ImageProcessor::loadParameters() { + + // debug parameters + nh.param("StreamPause", STREAMPAUSE, false); // Camera calibration parameters nh.param("cam0/distortion_model", cam0.distortion_model, string("radtan")); @@ -211,7 +214,9 @@ void ImageProcessor::stereoCallback( const sensor_msgs::ImageConstPtr& cam0_img, const sensor_msgs::ImageConstPtr& cam1_img) { - //cout << "==================================" << endl; + // stop playing bagfile if printing images + if(STREAMPAUSE) + nh.setParam("/play_bag_image", false); // Get the current image. cam0_curr_img_ptr = cv_bridge::toCvShare(cam0_img, @@ -219,6 +224,18 @@ void ImageProcessor::stereoCallback( cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img, sensor_msgs::image_encodings::MONO8); + + const cv::Matx33d K0(cam0.intrinsics[0], 0.0, cam0.intrinsics[2], + 0.0, cam0.intrinsics[1], cam0.intrinsics[3], + 0.0, 0.0, 1.0); + const cv::Matx33d K1(cam1.intrinsics[0], 0.0, cam1.intrinsics[2], + 0.0, cam1.intrinsics[1], cam1.intrinsics[3], + 0.0, 0.0, 1.0); + + + cv::fisheye::undistortImage(cam0_curr_img_ptr->image, cam0_curr_img_ptr->image, K0, cam0.distortion_coeffs, K0); + cv::fisheye::undistortImage(cam1_curr_img_ptr->image, cam1_curr_img_ptr->image, K1, cam1.distortion_coeffs, K1); + // Build the image pyramids once since they're used at multiple places createImagePyramids(); @@ -245,6 +262,7 @@ void ImageProcessor::stereoCallback( // Add new features into the current image. start_time = ros::Time::now(); addNewFeatures(); + //ROS_INFO("Addition time: %f", // (ros::Time::now()-start_time).toSec()); @@ -267,16 +285,19 @@ void ImageProcessor::stereoCallback( // (ros::Time::now()-start_time).toSec()); // Publish features in the current image. + ros::Time start_time = ros::Time::now(); publish(); //ROS_INFO("Publishing: %f", // (ros::Time::now()-start_time).toSec()); // Update the previous image and previous features. + cam0_prev_img_ptr = cam0_curr_img_ptr; prev_features_ptr = curr_features_ptr; std::swap(prev_cam0_pyramid_, curr_cam0_pyramid_); + // Initialize the current features to empty vectors. curr_features_ptr.reset(new GridFeatures()); for (int code = 0; code < @@ -284,6 +305,10 @@ void ImageProcessor::stereoCallback( (*curr_features_ptr)[code] = vector(0); } + // stop playing bagfile if printing images + if(STREAMPAUSE) + nh.setParam("/play_bag_image", true); + return; } @@ -580,6 +605,7 @@ void ImageProcessor::trackFeatures() { ++after_ransac; } + // Compute the tracking rate. int prev_feature_num = 0; for (const auto& item : *prev_features_ptr) @@ -659,6 +685,8 @@ void ImageProcessor::stereoMatch( // Further remove outliers based on the known // essential matrix. 
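One detail in the single-point distortPoint overload above: the pre-equidistant branch pushes the projected point into pts_out but then assigns the still-empty temp_pts_out back to pts_out, which looks like it would leave the result empty. A minimal corrected sketch of that branch, using the same pinholeDownProject helper this patch introduces (the standalone function name here is a placeholder):

    #include <opencv2/core.hpp>
    #include <vector>

    // Sketch of a corrected pre-equidistant branch for the single-point overload.
    // pinholeDownProject mirrors the helper added to image_handler in this patch.
    static cv::Point2f pinholeDownProject(const cv::Point2f& p, const cv::Vec4d& intr)
    {
      return cv::Point2f(p.x * intr[0] + intr[2], p.y * intr[1] + intr[3]);
    }

    cv::Point2f distortPointPreEquidistant(const cv::Point2f& pt_in,
                                           const cv::Vec4d& intrinsics)
    {
      std::vector<cv::Point2f> pts_in{pt_in};
      std::vector<cv::Point2f> pts_out;
      for (size_t i = 0; i < pts_in.size(); i++)
        pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics));
      return pts_out[0];   // pts_out is not overwritten by an empty temporary
    }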
+ + vector cam0_points_undistorted(0); vector cam1_points_undistorted(0); image_handler::undistortPoints( @@ -668,6 +696,7 @@ void ImageProcessor::stereoMatch( cam1_points, cam1.intrinsics, cam1.distortion_model, cam1.distortion_coeffs, cam1_points_undistorted); + double norm_pixel_unit = 4.0 / ( cam0.intrinsics[0]+cam0.intrinsics[1]+ cam1.intrinsics[0]+cam1.intrinsics[1]); diff --git a/src/msckf_vio.cpp b/src/msckf_vio.cpp index a3d5dd6..e5f5b3f 100644 --- a/src/msckf_vio.cpp +++ b/src/msckf_vio.cpp @@ -450,7 +450,7 @@ void MsckfVio::imageCallback( double imu_processing_time = ( ros::Time::now()-start_time).toSec(); - // cout << "1" << endl; + //cout << "1" << endl; // Augment the state vector. start_time = ros::Time::now(); //truePhotometricStateAugmentation(feature_msg->header.stamp.toSec()); @@ -459,7 +459,7 @@ void MsckfVio::imageCallback( ros::Time::now()-start_time).toSec(); - // cout << "2" << endl; + //cout << "2" << endl; // Add new observations for existing features or new // features in the map server. start_time = ros::Time::now(); @@ -468,7 +468,7 @@ void MsckfVio::imageCallback( ros::Time::now()-start_time).toSec(); - // cout << "3" << endl; + //cout << "3" << endl; // Add new images to moving window start_time = ros::Time::now(); manageMovingWindow(cam0_img, cam1_img, feature_msg); @@ -476,20 +476,20 @@ void MsckfVio::imageCallback( ros::Time::now()-start_time).toSec(); - // cout << "4" << endl; + //cout << "4" << endl; // Perform measurement update if necessary. start_time = ros::Time::now(); removeLostFeatures(); double remove_lost_features_time = ( ros::Time::now()-start_time).toSec(); - // cout << "5" << endl; + //cout << "5" << endl; start_time = ros::Time::now(); pruneCamStateBuffer(); double prune_cam_states_time = ( ros::Time::now()-start_time).toSec(); - // cout << "6" << endl; + //cout << "6" << endl; // Publish the odometry. 
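The StreamPause handling in stereoCallback above only toggles a /play_bag_image flag on the parameter server; nothing in this patch shows how that flag is consumed. Purely as an illustration of the stop-and-go idea, a hypothetical bag-player node could poll the flag between frames, for example:

    #include <ros/ros.h>

    // Hypothetical consumer of the /play_bag_image flag set in stereoCallback.
    // The real bag player is not part of this patch; this only illustrates the
    // stop-and-go idea: hold back the next frame while the flag is false.
    void waitUntilPlaybackAllowed(ros::NodeHandle& nh)
    {
      ros::Rate poll(100);                        // poll at 100 Hz
      while (ros::ok()) {
        bool play = true;
        nh.param("/play_bag_image", play, true);  // absent parameter counts as "play"
        if (play)
          break;                                  // image processor finished this frame
        poll.sleep();
      }
    }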
start_time = ros::Time::now(); publish(feature_msg->header.stamp); @@ -552,6 +552,20 @@ void MsckfVio::manageMovingWindow( cam0.moving_window[state_server.imu_state.id].image = cam0_img_ptr->image.clone(); cam1.moving_window[state_server.imu_state.id].image = cam1_img_ptr->image.clone(); + + // undistort Images + + const cv::Matx33d K(cam0.intrinsics[0], 0.0, cam0.intrinsics[2], + 0.0, cam0.intrinsics[1], cam0.intrinsics[3], + 0.0, 0.0, 1.0); + + cv::Mat undistortedCam0; + cv::fisheye::undistortImage(cam0.moving_window[state_server.imu_state.id].image, undistortedCam0, K, cam0.distortion_coeffs, K); + //cv::imshow("anchor", undistortedCam0); + //cvWaitKey(0); + cam0.moving_window[state_server.imu_state.id].image = undistortedCam0.clone(); + cam1.moving_window[state_server.imu_state.id].image = cam1_img_ptr->image.clone(); + //TODO handle any massive overflow correctly (should be pruned, before this ever triggers) while(cam0.moving_window.size() > 100) { @@ -1228,7 +1242,7 @@ void MsckfVio::addFeatureObservations( return; } -void MsckfVio::PhotometricMeasurementJacobian( +bool MsckfVio::PhotometricMeasurementJacobian( const StateIDType& cam_state_id, const FeatureIDType& feature_id, MatrixXd& H_x, MatrixXd& H_y, VectorXd& r) @@ -1305,21 +1319,25 @@ void MsckfVio::PhotometricMeasurementJacobian( for (auto point : feature.anchorPatch_3d) { + //cout << "____feature-measurement_____\n" << endl; Eigen::Vector3d p_c0 = R_w_c0 * (point-t_c0_w); - cv::Point2f p_in_c0 = feature.projectPositionToCamera(cam_state, cam_state_id, cam0, point); - cv::Point2f p_in_anchor = feature.projectPositionToCamera(anchor_state, anchor_state_id, cam0, point); + cv::Point2f p_in_c0 = feature.projectPositionToCameraUndistorted(cam_state, cam_state_id, cam0, point); + cv::Point2f p_in_anchor = feature.projectPositionToCameraUndistorted(anchor_state, anchor_state_id, cam0, point); + if( p_in_c0.x < 0 || p_in_c0.x >= cam0.resolution(0) || p_in_c0.y < 0 || p_in_c0.y >= cam0.resolution(1) ) + { + cout << "skip" << endl; + return false; + } //add observation - photo_z.push_back(feature.PixelIrradiance(p_in_c0, frame)); - + photo_z.push_back(feature.PixelIrradiance(p_in_c0, frame)); //calculate photom. 
residual r_photo(count) = photo_z[count] - estimate_photo_z[count]; //cout << "residual: " << photo_r.back() << endl; // calculate derivation for anchor frame, use position for derivation calculation // frame derivative calculated convoluting with kernel [-1, 0, 1] - dx = feature.cvKernel(p_in_anchor, "Sobel_x"); dy = feature.cvKernel(p_in_anchor, "Sobel_y"); @@ -1329,6 +1347,7 @@ void MsckfVio::PhotometricMeasurementJacobian( dI_dhj(0, 0) = dx * cam0.intrinsics[0]; dI_dhj(0, 1) = dy * cam0.intrinsics[1]; + //dh / d{}^Cp_{ij} dh_dCpij(0, 0) = 1 / p_c0(2); dh_dCpij(1, 1) = 1 / p_c0(2); @@ -1369,6 +1388,7 @@ void MsckfVio::PhotometricMeasurementJacobian( count++; } + MatrixXd H_xl = MatrixXd::Zero(N*N, 21+state_server.cam_states.size()*7); //MatrixXd H_yl = MatrixXd::Zero(N*N, N*N+state_server.cam_states.size()+1); @@ -1419,7 +1439,7 @@ void MsckfVio::PhotometricMeasurementJacobian( //feature.VisualizeKernel(cam_state, cam_state_id, cam0); } - return; + return true; } void MsckfVio::PhotometricFeatureJacobian( @@ -1460,7 +1480,8 @@ void MsckfVio::PhotometricFeatureJacobian( MatrixXd H_yl; Eigen::VectorXd r_l = VectorXd::Zero(N*N); - PhotometricMeasurementJacobian(cam_id, feature.id, H_xl, H_yl, r_l); + if(not PhotometricMeasurementJacobian(cam_id, feature.id, H_xl, H_yl, r_l)) + continue; auto cam_state_iter = state_server.cam_states.find(cam_id); int cam_state_cntr = std::distance( @@ -1475,6 +1496,9 @@ void MsckfVio::PhotometricFeatureJacobian( } + + H_xi.conservativeResize(stack_cntr, H_xi.cols()); + H_yi.conservativeResize(stack_cntr, H_yi.cols()); // Project the residual and Jacobians onto the nullspace // of H_yj. @@ -1978,7 +2002,7 @@ void MsckfVio::removeLostFeatures() { } if(!feature.is_anchored) { - if(!feature.initializeAnchor(cam0, N)) + if(!feature.initializeAnchorUndistort(cam0, N)) { invalid_feature_ids.push_back(feature.id); continue; @@ -2144,7 +2168,7 @@ void MsckfVio::pruneLastCamStateBuffer() } if(!feature.is_anchored) { - if(!feature.initializeAnchor(cam0, N)) + if(!feature.initializeAnchorUndistort(cam0, N)) { feature.observations.erase(rm_cam_state_id); continue; @@ -2296,7 +2320,7 @@ void MsckfVio::pruneCamStateBuffer() { } if(!feature.is_anchored) { - if(!feature.initializeAnchor(cam0, N)) + if(!feature.initializeAnchorUndistort(cam0, N)) { for (const auto& cam_id : involved_cam_state_ids) feature.observations.erase(cam_id);
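Since PhotometricMeasurementJacobian now returns a bool, PhotometricFeatureJacobian above only stacks the blocks of camera states whose patch projections stay inside the image and then trims the pre-allocated stack with conservativeResize. A minimal sketch of that stacking pattern, where the std::function stands in for the per-camera measurement call and the block sizes follow the hunk (N*N rows per measurement, 21 + 7 * num_cam_states columns):

    #include <Eigen/Dense>
    #include <functional>
    #include <vector>

    // Sketch of the gating-and-stacking pattern: a block is appended only when
    // the per-camera measurement succeeds, and the stack is trimmed afterwards.
    // Each measurement is assumed to fill an (NN x cols) Jacobian block and an
    // NN-vector residual.
    void stackJacobians(int NN, int num_cam_states,
                        const std::vector<std::function<bool(Eigen::MatrixXd&,
                                                             Eigen::VectorXd&)>>& measurements,
                        Eigen::MatrixXd& H_x, Eigen::VectorXd& r)
    {
      const int cols = 21 + 7 * num_cam_states;
      H_x = Eigen::MatrixXd::Zero(NN * (int)measurements.size(), cols);
      r   = Eigen::VectorXd::Zero(NN * (int)measurements.size());

      int stack_cntr = 0;
      for (const auto& measure : measurements) {
        Eigen::MatrixXd H_xl;
        Eigen::VectorXd r_l;
        if (!measure(H_xl, r_l))                  // e.g. patch projected off-image
          continue;                               // skip this camera state entirely
        H_x.block(stack_cntr, 0, NN, cols) = H_xl;
        r.segment(stack_cntr, NN) = r_l;
        stack_cntr += NN;
      }
      H_x.conservativeResize(stack_cntr, cols);   // drop the unused rows
      r.conservativeResize(stack_cntr);
    }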