diff --git a/config/camchain-imucam-tum-scaled.yaml b/config/camchain-imucam-tum-scaled.yaml
index 4facacc..f5ea7e8 100644
--- a/config/camchain-imucam-tum-scaled.yaml
+++ b/config/camchain-imucam-tum-scaled.yaml
@@ -7,7 +7,7 @@ cam0:
   camera_model: pinhole
   distortion_coeffs: [0.0034823894022493434, 0.0007150348452162257,
     -0.0020532361418706202, 0.00020293673591811182]
-  distortion_model: equidistant
+  distortion_model: pre-equidistant
   intrinsics: [190.97847715128717, 190.9733070521226, 254.93170605935475, 256.8974428996504]
   resolution: [512, 512]
   rostopic: /cam0/image_raw
@@ -25,7 +25,7 @@ cam1:
   camera_model: pinhole
   distortion_coeffs: [0.0034003170790442797, 0.001766278153469831,
     -0.00266312569781606, 0.0003299517423931039]
-  distortion_model: equidistant
+  distortion_model: pre-equidistant
   intrinsics: [190.44236969414825, 190.4344384721956, 252.59949716835982, 254.91723064636983]
   resolution: [512, 512]
   rostopic: /cam1/image_raw
diff --git a/src/image_processor.cpp b/src/image_processor.cpp
index 0e690b7..168eebc 100644
--- a/src/image_processor.cpp
+++ b/src/image_processor.cpp
@@ -219,18 +219,18 @@ void ImageProcessor::stereoCallback(
   cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img,
       sensor_msgs::image_encodings::MONO8);
+  ros::Time start_time = ros::Time::now();
   image_handler::undistortImage(cam0_curr_img_ptr->image, cam0_curr_img_ptr->image,
       cam0.distortion_model, cam0.intrinsics, cam0.distortion_coeffs);
   image_handler::undistortImage(cam1_curr_img_ptr->image, cam1_curr_img_ptr->image,
       cam1.distortion_model, cam1.intrinsics, cam1.distortion_coeffs);
-
-
-
+  ROS_INFO("Publishing: %f",
+      (ros::Time::now()-start_time).toSec());
   // Build the image pyramids once since they're used at multiple places
   createImagePyramids();
 
   // Detect features in the first frame.
   if (is_first_img) {
-    ros::Time start_time = ros::Time::now();
+    start_time = ros::Time::now();
     initializeFirstFrame();
     //ROS_INFO("Detection time: %f",
     //    (ros::Time::now()-start_time).toSec());
@@ -243,7 +243,7 @@ void ImageProcessor::stereoCallback(
     //    (ros::Time::now()-start_time).toSec());
   } else {
     // Track the feature in the previous image.
-    ros::Time start_time = ros::Time::now();
+    start_time = ros::Time::now();
     trackFeatures();
     //ROS_INFO("Tracking time: %f",
     //    (ros::Time::now()-start_time).toSec());
@@ -273,7 +273,7 @@ void ImageProcessor::stereoCallback(
   //     (ros::Time::now()-start_time).toSec());
 
   // Publish features in the current image.
-  ros::Time start_time = ros::Time::now();
+  start_time = ros::Time::now();
   publish();
   //ROS_INFO("Publishing: %f",
   //    (ros::Time::now()-start_time).toSec());
diff --git a/src/msckf_vio.cpp b/src/msckf_vio.cpp
index e6ce390..10b9cd7 100644
--- a/src/msckf_vio.cpp
+++ b/src/msckf_vio.cpp
@@ -405,12 +405,19 @@ void MsckfVio::imageCallback(
     const sensor_msgs::ImageConstPtr& cam1_img,
     const CameraMeasurementConstPtr& feature_msg) {
 
-  // Return if the gravity vector has not been set.
-  if (!is_gravity_set) return;
-
-  // stop playing bagfile if printing images
+  cout << "hello" << endl;
+  // stop playing bagfile if printing images
   if(STREAMPAUSE)
     nh.setParam("/play_bag", false);
 
+  // Return if the gravity vector has not been set.
+  if (!is_gravity_set)
+  {
+    if(STREAMPAUSE)
+      nh.setParam("/play_bag", true);
+    return;
+  }
+
+
   // Start the system if the first image is received.
   // The frame where the first image is received will be