added saving of the exposure time from the frame ID, where the TUM dataset stores it
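The TUM rosbags do not publish the exposure time as a separate field; it is encoded as a number in the image header's frame_id string. Below is a minimal sketch of the conversion the new manageMovingWindow code performs; the assumption (suggested by the division by 1000) is that frame_id carries the exposure in microseconds, and the sample value is made up.

#include <cstdio>
#include <cstdlib>
#include <string>

int main() {
  // Hypothetical frame_id as found in a TUM rosbag image header,
  // assumed to hold the exposure time in microseconds.
  std::string frame_id = "5000";

  // Same conversion as in manageMovingWindow: parse the string,
  // then scale microseconds down to milliseconds.
  double exposureTime_ms = std::strtod(frame_id.data(), NULL) / 1000;

  std::printf("exposure: %f ms\n", exposureTime_ms);  // prints 5.000000 ms
  return 0;
}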
@@ -30,6 +30,11 @@ struct CameraCalibration{
   cv::Vec4d distortion_coeffs;
 };
 
+struct Frame{
+  cv::Mat image;
+  double exposureTime_ms;
+};
+
 /*
  * @brief CAMState Stored camera state in order to
  *    form measurement model.
@@ -50,6 +55,7 @@ struct CAMState {
   // Position of the camera frame in the world frame.
   Eigen::Vector3d position;
 
+  // Illumination Information of the frame
   IlluminationParameter illumination;
 
   // These two variables should have the same physical
@@ -80,9 +86,9 @@ typedef std::map<StateIDType, CAMState, std::less<int>,
         Eigen::aligned_allocator<
         std::pair<const StateIDType, CAMState> > > CamStateServer;
 
-typedef std::map<StateIDType, cv::Mat, std::less<StateIDType>,
+typedef std::map<StateIDType, Frame, std::less<StateIDType>,
         Eigen::aligned_allocator<
-        std::pair<StateIDType, cv::Mat> > > movingWindow;
+        std::pair<StateIDType, Frame> > > movingWindow;
 } // namespace msckf_vio
 
 #endif // MSCKF_VIO_CAM_STATE_H
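With the typedef change above, each entry in the moving window is now a Frame that couples an image with its exposure time. The following self-contained sketch shows how such a window is filled and read back; the Eigen aligned_allocator and the real StateIDType are simplified for brevity, and the state id and values are made up.

#include <cstdio>
#include <map>
#include <opencv2/core.hpp>

// Simplified stand-ins for the types in the diff.
typedef long long StateIDType;

struct Frame {
  cv::Mat image;
  double exposureTime_ms;
};

int main() {
  std::map<StateIDType, Frame> cam0_moving_window;

  // Insert a frame under a (hypothetical) camera state id.
  StateIDType state_id = 42;
  cam0_moving_window[state_id].image = cv::Mat::zeros(480, 640, CV_8UC1);
  cam0_moving_window[state_id].exposureTime_ms = 5.0;

  // Lookup mirrors the pattern used in Feature::initializeAnchor:
  // find() returns an iterator whose ->second is the Frame.
  auto it = cam0_moving_window.find(state_id);
  if (it != cam0_moving_window.end()) {
    const cv::Mat& img = it->second.image;
    std::printf("image %dx%d, exposure %.1f ms\n",
                img.cols, img.rows, it->second.exposureTime_ms);
  }
  return 0;
}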
@@ -361,7 +361,7 @@ bool Feature::IrradianceOfAnchorPatch(
   for (auto point : anchorPatch_3d)
   {
     cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam, point);
-    uint8_t irradiance = Irradiance(p_in_c0 , cam0_moving_window.find(cam_state_id)->second);
+    uint8_t irradiance = Irradiance(p_in_c0 , cam0_moving_window.find(cam_state_id)->second.image);
     anchorPatch_measurement.push_back(irradiance);
   }
 }
@@ -425,7 +425,7 @@ bool Feature::initializeAnchor(
   if(cam0_moving_window.find(anchor->first) == cam0_moving_window.end())
     return false;
 
-  cv::Mat anchorImage = cam0_moving_window.find(anchor->first)->second;
+  cv::Mat anchorImage = cam0_moving_window.find(anchor->first)->second.image;
   auto u = anchor->second(0)*cam.intrinsics[0] + cam.intrinsics[2];
   auto v = anchor->second(1)*cam.intrinsics[1] + cam.intrinsics[3];
   int count = 0;
@@ -145,8 +145,8 @@ class MsckfVio {
         std_srvs::Trigger::Response& res);
 
     void manageMovingWindow(
-        const cv_bridge::CvImageConstPtr& cam0_image_ptr,
-        const cv_bridge::CvImageConstPtr& cam1_image_ptr,
+        const sensor_msgs::ImageConstPtr& cam0_img,
+        const sensor_msgs::ImageConstPtr& cam1_img,
         const CameraMeasurementConstPtr& feature_msg);
 
     // Filter related functions
@@ -310,12 +310,6 @@ void MsckfVio::imageCallback(
   // Return if the gravity vector has not been set.
   if (!is_gravity_set) return;
 
-    // Get the current image.
-  cv_bridge::CvImageConstPtr cam0_image_ptr = cv_bridge::toCvShare(cam0_img,
-      sensor_msgs::image_encodings::MONO8);
-  cv_bridge::CvImageConstPtr cam1_img_ptr = cv_bridge::toCvShare(cam1_img,
-      sensor_msgs::image_encodings::MONO8);
-
   // Start the system if the first image is received.
   // The frame where the first image is received will be
   // the origin.
@@ -350,7 +344,7 @@ void MsckfVio::imageCallback(
 
   // Add new images to moving window
   start_time = ros::Time::now();
-  manageMovingWindow(cam0_image_ptr, cam1_img_ptr, feature_msg);
+  manageMovingWindow(cam0_img, cam1_img, feature_msg);
   double manage_moving_window_time = (
       ros::Time::now()-start_time).toSec();
 
@@ -399,12 +393,23 @@ void MsckfVio::imageCallback(
 }
 
 void MsckfVio::manageMovingWindow(
-    const cv_bridge::CvImageConstPtr& cam0_image_ptr,
-    const cv_bridge::CvImageConstPtr& cam1_image_ptr,
+    const sensor_msgs::ImageConstPtr& cam0_img,
+    const sensor_msgs::ImageConstPtr& cam1_img,
     const CameraMeasurementConstPtr& feature_msg) {
 
-  cam0_moving_window[state_server.imu_state.id] = cam0_image_ptr->image.clone();
-  cam1_moving_window[state_server.imu_state.id] = cam1_image_ptr->image.clone();
+    //save exposure Time into moving window
+    cam0_moving_window[state_server.imu_state.id].exposureTime_ms = strtod(cam0_img->header.frame_id.data(), NULL) / 1000;
+    cam1_moving_window[state_server.imu_state.id].exposureTime_ms = strtod(cam1_img->header.frame_id.data(), NULL) / 1000;
+    printf("exposure: %f\n", cam0_moving_window[state_server.imu_state.id].exposureTime_ms);
+    // Get the current image.
+    cv_bridge::CvImageConstPtr cam0_img_ptr = cv_bridge::toCvShare(cam0_img,
+        sensor_msgs::image_encodings::MONO8);
+    cv_bridge::CvImageConstPtr cam1_img_ptr = cv_bridge::toCvShare(cam1_img,
+        sensor_msgs::image_encodings::MONO8);
+
+    // save image information into moving window
+    cam0_moving_window[state_server.imu_state.id].image = cam0_img_ptr->image.clone();
+    cam1_moving_window[state_server.imu_state.id].image = cam1_img_ptr->image.clone();
 
   //TODO handle any massive overflow correctly (should be pruned, before this ever triggers)
   while(cam0_moving_window.size() > 100)
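The last hunk ends inside the overflow guard, so the body of the while loop is not shown here. A hedged sketch of what erasing the oldest entries from such a window could look like follows; the 100-frame cap is taken from the diff, while the erase-from-the-front strategy is an assumption, not the commit's code (std::map iterates in ascending key order, so begin() holds the smallest, i.e. oldest, state id).

#include <cstddef>
#include <map>

// Hedged sketch of the pruning the TODO refers to: once the window holds
// more than max_size frames, drop the oldest ones. This is an assumption
// about the intended strategy, not code from the diff.
template <typename Window>
void pruneMovingWindow(Window& window, std::size_t max_size = 100) {
  while (window.size() > max_size)
    window.erase(window.begin());
}

int main() {
  std::map<long long, int> window;  // stand-in for the Frame moving window
  for (long long id = 0; id < 150; ++id)
    window[id] = 0;
  pruneMovingWindow(window);        // keeps the 100 newest entries
  return 0;
}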