updated to (barely) working 3x3 patch

Raphael Maenle 2019-07-12 14:25:41 +02:00
commit 14825c344e
11 changed files with 1272 additions and 395 deletions

View File

@ -18,6 +18,8 @@ namespace msckf_vio {
struct Frame{
cv::Mat image;
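// x- and y-derivative images of the frame (presumably the Sobel gradients
// read back by CompleteCvKernel)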
cv::Mat dximage;
cv::Mat dyimage;
double exposureTime_ms;
};
@ -39,6 +41,7 @@ struct CameraCalibration{
cv::Vec4d distortion_coeffs;
movingWindow moving_window;
cv::Mat featureVisu;
int id;
};

View File

@ -184,6 +184,12 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
const CameraCalibration& cam,
Eigen::Vector3d& in_p) const;
double CompleteCvKernel(
const cv::Point2f pose,
const StateIDType& cam_state_id,
CameraCalibration& cam,
std::string type) const;
double cvKernel(
const cv::Point2f pose,
std::string type) const;
@ -202,7 +208,7 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
bool estimate_FrameIrradiance(
const CAMState& cam_state,
const StateIDType& cam_state_id,
CameraCalibration& cam0,
CameraCalibration& cam,
std::vector<double>& anchorPatch_estimate,
IlluminationParameter& estimatedIllumination) const;
@ -218,11 +224,9 @@ bool VisualizeKernel(
bool VisualizePatch(
const CAMState& cam_state,
const StateIDType& cam_state_id,
CameraCalibration& cam0,
CameraCalibration& cam,
const Eigen::VectorXd& photo_r,
std::stringstream& ss,
cv::Point2f gradientVector,
cv::Point2f residualVector) const;
std::stringstream& ss) const;
/*
* @brief AnchorPixelToPosition uses the calculated pixels
* of the anchor patch to generate 3D positions of all of them
@ -490,6 +494,23 @@ bool Feature::checkMotion(const CamStateServer& cam_states) const
else return false;
}
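// Looks up the precomputed Sobel derivative image ("Sobel_x" or "Sobel_y") of the
// given camera state in the moving window and returns the gradient at 'pose',
// normalized by 255.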
double Feature::CompleteCvKernel(
const cv::Point2f pose,
const StateIDType& cam_state_id,
CameraCalibration& cam,
std::string type) const
{
double delta = 0;
if(type == "Sobel_x")
delta = ((double)cam.moving_window.find(cam_state_id)->second.dximage.at<short>(pose.y, pose.x))/255.;
else if (type == "Sobel_y")
delta = ((double)cam.moving_window.find(cam_state_id)->second.dyimage.at<short>(pose.y, pose.x))/255.;
return delta;
}
double Feature::cvKernel(
const cv::Point2f pose,
std::string type) const
@ -528,7 +549,7 @@ return delta;
bool Feature::estimate_FrameIrradiance(
const CAMState& cam_state,
const StateIDType& cam_state_id,
CameraCalibration& cam0,
CameraCalibration& cam,
std::vector<double>& anchorPatch_estimate,
IlluminationParameter& estimated_illumination) const
{
@ -537,11 +558,11 @@ bool Feature::estimate_FrameIrradiance(
// multiply by a and add b of this frame
auto anchor = observations.begin();
if(cam0.moving_window.find(anchor->first) == cam0.moving_window.end())
if(cam.moving_window.find(anchor->first) == cam.moving_window.end())
return false;
double anchorExposureTime_ms = cam0.moving_window.find(anchor->first)->second.exposureTime_ms;
double frameExposureTime_ms = cam0.moving_window.find(cam_state_id)->second.exposureTime_ms;
double anchorExposureTime_ms = cam.moving_window.find(anchor->first)->second.exposureTime_ms;
double frameExposureTime_ms = cam.moving_window.find(cam_state_id)->second.exposureTime_ms;
double a_A = anchorExposureTime_ms;
@ -711,21 +732,20 @@ bool Feature::VisualizeKernel(
cvWaitKey(0);
}
bool Feature::VisualizePatch(
const CAMState& cam_state,
const StateIDType& cam_state_id,
CameraCalibration& cam0,
CameraCalibration& cam,
const Eigen::VectorXd& photo_r,
std::stringstream& ss,
cv::Point2f gradientVector,
cv::Point2f residualVector) const
std::stringstream& ss) const
{
double rescale = 1;
//visu - anchor
auto anchor = observations.begin();
cv::Mat anchorImage = cam0.moving_window.find(anchor->first)->second.image;
cv::Mat anchorImage = cam.moving_window.find(anchor->first)->second.image;
cv::Mat dottedFrame(anchorImage.size(), CV_8UC3);
cv::cvtColor(anchorImage, dottedFrame, CV_GRAY2RGB);
@ -737,10 +757,10 @@ bool Feature::VisualizePatch(
cv::Point ys(point.x, point.y);
cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,255));
}
cam0.featureVisu = dottedFrame.clone();
cam.featureVisu = dottedFrame.clone();
// visu - feature
cv::Mat current_image = cam0.moving_window.find(cam_state_id)->second.image;
cv::Mat current_image = cam.moving_window.find(cam_state_id)->second.image;
cv::cvtColor(current_image, dottedFrame, CV_GRAY2RGB);
// set position in frame
@ -748,7 +768,7 @@ bool Feature::VisualizePatch(
std::vector<double> projectionPatch;
for(auto point : anchorPatch_3d)
{
cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam0, point);
cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam, point);
projectionPatch.push_back(PixelIrradiance(p_in_c0, current_image));
// visu - feature
cv::Point xs(p_in_c0.x, p_in_c0.y);
@ -756,7 +776,7 @@ bool Feature::VisualizePatch(
cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,0));
}
cv::hconcat(cam0.featureVisu, dottedFrame, cam0.featureVisu);
cv::hconcat(cam.featureVisu, dottedFrame, cam.featureVisu);
// patches visualization
@ -803,11 +823,11 @@ bool Feature::VisualizePatch(
cv::putText(irradianceFrame, namer.str() , cvPoint(30, 65+scale*2*N),
cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
cv::Point2f p_f(observations.find(cam_state_id)->second(0)*cam0.intrinsics[0] + cam0.intrinsics[2],observations.find(cam_state_id)->second(1)*cam0.intrinsics[1] + cam0.intrinsics[3]);
cv::Point2f p_f(observations.find(cam_state_id)->second(0)*cam.intrinsics[0] + cam.intrinsics[2],observations.find(cam_state_id)->second(1)*cam.intrinsics[1] + cam.intrinsics[3]);
// move to real pixel coordinates
// (distortion applied; the undistorted variant is kept commented out below)
// p_f = image_handler::distortPoint(p_f, cam0.intrinsics, cam0.distortion_model, cam0.distortion_coeffs);
p_f = image_handler::distortPoint(p_f, cam.intrinsics, cam.distortion_model, cam.distortion_coeffs);
for(int i = 0; i<N; i++)
{
for(int j = 0; j<N ; j++)
@ -824,22 +844,23 @@ bool Feature::VisualizePatch(
// residual grid projection: positive residuals colored red, negative colored blue
namer.str(std::string());
namer << "residual";
std::cout << "-- photo_r -- \n" << photo_r << " -- " << std::endl;
cv::putText(irradianceFrame, namer.str() , cvPoint(30+scale*N, scale*N/2-5),
cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
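// note: photo_r appears to hold two entries per patch point (hence the 2*(i*N+j)
// indexing); the first entry selects the color sign, the second the color intensity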
for(int i = 0; i<N; i++)
for(int j = 0; j<N; j++)
if(photo_r(i*N+j)>0)
if(photo_r(2*(i*N+j))>0)
cv::rectangle(irradianceFrame,
cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
cv::Scalar(255 - photo_r(i*N+j)*255, 255 - photo_r(i*N+j)*255, 255),
cv::Scalar(255 - photo_r(2*(i*N+j)+1)*255, 255 - photo_r(2*(i*N+j)+1)*255, 255),
CV_FILLED);
else
cv::rectangle(irradianceFrame,
cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
cv::Scalar(255, 255 + photo_r(i*N+j)*255, 255 + photo_r(i*N+j)*255),
cv::Scalar(255, 255 + photo_r(2*(i*N+j)+1)*255, 255 + photo_r(2*(i*N+j)+1)*255),
CV_FILLED);
// gradient arrow
@ -860,7 +881,7 @@ bool Feature::VisualizePatch(
3);
*/
cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);
cv::hconcat(cam.featureVisu, irradianceFrame, cam.featureVisu);
/*
// visualize position of used observations and resulting feature position
@ -892,15 +913,15 @@ bool Feature::VisualizePatch(
// draw, x y position and arrow with direction - write z next to it
cv::resize(cam0.featureVisu, cam0.featureVisu, cv::Size(), rescale, rescale);
cv::resize(cam.featureVisu, cam.featureVisu, cv::Size(), rescale, rescale);
cv::hconcat(cam0.featureVisu, positionFrame, cam0.featureVisu);
cv::hconcat(cam.featureVisu, positionFrame, cam.featureVisu);
*/
// write feature position
std::stringstream pos_s;
pos_s << "u: " << observations.begin()->second(0) << " v: " << observations.begin()->second(1);
cv::putText(cam0.featureVisu, ss.str() , cvPoint(anchorImage.rows + 100, anchorImage.cols - 30),
cv::putText(cam.featureVisu, ss.str() , cvPoint(anchorImage.rows + 100, anchorImage.cols - 30),
cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
// create line?
@ -908,10 +929,10 @@ bool Feature::VisualizePatch(
std::stringstream loc;
// loc << "/home/raphael/dev/MSCKF_ws/img/feature_" << std::to_string(ros::Time::now().toSec()) << ".jpg";
//cv::imwrite(loc.str(), cam0.featureVisu);
//cv::imwrite(loc.str(), cam.featureVisu);
cv::imshow("patch", cam0.featureVisu);
cvWaitKey(0);
cv::imshow("patch", cam.featureVisu);
cvWaitKey(1);
}
float Feature::PixelIrradiance(cv::Point2f pose, cv::Mat image) const
@ -964,16 +985,33 @@ cv::Point2f Feature::projectPositionToCamera(
Eigen::Isometry3d T_c0_w;
cv::Point2f out_p;
cv::Point2f my_p;
// transform position into the camera frame
// cam0 position
Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
const Eigen::Vector3d& t_c0_w = cam_state.position;
// project point according to model
if(cam.id == 0)
{
Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);
out_p = cv::Point2f(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));
}
// if this is cam1, calculate the cam1 pose from the cam0 pose first
else if(cam.id == 1)
{
// cam1 position
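// cam1 pose from the cam0 pose via the stereo extrinsics T_cam0_cam1:
// R_w_c1 = R_c0_c1 * R_w_c0,  t_c1_w = t_c0_w - R_w_c1^T * t_c0_c1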
Eigen::Matrix3d R_c0_c1 = CAMState::T_cam0_cam1.linear();
Eigen::Matrix3d R_w_c1 = R_c0_c1 * R_w_c0;
Eigen::Vector3d t_c1_w = t_c0_w - R_w_c1.transpose()*CAMState::T_cam0_cam1.translation();
Eigen::Vector3d p_c1 = R_w_c1 * (in_p-t_c1_w);
out_p = cv::Point2f(p_c1(0)/p_c1(2), p_c1(1)/p_c1(2));
}
// convert to pixel coordinates; apply distortion according to the camera model
// unless the image is pre-undistorted ("pre-" models)
if (cam.distortion_model.substr(0,3) == "pre-")
my_p = cv::Point2f(out_p.x * cam.intrinsics[0] + cam.intrinsics[2], out_p.y * cam.intrinsics[1] + cam.intrinsics[3]);
else
@ -981,11 +1019,6 @@ cv::Point2f Feature::projectPositionToCamera(
cam.intrinsics,
cam.distortion_model,
cam.distortion_coeffs);
// printf("truPosition: %f, %f, %f\n", position.x(), position.y(), position.z());
// printf("camPosition: %f, %f, %f\n", p_c0(0), p_c0(1), p_c0(2));
// printf("Photo projection: %f, %f\n", my_p[0].x, my_p[0].y);
return my_p;
}
@ -1021,8 +1054,8 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
cv::Sobel(anchorImage_deeper, xderImage, -1, 1, 0, 3);
cv::Sobel(anchorImage_deeper, yderImage, -1, 0, 1, 3);
xderImage/=8;
yderImage/=8;
xderImage/=8.;
yderImage/=8.;
cv::convertScaleAbs(xderImage, abs_xderImage);
cv::convertScaleAbs(yderImage, abs_yderImage);
@ -1093,6 +1126,7 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
return true;
}
bool Feature::initializeRho(const CamStateServer& cam_states) {
// Organize camera poses and feature observations properly.

View File

@ -202,11 +202,42 @@ class MsckfVio {
Eigen::Vector4d& r);
// This function computes the Jacobian of all measurements viewed
// in the given camera states of this feature.
void featureJacobian(const FeatureIDType& feature_id,
void featureJacobian(
const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
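// (inferred from the signature) per-camera-state measurement Jacobian for the
// two(dot) filter variant, analogous to measurementJacobian above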
void twodotMeasurementJacobian(
const StateIDType& cam_state_id,
const FeatureIDType& feature_id,
Eigen::MatrixXd& H_x, Eigen::MatrixXd& H_y, Eigen::VectorXd& r);
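// (inferred from the signature) stacks the per-point Jacobian blocks H_rho,
// H_pl and H_pA of one feature into the combined matrices H_xl and H_yl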
bool ConstructJacobians(
Eigen::MatrixXd& H_rho,
Eigen::MatrixXd& H_pl,
Eigen::MatrixXd& H_pA,
const Feature& feature,
const StateIDType& cam_state_id,
Eigen::MatrixXd& H_xl,
Eigen::MatrixXd& H_yl);
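// (inferred from the name) computes the photometric residual of a single
// patch point of the feature in the given camera state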
bool PhotometricPatchPointResidual(
const StateIDType& cam_state_id,
const Feature& feature,
Eigen::VectorXd& r);
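// (inferred from the signature) per-patch-point Jacobians w.r.t. inverse depth
// (H_rhoj), the camera pose (H_plj) and the anchor pose (H_pAj), plus the image
// gradient term dI_dhj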
bool PhotometricPatchPointJacobian(
const CAMState& cam_state,
const StateIDType& cam_state_id,
const Feature& feature,
Eigen::Vector3d point,
int count,
Eigen::Matrix<double, 2, 1>& H_rhoj,
Eigen::Matrix<double, 2, 6>& H_plj,
Eigen::Matrix<double, 2, 6>& H_pAj,
Eigen::Matrix<double, 2, 4>& dI_dhj);
bool PhotometricMeasurementJacobian(
const StateIDType& cam_state_id,
const FeatureIDType& feature_id,
@ -214,7 +245,13 @@ class MsckfVio {
Eigen::MatrixXd& H_y,
Eigen::VectorXd& r);
void PhotometricFeatureJacobian(
void twodotFeatureJacobian(
const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
bool PhotometricFeatureJacobian(
const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
@ -222,8 +259,10 @@ class MsckfVio {
void photometricMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r);
void measurementUpdate(const Eigen::MatrixXd& H,
const Eigen::VectorXd& r);
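// (inferred from the name) EKF update variant using the two(dot) residual formulation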
void twoMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r);
bool gatingTest(const Eigen::MatrixXd& H,
const Eigen::VectorXd&r, const int& dof);
const Eigen::VectorXd&r, const int& dof, int filter=0);
void removeLostFeatures();
void findRedundantCamStates(
std::vector<StateIDType>& rm_cam_state_ids);
@ -234,7 +273,7 @@ class MsckfVio {
void onlineReset();
// Photometry flag
bool PHOTOMETRIC;
int FILTER;
// debug flag
bool STREAMPAUSE;
@ -251,6 +290,18 @@ class MsckfVio {
// Chi squared test table.
static std::map<int, double> chi_squared_test_table;
double eval_time;
IMUState timed_old_imu_state;
IMUState timed_old_true_state;
IMUState old_imu_state;
IMUState old_true_state;
// change in position and orientation
Eigen::Vector3d delta_position;
Eigen::Vector3d delta_orientation;
// State vector
StateServer state_server;
StateServer photometric_state_server;
@ -316,6 +367,7 @@ class MsckfVio {
// Subscribers and publishers
ros::Subscriber imu_sub;
ros::Subscriber truth_sub;
ros::Publisher truth_odom_pub;
ros::Publisher odom_pub;
ros::Publisher marker_pub;
ros::Publisher feature_pub;

View File

@ -27,9 +27,10 @@
<param name="ransac_threshold" value="3"/>
<param name="stereo_threshold" value="5"/>
<remap from="~imu" to="/imu0"/>
<remap from="~cam0_image" to="/cam0/image_raw"/>
<remap from="~cam1_image" to="/cam1/image_raw"/>
<remap from="~imu" to="/imu02"/>
<remap from="~cam0_image" to="/cam0/image_raw2"/>
<remap from="~cam1_image" to="/cam1/image_raw2"/>
</node>
</group>

View File

@ -17,15 +17,16 @@
args='standalone msckf_vio/MsckfVioNodelet'
output="screen">
<!-- Photometry Flag-->
<param name="PHOTOMETRIC" value="false"/>
<!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
<param name="FILTER" value="0"/>
<!-- Debugging Flags -->
<param name="StreamPause" value="false"/>
<param name="StreamPause" value="true"/>
<param name="PrintImages" value="false"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="5"/>
<param name="patch_size_n" value="3"/>
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>

View File

@ -17,15 +17,15 @@
args='standalone msckf_vio/MsckfVioNodelet'
output="screen">
<!-- Photometry Flag-->
<param name="PHOTOMETRIC" value="false"/>
<!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
<param name="FILTER" value="1"/>
<!-- Debugging Flags -->
<param name="StreamPause" value="true"/>
<param name="PrintImages" value="false"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="1"/>
<param name="patch_size_n" value="3"/>
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>

View File

@ -17,15 +17,16 @@
args='standalone msckf_vio/MsckfVioNodelet'
output="screen">
<!-- Photometry Flag-->
<param name="PHOTOMETRIC" value="false"/>
<!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
<param name="FILTER" value="1"/>
<!-- Debugging Flags -->
<param name="StreamPause" value="true"/>
<param name="PrintImages" value="false"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="1"/>
<param name="patch_size_n" value="3"/>
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>

View File

@ -40,6 +40,12 @@ void undistortImage(
cv::fisheye::undistortImage(src, dst, K, distortion_coeffs, K);
else if (distortion_model == "equidistant")
src.copyTo(dst);
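// "pre-" models: the full image is undistorted once here; downstream point
// handling then treats coordinates as already undistorted (assumption based on
// the matching "pre-equidistant"/"pre-radtan" branches in the point functions)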
else if (distortion_model == "pre-radtan")
cv::undistort(src, dst, K, distortion_coeffs, K);
else if (distortion_model == "radtan")
src.copyTo(dst);
}
void undistortPoint(
@ -91,7 +97,8 @@ void undistortPoint(
pts_out.push_back(newPoint);
}
}
else if (distortion_model == "pre-equidistant")
else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
{
std::vector<cv::Point2f> temp_pts_out;
for(int i = 0; i < pts_in.size(); i++)
@ -152,7 +159,7 @@ void undistortPoints(
pts_out.push_back(newPoint);
}
}
else if (distortion_model == "pre-equidistant")
else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
{
std::vector<cv::Point2f> temp_pts_out;
for(int i = 0; i < pts_in.size(); i++)
@ -205,7 +212,7 @@ std::vector<cv::Point2f> distortPoints(
pts_out.push_back(newPoint);
}
}
else if (distortion_model == "pre-equidistant")
else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
{
std::vector<cv::Point2f> temp_pts_out;
for(int i = 0; i < pts_in.size(); i++)
@ -262,7 +269,7 @@ cv::Point2f distortPoint(
pts_out.push_back(newPoint);
}
}
else if (distortion_model == "pre-equidistant")
else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
{
std::vector<cv::Point2f> temp_pts_out;
for(int i = 0; i < pts_in.size(); i++)

View File

@ -224,20 +224,19 @@ void ImageProcessor::stereoCallback(
cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img,
sensor_msgs::image_encodings::MONO8);
ros::Time start_time = ros::Time::now();
cv::Mat undistortedCam0;
cv::Mat undistortedCam1;
//image_handler::undistortImage(cam0_curr_img_ptr->image, cam0_curr_img_ptr->image, cam0.distortion_model, cam0.intrinsics, cam0.distortion_coeffs);
//image_handler::undistortImage(cam1_curr_img_ptr->image, cam1_curr_img_ptr->image, cam1.distortion_model, cam1.intrinsics, cam1.distortion_coeffs);
image_handler::undistortImage(cam0_curr_img_ptr->image, cam0_curr_img_ptr->image, cam0.distortion_model, cam0.intrinsics, cam0.distortion_coeffs);
image_handler::undistortImage(cam1_curr_img_ptr->image, cam1_curr_img_ptr->image, cam1.distortion_model, cam1.intrinsics, cam1.distortion_coeffs);
//ROS_INFO("Publishing: %f",
// (ros::Time::now()-start_time).toSec());
// Build the image pyramids once since they're used at multiple places
createImagePyramids();
// Detect features in the first frame.
if (is_first_img) {
ros::Time start_time = ros::Time::now();
start_time = ros::Time::now();
initializeFirstFrame();
//ROS_INFO("Detection time: %f",
// (ros::Time::now()-start_time).toSec());
@ -250,7 +249,7 @@ void ImageProcessor::stereoCallback(
// (ros::Time::now()-start_time).toSec());
} else {
// Track the feature in the previous image.
ros::Time start_time = ros::Time::now();
start_time = ros::Time::now();
trackFeatures();
//ROS_INFO("Tracking time: %f",
// (ros::Time::now()-start_time).toSec());
@ -281,8 +280,7 @@ void ImageProcessor::stereoCallback(
// (ros::Time::now()-start_time).toSec());
// Publish features in the current image.
ros::Time start_time = ros::Time::now();
start_time = ros::Time::now();
publish();
//ROS_INFO("Publishing: %f",
// (ros::Time::now()-start_time).toSec());

File diff suppressed because it is too large