Fixed incorrect undistortion/distortion. The residual should now be calculated correctly.

Raphael Maenle 2019-04-18 16:22:41 +02:00
parent d91ff7ca9d
commit 1fa2518215
4 changed files with 120 additions and 48 deletions
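
The gist of the fix: a feature's 3D world position is first projected into the camera frame with the pinhole model and only afterwards pushed through the lens distortion, via the new single-point image_handler::distortPoint helper, rather than mixing undistortion and distortion steps. Below is a minimal sketch of that flow, assuming a CameraCalibration-like struct with the intrinsics, distortion_model and distortion_coeffs fields used in the diff (the free function name is made up for illustration):

#include <Eigen/Dense>
#include <opencv2/core/core.hpp>

// CameraCalibration is assumed to provide intrinsics, distortion_model and
// distortion_coeffs, as used throughout the diff below.
cv::Point2f worldPointToDistortedPixel(
    const Eigen::Matrix3d& R_w_c0,      // rotation from world to camera frame
    const Eigen::Vector3d& t_c0_w,      // camera position in the world frame
    const Eigen::Vector3d& p_w,         // 3d feature position in the world frame
    const CameraCalibration& cam)
{
  // transform the world point into the camera frame
  Eigen::Vector3d p_c0 = R_w_c0 * (p_w - t_c0_w);
  // pinhole projection onto the normalized image plane
  cv::Point2f normalized(p_c0(0) / p_c0(2), p_c0(1) / p_c0(2));
  // apply the lens model to obtain the distorted pixel position
  return image_handler::distortPoint(normalized, cam.intrinsics,
                                     cam.distortion_model, cam.distortion_coeffs);
}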

View File

@@ -144,7 +144,12 @@ struct Feature {
inline bool initializePosition(
const CamStateServer& cam_states);
/*
* @brief projectPositionToCamera Takes a 3d position in the world frame
* and projects it into the passed camera frame using pinhole projection,
* then distorts it using the camera's distortion model to get
* the resulting distorted pixel position
*/
inline cv::Point2f projectPositionToCamera(
const CAMState& cam_state,
const StateIDType& cam_state_id,
@@ -212,12 +217,15 @@ inline Eigen::Vector3d projectPixelToPosition(cv::Point2f in_p,
Eigen::Vector3d position;
// inverse depth representation
double rho;
double anchor_rho;
// An indicator to show if the 3d position of the feature
// has been initialized or not.
bool is_initialized;
bool is_anchored;
cv::Point2f anchor_center_pos;
cv::Point2f undist_anchor_center_pos;
// Noise for a normalized feature measurement.
static double observation_noise;
@@ -397,14 +405,24 @@ bool Feature::FrameIrradiance(
const movingWindow& cam0_moving_window,
std::vector<float>& anchorPatch_measurement) const
{
//project every point in anchorPatch_3d.
if(!is_anchored)
printf("not anchored!\n");
// project every point in anchorPatch_3d.
// int count = 0;
for (auto point : anchorPatch_3d)
{
// testing
//if(cam_state_id == observations.begin()->first)
//if(count == 4)
//printf("\n\ncenter:\n");
cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam0, point);
float irradiance = PixelIrradiance(p_in_c0, cam0_moving_window.find(cam_state_id)->second.image);
anchorPatch_measurement.push_back(irradiance);
// testing
//if(cam_state_id == observations.begin()->first)
//if(count++ == 4)
//printf("dist:\n \tpos: %f, %f\n\ttrue pos: %f, %f\n\n", p_in_c0.x, p_in_c0.y, anchor_center_pos.x, anchor_center_pos.y);
}
}
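
For context on the residual mentioned in the commit message: FrameIrradiance above samples, for every 3D point of the anchor patch, the irradiance observed in the current camera frame. A hypothetical sketch of how a per-pixel photometric residual could then be formed, assuming anchorPatch holds the irradiance values sampled at the anchor frame (names follow the diff; the residual code itself is not part of this commit):

// hypothetical residual, not part of this commit: one entry per patch pixel,
// irradiance re-projected into the current frame minus the anchor-frame value
std::vector<double> photometric_residual;
for (size_t i = 0; i < anchorPatch.size(); ++i)
  photometric_residual.push_back(anchorPatch_measurement[i] - anchorPatch[i]);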
@@ -429,18 +447,20 @@ cv::Point2f Feature::projectPositionToCamera(
Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);
out_p = cv::Point2f(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));
std::vector<cv::Point2f> out_v;
out_v.push_back(out_p);
std::vector<cv::Point2f> my_p = image_handler::distortPoints(out_v,
cam.intrinsics,
cam.distortion_model,
cam.distortion_coeffs);
// if(cam_state_id == observations.begin()->first)
//printf("undist:\n \tproj pos: %f, %f\n\ttrue pos: %f, %f\n", out_p.x, out_p.y, undist_anchor_center_pos.x, undist_anchor_center_pos.y);
cv::Point2f my_p = image_handler::distortPoint(out_p,
cam.intrinsics,
cam.distortion_model,
cam.distortion_coeffs);
// printf("truPosition: %f, %f, %f\n", position.x(), position.y(), position.z());
// printf("camPosition: %f, %f, %f\n", p_c0(0), p_c0(1), p_c0(2));
// printf("Photo projection: %f, %f\n", my_p[0].x, my_p[0].y);
return my_p[0];
return my_p;
}
Eigen::Vector3d Feature::projectPixelToPosition(cv::Point2f in_p,
@@ -450,7 +470,7 @@ Eigen::Vector3d Feature::projectPixelToPosition(cv::Point2f in_p,
// project it back into 3D space using pinhole model
// save resulting NxN positions for this feature
Eigen::Vector3d PositionInCamera(in_p.x/rho, in_p.y/rho, 1/rho);
Eigen::Vector3d PositionInCamera(in_p.x/anchor_rho, in_p.y/anchor_rho, 1/anchor_rho);
Eigen::Vector3d PositionInWorld= T_anchor_w.linear()*PositionInCamera + T_anchor_w.translation();
return PositionInWorld;
//printf("%f, %f, %f\n",PositionInWorld[0], PositionInWorld[1], PositionInWorld[2]);
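
The change above swaps rho for the new anchor_rho, so the back-projection always uses the inverse depth estimated at the anchor frame. A minimal standalone sketch of the same inverse-depth back-projection, wrapped in an illustrative free function for clarity:

// illustrative free function, not part of the codebase
Eigen::Vector3d backProjectAnchorPoint(
    const cv::Point2f& p,                // normalized point in the anchor camera frame
    double anchor_rho,                   // inverse depth estimated at the anchor
    const Eigen::Isometry3d& T_anchor_w)
{
  // a normalized point (u, v) with inverse depth rho lies at
  // (u/rho, v/rho, 1/rho) in the anchor camera frame
  Eigen::Vector3d p_anchor(p.x / anchor_rho, p.y / anchor_rho, 1.0 / anchor_rho);
  // move it from the anchor camera frame into the world frame
  return T_anchor_w.linear() * p_anchor + T_anchor_w.translation();
}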
@@ -473,34 +493,56 @@ bool Feature::initializeAnchor(
return false;
cv::Mat anchorImage = cam0_moving_window.find(anchor->first)->second.image;
auto u = anchor->second(0)*cam.intrinsics[0] + cam.intrinsics[2];
auto v = anchor->second(1)*cam.intrinsics[1] + cam.intrinsics[3];
printf("initializing anchor\n");
if(u - n < 0 || u + n >= cam.resolution(0) || v - n < 0 || v + n >= cam.resolution(1))
{
printf("no good: \n");
printf("%f, %f\n", u, v);
return false;
}
auto u = anchor->second(0);//*cam.intrinsics[0] + cam.intrinsics[2];
auto v = anchor->second(1);//*cam.intrinsics[1] + cam.intrinsics[3];
//testing
undist_anchor_center_pos = cv::Point2f(u,v);
//for NxN patch pixels around feature
for(double u_run = u - n; u_run <= u + n; u_run = u_run + 1)
int count = 0;
// get feature in undistorted pixel space
cv::Point2f und_pix_p = image_handler::distortPoint(cv::Point2f(u, v),
cam.intrinsics,
cam.distortion_model,
0);
// create vector of patch in pixel plane
std::vector<cv::Point2f> und_pix_v;
for(double u_run = -n; u_run <= n; u_run++)
for(double v_run = -n; v_run <= n; v_run++)
und_pix_v.push_back(cv::Point2f(und_pix_p.x-u_run, und_pix_p.y-v_run));
//create undistorted, normalized points
std::vector<cv::Point2f> und_v;
image_handler::undistortPoints(und_pix_v,
cam.intrinsics,
cam.distortion_model,
0,
und_v);
//create distorted pixel points
std::vector<cv::Point2f> vec = image_handler::distortPoints(und_v,
cam.intrinsics,
cam.distortion_model,
cam.distortion_coeffs);
anchor_center_pos = vec[4];
for(auto point : vec)
{
for(double v_run = v - n; v_run <= v + n; v_run = v_run + 1)
if(point.x - n < 0 || point.x + n >= cam.resolution(0) || point.y - n < 0 || point.y + n >= cam.resolution(1))
{
printf("ADDING\n");
// add irradiance information
anchorPatch.push_back((double)anchorImage.at<uint8_t>((int)u_run,(int)v_run));
// project patch pixel to 3D space
auto intr = cam.intrinsics;
cv::Point2f currentPoint((u_run-intr[2])/intr[0], (v_run-intr[3])/intr[1]);
Eigen::Vector3d Npose = projectPixelToPosition(currentPoint, cam);
//save position
anchorPatch_3d.push_back(Npose);
printf("no good\n");
return false;
}
}
printf("set to true!!!\n");
for(auto point : vec)
anchorPatch.push_back((double)anchorImage.at<uint8_t>((int)point.x,(int)point.y));
// project patch pixel to 3D space
for(auto point : und_v)
anchorPatch_3d.push_back(projectPixelToPosition(point, cam));
is_anchored = true;
return true;
}
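
A note on the rewritten initializeAnchor: the anchor observation arrives as an undistorted, normalized point, so distortPoint is first called with zero distortion coefficients, which reduces it to a pure normalized-to-pixel conversion (only the intrinsic matrix K is applied). The NxN patch grid is then built in undistorted pixel space, undistorted back to normalized coordinates for the 3D back-projection, and finally distorted with the real coefficients for the bounds check and the irradiance lookup. A small sketch of the zero-coefficient conversions, assuming the image_handler helpers this diff already calls:

// with all distortion coefficients zero, distortPoint only applies K:
// normalized point -> undistorted pixel position
cv::Vec4d zero_distortion(0.0, 0.0, 0.0, 0.0);
cv::Point2f und_pix = image_handler::distortPoint(
    cv::Point2f(u, v), cam.intrinsics, cam.distortion_model, zero_distortion);

// the inverse direction (undistorted pixel -> normalized point) goes through
// undistortPoints, again with zero coefficients, i.e. only K^-1 is applied
std::vector<cv::Point2f> normalized;
image_handler::undistortPoints({und_pix}, cam.intrinsics,
                               cam.distortion_model, zero_distortion, normalized);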
@@ -645,7 +687,7 @@ bool Feature::initializePosition(
}
//save inverse depth distance from camera
rho = solution(2);
anchor_rho = solution(2);
// Convert the feature position to the world frame.
position = T_c0_w.linear()*final_position + T_c0_w.translation();

View File

@@ -30,6 +30,12 @@ std::vector<cv::Point2f> distortPoints(
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs);
cv::Point2f distortPoint(
const cv::Point2f& pt_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs);
}
}
#endif

View File

@@ -81,5 +81,38 @@ std::vector<cv::Point2f> distortPoints(
return pts_out;
}
cv::Point2f distortPoint(
const cv::Point2f& pt_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs) {
std::vector<cv::Point2f> pts_in;
pts_in.push_back(pt_in);
const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
0.0, intrinsics[1], intrinsics[3],
0.0, 0.0, 1.0);
std::vector<cv::Point2f> pts_out;
if (distortion_model == "radtan") {
std::vector<cv::Point3f> homogenous_pts;
cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
distortion_coeffs, pts_out);
} else if (distortion_model == "equidistant") {
cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
} else {
ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
distortion_model.c_str());
std::vector<cv::Point3f> homogenous_pts;
cv::convertPointsToHomogeneous(pts_in, homogenous_pts);
cv::projectPoints(homogenous_pts, cv::Vec3d::zeros(), cv::Vec3d::zeros(), K,
distortion_coeffs, pts_out);
}
return pts_out[0];
}
} // namespace image_handler
} // namespace msckf_vio
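
A quick usage sketch of the new single-point helper; the intrinsics and radtan coefficients below are example numbers, not values from this repository:

cv::Vec4d intrinsics(458.654, 457.296, 367.215, 248.375);  // fx, fy, cx, cy (example values)
cv::Vec4d dist_coeffs(-0.28, 0.07, 0.0002, 1.8e-05);       // radtan k1, k2, p1, p2 (example values)
cv::Point2f normalized(0.1f, -0.05f);                      // undistorted, normalized image point
cv::Point2f pixel = msckf_vio::image_handler::distortPoint(
    normalized, intrinsics, "radtan", dist_coeffs);
// pixel now holds the distorted pixel position of the normalized point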

View File

@@ -929,15 +929,6 @@ void MsckfVio::PhotometricMeasurementJacobian(
std::vector<cv::Point2f> out_v;
out_v.push_back(out_p);
/*
//prints the feature projection in pixel space
std::vector<cv::Point2f> my_p = image_handler::distortPoints( out_v,
cam0.intrinsics,
cam0.distortion_model,
cam0.distortion_coeffs);
printf("projection: %f, %f\n", my_p[0].x, my_p[0].y);
*/
// Compute the Jacobians.
Matrix<double, 4, 3> dz_dpc0 = Matrix<double, 4, 3>::Zero();
dz_dpc0(0, 0) = 1 / p_c0(2);
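
For reference, the entry shown above is the (0,0) element of the standard Jacobian of the normalized pinhole projection (x/z, y/z) with respect to the camera-frame point p_c0; the full 2x3 block looks like the sketch below (written out here for context, not copied from this file):

// standard pinhole projection Jacobian, shown for context only
Eigen::Matrix<double, 2, 3> dz_dpc;
dz_dpc << 1 / p_c0(2), 0.0,          -p_c0(0) / (p_c0(2) * p_c0(2)),
          0.0,         1 / p_c0(2),  -p_c0(1) / (p_c0(2) * p_c0(2));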
@@ -1025,7 +1016,7 @@ void MsckfVio::PhotometricFeatureJacobian(
VectorXd r_j = VectorXd::Zero(jacobian_row_size);
int stack_cntr = 0;
printf("_____FEATURE:_____\n");
for (const auto& cam_id : valid_cam_state_ids) {
Matrix<double, 4, 6> H_xi = Matrix<double, 4, 6>::Zero();