added visualization with a ROS flag, which shows the feature with its projection and residual (the feature's apparent movement)

2019-04-30 17:02:22 +02:00
parent 750d8ecfb1
commit cf40ce8afb
4 changed files with 106 additions and 63 deletions


@@ -171,8 +171,8 @@ struct Feature {
bool VisualizePatch(
const CAMState& cam_state,
const StateIDType& cam_state_id,
CameraCalibration& cam0) const;
CameraCalibration& cam0,
const std::vector<double> photo_r) const;
/*
* @brief projectPixelToPosition uses the calculated pixels
* of the anchor patch to generate 3D positions of all of them
@@ -204,6 +204,7 @@ inline Eigen::Vector3d projectPixelToPosition(cv::Point2f in_p,
// NxN Patch of Anchor Image
std::vector<double> anchorPatch;
std::vector<cv::Point2f> anchorPatch_ideal;
std::vector<cv::Point2f> anchorPatch_real;
// Position of NxN Patch in 3D space
std::vector<Eigen::Vector3d> anchorPatch_3d;
@@ -408,43 +409,104 @@ bool Feature::estimate_FrameIrradiance(
bool Feature::VisualizePatch(
const CAMState& cam_state,
const StateIDType& cam_state_id,
CameraCalibration& cam0) const
CameraCalibration& cam0,
const std::vector<double> photo_r) const
{
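// photo_r carries the per-pixel photometric residual of this patch; it is drawn further below as the red/blue residual grid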
double rescale = 1;
//visu - anchor
auto anchor = observations.begin();
cv::Mat anchorImage = cam0.moving_window.find(anchor->first)->second.image;
cv::Mat dottedFrame(anchorImage.size(), CV_8UC3);
cv::cvtColor(anchorImage, dottedFrame, CV_GRAY2RGB);
for(auto point : anchorPatch_real)
{
// visu - feature
cv::Point xs(point.x, point.y);
cv::Point ys(point.x, point.y);
cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,0));
}
cam0.featureVisu = dottedFrame.clone();
// visu - feature
cv::Mat current_image = cam0.moving_window.find(cam_state_id)->second.image;
cv::Mat dottedFrame(current_image.size(), CV_8UC3);
cv::cvtColor(current_image, dottedFrame, CV_GRAY2RGB);
// project every point in anchorPatch_3d.
auto frame = cam0.moving_window.find(cam_state_id)->second.image;
// set position in frame
// save irradiance of projection
std::vector<double> projectionPatch;
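// projectionPatch collects the irradiance of every anchor-patch point as observed in the current frame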
for(auto point : anchorPatch_3d)
{
cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam0, point);
projectionPatch.push_back(PixelIrradiance(p_in_c0, current_image));
// visu - feature
cv::Point xs(p_in_c0.x, p_in_c0.y);
cv::Point ys(p_in_c0.x, p_in_c0.y);
cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,0));
}
// testing
//if(cam_state_id == observations.begin()->first)
//if(count++ == 4)
//printf("dist:\n \tpos: %f, %f\n\ttrue pos: %f, %f\n\n", p_in_c0.x, p_in_c0.y, anchor_center_pos.x, anchor_center_pos.y);
// visu - feature
cv::resize(dottedFrame, dottedFrame, cv::Size(dottedFrame.cols*0.2, dottedFrame.rows*0.2));
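// shrink the per-frame view before appending it to the horizontally concatenated visualization strip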
if(cam0.featureVisu.empty())
cam0.featureVisu = dottedFrame.clone();
else
cv::hconcat(cam0.featureVisu, dottedFrame, cam0.featureVisu);
// irradiance grid anchor
int N = sqrt(anchorPatch_3d.size());
int scale = 20;
cv::Mat irradianceFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255));
cv::resize(irradianceFrame, irradianceFrame, cv::Size(), rescale, rescale);
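// grid layout (inferred from the drawing offsets): anchor patch top left, projected patch directly below it, residual grid to the right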
for(int i = 0; i<N; i++)
for(int j = 0; j<N; j++)
cv::rectangle(irradianceFrame,
cv::Point(10+scale*(i+1), 10+scale*j),
cv::Point(10+scale*i, 10+scale*(j+1)),
cv::Scalar(anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255),
CV_FILLED);
// irradiance grid projection
for(int i = 0; i<N; i++)
for(int j = 0; j<N ; j++)
cv::rectangle(irradianceFrame,
cv::Point(10+scale*(i+1), 20+scale*(N+j)),
cv::Point(10+scale*(i), 20+scale*(N+j+1)),
cv::Scalar(projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255),
CV_FILLED);
// residual grid projection, positive - red, negative - blue colored
for(int i = 0; i<N; i++)
for(int j = 0; j<N; j++)
if(photo_r[i*N+j]>0)
cv::rectangle(irradianceFrame,
cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)),
cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)),
cv::Scalar(255 - photo_r[i*N+j]*255, 255 - photo_r[i*N+j]*255, 255),
CV_FILLED);
else
cv::rectangle(irradianceFrame,
cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)),
cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)),
cv::Scalar(255, 255 + photo_r[i*N+j]*255, 255 + photo_r[i*N+j]*255),
CV_FILLED);
cv::resize(cam0.featureVisu, cam0.featureVisu, cv::Size(), rescale, rescale);
cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);
// create line?
//save image
std::stringstream ss;
ss << "/home/raphael/dev/MSCKF_ws/img/feature_" << std::to_string(ros::Time::now().toSec()) << ".jpg";
cv::imwrite(ss.str(), cam0.featureVisu);
//cv::imshow("patch", cam0.featureVisu);
//cvWaitKey(1);
}
float Feature::PixelIrradiance(cv::Point2f pose, cv::Mat image) const
{
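// cv::Mat::at expects (row, column), i.e. (y, x); dividing by 255 maps the 8-bit intensity into [0, 1]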
return ((float)image.at<uint8_t>(pose.x, pose.y))/256;
return ((float)image.at<uint8_t>(pose.y, pose.x))/255;
}
cv::Point2f Feature::projectPositionToCamera(
@@ -514,16 +576,16 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
int count = 0;
// get feature in undistorted pixel space
// this only reverts from 'pure' space into undistorted pixel space using camera matrix
cv::Point2f und_pix_p = image_handler::distortPoint(cv::Point2f(u, v),
cam.intrinsics,
cam.distortion_model,
0);
// create vector of patch in pixel plane
std::vector<cv::Point2f> und_pix_v;
std::vector<cv::Point2f>und_pix_v;
for(double u_run = -n; u_run <= n; u_run++)
for(double v_run = -n; v_run <= n; v_run++)
und_pix_v.push_back(cv::Point2f(und_pix_p.x-u_run, und_pix_p.y-v_run));
und_pix_v.push_back(cv::Point2f(und_pix_p.x+u_run, und_pix_p.y+v_run));
//create undistorted pure points
@@ -533,23 +595,24 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
cam.distortion_model,
0,
und_v);
//create distorted pixel points
std::vector<cv::Point2f> vec = image_handler::distortPoints(und_v,
cam.intrinsics,
cam.distortion_model,
cam.distortion_coeffs);
anchorPatch_real = image_handler::distortPoints(und_v,
cam.intrinsics,
cam.distortion_model,
cam.distortion_coeffs);
// save anchor position for later visualisation
anchor_center_pos = vec[4];
anchor_center_pos = anchorPatch_real[(N*N-1)/2];
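// for odd N, (N*N-1)/2 is the row-major index of the centre pixel of the N x N patch (e.g. 12 for N = 5)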
// save true pixel patch position
for(auto point : vec)
for(auto point : anchorPatch_real)
{
if(point.x - n < 0 || point.x + n >= cam.resolution(0) || point.y - n < 0 || point.y + n >= cam.resolution(1))
return false;
}
for(auto point : vec)
for(auto point : anchorPatch_real)
anchorPatch.push_back(PixelIrradiance(point, anchorImage));
// project patch pixel to 3D space


@@ -202,7 +202,9 @@ class MsckfVio {
void onlineReset();
// Photometry flag
// visualization flag
bool PHOTOMETRIC;
bool PRINTIMAGES;
bool nan_flag;
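
Note: the following is a minimal sketch (not part of this commit) of how the PRINTIMAGES flag could be read from the ROS parameter server and used to gate the patch visualization; the parameter name "PrintImages", the helper function and the call site are hypothetical.

#include <ros/ros.h>

// Hypothetical helper: reads the visualization flag from the parameter
// server; the parameter name "PrintImages" is an assumption.
static bool loadPrintImagesFlag(ros::NodeHandle& nh)
{
  bool print_images = false;
  nh.param<bool>("PrintImages", print_images, false);
  return print_images;
}

// Hypothetical call site (shown as a comment because feature, cam_state,
// cam_state_id, cam0 and photo_r live in the filter's scope):
//
//   if (PRINTIMAGES)
//     feature.VisualizePatch(cam_state, cam_state_id, cam0, photo_r);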