added position calculation

This commit is contained in:
2019-04-12 17:37:01 +02:00
parent a85a4745f2
commit 8227a8e48d
8 changed files with 241 additions and 20 deletions

View File

@ -16,6 +16,8 @@
#include <Eigen/Geometry>
#include <Eigen/StdVector>
#include "image_handler.h"
#include "math_utils.hpp"
#include "imu_state.h"
#include "cam_state.h"
@ -123,8 +125,11 @@ struct Feature {
* @return True if the Anchor can be estimated
*/
inline bool initializeAnchor(
const movingWindow& cam0_moving_window);
bool initializeAnchor(
const movingWindow& cam0_moving_window,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs);
/*
@ -141,6 +146,15 @@ struct Feature {
inline bool initializePosition(
const CamStateServer& cam_states);
/*
* @brief projectPixelToPosition uses the calculated pixels
* of the anchor patch to generate 3D positions for all of them
*/
bool projectPixelToPosition(cv::Point2f in_p,
Eigen::Vector3d& out_p,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs);
// An unique identifier for the feature.
// In case of long time running, the variable
@ -159,9 +173,16 @@ struct Feature {
// NxN Patch of Anchor Image
std::vector<double> anchorPatch;
// Position of NxN Patch in 3D space
std::vector<Eigen::Vector3d> anchorPatch_3d;
// 3d position of the feature in the world frame.
Eigen::Vector3d position;
// inverse depth representation
double rho;
// An indicator to show if the 3d position of the feature
// has been initialized or not.
bool is_initialized;
@ -305,8 +326,24 @@ bool Feature::checkMotion(
else return false;
}
/*
 * @brief projectPixelToPosition back-projects one (normalized) pixel
 *        coordinate of the anchor patch into 3D space using this
 *        feature's inverse-depth estimate.
 *
 * @param in_p             Normalized image coordinate (x, y) in the
 *                         anchor camera frame.
 * @param out_p            [out] Resulting 3D point in the anchor
 *                         camera frame.
 * @param intrinsics       Camera intrinsics (fx, fy, cx, cy) — unused
 *                         here; kept for interface symmetry.
 * @param distortion_model Distortion model name — currently unused.
 * @param distortion_coeffs Distortion coefficients — currently unused.
 * @return True on success.
 */
bool Feature::projectPixelToPosition(cv::Point2f in_p,
      Eigen::Vector3d& out_p,
      const cv::Vec4d& intrinsics,
      const std::string& distortion_model,
      const cv::Vec4d& distortion_coeffs)
{
  // Inverse-depth pinhole back-projection: depth = 1/rho, so the
  // 3D point is (x/rho, y/rho, 1/rho).
  // NOTE(review): distortion is not applied here; in_p is assumed to
  // be already undistorted/normalized — TODO confirm with callers.
  out_p = Eigen::Vector3d(in_p.x/rho, in_p.y/rho, 1/rho);

  // Accumulate the resulting NxN patch position for this feature.
  anchorPatch_3d.push_back(out_p);

  // Debug output of the back-projected point (left in from the
  // original; same values as before).
  printf("%f, %f, %f\n", out_p(0), out_p(1), out_p(2));

  // Fix: the original fell off the end of a non-void function
  // (undefined behavior) and never wrote the out_p output parameter.
  return true;
}
bool Feature::initializeAnchor(
const movingWindow& cam0_moving_window)
const movingWindow& cam0_moving_window,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs)
{
int N = 5;
@ -317,15 +354,25 @@ bool Feature::initializeAnchor(
return false;
cv::Mat anchorImage = cam0_moving_window.find(anchor->first)->second;
auto u = anchor->second(0)*anchorImage.rows/2 + anchorImage.rows/2;
auto v = anchor->second(1)*anchorImage.cols/2 + anchorImage.cols/2;
auto u = anchor->second(0)*intrinsics[0] + intrinsics[2];
auto v = anchor->second(1)*intrinsics[1] + intrinsics[3];
int count = 0;
for(int u_run = (int)u - n; u_run <= (int)u + n; u_run++)
for(int v_run = v - n; v_run <= v + n; v_run++)
anchorPatch.push_back(anchorImage.at<uint8_t>(u_run,v_run));
return true;
printf("estimated NxN position: \n");
for(double u_run = u - n; u_run <= u + n; u_run = u_run + 1)
{
for(double v_run = v - n; v_run <= v + n; v_run = v_run + 1)
{
anchorPatch.push_back(anchorImage.at<uint8_t>((int)u_run,(int)v_run));
Eigen::Vector3d Npose;
projectPixelToPosition(cv::Point2f((u_run-intrinsics[2])/intrinsics[0], (v_run-intrinsics[1])/intrinsics[3]),
Npose,
intrinsics,
distortion_model,
distortion_coeffs);
}
}
return true;
}
bool Feature::initializePosition(
@ -465,6 +512,9 @@ bool Feature::initializePosition(
}
}
//save inverse depth distance from camera
rho = solution(2);
// Convert the feature position to the world frame.
position = T_c0_w.linear()*final_position + T_c0_w.translation();