added projection into the camera states of the feature observations
@@ -146,16 +146,46 @@ struct Feature {
  inline bool initializePosition(
      const CamStateServer& cam_states);

  inline cv::Point2f projectPositionToCamera(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      const cv::Vec4d& intrinsics,
      const std::string& distortion_model,
      const cv::Vec4d& distortion_coeffs,
      Eigen::Vector3d& in_p) const;

  /*
   * @brief IrradianceOfAnchorPatch returns the irradiance values
   * of the anchor patch positions in a camera frame
   */
  bool IrradianceOfAnchorPatch(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      const cv::Vec4d& intrinsics,
      const std::string& distortion_model,
      const cv::Vec4d& distortion_coeffs,
      const movingWindow& cam0_moving_window,
      std::vector<uint8_t>& anchorPatch_measurement) const;

  /*
   * @brief projectPixelToPosition uses the calculated pixels
   * of the anchor patch to generate 3D positions for all of them
   */
-  bool Feature::projectPixelToPosition(cv::Point2f in_p,
+  inline bool projectPixelToPosition(cv::Point2f in_p,
      Eigen::Vector3d& out_p,
      const cv::Vec4d& intrinsics,
      const std::string& distortion_model,
      const cv::Vec4d& distortion_coeffs);

  /*
   * @brief Irradiance returns the irradiance value of a pixel
   */
  inline uint8_t Irradiance(cv::Point2f pose, cv::Mat image) const;

  // A unique identifier for the feature.
  // In case of long time running, the variable
  // type of id is set to FeatureIDType in order
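
A stripped-down sketch of how these declarations hang together, assuming the two patch members the implementation below pushes into (anchorPatch for the raw anchor intensities, anchorPatch_3d for the back-projected patch points); the real struct Feature carries more state than shown, and FeaturePatchSketch is a hypothetical name used only here:

#include <cstdint>
#include <vector>
#include <Eigen/Dense>

// Hypothetical, reduced view of the feature's photometric state.
struct FeaturePatchSketch {
  // raw 8-bit intensities of the N x N patch in the anchor image
  std::vector<uint8_t> anchorPatch;
  // the same patch back-projected to 3D world points via projectPixelToPosition
  std::vector<Eigen::Vector3d> anchorPatch_3d;
};

int main() {
  FeaturePatchSketch f;
  // initializeAnchor fills both vectors once from the anchor observation;
  // IrradianceOfAnchorPatch later re-projects anchorPatch_3d into any other
  // camera state and samples a matching intensity vector from that image.
  f.anchorPatch.resize(9, 0);
  f.anchorPatch_3d.resize(9, Eigen::Vector3d::Zero());
  return 0;
}
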
@@ -329,6 +359,61 @@ bool Feature::checkMotion(
  else return false;
}

bool Feature::IrradianceOfAnchorPatch(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,
    const cv::Vec4d& intrinsics,
    const std::string& distortion_model,
    const cv::Vec4d& distortion_coeffs,
    const movingWindow& cam0_moving_window,
    std::vector<uint8_t>& anchorPatch_measurement) const
{
  // project every point in anchorPatch_3d into this camera frame
  // and sample its irradiance from the corresponding image
  for (auto point : anchorPatch_3d)
  {
    cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id,
        intrinsics, distortion_model, distortion_coeffs, point);
    uint8_t irradiance = Irradiance(p_in_c0, cam0_moving_window.find(cam_state_id)->second);
    anchorPatch_measurement.push_back(irradiance);
  }
  return true;
}

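The lookup cam0_moving_window.find(cam_state_id)->second above dereferences the iterator unconditionally; if the camera state has already been dropped from the moving window, find() returns the end iterator and the dereference is undefined behaviour. A minimal sketch of a checked lookup, assuming the moving window behaves like a map from state id to image (the movingWindow typedef itself is not shown in this diff; MovingWindowSketch and lookupImage are hypothetical names):

#include <cstdio>
#include <map>
#include <opencv2/core.hpp>

// Hypothetical stand-in for the movingWindow type: state id -> camera image.
using MovingWindowSketch = std::map<long long, cv::Mat>;

// Returns true and writes the image only if the state id is present.
bool lookupImage(const MovingWindowSketch& window, long long cam_state_id, cv::Mat& out)
{
  auto it = window.find(cam_state_id);
  if (it == window.end())
    return false;            // state already dropped from the window
  out = it->second;
  return true;
}

int main()
{
  MovingWindowSketch window;
  window[42] = cv::Mat::zeros(4, 4, CV_8UC1);

  cv::Mat img;
  std::printf("id 42 found: %d\n", lookupImage(window, 42, img));  // 1
  std::printf("id 7  found: %d\n", lookupImage(window, 7, img));   // 0
}
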
uint8_t Feature::Irradiance(cv::Point2f pose, cv::Mat image) const
{
  // cv::Mat::at expects (row, col), i.e. (y, x)
  return image.at<uint8_t>((int)pose.y, (int)pose.x);
}

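cv::Mat::at is indexed as (row, col), i.e. (y, x), which is why the sampling above uses pose.y before pose.x. A small self-contained check of the ordering:

#include <cstdio>
#include <cstdint>
#include <opencv2/core.hpp>

int main()
{
  // 3 rows x 5 columns, zero-initialised 8-bit image.
  cv::Mat img = cv::Mat::zeros(3, 5, CV_8UC1);

  // Write the pixel at x = 4 (column), y = 2 (row).
  cv::Point2f p(4.0f, 2.0f);
  img.at<uint8_t>((int)p.y, (int)p.x) = 255;   // at(row, col) == at(y, x)

  // The cv::Point overload of at() takes (x, y) directly.
  std::printf("%d\n", (int)img.at<uint8_t>(cv::Point(4, 2)));  // prints 255
}
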
cv::Point2f Feature::projectPositionToCamera(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,
    const cv::Vec4d& intrinsics,
    const std::string& distortion_model,
    const cv::Vec4d& distortion_coeffs,
    Eigen::Vector3d& in_p) const
{
  Eigen::Isometry3d T_c0_w;

  cv::Point2f out_p;

  // transform position into the camera frame
  Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
  const Eigen::Vector3d& t_c0_w = cam_state.position;
  Eigen::Vector3d p_c0 = R_w_c0 * (in_p - t_c0_w);

  // perspective division to normalized image coordinates,
  // then apply the distortion model to obtain pixel coordinates
  out_p = cv::Point2f(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));
  std::vector<cv::Point2f> out_v;
  out_v.push_back(out_p);
  std::vector<cv::Point2f> my_p = image_handler::distortPoints(out_v,
                                                               intrinsics,
                                                               distortion_model,
                                                               distortion_coeffs);

  // printf("truPosition: %f, %f, %f\n", position.x(), position.y(), position.z());
  // printf("camPosition: %f, %f, %f\n", p_c0(0), p_c0(1), p_c0(2));
  // printf("Photo projection: %f, %f\n", my_p[0].x, my_p[0].y);

  // return the distorted pixel position rather than the normalized point,
  // since the callers sample the image with the returned coordinates
  return my_p[0];
}

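projectPositionToCamera first expresses the world point in the camera frame, p_c0 = R_w_c0 * (p_w - t_c0_w), then divides by depth to get normalized image coordinates before the distortion model is applied. A minimal Eigen-only sketch of that transform chain, with a made-up camera pose and world point and the distortion step omitted:

#include <cstdio>
#include <Eigen/Dense>

int main()
{
  const double kPi = 3.14159265358979323846;

  // Assumed camera pose in the world frame: a 90 degree yaw and a 1 m offset
  // (stand-ins for cam_state.orientation / cam_state.position).
  Eigen::Matrix3d R_c0_w =
      Eigen::AngleAxisd(kPi / 2.0, Eigen::Vector3d::UnitZ()).toRotationMatrix();
  Eigen::Vector3d t_c0_w(1.0, 0.0, 0.0);

  // The diff treats quaternionToRotation(cam_state.orientation) as the
  // world-to-camera rotation R_w_c0.
  Eigen::Matrix3d R_w_c0 = R_c0_w.transpose();

  // A world point in front of the camera.
  Eigen::Vector3d in_p(1.0, 2.0, 0.5);

  // Same transform chain as projectPositionToCamera: camera frame, then
  // perspective division to normalized image coordinates.
  Eigen::Vector3d p_c0 = R_w_c0 * (in_p - t_c0_w);
  double x = p_c0(0) / p_c0(2);
  double y = p_c0(1) / p_c0(2);

  // Pixel coordinates follow only after the distortion model and intrinsics
  // are applied (image_handler::distortPoints in the diff).
  std::printf("p_c0 = (%f, %f, %f), normalized = (%f, %f)\n",
              p_c0(0), p_c0(1), p_c0(2), x, y);
}
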
bool Feature::projectPixelToPosition(cv::Point2f in_p,
    Eigen::Vector3d& out_p,
    const cv::Vec4d& intrinsics,
@@ -342,9 +427,11 @@ bool Feature::projectPixelToPosition(cv::Point2f in_p,
  Eigen::Vector3d PositionInCamera(in_p.x/rho, in_p.y/rho, 1/rho);
  Eigen::Vector3d PositionInWorld = T_anchor_w.linear()*PositionInCamera + T_anchor_w.translation();
  anchorPatch_3d.push_back(PositionInWorld);
-  printf("%f, %f, %f\n", PositionInWorld[0], PositionInWorld[1], PositionInWorld[2]);
+  //printf("%f, %f, %f\n", PositionInWorld[0], PositionInWorld[1], PositionInWorld[2]);
}

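projectPixelToPosition goes the opposite way: it takes a normalized anchor-frame observation (u, v) and an inverse depth rho, lifts it to the camera-frame point (u/rho, v/rho, 1/rho), and maps it into the world frame with the anchor pose T_anchor_w. A hedged round-trip sketch with a made-up anchor pose and inverse depth (in the real code rho comes from the triangulated feature):

#include <cstdio>
#include <Eigen/Dense>

int main()
{
  // Assumed anchor camera pose in the world frame (identity rotation,
  // shifted origin), a stand-in for T_anchor_w in the diff.
  Eigen::Isometry3d T_anchor_w = Eigen::Isometry3d::Identity();
  T_anchor_w.translation() = Eigen::Vector3d(0.5, -0.2, 1.0);

  // Normalized observation in the anchor frame and its inverse depth.
  double u = 0.12, v = -0.03;
  double rho = 0.25;   // corresponds to a depth of 4 m

  // Lift to a 3D point in the anchor camera frame, then into the world frame.
  Eigen::Vector3d p_anchor(u / rho, v / rho, 1.0 / rho);
  Eigen::Vector3d p_world = T_anchor_w.linear() * p_anchor + T_anchor_w.translation();

  // Round trip: re-projecting the world point through the same pose must
  // give back the original normalized observation.
  Eigen::Vector3d back = T_anchor_w.linear().transpose() * (p_world - T_anchor_w.translation());
  std::printf("u: %f -> %f, v: %f -> %f\n", u, back(0) / back(2), v, back(1) / back(2));
}
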
//@test center projection must always be the initial feature projection
bool Feature::initializeAnchor(
    const movingWindow& cam0_moving_window,
    const cv::Vec4d& intrinsics,
@@ -352,7 +439,7 @@ bool Feature::initializeAnchor(
    const cv::Vec4d& distortion_coeffs)
{

-  int N = 5;
+  int N = 3;
  int n = (int)(N-1)/2;

  auto anchor = observations.begin();
@@ -364,21 +451,22 @@ bool Feature::initializeAnchor(
  auto v = anchor->second(1)*intrinsics[1] + intrinsics[3];
  int count = 0;

  printf("estimated NxN position: \n");
  // go through the surrounding pixels of the anchor observation
  for(double u_run = u - n; u_run <= u + n; u_run = u_run + 1)
  {
    for(double v_run = v - n; v_run <= v + n; v_run = v_run + 1)
    {
      // cv::Mat::at expects (row, col), i.e. (v, u)
      anchorPatch.push_back(anchorImage.at<uint8_t>((int)v_run, (int)u_run));
      Eigen::Vector3d Npose;
-      projectPixelToPosition(cv::Point2f((u_run-intrinsics[2])/intrinsics[0], (v_run-intrinsics[1])/intrinsics[3]),
+      projectPixelToPosition(cv::Point2f((u_run-intrinsics[2])/intrinsics[0], (v_run-intrinsics[3])/intrinsics[1]),
                             Npose,
                             intrinsics,
                             distortion_model,
                             distortion_coeffs);
    }
  }
  return true;

  return true;
}

bool Feature::initializePosition(
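
initializeAnchor samples an N x N pixel patch around the anchor observation (u and v are pixel coordinates obtained from the normalized observation via u = x*fx + cx, v = y*fy + cy) and hands each patch pixel back to projectPixelToPosition in normalized form, (px - cx)/fx and (py - cy)/fy. A small sketch of just that grid construction, assuming an intrinsics layout of (fx, fy, cx, cy) like the cv::Vec4d in the diff and made-up values:

#include <cstdio>
#include <vector>
#include <opencv2/core.hpp>

int main()
{
  // Assumed intrinsics layout: (fx, fy, cx, cy).
  cv::Vec4d intrinsics(458.0, 457.0, 367.0, 248.0);
  double fx = intrinsics[0], fy = intrinsics[1];
  double cx = intrinsics[2], cy = intrinsics[3];

  // Normalized anchor observation -> pixel coordinates.
  double x = 0.05, y = -0.02;
  double u = x * fx + cx;
  double v = y * fy + cy;

  // Collect the N x N grid around (u, v), converted back to normalized
  // coordinates, i.e. what projectPixelToPosition receives per patch pixel.
  const int N = 3;
  const int n = (N - 1) / 2;
  std::vector<cv::Point2f> patch;
  for (double u_run = u - n; u_run <= u + n; u_run += 1.0)
    for (double v_run = v - n; v_run <= v + n; v_run += 1.0)
      patch.emplace_back((u_run - cx) / fx, (v_run - cy) / fy);

  std::printf("patch size: %zu (expected %d)\n", patch.size(), N * N);
}
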