added some feature calculations to this branch
commit a0577dfb9d
parent 8cfbe06945
@@ -15,7 +15,7 @@
 #include <Eigen/Dense>
 #include <Eigen/Geometry>
 #include <Eigen/StdVector>
+#include <math.h>
 #include <visualization_msgs/Marker.h>
 #include <visualization_msgs/MarkerArray.h>
 #include <geometry_msgs/Point.h>
@@ -70,6 +70,11 @@ struct Feature {
     position(Eigen::Vector3d::Zero()),
     is_initialized(false), is_anchored(false) {}
 
+
+  void Rhocost(const Eigen::Isometry3d& T_c0_ci,
+      const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
+      double& e) const;
+
  /*
   * @brief cost Compute the cost of the camera observations
   * @param T_c0_c1 A rigid body transformation takes
@@ -82,6 +87,13 @@ struct Feature {
      const Eigen::Vector3d& x, const Eigen::Vector2d& z,
      double& e) const;
 
+  bool initializeRho(const CamStateServer& cam_states);
+
+  inline void RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
+      const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
+      Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
+      double& w) const;
+
  /*
   * @brief jacobian Compute the Jacobian of the camera observation
   * @param T_c0_c1 A rigid body transformation takes
@@ -97,6 +109,10 @@ struct Feature {
      Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
      double& w) const;
 
+  inline double generateInitialDepth(
+      const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
+      const Eigen::Vector2d& z2) const;
+
  /*
   * @brief generateInitialGuess Compute the initial guess of
   * the feature's 3d position using only two views.
@@ -148,6 +164,14 @@ struct Feature {
   inline bool initializePosition(
      const CamStateServer& cam_states);
 
+  cv::Point2f pixelDistanceAt(
+      const CAMState& cam_state,
+      const StateIDType& cam_state_id,
+      const CameraCalibration& cam,
+      Eigen::Vector3d& in_p) const;
+
+
+
  /*
   * @brief project PositionToCamera Takes a 3d position in a world frame
   * and projects it into the passed camera frame using pinhole projection
@@ -160,6 +184,11 @@ struct Feature {
      const CameraCalibration& cam,
      Eigen::Vector3d& in_p) const;
 
+  double Kernel(
+      const cv::Point2f pose,
+      const cv::Mat frame,
+      std::string type) const;
+
  /*
   * @brief IrradianceAnchorPatch_Camera returns irradiance values
   * of the Anchor Patch position in a camera frame
@@ -181,13 +210,15 @@ bool MarkerGeneration(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      CameraCalibration& cam0,
-     const std::vector<double> photo_r,
-     std::stringstream& ss) const;
+     const Eigen::VectorXd& photo_r,
+     std::stringstream& ss,
+     cv::Point2f gradientVector,
+     cv::Point2f residualVector) const;
  /*
-  * @brief projectPixelToPosition uses the calcualted pixels
+  * @brief AnchorPixelToPosition uses the calcualted pixels
   * of the anchor patch to generate 3D positions of all of em
   */
-  inline Eigen::Vector3d projectPixelToPosition(cv::Point2f in_p,
+  inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p,
      const CameraCalibration& cam);
 
  /*
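Note on the signature change just above: moving photo_r from std::vector<double> to Eigen::VectorXd means callers index it with parentheses instead of square brackets, which is what the VisualizePatch hunks further down adjust. A minimal illustrative sketch (the helper name is made up, not part of this commit):

#include <cstddef>
#include <vector>
#include <Eigen/Dense>

// Illustrative only: packing per-pixel photometric residuals into an
// Eigen::VectorXd; note element access becomes photo_r(i) instead of photo_r[i].
Eigen::VectorXd toEigenResiduals(const std::vector<double>& r) {
  Eigen::VectorXd photo_r(r.size());
  for (std::size_t i = 0; i < r.size(); ++i)
    photo_r(i) = r[i];
  return photo_r;
}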
@@ -248,6 +279,26 @@ typedef std::map<FeatureIDType, Feature, std::less<int>,
         Eigen::aligned_allocator<
         std::pair<const FeatureIDType, Feature> > > MapServer;
 
+void Feature::Rhocost(const Eigen::Isometry3d& T_c0_ci,
+    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
+    double& e) const
+{
+  // Compute hi1, hi2, and hi3 as Equation (37).
+  const double& rho = x;
+
+  Eigen::Vector3d h = T_c0_ci.linear()*
+    Eigen::Vector3d(z1(0), z1(1), 1.0) + rho*T_c0_ci.translation();
+  double& h1 = h(0);
+  double& h2 = h(1);
+  double& h3 = h(2);
+
+  // Predict the feature observation in ci frame.
+  Eigen::Vector2d z_hat(h1/h3, h2/h3);
+
+  // Compute the residual.
+  e = (z_hat-z2).squaredNorm();
+  return;
+}
 
 void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
     const Eigen::Vector3d& x, const Eigen::Vector2d& z,
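For orientation, the new Rhocost above scores a single inverse-depth hypothesis rho by transforming the bearing [z1; 1] from the anchor frame c0 into frame ci, projecting it, and taking the squared residual against z2. The same arithmetic as a standalone sketch (plain Eigen, outside the Feature class):

#include <Eigen/Dense>
#include <Eigen/Geometry>

// Standalone restatement of the inverse-depth cost above: h = R*[z1;1] + rho*t,
// predicted observation z_hat = (h1/h3, h2/h3), cost = |z_hat - z2|^2.
double rhoCostSketch(const Eigen::Isometry3d& T_c0_ci, double rho,
                     const Eigen::Vector2d& z1, const Eigen::Vector2d& z2) {
  Eigen::Vector3d h = T_c0_ci.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0)
                    + rho * T_c0_ci.translation();
  Eigen::Vector2d z_hat(h(0) / h(2), h(1) / h(2));
  return (z_hat - z2).squaredNorm();
}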
@@ -260,9 +311,9 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
 
   Eigen::Vector3d h = T_c0_ci.linear()*
     Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
-  double& h1 = h(0);
-  double& h2 = h(1);
-  double& h3 = h(2);
+  double h1 = h(0);
+  double h2 = h(1);
+  double h3 = h(2);
 
   // Predict the feature observation in ci frame.
   Eigen::Vector2d z_hat(h1/h3, h2/h3);
@@ -272,6 +323,42 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
   return;
 }
 
+void Feature::RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
+    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
+    Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
+    double& w) const
+{
+
+  const double& rho = x;
+
+  Eigen::Vector3d h = T_c0_ci.linear()*
+    Eigen::Vector3d(z1(0), z2(1), 1.0) + rho*T_c0_ci.translation();
+  double& h1 = h(0);
+  double& h2 = h(1);
+  double& h3 = h(2);
+
+  // Compute the Jacobian.
+  Eigen::Matrix3d W;
+  W.leftCols<2>() = T_c0_ci.linear().leftCols<2>();
+  W.rightCols<1>() = T_c0_ci.translation();
+
+  J(0,0) = -h1/(h3*h3);
+  J(1,0) = -h2/(h3*h3);
+
+  // Compute the residual.
+  Eigen::Vector2d z_hat(h1/h3, h2/h3);
+  r = z_hat - z2;
+
+  // Compute the weight based on the residual.
+  double e = r.norm();
+  if (e <= optimization_config.huber_epsilon)
+    w = 1.0;
+  else
+    w = optimization_config.huber_epsilon / (2*e);
+
+  return;
+}
+
 void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
     const Eigen::Vector3d& x, const Eigen::Vector2d& z,
     Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
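The weight computed at the end of the new RhoJacobian is a Huber-style robust weight: residuals at or below huber_epsilon keep full weight, larger ones are down-weighted. Restated on its own:

// Huber-style weight as used above: w = 1 for small residuals,
// w = huber_epsilon / (2 e) once the residual norm e exceeds the threshold.
double huberWeight(double e, double huber_epsilon) {
  return (e <= huber_epsilon) ? 1.0 : huber_epsilon / (2.0 * e);
}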
@@ -311,9 +398,9 @@ void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
   return;
 }
 
-void Feature::generateInitialGuess(
+double Feature::generateInitialDepth(
     const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
-    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const
+    const Eigen::Vector2d& z2) const
 {
   // Construct a least square problem to solve the depth.
   Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);
@@ -328,6 +415,15 @@ void Feature::generateInitialGuess(
 
   // Solve for the depth.
   double depth = (A.transpose() * A).inverse() * A.transpose() * b;
+  return depth;
+}
+
+
+void Feature::generateInitialGuess(
+    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
+    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const
+{
+  double depth = generateInitialDepth(T_c1_c2, z1, z2);
   p(0) = z1(0) * depth;
   p(1) = z1(1) * depth;
   p(2) = depth;
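The refactor above splits the old two-view initializer: generateInitialDepth now returns the scalar depth from the normal-equation solve shown ((A^T A)^-1 A^T b), and generateInitialGuess merely scales [z1; 1] by it. The construction of A and b sits outside the lines shown; a self-contained sketch of the usual MSCKF-style two-view depth derivation it corresponds to (an assumption, not copied from this commit):

#include <Eigen/Dense>
#include <Eigen/Geometry>

// Sketch: with p_c1 = d*[z1;1], the second view sees d*m + t, where
// m = R_c1_c2*[z1;1]. Requiring its projection to equal z2 gives two
// linear equations in the depth d, solved in the least-squares sense.
double twoViewDepthSketch(const Eigen::Isometry3d& T_c1_c2,
                          const Eigen::Vector2d& z1,
                          const Eigen::Vector2d& z2) {
  Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);
  Eigen::Vector3d t = T_c1_c2.translation();
  Eigen::Vector2d A(m(0) - z2(0) * m(2), m(1) - z2(1) * m(2));
  Eigen::Vector2d b(z2(0) * t(2) - t(0), z2(1) * t(2) - t(1));
  // Same normal-equation solve as in the hunk: depth = (A^T A)^-1 A^T b.
  return A.dot(b) / A.dot(A);
}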
@@ -377,6 +473,26 @@ bool Feature::checkMotion(const CamStateServer& cam_states) const
   else return false;
 }
 
+double Feature::Kernel(
+    const cv::Point2f pose,
+    const cv::Mat frame,
+    std::string type) const
+{
+  Eigen::Matrix<double, 3, 3> kernel = Eigen::Matrix<double, 3, 3>::Zero();
+  if(type == "Sobel_x")
+    kernel << -1., 0., 1.,-2., 0., 2. , -1., 0., 1.;
+  else if(type == "Sobel_y")
+    kernel << -1., -2., -1., 0., 0., 0., 1., 2., 1.;
+
+  double delta = 0;
+  int offs = (int)(kernel.rows()-1)/2;
+
+  for(int i = 0; i < kernel.rows(); i++)
+    for(int j = 0; j < kernel.cols(); j++)
+      delta += ((float)frame.at<uint8_t>(pose.y+j-offs , pose.x+i-offs))/255 * (float)kernel(j,i);
+
+  return delta;
+}
 bool Feature::estimate_FrameIrradiance(
     const CAMState& cam_state,
     const StateIDType& cam_state_id,
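The new Kernel helper above convolves a 3x3 Sobel mask with the 8-bit image around pose, with intensities scaled to [0, 1]. A self-contained sketch of the same operation outside the Feature class (the function name here is illustrative):

#include <cstdint>
#include <opencv2/core.hpp>

// Sobel response at one pixel of a [0,1]-scaled 8-bit image, mirroring the
// 3x3 kernels and indexing used by Feature::Kernel above. Assumes p lies at
// least one pixel away from the image border.
double sobelAtSketch(const cv::Mat& frame, cv::Point2f p, bool x_direction) {
  const double kx[3][3] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}};
  const double ky[3][3] = {{-1, -2, -1}, {0, 0, 0}, {1, 2, 1}};
  double delta = 0.0;
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      delta += frame.at<uint8_t>(int(p.y) + j - 1, int(p.x) + i - 1) / 255.0 *
               (x_direction ? kx[j][i] : ky[j][i]);
  return delta;
}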
@@ -532,8 +648,10 @@ bool Feature::VisualizePatch(
     const CAMState& cam_state,
     const StateIDType& cam_state_id,
     CameraCalibration& cam0,
-    const std::vector<double> photo_r,
-    std::stringstream& ss) const
+    const Eigen::VectorXd& photo_r,
+    std::stringstream& ss,
+    cv::Point2f gradientVector,
+    cv::Point2f residualVector) const
 {
 
   double rescale = 1;
@@ -573,44 +691,106 @@ bool Feature::VisualizePatch(
 
   cv::hconcat(cam0.featureVisu, dottedFrame, cam0.featureVisu);
 
-  // irradiance grid anchor
+  // patches visualization
   int N = sqrt(anchorPatch_3d.size());
-  int scale = 20;
+  int scale = 30;
   cv::Mat irradianceFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255));
   cv::resize(irradianceFrame, irradianceFrame, cv::Size(), rescale, rescale);
 
+  // irradiance grid anchor
+  std::stringstream namer;
+  namer << "anchor";
+  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 25),
+      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
+
 
   for(int i = 0; i<N; i++)
     for(int j = 0; j<N; j++)
       cv::rectangle(irradianceFrame,
-          cv::Point(10+scale*(i+1), 10+scale*j),
-          cv::Point(10+scale*i, 10+scale*(j+1)),
+          cv::Point(30+scale*(i+1), 30+scale*j),
+          cv::Point(30+scale*i, 30+scale*(j+1)),
           cv::Scalar(anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255),
           CV_FILLED);
 
   // irradiance grid projection
 
+  namer.str(std::string());
+  namer << "projection";
+  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 45+scale*N),
+      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
+
   for(int i = 0; i<N; i++)
     for(int j = 0; j<N ; j++)
       cv::rectangle(irradianceFrame,
-          cv::Point(10+scale*(i+1), 20+scale*(N+j)),
-          cv::Point(10+scale*(i), 20+scale*(N+j+1)),
+          cv::Point(30+scale*(i+1), 50+scale*(N+j)),
+          cv::Point(30+scale*(i), 50+scale*(N+j+1)),
           cv::Scalar(projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255),
           CV_FILLED);
 
+  // true irradiance at feature
+  // get current observation
+
+  namer.str(std::string());
+  namer << "feature";
+  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 65+scale*2*N),
+      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
+
+  cv::Point2f p_f(observations.find(cam_state_id)->second(0),observations.find(cam_state_id)->second(1));
+  // move to real pixels
+  p_f = image_handler::distortPoint(p_f, cam0.intrinsics, cam0.distortion_model, cam0.distortion_coeffs);
+
+  for(int i = 0; i<N; i++)
+  {
+    for(int j = 0; j<N ; j++)
+    {
+      float irr = PixelIrradiance(cv::Point2f(p_f.x + (i-(N-1)/2), p_f.y + (j-(N-1)/2)), current_image);
+      cv::rectangle(irradianceFrame,
+          cv::Point(30+scale*(i+1), 70+scale*(2*N+j)),
+          cv::Point(30+scale*(i), 70+scale*(2*N+j+1)),
+          cv::Scalar(irr*255, irr*255, irr*255),
+          CV_FILLED);
+    }
+  }
 
   // residual grid projection, positive - red, negative - blue colored
+  namer.str(std::string());
+  namer << "residual";
+  cv::putText(irradianceFrame, namer.str() , cvPoint(30+scale*N, scale*N/2-5),
+      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
+
   for(int i = 0; i<N; i++)
     for(int j = 0; j<N; j++)
-      if(photo_r[i*N+j]>0)
+      if(photo_r(i*N+j)>0)
        cv::rectangle(irradianceFrame,
-          cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)),
-          cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)),
-          cv::Scalar(255 - photo_r[i*N+j]*255, 255 - photo_r[i*N+j]*255, 255),
+          cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
+          cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
+          cv::Scalar(255 - photo_r(i*N+j)*255, 255 - photo_r(i*N+j)*255, 255),
           CV_FILLED);
       else
        cv::rectangle(irradianceFrame,
-          cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)),
-          cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)),
-          cv::Scalar(255, 255 + photo_r[i*N+j]*255, 255 + photo_r[i*N+j]*255),
+          cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
+          cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
+          cv::Scalar(255, 255 + photo_r(i*N+j)*255, 255 + photo_r(i*N+j)*255),
           CV_FILLED);
 
+  // gradient arrow
+  /*
+  cv::arrowedLine(irradianceFrame,
+      cv::Point(30+scale*(N/2 +0.5), 50+scale*(N+(N/2)+0.5)),
+      cv::Point(30+scale*(N/2+0.5)+scale*gradientVector.x, 50+scale*(N+(N/2)+0.5)+scale*gradientVector.y),
+      cv::Scalar(100, 0, 255),
+      1);
+  */
+
+  // residual gradient direction
+  cv::arrowedLine(irradianceFrame,
+      cv::Point(40+scale*(N+N/2+0.5), 15+scale*((N-0.5))),
+      cv::Point(40+scale*(N+N/2+0.5)+scale*residualVector.x, 15+scale*(N-0.5)+scale*residualVector.y),
+      cv::Scalar(0, 255, 175),
+      3);
+
+
   cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);
 
  /*
@@ -671,6 +851,37 @@ float Feature::PixelIrradiance(cv::Point2f pose, cv::Mat image) const
   return ((float)image.at<uint8_t>(pose.y, pose.x))/255;
 }
 
+cv::Point2f Feature::pixelDistanceAt(
+    const CAMState& cam_state,
+    const StateIDType& cam_state_id,
+    const CameraCalibration& cam,
+    Eigen::Vector3d& in_p) const
+{
+
+  cv::Point2f cam_p = projectPositionToCamera(cam_state, cam_state_id, cam, in_p);
+
+  // create vector of patch in pixel plane
+  std::vector<cv::Point2f> surroundingPoints;
+  surroundingPoints.push_back(cv::Point2f(cam_p.x+1, cam_p.y));
+  surroundingPoints.push_back(cv::Point2f(cam_p.x-1, cam_p.y));
+  surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y+1));
+  surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y-1));
+
+  std::vector<cv::Point2f> pure;
+  image_handler::undistortPoints(surroundingPoints,
+      cam.intrinsics,
+      cam.distortion_model,
+      cam.distortion_coeffs,
+      pure);
+
+  // returns the absolute pixel distance at pixels one metres away
+  cv::Point2f distance(fabs(pure[0].x - pure[1].x), fabs(pure[2].y - pure[3].y));
+
+  return distance;
+}
+
+
+
 cv::Point2f Feature::projectPositionToCamera(
     const CAMState& cam_state,
     const StateIDType& cam_state_id,
@@ -703,7 +914,7 @@ cv::Point2f Feature::projectPositionToCamera(
   return my_p;
 }
 
-Eigen::Vector3d Feature::projectPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam)
+Eigen::Vector3d Feature::AnchorPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam)
 {
   // use undistorted position of point of interest
   // project it back into 3D space using pinhole model
@@ -770,15 +981,162 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
 
   // project patch pixel to 3D space in camera coordinate system
   for(auto point : anchorPatch_ideal)
-    anchorPatch_3d.push_back(projectPixelToPosition(point, cam));
+    anchorPatch_3d.push_back(AnchorPixelToPosition(point, cam));
 
   is_anchored = true;
   return true;
 }
+
+bool Feature::initializeRho(const CamStateServer& cam_states) {
+
+  // Organize camera poses and feature observations properly.
+  std::vector<Eigen::Isometry3d,
+    Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
+  std::vector<Eigen::Vector2d,
+    Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
+
+  for (auto& m : observations) {
+    auto cam_state_iter = cam_states.find(m.first);
+    if (cam_state_iter == cam_states.end()) continue;
+
+    // Add the measurement.
+    measurements.push_back(m.second.head<2>());
+    measurements.push_back(m.second.tail<2>());
+
+    // This camera pose will take a vector from this camera frame
+    // to the world frame.
+    Eigen::Isometry3d cam0_pose;
+    cam0_pose.linear() = quaternionToRotation(
+        cam_state_iter->second.orientation).transpose();
+    cam0_pose.translation() = cam_state_iter->second.position;
+
+    Eigen::Isometry3d cam1_pose;
+    cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
+
+    cam_poses.push_back(cam0_pose);
+    cam_poses.push_back(cam1_pose);
+  }
+
+  // All camera poses should be modified such that it takes a
+  // vector from the first camera frame in the buffer to this
+  // camera frame.
+  Eigen::Isometry3d T_c0_w = cam_poses[0];
+  T_anchor_w = T_c0_w;
+  for (auto& pose : cam_poses)
+    pose = pose.inverse() * T_c0_w;
+
+  // Generate initial guess
+  double initial_depth = 0;
+  initial_depth = generateInitialDepth(cam_poses[cam_poses.size()-1], measurements[0],
+      measurements[measurements.size()-1]);
+
+  double solution = 1.0/initial_depth;
+
+  // Apply Levenberg-Marquart method to solve for the 3d position.
+  double lambda = optimization_config.initial_damping;
+  int inner_loop_cntr = 0;
+  int outer_loop_cntr = 0;
+  bool is_cost_reduced = false;
+  double delta_norm = 0;
+
+  // Compute the initial cost.
+  double total_cost = 0.0;
+  for (int i = 0; i < cam_poses.size(); ++i) {
+    double this_cost = 0.0;
+    Rhocost(cam_poses[i], solution, measurements[0], measurements[i], this_cost);
+    total_cost += this_cost;
+  }
+
+  // Outer loop.
+  do {
+    Eigen::Matrix<double, 1, 1> A = Eigen::Matrix<double, 1, 1>::Zero();
+    Eigen::Matrix<double, 1, 1> b = Eigen::Matrix<double, 1, 1>::Zero();
+
+    for (int i = 0; i < cam_poses.size(); ++i) {
+      Eigen::Matrix<double, 2, 1> J;
+      Eigen::Vector2d r;
+      double w;
+
+      RhoJacobian(cam_poses[i], solution, measurements[0], measurements[i], J, r, w);
+
+      if (w == 1) {
+        A += J.transpose() * J;
+        b += J.transpose() * r;
+      } else {
+        double w_square = w * w;
+        A += w_square * J.transpose() * J;
+        b += w_square * J.transpose() * r;
+      }
+    }
+
+    // Inner loop.
+    // Solve for the delta that can reduce the total cost.
+    do {
+      Eigen::Matrix<double, 1, 1> damper = lambda*Eigen::Matrix<double, 1, 1>::Identity();
+      Eigen::Matrix<double, 1, 1> delta = (A+damper).ldlt().solve(b);
+      double new_solution = solution - delta(0,0);
+      delta_norm = delta.norm();
+
+      double new_cost = 0.0;
+      for (int i = 0; i < cam_poses.size(); ++i) {
+        double this_cost = 0.0;
+        Rhocost(cam_poses[i], new_solution, measurements[0], measurements[i], this_cost);
+        new_cost += this_cost;
+      }
+
+      if (new_cost < total_cost) {
+        is_cost_reduced = true;
+        solution = new_solution;
+        total_cost = new_cost;
+        lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
+      } else {
+        is_cost_reduced = false;
+        lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
+      }
+
+    } while (inner_loop_cntr++ <
+        optimization_config.inner_loop_max_iteration && !is_cost_reduced);
+
+    inner_loop_cntr = 0;
+
+  } while (outer_loop_cntr++ <
+      optimization_config.outer_loop_max_iteration &&
+      delta_norm > optimization_config.estimation_precision);
+
+  // Covert the feature position from inverse depth
+  // representation to its 3d coordinate.
+  Eigen::Vector3d final_position(measurements[0](0)/solution,
+      measurements[0](1)/solution, 1.0/solution);
+
+  // Check if the solution is valid. Make sure the feature
+  // is in front of every camera frame observing it.
+  bool is_valid_solution = true;
+  for (const auto& pose : cam_poses) {
+    Eigen::Vector3d position =
+        pose.linear()*final_position + pose.translation();
+    if (position(2) <= 0) {
+      is_valid_solution = false;
+      break;
+    }
+  }
+
+  //save inverse depth distance from camera
+  anchor_rho = solution;
+
+  // Convert the feature position to the world frame.
+  position = T_c0_w.linear()*final_position + T_c0_w.translation();
+
+  if (is_valid_solution)
+    is_initialized = true;
+
+  return is_valid_solution;
+}
+
+
 bool Feature::initializePosition(const CamStateServer& cam_states) {
 
   // Organize camera poses and feature observations properly.
 
   std::vector<Eigen::Isometry3d,
     Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
   std::vector<Eigen::Vector2d,
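Because initializeRho above optimizes a single scalar (the inverse depth), the Levenberg-Marquardt normal equations collapse to scalars and each damped step is simply rho <- rho - b/(A + lambda). Restated on its own (names illustrative):

// One damped (Levenberg-Marquardt) step for a scalar parameter, as applied
// to the inverse depth above. A and b are the accumulated normal-equation
// terms sum(w^2 * J^T J) and sum(w^2 * J^T r).
double lmScalarStep(double rho, double A, double b, double lambda) {
  double delta = b / (A + lambda);  // (A + lambda*I)^{-1} * b in one dimension
  return rho - delta;
}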
@@ -916,6 +1274,7 @@ bool Feature::initializePosition(const CamStateServer& cam_states) {
 
   //save inverse depth distance from camera
   anchor_rho = solution(2);
+  std::cout << "from feature: " << anchor_rho << std::endl;
 
   // Convert the feature position to the world frame.
   position = T_c0_w.linear()*final_position + T_c0_w.translation();
@@ -18,7 +18,7 @@
     output="screen">
 
     <!-- Photometry Flag-->
-    <param name="PHOTOMETRIC" value="false"/>
+    <param name="PHOTOMETRIC" value="true"/>
 
     <!-- Debugging Flaggs -->
     <param name="PrintImages" value="false"/>
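The launch-file hunk above flips the PHOTOMETRIC flag on by default. How the node reads it is not shown in this commit; a hedged sketch of a typical ROS parameter lookup for such a flag:

#include <ros/ros.h>

// Illustrative only: read the PHOTOMETRIC flag from the parameter server,
// defaulting to false when it is not set.
bool loadPhotometricFlag(ros::NodeHandle& nh) {
  bool photometric = false;
  nh.param<bool>("PHOTOMETRIC", photometric, false);
  return photometric;
}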
@@ -1254,7 +1254,6 @@ void MsckfVio::PhotometricMeasurementJacobian(
   auto frame = cam0.moving_window.find(cam_state_id)->second.image;
 
   int count = 0;
-  double dx, dy;
 
   auto point = feature.anchorPatch_3d[0];
 
@@ -1367,23 +1366,6 @@ void MsckfVio::PhotometricFeatureJacobian(
     nh.setParam("/play_bag", false);
   }
 
-
-  // Errstate
-  calcErrorState();
-  /*
-  std::cout << "--- error state: ---\n " << std::endl;
-  std::cout << "imu orientation:\n " << err_state_server.imu_state.orientation << std::endl;
-  std::cout << "imu position\n" << err_state_server.imu_state.position << std::endl;
-
-  int count = 0;
-  for(auto cam_state : err_state_server.cam_states)
-  {
-    std::cout << " - cam " << count++ << " - \n" << std::endl;
-    std::cout << "orientation: " << cam_state.second.orientation(0) << cam_state.second.orientation(1) << " " << cam_state.second.orientation(2) << " " << std::endl;
-    std::cout << "position:" << cam_state.second.position(0) << " " << cam_state.second.position(1) << " " << cam_state.second.position(2) << std::endl;
-  }
-  */
-
   const auto& feature = map_server[feature_id];
 
 
@@ -1430,22 +1412,10 @@ void MsckfVio::PhotometricFeatureJacobian(
   // get Nullspace
   FullPivLU<MatrixXd> lu(H_yi.transpose());
   MatrixXd A_null_space = lu.kernel();
-  /*
-  JacobiSVD<MatrixXd> svd_helper(H_yi, ComputeFullU | ComputeThinV);
-
-  int sv_size = 0;
-  Eigen::VectorXd singularValues = svd_helper.singularValues();
-  for(int i = 0; i < singularValues.size(); i++)
-    if(singularValues[i] > 1e-12)
-      sv_size++;
-
-  MatrixXd A = svd_helper.matrixU().rightCols(jacobian_row_size - singularValues.size());
-  */
   H_x = A_null_space.transpose() * H_xi;
   r = A_null_space.transpose() * r_i;
 
 
 
   if(PRINTIMAGES)
   {
     std::cout << "resume playback" << std::endl;
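The surviving lines above project the stacked Jacobian onto the left null space of H_yi via FullPivLU, replacing the commented-out SVD route that this hunk deletes. A small self-contained check of that pattern (matrix sizes here are arbitrary):

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Arbitrary sizes: 10 stacked residual rows, 3 nuisance (feature) columns.
  Eigen::MatrixXd H_yi = Eigen::MatrixXd::Random(10, 3);
  Eigen::MatrixXd H_xi = Eigen::MatrixXd::Random(10, 6);
  Eigen::VectorXd r_i  = Eigen::VectorXd::Random(10);

  // Left null space of H_yi: columns of A_null_space satisfy A^T * H_yi = 0.
  Eigen::FullPivLU<Eigen::MatrixXd> lu(H_yi.transpose());
  Eigen::MatrixXd A_null_space = lu.kernel();

  // Project state Jacobian and residual, as in the diff above.
  Eigen::MatrixXd H_x = A_null_space.transpose() * H_xi;
  Eigen::VectorXd r   = A_null_space.transpose() * r_i;

  // Sanity check: the nuisance block is (numerically) annihilated.
  std::cout << (A_null_space.transpose() * H_yi).norm() << std::endl;
  return 0;
}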