Compare commits
	
		
			13 Commits
		
	
	
		
			photometry
			...
			photometry
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| 2dac361ba2 | |||
| 0712a98c7f | |||
| 6b8dce9876 | |||
| 49374a4323 | |||
| 3e480560e8 | |||
| b3df525060 | |||
| a8d4580812 | |||
| a0577dfb9d | |||
| 8cfbe06945 | |||
| cab56d9494 | |||
| 5e9149eacc | |||
| e4dbe2f060 | |||
| 6b208dbc44 | 
@@ -15,7 +15,7 @@
 | 
			
		||||
#include <Eigen/Dense>
 | 
			
		||||
#include <Eigen/Geometry>
 | 
			
		||||
#include <Eigen/StdVector>
 | 
			
		||||
 | 
			
		||||
#include <math.h>
 | 
			
		||||
#include <visualization_msgs/Marker.h>
 | 
			
		||||
#include <visualization_msgs/MarkerArray.h>
 | 
			
		||||
#include <geometry_msgs/Point.h>
 | 
			
		||||
@@ -70,6 +70,11 @@ struct Feature {
 | 
			
		||||
    position(Eigen::Vector3d::Zero()),
 | 
			
		||||
    is_initialized(false), is_anchored(false) {}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
void Rhocost(const Eigen::Isometry3d& T_c0_ci,
 | 
			
		||||
    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
 | 
			
		||||
    double& e) const;
 | 
			
		||||
 | 
			
		||||
  /*
 | 
			
		||||
   * @brief cost Compute the cost of the camera observations
 | 
			
		||||
   * @param T_c0_c1 A rigid body transformation takes
 | 
			
		||||
@@ -82,6 +87,13 @@ struct Feature {
 | 
			
		||||
      const Eigen::Vector3d& x, const Eigen::Vector2d& z,
 | 
			
		||||
      double& e) const;
 | 
			
		||||
 | 
			
		||||
  bool initializeRho(const CamStateServer& cam_states);
 | 
			
		||||
 | 
			
		||||
  inline void RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
 | 
			
		||||
    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
 | 
			
		||||
    Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
 | 
			
		||||
    double& w) const; 
 | 
			
		||||
 | 
			
		||||
  /*
 | 
			
		||||
   * @brief jacobian Compute the Jacobian of the camera observation
 | 
			
		||||
   * @param T_c0_c1 A rigid body transformation takes
 | 
			
		||||
@@ -97,6 +109,10 @@ struct Feature {
 | 
			
		||||
      Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
 | 
			
		||||
      double& w) const;
 | 
			
		||||
 | 
			
		||||
  inline double generateInitialDepth(
 | 
			
		||||
    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
 | 
			
		||||
    const Eigen::Vector2d& z2) const;
 | 
			
		||||
 | 
			
		||||
  /*
 | 
			
		||||
   * @brief generateInitialGuess Compute the initial guess of
 | 
			
		||||
   *    the feature's 3d position using only two views.
 | 
			
		||||
@@ -148,6 +164,14 @@ struct Feature {
 | 
			
		||||
  inline bool initializePosition(
 | 
			
		||||
      const CamStateServer& cam_states);
 | 
			
		||||
 | 
			
		||||
  cv::Point2f pixelDistanceAt(
 | 
			
		||||
                  const CAMState& cam_state,
 | 
			
		||||
                  const StateIDType& cam_state_id,
 | 
			
		||||
                  const CameraCalibration& cam,
 | 
			
		||||
                  Eigen::Vector3d& in_p) const;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
*  @brief project PositionToCamera Takes a 3d position in a world frame
 | 
			
		||||
*         and projects it into the passed camera frame using pinhole projection
 | 
			
		||||
@@ -160,6 +184,11 @@ struct Feature {
 | 
			
		||||
                  const CameraCalibration& cam,
 | 
			
		||||
                  Eigen::Vector3d& in_p) const;
 | 
			
		||||
 | 
			
		||||
  double Kernel(
 | 
			
		||||
            const cv::Point2f pose,
 | 
			
		||||
            const cv::Mat frame,
 | 
			
		||||
            std::string type) const;
 | 
			
		||||
 | 
			
		||||
  /*
 | 
			
		||||
  * @brief IrradianceAnchorPatch_Camera returns irradiance values
 | 
			
		||||
  *        of the Anchor Patch position in a camera frame
 | 
			
		||||
@@ -181,13 +210,15 @@ bool MarkerGeneration(
 | 
			
		||||
                  const CAMState& cam_state,
 | 
			
		||||
                  const StateIDType& cam_state_id,
 | 
			
		||||
                  CameraCalibration& cam0,
 | 
			
		||||
                  const std::vector<double> photo_r,
 | 
			
		||||
                  std::stringstream& ss) const;
 | 
			
		||||
                  const Eigen::VectorXd& photo_r,
 | 
			
		||||
                  std::stringstream& ss,
 | 
			
		||||
                  cv::Point2f gradientVector,
 | 
			
		||||
                  cv::Point2f residualVector) const;
 | 
			
		||||
  /*
 | 
			
		||||
  * @brief projectPixelToPosition uses the calcualted pixels
 | 
			
		||||
  * @brief AnchorPixelToPosition uses the calcualted pixels
 | 
			
		||||
  *     of the anchor patch to generate 3D positions of all of em
 | 
			
		||||
  */
 | 
			
		||||
inline Eigen::Vector3d projectPixelToPosition(cv::Point2f in_p,
 | 
			
		||||
inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p,
 | 
			
		||||
          const CameraCalibration& cam);
 | 
			
		||||
 | 
			
		||||
  /*
 | 
			
		||||
@@ -248,6 +279,26 @@ typedef std::map<FeatureIDType, Feature, std::less<int>,
 | 
			
		||||
        Eigen::aligned_allocator<
 | 
			
		||||
        std::pair<const FeatureIDType, Feature> > > MapServer;
 | 
			
		||||
 | 
			
		||||
void Feature::Rhocost(const Eigen::Isometry3d& T_c0_ci,
 | 
			
		||||
    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
 | 
			
		||||
    double& e) const 
 | 
			
		||||
{
 | 
			
		||||
  // Compute hi1, hi2, and hi3 as Equation (37).
 | 
			
		||||
  const double& rho = x;
 | 
			
		||||
 | 
			
		||||
  Eigen::Vector3d h = T_c0_ci.linear()*
 | 
			
		||||
    Eigen::Vector3d(z1(0), z1(1), 1.0) + rho*T_c0_ci.translation();
 | 
			
		||||
  double& h1 = h(0);
 | 
			
		||||
  double& h2 = h(1);
 | 
			
		||||
  double& h3 = h(2);
 | 
			
		||||
 | 
			
		||||
  // Predict the feature observation in ci frame.
 | 
			
		||||
  Eigen::Vector2d z_hat(h1/h3, h2/h3);
 | 
			
		||||
 | 
			
		||||
  // Compute the residual.
 | 
			
		||||
  e = (z_hat-z2).squaredNorm();
 | 
			
		||||
  return;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
 | 
			
		||||
    const Eigen::Vector3d& x, const Eigen::Vector2d& z,
 | 
			
		||||
@@ -260,9 +311,9 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
 | 
			
		||||
 | 
			
		||||
  Eigen::Vector3d h = T_c0_ci.linear()*
 | 
			
		||||
    Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
 | 
			
		||||
  double& h1 = h(0);
 | 
			
		||||
  double& h2 = h(1);
 | 
			
		||||
  double& h3 = h(2);
 | 
			
		||||
  double h1 = h(0);
 | 
			
		||||
  double h2 = h(1);
 | 
			
		||||
  double h3 = h(2);
 | 
			
		||||
 | 
			
		||||
  // Predict the feature observation in ci frame.
 | 
			
		||||
  Eigen::Vector2d z_hat(h1/h3, h2/h3);
 | 
			
		||||
@@ -272,6 +323,42 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
 | 
			
		||||
  return;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void Feature::RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
 | 
			
		||||
    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
 | 
			
		||||
    Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
 | 
			
		||||
    double& w) const 
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
  const double& rho = x;
 | 
			
		||||
 | 
			
		||||
  Eigen::Vector3d h = T_c0_ci.linear()*
 | 
			
		||||
    Eigen::Vector3d(z1(0), z2(1), 1.0) + rho*T_c0_ci.translation();
 | 
			
		||||
  double& h1 = h(0);
 | 
			
		||||
  double& h2 = h(1);
 | 
			
		||||
  double& h3 = h(2);
 | 
			
		||||
 | 
			
		||||
  // Compute the Jacobian.
 | 
			
		||||
  Eigen::Matrix3d W;
 | 
			
		||||
  W.leftCols<2>() = T_c0_ci.linear().leftCols<2>();
 | 
			
		||||
  W.rightCols<1>() = T_c0_ci.translation();
 | 
			
		||||
 | 
			
		||||
  J(0,0) = -h1/(h3*h3);
 | 
			
		||||
  J(1,0) = -h2/(h3*h3);
 | 
			
		||||
 | 
			
		||||
  // Compute the residual.
 | 
			
		||||
  Eigen::Vector2d z_hat(h1/h3, h2/h3);
 | 
			
		||||
  r = z_hat - z2;
 | 
			
		||||
 | 
			
		||||
  // Compute the weight based on the residual.
 | 
			
		||||
  double e = r.norm();
 | 
			
		||||
  if (e <= optimization_config.huber_epsilon)
 | 
			
		||||
    w = 1.0;
 | 
			
		||||
  else
 | 
			
		||||
    w = optimization_config.huber_epsilon / (2*e);
 | 
			
		||||
 | 
			
		||||
  return;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
 | 
			
		||||
    const Eigen::Vector3d& x, const Eigen::Vector2d& z,
 | 
			
		||||
    Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
 | 
			
		||||
@@ -311,9 +398,9 @@ void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
 | 
			
		||||
  return;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void Feature::generateInitialGuess(
 | 
			
		||||
double Feature::generateInitialDepth(
 | 
			
		||||
    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
 | 
			
		||||
    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const 
 | 
			
		||||
    const Eigen::Vector2d& z2) const 
 | 
			
		||||
{
 | 
			
		||||
  // Construct a least square problem to solve the depth.
 | 
			
		||||
  Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);
 | 
			
		||||
@@ -328,6 +415,15 @@ void Feature::generateInitialGuess(
 | 
			
		||||
 | 
			
		||||
  // Solve for the depth.
 | 
			
		||||
  double depth = (A.transpose() * A).inverse() * A.transpose() * b;
 | 
			
		||||
  return depth;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
void Feature::generateInitialGuess(
 | 
			
		||||
    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
 | 
			
		||||
    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const 
 | 
			
		||||
{
 | 
			
		||||
  double depth = generateInitialDepth(T_c1_c2, z1, z2);
 | 
			
		||||
  p(0) = z1(0) * depth;
 | 
			
		||||
  p(1) = z1(1) * depth;
 | 
			
		||||
  p(2) = depth;
 | 
			
		||||
@@ -377,6 +473,26 @@ bool Feature::checkMotion(const CamStateServer& cam_states) const
 | 
			
		||||
  else return false;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
double Feature::Kernel(
 | 
			
		||||
                const cv::Point2f pose,
 | 
			
		||||
                const cv::Mat frame,
 | 
			
		||||
                std::string type) const
 | 
			
		||||
{
 | 
			
		||||
Eigen::Matrix<double, 3, 3> kernel = Eigen::Matrix<double, 3, 3>::Zero();
 | 
			
		||||
if(type == "Sobel_x")
 | 
			
		||||
  kernel  << -1., 0., 1.,-2., 0., 2. , -1., 0., 1.;
 | 
			
		||||
else if(type == "Sobel_y")
 | 
			
		||||
  kernel << -1., -2., -1., 0., 0., 0., 1., 2., 1.;
 | 
			
		||||
 | 
			
		||||
double delta = 0;
 | 
			
		||||
int offs = (int)(kernel.rows()-1)/2;
 | 
			
		||||
 | 
			
		||||
for(int i = 0; i < kernel.rows(); i++)
 | 
			
		||||
  for(int j = 0; j < kernel.cols(); j++)
 | 
			
		||||
    delta += ((float)frame.at<uint8_t>(pose.y+j-offs , pose.x+i-offs))/255 * (float)kernel(j,i);
 | 
			
		||||
 | 
			
		||||
return delta;
 | 
			
		||||
}
 | 
			
		||||
bool Feature::estimate_FrameIrradiance(
 | 
			
		||||
                  const CAMState& cam_state,
 | 
			
		||||
                  const StateIDType& cam_state_id,
 | 
			
		||||
@@ -532,8 +648,10 @@ bool Feature::VisualizePatch(
 | 
			
		||||
                  const CAMState& cam_state,
 | 
			
		||||
                  const StateIDType& cam_state_id,
 | 
			
		||||
                  CameraCalibration& cam0,
 | 
			
		||||
                  const std::vector<double> photo_r,
 | 
			
		||||
                  std::stringstream& ss) const
 | 
			
		||||
                  const Eigen::VectorXd& photo_r,
 | 
			
		||||
                  std::stringstream& ss,
 | 
			
		||||
                  cv::Point2f gradientVector,
 | 
			
		||||
                  cv::Point2f residualVector) const
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
  double rescale = 1;
 | 
			
		||||
@@ -573,45 +691,107 @@ bool Feature::VisualizePatch(
 | 
			
		||||
 | 
			
		||||
  cv::hconcat(cam0.featureVisu, dottedFrame, cam0.featureVisu);
 | 
			
		||||
 | 
			
		||||
  // irradiance grid anchor
 | 
			
		||||
 | 
			
		||||
  // patches visualization
 | 
			
		||||
  int N = sqrt(anchorPatch_3d.size());
 | 
			
		||||
  int scale = 20;
 | 
			
		||||
  int scale = 30;
 | 
			
		||||
  cv::Mat irradianceFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255));
 | 
			
		||||
  cv::resize(irradianceFrame, irradianceFrame, cv::Size(), rescale, rescale);
 | 
			
		||||
  
 | 
			
		||||
  // irradiance grid anchor
 | 
			
		||||
  std::stringstream namer;
 | 
			
		||||
  namer << "anchor";
 | 
			
		||||
  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 25), 
 | 
			
		||||
    cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  for(int i = 0; i<N; i++)
 | 
			
		||||
    for(int j = 0; j<N; j++)
 | 
			
		||||
      cv::rectangle(irradianceFrame, 
 | 
			
		||||
                    cv::Point(10+scale*(i+1), 10+scale*j), 
 | 
			
		||||
                    cv::Point(10+scale*i, 10+scale*(j+1)), 
 | 
			
		||||
                    cv::Point(30+scale*(i+1), 30+scale*j), 
 | 
			
		||||
                    cv::Point(30+scale*i, 30+scale*(j+1)), 
 | 
			
		||||
                    cv::Scalar(anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255),  
 | 
			
		||||
                    CV_FILLED);
 | 
			
		||||
 | 
			
		||||
  // irradiance grid projection
 | 
			
		||||
 | 
			
		||||
  namer.str(std::string());
 | 
			
		||||
  namer << "projection";
 | 
			
		||||
  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 45+scale*N), 
 | 
			
		||||
    cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
 | 
			
		||||
 | 
			
		||||
  for(int i = 0; i<N; i++)
 | 
			
		||||
    for(int j = 0; j<N ; j++)
 | 
			
		||||
      cv::rectangle(irradianceFrame, 
 | 
			
		||||
                    cv::Point(10+scale*(i+1), 20+scale*(N+j)), 
 | 
			
		||||
                    cv::Point(10+scale*(i), 20+scale*(N+j+1)), 
 | 
			
		||||
                    cv::Point(30+scale*(i+1), 50+scale*(N+j)), 
 | 
			
		||||
                    cv::Point(30+scale*(i), 50+scale*(N+j+1)), 
 | 
			
		||||
                    cv::Scalar(projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255),  
 | 
			
		||||
                    CV_FILLED);
 | 
			
		||||
 | 
			
		||||
  // true irradiance at feature
 | 
			
		||||
  // get current observation
 | 
			
		||||
 | 
			
		||||
  namer.str(std::string());
 | 
			
		||||
  namer << "feature";
 | 
			
		||||
  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 65+scale*2*N), 
 | 
			
		||||
    cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
 | 
			
		||||
 | 
			
		||||
  cv::Point2f p_f(observations.find(cam_state_id)->second(0),observations.find(cam_state_id)->second(1));
 | 
			
		||||
  // move to real pixels
 | 
			
		||||
  p_f = image_handler::distortPoint(p_f, cam0.intrinsics, cam0.distortion_model, cam0.distortion_coeffs);
 | 
			
		||||
 | 
			
		||||
  for(int i = 0; i<N; i++)
 | 
			
		||||
  {
 | 
			
		||||
    for(int j = 0; j<N ; j++)
 | 
			
		||||
    {
 | 
			
		||||
      float irr = PixelIrradiance(cv::Point2f(p_f.x + (i-(N-1)/2), p_f.y + (j-(N-1)/2)), current_image);
 | 
			
		||||
      cv::rectangle(irradianceFrame, 
 | 
			
		||||
                    cv::Point(30+scale*(i+1), 70+scale*(2*N+j)), 
 | 
			
		||||
                    cv::Point(30+scale*(i), 70+scale*(2*N+j+1)), 
 | 
			
		||||
                    cv::Scalar(irr*255, irr*255, irr*255),  
 | 
			
		||||
                    CV_FILLED);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // residual grid projection, positive - red, negative - blue colored 
 | 
			
		||||
  namer.str(std::string());
 | 
			
		||||
  namer << "residual";
 | 
			
		||||
  cv::putText(irradianceFrame, namer.str() , cvPoint(30+scale*N, scale*N/2-5), 
 | 
			
		||||
    cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
 | 
			
		||||
 | 
			
		||||
  for(int i = 0; i<N; i++)
 | 
			
		||||
    for(int j = 0; j<N; j++)
 | 
			
		||||
      if(photo_r[i*N+j]>0)
 | 
			
		||||
      if(photo_r(i*N+j)>0)
 | 
			
		||||
        cv::rectangle(irradianceFrame, 
 | 
			
		||||
                      cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)), 
 | 
			
		||||
                      cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)), 
 | 
			
		||||
                      cv::Scalar(255 - photo_r[i*N+j]*255, 255 - photo_r[i*N+j]*255, 255),  
 | 
			
		||||
                      cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)), 
 | 
			
		||||
                      cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)), 
 | 
			
		||||
                      cv::Scalar(255 - photo_r(i*N+j)*255, 255 - photo_r(i*N+j)*255, 255),  
 | 
			
		||||
                      CV_FILLED);
 | 
			
		||||
      else
 | 
			
		||||
        cv::rectangle(irradianceFrame, 
 | 
			
		||||
                      cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)), 
 | 
			
		||||
                      cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)), 
 | 
			
		||||
                      cv::Scalar(255, 255 + photo_r[i*N+j]*255, 255 + photo_r[i*N+j]*255), 
 | 
			
		||||
                      cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)), 
 | 
			
		||||
                      cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)), 
 | 
			
		||||
                      cv::Scalar(255, 255 + photo_r(i*N+j)*255, 255 + photo_r(i*N+j)*255), 
 | 
			
		||||
                      CV_FILLED);
 | 
			
		||||
 | 
			
		||||
      cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);
 | 
			
		||||
  // gradient arrow
 | 
			
		||||
  /*
 | 
			
		||||
  cv::arrowedLine(irradianceFrame,
 | 
			
		||||
                  cv::Point(30+scale*(N/2 +0.5), 50+scale*(N+(N/2)+0.5)),
 | 
			
		||||
                  cv::Point(30+scale*(N/2+0.5)+scale*gradientVector.x, 50+scale*(N+(N/2)+0.5)+scale*gradientVector.y),
 | 
			
		||||
                  cv::Scalar(100, 0, 255),
 | 
			
		||||
                  1);
 | 
			
		||||
  */
 | 
			
		||||
 | 
			
		||||
  // residual gradient direction
 | 
			
		||||
  cv::arrowedLine(irradianceFrame,
 | 
			
		||||
                  cv::Point(40+scale*(N+N/2+0.5), 15+scale*((N-0.5))),
 | 
			
		||||
                  cv::Point(40+scale*(N+N/2+0.5)+scale*residualVector.x, 15+scale*(N-0.5)+scale*residualVector.y),
 | 
			
		||||
                  cv::Scalar(0, 255, 175),
 | 
			
		||||
                  3);
 | 
			
		||||
                  
 | 
			
		||||
 | 
			
		||||
  cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);
 | 
			
		||||
      
 | 
			
		||||
/*
 | 
			
		||||
  // visualize position of used observations and resulting feature position
 | 
			
		||||
@@ -671,6 +851,37 @@ float Feature::PixelIrradiance(cv::Point2f pose, cv::Mat image) const
 | 
			
		||||
  return ((float)image.at<uint8_t>(pose.y, pose.x))/255;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
cv::Point2f Feature::pixelDistanceAt(
 | 
			
		||||
                  const CAMState& cam_state,
 | 
			
		||||
                  const StateIDType& cam_state_id,
 | 
			
		||||
                  const CameraCalibration& cam,
 | 
			
		||||
                  Eigen::Vector3d& in_p) const
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
  cv::Point2f cam_p = projectPositionToCamera(cam_state, cam_state_id, cam, in_p);
 | 
			
		||||
 | 
			
		||||
  // create vector of patch in pixel plane
 | 
			
		||||
  std::vector<cv::Point2f> surroundingPoints;
 | 
			
		||||
  surroundingPoints.push_back(cv::Point2f(cam_p.x+1, cam_p.y));
 | 
			
		||||
  surroundingPoints.push_back(cv::Point2f(cam_p.x-1, cam_p.y));
 | 
			
		||||
  surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y+1));
 | 
			
		||||
  surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y-1));
 | 
			
		||||
 | 
			
		||||
  std::vector<cv::Point2f> pure;
 | 
			
		||||
  image_handler::undistortPoints(surroundingPoints, 
 | 
			
		||||
                                cam.intrinsics, 
 | 
			
		||||
                                cam.distortion_model, 
 | 
			
		||||
                                cam.distortion_coeffs, 
 | 
			
		||||
                                pure); 
 | 
			
		||||
 | 
			
		||||
  // returns the absolute pixel distance at pixels one metres away
 | 
			
		||||
  cv::Point2f distance(fabs(pure[0].x - pure[1].x), fabs(pure[2].y - pure[3].y));
 | 
			
		||||
 | 
			
		||||
  return distance;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
cv::Point2f Feature::projectPositionToCamera(
 | 
			
		||||
                  const CAMState& cam_state,
 | 
			
		||||
                  const StateIDType& cam_state_id,
 | 
			
		||||
@@ -703,7 +914,7 @@ cv::Point2f Feature::projectPositionToCamera(
 | 
			
		||||
  return my_p;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Eigen::Vector3d Feature::projectPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam)
 | 
			
		||||
Eigen::Vector3d Feature::AnchorPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam)
 | 
			
		||||
{
 | 
			
		||||
  // use undistorted position of point of interest
 | 
			
		||||
  // project it back into 3D space using pinhole model
 | 
			
		||||
@@ -742,27 +953,19 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
 | 
			
		||||
  cv::Point2f und_pix_p = image_handler::distortPoint(cv::Point2f(u, v), 
 | 
			
		||||
                                                    cam.intrinsics, 
 | 
			
		||||
                                                    cam.distortion_model, 
 | 
			
		||||
                                                    0);
 | 
			
		||||
                                                    cam.distortion_coeffs);
 | 
			
		||||
  // create vector of patch in pixel plane
 | 
			
		||||
  std::vector<cv::Point2f>und_pix_v;
 | 
			
		||||
  for(double u_run = -n; u_run <= n; u_run++)
 | 
			
		||||
    for(double v_run = -n; v_run <= n; v_run++)
 | 
			
		||||
      und_pix_v.push_back(cv::Point2f(und_pix_p.x+u_run, und_pix_p.y+v_run));
 | 
			
		||||
      anchorPatch_real.push_back(cv::Point2f(und_pix_p.x+u_run, und_pix_p.y+v_run));
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  //create undistorted pure points    
 | 
			
		||||
  std::vector<cv::Point2f> und_v;
 | 
			
		||||
  image_handler::undistortPoints(und_pix_v,
 | 
			
		||||
  image_handler::undistortPoints(anchorPatch_real,
 | 
			
		||||
                                cam.intrinsics,
 | 
			
		||||
                                cam.distortion_model,
 | 
			
		||||
                                0,
 | 
			
		||||
                                und_v);
 | 
			
		||||
 | 
			
		||||
  //create distorted pixel points
 | 
			
		||||
  anchorPatch_real = image_handler::distortPoints(und_v,
 | 
			
		||||
                                                   cam.intrinsics,
 | 
			
		||||
                                                   cam.distortion_model,
 | 
			
		||||
                                                   cam.distortion_coeffs);
 | 
			
		||||
                                cam.distortion_coeffs,
 | 
			
		||||
                                anchorPatch_ideal);
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  // save anchor position for later visualisaztion
 | 
			
		||||
@@ -770,26 +973,170 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
 | 
			
		||||
 | 
			
		||||
  // save true pixel Patch position
 | 
			
		||||
  for(auto point : anchorPatch_real)
 | 
			
		||||
  {
 | 
			
		||||
    if(point.x - n < 0 || point.x + n >= cam.resolution(0) || point.y - n < 0 || point.y + n >= cam.resolution(1))
 | 
			
		||||
      return false;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  for(auto point : anchorPatch_real)
 | 
			
		||||
    anchorPatch.push_back(PixelIrradiance(point, anchorImage));
 | 
			
		||||
 | 
			
		||||
  // project patch pixel to 3D space in camera  coordinate system
 | 
			
		||||
  for(auto point : und_v)
 | 
			
		||||
  {
 | 
			
		||||
    anchorPatch_ideal.push_back(point);
 | 
			
		||||
    anchorPatch_3d.push_back(projectPixelToPosition(point, cam));
 | 
			
		||||
  }
 | 
			
		||||
  for(auto point : anchorPatch_ideal)
 | 
			
		||||
    anchorPatch_3d.push_back(AnchorPixelToPosition(point, cam));
 | 
			
		||||
 | 
			
		||||
  is_anchored = true;
 | 
			
		||||
  return true;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
bool Feature::initializeRho(const CamStateServer& cam_states) {
 | 
			
		||||
 | 
			
		||||
  // Organize camera poses and feature observations properly.
 | 
			
		||||
  std::vector<Eigen::Isometry3d,
 | 
			
		||||
    Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
 | 
			
		||||
  std::vector<Eigen::Vector2d,
 | 
			
		||||
    Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
 | 
			
		||||
 | 
			
		||||
  for (auto& m : observations) {
 | 
			
		||||
    auto cam_state_iter = cam_states.find(m.first);
 | 
			
		||||
    if (cam_state_iter == cam_states.end()) continue;
 | 
			
		||||
 | 
			
		||||
    // Add the measurement.
 | 
			
		||||
    measurements.push_back(m.second.head<2>());
 | 
			
		||||
    measurements.push_back(m.second.tail<2>());
 | 
			
		||||
 | 
			
		||||
    // This camera pose will take a vector from this camera frame
 | 
			
		||||
    // to the world frame.
 | 
			
		||||
    Eigen::Isometry3d cam0_pose;
 | 
			
		||||
    cam0_pose.linear() = quaternionToRotation(
 | 
			
		||||
        cam_state_iter->second.orientation).transpose();
 | 
			
		||||
    cam0_pose.translation() = cam_state_iter->second.position;
 | 
			
		||||
 | 
			
		||||
    Eigen::Isometry3d cam1_pose;
 | 
			
		||||
    cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
 | 
			
		||||
 | 
			
		||||
    cam_poses.push_back(cam0_pose);
 | 
			
		||||
    cam_poses.push_back(cam1_pose);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // All camera poses should be modified such that it takes a
 | 
			
		||||
  // vector from the first camera frame in the buffer to this
 | 
			
		||||
  // camera frame.
 | 
			
		||||
  Eigen::Isometry3d T_c0_w = cam_poses[0];
 | 
			
		||||
  T_anchor_w = T_c0_w;
 | 
			
		||||
  for (auto& pose : cam_poses)
 | 
			
		||||
    pose = pose.inverse() * T_c0_w;
 | 
			
		||||
 | 
			
		||||
  // Generate initial guess
 | 
			
		||||
  double initial_depth = 0;
 | 
			
		||||
  initial_depth = generateInitialDepth(cam_poses[cam_poses.size()-1], measurements[0],
 | 
			
		||||
      measurements[measurements.size()-1]);
 | 
			
		||||
 | 
			
		||||
  double solution = 1.0/initial_depth;
 | 
			
		||||
 | 
			
		||||
  // Apply Levenberg-Marquart method to solve for the 3d position.
 | 
			
		||||
  double lambda = optimization_config.initial_damping;
 | 
			
		||||
  int inner_loop_cntr = 0;
 | 
			
		||||
  int outer_loop_cntr = 0;
 | 
			
		||||
  bool is_cost_reduced = false;
 | 
			
		||||
  double delta_norm = 0;
 | 
			
		||||
 | 
			
		||||
  // Compute the initial cost.
 | 
			
		||||
  double total_cost = 0.0;
 | 
			
		||||
  for (int i = 0; i < cam_poses.size(); ++i) {
 | 
			
		||||
    double this_cost = 0.0;
 | 
			
		||||
    Rhocost(cam_poses[i], solution, measurements[0], measurements[i], this_cost);
 | 
			
		||||
    total_cost += this_cost;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Outer loop.
 | 
			
		||||
  do {
 | 
			
		||||
    Eigen::Matrix<double, 1, 1> A = Eigen::Matrix<double, 1, 1>::Zero();
 | 
			
		||||
    Eigen::Matrix<double, 1, 1>  b = Eigen::Matrix<double, 1, 1>::Zero();
 | 
			
		||||
 | 
			
		||||
    for (int i = 0; i < cam_poses.size(); ++i) {
 | 
			
		||||
      Eigen::Matrix<double, 2, 1> J;
 | 
			
		||||
      Eigen::Vector2d r;
 | 
			
		||||
      double w;
 | 
			
		||||
 | 
			
		||||
      RhoJacobian(cam_poses[i], solution, measurements[0], measurements[i], J, r, w);
 | 
			
		||||
 | 
			
		||||
      if (w == 1) {
 | 
			
		||||
        A += J.transpose() * J;
 | 
			
		||||
        b += J.transpose() * r;
 | 
			
		||||
      } else {
 | 
			
		||||
        double w_square = w * w;
 | 
			
		||||
        A += w_square * J.transpose() * J;
 | 
			
		||||
        b += w_square * J.transpose() * r;
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // Inner loop.
 | 
			
		||||
    // Solve for the delta that can reduce the total cost.
 | 
			
		||||
    do {
 | 
			
		||||
      Eigen::Matrix<double, 1, 1> damper = lambda*Eigen::Matrix<double, 1, 1>::Identity();
 | 
			
		||||
      Eigen::Matrix<double, 1, 1> delta = (A+damper).ldlt().solve(b);
 | 
			
		||||
      double new_solution = solution - delta(0,0);
 | 
			
		||||
      delta_norm = delta.norm();
 | 
			
		||||
 | 
			
		||||
      double new_cost = 0.0;
 | 
			
		||||
      for (int i = 0; i < cam_poses.size(); ++i) {
 | 
			
		||||
        double this_cost = 0.0;
 | 
			
		||||
        Rhocost(cam_poses[i], new_solution, measurements[0], measurements[i], this_cost);
 | 
			
		||||
        new_cost += this_cost;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      if (new_cost < total_cost) {
 | 
			
		||||
        is_cost_reduced = true;
 | 
			
		||||
        solution = new_solution;
 | 
			
		||||
        total_cost = new_cost;
 | 
			
		||||
        lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
 | 
			
		||||
      } else {
 | 
			
		||||
        is_cost_reduced = false;
 | 
			
		||||
        lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
    } while (inner_loop_cntr++ <
 | 
			
		||||
        optimization_config.inner_loop_max_iteration && !is_cost_reduced);
 | 
			
		||||
 | 
			
		||||
    inner_loop_cntr = 0;
 | 
			
		||||
 | 
			
		||||
  } while (outer_loop_cntr++ <
 | 
			
		||||
      optimization_config.outer_loop_max_iteration &&
 | 
			
		||||
      delta_norm > optimization_config.estimation_precision);
 | 
			
		||||
 | 
			
		||||
  // Covert the feature position from inverse depth
 | 
			
		||||
  // representation to its 3d coordinate.
 | 
			
		||||
  Eigen::Vector3d final_position(measurements[0](0)/solution,
 | 
			
		||||
      measurements[0](1)/solution, 1.0/solution);
 | 
			
		||||
 | 
			
		||||
  // Check if the solution is valid. Make sure the feature
 | 
			
		||||
  // is in front of every camera frame observing it.
 | 
			
		||||
  bool is_valid_solution = true;
 | 
			
		||||
  for (const auto& pose : cam_poses) {
 | 
			
		||||
    Eigen::Vector3d position =
 | 
			
		||||
      pose.linear()*final_position + pose.translation();
 | 
			
		||||
    if (position(2) <= 0) {
 | 
			
		||||
      is_valid_solution = false;
 | 
			
		||||
      break;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  //save inverse depth distance from camera
 | 
			
		||||
  anchor_rho = solution;
 | 
			
		||||
 | 
			
		||||
  // Convert the feature position to the world frame.
 | 
			
		||||
  position = T_c0_w.linear()*final_position + T_c0_w.translation();
 | 
			
		||||
 | 
			
		||||
  if (is_valid_solution)
 | 
			
		||||
    is_initialized = true;
 | 
			
		||||
 | 
			
		||||
  return is_valid_solution;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
bool Feature::initializePosition(const CamStateServer& cam_states) {
 | 
			
		||||
 | 
			
		||||
  // Organize camera poses and feature observations properly.
 | 
			
		||||
 | 
			
		||||
  std::vector<Eigen::Isometry3d,
 | 
			
		||||
    Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
 | 
			
		||||
  std::vector<Eigen::Vector2d,
 | 
			
		||||
@@ -927,6 +1274,7 @@ bool Feature::initializePosition(const CamStateServer& cam_states) {
 | 
			
		||||
 | 
			
		||||
  //save inverse depth distance from camera
 | 
			
		||||
  anchor_rho = solution(2);
 | 
			
		||||
  std::cout << "from feature: " << anchor_rho << std::endl;
 | 
			
		||||
 | 
			
		||||
  // Convert the feature position to the world frame.
 | 
			
		||||
  position = T_c0_w.linear()*final_position + T_c0_w.translation();
 | 
			
		||||
 
 | 
			
		||||
@@ -36,6 +36,15 @@ cv::Point2f distortPoint(
 | 
			
		||||
    const cv::Vec4d& intrinsics,
 | 
			
		||||
    const std::string& distortion_model,
 | 
			
		||||
    const cv::Vec4d& distortion_coeffs);
 | 
			
		||||
 | 
			
		||||
void undistortPoint(
 | 
			
		||||
    const cv::Point2f& pt_in,
 | 
			
		||||
    const cv::Vec4d& intrinsics,
 | 
			
		||||
    const std::string& distortion_model,
 | 
			
		||||
    const cv::Vec4d& distortion_coeffs,
 | 
			
		||||
    cv::Point2f& pt_out,
 | 
			
		||||
    const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(),
 | 
			
		||||
    const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0));
 | 
			
		||||
}
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 
 | 
			
		||||
@@ -195,9 +195,9 @@ class MsckfVio {
 | 
			
		||||
    // for a single feature observed at a single camera frame.
 | 
			
		||||
    void measurementJacobian(const StateIDType& cam_state_id,
 | 
			
		||||
        const FeatureIDType& feature_id,
 | 
			
		||||
        Eigen::Matrix<double, 4, 6>& H_x,
 | 
			
		||||
        Eigen::Matrix<double, 4, 3>& H_f,
 | 
			
		||||
        Eigen::Vector4d& r);
 | 
			
		||||
        Eigen::Matrix<double, 2, 6>& H_x,
 | 
			
		||||
        Eigen::Matrix<double, 2, 3>& H_f,
 | 
			
		||||
        Eigen::Vector2d& r);
 | 
			
		||||
    // This function computes the Jacobian of all measurements viewed
 | 
			
		||||
    // in the given camera states of this feature.
 | 
			
		||||
    void featureJacobian(const FeatureIDType& feature_id,
 | 
			
		||||
 
 | 
			
		||||
@@ -22,7 +22,7 @@
 | 
			
		||||
      <param name="PHOTOMETRIC" value="true"/>
 | 
			
		||||
 | 
			
		||||
      <!-- Debugging Flaggs -->
 | 
			
		||||
      <param name="PrintImages" value="false"/>
 | 
			
		||||
      <param name="PrintImages" value="true"/>
 | 
			
		||||
      <param name="GroundTruth" value="false"/>
 | 
			
		||||
 | 
			
		||||
      <param name="patch_size_n" value="7"/>
 | 
			
		||||
 
 | 
			
		||||
@@ -21,10 +21,10 @@
 | 
			
		||||
      <param name="PHOTOMETRIC" value="true"/>
 | 
			
		||||
 | 
			
		||||
      <!-- Debugging Flaggs -->
 | 
			
		||||
      <param name="PrintImages" value="false"/>
 | 
			
		||||
      <param name="PrintImages" value="true"/>
 | 
			
		||||
      <param name="GroundTruth" value="false"/>
 | 
			
		||||
 | 
			
		||||
      <param name="patch_size_n" value="7"/>
 | 
			
		||||
      <param name="patch_size_n" value="1"/>
 | 
			
		||||
      <!-- Calibration parameters -->
 | 
			
		||||
      <rosparam command="load" file="$(arg calibration_file)"/>
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -14,6 +14,48 @@ namespace msckf_vio {
 | 
			
		||||
namespace image_handler {
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
void undistortPoint(
 | 
			
		||||
    const cv::Point2f& pt_in,
 | 
			
		||||
    const cv::Vec4d& intrinsics,
 | 
			
		||||
    const std::string& distortion_model,
 | 
			
		||||
    const cv::Vec4d& distortion_coeffs,
 | 
			
		||||
    cv::Point2f& pt_out,
 | 
			
		||||
    const cv::Matx33d &rectification_matrix,
 | 
			
		||||
    const cv::Vec4d &new_intrinsics) {
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  std::vector<cv::Point2f> pts_in;
 | 
			
		||||
  std::vector<cv::Point2f> pts_out;
 | 
			
		||||
  pts_in.push_back(pt_in);
 | 
			
		||||
  if (pts_in.size() == 0) return;
 | 
			
		||||
 | 
			
		||||
  const cv::Matx33d K(
 | 
			
		||||
      intrinsics[0], 0.0, intrinsics[2],
 | 
			
		||||
      0.0, intrinsics[1], intrinsics[3],
 | 
			
		||||
      0.0, 0.0, 1.0);
 | 
			
		||||
 | 
			
		||||
  const cv::Matx33d K_new(
 | 
			
		||||
      new_intrinsics[0], 0.0, new_intrinsics[2],
 | 
			
		||||
      0.0, new_intrinsics[1], new_intrinsics[3],
 | 
			
		||||
      0.0, 0.0, 1.0);
 | 
			
		||||
 | 
			
		||||
  if (distortion_model == "radtan") {
 | 
			
		||||
    cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
 | 
			
		||||
                        rectification_matrix, K_new);
 | 
			
		||||
  } else if (distortion_model == "equidistant") {
 | 
			
		||||
    cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
 | 
			
		||||
                                 rectification_matrix, K_new);
 | 
			
		||||
  } else {
 | 
			
		||||
    ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
 | 
			
		||||
                  distortion_model.c_str());
 | 
			
		||||
    cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
 | 
			
		||||
                        rectification_matrix, K_new);
 | 
			
		||||
  }
 | 
			
		||||
  pt_out = pts_out[0];
 | 
			
		||||
  return;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void undistortPoints(
 | 
			
		||||
    const std::vector<cv::Point2f>& pts_in,
 | 
			
		||||
    const cv::Vec4d& intrinsics,
 | 
			
		||||
 
 | 
			
		||||
@@ -404,8 +404,18 @@ void MsckfVio::imageCallback(
 | 
			
		||||
    const sensor_msgs::ImageConstPtr& cam1_img,
 | 
			
		||||
    const CameraMeasurementConstPtr& feature_msg)
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
  if(PRINTIMAGES)
 | 
			
		||||
  {
 | 
			
		||||
    std::cout << "stopped playpack" << std::endl;
 | 
			
		||||
    nh.setParam("/play_bag", false);
 | 
			
		||||
  }
 | 
			
		||||
  // Return if the gravity vector has not been set.
 | 
			
		||||
  if (!is_gravity_set) return;
 | 
			
		||||
  if (!is_gravity_set) 
 | 
			
		||||
  {
 | 
			
		||||
    nh.setParam("/play_bag", true);
 | 
			
		||||
    return;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Start the system if the first image is received.
 | 
			
		||||
  // The frame where the first image is received will be
 | 
			
		||||
@@ -430,7 +440,7 @@ void MsckfVio::imageCallback(
 | 
			
		||||
  
 | 
			
		||||
  //if(ErrState)
 | 
			
		||||
  //{
 | 
			
		||||
    batchTruthProcessing(feature_msg->header.stamp.toSec());
 | 
			
		||||
  //batchTruthProcessing(feature_msg->header.stamp.toSec());
 | 
			
		||||
    
 | 
			
		||||
    if(GROUNDTRUTH)
 | 
			
		||||
    {
 | 
			
		||||
@@ -454,7 +464,7 @@ void MsckfVio::imageCallback(
 | 
			
		||||
    PhotometricStateAugmentation(feature_msg->header.stamp.toSec());
 | 
			
		||||
  }
 | 
			
		||||
  else
 | 
			
		||||
    stateAugmentation(feature_msg->header.stamp.toSec());
 | 
			
		||||
    PhotometricStateAugmentation(feature_msg->header.stamp.toSec());
 | 
			
		||||
  double state_augmentation_time = (
 | 
			
		||||
      ros::Time::now()-start_time).toSec();
 | 
			
		||||
 | 
			
		||||
@@ -512,6 +522,13 @@ void MsckfVio::imageCallback(
 | 
			
		||||
        publish_time, publish_time/processing_time);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  if(PRINTIMAGES)
 | 
			
		||||
  {
 | 
			
		||||
    std::cout << "stopped playpack" << std::endl;
 | 
			
		||||
    nh.setParam("/play_bag", true);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  return;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -1175,7 +1192,7 @@ void MsckfVio::PhotometricStateAugmentation(const double& time)
 | 
			
		||||
    J * P11 * J.transpose();
 | 
			
		||||
 | 
			
		||||
  // Add photometry P_eta and surrounding Zeros
 | 
			
		||||
  state_server.state_cov(old_rows+6, old_cols+6) = irradiance_frame_bias;
 | 
			
		||||
  state_server.state_cov(old_rows+6, old_cols+6) = 0;
 | 
			
		||||
  
 | 
			
		||||
  // Fix the covariance to be symmetric
 | 
			
		||||
  MatrixXd state_cov_fixed = (state_server.state_cov +
 | 
			
		||||
@@ -1232,17 +1249,10 @@ void MsckfVio::PhotometricMeasurementJacobian(
 | 
			
		||||
  Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
 | 
			
		||||
  const Vector3d& t_c0_w = cam_state.position;
 | 
			
		||||
 | 
			
		||||
  // Cam1 pose.
 | 
			
		||||
  Matrix3d R_c0_c1 = CAMState::T_cam0_cam1.linear();
 | 
			
		||||
  Matrix3d R_w_c1 = CAMState::T_cam0_cam1.linear() * R_w_c0;
 | 
			
		||||
  Vector3d t_c1_w = t_c0_w - R_w_c1.transpose()*CAMState::T_cam0_cam1.translation();
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  //photometric observation
 | 
			
		||||
  std::vector<double> photo_z;
 | 
			
		||||
 | 
			
		||||
  // individual Jacobians
 | 
			
		||||
  Matrix<double, 1, 2> dI_dhj = Matrix<double, 1, 2>::Zero();
 | 
			
		||||
  Matrix<double, 2, 3> dh_dCpij = Matrix<double, 2, 3>::Zero();
 | 
			
		||||
  Matrix<double, 2, 3> dh_dGpij = Matrix<double, 2, 3>::Zero();
 | 
			
		||||
  Matrix<double, 2, 6> dh_dXplj = Matrix<double, 2, 6>::Zero();
 | 
			
		||||
@@ -1254,107 +1264,77 @@ void MsckfVio::PhotometricMeasurementJacobian(
 | 
			
		||||
  Matrix<double, 3, 3> dCpij_dGpC = Matrix<double, 3, 3>::Zero();
 | 
			
		||||
 | 
			
		||||
  // one line of the NxN Jacobians
 | 
			
		||||
  Eigen::Matrix<double, 1, 1> H_rhoj;
 | 
			
		||||
  Eigen::Matrix<double, 1, 6> H_plj;
 | 
			
		||||
  Eigen::Matrix<double, 1, 6> H_pAj;
 | 
			
		||||
 | 
			
		||||
  // combined Jacobians
 | 
			
		||||
  Eigen::MatrixXd H_rho(N*N, 1);
 | 
			
		||||
  Eigen::MatrixXd H_pl(N*N, 6);
 | 
			
		||||
  Eigen::MatrixXd H_pA(N*N, 6);
 | 
			
		||||
  Eigen::Matrix<double, 2, 1> H_rho;
 | 
			
		||||
  Eigen::Matrix<double, 2, 6> H_plj;
 | 
			
		||||
  Eigen::Matrix<double, 2, 6> H_pAj;
 | 
			
		||||
 | 
			
		||||
  auto frame = cam0.moving_window.find(cam_state_id)->second.image;
 | 
			
		||||
 | 
			
		||||
  int count = 0;
 | 
			
		||||
  double dx, dy;
 | 
			
		||||
 | 
			
		||||
  for (auto point : feature.anchorPatch_3d)
 | 
			
		||||
  {
 | 
			
		||||
    Eigen::Vector3d p_c0 = R_w_c0 * (point-t_c0_w);
 | 
			
		||||
    cv::Point2f p_in_c0 = feature.projectPositionToCamera(cam_state, cam_state_id, cam0, point);
 | 
			
		||||
  auto point = feature.anchorPatch_3d[0];
 | 
			
		||||
 | 
			
		||||
    //add observation
 | 
			
		||||
    photo_z.push_back(feature.PixelIrradiance(p_in_c0, frame));
 | 
			
		||||
  Eigen::Vector3d p_c0 = R_w_c0 * (point-t_c0_w);
 | 
			
		||||
  // add jacobian
 | 
			
		||||
  
 | 
			
		||||
  //dh / d{}^Cp_{ij}
 | 
			
		||||
  dh_dCpij(0, 0) = 1 / p_c0(2);
 | 
			
		||||
  dh_dCpij(1, 1) = 1 / p_c0(2);
 | 
			
		||||
  dh_dCpij(0, 2) = -(p_c0(0))/(p_c0(2)*p_c0(2));
 | 
			
		||||
  dh_dCpij(1, 2) = -(p_c0(1))/(p_c0(2)*p_c0(2));
 | 
			
		||||
 | 
			
		||||
    // add jacobian
 | 
			
		||||
  dCpij_dGpij = quaternionToRotation(cam_state.orientation);
 | 
			
		||||
 | 
			
		||||
    // frame derivative calculated convoluting with kernel [-1, 0, 1]
 | 
			
		||||
    dx = feature.PixelIrradiance(cv::Point2f(p_in_c0.x+1, p_in_c0.y), frame) - feature.PixelIrradiance(cv::Point2f(p_in_c0.x-1, p_in_c0.y), frame);
 | 
			
		||||
    dy = feature.PixelIrradiance(cv::Point2f(p_in_c0.x, p_in_c0.y+1), frame) - feature.PixelIrradiance(cv::Point2f(p_in_c0.x, p_in_c0.y-1), frame);
 | 
			
		||||
    dI_dhj(0, 0) = dx;
 | 
			
		||||
    dI_dhj(0, 1) = dy;
 | 
			
		||||
    
 | 
			
		||||
    //dh / d{}^Cp_{ij}
 | 
			
		||||
    dh_dCpij(0, 0) = 1 / p_c0(2);
 | 
			
		||||
    dh_dCpij(1, 1) = 1 / p_c0(2);
 | 
			
		||||
    dh_dCpij(0, 2) = -(p_c0(0))/(p_c0(2)*p_c0(2));
 | 
			
		||||
    dh_dCpij(1, 2) = -(p_c0(1))/(p_c0(2)*p_c0(2));
 | 
			
		||||
  //orientation takes camera frame to world frame
 | 
			
		||||
  dh_dGpij = dh_dCpij * dCpij_dGpij;
 | 
			
		||||
 | 
			
		||||
    dCpij_dGpij = quaternionToRotation(cam_state.orientation);
 | 
			
		||||
  //dh / d X_{pl}
 | 
			
		||||
  dCpij_dCGtheta = skewSymmetric(p_c0);
 | 
			
		||||
  dCpij_dGpC = -quaternionToRotation(cam_state.orientation);
 | 
			
		||||
  dh_dXplj.block<2, 3>(0, 0) = dh_dCpij * dCpij_dCGtheta;
 | 
			
		||||
  dh_dXplj.block<2, 3>(0, 3) = dh_dCpij * dCpij_dGpC;
 | 
			
		||||
 | 
			
		||||
    //orientation takes camera frame to world frame, we wa
 | 
			
		||||
    dh_dGpij = dh_dCpij * dCpij_dGpij;
 | 
			
		||||
  //d{}^Gp_P{ij} / \rho_i
 | 
			
		||||
  double rho = feature.anchor_rho;
 | 
			
		||||
  // Isometry T_anchor_w takes a vector in anchor frame to world frame
 | 
			
		||||
  dGpj_drhoj = -feature.T_anchor_w.linear() * Eigen::Vector3d(feature.anchorPatch_ideal[count].x/(rho*rho), feature.anchorPatch_ideal[count].y/(rho*rho), 1/(rho*rho));
 | 
			
		||||
 | 
			
		||||
    //dh / d X_{pl}
 | 
			
		||||
    dCpij_dCGtheta = skewSymmetric(p_c0);
 | 
			
		||||
    dCpij_dGpC = -quaternionToRotation(cam_state.orientation);
 | 
			
		||||
    dh_dXplj.block<2, 3>(0, 0) = dh_dCpij * dCpij_dCGtheta;
 | 
			
		||||
    dh_dXplj.block<2, 3>(0, 3) = dh_dCpij * dCpij_dGpC;
 | 
			
		||||
 | 
			
		||||
    //d{}^Gp_P{ij} / \rho_i
 | 
			
		||||
    double rho = feature.anchor_rho;
 | 
			
		||||
    // Isometry T_anchor_w takes a vector in anchor frame to world frame
 | 
			
		||||
    dGpj_drhoj = -feature.T_anchor_w.linear() * Eigen::Vector3d(feature.anchorPatch_ideal[count].x/(rho*rho), feature.anchorPatch_ideal[count].y/(rho*rho), 1/(rho*rho));
 | 
			
		||||
 | 
			
		||||
    dGpj_XpAj.block<3, 3>(0, 0) = - feature.T_anchor_w.linear()
 | 
			
		||||
                                 * skewSymmetric(Eigen::Vector3d(feature.anchorPatch_ideal[count].x/(rho), 
 | 
			
		||||
                                                   feature.anchorPatch_ideal[count].y/(rho), 
 | 
			
		||||
                                                   1/(rho)));
 | 
			
		||||
    dGpj_XpAj.block<3, 3>(0, 3) = Matrix<double, 3, 3>::Identity();
 | 
			
		||||
  // alternative derivation towards feature
 | 
			
		||||
  Matrix3d dCpc0_dpg = R_w_c0;
 | 
			
		||||
  dGpj_XpAj.block<3, 3>(0, 0) = - feature.T_anchor_w.linear()
 | 
			
		||||
                               * skewSymmetric(Eigen::Vector3d(feature.anchorPatch_ideal[count].x/(rho), 
 | 
			
		||||
                                                 feature.anchorPatch_ideal[count].y/(rho), 
 | 
			
		||||
                                                 1/(rho)));
 | 
			
		||||
  dGpj_XpAj.block<3, 3>(0, 3) = Matrix<double, 3, 3>::Identity();
 | 
			
		||||
 | 
			
		||||
    // Intermediate Jakobians
 | 
			
		||||
    H_rhoj = dI_dhj * dh_dGpij * dGpj_drhoj; // 1 x 1
 | 
			
		||||
    H_plj = dI_dhj * dh_dXplj; // 1 x 6
 | 
			
		||||
    H_pAj = dI_dhj * dh_dGpij * dGpj_XpAj; // 1 x 6
 | 
			
		||||
  // Intermediate Jakobians
 | 
			
		||||
  H_rho = dh_dGpij * dGpj_drhoj; // 2 x 1
 | 
			
		||||
  H_plj = dh_dXplj; // 2 x 6
 | 
			
		||||
  H_pAj = dh_dGpij * dGpj_XpAj; // 2 x 6
 | 
			
		||||
 | 
			
		||||
    H_rho.block<1, 1>(count, 0) = H_rhoj;
 | 
			
		||||
    H_pl.block<1, 6>(count, 0) = H_plj;
 | 
			
		||||
    H_pA.block<1, 6>(count, 0) = H_pAj;
 | 
			
		||||
 | 
			
		||||
    count++;
 | 
			
		||||
  }
 | 
			
		||||
  // calculate residual
 | 
			
		||||
 | 
			
		||||
  //observation
 | 
			
		||||
  const Vector4d& z = feature.observations.find(cam_state_id)->second;
 | 
			
		||||
 | 
			
		||||
  //estimate photometric measurement
 | 
			
		||||
  std::vector<double> estimate_irradiance;
 | 
			
		||||
  std::vector<double> estimate_photo_z;
 | 
			
		||||
  IlluminationParameter estimated_illumination;
 | 
			
		||||
  feature.estimate_FrameIrradiance(cam_state, cam_state_id, cam0, estimate_irradiance, estimated_illumination);
 | 
			
		||||
  const Vector4d& total_z = feature.observations.find(cam_state_id)->second;
 | 
			
		||||
  const Vector2d z = Vector2d(total_z[0], total_z[1]);
 | 
			
		||||
 
 | 
			
		||||
  VectorXd r_i = VectorXd::Zero(2);
 | 
			
		||||
  
 | 
			
		||||
  // calculated here, because we need true 'estimate_irradiance' later for jacobi
 | 
			
		||||
  for (auto& estimate_irradiance_j : estimate_irradiance)
 | 
			
		||||
            estimate_photo_z.push_back (estimate_irradiance_j * 
 | 
			
		||||
                    estimated_illumination.frame_gain * estimated_illumination.feature_gain +
 | 
			
		||||
                    estimated_illumination.frame_bias + estimated_illumination.feature_bias);
 | 
			
		||||
  //calculate residual
 | 
			
		||||
 | 
			
		||||
  std::vector<double> photo_r;
 | 
			
		||||
  
 | 
			
		||||
  //calculate photom. residual
 | 
			
		||||
  for(int i = 0; i < photo_z.size(); i++)
 | 
			
		||||
    photo_r.push_back(photo_z[i] - estimate_photo_z[i]);
 | 
			
		||||
 | 
			
		||||
  MatrixXd H_xl = MatrixXd::Zero(N*N, 21+state_server.cam_states.size()*7);
 | 
			
		||||
  MatrixXd H_yl = MatrixXd::Zero(N*N, N*N+state_server.cam_states.size()+1);
 | 
			
		||||
  r_i[0] = z[0] - p_c0(0)/p_c0(2);
 | 
			
		||||
  r_i[1] = z[1] - p_c0(1)/p_c0(2);
 | 
			
		||||
 | 
			
		||||
  MatrixXd H_xl = MatrixXd::Zero(2, 21+state_server.cam_states.size()*7);
 | 
			
		||||
 
 | 
			
		||||
  // set anchor Jakobi
 | 
			
		||||
    // get position of anchor in cam states
 | 
			
		||||
 | 
			
		||||
  auto cam_state_anchor = state_server.cam_states.find(feature.observations.begin()->first);
 | 
			
		||||
  int cam_state_cntr_anchor = std::distance(state_server.cam_states.begin(), cam_state_anchor);
 | 
			
		||||
  H_xl.block(0, 21+cam_state_cntr_anchor*7, N*N, 6) = -H_pA; 
 | 
			
		||||
  H_xl.block(0, 21+cam_state_cntr_anchor*7, 2, 6) = H_pAj; 
 | 
			
		||||
 | 
			
		||||
  // set frame Jakobi
 | 
			
		||||
    //get position of current frame in cam states
 | 
			
		||||
@@ -1362,32 +1342,19 @@ void MsckfVio::PhotometricMeasurementJacobian(
 | 
			
		||||
  int cam_state_cntr = std::distance(state_server.cam_states.begin(), cam_state_iter);
 | 
			
		||||
  
 | 
			
		||||
    // set jakobi of state
 | 
			
		||||
  H_xl.block(0, 21+cam_state_cntr*7, N*N, 6) = -H_pl;
 | 
			
		||||
 | 
			
		||||
    // set ones for irradiance bias
 | 
			
		||||
  H_xl.block(0, 21+cam_state_cntr*7+6, N*N, 1) = Eigen::ArrayXd::Ones(N*N);
 | 
			
		||||
 | 
			
		||||
  // set irradiance error Block
 | 
			
		||||
  H_yl.block(0, 0,N*N, N*N) = estimated_illumination.feature_gain * estimated_illumination.frame_gain * Eigen::MatrixXd::Identity(N*N, N*N);
 | 
			
		||||
  
 | 
			
		||||
  // TODO make this calculation more fluent
 | 
			
		||||
  for(int i = 0; i< N*N; i++)
 | 
			
		||||
    H_yl(i, N*N+cam_state_cntr) = estimate_irradiance[i];
 | 
			
		||||
  H_yl.block(0, N*N+state_server.cam_states.size(), N*N, 1) = -H_rho;
 | 
			
		||||
  H_xl.block(0, 21+cam_state_cntr*7, 2, 6) = H_plj;
 | 
			
		||||
 | 
			
		||||
  H_x = H_xl;
 | 
			
		||||
  H_y = H_yl;
 | 
			
		||||
  H_y = H_rho;
 | 
			
		||||
  r = r_i;
 | 
			
		||||
 | 
			
		||||
  //TODO make this more fluent as well
 | 
			
		||||
  count = 0; 
 | 
			
		||||
  for(auto data : photo_r)
 | 
			
		||||
    r[count++] = data;
 | 
			
		||||
  std::stringstream ss;
 | 
			
		||||
  ss << "INFO:" << " anchor: " << cam_state_cntr_anchor << "  frame: " << cam_state_cntr;
 | 
			
		||||
  if(PRINTIMAGES)
 | 
			
		||||
  {  
 | 
			
		||||
    feature.MarkerGeneration(marker_pub, state_server.cam_states);
 | 
			
		||||
    feature.VisualizePatch(cam_state, cam_state_id, cam0, photo_r, ss);
 | 
			
		||||
    //std::stringstream ss;
 | 
			
		||||
    //ss << "INFO:" << " anchor: " << cam_state_cntr_anchor << "  frame: " << cam_state_cntr;
 | 
			
		||||
    //feature.MarkerGeneration(marker_pub, state_server.cam_states);
 | 
			
		||||
    //feature.VisualizePatch(cam_state, cam_state_id, cam0, photo_r, ss);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  return;
 | 
			
		||||
@@ -1400,28 +1367,6 @@ void MsckfVio::PhotometricFeatureJacobian(
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
  // stop playing bagfile if printing images
 | 
			
		||||
  if(PRINTIMAGES)
 | 
			
		||||
  {
 | 
			
		||||
    std::cout << "stopped playpack" << std::endl;
 | 
			
		||||
    nh.setParam("/play_bag", false);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  // Errstate
 | 
			
		||||
  calcErrorState();
 | 
			
		||||
  /*
 | 
			
		||||
  std::cout << "--- error state: ---\n " << std::endl;
 | 
			
		||||
  std::cout << "imu orientation:\n " << err_state_server.imu_state.orientation << std::endl;
 | 
			
		||||
  std::cout << "imu position\n" << err_state_server.imu_state.position << std::endl;
 | 
			
		||||
  
 | 
			
		||||
  int count = 0;
 | 
			
		||||
  for(auto cam_state : err_state_server.cam_states)
 | 
			
		||||
  {
 | 
			
		||||
    std::cout << " - cam " << count++ << " - \n" << std::endl;
 | 
			
		||||
    std::cout << "orientation: " << cam_state.second.orientation(0) << cam_state.second.orientation(1) << " " << cam_state.second.orientation(2) << " " << std::endl;
 | 
			
		||||
    std::cout << "position:" << cam_state.second.position(0) << " " << cam_state.second.position(1) << " " << cam_state.second.position(2) << std::endl;
 | 
			
		||||
  }
 | 
			
		||||
  */
 | 
			
		||||
 | 
			
		||||
  const auto& feature = map_server[feature_id];
 | 
			
		||||
  
 | 
			
		||||
@@ -1433,15 +1378,17 @@ void MsckfVio::PhotometricFeatureJacobian(
 | 
			
		||||
    if (feature.observations.find(cam_id) ==
 | 
			
		||||
        feature.observations.end()) continue;
 | 
			
		||||
 | 
			
		||||
    if (feature.observations.find(cam_id) ==
 | 
			
		||||
        feature.observations.begin()) continue;
 | 
			
		||||
    valid_cam_state_ids.push_back(cam_id);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  int jacobian_row_size = 0;
 | 
			
		||||
  jacobian_row_size = N * N * valid_cam_state_ids.size();
 | 
			
		||||
  jacobian_row_size = 2 * valid_cam_state_ids.size();
 | 
			
		||||
 | 
			
		||||
  MatrixXd H_xi = MatrixXd::Zero(jacobian_row_size,
 | 
			
		||||
      21+state_server.cam_states.size()*7);
 | 
			
		||||
  MatrixXd H_yi = MatrixXd::Zero(jacobian_row_size, N*N+state_server.cam_states.size()+1);
 | 
			
		||||
  MatrixXd H_yi = MatrixXd::Zero(jacobian_row_size, 1);
 | 
			
		||||
  VectorXd r_i = VectorXd::Zero(jacobian_row_size);
 | 
			
		||||
  int stack_cntr = 0;
 | 
			
		||||
 | 
			
		||||
@@ -1449,10 +1396,9 @@ void MsckfVio::PhotometricFeatureJacobian(

    MatrixXd H_xl;
    MatrixXd H_yl;
    Eigen::VectorXd r_l = VectorXd::Zero(N*N);
    Eigen::VectorXd r_l = VectorXd::Zero(2);

    PhotometricMeasurementJacobian(cam_id, feature.id, H_xl, H_yl, r_l);

    auto cam_state_iter = state_server.cam_states.find(cam_id);
    int cam_state_cntr = std::distance(
        state_server.cam_states.begin(), cam_state_iter);
@@ -1460,8 +1406,8 @@ void MsckfVio::PhotometricFeatureJacobian(
    // Stack the Jacobians.
    H_xi.block(stack_cntr, 0, H_xl.rows(), H_xl.cols()) = H_xl;
    H_yi.block(stack_cntr, 0, H_yl.rows(), H_yl.cols()) = H_yl;
    r_i.segment(stack_cntr, N*N) = r_l;
    stack_cntr += N*N;
    r_i.segment(stack_cntr, 2) = r_l;
    stack_cntr += 2;
  }

  // Project the residual and Jacobians onto the nullspace
@@ -1470,32 +1416,47 @@ void MsckfVio::PhotometricFeatureJacobian(
  // get Nullspace
  FullPivLU<MatrixXd> lu(H_yi.transpose());
  MatrixXd A_null_space = lu.kernel();
  /*
  JacobiSVD<MatrixXd> svd_helper(H_yi, ComputeFullU | ComputeThinV);

  int sv_size = 0;
  Eigen::VectorXd singularValues = svd_helper.singularValues();
  for(int i = 0; i < singularValues.size(); i++)
    if(singularValues[i] > 1e-12)
      sv_size++;

  MatrixXd A = svd_helper.matrixU().rightCols(jacobian_row_size - singularValues.size());
  */
  H_x = A_null_space.transpose() * H_xi;
  r = A_null_space.transpose() * r_i;

  /*
  if(PRINTIMAGES)
  {

  ofstream myfile;
  myfile.open("/home/raphael/dev/octave/log2octave");
  myfile << "# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST <raphael@raphael-desktop>\n"
         << "# name: Hx\n"
         << "# type: matrix\n"
         << "# rows: " << H_xi.rows() << "\n"
         << "# columns: " << H_xi.cols() << "\n"
         << H_xi << endl;

  myfile << "# name: Hy\n"
         << "# type: matrix\n"
         << "# rows: " << H_yi.rows() << "\n"
         << "# columns: " << H_yi.cols() << "\n"
         << H_yi << endl;


  myfile << "# name: r\n"
         << "# type: matrix\n"
         << "# rows: " << r_i.rows() << "\n"
         << "# columns: " << 1 << "\n"
         << r_i << endl;

  myfile.close();
    std::cout << "resume playback" << std::endl;
    nh.setParam("/play_bag", true);
  }
  }*/
  return;
}

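The projection just above multiplies the stacked system with a basis of the left nullspace of H_yi: FullPivLU::kernel() returns columns A with H_yi^T * A = 0, hence A^T * H_yi = 0, so the unobserved feature/irradiance block drops out of the update. A minimal, self-contained Eigen sketch of that step, with toy dimensions that are not taken from this commit:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      using namespace Eigen;
      // Toy sizes: 6 stacked residual rows, 15 state columns, 1 feature column.
      MatrixXd H_x_full = MatrixXd::Random(6, 15);
      MatrixXd H_y      = MatrixXd::Random(6, 1);
      VectorXd r_full   = VectorXd::Random(6);

      // Columns of A span null(H_y^T), so A^T * H_y vanishes.
      FullPivLU<MatrixXd> lu(H_y.transpose());
      MatrixXd A = lu.kernel();

      MatrixXd H_x = A.transpose() * H_x_full;  // reduced state Jacobian
      VectorXd r   = A.transpose() * r_full;    // reduced residual

      std::cout << "||A^T H_y|| = " << (A.transpose() * H_y).norm() << "\n"
                << "reduced rows: " << r.rows() << std::endl;
      return 0;
    }
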
void MsckfVio::measurementJacobian(
    const StateIDType& cam_state_id,
    const FeatureIDType& feature_id,
    Matrix<double, 4, 6>& H_x, Matrix<double, 4, 3>& H_f, Vector4d& r)
    Matrix<double, 2, 6>& H_x, Matrix<double, 2, 3>& H_f, Vector2d& r)
{

  // Prepare all the required data.
@@ -1514,48 +1475,42 @@ void MsckfVio::measurementJacobian(
  // 3d feature position in the world frame.
  // And its observation with the stereo cameras.
  const Vector3d& p_w = feature.position;
  const Vector4d& z = feature.observations.find(cam_state_id)->second;
  const Vector2d& z = feature.observations.find(cam_state_id)->second.topRows(2);

  // Convert the feature position from the world frame to
  // the cam0 and cam1 frame.
  Vector3d p_c0 = R_w_c0 * (p_w-t_c0_w);
  Vector3d p_c1 = R_w_c1 * (p_w-t_c1_w);
  //Vector3d p_c1 = R_w_c1 * (p_w-t_c1_w);

  // Compute the Jacobians.
  Matrix<double, 4, 3> dz_dpc0 = Matrix<double, 4, 3>::Zero();
  Matrix<double, 2, 3> dz_dpc0 = Matrix<double, 2, 3>::Zero();
  dz_dpc0(0, 0) = 1 / p_c0(2);
  dz_dpc0(1, 1) = 1 / p_c0(2);
  dz_dpc0(0, 2) = -p_c0(0) / (p_c0(2)*p_c0(2));
  dz_dpc0(1, 2) = -p_c0(1) / (p_c0(2)*p_c0(2));

  /*
  Matrix<double, 4, 3> dz_dpc1 = Matrix<double, 4, 3>::Zero();
  dz_dpc1(2, 0) = 1 / p_c1(2);
  dz_dpc1(3, 1) = 1 / p_c1(2);
  dz_dpc1(2, 2) = -p_c1(0) / (p_c1(2)*p_c1(2));
  dz_dpc1(3, 2) = -p_c1(1) / (p_c1(2)*p_c1(2));

  */
  Matrix<double, 3, 6> dpc0_dxc = Matrix<double, 3, 6>::Zero();

  // original jacobi
  //dpc0_dxc.leftCols(3) = skewSymmetric(p_c0);
  // my version of calculation
  dpc0_dxc.leftCols(3) = skewSymmetric(p_c0);
  //dpc0_dxc.leftCols(3) = - skewSymmetric(R_w_c0.transpose() * (t_c0_w - p_w)) * R_w_c0;
  dpc0_dxc.rightCols(3) = -R_w_c0;

  Matrix<double, 3, 6> dpc1_dxc = Matrix<double, 3, 6>::Zero();
  dpc1_dxc.leftCols(3) = R_c0_c1 * skewSymmetric(p_c0);
  dpc1_dxc.rightCols(3) = -R_w_c1;

  Matrix3d dpc0_dpg = R_w_c0;
  Matrix3d dpc1_dpg = R_w_c1;

  H_x = dz_dpc0*dpc0_dxc + dz_dpc1*dpc1_dxc;
  H_f = dz_dpc0*dpc0_dpg + dz_dpc1*dpc1_dpg;
  H_x = dz_dpc0*dpc0_dxc; //+ dz_dpc1*dpc1_dxc;
  H_f = dz_dpc0*dpc0_dpg; // + dz_dpc1*dpc1_dpg;

  // Compute the residual.
  r = z - Vector4d(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2),
      p_c1(0)/p_c1(2), p_c1(1)/p_c1(2));
  r = z - Vector2d(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));//,
        //p_c1(0)/p_c1(2), p_c1(1)/p_c1(2));

  return;
}
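The 2x3 block dz_dpc0 above is the Jacobian of the monocular pinhole projection z = (x/z, y/z) with respect to the point in the camera frame. A small stand-alone check of that derivative against central differences, for illustration only (the variable names mirror the snippet above but the values are made up):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      using namespace Eigen;
      Vector3d p_c0(0.3, -0.2, 2.0);   // feature expressed in the camera frame

      auto project = [](const Vector3d& p) {
        return Vector2d(p(0) / p(2), p(1) / p(2));
      };

      // Analytic Jacobian, exactly as in measurementJacobian above.
      Matrix<double, 2, 3> dz_dpc0 = Matrix<double, 2, 3>::Zero();
      dz_dpc0(0, 0) = 1 / p_c0(2);
      dz_dpc0(1, 1) = 1 / p_c0(2);
      dz_dpc0(0, 2) = -p_c0(0) / (p_c0(2)*p_c0(2));
      dz_dpc0(1, 2) = -p_c0(1) / (p_c0(2)*p_c0(2));

      // Numerical Jacobian for comparison.
      const double eps = 1e-6;
      Matrix<double, 2, 3> dz_num;
      for (int i = 0; i < 3; ++i) {
        Vector3d dp = Vector3d::Zero();
        dp(i) = eps;
        dz_num.col(i) = (project(p_c0 + dp) - project(p_c0 - dp)) / (2 * eps);
      }
      std::cout << "max error: " << (dz_dpc0 - dz_num).cwiseAbs().maxCoeff() << std::endl;
      return 0;
    }
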
@@ -1579,19 +1534,19 @@ void MsckfVio::featureJacobian(
  }

  int jacobian_row_size = 0;
  jacobian_row_size = 4 * valid_cam_state_ids.size();
  jacobian_row_size = 2 * valid_cam_state_ids.size();

  MatrixXd H_xj = MatrixXd::Zero(jacobian_row_size,
      21+state_server.cam_states.size()*6);
      21+state_server.cam_states.size()*7);
  MatrixXd H_fj = MatrixXd::Zero(jacobian_row_size, 3);
  VectorXd r_j = VectorXd::Zero(jacobian_row_size);
  int stack_cntr = 0;

  for (const auto& cam_id : valid_cam_state_ids) {

    Matrix<double, 4, 6> H_xi = Matrix<double, 4, 6>::Zero();
    Matrix<double, 4, 3> H_fi = Matrix<double, 4, 3>::Zero();
    Vector4d r_i = Vector4d::Zero();
    Matrix<double, 2, 6> H_xi = Matrix<double, 2, 6>::Zero();
    Matrix<double, 2, 3> H_fi = Matrix<double, 2, 3>::Zero();
    Vector2d r_i = Vector2d::Zero();
    measurementJacobian(cam_id, feature.id, H_xi, H_fi, r_i);

    auto cam_state_iter = state_server.cam_states.find(cam_id);
@@ -1599,10 +1554,10 @@ void MsckfVio::featureJacobian(
        state_server.cam_states.begin(), cam_state_iter);

    // Stack the Jacobians.
    H_xj.block<4, 6>(stack_cntr, 21+6*cam_state_cntr) = H_xi;
    H_fj.block<4, 3>(stack_cntr, 0) = H_fi;
    r_j.segment<4>(stack_cntr) = r_i;
    stack_cntr += 4;
    H_xj.block<2, 6>(stack_cntr, 21+7*cam_state_cntr) = H_xi;
    H_fj.block<2, 3>(stack_cntr, 0) = H_fi;
    r_j.segment<2>(stack_cntr) = r_i;
    stack_cntr += 2;
  }

  // Project the residual and Jacobians onto the nullspace
@@ -1623,18 +1578,18 @@ void MsckfVio::featureJacobian(
  FullPivLU<MatrixXd> lu(H_fj.transpose());
  MatrixXd A = lu.kernel();

   H_x = A.transpose() * H_xj;
   r = A.transpose() * r_j;
  H_x = A.transpose() * H_xj;
  r = A.transpose() * r_j;

  /*
  ofstream myfile;
  myfile.open ("/home/raphael/dev/MSCKF_ws/log.txt");
  myfile << "-- residual -- \n" << r << "\n---- H ----\n" << H_x << "\n---- state cov ----\n" << state_server.state_cov <<endl;
  myfile << "Hx\n" << H_x << "r\n" << r << "from residual estimated error state: " << H_x.colPivHouseholderQr().solve(r) << endl;
  myfile.close();
  cout << "---------- LOGGED -------- " << endl;
  */
  nh.setParam("/play_bag", false);

  cout << "---------- LOGGED -------- " << endl;
  nh.setParam("/play_bag", false);
  */
  return;
}

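With the per-camera measurement reduced to two rows and the augmented camera block widened from 6 to 7 error-state entries, each stacked block lands at row stack_cntr and column 21 + 7*cam_state_cntr. A tiny sketch of that index bookkeeping; the camera count is an assumption for illustration:

    #include <iostream>

    int main() {
      const int imu_error_size = 21;   // leading IMU error-state block
      const int cam_error_size = 7;    // per-camera block used on this branch
      const int num_cams = 4;          // assumed number of augmented camera states

      for (int k = 0; k < num_cams; ++k) {
        const int col = imu_error_size + cam_error_size * k;
        std::cout << "camera " << k << " block: columns " << col
                  << " .. " << col + cam_error_size - 1 << "\n";
      }
      // Matches MatrixXd::Zero(rows, 21 + 7*state_server.cam_states.size()).
      std::cout << "total error-state columns: "
                << imu_error_size + cam_error_size * num_cams << std::endl;
      return 0;
    }
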
@@ -1646,7 +1601,7 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
  // complexity as in Equation (28), (29).
  MatrixXd H_thin;
  VectorXd r_thin;

/*
  if (H.rows() > H.cols()) {
    // Convert H to a sparse matrix.
    SparseMatrix<double> H_sparse = H.sparseView();
@@ -1661,8 +1616,8 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
    (spqr_helper.matrixQ().transpose() * H).evalTo(H_temp);
    (spqr_helper.matrixQ().transpose() * r).evalTo(r_temp);

    H_thin = H_temp.topRows(21+state_server.cam_states.size()*6);
    r_thin = r_temp.head(21+state_server.cam_states.size()*6);
    H_thin = H_temp.topRows(21+state_server.cam_states.size()*7);
    r_thin = r_temp.head(21+state_server.cam_states.size()*7);

    //HouseholderQR<MatrixXd> qr_helper(H);
    //MatrixXd Q = qr_helper.householderQ();
@@ -1670,10 +1625,10 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {

    //H_thin = Q1.transpose() * H;
    //r_thin = Q1.transpose() * r;
  } else {
  } else {*/
    H_thin = H;
    r_thin = r;
  }
  //}

  // Compute the Kalman gain.
  const MatrixXd& P = state_server.state_cov;
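Once H_thin and r_thin are in place, the update proceeds with the usual EKF gain built from the state covariance P fetched just above. A hedged, self-contained sketch of that step; the isotropic noise sigma and the simple (I - K*H)*P covariance form are assumptions for illustration, not read from this diff:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      using namespace Eigen;
      const int state_dim = 10, meas_dim = 4;
      const double sigma = 0.035;          // assumed observation noise std-dev

      MatrixXd P      = MatrixXd::Identity(state_dim, state_dim) * 0.01;  // state covariance
      MatrixXd H_thin = MatrixXd::Random(meas_dim, state_dim);
      VectorXd r_thin = VectorXd::Random(meas_dim);

      // Innovation covariance S = H P H^T + R, with R = sigma^2 * I.
      MatrixXd S = H_thin * P * H_thin.transpose()
                 + sigma * sigma * MatrixXd::Identity(meas_dim, meas_dim);
      MatrixXd K_transpose = S.ldlt().solve(H_thin * P);  // S^-1 * H * P
      MatrixXd K = K_transpose.transpose();                // K = P * H^T * S^-1

      VectorXd delta_x = K * r_thin;                       // error-state correction

      // Simple covariance update; a Joseph form could be used instead.
      MatrixXd I_KH = MatrixXd::Identity(state_dim, state_dim) - K * H_thin;
      P = I_KH * P;

      std::cout << "delta_x norm: " << delta_x.norm() << std::endl;
      return 0;
    }
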
@@ -1720,7 +1675,7 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
  auto cam_state_iter = state_server.cam_states.begin();
  for (int i = 0; i < state_server.cam_states.size();
      ++i, ++cam_state_iter) {
    const VectorXd& delta_x_cam = delta_x.segment<6>(21+i*6);
    const VectorXd& delta_x_cam = delta_x.segment<6>(21+i*7);
    const Vector4d dq_cam = smallAngleQuaternion(delta_x_cam.head<3>());
    cam_state_iter->second.orientation = quaternionMultiplication(
        dq_cam, cam_state_iter->second.orientation);
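The loop above walks the camera states at a stride of 7 error-state entries and applies the small-angle correction delta_x_cam.head<3>() to each orientation quaternion. A rough Eigen-only equivalent of that correction step; note that msckf_vio's smallAngleQuaternion and quaternionMultiplication follow their own (JPL) convention, so this is only an illustration, not the repo's helpers:

    #include <Eigen/Dense>
    #include <Eigen/Geometry>
    #include <iostream>

    int main() {
      using namespace Eigen;
      Quaterniond q_cam(AngleAxisd(0.3, Vector3d::UnitZ()));   // current camera orientation
      Vector3d delta_theta(0.001, -0.002, 0.0005);             // small-angle correction

      // Build the correction quaternion from the small rotation vector
      // and compose it with the current orientation.
      Quaterniond dq(AngleAxisd(delta_theta.norm(), delta_theta.normalized()));
      Quaterniond q_new = (dq * q_cam).normalized();

      std::cout << "corrected quaternion: " << q_new.coeffs().transpose() << std::endl;
      return 0;
    }
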
@@ -1795,7 +1750,7 @@ void MsckfVio::removeLostFeatures() {
        invalid_feature_ids.push_back(feature.id);
        continue;
      } else {
        if(!feature.initializePosition(state_server.cam_states)) {
        if(!feature.initializeRho(state_server.cam_states)) {
          invalid_feature_ids.push_back(feature.id);
          continue;
        }
@@ -1812,9 +1767,9 @@ void MsckfVio::removeLostFeatures() {

    if(PHOTOMETRIC)
      //just use max. size, as gets shrunken down after anyway
      jacobian_row_size += N*N*feature.observations.size();
      jacobian_row_size += 2*feature.observations.size();
    else
      jacobian_row_size += 4*feature.observations.size() - 3;
      jacobian_row_size += 2*feature.observations.size() - 3;
    processed_feature_ids.push_back(feature.id);
  }

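The comment above explains the sizing strategy: reserve rows for the worst case and shrink afterwards. A small sketch of that allocate-then-shrink pattern with Eigen; the observation counts are made up for illustration:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      using namespace Eigen;
      const int max_rows = 2 * 10;            // budget: 2 rows per observation, 10 observations
      MatrixXd H = MatrixXd::Zero(max_rows, 21);
      VectorXd r = VectorXd::Zero(max_rows);

      int stack_cntr = 0;
      for (int k = 0; k < 7; ++k) {           // only 7 observations turn out valid
        H.block(stack_cntr, 0, 2, H.cols()) = MatrixXd::Random(2, H.cols());
        r.segment(stack_cntr, 2) = VectorXd::Random(2);
        stack_cntr += 2;
      }

      // Shrink to what was actually stacked, as the update code does afterwards.
      H.conservativeResize(stack_cntr, H.cols());
      r.conservativeResize(stack_cntr);
      std::cout << "rows used: " << H.rows() << " of " << max_rows << std::endl;
      return 0;
    }
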
@@ -1831,7 +1786,7 @@ void MsckfVio::removeLostFeatures() {
  if (processed_feature_ids.size() == 0) return;

  MatrixXd H_x = MatrixXd::Zero(jacobian_row_size,
      21+augmentationSize*state_server.cam_states.size());
      21+7*state_server.cam_states.size());
  VectorXd r = VectorXd::Zero(jacobian_row_size);
  int stack_cntr = 0;

@@ -1965,7 +1920,7 @@ void MsckfVio::pruneCamStateBuffer() {
          feature.observations.erase(cam_id);
        continue;
      } else {
        if(!feature.initializePosition(state_server.cam_states)) {
        if(!feature.initializeRho(state_server.cam_states)) {
          for (const auto& cam_id : involved_cam_state_ids)
            feature.observations.erase(cam_id);
          continue;
@@ -1982,9 +1937,9 @@ void MsckfVio::pruneCamStateBuffer() {
      }
    }
    if(PHOTOMETRIC)
      jacobian_row_size += N*N*involved_cam_state_ids.size();
      jacobian_row_size += 2*involved_cam_state_ids.size();
    else
      jacobian_row_size += 4*involved_cam_state_ids.size() - 3;
      jacobian_row_size += 2*involved_cam_state_ids.size() - 3;
  }

  //cout << "jacobian row #: " << jacobian_row_size << endl;
@@ -1992,7 +1947,7 @@ void MsckfVio::pruneCamStateBuffer() {
  // Compute the Jacobian and residual.
  MatrixXd H_xj;
  VectorXd r_j;
  MatrixXd H_x = MatrixXd::Zero(jacobian_row_size, 21+augmentationSize*state_server.cam_states.size());
  MatrixXd H_x = MatrixXd::Zero(jacobian_row_size, 21+7*state_server.cam_states.size());
  VectorXd r = VectorXd::Zero(jacobian_row_size);
  int stack_cntr = 0;
  for (auto& item : map_server) {
@@ -2037,8 +1992,8 @@ void MsckfVio::pruneCamStateBuffer() {
  for (const auto& cam_id : rm_cam_state_ids) {
    int cam_sequence = std::distance(state_server.cam_states.begin(),
        state_server.cam_states.find(cam_id));
    int cam_state_start = 21 + augmentationSize*cam_sequence;
    int cam_state_end = cam_state_start + augmentationSize;
    int cam_state_start = 21 + 7*cam_sequence;
    int cam_state_end = cam_state_start + 7;


    // Remove the corresponding rows and columns in the state
@@ -2059,10 +2014,10 @@ void MsckfVio::pruneCamStateBuffer() {
            state_server.state_cov.cols()-cam_state_end);

      state_server.state_cov.conservativeResize(
          state_server.state_cov.rows()-augmentationSize, state_server.state_cov.cols()-augmentationSize);
          state_server.state_cov.rows()-7, state_server.state_cov.cols()-7);
    } else {
      state_server.state_cov.conservativeResize(
          state_server.state_cov.rows()-augmentationSize, state_server.state_cov.cols()-augmentationSize);
          state_server.state_cov.rows()-7, state_server.state_cov.cols()-7);
    }

    // Remove this camera state in the state vector.

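The pruning above deletes a 7-row/7-column camera block from the covariance by shifting the trailing rows and columns over it and then calling conservativeResize. A self-contained sketch of the same manipulation on a toy symmetric matrix; the sizes are assumptions, and .eval() is added to stay safe if the shifted regions were ever to overlap:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      using namespace Eigen;
      const int block = 7;                         // per-camera error-state size on this branch
      MatrixXd tmp = MatrixXd::Random(35, 35);     // 21 IMU + 2 camera blocks (assumed)
      MatrixXd cov = 0.5 * (tmp + tmp.transpose()); // symmetric, like a covariance

      const int start = 21;                        // block of the camera being removed
      const int end   = start + block;
      const int tail  = cov.rows() - end;          // rows/cols after the removed block

      // Shift the trailing rows up, then the trailing columns left.
      cov.block(start, 0, tail, cov.cols()) = cov.block(end, 0, tail, cov.cols()).eval();
      cov.block(0, start, cov.rows(), tail) = cov.block(0, end, cov.rows(), tail).eval();
      cov.conservativeResize(cov.rows() - block, cov.cols() - block);

      std::cout << "pruned covariance is " << cov.rows() << "x" << cov.cols() << std::endl;
      return 0;
    }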