removed merge conflicts

Raphael Maenle 2019-05-22 11:34:00 +02:00
commit 8ff0e9d816
6 changed files with 234 additions and 37 deletions

View File

@@ -15,7 +15,7 @@
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <Eigen/StdVector>
#include <math.h>
#include <visualization_msgs/Marker.h>
#include <visualization_msgs/MarkerArray.h>
#include <geometry_msgs/Point.h>
@@ -191,8 +191,10 @@ struct Feature {
const StateIDType& cam_state_id,
CameraCalibration& cam0,
const std::vector<double> photo_r,
- std::stringstream& ss) const;
+ std::stringstream& ss,
+ cv::Point2f gradientVector,
+ cv::Point2f residualVector) const;
/* @brief takes a pure pixel position (1m from image)
* converts to actual pixel value and returns patch irradiance
@@ -203,11 +205,12 @@ struct Feature {
const CameraCalibration& cam,
const StateIDType& cam_state_id,
std::vector<float>& return_i) const;
/*
- * @brief projectPixelToPosition uses the calculated pixels
+ * @brief AnchorPixelToPosition uses the calculated pixels
* of the anchor patch to generate 3D positions of all of them
*/
- inline Eigen::Vector3d projectPixelToPosition(cv::Point2f in_p,
+ inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p,
const CameraCalibration& cam);
/*
@@ -553,7 +556,9 @@ bool Feature::VisualizePatch(
const StateIDType& cam_state_id,
CameraCalibration& cam0,
const std::vector<double> photo_r,
- std::stringstream& ss) const
+ std::stringstream& ss,
+ cv::Point2f gradientVector,
+ cv::Point2f residualVector) const
{
double rescale = 1;
@@ -593,45 +598,107 @@ bool Feature::VisualizePatch(
cv::hconcat(cam0.featureVisu, dottedFrame, cam0.featureVisu);
// irradiance grid anchor
// patches visualization
int N = sqrt(anchorPatch_3d.size());
- int scale = 20;
+ int scale = 30;
cv::Mat irradianceFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255));
cv::resize(irradianceFrame, irradianceFrame, cv::Size(), rescale, rescale);
// irradiance grid anchor
std::stringstream namer;
namer << "anchor";
cv::putText(irradianceFrame, namer.str() , cvPoint(30, 25),
cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
for(int i = 0; i<N; i++)
for(int j = 0; j<N; j++)
cv::rectangle(irradianceFrame,
- cv::Point(10+scale*(i+1), 10+scale*j),
- cv::Point(10+scale*i, 10+scale*(j+1)),
+ cv::Point(30+scale*(i+1), 30+scale*j),
+ cv::Point(30+scale*i, 30+scale*(j+1)),
cv::Scalar(anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255),
CV_FILLED);
// irradiance grid projection
namer.str(std::string());
namer << "projection";
cv::putText(irradianceFrame, namer.str() , cvPoint(30, 45+scale*N),
cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
for(int i = 0; i<N; i++)
for(int j = 0; j<N ; j++)
cv::rectangle(irradianceFrame,
- cv::Point(10+scale*(i+1), 20+scale*(N+j)),
- cv::Point(10+scale*(i), 20+scale*(N+j+1)),
+ cv::Point(30+scale*(i+1), 50+scale*(N+j)),
+ cv::Point(30+scale*(i), 50+scale*(N+j+1)),
cv::Scalar(projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255),
CV_FILLED);
// true irradiance at feature
// get current observation
namer.str(std::string());
namer << "feature";
cv::putText(irradianceFrame, namer.str() , cvPoint(30, 65+scale*2*N),
cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
cv::Point2f p_f(observations.find(cam_state_id)->second(0),observations.find(cam_state_id)->second(1));
// move to real pixels
p_f = image_handler::distortPoint(p_f, cam0.intrinsics, cam0.distortion_model, cam0.distortion_coeffs);
for(int i = 0; i<N; i++)
{
for(int j = 0; j<N ; j++)
{
float irr = PixelIrradiance(cv::Point2f(p_f.x + (i-(N-1)/2), p_f.y + (j-(N-1)/2)), current_image);
cv::rectangle(irradianceFrame,
cv::Point(30+scale*(i+1), 70+scale*(2*N+j)),
cv::Point(30+scale*(i), 70+scale*(2*N+j+1)),
cv::Scalar(irr*255, irr*255, irr*255),
CV_FILLED);
}
}
// residual grid projection, positive - red, negative - blue colored
namer.str(std::string());
namer << "residual";
cv::putText(irradianceFrame, namer.str() , cvPoint(30+scale*N, scale*N/2-5),
cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
for(int i = 0; i<N; i++)
for(int j = 0; j<N; j++)
if(photo_r[i*N+j]>0)
cv::rectangle(irradianceFrame,
- cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)),
- cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)),
+ cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
+ cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
cv::Scalar(255 - photo_r[i*N+j]*255, 255 - photo_r[i*N+j]*255, 255),
CV_FILLED);
else
cv::rectangle(irradianceFrame,
- cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)),
- cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)),
+ cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
+ cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
cv::Scalar(255, 255 + photo_r[i*N+j]*255, 255 + photo_r[i*N+j]*255),
CV_FILLED);
- cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);
+ // gradient arrow
/*
cv::arrowedLine(irradianceFrame,
cv::Point(30+scale*(N/2 +0.5), 50+scale*(N+(N/2)+0.5)),
cv::Point(30+scale*(N/2+0.5)+scale*gradientVector.x, 50+scale*(N+(N/2)+0.5)+scale*gradientVector.y),
cv::Scalar(100, 0, 255),
1);
*/
// residual gradient direction
cv::arrowedLine(irradianceFrame,
cv::Point(40+scale*(N+N/2+0.5), 15+scale*((N-0.5))),
cv::Point(40+scale*(N+N/2+0.5)+scale*residualVector.x, 15+scale*(N-0.5)+scale*residualVector.y),
cv::Scalar(0, 255, 175),
3);
cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);
/*
// visualize position of used observations and resulting feature position
@@ -742,7 +809,7 @@ cv::Point2f Feature::projectPositionToCamera(
return my_p;
}
- Eigen::Vector3d Feature::projectPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam)
+ Eigen::Vector3d Feature::AnchorPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam)
{
// use undistorted position of point of interest
// project it back into 3D space using pinhole model

View File

@@ -36,6 +36,16 @@ cv::Point2f distortPoint(
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs);
void undistortPoint(
const cv::Point2f& pt_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
cv::Point2f& pt_out,
const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(),
const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0));
}
}
#endif

View File

@@ -14,7 +14,7 @@
#include <string>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <math.h>
#include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/video.hpp>
@@ -38,6 +38,8 @@
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#define PI 3.14159265
namespace msckf_vio {
/*
* @brief MsckfVio Implements the algorithm in

View File

@@ -24,7 +24,7 @@
<param name="PrintImages" value="true"/>
<param name="GroundTruth" value="false"/>
- <param name="patch_size_n" value="7"/>
+ <param name="patch_size_n" value="5"/>
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>

View File

@@ -14,6 +14,47 @@ namespace msckf_vio {
namespace image_handler {
void undistortPoint(
const cv::Point2f& pt_in,
const cv::Vec4d& intrinsics,
const std::string& distortion_model,
const cv::Vec4d& distortion_coeffs,
cv::Point2f& pt_out,
const cv::Matx33d &rectification_matrix,
const cv::Vec4d &new_intrinsics) {
std::vector<cv::Point2f> pts_in;
std::vector<cv::Point2f> pts_out;
pts_in.push_back(pt_in);
if (pts_in.size() == 0) return;
const cv::Matx33d K(
intrinsics[0], 0.0, intrinsics[2],
0.0, intrinsics[1], intrinsics[3],
0.0, 0.0, 1.0);
const cv::Matx33d K_new(
new_intrinsics[0], 0.0, new_intrinsics[2],
0.0, new_intrinsics[1], new_intrinsics[3],
0.0, 0.0, 1.0);
if (distortion_model == "radtan") {
cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
} else if (distortion_model == "equidistant") {
cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
} else {
ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
distortion_model.c_str());
cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
rectification_matrix, K_new);
}
pt_out = pts_out[0];
return;
}
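For reference, a minimal usage sketch of this new single-point wrapper. The intrinsics and distortion values below are made-up placeholders, not taken from this commit; with the default rectification matrix and new_intrinsics of (1, 1, 0, 0), the result is a normalized image-plane coordinate (z = 1):

    cv::Vec4d intrinsics(460.0, 460.0, 320.0, 240.0);   // fx, fy, cx, cy (placeholder)
    cv::Vec4d dist(-0.28, 0.07, 0.0002, 0.00002);       // radtan coefficients (placeholder)
    cv::Point2f distorted(100.0f, 120.0f), undistorted;
    msckf_vio::image_handler::undistortPoint(
        distorted, intrinsics, "radtan", dist, undistorted);
    // 'undistorted' now holds the ideal projection of the input pixel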
void undistortPoints(
const std::vector<cv::Point2f>& pts_in,
const cv::Vec4d& intrinsics,

View File

@@ -1238,10 +1238,31 @@ void MsckfVio::PhotometricMeasurementJacobian(
auto frame = cam0.moving_window.find(cam_state_id)->second.image;
//observation
const Vector4d& z = feature.observations.find(cam_state_id)->second;
//estimate photometric measurement
std::vector<double> estimate_irradiance;
std::vector<double> estimate_photo_z;
IlluminationParameter estimated_illumination;
feature.estimate_FrameIrradiance(cam_state, cam_state_id, cam0, estimate_irradiance, estimated_illumination);
// calculated here, because we need the true 'estimate_irradiance' later for the Jacobian
for (auto& estimate_irradiance_j : estimate_irradiance)
estimate_photo_z.push_back (estimate_irradiance_j *
estimated_illumination.frame_gain * estimated_illumination.feature_gain +
estimated_illumination.frame_bias + estimated_illumination.feature_bias);
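The loop above applies the affine irradiance model used here: the estimated intensity is the anchor irradiance scaled by the frame and feature gains, plus the frame and feature biases. A per-pixel sketch using the field names from the diff (the lambda itself is only illustrative, not part of the commit):

    auto applyIllumination = [](double irradiance, const IlluminationParameter& ill) {
      return irradiance * ill.frame_gain * ill.feature_gain
             + ill.frame_bias + ill.feature_bias;
    };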
int count = 0;
double dx, dy;
std::vector<float> z_irr_est;
// gradient visualization parameters
cv::Point2f gradientVector(0,0);
// residual change visualization
cv::Point2f residualVector(0,0);
double res_sum = 0;
for (auto point : feature.anchorPatch_3d)
{
@@ -1251,6 +1272,10 @@
z_irr_est.push_back(feature.PixelIrradiance(p_in_c0, frame));
Matrix<double, 1, 2> dI_dhj = Matrix<double, 1, 2>::Zero();
//calculate photom. residual
photo_r.push_back(photo_z[count] - estimate_photo_z[count]);
// add jacobians
// frame derivative calculated by convolving with the kernel [-1, 0, 1]
dx = feature.PixelIrradiance(cv::Point2f(p_in_c0.x+1, p_in_c0.y), frame) - feature.PixelIrradiance(cv::Point2f(p_in_c0.x-1, p_in_c0.y), frame);
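dx (and, by analogy, dy) is a central difference, i.e. the image convolved with [-1, 0, 1] along one axis at the patch point. A self-contained sketch of that gradient, assuming an 8-bit grayscale frame and intensities normalized to [0, 1] as PixelIrradiance appears to return (the helper name is hypothetical):

    static void imageGradient(const cv::Mat& img, const cv::Point2f& p,
                              double& dx, double& dy)
    {
      auto I = [&](int x, int y) {
        return static_cast<double>(img.at<uchar>(y, x)) / 255.0;  // (row, col) access
      };
      int x = cvRound(p.x), y = cvRound(p.y);
      dx = I(x + 1, y) - I(x - 1, y);   // kernel [-1, 0, 1] in x
      dy = I(x, y + 1) - I(x, y - 1);   // kernel [-1, 0, 1] in y
    }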
@@ -1276,6 +1301,41 @@
H_xi = dI_dhj*dz_dpc0*dpc0_dxc;
H_fi = dI_dhj*dz_dpc0*dpc0_dpg;
gradientVector.x += dx;
gradientVector.y += dy;
residualVector.x += dx * photo_r[count];
residualVector.y += dy * photo_r[count];
res_sum += photo_r[count];
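As accumulated here, the two debug vectors are plain sums over the patch pixels j:

    gradientVector = sum_j (dx_j, dy_j)
    residualVector = sum_j photo_r_j * (dx_j, dy_j)        res_sum = sum_j photo_r_j

so the arrow later drawn in VisualizePatch points along the residual-weighted mean image gradient of the patch.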
//dh / d{}^Cp_{ij}
dh_dCpij(0, 0) = 1 / p_c0(2);
dh_dCpij(1, 1) = 1 / p_c0(2);
dh_dCpij(0, 2) = -(p_c0(0))/(p_c0(2)*p_c0(2));
dh_dCpij(1, 2) = -(p_c0(1))/(p_c0(2)*p_c0(2));
dCpij_dGpij = quaternionToRotation(cam_state.orientation);
//orientation takes camera frame to world frame, we wa
dh_dGpij = dh_dCpij * dCpij_dGpij;
//dh / d X_{pl}
dCpij_dCGtheta = skewSymmetric(p_c0);
dCpij_dGpC = -quaternionToRotation(cam_state.orientation);
dh_dXplj.block<2, 3>(0, 0) = dh_dCpij * dCpij_dCGtheta;
dh_dXplj.block<2, 3>(0, 3) = dh_dCpij * dCpij_dGpC;
//d{}^Gp_P{ij} / \rho_i
double rho = feature.anchor_rho;
// Isometry T_anchor_w takes a vector in anchor frame to world frame
dGpj_drhoj = -feature.T_anchor_w.linear() * Eigen::Vector3d(feature.anchorPatch_ideal[count].x/(rho*rho), feature.anchorPatch_ideal[count].y/(rho*rho), 1/(rho*rho));
dGpj_XpAj.block<3, 3>(0, 0) = - feature.T_anchor_w.linear()
* skewSymmetric(Eigen::Vector3d(feature.anchorPatch_ideal[count].x/(rho),
feature.anchorPatch_ideal[count].y/(rho),
1/(rho)));
dGpj_XpAj.block<3, 3>(0, 3) = Matrix<double, 3, 3>::Identity();
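Written out, the terms assembled above are the pinhole projection Jacobian and the inverse-depth anchor Jacobians, with {}^C p = (X, Y, Z) the point in the camera frame, (u, v) the ideal anchor-patch coordinates and rho = feature.anchor_rho (notation only; the matrices match the code directly):

    dh/d{}^Cp      = [ 1/Z   0    -X/Z^2 ]
                     [  0   1/Z   -Y/Z^2 ]
    d{}^Cp/d{}^Gp  = C(q)                                  // quaternionToRotation(cam_state.orientation)
    d{}^Gp/drho    = -R_AG * [ u/rho^2  v/rho^2  1/rho^2 ]^T   // R_AG = feature.T_anchor_w.linear()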
H_xl.block<1, 6>(count, 0) = H_xi;
H_yl.block<1, 3>(count, 0) = H_fi;
@@ -1296,7 +1356,23 @@ void MsckfVio::PhotometricMeasurementJacobian(
r = r_l;
H_x = H_xl;
H_y = H_yl;
<<<<<<< HEAD
// calculate residual
=======
//TODO make this more fluent as well
count = 0;
for(auto data : photo_r)
r[count++] = data;
std::stringstream ss;
ss << "INFO:" << " anchor: " << cam_state_cntr_anchor << " frame: " << cam_state_cntr;
if(PRINTIMAGES)
{
feature.MarkerGeneration(marker_pub, state_server.cam_states);
feature.VisualizePatch(cam_state, cam_state_id, cam0, photo_r, ss, gradientVector, residualVector);
}
>>>>>>> photometry-jakobi
return;
}
@@ -1392,13 +1468,11 @@ void MsckfVio::PhotometricFeatureJacobian(
H_x = A_null_space.transpose() * H_xi;
r = A_null_space.transpose() * r_i;
ofstream myfile;
myfile.open ("/home/raphael/dev/MSCKF_ws/log.txt");
- myfile << "-- residual -- \n" << r << "\n---- H ----\n" << H_x << "\n---- state cov ----\n" << state_server.state_cov <<endl;
+ myfile << "Hx\n" << H_x << "r\n" << r << "from residual estimated error state: " << H_x.ldlt().solve(r) << endl;
myfile.close();
cout << "---------- LOGGED -------- " << endl;
if(PRINTIMAGES)
{
@@ -1551,13 +1625,11 @@ void MsckfVio::featureJacobian(
ofstream myfile;
myfile.open ("/home/raphael/dev/MSCKF_ws/log.txt");
- myfile << "-- residual -- \n" << r << "\n---- H ----\n" << H_x << "\n---- state cov ----\n" << state_server.state_cov <<endl;
+ myfile << "Hx\n" << H_x << "r\n" << r << "from residual estimated error state: " << H_x.ldlt().solve(r) << endl;
myfile.close();
cout << "---------- LOGGED -------- " << endl;
nh.setParam("/play_bag", false);
return;
}
void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
@@ -1568,7 +1640,11 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
// complexity as in Equation (28), (29).
MatrixXd H_thin;
VectorXd r_thin;
int augmentationSize = 6;
if(PHOTOMETRIC)
augmentationSize = 7;
/*
if (H.rows() > H.cols()) {
// Convert H to a sparse matrix.
SparseMatrix<double> H_sparse = H.sparseView();
@@ -1583,8 +1659,8 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
(spqr_helper.matrixQ().transpose() * H).evalTo(H_temp);
(spqr_helper.matrixQ().transpose() * r).evalTo(r_temp);
- H_thin = H_temp.topRows(21+state_server.cam_states.size()*6);
- r_thin = r_temp.head(21+state_server.cam_states.size()*6);
+ H_thin = H_temp.topRows(21+state_server.cam_states.size()*augmentationSize);
+ r_thin = r_temp.head(21+state_server.cam_states.size()*augmentationSize);
//HouseholderQR<MatrixXd> qr_helper(H);
//MatrixXd Q = qr_helper.householderQ();
@@ -1596,18 +1672,19 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
H_thin = H;
r_thin = r;
}
*/
// Compute the Kalman gain.
const MatrixXd& P = state_server.state_cov;
- MatrixXd S = H_thin*P*H_thin.transpose() +
+ MatrixXd S = H*P*H.transpose() +
Feature::observation_noise*MatrixXd::Identity(
- H_thin.rows(), H_thin.rows());
- //MatrixXd K_transpose = S.fullPivHouseholderQr().solve(H_thin*P);
- MatrixXd K_transpose = S.ldlt().solve(H_thin*P);
+ H.rows(), H.rows());
+ //MatrixXd K_transpose = S.fullPivHouseholderQr().solve(H*P);
+ MatrixXd K_transpose = S.ldlt().solve(H*P);
MatrixXd K = K_transpose.transpose();
// Compute the error of the state.
- VectorXd delta_x = K * r_thin;
+ VectorXd delta_x = K * r;
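For reference, with the QR-thinning block commented out this is the plain EKF correction, with R = observation_noise * I (equations transcribed from the surrounding code, not a new derivation):

    S    = H P H^T + R
    K^T  = S^-1 (H P)          // S.ldlt().solve(H*P)
    dx   = K r
    P   <- (I - K H) P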
// Update the IMU state.
const VectorXd& delta_x_imu = delta_x.head<21>();
@@ -1642,7 +1719,7 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
auto cam_state_iter = state_server.cam_states.begin();
for (int i = 0; i < state_server.cam_states.size();
++i, ++cam_state_iter) {
- const VectorXd& delta_x_cam = delta_x.segment<6>(21+i*6);
+ const VectorXd& delta_x_cam = delta_x.segment(21+i*augmentationSize, augmentationSize);
const Vector4d dq_cam = smallAngleQuaternion(delta_x_cam.head<3>());
cam_state_iter->second.orientation = quaternionMultiplication(
dq_cam, cam_state_iter->second.orientation);
@@ -1650,7 +1727,7 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
}
// Update state covariance.
- MatrixXd I_KH = MatrixXd::Identity(K.rows(), H_thin.cols()) - K*H_thin;
+ MatrixXd I_KH = MatrixXd::Identity(K.rows(), H.cols()) - K*H;
//state_server.state_cov = I_KH*state_server.state_cov*I_KH.transpose() +
// K*K.transpose()*Feature::observation_noise;
state_server.state_cov = I_KH*state_server.state_cov;
@@ -1664,7 +1741,7 @@ void MsckfVio::measurementUpdate(const MatrixXd& H, const VectorXd& r) {
}
bool MsckfVio::gatingTest(const MatrixXd& H, const VectorXd& r, const int& dof) {
return true;
MatrixXd P1 = H * state_server.state_cov * H.transpose();