8 Commits

27 changed files with 652 additions and 2665 deletions


@@ -1,16 +0,0 @@
{
"configurations": [
{
"name": "Linux",
"includePath": [
"${workspaceFolder}/**"
],
"defines": [],
"compilerPath": "/usr/bin/gcc",
"cStandard": "c11",
"cppStandard": "c++14",
"intelliSenseMode": "clang-x64"
}
],
"version": 4
}

Binary file not shown.

Binary file not shown.

Binary file not shown.


@@ -1,6 +0,0 @@
{
"files.associations": {
"core": "cpp",
"sparsecore": "cpp"
}
}

GPATH (binary file not shown)

GRTAGS (binary file not shown)

GSYMS (binary file not shown)

GTAGS (binary file not shown)


@@ -1,36 +0,0 @@
cam0:
T_cam_imu:
[-0.9995378259923383, 0.02917807204183088, -0.008530798463872679, 0.047094247958417004,
0.007526588843243184, -0.03435493139706542, -0.9993813532126198, -0.04788273017221637,
-0.029453096117288798, -0.9989836729399656, 0.034119442089241274, -0.0697294754693238,
0.0, 0.0, 0.0, 1.0]
camera_model: pinhole
distortion_coeffs: [0.0034823894022493434, 0.0007150348452162257, -0.0020532361418706202,
0.00020293673591811182]
distortion_model: pre-equidistant
intrinsics: [190.97847715128717, 190.9733070521226, 254.93170605935475, 256.8974428996504]
resolution: [512, 512]
rostopic: /cam0/image_raw
cam1:
T_cam_imu:
[-0.9995240747493029, 0.02986739485347808, -0.007717688852024281, -0.05374086123613335,
0.008095979457928231, 0.01256553460985914, -0.9998882749870535, -0.04648588412432889,
-0.02976708103202316, -0.9994748851595197, -0.0128013601698453, -0.07333210787623645,
0.0, 0.0, 0.0, 1.0]
T_cn_cnm1:
[0.9999994317488622, -0.0008361847221513937, -0.0006612844045898121, -0.10092123225528335,
0.0008042457277382264, 0.9988989443471681, -0.04690684567228134, -0.001964540595211977,
0.0006997790813734836, 0.04690628718225568, 0.9988990492196964, -0.0014663556043866572,
0.0, 0.0, 0.0, 1.0]
camera_model: pinhole
distortion_coeffs: [0.0034003170790442797, 0.001766278153469831, -0.00266312569781606,
0.0003299517423931039]
distortion_model: pre-equidistant
intrinsics: [190.44236969414825, 190.4344384721956, 252.59949716835982, 254.91723064636983]
resolution: [512, 512]
rostopic: /cam1/image_raw
T_imu_body:
[1.0000, 0.0000, 0.0000, 0.0000,
0.0000, 1.0000, 0.0000, 0.0000,
0.0000, 0.0000, 1.0000, 0.0000,
0.0000, 0.0000, 0.0000, 1.0000]


@@ -7,7 +7,7 @@ cam0:
  camera_model: pinhole
  distortion_coeffs: [0.010171079892421483, -0.010816440029919381, 0.005942781769412756,
    -0.001662284667857643]
-  distortion_model: pre-equidistant
+  distortion_model: equidistant
  intrinsics: [380.81042871360756, 380.81194179427075, 510.29465304840727, 514.3304630538506]
  resolution: [1024, 1024]
  rostopic: /cam0/image_raw
@@ -25,7 +25,7 @@ cam1:
  camera_model: pinhole
  distortion_coeffs: [0.01371679169245271, -0.015567360615942622, 0.00905043103315326,
    -0.002347858896562788]
-  distortion_model: pre-equidistant
+  distortion_model: equidistant
  intrinsics: [379.2869884263036, 379.26583742214524, 505.5666703237407, 510.2840961765407]
  resolution: [1024, 1024]
  rostopic: /cam1/image_raw
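The rename from pre-equidistant back to equidistant retires a convention visible elsewhere in this diff: a "pre-" prefix marked streams whose images were undistorted once on arrival, so plain models became no-ops for image handling. A sketch of that retired convention, based on the undistortImage helper deleted from image_handler.cpp at the end of this diff (undistortIfPre is an illustrative name):

#include <string>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>

// "pre-" models undistort the whole image once; the stock models leave the
// image untouched here and handle distortion point-by-point later instead.
void undistortIfPre(cv::InputArray src, cv::OutputArray dst,
                    const std::string& model,
                    const cv::Matx33d& K, const cv::Vec4d& D) {
  if (model == "pre-equidistant")
    cv::fisheye::undistortImage(src, dst, K, D, K);
  else if (model == "pre-radtan")
    cv::undistort(src, dst, K, D, K);
  else
    src.copyTo(dst);
}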


@@ -18,8 +18,6 @@ namespace msckf_vio {
struct Frame{
  cv::Mat image;
-  cv::Mat dximage;
-  cv::Mat dyimage;
  double exposureTime_ms;
};

@@ -41,7 +39,6 @@ struct CameraCalibration{
  cv::Vec4d distortion_coeffs;
  movingWindow moving_window;
  cv::Mat featureVisu;
-  int id;
};


@@ -6,7 +6,7 @@
 */
#ifndef MSCKF_VIO_FEATURE_H
-#define MSCKF_VIO_FEATURE_Hs
+#define MSCKF_VIO_FEATURE_H

#include <iostream>
#include <map>

@@ -70,11 +70,6 @@ struct Feature {
    position(Eigen::Vector3d::Zero()),
    is_initialized(false), is_anchored(false) {}

-  void Rhocost(const Eigen::Isometry3d& T_c0_ci,
-      const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
-      double& e) const;

  /*
   * @brief cost Compute the cost of the camera observations
   * @param T_c0_c1 A rigid body transformation takes

@@ -87,13 +82,6 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
      const Eigen::Vector3d& x, const Eigen::Vector2d& z,
      double& e) const;

-  bool initializeRho(const CamStateServer& cam_states);
-
-  inline void RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
-      const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
-      Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
-      double& w) const;

  /*
   * @brief jacobian Compute the Jacobian of the camera observation
   * @param T_c0_c1 A rigid body transformation takes

@@ -109,10 +97,6 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
      Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
      double& w) const;

-  inline double generateInitialDepth(
-      const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
-      const Eigen::Vector2d& z2) const;

  /*
   * @brief generateInitialGuess Compute the initial guess of
   * the feature's 3d position using only two views.

@@ -137,6 +121,15 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
  inline bool checkMotion(
      const CamStateServer& cam_states) const;

+  /*
+   * @brief AnchorPixelToPosition projects an undistorted point in the
+   * anchor frame back into the real world using the rho calculated
+   * based on featur position
+   */
+  inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam);

  /*
   * @brief InitializeAnchor generates the NxN patch around the
   * feature in the Anchor image

@@ -164,14 +157,14 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
  inline bool initializePosition(
      const CamStateServer& cam_states);

  cv::Point2f pixelDistanceAt(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      const CameraCalibration& cam,
      Eigen::Vector3d& in_p) const;

  /*
   * @brief project PositionToCamera Takes a 3d position in a world frame
   * and projects it into the passed camera frame using pinhole projection

@@ -184,21 +177,6 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
      const CameraCalibration& cam,
      Eigen::Vector3d& in_p) const;

-  double CompleteCvKernel(
-      const cv::Point2f pose,
-      const StateIDType& cam_state_id,
-      CameraCalibration& cam,
-      std::string type) const;
-
-  double cvKernel(
-      const cv::Point2f pose,
-      std::string type) const;
-
-  double Kernel(
-      const cv::Point2f pose,
-      const cv::Mat frame,
-      std::string type) const;

  /*
   * @brief IrradianceAnchorPatch_Camera returns irradiance values
   * of the Anchor Patch position in a camera frame

@@ -208,31 +186,32 @@ void Rhocost(const Eigen::Isometry3d& T_c0_ci,
  bool estimate_FrameIrradiance(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
-      CameraCalibration& cam,
+      CameraCalibration& cam0,
      std::vector<double>& anchorPatch_estimate,
      IlluminationParameter& estimatedIllumination) const;

  bool MarkerGeneration(
      ros::Publisher& marker_pub,
      const CamStateServer& cam_states) const;

-  bool VisualizeKernel(
-      const CAMState& cam_state,
-      const StateIDType& cam_state_id,
-      CameraCalibration& cam0) const;

  bool VisualizePatch(
      const CAMState& cam_state,
      const StateIDType& cam_state_id,
      CameraCalibration& cam0,
-      const Eigen::VectorXd& photo_r,
+      const std::vector<double> photo_r,
      std::stringstream& ss) const;

-  /*
-   * @brief AnchorPixelToPosition uses the calcualted pixels
-   * of the anchor patch to generate 3D positions of all of em
-   */
-  inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p,
-      const CameraCalibration& cam);
+  /* @brief takes a pure pixel position (1m from image)
+   * converts to actual pixel value and returns patch irradiance
+   * around this pixel
+   */
+  void PatchAroundPurePixel(cv::Point2f p,
+      int N,
+      const CameraCalibration& cam,
+      const StateIDType& cam_state_id,
+      std::vector<float>& return_i) const;

  /*
   * @brief Irradiance returns irradiance value of a pixel

@@ -277,14 +256,6 @@ inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p,
  bool is_initialized;
  bool is_anchored;

-  cv::Mat abs_xderImage;
-  cv::Mat abs_yderImage;
-
-  cv::Mat xderImage;
-  cv::Mat yderImage;
-
-  cv::Mat anchorImage_blurred;

  cv::Point2f anchor_center_pos;
  cv::Point2f undist_anchor_center_pos;

  // Noise for a normalized feature measurement.

@@ -300,26 +271,6 @@ typedef std::map<FeatureIDType, Feature, std::less<int>,
    Eigen::aligned_allocator<
    std::pair<const FeatureIDType, Feature> > > MapServer;
void Feature::Rhocost(const Eigen::Isometry3d& T_c0_ci,
const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
double& e) const
{
// Compute hi1, hi2, and hi3 as Equation (37).
const double& rho = x;
Eigen::Vector3d h = T_c0_ci.linear()*
Eigen::Vector3d(z1(0), z1(1), 1.0) + rho*T_c0_ci.translation();
double& h1 = h(0);
double& h2 = h(1);
double& h3 = h(2);
// Predict the feature observation in ci frame.
Eigen::Vector2d z_hat(h1/h3, h2/h3);
// Compute the residual.
e = (z_hat-z2).squaredNorm();
return;
}
void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
    const Eigen::Vector3d& x, const Eigen::Vector2d& z,

@@ -332,9 +283,9 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
  Eigen::Vector3d h = T_c0_ci.linear()*
      Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();

-  double h1 = h(0);
-  double h2 = h(1);
-  double h3 = h(2);
+  double& h1 = h(0);
+  double& h2 = h(1);
+  double& h3 = h(2);

  // Predict the feature observation in ci frame.
  Eigen::Vector2d z_hat(h1/h3, h2/h3);

@@ -344,42 +295,6 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
  return;
}
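For reference, the measurement model behind Feature::cost, written out as a standalone sketch (Eigen only; the function name is illustrative): a feature is parameterized in its anchor frame as x = (alpha, beta, rho), the normalized image coordinates plus inverse depth, and its predicted observation in camera ci is the projective image of h = R*(alpha, beta, 1)^T + rho*t, as in Equation (37) referenced by the code comments.

#include <Eigen/Dense>
#include <Eigen/Geometry>

// Squared reprojection residual of one observation z under pose T_c0_ci.
double reprojectionCost(const Eigen::Isometry3d& T_c0_ci,
                        const Eigen::Vector3d& x,   // (alpha, beta, rho)
                        const Eigen::Vector2d& z) {
  Eigen::Vector3d h = T_c0_ci.linear() * Eigen::Vector3d(x(0), x(1), 1.0)
                      + x(2) * T_c0_ci.translation();
  Eigen::Vector2d z_hat(h(0) / h(2), h(1) / h(2));  // predicted observation
  return (z_hat - z).squaredNorm();
}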
void Feature::RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
double& w) const
{
const double& rho = x;
Eigen::Vector3d h = T_c0_ci.linear()*
Eigen::Vector3d(z1(0), z2(1), 1.0) + rho*T_c0_ci.translation();
double& h1 = h(0);
double& h2 = h(1);
double& h3 = h(2);
// Compute the Jacobian.
Eigen::Matrix3d W;
W.leftCols<2>() = T_c0_ci.linear().leftCols<2>();
W.rightCols<1>() = T_c0_ci.translation();
J(0,0) = -h1/(h3*h3);
J(1,0) = -h2/(h3*h3);
// Compute the residual.
Eigen::Vector2d z_hat(h1/h3, h2/h3);
r = z_hat - z2;
// Compute the weight based on the residual.
double e = r.norm();
if (e <= optimization_config.huber_epsilon)
w = 1.0;
else
w = optimization_config.huber_epsilon / (2*e);
return;
}
void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
    const Eigen::Vector3d& x, const Eigen::Vector2d& z,
    Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,

@@ -419,9 +334,9 @@ void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
  return;
}

-double Feature::generateInitialDepth(
+void Feature::generateInitialGuess(
    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
-    const Eigen::Vector2d& z2) const
+    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const
{
  // Construct a least square problem to solve the depth.
  Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);

@@ -436,15 +351,6 @@ double Feature::generateInitialDepth(
  // Solve for the depth.
  double depth = (A.transpose() * A).inverse() * A.transpose() * b;

-  return depth;
-}
-
-void Feature::generateInitialGuess(
-    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
-    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const
-{
-  double depth = generateInitialDepth(T_c1_c2, z1, z2);
  p(0) = z1(0) * depth;
  p(1) = z1(1) * depth;
  p(2) = depth;
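After this change generateInitialGuess computes the depth inline instead of delegating to the removed generateInitialDepth: the depth along the z1 bearing is the scalar least-squares solution of a 2x1 system built from the rotated bearing and the baseline. As a standalone sketch (hypothetical helper, Eigen only):

#include <Eigen/Dense>
#include <Eigen/Geometry>

// Depth of the feature along bearing z1, from two views related by T_c1_c2.
double twoViewDepth(const Eigen::Isometry3d& T_c1_c2,
                    const Eigen::Vector2d& z1, const Eigen::Vector2d& z2) {
  Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);
  const Eigen::Vector3d& t = T_c1_c2.translation();
  Eigen::Vector2d A(m(0) - z2(0) * m(2), m(1) - z2(1) * m(2));
  Eigen::Vector2d b(z2(0) * t(2) - t(0), z2(1) * t(2) - t(1));
  // Normal-equation solution of min ||A*depth - b||, as in the code above.
  return A.dot(b) / A.squaredNorm();
}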
@@ -494,62 +400,10 @@ bool Feature::checkMotion(const CamStateServer& cam_states) const
  else return false;
}
double Feature::CompleteCvKernel(
const cv::Point2f pose,
const StateIDType& cam_state_id,
CameraCalibration& cam,
std::string type) const
{
double delta = 0;
if(type == "Sobel_x")
delta = ((double)cam.moving_window.find(cam_state_id)->second.dximage.at<short>(pose.y, pose.x))/255.;
else if (type == "Sobel_y")
delta = ((double)cam.moving_window.find(cam_state_id)->second.dyimage.at<short>(pose.y, pose.x))/255.;
return delta;
}
double Feature::cvKernel(
const cv::Point2f pose,
std::string type) const
{
double delta = 0;
if(type == "Sobel_x")
delta = ((double)xderImage.at<short>(pose.y, pose.x))/255.;
else if (type == "Sobel_y")
delta = ((double)yderImage.at<short>(pose.y, pose.x))/255.;
return delta;
}
double Feature::Kernel(
const cv::Point2f pose,
const cv::Mat frame,
std::string type) const
{
Eigen::Matrix<double, 3, 3> kernel = Eigen::Matrix<double, 3, 3>::Zero();
if(type == "Sobel_x")
kernel << -1., 0., 1.,-2., 0., 2. , -1., 0., 1.;
else if(type == "Sobel_y")
kernel << -1., -2., -1., 0., 0., 0., 1., 2., 1.;
double delta = 0;
int offs = (int)(kernel.rows()-1)/2;
for(int i = 0; i < kernel.rows(); i++)
for(int j = 0; j < kernel.cols(); j++)
delta += ((float)frame.at<uint8_t>(pose.y+j-offs , pose.x+i-offs))/255. * (float)kernel(j,i);
return delta;
}
bool Feature::estimate_FrameIrradiance(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,
-    CameraCalibration& cam,
+    CameraCalibration& cam0,
    std::vector<double>& anchorPatch_estimate,
    IlluminationParameter& estimated_illumination) const
{

@@ -558,11 +412,14 @@ bool Feature::estimate_FrameIrradiance(
  // muliply by a and add b of this frame
  auto anchor = observations.begin();
-  if(cam.moving_window.find(anchor->first) == cam.moving_window.end())
+  if(cam0.moving_window.find(anchor->first) == cam0.moving_window.end())
+  {
+    std::cout << "anchor not in buffer anymore!" << std::endl;
    return false;
+  }

-  double anchorExposureTime_ms = cam.moving_window.find(anchor->first)->second.exposureTime_ms;
-  double frameExposureTime_ms = cam.moving_window.find(cam_state_id)->second.exposureTime_ms;
+  double anchorExposureTime_ms = cam0.moving_window.find(anchor->first)->second.exposureTime_ms;
+  double frameExposureTime_ms = cam0.moving_window.find(cam_state_id)->second.exposureTime_ms;

  double a_A = anchorExposureTime_ms;
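With the gain a_A set directly to the exposure time (as above, and likewise for the current frame) and the bias terms zero, the anchor-to-frame irradiance mapping reduces to an exposure-time ratio. A hypothetical one-liner, not from the diff, stating that assumption explicitly:

// Assumes brightness scales linearly with exposure time and bias is zero.
inline double mapIrradiance(double anchor_irradiance,
                            double anchorExposure_ms, double frameExposure_ms) {
  return anchor_irradiance * (frameExposure_ms / anchorExposure_ms);
}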
@@ -697,51 +554,11 @@ bool Feature::MarkerGeneration(
  marker_pub.publish(ma);
}
bool Feature::VisualizeKernel(
const CAMState& cam_state,
const StateIDType& cam_state_id,
CameraCalibration& cam0) const
{
auto anchor = observations.begin();
cv::Mat anchorImage = cam0.moving_window.find(anchor->first)->second.image;
//cv::Mat xderImage;
//cv::Mat yderImage;
//cv::Sobel(anchorImage, xderImage, CV_8UC1, 1, 0, 3);
//cv::Sobel(anchorImage, yderImage, CV_8UC1, 0, 1, 3);
cv::Mat xderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());
cv::Mat yderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());
cv::Mat norm_abs_xderImage;
cv::normalize(abs_xderImage, norm_abs_xderImage, 0, 255, cv::NORM_MINMAX, CV_8UC1);
cv::imshow("xder", norm_abs_xderImage);
cv::imshow("yder", abs_yderImage);
for(int i = 1; i < anchorImage.rows-1; i++)
for(int j = 1; j < anchorImage.cols-1; j++)
xderImage2.at<uint8_t>(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_x"));
for(int i = 1; i < anchorImage.rows-1; i++)
for(int j = 1; j < anchorImage.cols-1; j++)
yderImage2.at<uint8_t>(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_y"));
cv::imshow("anchor", anchorImage);
cv::imshow("xder2", xderImage2);
cv::imshow("yder2", yderImage2);
cvWaitKey(0);
}
bool Feature::VisualizePatch(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,
    CameraCalibration& cam0,
-    const Eigen::VectorXd& photo_r,
+    const std::vector<double> photo_r,
    std::stringstream& ss) const
{

@@ -805,10 +622,9 @@ bool Feature::VisualizePatch(
      CV_FILLED);

  // irradiance grid projection
  namer.str(std::string());
  namer << "projection";
-  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 45+scale*N),
+  cv::putText(irradianceFrame, namer.str(), cvPoint(30, 45+scale*N),
      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);

  for(int i = 0; i<N; i++)

@@ -847,23 +663,22 @@ bool Feature::VisualizePatch(
  // residual grid projection, positive - red, negative - blue colored
  namer.str(std::string());
  namer << "residual";
-  std::cout << "-- photo_r -- \n" << photo_r << " -- " << std::endl;
  cv::putText(irradianceFrame, namer.str() , cvPoint(30+scale*N, scale*N/2-5),
      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);

  for(int i = 0; i<N; i++)
    for(int j = 0; j<N; j++)
-      if(photo_r(i*N+j)>0)
+      if(photo_r[i*N+j]>0)
        cv::rectangle(irradianceFrame,
            cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
            cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
-            cv::Scalar(255 - photo_r(i*N+j)*255, 255 - photo_r(i*N+j)*255, 255),
+            cv::Scalar(255 - photo_r[i*N+j]*255, 255 - photo_r[i*N+j]*255, 255),
            CV_FILLED);
      else
        cv::rectangle(irradianceFrame,
            cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
            cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
-            cv::Scalar(255, 255 + photo_r(i*N+j)*255, 255 + photo_r(i*N+j)*255),
+            cv::Scalar(255, 255 + photo_r[i*N+j]*255, 255 + photo_r[i*N+j]*255),
            CV_FILLED);
// gradient arrow // gradient arrow
@@ -873,21 +688,22 @@ bool Feature::VisualizePatch(
      cv::Point(30+scale*(N/2+0.5)+scale*gradientVector.x, 50+scale*(N+(N/2)+0.5)+scale*gradientVector.y),
      cv::Scalar(100, 0, 255),
      1);
+  */

  // residual gradient direction
+  /*
  cv::arrowedLine(irradianceFrame,
      cv::Point(40+scale*(N+N/2+0.5), 15+scale*((N-0.5))),
      cv::Point(40+scale*(N+N/2+0.5)+scale*residualVector.x, 15+scale*(N-0.5)+scale*residualVector.y),
      cv::Scalar(0, 255, 175),
      3);
  */

  cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);

  /*
  // visualize position of used observations and resulting feature position
  cv::Mat positionFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255));
  cv::resize(positionFrame, positionFrame, cv::Size(), rescale, rescale);

@@ -920,7 +736,9 @@ bool Feature::VisualizePatch(
  cv::hconcat(cam0.featureVisu, positionFrame, cam0.featureVisu);
  */

  // write feature position
  std::stringstream pos_s;
  pos_s << "u: " << observations.begin()->second(0) << " v: " << observations.begin()->second(1);

@@ -938,10 +756,29 @@ bool Feature::VisualizePatch(
  cvWaitKey(0);
}
void Feature::PatchAroundPurePixel(cv::Point2f p,
int N,
const CameraCalibration& cam,
const StateIDType& cam_state_id,
std::vector<float>& return_i) const
{
int n = (int)(N-1)/2;
cv::Mat image = cam.moving_window.find(cam_state_id)->second.image;
cv::Point2f img_p = image_handler::distortPoint(p,
cam.intrinsics,
cam.distortion_model,
cam.distortion_coeffs);
for(double u_run = -n; u_run <= n; u_run++)
for(double v_run = -n; v_run <= n; v_run++)
return_i.push_back(PixelIrradiance(cv::Point2f(img_p.x+u_run, img_p.y+v_run), image));
}
float Feature::PixelIrradiance(cv::Point2f pose, cv::Mat image) const
{
-  return ((float)image.at<uint8_t>(pose.y, pose.x))/255.;
+  return ((float)image.at<uint8_t>(pose.y, pose.x))/255;
}
cv::Point2f Feature::pixelDistanceAt(

@@ -967,20 +804,13 @@ cv::Point2f Feature::pixelDistanceAt(
      cam.distortion_coeffs,
      pure);

-  // transfrom position to camera frame
-  // to get distance multiplier
-  Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
-  const Eigen::Vector3d& t_c0_w = cam_state.position;
-  Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);
-
-  // returns the distance between the pixel points in space
+  // returns the absolute pixel distance at pixels one metres away
  cv::Point2f distance(fabs(pure[0].x - pure[1].x), fabs(pure[2].y - pure[3].y));

  return distance;
}
cv::Point2f Feature::projectPositionToCamera(
    const CAMState& cam_state,
    const StateIDType& cam_state_id,

@@ -990,41 +820,26 @@ cv::Point2f Feature::projectPositionToCamera(
  Eigen::Isometry3d T_c0_w;

  cv::Point2f out_p;
-  cv::Point2f my_p;

  // transfrom position to camera frame
-  // cam0 position
  Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
  const Eigen::Vector3d& t_c0_w = cam_state.position;
+  Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);
+  out_p = cv::Point2f(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));

-  // project point according to model
-  if(cam.id == 0)
-  {
-    Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);
-    out_p = cv::Point2f(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));
-  }
-  // if camera is one, calcualte the cam1 position from cam0 position first
-  else if(cam.id == 1)
-  {
-    // cam1 position
-    Eigen::Matrix3d R_c0_c1 = CAMState::T_cam0_cam1.linear();
-    Eigen::Matrix3d R_w_c1 = R_c0_c1 * R_w_c0;
-    Eigen::Vector3d t_c1_w = t_c0_w - R_w_c1.transpose()*CAMState::T_cam0_cam1.translation();
-    Eigen::Vector3d p_c1 = R_w_c1 * (in_p-t_c1_w);
-    out_p = cv::Point2f(p_c1(0)/p_c1(2), p_c1(1)/p_c1(2));
-  }
-
-  // undistort point according to camera model
-  if (cam.distortion_model.substr(0,3) == "pre-")
-    my_p = cv::Point2f(out_p.x * cam.intrinsics[0] + cam.intrinsics[2], out_p.y * cam.intrinsics[1] + cam.intrinsics[3]);
-  else
-    my_p = image_handler::distortPoint(out_p,
-        cam.intrinsics,
-        cam.distortion_model,
-        cam.distortion_coeffs);
+  // if(cam_state_id == observations.begin()->first)
+  //   printf("undist:\n \tproj pos: %f, %f\n\ttrue pos: %f, %f\n", out_p.x, out_p.y, undist_anchor_center_pos.x, undist_anchor_center_pos.y);
+
+  cv::Point2f my_p = image_handler::distortPoint(out_p,
+      cam.intrinsics,
+      cam.distortion_model,
+      cam.distortion_coeffs);
+
+  // printf("truPosition: %f, %f, %f\n", position.x(), position.y(), position.z());
+  // printf("camPosition: %f, %f, %f\n", p_c0(0), p_c0(1), p_c0(2));
+  // printf("Photo projection: %f, %f\n", my_p[0].x, my_p[0].y);

  return my_p;
}
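Condensed, the projection chain above is: rotate the world point into the camera frame, project to the normalized plane, then map into pixel coordinates. A self-contained sketch under the conventions visible in this diff (worldPointToPixel is a hypothetical helper; the final line is the "pre-"/already-undistorted case, where the camera matrix alone suffices, whereas the retained code calls image_handler::distortPoint instead):

#include <Eigen/Dense>
#include <opencv2/core.hpp>

cv::Point2f worldPointToPixel(const Eigen::Matrix3d& R_w_c,
                              const Eigen::Vector3d& t_c_w,
                              const Eigen::Vector3d& p_w,
                              const cv::Vec4d& intrinsics) {
  Eigen::Vector3d p_c = R_w_c * (p_w - t_c_w);         // world -> camera frame
  cv::Point2f out_p(p_c(0) / p_c(2), p_c(1) / p_c(2)); // normalized projection
  // Already-undistorted model: applying the camera matrix lands in pixels.
  return cv::Point2f(out_p.x * intrinsics[0] + intrinsics[2],
                     out_p.y * intrinsics[1] + intrinsics[3]);
}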
@@ -1052,237 +867,58 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
    return false;

  cv::Mat anchorImage = cam.moving_window.find(anchor->first)->second.image;
cv::Mat anchorImage_deeper;
anchorImage.convertTo(anchorImage_deeper,CV_16S);
//TODO remove this?
cv::Sobel(anchorImage_deeper, xderImage, -1, 1, 0, 3);
cv::Sobel(anchorImage_deeper, yderImage, -1, 0, 1, 3);
xderImage/=8.;
yderImage/=8.;
cv::convertScaleAbs(xderImage, abs_xderImage);
cv::convertScaleAbs(yderImage, abs_yderImage);
cv::GaussianBlur(anchorImage, anchorImage_blurred, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT);
  auto u = anchor->second(0);//*cam.intrinsics[0] + cam.intrinsics[2];
  auto v = anchor->second(1);//*cam.intrinsics[1] + cam.intrinsics[3];

-  //testing
-  undist_anchor_center_pos = cv::Point2f(u,v);
-
-  // check if image has been pre-undistorted
-  if(cam.distortion_model.substr(0,3) == "pre-")
-  {
-    std::cout << "is a pre" << std::endl;
-    //project onto pixel plane
-    undist_anchor_center_pos = cv::Point2f(u * cam.intrinsics[0] + cam.intrinsics[2], v * cam.intrinsics[1] + cam.intrinsics[3]);
-
-    // create vector of patch in pixel plane
-    for(double u_run = -n; u_run <= n; u_run++)
-      for(double v_run = -n; v_run <= n; v_run++)
-        anchorPatch_real.push_back(cv::Point2f(undist_anchor_center_pos.x+u_run, undist_anchor_center_pos.y+v_run));
-
-    //project back into u,v
-    for(int i = 0; i < N*N; i++)
-      anchorPatch_ideal.push_back(cv::Point2f((anchorPatch_real[i].x-cam.intrinsics[2])/cam.intrinsics[0], (anchorPatch_real[i].y-cam.intrinsics[3])/cam.intrinsics[1]));
-  }
-  else
-  {
-    // get feature in undistorted pixel space
-    // this only reverts from 'pure' space into undistorted pixel space using camera matrix
-    cv::Point2f und_pix_p = image_handler::distortPoint(cv::Point2f(u, v),
-        cam.intrinsics,
-        cam.distortion_model,
-        cam.distortion_coeffs);
-    // create vector of patch in pixel plane
-    for(double u_run = -n; u_run <= n; u_run++)
-      for(double v_run = -n; v_run <= n; v_run++)
-        anchorPatch_real.push_back(cv::Point2f(und_pix_p.x+u_run, und_pix_p.y+v_run));
+  //for NxN patch pixels around feature
+  int count = 0;
+
+  // get feature in undistorted pixel space
+  // this only reverts from 'pure' space into undistorted pixel space using camera matrix
+  cv::Point2f und_pix_p = image_handler::distortPoint(cv::Point2f(u, v),
+      cam.intrinsics,
+      cam.distortion_model,
+      cam.distortion_coeffs);
+  // create vector of patch in pixel plane
+  for(double u_run = -n; u_run <= n; u_run++)
+    for(double v_run = -n; v_run <= n; v_run++)
+      anchorPatch_real.push_back(cv::Point2f(und_pix_p.x+u_run, und_pix_p.y+v_run));

  //create undistorted pure points
  image_handler::undistortPoints(anchorPatch_real,
      cam.intrinsics,
      cam.distortion_model,
      cam.distortion_coeffs,
      anchorPatch_ideal);
-  }

  // save anchor position for later visualisaztion
  anchor_center_pos = anchorPatch_real[(N*N-1)/2];

  // save true pixel Patch position
  for(auto point : anchorPatch_real)
-    if(point.x - n < 0 || point.x + n >= cam.resolution(0)-1 || point.y - n < 0 || point.y + n >= cam.resolution(1)-1)
+    if(point.x - n < 0 || point.x + n >= cam.resolution(0) || point.y - n < 0 || point.y + n >= cam.resolution(1))
      return false;

  for(auto point : anchorPatch_real)
    anchorPatch.push_back(PixelIrradiance(point, anchorImage));

  // project patch pixel to 3D space in camera coordinate system
  for(auto point : anchorPatch_ideal)
    anchorPatch_3d.push_back(AnchorPixelToPosition(point, cam));

  is_anchored = true;
  return true;
}
+/*
bool Feature::initializeRho(const CamStateServer& cam_states) {

  // Organize camera poses and feature observations properly.
std::vector<Eigen::Isometry3d,
Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
std::vector<Eigen::Vector2d,
Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
for (auto& m : observations) {
auto cam_state_iter = cam_states.find(m.first);
if (cam_state_iter == cam_states.end()) continue;
// Add the measurement.
measurements.push_back(m.second.head<2>());
measurements.push_back(m.second.tail<2>());
// This camera pose will take a vector from this camera frame
// to the world frame.
Eigen::Isometry3d cam0_pose;
cam0_pose.linear() = quaternionToRotation(
cam_state_iter->second.orientation).transpose();
cam0_pose.translation() = cam_state_iter->second.position;
Eigen::Isometry3d cam1_pose;
cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
cam_poses.push_back(cam0_pose);
cam_poses.push_back(cam1_pose);
}
// All camera poses should be modified such that it takes a
// vector from the first camera frame in the buffer to this
// camera frame.
Eigen::Isometry3d T_c0_w = cam_poses[0];
T_anchor_w = T_c0_w;
for (auto& pose : cam_poses)
pose = pose.inverse() * T_c0_w;
// Generate initial guess
double initial_depth = 0;
initial_depth = generateInitialDepth(cam_poses[cam_poses.size()-1], measurements[0],
measurements[measurements.size()-1]);
double solution = 1.0/initial_depth;
// Apply Levenberg-Marquart method to solve for the 3d position.
double lambda = optimization_config.initial_damping;
int inner_loop_cntr = 0;
int outer_loop_cntr = 0;
bool is_cost_reduced = false;
double delta_norm = 0;
// Compute the initial cost.
double total_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
Rhocost(cam_poses[i], solution, measurements[0], measurements[i], this_cost);
total_cost += this_cost;
}
// Outer loop.
do {
Eigen::Matrix<double, 1, 1> A = Eigen::Matrix<double, 1, 1>::Zero();
Eigen::Matrix<double, 1, 1> b = Eigen::Matrix<double, 1, 1>::Zero();
for (int i = 0; i < cam_poses.size(); ++i) {
Eigen::Matrix<double, 2, 1> J;
Eigen::Vector2d r;
double w;
RhoJacobian(cam_poses[i], solution, measurements[0], measurements[i], J, r, w);
if (w == 1) {
A += J.transpose() * J;
b += J.transpose() * r;
} else {
double w_square = w * w;
A += w_square * J.transpose() * J;
b += w_square * J.transpose() * r;
}
}
// Inner loop.
// Solve for the delta that can reduce the total cost.
do {
Eigen::Matrix<double, 1, 1> damper = lambda*Eigen::Matrix<double, 1, 1>::Identity();
Eigen::Matrix<double, 1, 1> delta = (A+damper).ldlt().solve(b);
double new_solution = solution - delta(0,0);
delta_norm = delta.norm();
double new_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
Rhocost(cam_poses[i], new_solution, measurements[0], measurements[i], this_cost);
new_cost += this_cost;
}
if (new_cost < total_cost) {
is_cost_reduced = true;
solution = new_solution;
total_cost = new_cost;
lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
} else {
is_cost_reduced = false;
lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
}
} while (inner_loop_cntr++ <
optimization_config.inner_loop_max_iteration && !is_cost_reduced);
inner_loop_cntr = 0;
} while (outer_loop_cntr++ <
optimization_config.outer_loop_max_iteration &&
delta_norm > optimization_config.estimation_precision);
// Covert the feature position from inverse depth
// representation to its 3d coordinate.
Eigen::Vector3d final_position(measurements[0](0)/solution,
measurements[0](1)/solution, 1.0/solution);
// Check if the solution is valid. Make sure the feature
// is in front of every camera frame observing it.
bool is_valid_solution = true;
for (const auto& pose : cam_poses) {
Eigen::Vector3d position =
pose.linear()*final_position + pose.translation();
if (position(2) <= 0) {
is_valid_solution = false;
break;
}
}
//save inverse depth distance from camera
anchor_rho = solution;
// Convert the feature position to the world frame.
position = T_c0_w.linear()*final_position + T_c0_w.translation();
if (is_valid_solution)
is_initialized = true;
return is_valid_solution;
}
bool Feature::initializePosition(const CamStateServer& cam_states) {
// Organize camera poses and feature observations properly.
  std::vector<Eigen::Isometry3d,
      Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
  std::vector<Eigen::Vector2d,
@@ -1420,7 +1056,156 @@ bool Feature::initializePosition(const CamStateServer& cam_states) {
  //save inverse depth distance from camera
  anchor_rho = solution(2);
-  std::cout << "from feature: " << anchor_rho << std::endl;

+  // Convert the feature position to the world frame.
+  position = T_c0_w.linear()*final_position + T_c0_w.translation();
+
+  if (is_valid_solution)
+    is_initialized = true;
+
+  return is_valid_solution;
+}
+*/
bool Feature::initializePosition(const CamStateServer& cam_states) {
// Organize camera poses and feature observations properly.
std::vector<Eigen::Isometry3d,
Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
std::vector<Eigen::Vector2d,
Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
for (auto& m : observations) {
// TODO: This should be handled properly. Normally, the
// required camera states should all be available in
// the input cam_states buffer.
auto cam_state_iter = cam_states.find(m.first);
if (cam_state_iter == cam_states.end()) continue;
// Add the measurement.
measurements.push_back(m.second.head<2>());
measurements.push_back(m.second.tail<2>());
// This camera pose will take a vector from this camera frame
// to the world frame.
Eigen::Isometry3d cam0_pose;
cam0_pose.linear() = quaternionToRotation(
cam_state_iter->second.orientation).transpose();
cam0_pose.translation() = cam_state_iter->second.position;
Eigen::Isometry3d cam1_pose;
cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
cam_poses.push_back(cam0_pose);
cam_poses.push_back(cam1_pose);
}
// All camera poses should be modified such that it takes a
// vector from the first camera frame in the buffer to this
// camera frame.
Eigen::Isometry3d T_c0_w = cam_poses[0];
T_anchor_w = T_c0_w;
for (auto& pose : cam_poses)
pose = pose.inverse() * T_c0_w;
// Generate initial guess
Eigen::Vector3d initial_position(0.0, 0.0, 0.0);
generateInitialGuess(cam_poses[cam_poses.size()-1], measurements[0],
measurements[measurements.size()-1], initial_position);
Eigen::Vector3d solution(
initial_position(0)/initial_position(2),
initial_position(1)/initial_position(2),
1.0/initial_position(2));
// Apply Levenberg-Marquart method to solve for the 3d position.
double lambda = optimization_config.initial_damping;
int inner_loop_cntr = 0;
int outer_loop_cntr = 0;
bool is_cost_reduced = false;
double delta_norm = 0;
// Compute the initial cost.
double total_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
cost(cam_poses[i], solution, measurements[i], this_cost);
total_cost += this_cost;
}
// Outer loop.
do {
Eigen::Matrix3d A = Eigen::Matrix3d::Zero();
Eigen::Vector3d b = Eigen::Vector3d::Zero();
for (int i = 0; i < cam_poses.size(); ++i) {
Eigen::Matrix<double, 2, 3> J;
Eigen::Vector2d r;
double w;
jacobian(cam_poses[i], solution, measurements[i], J, r, w);
if (w == 1) {
A += J.transpose() * J;
b += J.transpose() * r;
} else {
double w_square = w * w;
A += w_square * J.transpose() * J;
b += w_square * J.transpose() * r;
}
}
// Inner loop.
// Solve for the delta that can reduce the total cost.
do {
Eigen::Matrix3d damper = lambda * Eigen::Matrix3d::Identity();
Eigen::Vector3d delta = (A+damper).ldlt().solve(b);
Eigen::Vector3d new_solution = solution - delta;
delta_norm = delta.norm();
double new_cost = 0.0;
for (int i = 0; i < cam_poses.size(); ++i) {
double this_cost = 0.0;
cost(cam_poses[i], new_solution, measurements[i], this_cost);
new_cost += this_cost;
}
if (new_cost < total_cost) {
is_cost_reduced = true;
solution = new_solution;
total_cost = new_cost;
lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
} else {
is_cost_reduced = false;
lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
}
} while (inner_loop_cntr++ <
optimization_config.inner_loop_max_iteration && !is_cost_reduced);
inner_loop_cntr = 0;
} while (outer_loop_cntr++ <
optimization_config.outer_loop_max_iteration &&
delta_norm > optimization_config.estimation_precision);
// Covert the feature position from inverse depth
// representation to its 3d coordinate.
Eigen::Vector3d final_position(solution(0)/solution(2),
solution(1)/solution(2), 1.0/solution(2));
// Check if the solution is valid. Make sure the feature
// is in front of every camera frame observing it.
bool is_valid_solution = true;
for (const auto& pose : cam_poses) {
Eigen::Vector3d position =
pose.linear()*final_position + pose.translation();
if (position(2) <= 0) {
is_valid_solution = false;
break;
}
}
//save inverse depth distance from camera
anchor_rho = solution(2);
  // Convert the feature position to the world frame.
  position = T_c0_w.linear()*final_position + T_c0_w.translation();


@@ -16,16 +16,6 @@ namespace msckf_vio {
 */
namespace image_handler {
cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics);
cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics);
void undistortImage(
cv::InputArray src,
cv::OutputArray dst,
const std::string& distortion_model,
const cv::Vec4d& intrinsics,
const cv::Vec4d& distortion_coeffs);
void undistortPoints(
    const std::vector<cv::Point2f>& pts_in,
    const cv::Vec4d& intrinsics,


@@ -13,6 +13,13 @@
namespace msckf_vio {

+inline double absoluted(double a){
+  if(a>0)
+    return a;
+  else return -a;
+}
+
/*
 * @brief Create a skew-symmetric matrix from a 3-element vector.
 * @note Performs the operation:
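The absoluted() helper added here duplicates std::fabs from <cmath>; an illustrative comparison, not from the diff:

#include <cmath>

double a = absoluted(-3.5);   // 3.5, via the helper added above
double b = std::fabs(-3.5);   // 3.5, standard-library equivalent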


@@ -202,85 +202,38 @@ class MsckfVio {
      Eigen::Vector4d& r);

  // This function computes the Jacobian of all measurements viewed
  // in the given camera states of this feature.
-  void featureJacobian(
-      const FeatureIDType& feature_id,
+  void featureJacobian(const FeatureIDType& feature_id,
      const std::vector<StateIDType>& cam_state_ids,
      Eigen::MatrixXd& H_x, Eigen::VectorXd& r);

-  void twodotMeasurementJacobian(
+  void PhotometricMeasurementJacobian(
      const StateIDType& cam_state_id,
      const FeatureIDType& feature_id,
-      Eigen::MatrixXd& H_x, Eigen::MatrixXd& H_y, Eigen::VectorXd& r);
+      Eigen::MatrixXd& H_x,
+      Eigen::MatrixXd& H_y,
+      Eigen::VectorXd& r);

-  bool ConstructJacobians(
-      Eigen::MatrixXd& H_rho,
-      Eigen::MatrixXd& H_pl,
-      Eigen::MatrixXd& H_pA,
-      const Feature& feature,
-      const StateIDType& cam_state_id,
-      Eigen::MatrixXd& H_xl,
-      Eigen::MatrixXd& H_yl,
-      int patchsize);
+  void PhotometricFeatureJacobian(
+      const FeatureIDType& feature_id,
+      const std::vector<StateIDType>& cam_state_ids,
+      Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
bool PhotometricPatchPointResidual(
const StateIDType& cam_state_id,
const Feature& feature,
Eigen::VectorXd& r,
int patchsize);
bool PhotometricPatchPointJacobian(
const CAMState& cam_state,
const StateIDType& cam_state_id,
const Feature& feature,
Eigen::Vector3d point,
int count,
int patchsize,
Eigen::Matrix<double, 2, 1>& H_rhoj,
Eigen::Matrix<double, 2, 6>& H_plj,
Eigen::Matrix<double, 2, 6>& H_pAj,
Eigen::Matrix<double, 2, 4>& dI_dhj);
bool PhotometricMeasurementJacobian(
const StateIDType& cam_state_id,
const FeatureIDType& feature_id,
Eigen::MatrixXd& H_x,
Eigen::MatrixXd& H_y,
Eigen::VectorXd& r,
int patchsize);
void twodotFeatureJacobian(
const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
bool PhotometricFeatureJacobian(
const FeatureIDType& feature_id,
const std::vector<StateIDType>& cam_state_ids,
Eigen::MatrixXd& H_x, Eigen::VectorXd& r,
int patchsize);
void photometricMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r);
  void measurementUpdate(const Eigen::MatrixXd& H,
      const Eigen::VectorXd& r);
-  void twoMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r);
  bool gatingTest(const Eigen::MatrixXd& H,
-      const Eigen::VectorXd&r, const int& dof, int filter=0);
+      const Eigen::VectorXd&r, const int& dof);
  void removeLostFeatures();
  void findRedundantCamStates(
      std::vector<StateIDType>& rm_cam_state_ids);
-  void pruneLastCamStateBuffer();
  void pruneCamStateBuffer();
  // Reset the system online if the uncertainty is too large.
  void onlineReset();

  // Photometry flag
-  int FILTER;
+  bool PHOTOMETRIC;

  // debug flag
-  bool STREAMPAUSE;
  bool PRINTIMAGES;
  bool GROUNDTRUTH;
@@ -294,21 +247,8 @@ class MsckfVio {
  // Chi squared test table.
  static std::map<int, double> chi_squared_test_table;
double eval_time;
IMUState timed_old_imu_state;
IMUState timed_old_true_state;
IMUState old_imu_state;
IMUState old_true_state;
// change in position
Eigen::Vector3d delta_position;
Eigen::Vector3d delta_orientation;
  // State vector
  StateServer state_server;
-  StateServer photometric_state_server;

  // Ground truth state vector
  StateServer true_state_server;
@@ -371,7 +311,6 @@ class MsckfVio {
  // Subscribers and publishers
  ros::Subscriber imu_sub;
  ros::Subscriber truth_sub;
-  ros::Publisher truth_odom_pub;
  ros::Publisher odom_pub;
  ros::Publisher marker_pub;
  ros::Publisher feature_pub;
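The FILTER integer gives way to a single PHOTOMETRIC boolean here. A sketch of how such flags are typically pulled in from the launch files below; the actual loadParameters() body is not part of this diff, so only the parameter names are taken from the launch files and the rest is assumed:

#include <ros/ros.h>

void loadDebugFlags(ros::NodeHandle& nh,
                    bool& photometric, bool& print_images, bool& ground_truth) {
  nh.param<bool>("PHOTOMETRIC", photometric, false);
  nh.param<bool>("PrintImages", print_images, false);
  nh.param<bool>("GroundTruth", ground_truth, false);
}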


@@ -1,34 +0,0 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-tum-scaled.yaml"/>
<!-- Image Processor Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="image_processor"
args="standalone msckf_vio/ImageProcessorNodelet"
output="screen"
>
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="grid_row" value="4"/>
<param name="grid_col" value="4"/>
<param name="grid_min_feature_num" value="3"/>
<param name="grid_max_feature_num" value="4"/>
<param name="pyramid_levels" value="3"/>
<param name="patch_size" value="15"/>
<param name="fast_threshold" value="10"/>
<param name="max_iteration" value="30"/>
<param name="track_precision" value="0.01"/>
<param name="ransac_threshold" value="3"/>
<param name="stereo_threshold" value="5"/>
<remap from="~imu" to="/imu02"/>
<remap from="~cam0_image" to="/cam0/image_raw2"/>
<remap from="~cam1_image" to="/cam1/image_raw2"/>
</node>
</group>
</launch>


@@ -22,10 +22,10 @@
    <param name="PHOTOMETRIC" value="true"/>

    <!-- Debugging Flaggs -->
-    <param name="PrintImages" value="true"/>
+    <param name="PrintImages" value="false"/>
    <param name="GroundTruth" value="false"/>

-    <param name="patch_size_n" value="3"/>
+    <param name="patch_size_n" value="7"/>

    <!-- Calibration parameters -->
    <rosparam command="load" file="$(arg calibration_file)"/>


@@ -17,16 +17,6 @@
    args='standalone msckf_vio/MsckfVioNodelet'
    output="screen">
<!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
<param name="FILTER" value="0"/>
<!-- Debugging Flaggs -->
<param name="StreamPause" value="true"/>
<param name="PrintImages" value="false"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="3"/>
    <!-- Calibration parameters -->
    <rosparam command="load" file="$(arg calibration_file)"/>


@@ -1,76 +0,0 @@
<launch>
<arg name="robot" default="firefly_sbx"/>
<arg name="fixed_frame_id" default="world"/>
<arg name="calibration_file"
default="$(find msckf_vio)/config/camchain-imucam-tum-scaled.yaml"/>
<!-- Image Processor Nodelet -->
<include file="$(find msckf_vio)/launch/image_processor_tinytum.launch">
<arg name="robot" value="$(arg robot)"/>
<arg name="calibration_file" value="$(arg calibration_file)"/>
</include>
<!-- Msckf Vio Nodelet -->
<group ns="$(arg robot)">
<node pkg="nodelet" type="nodelet" name="vio"
args='standalone msckf_vio/MsckfVioNodelet'
output="screen">
<!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
<param name="FILTER" value="1"/>
<!-- Debugging Flaggs -->
<param name="StreamPause" value="true"/>
<param name="PrintImages" value="true"/>
<param name="GroundTruth" value="false"/>
<param name="patch_size_n" value="5"/>
<!-- Calibration parameters -->
<rosparam command="load" file="$(arg calibration_file)"/>
<param name="publish_tf" value="true"/>
<param name="frame_rate" value="20"/>
<param name="fixed_frame_id" value="$(arg fixed_frame_id)"/>
<param name="child_frame_id" value="odom"/>
<param name="max_cam_state_size" value="20"/>
<param name="position_std_threshold" value="8.0"/>
<param name="rotation_threshold" value="0.2618"/>
<param name="translation_threshold" value="0.4"/>
<param name="tracking_rate_threshold" value="0.5"/>
<!-- Feature optimization config -->
<param name="feature/config/translation_threshold" value="-1.0"/>
<!-- These values should be standard deviation -->
<param name="noise/gyro" value="0.005"/>
<param name="noise/acc" value="0.05"/>
<param name="noise/gyro_bias" value="0.001"/>
<param name="noise/acc_bias" value="0.01"/>
<param name="noise/feature" value="0.035"/>
<param name="initial_state/velocity/x" value="0.0"/>
<param name="initial_state/velocity/y" value="0.0"/>
<param name="initial_state/velocity/z" value="0.0"/>
<!-- These values should be covariance -->
<param name="initial_covariance/velocity" value="0.25"/>
<param name="initial_covariance/gyro_bias" value="0.01"/>
<param name="initial_covariance/acc_bias" value="0.01"/>
<param name="initial_covariance/extrinsic_rotation_cov" value="3.0462e-4"/>
<param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/>
<param name="initial_covariance/irradiance_frame_bias" value="0.1"/>
<remap from="~imu" to="/imu0"/>
<remap from="~ground_truth" to="/vrpn_client/raw_transform"/>
<remap from="~cam0_image" to="/cam0/image_raw"/>
<remap from="~cam1_image" to="/cam1/image_raw"/>
<remap from="~features" to="image_processor/features"/>
</node>
</group>
</launch>


@@ -17,15 +17,14 @@
    args='standalone msckf_vio/MsckfVioNodelet'
    output="screen">

-    <!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
-    <param name="FILTER" value="1"/>
+    <!-- Photometry Flag-->
+    <param name="PHOTOMETRIC" value="true"/>

    <!-- Debugging Flaggs -->
-    <param name="StreamPause" value="true"/>
-    <param name="PrintImages" value="false"/>
+    <param name="PrintImages" value="true"/>
    <param name="GroundTruth" value="false"/>

-    <param name="patch_size_n" value="3"/>
+    <param name="patch_size_n" value="7"/>

    <!-- Calibration parameters -->
    <rosparam command="load" file="$(arg calibration_file)"/>

@@ -33,7 +32,7 @@
    <param name="frame_rate" value="20"/>
    <param name="fixed_frame_id" value="$(arg fixed_frame_id)"/>
    <param name="child_frame_id" value="odom"/>
-    <param name="max_cam_state_size" value="12"/>
+    <param name="max_cam_state_size" value="20"/>

    <param name="position_std_threshold" value="8.0"/>
    <param name="rotation_threshold" value="0.2618"/>

@@ -72,6 +71,4 @@
  </node>
</group>

-  <!--node name="player" pkg="bagcontrol" type="control.py" /-->
</launch>

log

@@ -1,73 +0,0 @@
# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST <raphael@raphael-desktop>
# name: Hx
# type: matrix
# rows: 18
# columns: 49
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 313.795 -58.7912 139.778 -46.7616 144.055 86.9644 0 -314.123 55.6434 -140.648 46.7616 -144.055 -86.9644 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 441.06 -94.50069999999999 174.424 -53.7653 204.822 120.248 0 -441.685 90.1101 -175.657 53.7653 -204.822 -120.248 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 225.35 -54.5629 77.60599999999999 -21.1425 105.886 60.3706 0 -225.756 52.3373 -78.2406 21.1425 -105.886 -60.3706 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 175.128 20.6203 175.127 -79.63939999999999 73.245 62.1868 0 -174.573 -22.5235 -175.576 79.63939999999999 -73.245 -62.1868 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 296.962 43.5469 311.307 -143.667 123.399 108.355 0 -295.905 -46.7952 -312.063 143.667 -123.399 -108.355 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 126.283 117.889 311.864 -161.264 38.8521 71.8019 0 -124.464 -119.541 -312.118 161.264 -38.8521 -71.8019 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 49.2502 63.7166 155.071 -80.81950000000001 12.7732 32.1826 0 -48.2934 -64.4113 -155.157 80.81950000000001 -12.7732 -32.1826 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 69.59699999999999 154.579 335.384 -179.355 9.212580000000001 62.0364 0 -67.35599999999999 -155.735 -335.462 179.355 -9.212580000000001 -62.0364 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -66.6965 304.947 500.218 -285.589 -71.31010000000001 55.5058 0 70.8009 -305.077 -499.831 285.589 71.31010000000001 -55.5058 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 323.404 -62.2043 141.092 -46.6015 148.737 89.1108 0 0 0 0 0 0 0 0 -324.336 57.8552 -141.991 46.6015 -148.737 -89.1108 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 454.208 -99.3095 175.986 -53.4094 211.276 123.174 0 0 0 0 0 0 0 0 -455.779 93.0992 -177.158 53.4094 -211.276 -123.174 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 231.884 -57.0266 78.2559 -20.8926 109.118 61.8183 0 0 0 0 0 0 0 0 -232.824 53.8025 -78.80719999999999 20.8926 -109.118 -61.8183 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 181.715 18.8525 177.045 -80.08499999999999 76.3716 63.8254 0 0 0 0 0 0 0 0 -181.07 -20.839 -177.959 80.08499999999999 -76.3716 -63.8254 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 308.249 40.5812 314.711 -144.494 128.766 111.207 0 0 0 0 0 0 0 0 -306.972 -43.8825 -316.328 144.494 -128.766 -111.207 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 133.309 116.865 315.474 -162.598 42.0763 73.8353 0 0 0 0 0 0 0 0 -130.603 -117.454 -316.931 162.598 -42.0763 -73.8353 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 52.4426 63.3607 156.902 -81.5288 14.2139 33.1222 0 0 0 0 0 0 0 0 -50.9976 -63.4393 -157.607 81.5288 -14.2139 -33.1222 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 75.5508 154.234 339.369 -180.995 11.859 63.8956 0 0 0 0 0 0 0 0 -72.1041 -153.816 -340.865 180.995 -11.859 -63.8956 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -62.0551 306.357 506.351 -288.542 -69.50409999999999 57.4562 0 0 0 0 0 0 0 0 68.6262 -303.028 -508.423 288.542 69.50409999999999 -57.4562 1 0 0 0 0 0 0 0
# name: Hy
# type: matrix
# rows: 18
# columns: 14
1.56267 0 0 0 0 0 0 0 0 0 0.252873 0 0 0.110387
0 1.56267 0 0 0 0 0 0 0 0 0.453168 0 0 0.162961
0 0 1.56267 0 0 0 0 0 0 0 0.638441 0 0 0.0873758
0 0 0 1.56267 0 0 0 0 0 0 0.21031 0 0 0.0321517
0 0 0 0 1.56267 0 0 0 0 0 0.375554 0 0 0.0505151
0 0 0 0 0 1.56267 0 0 0 0 0.638441 0 0 -0.0336034
0 0 0 0 0 0 1.56267 0 0 0 0.157733 0 0 -0.0232704
0 0 0 0 0 0 0 1.56267 0 0 0.212814 0 0 -0.0688343
0 0 0 0 0 0 0 0 1.56267 0 0.360532 0 0 -0.187745
1.56267 0 0 0 0 0 0 0 0 0 0 0.252873 0 0.322811
0 1.56267 0 0 0 0 0 0 0 0 0 0.453168 0 0.467319
0 0 1.56267 0 0 0 0 0 0 0 0 0.638441 0 0.245907
0 0 0 1.56267 0 0 0 0 0 0 0 0.21031 0 0.135725
0 0 0 0 1.56267 0 0 0 0 0 0 0.375554 0 0.225277
0 0 0 0 0 1.56267 0 0 0 0 0 0.638441 0 0.012926
0 0 0 0 0 0 1.56267 0 0 0 0 0.157733 0 -0.0108962
0 0 0 0 0 0 0 1.56267 0 0 0 0.212814 0 -0.06974909999999999
0 0 0 0 0 0 0 0 1.56267 0 0 0.360532 0 -0.318846
# name: r
# type: matrix
# rows: 18
# columns: 1
0.0354809
0.0153183
0.0570191
-0.0372801
0.0878601
0.0681178
-0.00426164
# (entries 8-18 omitted: zeros and denormals such as 5.16e-321 and 6.93e-310,
#  which look like uninitialized memory rather than real residuals)
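These blocks are GNU Octave text dumps (`# name:` / `# type: matrix` headers). Nothing in the dump says how `Hy` and `r` are consumed, but their shapes (an 18x14 Jacobian against an 18x1 residual) match a stacked measurement update, and the correction such a pair typically feeds is the least-squares solve sketched below. The file names and the solver choice are assumptions for illustration, not this repository's code.

import numpy as np

# Hedged sketch: turn a dumped Jacobian/residual pair into a state correction,
# as in the least-squares core of an MSCKF-style update. Illustrative only.
Hy = np.loadtxt('Hy.txt')   # 18x14; np.loadtxt skips the '#' header lines
r = np.loadtxt('r.txt')     # length-18 residual
dx = np.linalg.lstsq(Hy, r, rcond=None)[0]
print(dx)                   # length-14 correction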

View File

@@ -1,64 +0,0 @@
#!/usr/bin/env python
import rosbag
import rospy
from sensor_msgs.msg import Imu, Image
from geometry_msgs.msg import TransformStamped
import time
import signal
import sys


def signal_handler(sig, frame):
    print('gracefully exiting the program.')
    bag.close()
    sys.exit(0)


def main():
    global bag

    cam0_topic = '/cam0/image_raw'
    cam1_topic = '/cam1/image_raw'
    imu0_topic = '/imu0'
    grnd_topic = '/vrpn_client/raw_transform'

    rospy.init_node('controlbag')
    rospy.set_param('play_bag', False)

    cam0_pub = rospy.Publisher(cam0_topic, Image, queue_size=10)
    cam1_pub = rospy.Publisher(cam1_topic, Image, queue_size=10)
    imu0_pub = rospy.Publisher(imu0_topic, Imu, queue_size=10)
    grnd_pub = rospy.Publisher(grnd_topic, TransformStamped, queue_size=10)

    signal.signal(signal.SIGINT, signal_handler)

    bag = rosbag.Bag('/home/raphael/dev/MSCKF_ws/bag/TUM/dataset-corridor1_1024_16.bag')
    for topic, msg, t in bag.read_messages(topics=[cam0_topic, cam1_topic, imu0_topic, grnd_topic]):
        # Pause while the 'play_bag' parameter is false; report each transition once.
        flag = False
        while not rospy.get_param('/play_bag'):
            time.sleep(0.01)
            if not flag:
                print("stopped playback")
                flag = not flag
        if flag:
            print("resume playback")

        if topic == cam0_topic:
            cam0_pub.publish(msg)
        elif topic == cam1_topic:
            cam1_pub.publish(msg)
        elif topic == imu0_topic:
            imu0_pub.publish(msg)
        elif topic == grnd_topic:
            grnd_pub.publish(msg)
        # print(msg)

    bag.close()


if __name__ == "__main__":
    main()
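Playback is gated entirely by the `/play_bag` parameter, so the script can be driven from a shell with `rosparam set /play_bag true` (or `false`), or by a tiny companion node such as this hypothetical toggle (not part of the repository):

#!/usr/bin/env python
# Hypothetical helper: flip the /play_bag gate that controlbag polls.
import rospy

rospy.init_node('toggle_play_bag', anonymous=True)
state = rospy.get_param('/play_bag', False)
rospy.set_param('/play_bag', not state)
print('play_bag -> %s' % (not state))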

View File

@@ -14,38 +14,6 @@ namespace msckf_vio {
 namespace image_handler {
 
-cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics)
-{
-  return cv::Point2f(p_in.x * intrinsics[0] + intrinsics[2], p_in.y * intrinsics[1] + intrinsics[3]);
-}
-
-cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics)
-{
-  return cv::Point2f((p_in.x - intrinsics[2])/intrinsics[0], (p_in.y - intrinsics[3])/intrinsics[1]);
-}
-
-void undistortImage(
-    cv::InputArray src,
-    cv::OutputArray dst,
-    const std::string& distortion_model,
-    const cv::Vec4d& intrinsics,
-    const cv::Vec4d& distortion_coeffs)
-{
-  const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
-                      0.0, intrinsics[1], intrinsics[3],
-                      0.0, 0.0, 1.0);
-
-  if (distortion_model == "pre-equidistant")
-    cv::fisheye::undistortImage(src, dst, K, distortion_coeffs, K);
-  else if (distortion_model == "equidistant")
-    src.copyTo(dst);
-  else if (distortion_model == "pre-radtan")
-    cv::undistort(src, dst, K, distortion_coeffs, K);
-  else if (distortion_model == "radtan")
-    src.copyTo(dst);
-}
-
 void undistortPoint(
     const cv::Point2f& pt_in,
     const cv::Vec4d& intrinsics,
@@ -74,36 +42,10 @@ void undistortPoint(
   if (distortion_model == "radtan") {
     cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                         rectification_matrix, K_new);
-  }
-  // equidistant
-  else if (distortion_model == "equidistant") {
+  } else if (distortion_model == "equidistant") {
     cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                                  rectification_matrix, K_new);
-  }
-  // fov
-  else if (distortion_model == "fov") {
-    for(int i = 0; i < pts_in.size(); i++)
-    {
-      float omega = distortion_coeffs[0];
-      float rd = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
-      float ru = tan(rd * omega)/(2 * tan(omega / 2));
-      cv::Point2f newPoint(
-          ((pts_in[i].x - intrinsics[2]) / intrinsics[0]) * (ru / rd),
-          ((pts_in[i].y - intrinsics[3]) / intrinsics[1]) * (ru / rd));
-      pts_out.push_back(newPoint);
-    }
-  }
-  else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
-  {
-    std::vector<cv::Point2f> temp_pts_out;
-    for(int i = 0; i < pts_in.size(); i++)
-      temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics));
-    pts_out = temp_pts_out;
-  }
-  else {
+  } else {
     ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
                   distortion_model.c_str());
     cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
@@ -137,35 +79,10 @@ void undistortPoints(
   if (distortion_model == "radtan") {
     cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                         rectification_matrix, K_new);
-  }
-  else if (distortion_model == "equidistant") {
+  } else if (distortion_model == "equidistant") {
     cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                                  rectification_matrix, K_new);
-  }
-  else if (distortion_model == "fov") {
-    for(int i = 0; i < pts_in.size(); i++)
-    {
-      float omega = distortion_coeffs[0];
-      float rd = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
-      float ru = tan(rd * omega)/(2 * tan(omega / 2));
-      cv::Point2f newPoint(
-          ((pts_in[i].x - intrinsics[2]) / intrinsics[0]) * (ru / rd),
-          ((pts_in[i].y - intrinsics[3]) / intrinsics[1]) * (ru / rd));
-      pts_out.push_back(newPoint);
-    }
-  }
-  else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
-  {
-    std::vector<cv::Point2f> temp_pts_out;
-    for(int i = 0; i < pts_in.size(); i++)
-      temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics));
-    pts_out = temp_pts_out;
-  }
-  else {
+  } else {
     ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
                   distortion_model.c_str());
     cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
@@ -193,31 +110,7 @@ std::vector<cv::Point2f> distortPoints(
       distortion_coeffs, pts_out);
   } else if (distortion_model == "equidistant") {
     cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
-  }
-  else if (distortion_model == "fov") {
-    for(int i = 0; i < pts_in.size(); i++)
-    {
-      // based on 'straight lines have to be straight'
-      float ru = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
-      float omega = distortion_coeffs[0];
-      float rd = 1 / (omega)*atan(2*ru*tan(omega / 2));
-      cv::Point2f newPoint(
-          pts_in[i].x * (rd/ru) * intrinsics[0] + intrinsics[2],
-          pts_in[i].y * (rd/ru) * intrinsics[1] + intrinsics[3]);
-      pts_out.push_back(newPoint);
-    }
-  }
-  else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
-  {
-    std::vector<cv::Point2f> temp_pts_out;
-    for(int i = 0; i < pts_in.size(); i++)
-      temp_pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics));
-    pts_out = temp_pts_out;
-  }
-  else {
+  } else {
     ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
                   distortion_model.c_str());
     std::vector<cv::Point3f> homogenous_pts;
@@ -250,31 +143,7 @@ cv::Point2f distortPoint(
       distortion_coeffs, pts_out);
   } else if (distortion_model == "equidistant") {
     cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
-  }
-  else if (distortion_model == "fov") {
-    for(int i = 0; i < pts_in.size(); i++)
-    {
-      // based on 'straight lines have to be straight'
-      float ru = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
-      float omega = distortion_coeffs[0];
-      float rd = 1 / (omega)*atan(2*ru*tan(omega / 2));
-      cv::Point2f newPoint(
-          pts_in[i].x * (rd/ru) * intrinsics[0] + intrinsics[2],
-          pts_in[i].y * (rd/ru) * intrinsics[1] + intrinsics[3]);
-      pts_out.push_back(newPoint);
-    }
-  }
-  else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
-  {
-    std::vector<cv::Point2f> temp_pts_out;
-    for(int i = 0; i < pts_in.size(); i++)
-      pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics));
-    pts_out = temp_pts_out;
-  }
-  else {
+  } else {
     ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
                   distortion_model.c_str());
     std::vector<cv::Point3f> homogenous_pts;
@@ -286,5 +155,5 @@ cv::Point2f distortPoint(
   return pts_out[0];
 }
 
 } // namespace image_handler
 } // namespace msckf_vio
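For reference, the `fov` branches removed above implement the Devernay-Faugeras field-of-view model ('straight lines have to be straight'), with omega taken from `distortion_coeffs[0]` and r_u, r_d the undistorted and distorted radii; this is exactly the `ru`/`rd` pair computed in the deleted loops:

$$
r_d = \frac{1}{\omega}\,\arctan\!\left(2\,r_u \tan\frac{\omega}{2}\right),
\qquad
r_u = \frac{\tan(r_d\,\omega)}{2\,\tan(\omega/2)}.
$$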

View File

@@ -219,22 +219,12 @@ void ImageProcessor::stereoCallback(
   cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img,
       sensor_msgs::image_encodings::MONO8);
 
-  ros::Time start_time = ros::Time::now();
-  cv::Mat newImage;
-  image_handler::undistortImage(cam0_curr_img_ptr->image, newImage, cam0.distortion_model, cam0.intrinsics, cam0.distortion_coeffs);
-  newImage.copyTo(cam0_curr_img_ptr->image);
-  image_handler::undistortImage(cam1_curr_img_ptr->image, newImage, cam1.distortion_model, cam1.intrinsics, cam1.distortion_coeffs);
-  newImage.copyTo( cam1_curr_img_ptr->image);
-
-  //ROS_INFO("Publishing: %f",
-  //    (ros::Time::now()-start_time).toSec());
-
   // Build the image pyramids once since they're used at multiple places
   createImagePyramids();
 
   // Detect features in the first frame.
   if (is_first_img) {
-    start_time = ros::Time::now();
+    ros::Time start_time = ros::Time::now();
     initializeFirstFrame();
     //ROS_INFO("Detection time: %f",
     //    (ros::Time::now()-start_time).toSec());
@@ -247,7 +237,7 @@ void ImageProcessor::stereoCallback(
     //    (ros::Time::now()-start_time).toSec());
   } else {
     // Track the feature in the previous image.
-    start_time = ros::Time::now();
+    ros::Time start_time = ros::Time::now();
     trackFeatures();
     //ROS_INFO("Tracking time: %f",
     //    (ros::Time::now()-start_time).toSec());
@@ -277,7 +267,7 @@ void ImageProcessor::stereoCallback(
   //    (ros::Time::now()-start_time).toSec());
 
   // Publish features in the current image.
-  start_time = ros::Time::now();
+  ros::Time start_time = ros::Time::now();
   publish();
   //ROS_INFO("Publishing: %f",
   //    (ros::Time::now()-start_time).toSec());
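This hunk is where the whole-frame pre-undistortion used to run, once per stereo pair, before it was backed out along with the `pre-*` models. A hedged Python/OpenCV sketch of that idea, assuming the equidistant (fisheye) model; the function names are illustrative, not this repository's API:

import cv2
import numpy as np

def undistort_once(img, intrinsics, dist_coeffs):
    # Rectify the full frame once, so later point handling is pure pinhole.
    fx, fy, cx, cy = intrinsics
    K = np.array([[fx, 0.0, cx],
                  [0.0, fy, cy],
                  [0.0, 0.0, 1.0]])
    D = np.asarray(dist_coeffs, dtype=np.float64)
    return cv2.fisheye.undistortImage(img, K, D, Knew=K)

def pinhole_up_project(pt, intrinsics):
    # Pixel -> normalized coordinates; all that points need afterwards.
    fx, fy, cx, cy = intrinsics
    return ((pt[0] - cx) / fx, (pt[1] - cy) / fy)

The trade-off is one dense remap per frame in exchange for trivial per-point math everywhere downstream.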

File diff suppressed because it is too large

View File

@@ -1,75 +0,0 @@
#!/usr/bin/env python
from __future__ import print_function

import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError


class image_converter:

    def __init__(self):
        self.image0_pub = rospy.Publisher("/cam0/new_image_raw", Image, queue_size=10)
        self.image1_pub = rospy.Publisher("/cam1/new_image_raw", Image, queue_size=10)

        self.bridge = CvBridge()
        self.image0_sub = rospy.Subscriber("/cam0/image_raw", Image, self.callback_cam0)
        self.image1_sub = rospy.Subscriber("/cam1/image_raw", Image, self.callback_cam1)

    def callback_cam0(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        imgScale = 0.25
        newX, newY = cv_image.shape[1]*imgScale, cv_image.shape[0]*imgScale
        newimg = cv2.resize(cv_image, (int(newX), int(newY)))
        newpub = self.bridge.cv2_to_imgmsg(newimg, "bgr8")

        newdata = data
        newdata.height = newpub.height
        newdata.width = newpub.width
        newdata.step = newpub.step
        newdata.data = newpub.data

        try:
            self.image0_pub.publish(newdata)
        except CvBridgeError as e:
            print(e)

    def callback_cam1(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        imgScale = 0.25
        newX, newY = cv_image.shape[1]*imgScale, cv_image.shape[0]*imgScale
        newimg = cv2.resize(cv_image, (int(newX), int(newY)))
        newpub = self.bridge.cv2_to_imgmsg(newimg, "bgr8")

        newdata = data
        newdata.height = newpub.height
        newdata.width = newpub.width
        newdata.step = newpub.step
        newdata.data = newpub.data

        try:
            self.image1_pub.publish(newdata)
        except CvBridgeError as e:
            print(e)


def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main(sys.argv)
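One caveat worth keeping next to this script: if the quarter-scale streams feed the estimator, the pinhole intrinsics must be scaled by the same factor (up to half-pixel center conventions), or projections are off by a factor of 1/imgScale. A minimal illustration with placeholder values, not the repository's calibration:

# Scale pinhole intrinsics to match a resized image (placeholder values).
img_scale = 0.25
fx, fy, cx, cy = 400.0, 400.0, 512.0, 512.0
fx_s, fy_s, cx_s, cy_s = (v * img_scale for v in (fx, fy, cx, cy))
print(fx_s, fy_s, cx_s, cy_s)   # 100.0 100.0 128.0 128.0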