Compare commits: 2dac361ba2...master (61 commits)
Commits in this range: 43ac48bf56, e9d801bdc1, 4a5bdc735c, 9253fcab5f, 69d385f257, 62cd89fd0d, a8090ca58a, ed2ba61828, e94d4a06b5, 0ef71b9220, 5a80f97b68, 876fa7617c, 14825c344e, 02156bd89a, 1380ec426f, a7c296ca3d, 1a07ba3d3c, 3873c978dd, 6ee756941c, 6bcc72f826, 737c23f32a, 58fe991647, d013a1b080, 7b7e966217, 53e2a5d524, 3ae7bdb13a, 715ca6a6b4, 9f528c1ea1, bfb2a551a7, af1820a238, 655416a490, 010db87e4b, 5451c2d097, b3e86b3e64, ebc73c0c5e, 273bbf8a94, 1d6100ed13, 6f7f8b7892, c565033d7f, bcf948bcc1, 02b9e0bc78, c79fc173b3, ecab936c62, 2d045660c2, dcbc69a1c4, 1b29047451, 07f4927128, acca4ab018, 44fffa19a2, 9b4bf12846, 5f6bcd1784, 2a16fb2fc5, 5d36a123a7, 0d544c5361, d9899227c2, e788854fe8, 2aef2089aa, 0f96c916f1, 9c7f67d2fd, 4842c175a5, 9ded72a366
.vscode/c_cpp_properties.json (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
+{
+    "configurations": [
+        {
+            "name": "Linux",
+            "includePath": [
+                "${workspaceFolder}/**"
+            ],
+            "defines": [],
+            "compilerPath": "/usr/bin/gcc",
+            "cStandard": "c11",
+            "cppStandard": "c++14",
+            "intelliSenseMode": "clang-x64"
+        }
+    ],
+    "version": 4
+}
.vscode/ipch/778a17e566a4909e/mmap_address.bin (vendored, new binary file, not shown)
.vscode/ipch/ccf983af1f87ec2b/mmap_address.bin (vendored, new binary file, not shown)
.vscode/ipch/e40aedd19a224f8d/mmap_address.bin (vendored, new binary file, not shown)
.vscode/settings.json (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
+{
+    "files.associations": {
+        "core": "cpp",
+        "sparsecore": "cpp"
+    }
+}
README.md (30 lines changed)
@@ -1,12 +1,22 @@
 # MSCKF_VIO
 
-The `MSCKF_VIO` package is a stereo version of MSCKF. The software takes in synchronized stereo images and IMU messages and generates real-time 6DOF pose estimation of the IMU frame.
+The `MSCKF_VIO` package is a stereo-photometric version of MSCKF. The software takes in synchronized stereo images and IMU messages and generates real-time 6DOF pose estimation of the IMU frame.
 
-The software is tested on Ubuntu 16.04 with ROS Kinetic.
+This approach is based on the paper by Ke Sun et al.,
+[https://arxiv.org/abs/1712.00036](https://arxiv.org/abs/1712.00036), and their Stereo MSCKF implementation, which tightly fuses the matched feature information of a stereo image pair into a 6DOF pose.
+The approach implemented in this repository follows the semi-dense MSCKF approach, tightly fusing the photometric
+information around the matched features into the covariance matrix, as described and derived in the master thesis [Pose Estimation using a Stereo-Photometric Multi-State Constraint Kalman Filter](http://raphael.maenle.net/resources/sp-msckf/maenle_master_thesis.pdf).
+
+Its positioning is comparable to the approach from Ke Sun et al., with the photometric approach incurring a higher
+computational load, especially with larger image patches around the feature. A video explaining the approach can be
+found at [https://youtu.be/HrqQywAnenQ](https://youtu.be/HrqQywAnenQ):
+<br/>
+[![Stereo-Photometric MSCKF](https://img.youtube.com/vi/HrqQywAnenQ/0.jpg)](https://www.youtube.com/watch?v=HrqQywAnenQ)
+
+<br/>
+This software should be deployed using ROS Kinetic on Ubuntu 16.04 or 18.04.
-Video: [https://www.youtube.com/watch?v=jxfJFgzmNSw&t](https://www.youtube.com/watch?v=jxfJFgzmNSw&t=3s)<br/>
-Paper Draft: [https://arxiv.org/abs/1712.00036](https://arxiv.org/abs/1712.00036)
 
 ## License
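In rough terms, the photometric part adds per-pixel irradiance residuals over a small patch around each tracked feature to the usual geometric reprojection residuals. As a sketch only (the exact residual, its Jacobians, and its noise model are derived in the thesis linked above and not reproduced here):

$$ r_i \;=\; I_k\!\left(\pi\!\left(T^{c_k}_{a}\, p_i\right)\right) \;-\; I_a(p_i), \qquad i = 1,\dots,N\times N, $$

where $I_a$ is the anchor-frame irradiance of the $i$-th patch pixel, $p_i$ its 3D position recovered from the anchor's inverse depth, $T^{c_k}_{a}$ the anchor-to-frame transform, and $\pi(\cdot)$ the pinhole projection.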
@@ -28,16 +38,6 @@ cd your_work_space
 catkin_make --pkg msckf_vio --cmake-args -DCMAKE_BUILD_TYPE=Release
 ```
 
-## Calibration
-
-An accurate calibration is crucial for successfully running the software. To get the best performance of the software, the stereo cameras and IMU should be hardware synchronized. Note that for the stereo calibration, which includes the camera intrinsics, distortion, and extrinsics between the two cameras, you have to use a calibration software. **Manually setting these parameters will not be accurate enough.** [Kalibr](https://github.com/ethz-asl/kalibr) can be used for the stereo calibration and also to get the transformation between the stereo cameras and IMU. The yaml file generated by Kalibr can be directly used in this software. See calibration files in the `config` folder for details. The two calibration files in the `config` folder should work directly with the EuRoC and [fast flight](https://github.com/KumarRobotics/msckf_vio/wiki) datasets. The convention of the calibration file is as follows:
-
-`camx/T_cam_imu`: takes a vector from the IMU frame to the camx frame.
-`cam1/T_cn_cnm1`: takes a vector from the cam0 frame to the cam1 frame.
-
-The filter uses the first 200 IMU messages to initialize the gyro bias, acc bias, and initial orientation. Therefore, the robot is required to start from a stationary state in order to initialize the VIO successfully.
-
 ## EuRoC and UPenn Fast flight dataset example usage
 
 First obtain either the [EuRoC](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) or the [UPenn fast flight](https://github.com/KumarRobotics/msckf_vio/wiki/Dataset) dataset.
@@ -75,6 +75,8 @@ To visualize the pose and feature estimates you can use the provided rviz config
 
 ## ROS Nodes
 
+The general structure is similar to the structure of the MSCKF implementation this repository is derived from.
+
 ### `image_processor` node
 
 **Subscribed Topics**
@@ -9,7 +9,7 @@ cam0:
      0, 0, 0, 1.000000000000000]
   camera_model: pinhole
   distortion_coeffs: [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05]
-  distortion_model: radtan
+  distortion_model: pre-radtan
   intrinsics: [458.654, 457.296, 367.215, 248.375]
   resolution: [752, 480]
   timeshift_cam_imu: 0.0
@@ -26,7 +26,7 @@ cam1:
      0, 0, 0, 1.000000000000000]
   camera_model: pinhole
   distortion_coeffs: [-0.28368365, 0.07451284, -0.00010473, -3.55590700e-05]
-  distortion_model: radtan
+  distortion_model: pre-radtan
   intrinsics: [457.587, 456.134, 379.999, 255.238]
   resolution: [752, 480]
   timeshift_cam_imu: 0.0
config/camchain-imucam-tum-scaled.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
+cam0:
+  T_cam_imu:
+    [-0.9995378259923383, 0.02917807204183088, -0.008530798463872679, 0.047094247958417004,
+     0.007526588843243184, -0.03435493139706542, -0.9993813532126198, -0.04788273017221637,
+     -0.029453096117288798, -0.9989836729399656, 0.034119442089241274, -0.0697294754693238,
+     0.0, 0.0, 0.0, 1.0]
+  camera_model: pinhole
+  distortion_coeffs: [0.0034823894022493434, 0.0007150348452162257, -0.0020532361418706202,
+    0.00020293673591811182]
+  distortion_model: pre-equidistant
+  intrinsics: [190.97847715128717, 190.9733070521226, 254.93170605935475, 256.8974428996504]
+  resolution: [512, 512]
+  rostopic: /cam0/image_raw
+cam1:
+  T_cam_imu:
+    [-0.9995240747493029, 0.02986739485347808, -0.007717688852024281, -0.05374086123613335,
+     0.008095979457928231, 0.01256553460985914, -0.9998882749870535, -0.04648588412432889,
+     -0.02976708103202316, -0.9994748851595197, -0.0128013601698453, -0.07333210787623645,
+     0.0, 0.0, 0.0, 1.0]
+  T_cn_cnm1:
+    [0.9999994317488622, -0.0008361847221513937, -0.0006612844045898121, -0.10092123225528335,
+     0.0008042457277382264, 0.9988989443471681, -0.04690684567228134, -0.001964540595211977,
+     0.0006997790813734836, 0.04690628718225568, 0.9988990492196964, -0.0014663556043866572,
+     0.0, 0.0, 0.0, 1.0]
+  camera_model: pinhole
+  distortion_coeffs: [0.0034003170790442797, 0.001766278153469831, -0.00266312569781606,
+    0.0003299517423931039]
+  distortion_model: pre-equidistant
+  intrinsics: [190.44236969414825, 190.4344384721956, 252.59949716835982, 254.91723064636983]
+  resolution: [512, 512]
+  rostopic: /cam1/image_raw
+T_imu_body:
+  [1.0000, 0.0000, 0.0000, 0.0000,
+   0.0000, 1.0000, 0.0000, 0.0000,
+   0.0000, 0.0000, 1.0000, 0.0000,
+   0.0000, 0.0000, 0.0000, 1.0000]
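The `pre-equidistant` and `pre-radtan` model names above signal that the recorded images are undistorted before they enter the filter, so per-point projection only needs the pinhole intrinsics (this is how `projectPositionToCamera` further down in this diff treats any model starting with `pre-`). A minimal sketch of that convention; the two functions are illustrative, not part of the repository:

```cpp
#include <opencv2/core.hpp>
#include <string>

// Hypothetical helper: a "pre-" model means the image was undistorted beforehand.
bool isPreUndistorted(const std::string& distortion_model) {
  return distortion_model.rfind("pre-", 0) == 0;  // model name starts with "pre-"
}

// With a pre-undistorted image, a normalized point (x, y) maps to pixels via the
// camera matrix alone: u = fx*x + cx, v = fy*y + cy, intrinsics = [fx, fy, cx, cy].
cv::Point2f normalizedToPixel(const cv::Point2f& n, const cv::Vec4d& intrinsics) {
  return cv::Point2f(n.x * intrinsics[0] + intrinsics[2],
                     n.y * intrinsics[1] + intrinsics[3]);
}
```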
@@ -18,6 +18,8 @@ namespace msckf_vio {
 
 struct Frame{
   cv::Mat image;
+  cv::Mat dximage;
+  cv::Mat dyimage;
   double exposureTime_ms;
 };
 
@@ -39,6 +41,7 @@ struct CameraCalibration{
   cv::Vec4d distortion_coeffs;
   movingWindow moving_window;
   cv::Mat featureVisu;
+  int id;
 };
 
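The new `dximage`/`dyimage` members of `Frame` hold per-frame image derivatives. A sketch of how such gradient images can be produced, mirroring the Sobel setup that `Feature::initializeAnchor` uses later in this diff (the free function is illustrative, not part of the repository):

```cpp
#include <opencv2/imgproc.hpp>

// Compute x/y derivative images on a 16-bit signed copy so negative gradients survive.
void computeDerivativeImages(const cv::Mat& gray, cv::Mat& dximage, cv::Mat& dyimage) {
  cv::Mat deeper;
  gray.convertTo(deeper, CV_16S);
  cv::Sobel(deeper, dximage, -1, 1, 0, 3);  // d/dx, 3x3 kernel
  cv::Sobel(deeper, dyimage, -1, 0, 1, 3);  // d/dy, 3x3 kernel
  dximage /= 8.;                            // undo the 3x3 Sobel gain
  dyimage /= 8.;
}
```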
@@ -15,7 +15,7 @@
 #include <Eigen/Dense>
 #include <Eigen/Geometry>
 #include <Eigen/StdVector>
+#include <math.h>
 #include <visualization_msgs/Marker.h>
 #include <visualization_msgs/MarkerArray.h>
 #include <geometry_msgs/Point.h>
@@ -70,6 +70,11 @@ struct Feature {
     position(Eigen::Vector3d::Zero()),
     is_initialized(false), is_anchored(false) {}
 
+  void Rhocost(const Eigen::Isometry3d& T_c0_ci,
+      const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
+      double& e) const;
+
   /*
    * @brief cost Compute the cost of the camera observations
    * @param T_c0_c1 A rigid body transformation takes
@@ -82,6 +87,13 @@ struct Feature {
       const Eigen::Vector3d& x, const Eigen::Vector2d& z,
       double& e) const;
 
+  bool initializeRho(const CamStateServer& cam_states);
+
+  inline void RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
+      const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
+      Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
+      double& w) const;
+
   /*
    * @brief jacobian Compute the Jacobian of the camera observation
    * @param T_c0_c1 A rigid body transformation takes
@@ -97,6 +109,10 @@ struct Feature {
       Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
       double& w) const;
 
+  inline double generateInitialDepth(
+      const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
+      const Eigen::Vector2d& z2) const;
+
   /*
    * @brief generateInitialGuess Compute the initial guess of
    * the feature's 3d position using only two views.
@@ -121,6 +137,7 @@ struct Feature {
   inline bool checkMotion(
       const CamStateServer& cam_states) const;
 
+
   /*
    * @brief InitializeAnchor generates the NxN patch around the
    * feature in the Anchor image
@@ -148,6 +165,13 @@ struct Feature {
   inline bool initializePosition(
       const CamStateServer& cam_states);
 
+  cv::Point2f pixelDistanceAt(
+      const CAMState& cam_state,
+      const StateIDType& cam_state_id,
+      const CameraCalibration& cam,
+      Eigen::Vector3d& in_p) const;
+
+
   /*
    * @brief project PositionToCamera Takes a 3d position in a world frame
    * and projects it into the passed camera frame using pinhole projection
@@ -160,6 +184,21 @@ struct Feature {
       const CameraCalibration& cam,
       Eigen::Vector3d& in_p) const;
 
+  double CompleteCvKernel(
+      const cv::Point2f pose,
+      const StateIDType& cam_state_id,
+      CameraCalibration& cam,
+      std::string type) const;
+
+  double cvKernel(
+      const cv::Point2f pose,
+      std::string type) const;
+
+  double Kernel(
+      const cv::Point2f pose,
+      const cv::Mat frame,
+      std::string type) const;
+
   /*
    * @brief IrradianceAnchorPatch_Camera returns irradiance values
    * of the Anchor Patch position in a camera frame
@@ -169,7 +208,7 @@ struct Feature {
   bool estimate_FrameIrradiance(
       const CAMState& cam_state,
       const StateIDType& cam_state_id,
-      CameraCalibration& cam0,
+      CameraCalibration& cam,
       std::vector<double>& anchorPatch_estimate,
       IlluminationParameter& estimatedIllumination) const;
 
@@ -177,17 +216,22 @@ bool MarkerGeneration(
       ros::Publisher& marker_pub,
       const CamStateServer& cam_states) const;
 
+  bool VisualizeKernel(
+      const CAMState& cam_state,
+      const StateIDType& cam_state_id,
+      CameraCalibration& cam0) const;
+
   bool VisualizePatch(
       const CAMState& cam_state,
       const StateIDType& cam_state_id,
-      CameraCalibration& cam0,
+      CameraCalibration& cam,
-      const std::vector<double> photo_r,
+      const Eigen::VectorXd& photo_r,
       std::stringstream& ss) const;
   /*
-   * @brief projectPixelToPosition uses the calcualted pixels
+   * @brief AnchorPixelToPosition uses the calcualted pixels
    * of the anchor patch to generate 3D positions of all of em
    */
-  inline Eigen::Vector3d projectPixelToPosition(cv::Point2f in_p,
+  inline Eigen::Vector3d AnchorPixelToPosition(cv::Point2f in_p,
       const CameraCalibration& cam);
 
   /*
@@ -233,6 +277,14 @@ inline Eigen::Vector3d projectPixelToPosition(cv::Point2f in_p,
   bool is_initialized;
   bool is_anchored;
 
+
+  cv::Mat abs_xderImage;
+  cv::Mat abs_yderImage;
+
+  cv::Mat xderImage;
+  cv::Mat yderImage;
+
+  cv::Mat anchorImage_blurred;
   cv::Point2f anchor_center_pos;
   cv::Point2f undist_anchor_center_pos;
   // Noise for a normalized feature measurement.
@@ -248,6 +300,26 @@ typedef std::map<FeatureIDType, Feature, std::less<int>,
     Eigen::aligned_allocator<
     std::pair<const FeatureIDType, Feature> > > MapServer;
 
+void Feature::Rhocost(const Eigen::Isometry3d& T_c0_ci,
+    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
+    double& e) const
+{
+  // Compute hi1, hi2, and hi3 as Equation (37).
+  const double& rho = x;
+
+  Eigen::Vector3d h = T_c0_ci.linear()*
+    Eigen::Vector3d(z1(0), z1(1), 1.0) + rho*T_c0_ci.translation();
+  double& h1 = h(0);
+  double& h2 = h(1);
+  double& h3 = h(2);
+
+  // Predict the feature observation in ci frame.
+  Eigen::Vector2d z_hat(h1/h3, h2/h3);
+
+  // Compute the residual.
+  e = (z_hat-z2).squaredNorm();
+  return;
+}
+
 void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
     const Eigen::Vector3d& x, const Eigen::Vector2d& z,
@@ -260,9 +332,9 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
 
   Eigen::Vector3d h = T_c0_ci.linear()*
     Eigen::Vector3d(alpha, beta, 1.0) + rho*T_c0_ci.translation();
-  double& h1 = h(0);
-  double& h2 = h(1);
-  double& h3 = h(2);
+  double h1 = h(0);
+  double h2 = h(1);
+  double h3 = h(2);
 
   // Predict the feature observation in ci frame.
   Eigen::Vector2d z_hat(h1/h3, h2/h3);
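For reference, `Rhocost` above (and `cost` below it) evaluate the standard inverse-depth reprojection error; written out:

$$ h = R^{c_i}_{c_0}\begin{pmatrix}\alpha \\ \beta \\ 1\end{pmatrix} + \rho\, t^{c_i}_{c_0}, \qquad \hat z = \begin{pmatrix}h_1/h_3 \\ h_2/h_3\end{pmatrix}, \qquad e = \lVert \hat z - z \rVert^2, $$

where $(\alpha,\beta)$ is the normalized observation in the anchor camera and $\rho$ the inverse depth; in `Rhocost`, $(\alpha,\beta)$ is fixed to the first measurement $z_1$ and only $\rho$ is estimated.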
@@ -272,6 +344,42 @@ void Feature::cost(const Eigen::Isometry3d& T_c0_ci,
   return;
 }
 
+void Feature::RhoJacobian(const Eigen::Isometry3d& T_c0_ci,
+    const double x, const Eigen::Vector2d& z1, const Eigen::Vector2d& z2,
+    Eigen::Matrix<double, 2, 1>& J, Eigen::Vector2d& r,
+    double& w) const
+{
+  const double& rho = x;
+
+  Eigen::Vector3d h = T_c0_ci.linear()*
+    Eigen::Vector3d(z1(0), z2(1), 1.0) + rho*T_c0_ci.translation();
+  double& h1 = h(0);
+  double& h2 = h(1);
+  double& h3 = h(2);
+
+  // Compute the Jacobian.
+  Eigen::Matrix3d W;
+  W.leftCols<2>() = T_c0_ci.linear().leftCols<2>();
+  W.rightCols<1>() = T_c0_ci.translation();
+
+  J(0,0) = -h1/(h3*h3);
+  J(1,0) = -h2/(h3*h3);
+
+  // Compute the residual.
+  Eigen::Vector2d z_hat(h1/h3, h2/h3);
+  r = z_hat - z2;
+
+  // Compute the weight based on the residual.
+  double e = r.norm();
+  if (e <= optimization_config.huber_epsilon)
+    w = 1.0;
+  else
+    w = optimization_config.huber_epsilon / (2*e);
+
+  return;
+}
+
 void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
     const Eigen::Vector3d& x, const Eigen::Vector2d& z,
     Eigen::Matrix<double, 2, 3>& J, Eigen::Vector2d& r,
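The weight computed at the end of `RhoJacobian` is the same Huber-style down-weighting used by the original `jacobian`:

$$ e = \lVert r \rVert, \qquad w(e) = \begin{cases} 1, & e \le \epsilon_H \\ \dfrac{\epsilon_H}{2e}, & e > \epsilon_H \end{cases} $$

with $\epsilon_H$ taken from `optimization_config.huber_epsilon`, so large residuals enter the normal equations with weight $w^2$.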
@@ -311,9 +419,9 @@ void Feature::jacobian(const Eigen::Isometry3d& T_c0_ci,
   return;
 }
 
-void Feature::generateInitialGuess(
+double Feature::generateInitialDepth(
     const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
-    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const
+    const Eigen::Vector2d& z2) const
 {
   // Construct a least square problem to solve the depth.
   Eigen::Vector3d m = T_c1_c2.linear() * Eigen::Vector3d(z1(0), z1(1), 1.0);
@@ -328,6 +436,15 @@ void Feature::generateInitialGuess(
 
   // Solve for the depth.
   double depth = (A.transpose() * A).inverse() * A.transpose() * b;
+  return depth;
+}
+
+
+void Feature::generateInitialGuess(
+    const Eigen::Isometry3d& T_c1_c2, const Eigen::Vector2d& z1,
+    const Eigen::Vector2d& z2, Eigen::Vector3d& p) const
+{
+  double depth = generateInitialDepth(T_c1_c2, z1, z2);
   p(0) = z1(0) * depth;
   p(1) = z1(1) * depth;
   p(2) = depth;
@@ -377,10 +494,62 @@ bool Feature::checkMotion(const CamStateServer& cam_states) const
   else return false;
 }
 
+double Feature::CompleteCvKernel(
+    const cv::Point2f pose,
+    const StateIDType& cam_state_id,
+    CameraCalibration& cam,
+    std::string type) const
+{
+  double delta = 0;
+
+  if(type == "Sobel_x")
+    delta = ((double)cam.moving_window.find(cam_state_id)->second.dximage.at<short>(pose.y, pose.x))/255.;
+  else if (type == "Sobel_y")
+    delta = ((double)cam.moving_window.find(cam_state_id)->second.dyimage.at<short>(pose.y, pose.x))/255.;
+
+  return delta;
+}
+
+double Feature::cvKernel(
+    const cv::Point2f pose,
+    std::string type) const
+{
+  double delta = 0;
+
+  if(type == "Sobel_x")
+    delta = ((double)xderImage.at<short>(pose.y, pose.x))/255.;
+  else if (type == "Sobel_y")
+    delta = ((double)yderImage.at<short>(pose.y, pose.x))/255.;
+
+  return delta;
+}
+
+double Feature::Kernel(
+    const cv::Point2f pose,
+    const cv::Mat frame,
+    std::string type) const
+{
+  Eigen::Matrix<double, 3, 3> kernel = Eigen::Matrix<double, 3, 3>::Zero();
+  if(type == "Sobel_x")
+    kernel << -1., 0., 1., -2., 0., 2., -1., 0., 1.;
+  else if(type == "Sobel_y")
+    kernel << -1., -2., -1., 0., 0., 0., 1., 2., 1.;
+
+  double delta = 0;
+  int offs = (int)(kernel.rows()-1)/2;
+
+  for(int i = 0; i < kernel.rows(); i++)
+    for(int j = 0; j < kernel.cols(); j++)
+      delta += ((float)frame.at<uint8_t>(pose.y+j-offs, pose.x+i-offs))/255. * (float)kernel(j,i);
+
+  return delta;
+}
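A brief usage sketch of the three helpers added above: `Kernel` convolves a 3x3 Sobel mask with an image on the fly, `cvKernel` reads the feature's precomputed `xderImage`/`yderImage`, and `CompleteCvKernel` reads the `dximage`/`dyimage` stored in a camera's moving window. The function, variable names, and include path here are assumptions for illustration only:

```cpp
#include <opencv2/core.hpp>
#include "msckf_vio/feature.hpp"   // assumed header location of the Feature struct

// Query the image gradient at one pixel through each of the three helpers.
double gradientDemo(const msckf_vio::Feature& feat,
                    const msckf_vio::StateIDType& frame_id,
                    msckf_vio::CameraCalibration& cam,
                    const cv::Mat& gray) {
  cv::Point2f px(120.f, 80.f);
  double gx_direct = feat.Kernel(px, gray, "Sobel_x");                    // convolve now
  double gx_anchor = feat.cvKernel(px, "Sobel_x");                        // anchor xderImage
  double gy_window = feat.CompleteCvKernel(px, frame_id, cam, "Sobel_y"); // moving-window dyimage
  return gx_direct + gx_anchor + gy_window;                               // combined only for the demo
}
```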
 bool Feature::estimate_FrameIrradiance(
     const CAMState& cam_state,
     const StateIDType& cam_state_id,
-    CameraCalibration& cam0,
+    CameraCalibration& cam,
     std::vector<double>& anchorPatch_estimate,
     IlluminationParameter& estimated_illumination) const
 {
@@ -389,11 +558,11 @@ bool Feature::estimate_FrameIrradiance(
   // muliply by a and add b of this frame
 
   auto anchor = observations.begin();
-  if(cam0.moving_window.find(anchor->first) == cam0.moving_window.end())
+  if(cam.moving_window.find(anchor->first) == cam.moving_window.end())
     return false;
 
-  double anchorExposureTime_ms = cam0.moving_window.find(anchor->first)->second.exposureTime_ms;
-  double frameExposureTime_ms = cam0.moving_window.find(cam_state_id)->second.exposureTime_ms;
+  double anchorExposureTime_ms = cam.moving_window.find(anchor->first)->second.exposureTime_ms;
+  double frameExposureTime_ms = cam.moving_window.find(cam_state_id)->second.exposureTime_ms;
 
 
   double a_A = anchorExposureTime_ms;
@@ -528,11 +697,47 @@ bool Feature::MarkerGeneration(
     marker_pub.publish(ma);
 }
 
+
+bool Feature::VisualizeKernel(
+    const CAMState& cam_state,
+    const StateIDType& cam_state_id,
+    CameraCalibration& cam0) const
+{
+  auto anchor = observations.begin();
+  cv::Mat anchorImage = cam0.moving_window.find(anchor->first)->second.image;
+
+  //cv::Mat xderImage;
+  //cv::Mat yderImage;
+
+  //cv::Sobel(anchorImage, xderImage, CV_8UC1, 1, 0, 3);
+  //cv::Sobel(anchorImage, yderImage, CV_8UC1, 0, 1, 3);
+
+  // cv::Mat xderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());
+  // cv::Mat yderImage2(anchorImage.rows, anchorImage.cols, anchorImage_blurred.type());
+
+  /*
+  for(int i = 1; i < anchorImage.rows-1; i++)
+    for(int j = 1; j < anchorImage.cols-1; j++)
+      xderImage2.at<uint8_t>(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_x"));
+
+  for(int i = 1; i < anchorImage.rows-1; i++)
+    for(int j = 1; j < anchorImage.cols-1; j++)
+      yderImage2.at<uint8_t>(j,i) = 255.*fabs(Kernel(cv::Point2f(i,j), anchorImage_blurred, "Sobel_y"));
+  */
+  //cv::imshow("anchor", anchorImage);
+  cv::imshow("xder2", xderImage);
+  cv::imshow("yder2", yderImage);
+
+  cvWaitKey(0);
+}
+
 bool Feature::VisualizePatch(
     const CAMState& cam_state,
     const StateIDType& cam_state_id,
-    CameraCalibration& cam0,
+    CameraCalibration& cam,
-    const std::vector<double> photo_r,
+    const Eigen::VectorXd& photo_r,
     std::stringstream& ss) const
 {
 
@@ -540,7 +745,7 @@ bool Feature::VisualizePatch(
 
   //visu - anchor
   auto anchor = observations.begin();
-  cv::Mat anchorImage = cam0.moving_window.find(anchor->first)->second.image;
+  cv::Mat anchorImage = cam.moving_window.find(anchor->first)->second.image;
   cv::Mat dottedFrame(anchorImage.size(), CV_8UC3);
   cv::cvtColor(anchorImage, dottedFrame, CV_GRAY2RGB);
 
@@ -552,10 +757,10 @@ bool Feature::VisualizePatch(
     cv::Point ys(point.x, point.y);
     cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,255));
   }
-  cam0.featureVisu = dottedFrame.clone();
+  cam.featureVisu = dottedFrame.clone();
 
   // visu - feature
-  cv::Mat current_image = cam0.moving_window.find(cam_state_id)->second.image;
+  cv::Mat current_image = cam.moving_window.find(cam_state_id)->second.image;
   cv::cvtColor(current_image, dottedFrame, CV_GRAY2RGB);
 
   // set position in frame
@@ -563,7 +768,7 @@ bool Feature::VisualizePatch(
   std::vector<double> projectionPatch;
   for(auto point : anchorPatch_3d)
   {
-    cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam0, point);
+    cv::Point2f p_in_c0 = projectPositionToCamera(cam_state, cam_state_id, cam, point);
     projectionPatch.push_back(PixelIrradiance(p_in_c0, current_image));
     // visu - feature
     cv::Point xs(p_in_c0.x, p_in_c0.y);
@@ -571,47 +776,115 @@ bool Feature::VisualizePatch(
     cv::rectangle(dottedFrame, xs, ys, cv::Scalar(0,255,0));
   }
 
-  cv::hconcat(cam0.featureVisu, dottedFrame, cam0.featureVisu);
+  cv::hconcat(cam.featureVisu, dottedFrame, cam.featureVisu);
 
-  // irradiance grid anchor
+
+  // patches visualization
   int N = sqrt(anchorPatch_3d.size());
-  int scale = 20;
+  int scale = 30;
   cv::Mat irradianceFrame(anchorImage.size(), CV_8UC3, cv::Scalar(255, 240, 255));
   cv::resize(irradianceFrame, irradianceFrame, cv::Size(), rescale, rescale);
+
+  // irradiance grid anchor
+  std::stringstream namer;
+  namer << "anchor";
+  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 25),
+      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
+
   for(int i = 0; i<N; i++)
     for(int j = 0; j<N; j++)
       cv::rectangle(irradianceFrame,
-          cv::Point(10+scale*(i+1), 10+scale*j),
-          cv::Point(10+scale*i, 10+scale*(j+1)),
+          cv::Point(30+scale*(i+1), 30+scale*j),
+          cv::Point(30+scale*i, 30+scale*(j+1)),
           cv::Scalar(anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255, anchorPatch[i*N+j]*255),
           CV_FILLED);
 
   // irradiance grid projection
+  namer.str(std::string());
+  namer << "projection";
+  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 45+scale*N),
+      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
+
   for(int i = 0; i<N; i++)
     for(int j = 0; j<N ; j++)
       cv::rectangle(irradianceFrame,
-          cv::Point(10+scale*(i+1), 20+scale*(N+j)),
-          cv::Point(10+scale*(i), 20+scale*(N+j+1)),
+          cv::Point(30+scale*(i+1), 50+scale*(N+j)),
+          cv::Point(30+scale*(i), 50+scale*(N+j+1)),
           cv::Scalar(projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255, projectionPatch[i*N+j]*255),
           CV_FILLED);
 
+  // true irradiance at feature
+  // get current observation
+
+  namer.str(std::string());
+  namer << "feature";
+  cv::putText(irradianceFrame, namer.str() , cvPoint(30, 65+scale*2*N),
+      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
+
+  cv::Point2f p_f;
+  if(cam.id == 0)
+    p_f = cv::Point2f(observations.find(cam_state_id)->second(0),observations.find(cam_state_id)->second(1));
+  else if(cam.id == 1)
+    p_f = cv::Point2f(observations.find(cam_state_id)->second(2),observations.find(cam_state_id)->second(3));
+  // move to real pixels
+  p_f = image_handler::distortPoint(p_f, cam.intrinsics, cam.distortion_model, cam.distortion_coeffs);
+
+  for(int i = 0; i<N; i++)
+  {
+    for(int j = 0; j<N ; j++)
+    {
+      float irr = PixelIrradiance(cv::Point2f(p_f.x + (i-(N-1)/2), p_f.y + (j-(N-1)/2)), current_image);
+      cv::rectangle(irradianceFrame,
+          cv::Point(30+scale*(i+1), 70+scale*(2*N+j)),
+          cv::Point(30+scale*(i), 70+scale*(2*N+j+1)),
+          cv::Scalar(irr*255, irr*255, irr*255),
+          CV_FILLED);
+    }
+  }
+
   // residual grid projection, positive - red, negative - blue colored
+  namer.str(std::string());
+  namer << "residual";
+  std::cout << "-- photo_r -- \n" << photo_r << " -- " << std::endl;
+  cv::putText(irradianceFrame, namer.str() , cvPoint(30+scale*N, scale*N/2-5),
+      cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0,0,0), 1, CV_AA);
+
   for(int i = 0; i<N; i++)
     for(int j = 0; j<N; j++)
-      if(photo_r[i*N+j]>0)
+      if(photo_r(2*(i*N+j))>0)
         cv::rectangle(irradianceFrame,
-            cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)),
-            cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)),
-            cv::Scalar(255 - photo_r[i*N+j]*255, 255 - photo_r[i*N+j]*255, 255),
+            cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
+            cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
+            cv::Scalar(255 - photo_r(2*(i*N+j)+1)*255, 255 - photo_r(2*(i*N+j)+1)*255, 255),
             CV_FILLED);
       else
         cv::rectangle(irradianceFrame,
-            cv::Point(20+scale*(N+i+1), 15+scale*(N/2+j)),
-            cv::Point(20+scale*(N+i), 15+scale*(N/2+j+1)),
-            cv::Scalar(255, 255 + photo_r[i*N+j]*255, 255 + photo_r[i*N+j]*255),
+            cv::Point(40+scale*(N+i+1), 15+scale*(N/2+j)),
+            cv::Point(40+scale*(N+i), 15+scale*(N/2+j+1)),
+            cv::Scalar(255, 255 + photo_r(2*(i*N+j)+1)*255, 255 + photo_r(2*(i*N+j)+1)*255),
             CV_FILLED);
 
-  cv::hconcat(cam0.featureVisu, irradianceFrame, cam0.featureVisu);
+  // gradient arrow
+  /*
+  cv::arrowedLine(irradianceFrame,
+      cv::Point(30+scale*(N/2 +0.5), 50+scale*(N+(N/2)+0.5)),
+      cv::Point(30+scale*(N/2+0.5)+scale*gradientVector.x, 50+scale*(N+(N/2)+0.5)+scale*gradientVector.y),
+      cv::Scalar(100, 0, 255),
+      1);
+  */
+
+  // residual gradient direction
+  /*
+  cv::arrowedLine(irradianceFrame,
+      cv::Point(40+scale*(N+N/2+0.5), 15+scale*((N-0.5))),
+      cv::Point(40+scale*(N+N/2+0.5)+scale*residualVector.x, 15+scale*(N-0.5)+scale*residualVector.y),
+      cv::Scalar(0, 255, 175),
+      3);
+  */
+
+  cv::hconcat(cam.featureVisu, irradianceFrame, cam.featureVisu);
 
   /*
   // visualize position of used observations and resulting feature position
@@ -643,15 +916,15 @@ bool Feature::VisualizePatch(
 
   // draw, x y position and arrow with direction - write z next to it
 
-  cv::resize(cam0.featureVisu, cam0.featureVisu, cv::Size(), rescale, rescale);
+  cv::resize(cam.featureVisu, cam.featureVisu, cv::Size(), rescale, rescale);
 
-  cv::hconcat(cam0.featureVisu, positionFrame, cam0.featureVisu);
+  cv::hconcat(cam.featureVisu, positionFrame, cam.featureVisu);
   */
   // write feature position
   std::stringstream pos_s;
   pos_s << "u: " << observations.begin()->second(0) << " v: " << observations.begin()->second(1);
-  cv::putText(cam0.featureVisu, ss.str() , cvPoint(anchorImage.rows + 100, anchorImage.cols - 30),
+  cv::putText(cam.featureVisu, ss.str() , cvPoint(anchorImage.rows + 100, anchorImage.cols - 30),
       cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
   // create line?
 
@@ -659,16 +932,51 @@ bool Feature::VisualizePatch(
 
   std::stringstream loc;
   // loc << "/home/raphael/dev/MSCKF_ws/img/feature_" << std::to_string(ros::Time::now().toSec()) << ".jpg";
-  //cv::imwrite(loc.str(), cam0.featureVisu);
+  //cv::imwrite(loc.str(), cam.featureVisu);
 
-  cv::imshow("patch", cam0.featureVisu);
+  cv::imshow("patch", cam.featureVisu);
-  cvWaitKey(0);
+  cvWaitKey(1);
 }
 
 float Feature::PixelIrradiance(cv::Point2f pose, cv::Mat image) const
 {
 
-  return ((float)image.at<uint8_t>(pose.y, pose.x))/255;
+  return ((float)image.at<uint8_t>(pose.y, pose.x))/255.;
+}
+
+cv::Point2f Feature::pixelDistanceAt(
+    const CAMState& cam_state,
+    const StateIDType& cam_state_id,
+    const CameraCalibration& cam,
+    Eigen::Vector3d& in_p) const
+{
+  cv::Point2f cam_p = projectPositionToCamera(cam_state, cam_state_id, cam, in_p);
+
+  // create vector of patch in pixel plane
+  std::vector<cv::Point2f> surroundingPoints;
+  surroundingPoints.push_back(cv::Point2f(cam_p.x+1, cam_p.y));
+  surroundingPoints.push_back(cv::Point2f(cam_p.x-1, cam_p.y));
+  surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y+1));
+  surroundingPoints.push_back(cv::Point2f(cam_p.x, cam_p.y-1));
+
+  std::vector<cv::Point2f> pure;
+  image_handler::undistortPoints(surroundingPoints,
+      cam.intrinsics,
+      cam.distortion_model,
+      cam.distortion_coeffs,
+      pure);
+
+  // transfrom position to camera frame
+  // to get distance multiplier
+  Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
+  const Eigen::Vector3d& t_c0_w = cam_state.position;
+  Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);
+
+  // returns the distance between the pixel points in space
+  cv::Point2f distance(fabs(pure[0].x - pure[1].x), fabs(pure[2].y - pure[3].y));
+
+  return distance;
 }
 
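What `pixelDistanceAt` returns, in symbols: the projection of the 3D point is surrounded by its four axis-aligned pixel neighbours, those neighbours are undistorted into normalized ("pure") coordinates, and the spread between opposite neighbours is reported per axis,

$$ d_u = \bigl| x^{\text{pure}}_{u+1} - x^{\text{pure}}_{u-1} \bigr|, \qquad d_v = \bigl| y^{\text{pure}}_{v+1} - y^{\text{pure}}_{v-1} \bigr|, $$

i.e. a local pixel-to-normalized-coordinate scale around the feature.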
 cv::Point2f Feature::projectPositionToCamera(
@@ -680,30 +988,44 @@ cv::Point2f Feature::projectPositionToCamera(
   Eigen::Isometry3d T_c0_w;
 
   cv::Point2f out_p;
+  cv::Point2f my_p;
   // transfrom position to camera frame
+
+  // cam0 position
   Eigen::Matrix3d R_w_c0 = quaternionToRotation(cam_state.orientation);
   const Eigen::Vector3d& t_c0_w = cam_state.position;
-  Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);
-  out_p = cv::Point2f(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));
-
-  // if(cam_state_id == observations.begin()->first)
-  //printf("undist:\n \tproj pos: %f, %f\n\ttrue pos: %f, %f\n", out_p.x, out_p.y, undist_anchor_center_pos.x, undist_anchor_center_pos.y);
-
-  cv::Point2f my_p = image_handler::distortPoint(out_p,
-      cam.intrinsics,
-      cam.distortion_model,
-      cam.distortion_coeffs);
-
-  // printf("truPosition: %f, %f, %f\n", position.x(), position.y(), position.z());
-  // printf("camPosition: %f, %f, %f\n", p_c0(0), p_c0(1), p_c0(2));
-  // printf("Photo projection: %f, %f\n", my_p[0].x, my_p[0].y);
+
+  // project point according to model
+  if(cam.id == 0)
+  {
+    Eigen::Vector3d p_c0 = R_w_c0 * (in_p-t_c0_w);
+    out_p = cv::Point2f(p_c0(0)/p_c0(2), p_c0(1)/p_c0(2));
+  }
+  // if camera is one, calcualte the cam1 position from cam0 position first
+  else if(cam.id == 1)
+  {
+    // cam1 position
+    Eigen::Matrix3d R_c0_c1 = CAMState::T_cam0_cam1.linear();
+    Eigen::Matrix3d R_w_c1 = R_c0_c1 * R_w_c0;
+    Eigen::Vector3d t_c1_w = t_c0_w - R_w_c1.transpose()*CAMState::T_cam0_cam1.translation();
+
+    Eigen::Vector3d p_c1 = R_w_c1 * (in_p-t_c1_w);
+    out_p = cv::Point2f(p_c1(0)/p_c1(2), p_c1(1)/p_c1(2));
+  }
+
+  // undistort point according to camera model
+  if (cam.distortion_model.substr(0,3) == "pre-")
+    my_p = cv::Point2f(out_p.x * cam.intrinsics[0] + cam.intrinsics[2], out_p.y * cam.intrinsics[1] + cam.intrinsics[3]);
+  else
+    my_p = image_handler::distortPoint(out_p,
+        cam.intrinsics,
+        cam.distortion_model,
+        cam.distortion_coeffs);
+
   return my_p;
 }
 
-Eigen::Vector3d Feature::projectPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam)
+Eigen::Vector3d Feature::AnchorPixelToPosition(cv::Point2f in_p, const CameraCalibration& cam)
 {
   // use undistorted position of point of interest
   // project it back into 3D space using pinhole model
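For reference, the `cam.id == 1` branch above composes the cam1 pose from the cam0 state and the fixed stereo extrinsics `T_cam0_cam1`, then projects with the pinhole model:

$$ R^{c_1}_{w} = R^{c_1}_{c_0} R^{c_0}_{w}, \qquad t^{w}_{c_1} = t^{w}_{c_0} - \bigl(R^{c_1}_{w}\bigr)^{\top} t_{c_0 c_1}, \qquad p_{c_1} = R^{c_1}_{w}\,(p_w - t^{w}_{c_1}), \qquad \hat u = \begin{pmatrix} p_{c_1,x}/p_{c_1,z} \\ p_{c_1,y}/p_{c_1,z} \end{pmatrix}. $$

For a `pre-` distortion model the normalized point is then mapped to pixels with the camera matrix alone; otherwise it is passed through `image_handler::distortPoint`.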
@@ -724,72 +1046,240 @@ bool Feature::initializeAnchor(const CameraCalibration& cam, int N)
 
   auto anchor = observations.begin();
   if(cam.moving_window.find(anchor->first) == cam.moving_window.end())
+  {
     return false;
+  }
   cv::Mat anchorImage = cam.moving_window.find(anchor->first)->second.image;
+  cv::Mat anchorImage_deeper;
+  anchorImage.convertTo(anchorImage_deeper,CV_16S);
+  //TODO remove this?
+
+  cv::Sobel(anchorImage_deeper, xderImage, -1, 1, 0, 3);
+  cv::Sobel(anchorImage_deeper, yderImage, -1, 0, 1, 3);
+
+  xderImage/=8.;
+  yderImage/=8.;
+
+  cv::convertScaleAbs(xderImage, abs_xderImage);
+  cv::convertScaleAbs(yderImage, abs_yderImage);
+
+  cv::GaussianBlur(anchorImage, anchorImage_blurred, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT);
+
   auto u = anchor->second(0);//*cam.intrinsics[0] + cam.intrinsics[2];
   auto v = anchor->second(1);//*cam.intrinsics[1] + cam.intrinsics[3];
 
-  //testing
-  undist_anchor_center_pos = cv::Point2f(u,v);
-
-  //for NxN patch pixels around feature
-  int count = 0;
-  // get feature in undistorted pixel space
-  // this only reverts from 'pure' space into undistorted pixel space using camera matrix
-  cv::Point2f und_pix_p = image_handler::distortPoint(cv::Point2f(u, v),
-      cam.intrinsics,
-      cam.distortion_model,
-      0);
-  // create vector of patch in pixel plane
-  std::vector<cv::Point2f>und_pix_v;
-  for(double u_run = -n; u_run <= n; u_run++)
-    for(double v_run = -n; v_run <= n; v_run++)
-      und_pix_v.push_back(cv::Point2f(und_pix_p.x+u_run, und_pix_p.y+v_run));
-
-  //create undistorted pure points
-  std::vector<cv::Point2f> und_v;
-  image_handler::undistortPoints(und_pix_v,
-      cam.intrinsics,
-      cam.distortion_model,
-      0,
-      und_v);
-
-  //create distorted pixel points
-  anchorPatch_real = image_handler::distortPoints(und_v,
-      cam.intrinsics,
-      cam.distortion_model,
-      cam.distortion_coeffs);
+  // check if image has been pre-undistorted
+  if(cam.distortion_model.substr(0,3) == "pre")
+  {
+    //project onto pixel plane
+    undist_anchor_center_pos = cv::Point2f(u * cam.intrinsics[0] + cam.intrinsics[2], v * cam.intrinsics[1] + cam.intrinsics[3]);
+    // create vector of patch in pixel plane
+    for(double u_run = -n; u_run <= n; u_run++)
+      for(double v_run = -n; v_run <= n; v_run++)
+        anchorPatch_real.push_back(cv::Point2f(undist_anchor_center_pos.x+u_run, undist_anchor_center_pos.y+v_run));
+
+    //project back into u,v
+    for(int i = 0; i < N*N; i++)
+      anchorPatch_ideal.push_back(cv::Point2f((anchorPatch_real[i].x-cam.intrinsics[2])/cam.intrinsics[0], (anchorPatch_real[i].y-cam.intrinsics[3])/cam.intrinsics[1]));
+  }
+  else
+  {
+    // get feature in undistorted pixel space
+    // this only reverts from 'pure' space into undistorted pixel space using camera matrix
+    cv::Point2f und_pix_p = image_handler::distortPoint(cv::Point2f(u, v),
+        cam.intrinsics,
+        cam.distortion_model,
+        cam.distortion_coeffs);
+    // create vector of patch in pixel plane
+    for(double u_run = -n; u_run <= n; u_run++)
+      for(double v_run = -n; v_run <= n; v_run++)
+        anchorPatch_real.push_back(cv::Point2f(und_pix_p.x+u_run, und_pix_p.y+v_run));
+
+    //create undistorted pure points
+    image_handler::undistortPoints(anchorPatch_real,
+        cam.intrinsics,
+        cam.distortion_model,
+        cam.distortion_coeffs,
+        anchorPatch_ideal);
+  }
 
   // save anchor position for later visualisaztion
   anchor_center_pos = anchorPatch_real[(N*N-1)/2];
 
   // save true pixel Patch position
   for(auto point : anchorPatch_real)
-  {
-    if(point.x - n < 0 || point.x + n >= cam.resolution(0) || point.y - n < 0 || point.y + n >= cam.resolution(1))
+    if(point.x - n < 0 || point.x + n >= cam.resolution(0)-1 || point.y - n < 0 || point.y + n >= cam.resolution(1)-1)
       return false;
-  }
+
   for(auto point : anchorPatch_real)
     anchorPatch.push_back(PixelIrradiance(point, anchorImage));
 
   // project patch pixel to 3D space in camera coordinate system
-  for(auto point : und_v)
-  {
-    anchorPatch_ideal.push_back(point);
-    anchorPatch_3d.push_back(projectPixelToPosition(point, cam));
-  }
+  for(auto point : anchorPatch_ideal)
+    anchorPatch_3d.push_back(AnchorPixelToPosition(point, cam));
+
   is_anchored = true;
   return true;
 }
+
+bool Feature::initializeRho(const CamStateServer& cam_states) {
+
+  // Organize camera poses and feature observations properly.
+  std::vector<Eigen::Isometry3d,
+    Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
+  std::vector<Eigen::Vector2d,
+    Eigen::aligned_allocator<Eigen::Vector2d> > measurements(0);
+
+  for (auto& m : observations) {
+    auto cam_state_iter = cam_states.find(m.first);
+    if (cam_state_iter == cam_states.end()) continue;
+
+    // Add the measurement.
+    measurements.push_back(m.second.head<2>());
+    measurements.push_back(m.second.tail<2>());
+
+    // This camera pose will take a vector from this camera frame
+    // to the world frame.
+    Eigen::Isometry3d cam0_pose;
+    cam0_pose.linear() = quaternionToRotation(
+        cam_state_iter->second.orientation).transpose();
+    cam0_pose.translation() = cam_state_iter->second.position;
+
+    Eigen::Isometry3d cam1_pose;
+    cam1_pose = cam0_pose * CAMState::T_cam0_cam1.inverse();
+
+    cam_poses.push_back(cam0_pose);
+    cam_poses.push_back(cam1_pose);
+  }
+
+  // All camera poses should be modified such that it takes a
+  // vector from the first camera frame in the buffer to this
+  // camera frame.
+  Eigen::Isometry3d T_c0_w = cam_poses[0];
+  T_anchor_w = T_c0_w;
+  for (auto& pose : cam_poses)
+    pose = pose.inverse() * T_c0_w;
+
+  // Generate initial guess
+  double initial_depth = 0;
+  initial_depth = generateInitialDepth(cam_poses[cam_poses.size()-1], measurements[0],
+      measurements[measurements.size()-1]);
+
+  double solution = 1.0/initial_depth;
+
+  // Apply Levenberg-Marquart method to solve for the 3d position.
+  double lambda = optimization_config.initial_damping;
+  int inner_loop_cntr = 0;
+  int outer_loop_cntr = 0;
+  bool is_cost_reduced = false;
+  double delta_norm = 0;
+
+  // Compute the initial cost.
+  double total_cost = 0.0;
+  for (int i = 0; i < cam_poses.size(); ++i) {
+    double this_cost = 0.0;
+    Rhocost(cam_poses[i], solution, measurements[0], measurements[i], this_cost);
+    total_cost += this_cost;
+  }
+
+  // Outer loop.
+  do {
+    Eigen::Matrix<double, 1, 1> A = Eigen::Matrix<double, 1, 1>::Zero();
+    Eigen::Matrix<double, 1, 1> b = Eigen::Matrix<double, 1, 1>::Zero();
+
+    for (int i = 0; i < cam_poses.size(); ++i) {
+      Eigen::Matrix<double, 2, 1> J;
+      Eigen::Vector2d r;
+      double w;
+
+      RhoJacobian(cam_poses[i], solution, measurements[0], measurements[i], J, r, w);
+
+      if (w == 1) {
+        A += J.transpose() * J;
+        b += J.transpose() * r;
+      } else {
+        double w_square = w * w;
+        A += w_square * J.transpose() * J;
+        b += w_square * J.transpose() * r;
+      }
+    }
+
+    // Inner loop.
+    // Solve for the delta that can reduce the total cost.
+    do {
+      Eigen::Matrix<double, 1, 1> damper = lambda*Eigen::Matrix<double, 1, 1>::Identity();
+      Eigen::Matrix<double, 1, 1> delta = (A+damper).ldlt().solve(b);
+      double new_solution = solution - delta(0,0);
+      delta_norm = delta.norm();
+
+      double new_cost = 0.0;
+      for (int i = 0; i < cam_poses.size(); ++i) {
+        double this_cost = 0.0;
+        Rhocost(cam_poses[i], new_solution, measurements[0], measurements[i], this_cost);
+        new_cost += this_cost;
+      }
+
+      if (new_cost < total_cost) {
+        is_cost_reduced = true;
+        solution = new_solution;
+        total_cost = new_cost;
+        lambda = lambda/10 > 1e-10 ? lambda/10 : 1e-10;
+      } else {
+        is_cost_reduced = false;
+        lambda = lambda*10 < 1e12 ? lambda*10 : 1e12;
+      }
+
+    } while (inner_loop_cntr++ <
+        optimization_config.inner_loop_max_iteration && !is_cost_reduced);
+
+    inner_loop_cntr = 0;
+
+  } while (outer_loop_cntr++ <
+      optimization_config.outer_loop_max_iteration &&
+      delta_norm > optimization_config.estimation_precision);
+
+  // Covert the feature position from inverse depth
+  // representation to its 3d coordinate.
+  Eigen::Vector3d final_position(measurements[0](0)/solution,
+      measurements[0](1)/solution, 1.0/solution);
+
+  // Check if the solution is valid. Make sure the feature
+  // is in front of every camera frame observing it.
+  bool is_valid_solution = true;
+  for (const auto& pose : cam_poses) {
+    Eigen::Vector3d position =
+      pose.linear()*final_position + pose.translation();
+    if (position(2) <= 0) {
+      is_valid_solution = false;
+      break;
+    }
+  }
+
+  //save inverse depth distance from camera
+  anchor_rho = solution;
+
+  // Convert the feature position to the world frame.
+  position = T_c0_w.linear()*final_position + T_c0_w.translation();
+
+  if (is_valid_solution)
+    is_initialized = true;
+
+  return is_valid_solution;
+}
+
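`initializeRho` above is a one-dimensional Levenberg-Marquardt solver over the inverse depth $\rho$: each iteration accumulates the Huber-weighted normal equations and applies a damped Gauss-Newton step,

$$ A = \sum_i w_i^2\, J_i^{\top} J_i, \qquad b = \sum_i w_i^2\, J_i^{\top} r_i, \qquad \rho \leftarrow \rho - (A + \lambda)^{-1} b, $$

with $\lambda$ divided by 10 after a successful step (floored at $10^{-10}$) and multiplied by 10 otherwise (capped at $10^{12}$), mirroring the 3D Levenberg-Marquardt loop in `initializePosition`.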
 bool Feature::initializePosition(const CamStateServer& cam_states) {
 
   // Organize camera poses and feature observations properly.
   std::vector<Eigen::Isometry3d,
     Eigen::aligned_allocator<Eigen::Isometry3d> > cam_poses(0);
   std::vector<Eigen::Vector2d,
@@ -927,6 +1417,7 @@ bool Feature::initializePosition(const CamStateServer& cam_states) {
 
   //save inverse depth distance from camera
   anchor_rho = solution(2);
+  std::cout << "from feature: " << anchor_rho << std::endl;
 
   // Convert the feature position to the world frame.
   position = T_c0_w.linear()*final_position + T_c0_w.translation();
@@ -16,6 +16,16 @@ namespace msckf_vio {
 */
 namespace image_handler {
 
+cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics);
+cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics);
+
+void undistortImage(
+    cv::InputArray src,
+    cv::OutputArray dst,
+    const std::string& distortion_model,
+    const cv::Vec4d& intrinsics,
+    const cv::Vec4d& distortion_coeffs);
+
 void undistortPoints(
     const std::vector<cv::Point2f>& pts_in,
     const cv::Vec4d& intrinsics,
@@ -36,6 +46,16 @@ cv::Point2f distortPoint(
     const cv::Vec4d& intrinsics,
     const std::string& distortion_model,
     const cv::Vec4d& distortion_coeffs);
 
+void undistortPoint(
+    const cv::Point2f& pt_in,
+    const cv::Vec4d& intrinsics,
+    const std::string& distortion_model,
+    const cv::Vec4d& distortion_coeffs,
+    cv::Point2f& pt_out,
+    const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(),
+    const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0));
+
 }
 }
 #endif
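A minimal sketch of how the two new pinhole helpers relate to each other, based on their definitions shown later in image_handler.cpp. The include path and the intrinsics values are assumptions, not taken from the repository:

```cpp
#include <opencv2/core.hpp>
#include "msckf_vio/image_handler.h"   // assumed header location in this fork

int main() {
  cv::Vec4d intrinsics(458.0, 457.0, 367.0, 248.0);   // fx, fy, cx, cy (made-up values)
  cv::Point2f normalized(0.12f, -0.05f);              // point on the normalized image plane

  // "Down" projects onto the pixel grid, "up" goes back to the normalized plane.
  cv::Point2f pixel = msckf_vio::image_handler::pinholeDownProject(normalized, intrinsics);
  cv::Point2f back  = msckf_vio::image_handler::pinholeUpProject(pixel, intrinsics);

  // For the "pre-radtan"/"pre-equidistant" models these two calls are the whole
  // (un)distortion step, since the images were already undistorted beforehand.
  (void)back;
  return 0;
}
```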
@@ -320,6 +320,8 @@ private:
     return;
   }
 
+  bool STREAMPAUSE;
+
   // Indicate if this is the first image message.
   bool is_first_img;
 
@@ -14,7 +14,7 @@
 #include <string>
 #include <Eigen/Dense>
 #include <Eigen/Geometry>
+#include <math.h>
 #include <boost/shared_ptr.hpp>
 #include <opencv2/opencv.hpp>
 #include <opencv2/video.hpp>
@@ -38,6 +38,8 @@
 #include <message_filters/subscriber.h>
 #include <message_filters/time_synchronizer.h>
 
+#define PI 3.14159265
+
 namespace msckf_vio {
 /*
  * @brief MsckfVio Implements the algorithm in
@@ -200,53 +202,111 @@ class MsckfVio {
         Eigen::Vector4d& r);
     // This function computes the Jacobian of all measurements viewed
     // in the given camera states of this feature.
-    void featureJacobian(const FeatureIDType& feature_id,
+    bool featureJacobian(
+        const FeatureIDType& feature_id,
         const std::vector<StateIDType>& cam_state_ids,
         Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
 
-    void PhotometricMeasurementJacobian(
+    void twodotMeasurementJacobian(
         const StateIDType& cam_state_id,
         const FeatureIDType& feature_id,
-        Eigen::MatrixXd& H_x,
-        Eigen::MatrixXd& H_y,
-        Eigen::VectorXd& r);
+        Eigen::MatrixXd& H_x, Eigen::MatrixXd& H_y, Eigen::VectorXd& r);
 
-    void PhotometricFeatureJacobian(
-        const FeatureIDType& feature_id,
-        const std::vector<StateIDType>& cam_state_ids,
-        Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
+    bool ConstructJacobians(
+        Eigen::MatrixXd& H_rho,
+        Eigen::MatrixXd& H_pl,
+        Eigen::MatrixXd& H_pA,
+        const Feature& feature,
+        const StateIDType& cam_state_id,
+        Eigen::MatrixXd& H_xl,
+        Eigen::MatrixXd& H_yl);
+
+    bool PhotometricPatchPointResidual(
+        const StateIDType& cam_state_id,
+        const Feature& feature,
+        Eigen::VectorXd& r);
+
+    bool PhotometricPatchPointJacobian(
+        const CAMState& cam_state,
+        const StateIDType& cam_state_id,
+        const Feature& feature,
+        Eigen::Vector3d point,
+        int count,
+        Eigen::Matrix<double, 2, 1>& H_rhoj,
+        Eigen::Matrix<double, 2, 6>& H_plj,
+        Eigen::Matrix<double, 2, 6>& H_pAj,
+        Eigen::Matrix<double, 2, 4>& dI_dhj);
+
+    bool PhotometricMeasurementJacobian(
+        const StateIDType& cam_state_id,
+        const FeatureIDType& feature_id,
+        Eigen::MatrixXd& H_x,
+        Eigen::MatrixXd& H_y,
+        Eigen::VectorXd& r);
+
+    bool twodotFeatureJacobian(
+        const FeatureIDType& feature_id,
+        const std::vector<StateIDType>& cam_state_ids,
+        Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
+
+    bool PhotometricFeatureJacobian(
+        const FeatureIDType& feature_id,
+        const std::vector<StateIDType>& cam_state_ids,
+        Eigen::MatrixXd& H_x, Eigen::VectorXd& r);
+
+    void photometricMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r);
     void measurementUpdate(const Eigen::MatrixXd& H,
         const Eigen::VectorXd& r);
+    void twoMeasurementUpdate(const Eigen::MatrixXd& H, const Eigen::VectorXd& r);
+
     bool gatingTest(const Eigen::MatrixXd& H,
-        const Eigen::VectorXd&r, const int& dof);
+        const Eigen::VectorXd&r, const int& dof, int filter=0);
     void removeLostFeatures();
     void findRedundantCamStates(
         std::vector<StateIDType>& rm_cam_state_ids);
+
+    void pruneLastCamStateBuffer();
     void pruneCamStateBuffer();
     // Reset the system online if the uncertainty is too large.
     void onlineReset();
 
     // Photometry flag
-    bool PHOTOMETRIC;
+    int FILTER;
 
     // debug flag
+    bool STREAMPAUSE;
     bool PRINTIMAGES;
     bool GROUNDTRUTH;
 
     bool nan_flag;
     bool play;
     double last_time_bound;
+    double time_offset;
 
     // Patch size for Photometry
     int N;
+    // Image rescale
+    int SCALE;
     // Chi squared test table.
     static std::map<int, double> chi_squared_test_table;
 
+    double eval_time;
+
+    IMUState timed_old_imu_state;
+    IMUState timed_old_true_state;
+
+    IMUState old_imu_state;
+    IMUState old_true_state;
+
+    // change in position
+    Eigen::Vector3d delta_position;
+    Eigen::Vector3d delta_orientation;
+
     // State vector
     StateServer state_server;
+    StateServer photometric_state_server;
 
     // Ground truth state vector
     StateServer true_state_server;
@@ -309,6 +369,7 @@ class MsckfVio {
     // Subscribers and publishers
     ros::Subscriber imu_sub;
     ros::Subscriber truth_sub;
+    ros::Publisher truth_odom_pub;
     ros::Publisher odom_pub;
     ros::Publisher marker_pub;
     ros::Publisher feature_pub;
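The old PHOTOMETRIC boolean has become an integer FILTER flag. A hypothetical sketch of how it could be consumed is given below; the actual wiring lives in the src/msckf_vio.cpp diff, which is suppressed further down, so only the parameter name and the value meanings (taken from the launch-file comment) are grounded in this page:

```cpp
#include <ros/ros.h>

// Hypothetical helper, not the repository's code: read the FILTER flag set in
// the launch files below. Values per the launch-file comment:
// 0 = msckf, 1 = photometric, 2 = two.
void loadFilterFlag(ros::NodeHandle& nh, int& filter) {
  nh.param<int>("FILTER", filter, 0);
  // A plausible dispatch would then pick featureJacobian/measurementUpdate (0),
  // PhotometricFeatureJacobian/photometricMeasurementUpdate (1), or
  // twodotFeatureJacobian/twoMeasurementUpdate (2).
}
```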
38 launch/image_processor_tinytum.launch (Normal file)
@@ -0,0 +1,38 @@
<launch>

  <arg name="robot" default="firefly_sbx"/>
  <arg name="calibration_file"
    default="$(find msckf_vio)/config/camchain-imucam-tum-scaled.yaml"/>

  <!-- Image Processor Nodelet  -->
  <group ns="$(arg robot)">
    <node pkg="nodelet" type="nodelet" name="image_processor"
      args="standalone msckf_vio/ImageProcessorNodelet"
      output="screen"
      >

      <!-- Debugging Flaggs -->
      <param name="StreamPause" value="true"/>

      <rosparam command="load" file="$(arg calibration_file)"/>
      <param name="grid_row" value="4"/>
      <param name="grid_col" value="4"/>
      <param name="grid_min_feature_num" value="3"/>
      <param name="grid_max_feature_num" value="5"/>
      <param name="pyramid_levels" value="3"/>
      <param name="patch_size" value="15"/>
      <param name="fast_threshold" value="10"/>
      <param name="max_iteration" value="30"/>
      <param name="track_precision" value="0.01"/>
      <param name="ransac_threshold" value="3"/>
      <param name="stereo_threshold" value="5"/>

      <remap from="~imu" to="/imu0"/>
      <remap from="~cam0_image" to="/cam0/image_raw"/>
      <remap from="~cam1_image" to="/cam1/image_raw"/>

    </node>
  </group>

</launch>
@@ -11,6 +11,9 @@
     output="screen"
     >
 
+    <!-- Debugging Flaggs -->
+    <param name="StreamPause" value="true"/>
+
     <rosparam command="load" file="$(arg calibration_file)"/>
     <param name="grid_row" value="4"/>
     <param name="grid_col" value="4"/>
@@ -22,10 +22,10 @@
     <param name="PHOTOMETRIC" value="true"/>
 
     <!-- Debugging Flaggs -->
-    <param name="PrintImages" value="false"/>
+    <param name="PrintImages" value="true"/>
     <param name="GroundTruth" value="false"/>
 
-    <param name="patch_size_n" value="7"/>
+    <param name="patch_size_n" value="3"/>
 
     <!-- Calibration parameters -->
     <rosparam command="load" file="$(arg calibration_file)"/>
@@ -17,6 +17,18 @@
     args='standalone msckf_vio/MsckfVioNodelet'
     output="screen">
 
+    <!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
+    <param name="FILTER" value="1"/>
+
+    <!-- Debugging Flaggs -->
+    <param name="StreamPause" value="true"/>
+    <param name="PrintImages" value="true"/>
+    <param name="GroundTruth" value="false"/>
+
+    <param name="patch_size_n" value="5"/>
+    <param name="image_scale" value ="1"/>
+
     <!-- Calibration parameters -->
     <rosparam command="load" file="$(arg calibration_file)"/>
77 launch/msckf_vio_tinytum.launch (Normal file)
@@ -0,0 +1,77 @@
<launch>

  <arg name="robot" default="firefly_sbx"/>
  <arg name="fixed_frame_id" default="world"/>
  <arg name="calibration_file"
    default="$(find msckf_vio)/config/camchain-imucam-tum-scaled.yaml"/>

  <!-- Image Processor Nodelet  -->
  <include file="$(find msckf_vio)/launch/image_processor_tinytum.launch">
    <arg name="robot" value="$(arg robot)"/>
    <arg name="calibration_file" value="$(arg calibration_file)"/>
  </include>

  <!-- Msckf Vio Nodelet  -->
  <group ns="$(arg robot)">
    <node pkg="nodelet" type="nodelet" name="vio"
      args='standalone msckf_vio/MsckfVioNodelet'
      output="screen">

      <param name="FILTER" value="0"/>

      <!-- Debugging Flaggs -->
      <param name="StreamPause" value="true"/>
      <param name="PrintImages" value="false"/>
      <param name="GroundTruth" value="false"/>

      <param name="patch_size_n" value="3"/>
      <param name="image_scale" value ="1"/>

      <!-- Calibration parameters -->
      <rosparam command="load" file="$(arg calibration_file)"/>

      <param name="publish_tf" value="true"/>
      <param name="frame_rate" value="20"/>
      <param name="fixed_frame_id" value="$(arg fixed_frame_id)"/>
      <param name="child_frame_id" value="odom"/>
      <param name="max_cam_state_size" value="12"/>
      <param name="position_std_threshold" value="8.0"/>

      <param name="rotation_threshold" value="0.2618"/>
      <param name="translation_threshold" value="0.4"/>
      <param name="tracking_rate_threshold" value="0.5"/>

      <!-- Feature optimization config -->
      <param name="feature/config/translation_threshold" value="-1.0"/>

      <!-- These values should be standard deviation -->
      <param name="noise/gyro" value="0.005"/>
      <param name="noise/acc" value="0.05"/>
      <param name="noise/gyro_bias" value="0.001"/>
      <param name="noise/acc_bias" value="0.01"/>
      <param name="noise/feature" value="0.035"/>

      <param name="initial_state/velocity/x" value="0.0"/>
      <param name="initial_state/velocity/y" value="0.0"/>
      <param name="initial_state/velocity/z" value="0.0"/>

      <!-- These values should be covariance -->
      <param name="initial_covariance/velocity" value="0.25"/>
      <param name="initial_covariance/gyro_bias" value="0.01"/>
      <param name="initial_covariance/acc_bias" value="0.01"/>
      <param name="initial_covariance/extrinsic_rotation_cov" value="3.0462e-4"/>
      <param name="initial_covariance/extrinsic_translation_cov" value="2.5e-5"/>
      <param name="initial_covariance/irradiance_frame_bias" value="0.1"/>

      <remap from="~imu" to="/imu0"/>
      <remap from="~ground_truth" to="/vrpn_client/raw_transform"/>
      <remap from="~cam0_image" to="/cam0/image_raw"/>
      <remap from="~cam1_image" to="/cam1/image_raw"/>

      <remap from="~features" to="image_processor/features"/>

    </node>
  </group>

</launch>
@@ -17,14 +17,16 @@
     args='standalone msckf_vio/MsckfVioNodelet'
     output="screen">
 
-    <!-- Photometry Flag-->
-    <param name="PHOTOMETRIC" value="true"/>
+    <!-- Filter Flag, 0 = msckf, 1 = photometric, 2 = two -->
+    <param name="FILTER" value="1"/>
 
     <!-- Debugging Flaggs -->
+    <param name="StreamPause" value="true"/>
     <param name="PrintImages" value="false"/>
     <param name="GroundTruth" value="false"/>
 
-    <param name="patch_size_n" value="7"/>
+    <param name="patch_size_n" value="3"/>
     <!-- Calibration parameters -->
     <rosparam command="load" file="$(arg calibration_file)"/>
 
@@ -71,4 +73,6 @@
     </node>
   </group>
 
+  <!--node name="player" pkg="bagcontrol" type="control.py" /-->
+
</launch>
73
log
Normal file
73
log
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
# Created by Octave 3.8.1, Wed Jun 12 14:36:37 2019 CEST <raphael@raphael-desktop>
|
||||||
|
# name: Hx
|
||||||
|
# type: matrix
|
||||||
|
# rows: 18
|
||||||
|
# columns: 49
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 313.795 -58.7912 139.778 -46.7616 144.055 86.9644 0 -314.123 55.6434 -140.648 46.7616 -144.055 -86.9644 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 441.06 -94.50069999999999 174.424 -53.7653 204.822 120.248 0 -441.685 90.1101 -175.657 53.7653 -204.822 -120.248 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 225.35 -54.5629 77.60599999999999 -21.1425 105.886 60.3706 0 -225.756 52.3373 -78.2406 21.1425 -105.886 -60.3706 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 175.128 20.6203 175.127 -79.63939999999999 73.245 62.1868 0 -174.573 -22.5235 -175.576 79.63939999999999 -73.245 -62.1868 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 296.962 43.5469 311.307 -143.667 123.399 108.355 0 -295.905 -46.7952 -312.063 143.667 -123.399 -108.355 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 126.283 117.889 311.864 -161.264 38.8521 71.8019 0 -124.464 -119.541 -312.118 161.264 -38.8521 -71.8019 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 49.2502 63.7166 155.071 -80.81950000000001 12.7732 32.1826 0 -48.2934 -64.4113 -155.157 80.81950000000001 -12.7732 -32.1826 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 69.59699999999999 154.579 335.384 -179.355 9.212580000000001 62.0364 0 -67.35599999999999 -155.735 -335.462 179.355 -9.212580000000001 -62.0364 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -66.6965 304.947 500.218 -285.589 -71.31010000000001 55.5058 0 70.8009 -305.077 -499.831 285.589 71.31010000000001 -55.5058 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 323.404 -62.2043 141.092 -46.6015 148.737 89.1108 0 0 0 0 0 0 0 0 -324.336 57.8552 -141.991 46.6015 -148.737 -89.1108 1 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 454.208 -99.3095 175.986 -53.4094 211.276 123.174 0 0 0 0 0 0 0 0 -455.779 93.0992 -177.158 53.4094 -211.276 -123.174 1 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 231.884 -57.0266 78.2559 -20.8926 109.118 61.8183 0 0 0 0 0 0 0 0 -232.824 53.8025 -78.80719999999999 20.8926 -109.118 -61.8183 1 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 181.715 18.8525 177.045 -80.08499999999999 76.3716 63.8254 0 0 0 0 0 0 0 0 -181.07 -20.839 -177.959 80.08499999999999 -76.3716 -63.8254 1 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 308.249 40.5812 314.711 -144.494 128.766 111.207 0 0 0 0 0 0 0 0 -306.972 -43.8825 -316.328 144.494 -128.766 -111.207 1 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 133.309 116.865 315.474 -162.598 42.0763 73.8353 0 0 0 0 0 0 0 0 -130.603 -117.454 -316.931 162.598 -42.0763 -73.8353 1 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 52.4426 63.3607 156.902 -81.5288 14.2139 33.1222 0 0 0 0 0 0 0 0 -50.9976 -63.4393 -157.607 81.5288 -14.2139 -33.1222 1 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 75.5508 154.234 339.369 -180.995 11.859 63.8956 0 0 0 0 0 0 0 0 -72.1041 -153.816 -340.865 180.995 -11.859 -63.8956 1 0 0 0 0 0 0 0
|
||||||
|
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -62.0551 306.357 506.351 -288.542 -69.50409999999999 57.4562 0 0 0 0 0 0 0 0 68.6262 -303.028 -508.423 288.542 69.50409999999999 -57.4562 1 0 0 0 0 0 0 0
|
||||||
|
|
||||||
|
|
||||||
|
# name: Hy
|
||||||
|
# type: matrix
|
||||||
|
# rows: 18
|
||||||
|
# columns: 14
|
||||||
|
1.56267 0 0 0 0 0 0 0 0 0 0.252873 0 0 0.110387
|
||||||
|
0 1.56267 0 0 0 0 0 0 0 0 0.453168 0 0 0.162961
|
||||||
|
0 0 1.56267 0 0 0 0 0 0 0 0.638441 0 0 0.0873758
|
||||||
|
0 0 0 1.56267 0 0 0 0 0 0 0.21031 0 0 0.0321517
|
||||||
|
0 0 0 0 1.56267 0 0 0 0 0 0.375554 0 0 0.0505151
|
||||||
|
0 0 0 0 0 1.56267 0 0 0 0 0.638441 0 0 -0.0336034
|
||||||
|
0 0 0 0 0 0 1.56267 0 0 0 0.157733 0 0 -0.0232704
|
||||||
|
0 0 0 0 0 0 0 1.56267 0 0 0.212814 0 0 -0.0688343
|
||||||
|
0 0 0 0 0 0 0 0 1.56267 0 0.360532 0 0 -0.187745
|
||||||
|
1.56267 0 0 0 0 0 0 0 0 0 0 0.252873 0 0.322811
|
||||||
|
0 1.56267 0 0 0 0 0 0 0 0 0 0.453168 0 0.467319
|
||||||
|
0 0 1.56267 0 0 0 0 0 0 0 0 0.638441 0 0.245907
|
||||||
|
0 0 0 1.56267 0 0 0 0 0 0 0 0.21031 0 0.135725
|
||||||
|
0 0 0 0 1.56267 0 0 0 0 0 0 0.375554 0 0.225277
|
||||||
|
0 0 0 0 0 1.56267 0 0 0 0 0 0.638441 0 0.012926
|
||||||
|
0 0 0 0 0 0 1.56267 0 0 0 0 0.157733 0 -0.0108962
|
||||||
|
0 0 0 0 0 0 0 1.56267 0 0 0 0.212814 0 -0.06974909999999999
|
||||||
|
0 0 0 0 0 0 0 0 1.56267 0 0 0.360532 0 -0.318846
|
||||||
|
|
||||||
|
|
||||||
|
# name: r
|
||||||
|
# type: matrix
|
||||||
|
# rows: 18
|
||||||
|
# columns: 1
|
||||||
|
0.0354809
|
||||||
|
0.0153183
|
||||||
|
0.0570191
|
||||||
|
-0.0372801
|
||||||
|
0.0878601
|
||||||
|
0.06811780000000001
|
||||||
|
-0.00426164
|
||||||
|
5.162985999041026e-321
|
||||||
|
6.927779999999998e-310
|
||||||
|
0
|
||||||
|
2.121999999910509e-314
|
||||||
|
0
|
||||||
|
0
|
||||||
|
0
|
||||||
|
3.6073900000086e-313
|
||||||
|
4.446590812571219e-323
|
||||||
|
3.952525166729972e-323
|
||||||
|
3.952525166729972e-323
|
||||||
|
|
||||||
|
|
64 src/bagcontrol.py (Normal file)
@@ -0,0 +1,64 @@
#!/usr/bin/env python
import rosbag
import rospy
from sensor_msgs.msg import Imu, Image
from geometry_msgs.msg import TransformStamped
import time
import signal
import sys


def signal_handler(sig, frame):
    print('gracefully exiting the program.')
    bag.close()
    sys.exit(0)

def main():
    global bag

    cam0_topic = '/cam0/image_raw'
    cam1_topic = '/cam1/image_raw'
    imu0_topic = '/imu0'
    grnd_topic = '/vrpn_client/raw_transform'

    rospy.init_node('controlbag')
    rospy.set_param('play_bag', False)

    cam0_pub = rospy.Publisher(cam0_topic, Image, queue_size=10)
    cam1_pub = rospy.Publisher(cam1_topic, Image, queue_size=10)
    imu0_pub = rospy.Publisher(imu0_topic, Imu, queue_size=10)
    grnd_pub = rospy.Publisher(grnd_topic, TransformStamped, queue_size=10)

    signal.signal(signal.SIGINT, signal_handler)

    bag = rosbag.Bag('/home/raphael/dev/MSCKF_ws/bag/TUM/dataset-corridor1_1024_16.bag')
    for topic, msg, t in bag.read_messages(topics=[cam0_topic, cam1_topic, imu0_topic, grnd_topic]):

        # pause if parameter set to false
        flag = False
        while not rospy.get_param('/play_bag'):
            time.sleep(0.01)
            if not flag:
                print("stopped playback")
                flag = not flag

        if flag:
            print("resume playback")

        if topic == cam0_topic:
            cam0_pub.publish(msg)

        elif topic == cam1_topic:
            cam1_pub.publish(msg)

        elif topic == imu0_topic:
            imu0_pub.publish(msg)

        elif topic == grnd_topic:
            grnd_pub.publish(msg)

        #print msg
    bag.close()

if __name__ == "__main__":
    main()
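The player above polls the /play_bag parameter on every message, so any other node can pause or resume the bag. A minimal roscpp sketch of that interaction, assuming only the parameter name from the script above (the node name is made up):

```cpp
#include <ros/ros.h>

int main(int argc, char** argv) {
  ros::init(argc, argv, "play_bag_toggle");   // hypothetical helper node
  ros::NodeHandle nh;
  nh.setParam("/play_bag", true);   // resume playback in bagcontrol.py / control.py
  // nh.setParam("/play_bag", false);  would pause it again
  return 0;
}
```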
64 src/control.py (Executable file)
@@ -0,0 +1,64 @@
(The content of src/control.py is identical, line for line, to src/bagcontrol.py shown above.)
@@ -14,6 +14,108 @@ namespace msckf_vio {
 namespace image_handler {
 
+
+cv::Point2f pinholeDownProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics)
+{
+  return cv::Point2f(p_in.x * intrinsics[0] + intrinsics[2], p_in.y * intrinsics[1] + intrinsics[3]);
+}
+
+cv::Point2f pinholeUpProject(const cv::Point2f& p_in, const cv::Vec4d& intrinsics)
+{
+  return cv::Point2f((p_in.x - intrinsics[2])/intrinsics[0], (p_in.y - intrinsics[3])/intrinsics[1]);
+}
+
+void undistortImage(
+    cv::InputArray src,
+    cv::OutputArray dst,
+    const std::string& distortion_model,
+    const cv::Vec4d& intrinsics,
+    const cv::Vec4d& distortion_coeffs)
+{
+
+  const cv::Matx33d K(intrinsics[0], 0.0, intrinsics[2],
+                      0.0, intrinsics[1], intrinsics[3],
+                      0.0, 0.0, 1.0);
+
+  if (distortion_model == "pre-equidistant")
+    cv::fisheye::undistortImage(src, dst, K, distortion_coeffs, K);
+  else if (distortion_model == "equidistant")
+    src.copyTo(dst);
+
+  else if (distortion_model == "pre-radtan")
+    cv::undistort(src, dst, K, distortion_coeffs, K);
+  else if (distortion_model == "radtan")
+    src.copyTo(dst);
+
+}
+
+void undistortPoint(
+    const cv::Point2f& pt_in,
+    const cv::Vec4d& intrinsics,
+    const std::string& distortion_model,
+    const cv::Vec4d& distortion_coeffs,
+    cv::Point2f& pt_out,
+    const cv::Matx33d &rectification_matrix,
+    const cv::Vec4d &new_intrinsics) {
+
+
+  std::vector<cv::Point2f> pts_in;
+  std::vector<cv::Point2f> pts_out;
+  pts_in.push_back(pt_in);
+  if (pts_in.size() == 0) return;
+
+  const cv::Matx33d K(
+      intrinsics[0], 0.0, intrinsics[2],
+      0.0, intrinsics[1], intrinsics[3],
+      0.0, 0.0, 1.0);
+
+  const cv::Matx33d K_new(
+      new_intrinsics[0], 0.0, new_intrinsics[2],
+      0.0, new_intrinsics[1], new_intrinsics[3],
+      0.0, 0.0, 1.0);
+
+  if (distortion_model == "radtan") {
+    cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
+                        rectification_matrix, K_new);
+  }
+  // equidistant
+  else if (distortion_model == "equidistant") {
+    cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
+                                 rectification_matrix, K_new);
+  }
+  // fov
+  else if (distortion_model == "fov") {
+    for(int i = 0; i < pts_in.size(); i++)
+    {
+      float omega = distortion_coeffs[0];
+      float rd = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
+      float ru = tan(rd * omega)/(2 * tan(omega / 2));
+
+      cv::Point2f newPoint(
+          ((pts_in[i].x - intrinsics[2]) / intrinsics[0]) * (ru / rd),
+          ((pts_in[i].y - intrinsics[3]) / intrinsics[1]) * (ru / rd));
+
+      pts_out.push_back(newPoint);
+    }
+  }
+
+  else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
+  {
+    std::vector<cv::Point2f> temp_pts_out;
+    for(int i = 0; i < pts_in.size(); i++)
+      temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics));
+
+    pts_out = temp_pts_out;
+  }
+  else {
+    ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
+                  distortion_model.c_str());
+    cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
+                        rectification_matrix, K_new);
+  }
+  pt_out = pts_out[0];
+  return;
+}
+
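The single-parameter FOV model implemented by the new "fov" branches above and below can be written compactly. The formulas only restate the code; the attribution to the 'straight lines have to be straight' paper (Devernay and Faugeras) follows the in-code comment:

```latex
% r_u: undistorted (pinhole) radius, r_d: distorted radius, both measured on the
% normalized image plane; \omega is the single distortion coefficient.
r_d = \frac{1}{\omega}\arctan\!\bigl(2\,r_u \tan\tfrac{\omega}{2}\bigr),
\qquad
r_u = \frac{\tan(r_d\,\omega)}{2\tan\tfrac{\omega}{2}} .
```

Each point is then simply scaled by r_d/r_u when distorting and by r_u/r_d when undistorting, which is exactly what the loops in the code do.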
 void undistortPoints(
     const std::vector<cv::Point2f>& pts_in,
     const cv::Vec4d& intrinsics,
@@ -38,10 +140,35 @@ void undistortPoints(
   if (distortion_model == "radtan") {
     cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                         rectification_matrix, K_new);
-  } else if (distortion_model == "equidistant") {
+  }
+  else if (distortion_model == "equidistant") {
     cv::fisheye::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
                                  rectification_matrix, K_new);
-  } else {
+  }
+  else if (distortion_model == "fov") {
+    for(int i = 0; i < pts_in.size(); i++)
+    {
+      float omega = distortion_coeffs[0];
+      float rd = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
+      float ru = tan(rd * omega)/(2 * tan(omega / 2));
+
+      cv::Point2f newPoint(
+          ((pts_in[i].x - intrinsics[2]) / intrinsics[0]) * (ru / rd),
+          ((pts_in[i].y - intrinsics[3]) / intrinsics[1]) * (ru / rd));
+
+      pts_out.push_back(newPoint);
+    }
+  }
+  else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
+  {
+    std::vector<cv::Point2f> temp_pts_out;
+    for(int i = 0; i < pts_in.size(); i++)
+      temp_pts_out.push_back(pinholeUpProject(pts_in[i], intrinsics));
+
+    pts_out = temp_pts_out;
+
+  }
+  else {
     ROS_WARN_ONCE("The model %s is unrecognized, use radtan instead...",
                   distortion_model.c_str());
     cv::undistortPoints(pts_in, pts_out, K, distortion_coeffs,
@@ -69,7 +196,31 @@ std::vector<cv::Point2f> distortPoints(
         distortion_coeffs, pts_out);
   } else if (distortion_model == "equidistant") {
     cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
-  } else {
+  }
+  else if (distortion_model == "fov") {
+    for(int i = 0; i < pts_in.size(); i++)
+    {
+      // based on 'straight lines have to be straight'
+      float ru = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
+      float omega = distortion_coeffs[0];
+      float rd = 1 / (omega)*atan(2*ru*tan(omega / 2));
+
+      cv::Point2f newPoint(
+          pts_in[i].x * (rd/ru) * intrinsics[0] + intrinsics[2],
+          pts_in[i].y * (rd/ru) * intrinsics[1] + intrinsics[3]);
+
+      pts_out.push_back(newPoint);
+    }
+  }
+  else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
+  {
+    std::vector<cv::Point2f> temp_pts_out;
+    for(int i = 0; i < pts_in.size(); i++)
+      temp_pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics));
+
+    pts_out = temp_pts_out;
+  }
+  else {
     ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
                   distortion_model.c_str());
     std::vector<cv::Point3f> homogenous_pts;
@@ -102,7 +253,31 @@ cv::Point2f distortPoint(
         distortion_coeffs, pts_out);
   } else if (distortion_model == "equidistant") {
     cv::fisheye::distortPoints(pts_in, pts_out, K, distortion_coeffs);
-  } else {
+  }
+  else if (distortion_model == "fov") {
+    for(int i = 0; i < pts_in.size(); i++)
+    {
+      // based on 'straight lines have to be straight'
+      float ru = sqrt(pts_in[i].x * pts_in[i].x + pts_in[i].y * pts_in[i].y);
+      float omega = distortion_coeffs[0];
+      float rd = 1 / (omega)*atan(2*ru*tan(omega / 2));
+
+      cv::Point2f newPoint(
+          pts_in[i].x * (rd/ru) * intrinsics[0] + intrinsics[2],
+          pts_in[i].y * (rd/ru) * intrinsics[1] + intrinsics[3]);
+
+      pts_out.push_back(newPoint);
+    }
+  }
+  else if (distortion_model == "pre-equidistant" or distortion_model == "pre-radtan")
+  {
+    std::vector<cv::Point2f> temp_pts_out;
+    for(int i = 0; i < pts_in.size(); i++)
+      pts_out.push_back(pinholeDownProject(pts_in[i], intrinsics));
+
+    pts_out = temp_pts_out;
+  }
+  else {
     ROS_WARN_ONCE("The model %s is unrecognized, using radtan instead...",
                   distortion_model.c_str());
     std::vector<cv::Point3f> homogenous_pts;
@@ -114,5 +289,5 @@ cv::Point2f distortPoint(
   return pts_out[0];
 }
 
 } // namespace image_handler
 } // namespace msckf_vio
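A sketch of the "pre-" pipeline implied by the code above: the raw frame is undistorted once, after which the "pre-radtan"/"pre-equidistant" point functions reduce to plain pinhole up-/down-projection. The include path, intrinsics, distortion coefficients, and the input image below are placeholders, not repository values:

```cpp
#include <opencv2/opencv.hpp>
#include "msckf_vio/image_handler.h"   // assumed header location

void preUndistortExample(const cv::Mat& raw) {
  const cv::Vec4d intrinsics(458.0, 457.0, 367.0, 248.0);            // fx, fy, cx, cy (made up)
  const cv::Vec4d distortion_coeffs(-0.28, 0.07, 0.0002, 0.00002);   // made up

  // Undistort the whole image once per frame.
  cv::Mat undistorted;
  msckf_vio::image_handler::undistortImage(
      raw, undistorted, "pre-radtan", intrinsics, distortion_coeffs);

  // Afterwards, point "undistortion" is just the inverse pinhole projection
  // (see pinholeUpProject above).
  cv::Point2f normalized;
  msckf_vio::image_handler::undistortPoint(
      cv::Point2f(400.f, 300.f), intrinsics, "pre-radtan",
      distortion_coeffs, normalized);
}
```

The stereoCallback changes in the next file show this being applied to both camera streams before the pyramids are built.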
@@ -42,6 +42,9 @@ ImageProcessor::~ImageProcessor() {
 }
 
 bool ImageProcessor::loadParameters() {
+
+  // debug parameters
+  nh.param<bool>("StreamPause", STREAMPAUSE, false);
   // Camera calibration parameters
   nh.param<string>("cam0/distortion_model",
       cam0.distortion_model, string("radtan"));
@@ -211,7 +214,9 @@ void ImageProcessor::stereoCallback(
     const sensor_msgs::ImageConstPtr& cam0_img,
     const sensor_msgs::ImageConstPtr& cam1_img) {
 
-  //cout << "==================================" << endl;
+  // stop playing bagfile if printing images
+  //if(STREAMPAUSE)
+  //  nh.setParam("/play_bag_image", false);
 
   // Get the current image.
   cam0_curr_img_ptr = cv_bridge::toCvShare(cam0_img,
@@ -219,12 +224,27 @@ void ImageProcessor::stereoCallback(
   cam1_curr_img_ptr = cv_bridge::toCvShare(cam1_img,
       sensor_msgs::image_encodings::MONO8);
 
+  ros::Time start_time = ros::Time::now();
+
+  cv::Mat new_cam0;
+  cv::Mat new_cam1;
+
+  image_handler::undistortImage(cam0_curr_img_ptr->image, new_cam0, cam0.distortion_model, cam0.intrinsics, cam0.distortion_coeffs);
+  image_handler::undistortImage(cam1_curr_img_ptr->image, new_cam1, cam1.distortion_model, cam1.intrinsics, cam1.distortion_coeffs);
+
+  new_cam0.copyTo(cam0_curr_img_ptr->image);
+  new_cam1.copyTo(cam1_curr_img_ptr->image);
+
+  //ROS_INFO("Publishing: %f",
+  //    (ros::Time::now()-start_time).toSec());
+
   // Build the image pyramids once since they're used at multiple places
   createImagePyramids();
 
   // Detect features in the first frame.
   if (is_first_img) {
-    ros::Time start_time = ros::Time::now();
+    start_time = ros::Time::now();
     initializeFirstFrame();
     //ROS_INFO("Detection time: %f",
     //    (ros::Time::now()-start_time).toSec());
@@ -237,7 +257,7 @@ void ImageProcessor::stereoCallback(
     //    (ros::Time::now()-start_time).toSec());
   } else {
     // Track the feature in the previous image.
-    ros::Time start_time = ros::Time::now();
+    start_time = ros::Time::now();
     trackFeatures();
     //ROS_INFO("Tracking time: %f",
     //    (ros::Time::now()-start_time).toSec());
@@ -245,6 +265,7 @@ void ImageProcessor::stereoCallback(
     // Add new features into the current image.
     start_time = ros::Time::now();
     addNewFeatures();
+
     //ROS_INFO("Addition time: %f",
     //    (ros::Time::now()-start_time).toSec());
 
@@ -267,16 +288,18 @@ void ImageProcessor::stereoCallback(
   //    (ros::Time::now()-start_time).toSec());
 
   // Publish features in the current image.
-  ros::Time start_time = ros::Time::now();
+  start_time = ros::Time::now();
   publish();
   //ROS_INFO("Publishing: %f",
   //    (ros::Time::now()-start_time).toSec());
 
   // Update the previous image and previous features.
+
   cam0_prev_img_ptr = cam0_curr_img_ptr;
   prev_features_ptr = curr_features_ptr;
   std::swap(prev_cam0_pyramid_, curr_cam0_pyramid_);
 
+
   // Initialize the current features to empty vectors.
   curr_features_ptr.reset(new GridFeatures());
   for (int code = 0; code <
@@ -284,6 +307,10 @@ void ImageProcessor::stereoCallback(
     (*curr_features_ptr)[code] = vector<FeatureMetaData>(0);
   }
 
+  // stop playing bagfile if printing images
+  //if(STREAMPAUSE)
+  //  nh.setParam("/play_bag_image", true);
+
   return;
 }
 
@@ -580,6 +607,7 @@ void ImageProcessor::trackFeatures() {
     ++after_ransac;
   }
 
+
   // Compute the tracking rate.
   int prev_feature_num = 0;
   for (const auto& item : *prev_features_ptr)
@@ -619,9 +647,14 @@ void ImageProcessor::stereoMatch(
     image_handler::undistortPoints(cam0_points, cam0.intrinsics, cam0.distortion_model,
         cam0.distortion_coeffs, cam0_points_undistorted,
         R_cam0_cam1);
+<<<<<<< HEAD
+    cam1_points = distortPoints(cam0_points_undistorted, cam1_intrinsics,
+        cam1_distortion_model, cam1_distortion_coeffs);
+=======
 
     cam1_points = image_handler::distortPoints(cam0_points_undistorted, cam1.intrinsics,
         cam1.distortion_model, cam1.distortion_coeffs);
+>>>>>>> photometry-jakobi
   }
 
   // Track features using LK optical flow method.
@@ -659,6 +692,8 @@ void ImageProcessor::stereoMatch(
 
   // Further remove outliers based on the known
   // essential matrix.
+
+
   vector<cv::Point2f> cam0_points_undistorted(0);
   vector<cv::Point2f> cam1_points_undistorted(0);
   image_handler::undistortPoints(
@@ -668,6 +703,7 @@ void ImageProcessor::stereoMatch(
       cam1_points, cam1.intrinsics, cam1.distortion_model,
       cam1.distortion_coeffs, cam1_points_undistorted);
 
+
   double norm_pixel_unit = 4.0 / (
       cam0.intrinsics[0]+cam0.intrinsics[1]+
       cam1.intrinsics[0]+cam1.intrinsics[1]);
1798 src/msckf_vio.cpp
(File diff suppressed because it is too large.)

75 src/shrinkImage.py (Executable file)
@@ -0,0 +1,75 @@
#!/usr/bin/env python
from __future__ import print_function

import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

class image_converter:

    def __init__(self):
        self.image0_pub = rospy.Publisher("/cam0/new_image_raw", Image, queue_size=10)
        self.image1_pub = rospy.Publisher("/cam1/new_image_raw", Image, queue_size=10)

        self.bridge = CvBridge()
        self.image0_sub = rospy.Subscriber("/cam0/image_raw", Image, self.callback_cam0)
        self.image1_sub = rospy.Subscriber("/cam1/image_raw", Image, self.callback_cam1)

    def callback_cam0(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        imgScale = 0.25
        newX, newY = cv_image.shape[1]*imgScale, cv_image.shape[0]*imgScale
        newimg = cv2.resize(cv_image, (int(newX), int(newY)))

        newpub = self.bridge.cv2_to_imgmsg(newimg, "bgr8")
        newdata = data
        newdata.height = newpub.height
        newdata.width = newpub.width
        newdata.step = newpub.step
        newdata.data = newpub.data
        try:
            self.image0_pub.publish(newdata)
        except CvBridgeError as e:
            print(e)

    def callback_cam1(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        imgScale = 0.25
        newX, newY = cv_image.shape[1]*imgScale, cv_image.shape[0]*imgScale
        newimg = cv2.resize(cv_image, (int(newX), int(newY)))

        newpub = self.bridge.cv2_to_imgmsg(newimg, "bgr8")
        newdata = data
        newdata.height = newpub.height
        newdata.width = newpub.width
        newdata.step = newpub.step
        newdata.data = newpub.data

        try:
            self.image1_pub.publish(newdata)
        except CvBridgeError as e:
            print(e)


def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)