83 lines
		
	
	
		
			2.6 KiB
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
			
		
		
	
	
			83 lines
		
	
	
		
			2.6 KiB
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
featureCallback
 | 
						|
	propagate IMU state()
 | 
						|
	state Augmentation()
 | 
						|
	add Feature Observations()
 | 
						|
 | 
						|
	# the following possibly trigger the EKF update step:
 | 
						|
	remove Lost Features ()
 | 
						|
	prune Camera State Buffer ()
 | 
						|
 | 
						|
 | 
						|
remove Lost Features()
 | 
						|
	every feature that does not have a current observation:
 | 
						|
		*just delete if not enough features
 | 
						|
 | 
						|
		check Motion of Feature ()
 | 
						|
			_the calculation here makes no sense — he uses the pixel position as the direction vector for the feature?_
 | 
						|
		*initialize Position ()
 | 
						|
 | 
						|
		calculate feature Jacobian and Residual()
 | 
						|
			*for every observation in this feature
 | 
						|
				- calculate u and v in camera frames, based on estimated feature position
 | 
						|
				- input results into the Jacobian d(measurement)/d(camera 0/1)
 | 
						|
				- input results into the Jacobians d(camera 0/1)/d(state) and d(camera 0/1)/d(feature position)
 | 
						|
				- project both Jacobians onto the null space of the feature-position Jacobian
 | 
						|
				- calculate residual: measurement - u and v of camera frames
 | 
						|
				- project the residual onto the null space of the feature-position Jacobian
 | 
						|
 | 
						|
			- stack residuals and Jacobians
 | 
						|
 | 
						|
		gating Test()
 | 
						|
		
 | 
						|
		*measurementUpdate()
 | 
						|
			_use the calculated residuals and Jacobians to calculate the change in error_
 | 
						|
measurementUpdate():
 | 
						|
	- QR-reduce the stacked measurement Jacobians
 | 
						|
	- calculate the Kalman gain based on the measurement Jacobian, the error-state covariance and the noise
 | 
						|
		_does some nontrivial matrix manipulation here_
 | 
						|
	- calculate estimated error after observation: delta_x = KalmanGain * residual 
 | 
						|
	- add estimated error to state (imu states and cam states)
 | 
						|
 | 
						|
initialize Position ():
 | 
						|
	* create initial guess for global feature position ()
 | 
						|
		_uses first feature measurement on left camera and last feature measurement of right camera_
 | 
						|
		- transform first measurement to plane of last measurement
 | 
						|
		- calculate the least-squares point between the rays
 | 
						|
	* get position approximation using measured feature positions
 | 
						|
		_using Levenberg–Marquardt iterative search_
 | 
						|
 | 
						|
 | 
						|
 | 
						|
add Feature Observations()
 | 
						|
	* if feature not in map, add feature to map
 | 
						|
		_and add u0, v0, u1, v1 as the first observation_
 | 
						|
	* if feature in map, add new observation u0,v0,u1,v1
 | 
						|
 | 
						|
 | 
						|
 | 
						|
state Augmentation()
 | 
						|
	* Add estimated cam position to state
 | 
						|
	* Update P with the Jacobian of the cam position 
 | 
						|
 | 
						|
 | 
						|
propagate IMU state ()
 | 
						|
	_uses IMU process model for every saved IMU state_
 | 
						|
	
 | 
						|
	for every buffered imu state:
 | 
						|
		
 | 
						|
		*remove bias
 | 
						|
		
 | 
						|
		*Compute F and G matrix (continuous transition and noise cov.)
 | 
						|
			_using current orientation, gyro and acc. reading_
 | 
						|
		* approximate phi: Phi = I + F*dt + ...
 | 
						|
 | 
						|
		* calculate new state propagating through runge kutta
 | 
						|
		* modify transition matrix to have a proper null space?
 | 
						|
 | 
						|
		* calculate Q = Phi*G*Q_noise*GT*PhiT
 | 
						|
		* calculate P = Phi*P*PhiT + Q
 | 
						|
 | 
						|
 | 
						|
	stateAugmentation ()
 | 
						|
		_save current IMU state as camera position_
 |