diff --git a/.gitignore b/.gitignore index 0172760..ca5eb19d9 100644 --- a/.gitignore +++ b/.gitignore @@ -17,8 +17,23 @@ Examples/Monocular/mono_euroc Examples/Monocular/mono_kitti Examples/Monocular/mono_tum Examples/RGB-D/rgbd_tum +Examples/ROS/ORB_SLAM2/CMakeLists.txt.user +Examples/ROS/ORB_SLAM2/Mono +Examples/ROS/ORB_SLAM2/MonoAR +Examples/ROS/ORB_SLAM2/RGBD +Examples/ROS/ORB_SLAM2/Stereo +Examples/ROS/ORB_SLAM2/build/ Examples/Stereo/stereo_euroc Examples/Stereo/stereo_kitti +KeyFrameTrajectory.txt +Thirdparty/DBoW2/build/ +Thirdparty/DBoW2/lib/ +Thirdparty/g2o/build/ +Thirdparty/g2o/config.h +Thirdparty/g2o/lib/ +Vocabulary/ORBvoc.txt +build/ +*~ +lib/ Examples/ROS/ORB_VIO/VIO - diff --git a/CMakeLists.txt b/CMakeLists.txt index a5f089a..22ac8fa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,7 +28,14 @@ endif() LIST(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules) -find_package(OpenCV 2.4.3 REQUIRED) +find_package(OpenCV 3.0 QUIET) +if(NOT OpenCV_FOUND) + find_package(OpenCV 2.4.3 QUIET) + if(NOT OpenCV_FOUND) + message(FATAL_ERROR "OpenCV > 2.4.3 not found.") + endif() +endif() + find_package(Eigen3 3.1.0 REQUIRED) find_package(Pangolin REQUIRED) find_package(Cholmod REQUIRED) diff --git a/Examples/ROS/ORB_SLAM2/CMakeLists.txt b/Examples/ROS/ORB_SLAM2/CMakeLists.txt index 3c032fe..7cbf71c 100644 --- a/Examples/ROS/ORB_SLAM2/CMakeLists.txt +++ b/Examples/ROS/ORB_SLAM2/CMakeLists.txt @@ -30,7 +30,14 @@ endif() LIST(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/../../../cmake_modules) -find_package(OpenCV 2.4.3 REQUIRED) +find_package(OpenCV 3.0 QUIET) +if(NOT OpenCV_FOUND) + find_package(OpenCV 2.4.3 QUIET) + if(NOT OpenCV_FOUND) + message(FATAL_ERROR "OpenCV > 2.4.3 not found.") + endif() +endif() + find_package(Eigen3 3.1.0 REQUIRED) find_package(Pangolin REQUIRED) @@ -59,6 +66,17 @@ target_link_libraries(Mono ${LIBS} ) +# Node for monocular camera (Augmented Reality Demo) +rosbuild_add_executable(MonoAR +src/AR/ros_mono_ar.cc +src/AR/ViewerAR.h +src/AR/ViewerAR.cc +) + +target_link_libraries(MonoAR +${LIBS} +) + # Node for stereo camera rosbuild_add_executable(Stereo src/ros_stereo.cc diff --git a/Examples/ROS/ORB_SLAM2/src/AR/ViewerAR.cc b/Examples/ROS/ORB_SLAM2/src/AR/ViewerAR.cc new file mode 100644 index 0000000..9c548e8 --- /dev/null +++ b/Examples/ROS/ORB_SLAM2/src/AR/ViewerAR.cc @@ -0,0 +1,642 @@ +/** +* This file is part of ORB-SLAM2. +* +* Copyright (C) 2014-2016 Raúl Mur-Artal (University of Zaragoza) +* For more information see +* +* ORB-SLAM2 is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. +* +* ORB-SLAM2 is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with ORB-SLAM2. If not, see . 
+*/ + +#include "ViewerAR.h" + +#include + +#include +#include +#include + +using namespace std; + +namespace ORB_SLAM2 +{ + +const float eps = 1e-4; + +cv::Mat ExpSO3(const float &x, const float &y, const float &z) +{ + cv::Mat I = cv::Mat::eye(3,3,CV_32F); + const float d2 = x*x+y*y+z*z; + const float d = sqrt(d2); + cv::Mat W = (cv::Mat_(3,3) << 0, -z, y, + z, 0, -x, + -y, x, 0); + if(d(0),v.at(1),v.at(2)); +} + +ViewerAR::ViewerAR(){} + +void ViewerAR::Run() +{ + int w,h,wui; + + cv::Mat im, Tcw; + int status; + vector vKeys; + vector vMPs; + + while(1) + { + GetImagePose(im,Tcw,status,vKeys,vMPs); + if(im.empty()) + cv::waitKey(mT); + else + { + w = im.cols; + h = im.rows; + break; + } + } + + wui=200; + + pangolin::CreateWindowAndBind("Viewer",w+wui,h); + + glEnable(GL_DEPTH_TEST); + glEnable (GL_BLEND); + + pangolin::CreatePanel("menu").SetBounds(0.0,1.0,0.0,pangolin::Attach::Pix(wui)); + pangolin::Var menu_detectplane("menu.Insert Cube",false,false); + pangolin::Var menu_clear("menu.Clear All",false,false); + pangolin::Var menu_drawim("menu.Draw Image",true,true); + pangolin::Var menu_drawcube("menu.Draw Cube",true,true); + pangolin::Var menu_cubesize("menu. Cube Size",0.05,0.01,0.3); + pangolin::Var menu_drawgrid("menu.Draw Grid",true,true); + pangolin::Var menu_ngrid("menu. Grid Elements",3,1,10); + pangolin::Var menu_sizegrid("menu. Element Size",0.05,0.01,0.3); + pangolin::Var menu_drawpoints("menu.Draw Points",false,true); + + pangolin::Var menu_LocalizationMode("menu.Localization Mode",false,true); + bool bLocalizationMode = false; + + pangolin::View& d_image = pangolin::Display("image") + .SetBounds(0,1.0f,pangolin::Attach::Pix(wui),1.0f,(float)w/h) + .SetLock(pangolin::LockLeft, pangolin::LockTop); + + pangolin::GlTexture imageTexture(w,h,GL_RGB,false,0,GL_RGB,GL_UNSIGNED_BYTE); + + pangolin::OpenGlMatrixSpec P = pangolin::ProjectionMatrixRDF_TopLeft(w,h,fx,fy,cx,cy,0.001,1000); + + vector vpPlane; + + while(1) + { + + if(menu_LocalizationMode && !bLocalizationMode) + { + mpSystem->ActivateLocalizationMode(); + bLocalizationMode = true; + } + else if(!menu_LocalizationMode && bLocalizationMode) + { + mpSystem->DeactivateLocalizationMode(); + bLocalizationMode = false; + } + + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + // Activate camera view + d_image.Activate(); + glColor3f(1.0,1.0,1.0); + + // Get last image and its computed pose from SLAM + GetImagePose(im,Tcw,status,vKeys,vMPs); + + // Add text to image + PrintStatus(status,bLocalizationMode,im); + + if(menu_drawpoints) + DrawTrackedPoints(vKeys,vMPs,im); + + // Draw image + if(menu_drawim) + DrawImageTexture(imageTexture,im); + + glClear(GL_DEPTH_BUFFER_BIT); + + // Load camera projection + glMatrixMode(GL_PROJECTION); + P.Load(); + + glMatrixMode(GL_MODELVIEW); + + // Load camera pose + LoadCameraPose(Tcw); + + // Draw virtual things + if(status==2) + { + if(menu_clear) + { + if(!vpPlane.empty()) + { + for(size_t i=0; iMapChanged()) + { + cout << "Map changed. All virtual elements are recomputed!" 
<< endl; + bRecompute = true; + } + } + + for(size_t i=0; iRecompute(); + } + glPushMatrix(); + pPlane->glTpw.Multiply(); + + // Draw cube + if(menu_drawcube) + { + DrawCube(menu_cubesize); + } + + // Draw grid plane + if(menu_drawgrid) + { + DrawPlane(menu_ngrid,menu_sizegrid); + } + + glPopMatrix(); + } + } + } + + + } + + pangolin::FinishFrame(); + usleep(mT*1000); + } + +} + +void ViewerAR::SetImagePose(const cv::Mat &im, const cv::Mat &Tcw, const int &status, const vector &vKeys, const vector &vMPs) +{ + unique_lock lock(mMutexPoseImage); + mImage = im.clone(); + mTcw = Tcw.clone(); + mStatus = status; + mvKeys = vKeys; + mvMPs = vMPs; +} + +void ViewerAR::GetImagePose(cv::Mat &im, cv::Mat &Tcw, int &status, std::vector &vKeys, std::vector &vMPs) +{ + unique_lock lock(mMutexPoseImage); + im = mImage.clone(); + Tcw = mTcw.clone(); + status = mStatus; + vKeys = mvKeys; + vMPs = mvMPs; +} + +void ViewerAR::LoadCameraPose(const cv::Mat &Tcw) +{ + if(!Tcw.empty()) + { + pangolin::OpenGlMatrix M; + + M.m[0] = Tcw.at(0,0); + M.m[1] = Tcw.at(1,0); + M.m[2] = Tcw.at(2,0); + M.m[3] = 0.0; + + M.m[4] = Tcw.at(0,1); + M.m[5] = Tcw.at(1,1); + M.m[6] = Tcw.at(2,1); + M.m[7] = 0.0; + + M.m[8] = Tcw.at(0,2); + M.m[9] = Tcw.at(1,2); + M.m[10] = Tcw.at(2,2); + M.m[11] = 0.0; + + M.m[12] = Tcw.at(0,3); + M.m[13] = Tcw.at(1,3); + M.m[14] = Tcw.at(2,3); + M.m[15] = 1.0; + + M.Load(); + } +} + +void ViewerAR::PrintStatus(const int &status, const bool &bLocMode, cv::Mat &im) +{ + if(!bLocMode) + { + switch(status) + { + case 1: {AddTextToImage("SLAM NOT INITIALIZED",im,255,0,0); break;} + case 2: {AddTextToImage("SLAM ON",im,0,255,0); break;} + case 3: {AddTextToImage("SLAM LOST",im,255,0,0); break;} + } + } + else + { + switch(status) + { + case 1: {AddTextToImage("SLAM NOT INITIALIZED",im,255,0,0); break;} + case 2: {AddTextToImage("LOCALIZATION ON",im,0,255,0); break;} + case 3: {AddTextToImage("LOCALIZATION LOST",im,255,0,0); break;} + } + } +} + +void ViewerAR::AddTextToImage(const string &s, cv::Mat &im, const int r, const int g, const int b) +{ + int l = 10; + //imText.rowRange(im.rows-imText.rows,imText.rows) = cv::Mat::zeros(textSize.height+10,im.cols,im.type()); + cv::putText(im,s,cv::Point(l,im.rows-l),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(255,255,255),2,8); + cv::putText(im,s,cv::Point(l-1,im.rows-l),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(255,255,255),2,8); + cv::putText(im,s,cv::Point(l+1,im.rows-l),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(255,255,255),2,8); + cv::putText(im,s,cv::Point(l-1,im.rows-(l-1)),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(255,255,255),2,8); + cv::putText(im,s,cv::Point(l,im.rows-(l-1)),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(255,255,255),2,8); + cv::putText(im,s,cv::Point(l+1,im.rows-(l-1)),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(255,255,255),2,8); + cv::putText(im,s,cv::Point(l-1,im.rows-(l+1)),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(255,255,255),2,8); + cv::putText(im,s,cv::Point(l,im.rows-(l+1)),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(255,255,255),2,8); + cv::putText(im,s,cv::Point(l+1,im.rows-(l+1)),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(255,255,255),2,8); + + cv::putText(im,s,cv::Point(l,im.rows-l),cv::FONT_HERSHEY_PLAIN,1.5,cv::Scalar(r,g,b),2,8); +} + +void ViewerAR::DrawImageTexture(pangolin::GlTexture &imageTexture, cv::Mat &im) +{ + if(!im.empty()) + { + imageTexture.Upload(im.data,GL_RGB,GL_UNSIGNED_BYTE); + imageTexture.RenderToViewportFlipY(); + } +} + +void ViewerAR::DrawCube(const float &size,const float x, const float y, const float z) +{ + pangolin::OpenGlMatrix M = 
pangolin::OpenGlMatrix::Translate(-x,-size-y,-z); + glPushMatrix(); + M.Multiply(); + pangolin::glDrawColouredCube(-size,size); + glPopMatrix(); +} + +void ViewerAR::DrawPlane(Plane *pPlane, int ndivs, float ndivsize) +{ + glPushMatrix(); + pPlane->glTpw.Multiply(); + DrawPlane(ndivs,ndivsize); + glPopMatrix(); +} + +void ViewerAR::DrawPlane(int ndivs, float ndivsize) +{ + // Plane parallel to x-z at origin with normal -y + const float minx = -ndivs*ndivsize; + const float minz = -ndivs*ndivsize; + const float maxx = ndivs*ndivsize; + const float maxz = ndivs*ndivsize; + + + glLineWidth(2); + glColor3f(0.7f,0.7f,1.0f); + glBegin(GL_LINES); + + for(int n = 0; n<=2*ndivs; n++) + { + glVertex3f(minx+ndivsize*n,0,minz); + glVertex3f(minx+ndivsize*n,0,maxz); + glVertex3f(minx,0,minz+ndivsize*n); + glVertex3f(maxx,0,minz+ndivsize*n); + } + + glEnd(); + +} + +void ViewerAR::DrawTrackedPoints(const std::vector &vKeys, const std::vector &vMPs, cv::Mat &im) +{ + const int N = vKeys.size(); + + + for(int i=0; i &vMPs, const int iterations) +{ + // Retrieve 3D points + vector vPoints; + vPoints.reserve(vMPs.size()); + vector vPointMP; + vPointMP.reserve(vMPs.size()); + + for(size_t i=0; iObservations()>5) + { + vPoints.push_back(pMP->GetWorldPos()); + vPointMP.push_back(pMP); + } + } + } + + const int N = vPoints.size(); + + if(N<50) + return NULL; + + + // Indices for minimum set selection + vector vAllIndices; + vAllIndices.reserve(N); + vector vAvailableIndices; + + for(int i=0; i bestvDist; + + //RANSAC + for(int n=0; n(3,0); + const float b = vt.at(3,1); + const float c = vt.at(3,2); + const float d = vt.at(3,3); + + vector vDistances(N,0); + + const float f = 1.0f/sqrt(a*a+b*b+c*c+d*d); + + for(int i=0; i(0)*a+vPoints[i].at(1)*b+vPoints[i].at(2)*c+d)*f; + } + + vector vSorted = vDistances; + sort(vSorted.begin(),vSorted.end()); + + int nth = max((int)(0.2*N),20); + const float medianDist = vSorted[nth]; + + if(medianDist vbInliers(N,false); + int nInliers = 0; + for(int i=0; i vInlierMPs(nInliers,NULL); + int nin = 0; + for(int i=0; i &vMPs, const cv::Mat &Tcw):mvMPs(vMPs),mTcw(Tcw.clone()) +{ + rang = -3.14f/2+((float)rand()/RAND_MAX)*3.14f; + Recompute(); +} + +void Plane::Recompute() +{ + const int N = mvMPs.size(); + + // Recompute plane with all points + cv::Mat A = cv::Mat(N,4,CV_32F); + A.col(3) = cv::Mat::ones(N,1,CV_32F); + + o = cv::Mat::zeros(3,1,CV_32F); + + int nPoints = 0; + for(int i=0; iisBad()) + { + cv::Mat Xw = pMP->GetWorldPos(); + o+=Xw; + A.row(nPoints).colRange(0,3) = Xw.t(); + nPoints++; + } + } + A.resize(nPoints); + + cv::Mat u,w,vt; + cv::SVDecomp(A,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV); + + float a = vt.at(3,0); + float b = vt.at(3,1); + float c = vt.at(3,2); + + o = o*(1.0f/nPoints); + const float f = 1.0f/sqrt(a*a+b*b+c*c); + + // Compute XC just the first time + if(XC.empty()) + { + cv::Mat Oc = -mTcw.colRange(0,3).rowRange(0,3).t()*mTcw.rowRange(0,3).col(3); + XC = Oc-o; + } + + if((XC.at(0)*a+XC.at(1)*b+XC.at(2)*c)>0) + { + a=-a; + b=-b; + c=-c; + } + + const float nx = a*f; + const float ny = b*f; + const float nz = c*f; + + n = (cv::Mat_(3,1)<(3,1) << 0.0f, 1.0f, 0.0f); + + cv::Mat v = up.cross(n); + const float sa = cv::norm(v); + const float ca = up.dot(n); + const float ang = atan2(sa,ca); + Tpw = cv::Mat::eye(4,4,CV_32F); + + + Tpw.rowRange(0,3).colRange(0,3) = ExpSO3(v*ang/sa)*ExpSO3(up*rang); + o.copyTo(Tpw.col(3).rowRange(0,3)); + + glTpw.m[0] = Tpw.at(0,0); + glTpw.m[1] = Tpw.at(1,0); + glTpw.m[2] = Tpw.at(2,0); + glTpw.m[3] = 0.0; + + glTpw.m[4] 
= Tpw.at(0,1); + glTpw.m[5] = Tpw.at(1,1); + glTpw.m[6] = Tpw.at(2,1); + glTpw.m[7] = 0.0; + + glTpw.m[8] = Tpw.at(0,2); + glTpw.m[9] = Tpw.at(1,2); + glTpw.m[10] = Tpw.at(2,2); + glTpw.m[11] = 0.0; + + glTpw.m[12] = Tpw.at(0,3); + glTpw.m[13] = Tpw.at(1,3); + glTpw.m[14] = Tpw.at(2,3); + glTpw.m[15] = 1.0; + +} + +Plane::Plane(const float &nx, const float &ny, const float &nz, const float &ox, const float &oy, const float &oz) +{ + n = (cv::Mat_(3,1)<(3,1)<(3,1) << 0.0f, 1.0f, 0.0f); + + cv::Mat v = up.cross(n); + const float s = cv::norm(v); + const float c = up.dot(n); + const float a = atan2(s,c); + Tpw = cv::Mat::eye(4,4,CV_32F); + const float rang = -3.14f/2+((float)rand()/RAND_MAX)*3.14f; + cout << rang; + Tpw.rowRange(0,3).colRange(0,3) = ExpSO3(v*a/s)*ExpSO3(up*rang); + o.copyTo(Tpw.col(3).rowRange(0,3)); + + glTpw.m[0] = Tpw.at(0,0); + glTpw.m[1] = Tpw.at(1,0); + glTpw.m[2] = Tpw.at(2,0); + glTpw.m[3] = 0.0; + + glTpw.m[4] = Tpw.at(0,1); + glTpw.m[5] = Tpw.at(1,1); + glTpw.m[6] = Tpw.at(2,1); + glTpw.m[7] = 0.0; + + glTpw.m[8] = Tpw.at(0,2); + glTpw.m[9] = Tpw.at(1,2); + glTpw.m[10] = Tpw.at(2,2); + glTpw.m[11] = 0.0; + + glTpw.m[12] = Tpw.at(0,3); + glTpw.m[13] = Tpw.at(1,3); + glTpw.m[14] = Tpw.at(2,3); + glTpw.m[15] = 1.0; +} + +} diff --git a/Examples/ROS/ORB_SLAM2/src/AR/ViewerAR.h b/Examples/ROS/ORB_SLAM2/src/AR/ViewerAR.h new file mode 100644 index 0000000..cd173ca --- /dev/null +++ b/Examples/ROS/ORB_SLAM2/src/AR/ViewerAR.h @@ -0,0 +1,120 @@ +/** +* This file is part of ORB-SLAM2. +* +* Copyright (C) 2014-2016 Raúl Mur-Artal (University of Zaragoza) +* For more information see +* +* ORB-SLAM2 is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. +* +* ORB-SLAM2 is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with ORB-SLAM2. If not, see . +*/ + + +#ifndef VIEWERAR_H +#define VIEWERAR_H + +#include +#include +#include +#include +#include"../../../include/System.h" + +namespace ORB_SLAM2 +{ + +class Plane +{ +public: + Plane(const std::vector &vMPs, const cv::Mat &Tcw); + Plane(const float &nx, const float &ny, const float &nz, const float &ox, const float &oy, const float &oz); + + void Recompute(); + + //normal + cv::Mat n; + //origin + cv::Mat o; + //arbitrary orientation along normal + float rang; + //transformation from world to the plane + cv::Mat Tpw; + pangolin::OpenGlMatrix glTpw; + //MapPoints that define the plane + std::vector mvMPs; + //camera pose when the plane was first observed (to compute normal direction) + cv::Mat mTcw, XC; +}; + +class ViewerAR +{ +public: + ViewerAR(); + + void SetFPS(const float fps){ + mFPS = fps; + mT=1e3/fps; + } + + void SetSLAM(ORB_SLAM2::System* pSystem){ + mpSystem = pSystem; + } + + // Main thread function. 
+ void Run(); + + void SetCameraCalibration(const float &fx_, const float &fy_, const float &cx_, const float &cy_){ + fx = fx_; fy = fy_; cx = cx_; cy = cy_; + } + + void SetImagePose(const cv::Mat &im, const cv::Mat &Tcw, const int &status, + const std::vector &vKeys, const std::vector &vMPs); + + void GetImagePose(cv::Mat &im, cv::Mat &Tcw, int &status, + std::vector &vKeys, std::vector &vMPs); + +private: + + //SLAM + ORB_SLAM2::System* mpSystem; + + void PrintStatus(const int &status, const bool &bLocMode, cv::Mat &im); + void AddTextToImage(const std::string &s, cv::Mat &im, const int r=0, const int g=0, const int b=0); + void LoadCameraPose(const cv::Mat &Tcw); + void DrawImageTexture(pangolin::GlTexture &imageTexture, cv::Mat &im); + void DrawCube(const float &size, const float x=0, const float y=0, const float z=0); + void DrawPlane(int ndivs, float ndivsize); + void DrawPlane(Plane* pPlane, int ndivs, float ndivsize); + void DrawTrackedPoints(const std::vector &vKeys, const std::vector &vMPs, cv::Mat &im); + + Plane* DetectPlane(const cv::Mat Tcw, const std::vector &vMPs, const int iterations=50); + + // frame rate + float mFPS, mT; + float fx,fy,cx,cy; + + // Last processed image and computed pose by the SLAM + std::mutex mMutexPoseImage; + cv::Mat mTcw; + cv::Mat mImage; + int mStatus; + std::vector mvKeys; + std::vector mvMPs; + +}; + + +} + + +#endif // VIEWERAR_H + + diff --git a/Examples/ROS/ORB_SLAM2/src/AR/ros_mono_ar.cc b/Examples/ROS/ORB_SLAM2/src/AR/ros_mono_ar.cc new file mode 100644 index 0000000..58d204a --- /dev/null +++ b/Examples/ROS/ORB_SLAM2/src/AR/ros_mono_ar.cc @@ -0,0 +1,169 @@ +/** +* This file is part of ORB-SLAM2. +* +* Copyright (C) 2014-2016 Raúl Mur-Artal (University of Zaragoza) +* For more information see +* +* ORB-SLAM2 is free software: you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation, either version 3 of the License, or +* (at your option) any later version. +* +* ORB-SLAM2 is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with ORB-SLAM2. If not, see . +*/ + + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include"../../../include/System.h" + +#include"ViewerAR.h" + +using namespace std; + + +ORB_SLAM2::ViewerAR viewerAR; +bool bRGB = true; + +cv::Mat K; +cv::Mat DistCoef; + + +class ImageGrabber +{ +public: + ImageGrabber(ORB_SLAM2::System* pSLAM):mpSLAM(pSLAM){} + + void GrabImage(const sensor_msgs::ImageConstPtr& msg); + + ORB_SLAM2::System* mpSLAM; +}; + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "Mono"); + ros::start(); + + if(argc != 3) + { + cerr << endl << "Usage: rosrun ORB_SLAM2 Mono path_to_vocabulary path_to_settings" << endl; + ros::shutdown(); + return 1; + } + + // Create SLAM system. It initializes all system threads and gets ready to process frames. + ORB_SLAM2::System SLAM(argv[1],argv[2],ORB_SLAM2::System::MONOCULAR,false); + + + cout << endl << endl; + cout << "-----------------------" << endl; + cout << "Augmented Reality Demo" << endl; + cout << "1) Translate the camera to initialize SLAM." << endl; + cout << "2) Look at a planar region and translate the camera." 
<< endl; + cout << "3) Press Insert Cube to place a virtual cube in the plane. " << endl; + cout << endl; + cout << "You can place several cubes in different planes." << endl; + cout << "-----------------------" << endl; + cout << endl; + + + viewerAR.SetSLAM(&SLAM); + + ImageGrabber igb(&SLAM); + + ros::NodeHandle nodeHandler; + ros::Subscriber sub = nodeHandler.subscribe("/camera/image_raw", 1, &ImageGrabber::GrabImage,&igb); + + + cv::FileStorage fSettings(argv[2], cv::FileStorage::READ); + bRGB = static_cast((int)fSettings["Camera.RGB"]); + float fps = fSettings["Camera.fps"]; + viewerAR.SetFPS(fps); + + float fx = fSettings["Camera.fx"]; + float fy = fSettings["Camera.fy"]; + float cx = fSettings["Camera.cx"]; + float cy = fSettings["Camera.cy"]; + + viewerAR.SetCameraCalibration(fx,fy,cx,cy); + + K = cv::Mat::eye(3,3,CV_32F); + K.at(0,0) = fx; + K.at(1,1) = fy; + K.at(0,2) = cx; + K.at(1,2) = cy; + + DistCoef = cv::Mat::zeros(4,1,CV_32F); + DistCoef.at(0) = fSettings["Camera.k1"]; + DistCoef.at(1) = fSettings["Camera.k2"]; + DistCoef.at(2) = fSettings["Camera.p1"]; + DistCoef.at(3) = fSettings["Camera.p2"]; + const float k3 = fSettings["Camera.k3"]; + if(k3!=0) + { + DistCoef.resize(5); + DistCoef.at(4) = k3; + } + + thread tViewer = thread(&ORB_SLAM2::ViewerAR::Run,&viewerAR); + + ros::spin(); + + // Stop all threads + SLAM.Shutdown(); + + // Save camera trajectory + SLAM.SaveKeyFrameTrajectoryTUM("KeyFrameTrajectory.txt"); + + ros::shutdown(); + + return 0; +} + +void ImageGrabber::GrabImage(const sensor_msgs::ImageConstPtr& msg) +{ + // Copy the ros image message to cv::Mat. + cv_bridge::CvImageConstPtr cv_ptr; + try + { + cv_ptr = cv_bridge::toCvShare(msg); + } + catch (cv_bridge::Exception& e) + { + ROS_ERROR("cv_bridge exception: %s", e.what()); + return; + } + cv::Mat im = cv_ptr->image.clone(); + cv::Mat imu; + cv::Mat Tcw = mpSLAM->TrackMonocular(cv_ptr->image,cv_ptr->header.stamp.toSec()); + int state = mpSLAM->GetTrackingState(); + vector vMPs = mpSLAM->GetTrackedMapPoints(); + vector vKeys = mpSLAM->GetTrackedKeyPointsUn(); + + cv::undistort(im,imu,K,DistCoef); + + if(bRGB) + viewerAR.SetImagePose(imu,Tcw,state,vKeys,vMPs); + else + { + cv::cvtColor(imu,imu,CV_RGB2BGR); + viewerAR.SetImagePose(imu,Tcw,state,vKeys,vMPs); + } +} + + diff --git a/Examples/ROS/ORB_VIO/src/MsgSync/MsgSynchronizer.cpp b/Examples/ROS/ORB_VIO/src/MsgSync/MsgSynchronizer.cpp index 5a87cee..1886f49 100644 --- a/Examples/ROS/ORB_VIO/src/MsgSync/MsgSynchronizer.cpp +++ b/Examples/ROS/ORB_VIO/src/MsgSync/MsgSynchronizer.cpp @@ -1,4 +1,5 @@ #include "MsgSynchronizer.h" +#include "../../../src/IMU/configparam.h" namespace ORBVIO { @@ -18,6 +19,9 @@ MsgSynchronizer::~MsgSynchronizer() bool MsgSynchronizer::getRecentMsgs(sensor_msgs::ImageConstPtr &imgmsg, std::vector &vimumsgs) { + //unique_lock lock2(_mutexIMUQueue); + unique_lock lock1(_mutexImageQueue); + if(_status == NOTINIT || _status == INIT) { //ROS_INFO("synchronizer not inited"); @@ -54,6 +58,12 @@ bool MsgSynchronizer::getRecentMsgs(sensor_msgs::ImageConstPtr &imgmsg, std::vec imsg = _imageMsgQueue.front(); bmsg = _imuMsgQueue.back(); + // Wait imu messages in case communication block + if(imsg->header.stamp.toSec()-_imageMsgDelaySec > bmsg->header.stamp.toSec()) + { + return false; + } + // Check dis-continuity, tolerance 3 seconds if(imsg->header.stamp.toSec()-_imageMsgDelaySec > bmsg->header.stamp.toSec() + 3.0) { @@ -91,7 +101,10 @@ bool MsgSynchronizer::getRecentMsgs(sensor_msgs::ImageConstPtr &imgmsg, std::vec { // add to imu message 
vector vimumsgs.push_back(tmpimumsg); - _imuMsgQueue.pop(); + { + unique_lock lock(_mutexIMUQueue); + _imuMsgQueue.pop(); + } _dataUnsyncCnt = 0; } @@ -121,6 +134,8 @@ bool MsgSynchronizer::getRecentMsgs(sensor_msgs::ImageConstPtr &imgmsg, std::vec void MsgSynchronizer::addImuMsg(const sensor_msgs::ImuConstPtr &imumsg) { + unique_lock lock(_mutexIMUQueue); + if(_imageMsgDelaySec>=0) { _imuMsgQueue.push(imumsg); if(_status == NOTINIT) @@ -155,6 +170,8 @@ void MsgSynchronizer::addImuMsg(const sensor_msgs::ImuConstPtr &imumsg) void MsgSynchronizer::addImageMsg(const sensor_msgs::ImageConstPtr &imgmsg) { + unique_lock lock(_mutexImageQueue); + if(_imageMsgDelaySec >= 0) { // if there's no imu messages, don't add image if(_status == NOTINIT) @@ -187,6 +204,13 @@ void MsgSynchronizer::addImageMsg(const sensor_msgs::ImageConstPtr &imgmsg) } } + + if(ORB_SLAM2::ConfigParam::GetRealTimeFlag()) + { + // Ignore earlier frames + if(_imageMsgQueue.size()>2) + _imageMsgQueue.pop(); + } } diff --git a/Examples/ROS/ORB_VIO/src/MsgSync/MsgSynchronizer.h b/Examples/ROS/ORB_VIO/src/MsgSync/MsgSynchronizer.h index b51102e..9c032a9 100644 --- a/Examples/ROS/ORB_VIO/src/MsgSync/MsgSynchronizer.h +++ b/Examples/ROS/ORB_VIO/src/MsgSync/MsgSynchronizer.h @@ -6,6 +6,7 @@ #include #include #include +#include using namespace std; @@ -43,7 +44,9 @@ class MsgSynchronizer private: double _imageMsgDelaySec; // image message delay to imu message, in seconds + std::mutex _mutexImageQueue; std::queue _imageMsgQueue; + std::mutex _mutexIMUQueue; std::queue _imuMsgQueue; ros::Time _imuMsgTimeStart; Status _status; diff --git a/Examples/ROS/ORB_VIO/src/ros_vio.cc b/Examples/ROS/ORB_VIO/src/ros_vio.cc index 952686d..350bfd1 100644 --- a/Examples/ROS/ORB_VIO/src/ros_vio.cc +++ b/Examples/ROS/ORB_VIO/src/ros_vio.cc @@ -75,9 +75,13 @@ int main(int argc, char **argv) double imageMsgDelaySec = config.GetImageDelayToIMU(); ORBVIO::MsgSynchronizer msgsync(imageMsgDelaySec); ros::NodeHandle nh; - //ros::Subscriber imagesub = nh.subscribe("/image_repub", 200, &ORBVIO::MsgSynchronizer::imageCallback, &msgsync); - //ros::Subscriber imusub = nh.subscribe("/imu_repub", 200, &ORBVIO::MsgSynchronizer::imuCallback, &msgsync); - + ros::Subscriber imagesub; + ros::Subscriber imusub; + if(ORB_SLAM2::ConfigParam::GetRealTimeFlag()) + { + imagesub = nh.subscribe(config._imageTopic, /*200*/ 2, &ORBVIO::MsgSynchronizer::imageCallback, &msgsync); + imusub = nh.subscribe(config._imuTopic, 200, &ORBVIO::MsgSynchronizer::imuCallback, &msgsync); + } sensor_msgs::ImageConstPtr imageMsg; std::vector vimuMsg; @@ -85,6 +89,11 @@ int main(int argc, char **argv) const double g3dm = 9.80665; const bool bAccMultiply98 = config.GetAccMultiply9p8(); + ros::Rate r(1000); + + if(!ORB_SLAM2::ConfigParam::GetRealTimeFlag()) + { + ROS_WARN("Run not-realtime"); std::string bagfile = config._bagfile; rosbag::Bag bag; @@ -97,8 +106,6 @@ int main(int argc, char **argv) topics.push_back(imutopic); rosbag::View view(bag, rosbag::TopicQuery(topics)); - - ros::Rate r(1000); //while(ros::ok()) BOOST_FOREACH(rosbag::MessageInstance const m, view) { @@ -183,7 +190,75 @@ int main(int argc, char **argv) break; } + } + else + { + ROS_WARN("Run realtime"); + while(ros::ok()) + { + bool bdata = msgsync.getRecentMsgs(imageMsg,vimuMsg); + + if(bdata) + { + std::vector vimuData; + //ROS_INFO("image time: %.3f",imageMsg->header.stamp.toSec()); + for(unsigned int i=0;ilinear_acceleration.x; + double ay = imuMsg->linear_acceleration.y; + double az = imuMsg->linear_acceleration.z; + 
if(bAccMultiply98) + { + ax *= g3dm; + ay *= g3dm; + az *= g3dm; + } + ORB_SLAM2::IMUData imudata(imuMsg->angular_velocity.x,imuMsg->angular_velocity.y,imuMsg->angular_velocity.z, + ax,ay,az,imuMsg->header.stamp.toSec()); + vimuData.push_back(imudata); + //ROS_INFO("imu time: %.3f",vimuMsg[i]->header.stamp.toSec()); + } + // Copy the ros image message to cv::Mat. + cv_bridge::CvImageConstPtr cv_ptr; + try + { + cv_ptr = cv_bridge::toCvShare(imageMsg); + } + catch (cv_bridge::Exception& e) + { + ROS_ERROR("cv_bridge exception: %s", e.what()); + return -1; + } + + // Consider delay of image message + //SLAM.TrackMonocular(cv_ptr->image, imageMsg->header.stamp.toSec() - imageMsgDelaySec); + cv::Mat im = cv_ptr->image.clone(); + { + // To test relocalization + static double startT=-1; + if(startT<0) + startT = imageMsg->header.stamp.toSec(); + // Below to test relocalization + //if(imageMsg->header.stamp.toSec() > startT+25 && imageMsg->header.stamp.toSec() < startT+25.3) + if(imageMsg->header.stamp.toSec() < startT+config._testDiscardTime) + im = cv::Mat::zeros(im.rows,im.cols,im.type()); + } + SLAM.TrackMonoVI(im, vimuData, imageMsg->header.stamp.toSec() - imageMsgDelaySec); + //SLAM.TrackMonoVI(cv_ptr->image, vimuData, imageMsg->header.stamp.toSec() - imageMsgDelaySec); + //cv::imshow("image",cv_ptr->image); + + } + + //cv::waitKey(1); + + ros::spinOnce(); + r.sleep(); + if(!ros::ok()) + break; + } + } // ImageGrabber igb(&SLAM); diff --git a/README.md b/README.md index 80c28b2..b477fcf 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,24 @@ +Realtime branch. + +Modifications: +1. Added real-time code. +2. Changed the optimization vertex usage from PVR to PR-V, as in the paper. +3. Tried an inverse depth parameterization in LocalBA; it can be changed back to the PR-V version in the code of LocalMapping.cc. +4. Some other related changes. + +May 28 + +***** An implementation of [Visual Inertial ORBSLAM](https://arxiv.org/abs/1610.05949) based on [ORB-SLAM2](https://github.com/raulmur/ORB_SLAM2) -Not bug-free. Not real-time. Just try the basic ideas of Visual Inertial SLAM in above paper. Welcome to improve it together! +Not bug-free. It just tries the basic ideas of Visual Inertial SLAM from the above paper. You are welcome to improve it together! Build with `build.sh`. Modify the path in `config/euroc.yaml`. Tested on [EuRoc](http://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) ROS bag data with ROS launch file `Examples/ROS/ORB_VIO/launch/testeuroc.launch`. Files in `pyplotscripts` can be used to visualize some results. -Tested on sensors: [UI-1221-LE](https://en.ids-imaging.com/store/ui-1221le.html) and [3DM-GX3-25](http://www.microstrain.com/inertial/3dm-gx3-25), see video on [Youtube (real-time)](https://youtu.be/AUWBpSj-XtA) or [YouKu](http://v.youku.com/v_show/id_XMTkxMjgzMzMwOA). +Tested on sensors: [UI-1221-LE](https://en.ids-imaging.com/store/ui-1221le.html) and [3DM-GX3-25](http://www.microstrain.com/inertial/3dm-gx3-25); see videos on [Youtube (real-time)](https://youtu.be/AUWBpSj-XtA) or [YouKu](http://v.youku.com/v_show/id_XMTkxMjgzMzMwOA). Please contact `jp08-at-foxmail-dot-com` for more details. @@ -18,23 +29,29 @@ Below is the primary README of ORBSLAM2. # ORB-SLAM2 **Authors:** [Raul Mur-Artal](http://webdiis.unizar.es/~raulmur/), [Juan D. Tardos](http://webdiis.unizar.es/~jdtardos/), [J. M. M. 
Montiel](http://webdiis.unizar.es/~josemari/) and [Dorian Galvez-Lopez](http://doriangalvez.com/) ([DBoW2](https://github.com/dorian3d/DBoW2)) -**Current version:** 1.0.0 +**13 Jan 2017**: OpenCV 3 and Eigen 3.3 are now supported. + +**22 Dec 2016**: Added AR demo (see section 7). ORB-SLAM2 is a real-time SLAM library for **Monocular**, **Stereo** and **RGB-D** cameras that computes the camera trajectory and a sparse 3D reconstruction (in the stereo and RGB-D case with true scale). It is able to detect loops and relocalize the camera in real time. We provide examples to run the SLAM system in the [KITTI dataset](http://www.cvlibs.net/datasets/kitti/eval_odometry.php) as stereo or monocular, in the [TUM dataset](http://vision.in.tum.de/data/datasets/rgbd-dataset) as RGB-D or monocular, and in the [EuRoC dataset](http://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) as stereo or monocular. We also provide a ROS node to process live monocular, stereo or RGB-D streams. **The library can be compiled without ROS**. ORB-SLAM2 provides a GUI to change between a *SLAM Mode* and *Localization Mode*, see section 9 of this document. ORB-SLAM2 + + -###Related Publications: +### Related Publications: [Monocular] Raúl Mur-Artal, J. M. M. Montiel and Juan D. Tardós. **ORB-SLAM: A Versatile and Accurate Monocular SLAM System**. *IEEE Transactions on Robotics,* vol. 31, no. 5, pp. 1147-1163, 2015. (**2015 IEEE Transactions on Robotics Best Paper Award**). **[PDF](http://webdiis.unizar.es/~raulmur/MurMontielTardosTRO15.pdf)**. -[Stereo and RGB-D] Raúl Mur-Artal and Juan D. Tardós. **ORB-SLAM2: an Open-Source SLAM System for Monocular, Stereo and RGB-D Cameras**. *ArXiv preprint arXiv:1610.06475* **[PDF](https://128.84.21.199/pdf/1610.06475.pdf)**. +[Stereo and RGB-D] Raúl Mur-Artal and Juan D. Tardós. **ORB-SLAM2: an Open-Source SLAM System for Monocular, Stereo and RGB-D Cameras**. *IEEE Transactions on Robotics,* vol. 33, no. 5, pp. 1255-1262, 2017. **[PDF](https://128.84.21.199/pdf/1610.06475.pdf)**. [DBoW2 Place Recognizer] Dorian Gálvez-López and Juan D. Tardós. **Bags of Binary Words for Fast Place Recognition in Image Sequences**. *IEEE Transactions on Robotics,* vol. 28, no. 5, pp. 1188-1197, 2012. **[PDF](http://doriangalvez.com/php/dl.php?dlp=GalvezTRO12.pdf)** -#1. License +# 1. License ORB-SLAM2 is released under a [GPLv3 license](https://github.com/raulmur/ORB_SLAM2/blob/master/License-gpl.txt). For a list of all code/library dependencies (and associated licenses), please see [Dependencies.md](https://github.com/raulmur/ORB_SLAM2/blob/master/Dependencies.md). @@ -58,12 +75,16 @@ if you use ORB-SLAM2 (Stereo or RGB-D) in an academic work, please cite: @article{murORB2, title={{ORB-SLAM2}: an Open-Source {SLAM} System for Monocular, Stereo and {RGB-D} Cameras}, author={Mur-Artal, Ra\'ul and Tard\'os, Juan D.}, - journal={arXiv preprint arXiv:1610.06475}, - year={2016} + journal={IEEE Transactions on Robotics}, + volume={33}, + number={5}, + pages={1255--1262}, + doi = {10.1109/TRO.2017.2705103}, + year={2017} } -#2. Prerequisites -We have tested the library in **Ubuntu 12.04** and **14.04**, but it should be easy to compile in other platforms. A powerful computer (e.g. i7) will ensure real-time performance and provide more stable and accurate results. +# 2. Prerequisites +We have tested the library in **Ubuntu 12.04**, **14.04** and **16.04**, but it should be easy to compile in other platforms. A powerful computer (e.g. 
i7) will ensure real-time performance and provide more stable and accurate results. ## C++11 or C++0x Compiler We use the new thread and chrono functionalities of C++11. @@ -72,7 +93,7 @@ We use the new thread and chrono functionalities of C++11. We use [Pangolin](https://github.com/stevenlovegrove/Pangolin) for visualization and user interface. Dowload and install instructions can be found at: https://github.com/stevenlovegrove/Pangolin. ## OpenCV -We use [OpenCV](http://opencv.org) to manipulate images and features. Dowload and install instructions can be found at: http://opencv.org. **Required at leat 2.4.3. Tested with OpenCV 2.4.11**. +We use [OpenCV](http://opencv.org) to manipulate images and features. Dowload and install instructions can be found at: http://opencv.org. **Required at leat 2.4.3. Tested with OpenCV 2.4.11 and OpenCV 3.2**. ## Eigen3 Required by g2o (see below). Download and install instructions can be found at: http://eigen.tuxfamily.org. **Required at least 3.1.0**. @@ -90,7 +111,7 @@ We use modified versions of the [DBoW2](https://github.com/dorian3d/DBoW2) libra ## ROS (optional) We provide some examples to process the live input of a monocular, stereo or RGB-D camera using [ROS](ros.org). Building these examples is optional. In case you want to use ROS, a version Hydro or newer is needed. -#3. Building ORB-SLAM2 library and TUM/KITTI examples +# 3. Building ORB-SLAM2 library and examples Clone the repository: ``` @@ -106,7 +127,7 @@ chmod +x build.sh This will create **libORB_SLAM2.so** at *lib* folder and the executables **mono_tum**, **mono_kitti**, **rgbd_tum**, **stereo_kitti**, **mono_euroc** and **stereo_euroc** in *Examples* folder. -#4. Monocular Examples +# 4. Monocular Examples ## TUM Dataset @@ -139,7 +160,7 @@ This will create **libORB_SLAM2.so** at *lib* folder and the executables **mono ./Examples/Monocular/mono_euroc Vocabulary/ORBvoc.txt Examples/Monocular/EuRoC.yaml PATH_TO_SEQUENCE/cam0/data Examples/Monocular/EuRoC_TimeStamps/SEQUENCE.txt ``` -#5. Stereo Examples +# 5. Stereo Examples ## KITTI Dataset @@ -162,7 +183,7 @@ This will create **libORB_SLAM2.so** at *lib* folder and the executables **mono ./Examples/Stereo/stereo_euroc Vocabulary/ORBvoc.txt Examples/Stereo/EuRoC.yaml PATH_TO_SEQUENCE/cam0/data PATH_TO_SEQUENCE/cam1/data Examples/Stereo/EuRoC_TimeStamps/SEQUENCE.txt ``` -#6. RGB-D Example +# 6. RGB-D Example ## TUM Dataset @@ -180,22 +201,20 @@ This will create **libORB_SLAM2.so** at *lib* folder and the executables **mono ./Examples/RGB-D/rgbd_tum Vocabulary/ORBvoc.txt Examples/RGB-D/TUMX.yaml PATH_TO_SEQUENCE_FOLDER ASSOCIATIONS_FILE ``` -#7. ROS Examples +# 7. ROS Examples -### Building the nodes for mono, stereo and RGB-D +### Building the nodes for mono, monoAR, stereo and RGB-D 1. Add the path including *Examples/ROS/ORB_SLAM2* to the ROS_PACKAGE_PATH environment variable. Open .bashrc file and add at the end the following line. Replace PATH by the folder where you cloned ORB_SLAM2: ``` export ROS_PACKAGE_PATH=${ROS_PACKAGE_PATH}:PATH/ORB_SLAM2/Examples/ROS ``` -2. Go to *Examples/ROS/ORB_SLAM2* folder and execute: +2. Execute `build_ros.sh` script: ``` - mkdir build - cd build - cmake .. -DROS_BUILD_TYPE=Release - make -j + chmod +x build_ros.sh + ./build_ros.sh ``` ### Running Monocular Node @@ -205,6 +224,14 @@ For a monocular input from topic `/camera/image_raw` run node ORB_SLAM2/Mono. 
Yo rosrun ORB_SLAM2 Mono PATH_TO_VOCABULARY PATH_TO_SETTINGS_FILE ``` +### Running Monocular Augmented Reality Demo +This is a demo of augmented reality where you can use an interface to insert virtual cubes in planar regions of the scene. +The node reads images from topic `/camera/image_raw`. + + ``` + rosrun ORB_SLAM2 MonoAR PATH_TO_VOCABULARY PATH_TO_SETTINGS_FILE + ``` + ### Running Stereo Node For a stereo input from topic `/camera/left/image_raw` and `/camera/right/image_raw` run node ORB_SLAM2/Stereo. You will need to provide the vocabulary file and a settings file. If you **provide rectification matrices** (see Examples/Stereo/EuRoC.yaml example), the node will recitify the images online, **otherwise images must be pre-rectified**. @@ -234,10 +261,10 @@ For an RGB-D input from topics `/camera/rgb/image_raw` and `/camera/depth_regist rosrun ORB_SLAM2 RGBD PATH_TO_VOCABULARY PATH_TO_SETTINGS_FILE ``` -#8. Processing your own sequences +# 8. Processing your own sequences You will need to create a settings file with the calibration of your camera. See the settings file provided for the TUM and KITTI datasets for monocular, stereo and RGB-D cameras. We use the calibration model of OpenCV. See the examples to learn how to create a program that makes use of the ORB-SLAM2 library and how to pass images to the SLAM system. Stereo input must be synchronized and rectified. RGB-D input must be synchronized and depth registered. -#9. SLAM and Localization Modes +# 9. SLAM and Localization Modes You can change between the *SLAM* and *Localization mode* using the GUI of the map viewer. ### SLAM Mode diff --git a/Thirdparty/DBoW2/CMakeLists.txt b/Thirdparty/DBoW2/CMakeLists.txt index 4da56b2..0eb5126 100644 --- a/Thirdparty/DBoW2/CMakeLists.txt +++ b/Thirdparty/DBoW2/CMakeLists.txt @@ -24,7 +24,13 @@ set(SRCS_DUTILS DUtils/Random.cpp DUtils/Timestamp.cpp) -find_package(OpenCV REQUIRED) +find_package(OpenCV 3.0 QUIET) +if(NOT OpenCV_FOUND) + find_package(OpenCV 2.4.3 QUIET) + if(NOT OpenCV_FOUND) + message(FATAL_ERROR "OpenCV > 2.4.3 not found.") + endif() +endif() set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) diff --git a/Thirdparty/g2o/g2o/core/optimization_algorithm_gauss_newton.cpp b/Thirdparty/g2o/g2o/core/optimization_algorithm_gauss_newton.cpp index 04ca445..4360e14 100644 --- a/Thirdparty/g2o/g2o/core/optimization_algorithm_gauss_newton.cpp +++ b/Thirdparty/g2o/g2o/core/optimization_algorithm_gauss_newton.cpp @@ -60,7 +60,10 @@ namespace g2o { if (globalStats) { globalStats->timeResiduals = get_monotonic_time()-t; } - + // ADD BY wangjing + double preChi2 = _optimizer->activeRobustChi2(); + // END ADD + if (iteration == 0 && !online) { // built up the CCS structure, here due to easy time measure ok = _solver->buildStructure(); if (! ok) { @@ -86,6 +89,15 @@ namespace g2o { if (globalStats) { globalStats->timeUpdate = get_monotonic_time()-t; } + + // ADD BY wangjing + _optimizer->computeActiveErrors(); + double afterChi2 = _optimizer->activeRobustChi2(); + + if(fabs(preChi2 - afterChi2)<1e-3/*1e-6*/) + return Terminate; + // END ADD + if (ok) return OK; else diff --git a/build_ros.sh b/build_ros.sh new file mode 100755 index 0000000..86b7c14 --- /dev/null +++ b/build_ros.sh @@ -0,0 +1,7 @@ +echo "Building ROS nodes" + +cd Examples/ROS/ORB_SLAM2 +mkdir build +cd build +cmake .. 
-DROS_BUILD_TYPE=Release +make -j diff --git a/config/euroc.yaml b/config/euroc.yaml index a7efee2..46b85f0 100644 --- a/config/euroc.yaml +++ b/config/euroc.yaml @@ -1,5 +1,10 @@ %YAML:1.0 +# 1: realtime, 0: non-realtime +test.RealTime: 1 +# Time for visual-inertial initialization +test.VINSInitTime: 15.0 + # Modify test.InitVIOTmpPath and bagfile to the correct path # Path to save tmp files/results test.InitVIOTmpPath: "/home/jp/opensourcecode/OpenSourceORBVIO/tmp/" @@ -17,8 +22,8 @@ test.DiscardTime: 0 #bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/MH_04_difficult.bag" #bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/MH_05_difficult.bag" #bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/MH_02_easy.bag" -#bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/MH_01_easy.bag" -bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/V2_02_medium.bag" +bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/MH_01_easy.bag" +#bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/V2_02_medium.bag" #bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/V2_01_easy.bag" #bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/V1_02_medium.bag" #bagfile: "/media/jp/JingpangPassport/3dataset/EuRoC-VIO/un_restamped/V1_01_easy.bag" @@ -47,7 +52,7 @@ Camera.Tbc: 0.0, 0.0, 0.0, 1.0] # Local Window size -LocalMapping.LocalWindowSize: 10 +LocalMapping.LocalWindowSize: 20 #-------------------------------------------------------------------------------------------- # Camera Parameters. Adjust them! diff --git a/include/LocalMapping.h b/include/LocalMapping.h index 26cd543..b31811d 100644 --- a/include/LocalMapping.h +++ b/include/LocalMapping.h @@ -28,6 +28,7 @@ #include "KeyFrameDatabase.h" #include +#include "IMU/configparam.h" namespace ORB_SLAM2 @@ -47,6 +48,7 @@ class LocalMapping void AddToLocalWindow(KeyFrame* pKF); void DeleteBadInLocalWindow(void); + void VINSInitThread(void); bool TryInitVIO(void); bool GetVINSInited(void); void SetVINSInited(bool flag); @@ -55,16 +57,29 @@ class LocalMapping void SetFirstVINSInited(bool flag); cv::Mat GetGravityVec(void); + cv::Mat GetRwiInit(void); bool GetMapUpdateFlagForTracking(); void SetMapUpdateFlagInTracking(bool bflag); KeyFrame* GetMapUpdateKF(); + const KeyFrame* GetCurrentKF(void) const {return mpCurrentKeyFrame;} + + std::mutex mMutexUpdatingInitPoses; + bool GetUpdatingInitPoses(void); + void SetUpdatingInitPoses(bool flag); + + std::mutex mMutexInitGBAFinish; + bool mbInitGBAFinish; + bool GetFlagInitGBAFinish() { unique_lock lock(mMutexInitGBAFinish); return mbInitGBAFinish; } + void SetFlagInitGBAFinish(bool flag) { unique_lock lock(mMutexInitGBAFinish); mbInitGBAFinish = flag; } + protected: double mnStartTime; bool mbFirstTry; double mnVINSInitScale; cv::Mat mGravityVec; // gravity vector in world frame + cv::Mat mRwiInit; std::mutex mMutexVINSInitFlag; bool mbVINSInited; @@ -79,6 +94,13 @@ class LocalMapping bool mbMapUpdateFlagForTracking; KeyFrame* mpMapUpdateKF; + bool mbUpdatingInitPoses; + + std::mutex mMutexCopyInitKFs; + bool mbCopyInitKFs; + bool GetFlagCopyInitKFs() { unique_lock lock(mMutexCopyInitKFs); return mbCopyInitKFs; } + void SetFlagCopyInitKFs(bool flag) { unique_lock lock(mMutexCopyInitKFs); mbCopyInitKFs = flag; } + public: LocalMapping(Map* pMap, const float bMonocular, ConfigParam* pParams); diff --git a/include/Map.h b/include/Map.h index 31ee8f8..779c386 100644 --- 
a/include/Map.h +++ b/include/Map.h @@ -56,6 +56,8 @@ class Map void EraseMapPoint(MapPoint* pMP); void EraseKeyFrame(KeyFrame* pKF); void SetReferenceMapPoints(const std::vector &vpMPs); + void InformNewBigChange(); + int GetLastBigChangeIdx(); std::vector GetAllKeyFrames(); std::vector GetAllMapPoints(); @@ -83,6 +85,9 @@ class Map long unsigned int mnMaxKFid; + // Index related to a big change in the map (loop closure, global BA) + int mnBigChangeIdx; + std::mutex mMutexMap; }; diff --git a/include/MapPoint.h b/include/MapPoint.h index 4c7c336..f2554fb 100644 --- a/include/MapPoint.h +++ b/include/MapPoint.h @@ -35,6 +35,12 @@ class KeyFrame; class Map; class Frame; +class cmpKeyFrameId{ +public: + bool operator() (const KeyFrame* a, const KeyFrame* b) const ; +}; + +typedef std::map mapMapPointObs; class MapPoint { @@ -49,7 +55,7 @@ class MapPoint cv::Mat GetNormal(); KeyFrame* GetReferenceKeyFrame(); - std::map GetObservations(); + mapMapPointObs/*std::map*/ GetObservations(); int Observations(); void AddObservation(KeyFrame* pKF,size_t idx); @@ -119,7 +125,7 @@ class MapPoint cv::Mat mWorldPos; // Keyframes observing the point and associated index in keyframe - std::map mObservations; + mapMapPointObs/*std::map*/ mObservations; // Mean viewing direction cv::Mat mNormalVector; diff --git a/include/Optimizer.h b/include/Optimizer.h index 484af6c..8779db8 100644 --- a/include/Optimizer.h +++ b/include/Optimizer.h @@ -37,6 +37,11 @@ class LoopClosing; class Optimizer { public: + void static LocalBAPRVIDP(KeyFrame *pKF, const std::list &lLocalKeyFrames, bool* pbStopFlag, Map* pMap, cv::Mat& gw, LocalMapping* pLM=NULL); + + void static GlobalBundleAdjustmentNavStatePRV(Map* pMap, const cv::Mat& gw, int nIterations, bool* pbStopFlag, const unsigned long nLoopKF, const bool bRobust); + void static LocalBundleAdjustmentNavStatePRV(KeyFrame *pKF, const std::list &lLocalKeyFrames, bool* pbStopFlag, Map* pMap, cv::Mat& gw, LocalMapping* pLM=NULL); + void static GlobalBundleAdjustmentNavState(Map* pMap, const cv::Mat& gw, int nIterations, bool* pbStopFlag, const unsigned long nLoopKF, const bool bRobust); int static PoseOptimization(Frame *pFrame, KeyFrame* pLastKF, const IMUPreintegrator& imupreint, const cv::Mat& gw, const bool& bComputeMarg=false); @@ -47,6 +52,7 @@ class Optimizer Vector3d static OptimizeInitialGyroBias(const std::list &lLocalKeyFrames); Vector3d static OptimizeInitialGyroBias(const std::vector &vLocalKeyFrames); Vector3d static OptimizeInitialGyroBias(const std::vector &vFrames); + Vector3d static OptimizeInitialGyroBias(const vector& vTwc, const vector& vImuPreInt); void static LocalBundleAdjustment(KeyFrame *pKF, const std::list &lLocalKeyFrames, bool* pbStopFlag, Map* pMap, LocalMapping* pLM=NULL); diff --git a/include/System.h b/include/System.h index 43085c5..f431437 100644 --- a/include/System.h +++ b/include/System.h @@ -89,6 +89,10 @@ class System // This resumes local mapping thread and performs SLAM again. 
void DeactivateLocalizationMode(); + // Returns true if there have been a big map change (loop closure, global BA) + // since last call to this function + bool MapChanged(); + // Reset the system (clear map) void Reset(); @@ -119,6 +123,12 @@ class System // SaveMap(const string &filename); // LoadMap(const string &filename); + // Information from most recent processed frame + // You can call this right after TrackMonocular (or stereo or RGBD) + int GetTrackingState(); + std::vector GetTrackedMapPoints(); + std::vector GetTrackedKeyPointsUn(); + private: // Input sensor @@ -157,6 +167,8 @@ class System std::thread* mptLoopClosing; std::thread* mptViewer; + std::thread* mptLocalMappingVIOInit; + // Reset flag std::mutex mMutexReset; bool mbReset; @@ -165,6 +177,12 @@ class System std::mutex mMutexMode; bool mbActivateLocalizationMode; bool mbDeactivateLocalizationMode; + + // Tracking state + int mTrackingState; + std::vector mTrackedMapPoints; + std::vector mTrackedKeyPointsUn; + std::mutex mMutexState; }; }// namespace ORB_SLAM diff --git a/pyplotscripts/plotnavstate.py b/pyplotscripts/plotnavstate.py index 1edd4bd..d35cdbb 100644 --- a/pyplotscripts/plotnavstate.py +++ b/pyplotscripts/plotnavstate.py @@ -22,21 +22,21 @@ bg = NS[:,11:14]; ba = NS[:,14:17]; -dbg = NS[:,17:20]; -dba = NS[:,20:23]; +#dbg = NS[:,17:20]; +#dba = NS[:,20:23]; fig2 = plt.figure(2); -p21, =plt.plot(time-time[0],(bg+dbg)[:,0]); -p22, =plt.plot(time-time[0],(bg+dbg)[:,1]); -p23, =plt.plot(time-time[0],(bg+dbg)[:,2]); +p21, =plt.plot(time-time[0],(bg)[:,0]); +p22, =plt.plot(time-time[0],(bg)[:,1]); +p23, =plt.plot(time-time[0],(bg)[:,2]); plt.legend([p21,p22,p23],["bgx","bgy","bgz"]); plt.title('gyr bias'); plt.savefig(filepath+'biasgyr.eps', format="eps"); fig3 = plt.figure(3); -p31, =plt.plot(time-time[0],(ba+dba)[:,0]); -p32, =plt.plot(time-time[0],(ba+dba)[:,1]); -p33, =plt.plot(time-time[0],(ba+dba)[:,2]); +p31, =plt.plot(time-time[0],(ba)[:,0]); +p32, =plt.plot(time-time[0],(ba)[:,1]); +p33, =plt.plot(time-time[0],(ba)[:,2]); plt.legend([p31,p32,p33],["bax","bay","baz"]); plt.title('acc bias'); plt.savefig(filepath+'biasacc.eps', format="eps"); diff --git a/src/FrameDrawer.cc b/src/FrameDrawer.cc index 02aadba..6b046cf 100644 --- a/src/FrameDrawer.cc +++ b/src/FrameDrawer.cc @@ -94,7 +94,8 @@ cv::Mat FrameDrawer::DrawFrame() mnTracked=0; mnTrackedVO=0; const float r = 5; - for(int i=0;i(0); + Vector3d upd_Phi = dPR.segment<3>(3); + + _P += upd_P; + _R = _R * Sophus::SO3::exp(upd_Phi); +} + +void NavState::IncSmallV(Vector3d dV) +{ + _V += dV; +} + void NavState::IncSmallPVR(Vector9d updatePVR) { // Update P/V/R in NavState @@ -90,7 +104,7 @@ void NavState::IncSmallPVR(Vector9d updatePVR) Matrix3d R = Get_R().matrix(); // position - _P += R * upd_P; + _P += upd_P; // velocity _V += upd_V; // rotation diff --git a/src/IMU/NavState.h b/src/IMU/NavState.h index 2032c49..2deca59 100644 --- a/src/IMU/NavState.h +++ b/src/IMU/NavState.h @@ -45,6 +45,8 @@ class NavState // incremental addition, delta = [dP, dV, dPhi, dBa, dBg] void IncSmall(Vector15d delta); void IncSmallPVR(Vector9d dPVR); + void IncSmallPR(Vector6d dPR); + void IncSmallV(Vector3d dV); void IncSmallBias(Vector6d dBias); // normalize rotation quaternion. !!! important!!! 
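Editor's note (not part of the patch): the `IncSmallPR`/`IncSmallV` members added to `NavState` above implement the PR-V split mentioned at the top of this change set: the old 9-dof PVR increment is divided into a 6-dof pose part `[dP, dPhi]` (position updated additively, rotation updated on the SO(3) manifold) and a separate 3-dof velocity part. Below is a minimal self-contained sketch of that update rule, assuming Eigen and the templated Sophus `SO3d`; the names `StatePRV`, `incSmallPR`, and `incSmallV` are illustrative, not from the repository.

```cpp
#include <Eigen/Dense>
#include <sophus/so3.hpp>  // assumed header; the repository bundles its own Sophus::SO3

// Illustrative mirror of NavState, restricted to the PR-V split used in this branch.
struct StatePRV
{
    Eigen::Vector3d P;  // position
    Sophus::SO3d    R;  // rotation
    Eigen::Vector3d V;  // velocity

    // 6-dof increment [dP, dPhi] consumed by the pose (PR) vertex.
    void incSmallPR(const Eigen::Matrix<double, 6, 1>& dPR)
    {
        P += dPR.head<3>();                         // position: additive update
        R  = R * Sophus::SO3d::exp(dPR.tail<3>());  // rotation: right-multiplied exponential update
    }

    // 3-dof increment consumed by the separate velocity (V) vertex.
    void incSmallV(const Eigen::Vector3d& dV)
    {
        V += dV;
    }
};
```

This mirrors `NavState::IncSmallPR`/`IncSmallV` in `src/IMU/NavState.cpp` above; the `VertexNavStatePR` and `VertexNavStateV` vertices used in `g2otypes.cpp` operate on exactly this split.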
diff --git a/src/IMU/configparam.cpp b/src/IMU/configparam.cpp index f7a28ca..e676d6a 100644 --- a/src/IMU/configparam.cpp +++ b/src/IMU/configparam.cpp @@ -12,6 +12,8 @@ int ConfigParam::_LocalWindowSize = 10; double ConfigParam::_ImageDelayToIMU = 0; bool ConfigParam::_bAccMultiply9p8 = false; std::string ConfigParam::_tmpFilePath = ""; +double ConfigParam::_nVINSInitTime = 15; +bool ConfigParam::_bRealTime = true; ConfigParam::ConfigParam(std::string configfile) { @@ -20,6 +22,9 @@ ConfigParam::ConfigParam(std::string configfile) std::cout<> _tmpFilePath; std::cout<<"save tmp file in "<<_tmpFilePath<(_vertices[0]); + const VertexNavStatePR* vPR0 = static_cast(_vertices[1]); + const VertexNavStatePR* vPRi = static_cast(_vertices[2]); + const VertexNavStatePR* vPRcb = static_cast(_vertices[3]); + + // + const Matrix3d R0 = vPR0->estimate().Get_RotMatrix(); + const Vector3d t0 = vPR0->estimate().Get_P(); + const Matrix3d Ri = vPRi->estimate().Get_RotMatrix(); + const Vector3d ti = vPRi->estimate().Get_P(); + const Matrix3d Rcb = vPRcb->estimate().Get_RotMatrix(); + const Vector3d tcb = vPRcb->estimate().Get_P(); + + // point inverse depth in reference KF + double rho = vIDP->estimate(); + if(rho<1e-6) + { + std::cerr<<"1. rho = "<(_vertices[0]); + const VertexNavStatePR* vPR0 = static_cast(_vertices[1]); + const VertexNavStatePR* vPRi = static_cast(_vertices[2]); + const VertexNavStatePR* vPRcb = static_cast(_vertices[3]); + + // + const Matrix3d R0 = vPR0->estimate().Get_RotMatrix(); + const Vector3d t0 = vPR0->estimate().Get_P(); + const Matrix3d Ri = vPRi->estimate().Get_RotMatrix(); + const Matrix3d RiT = Ri.transpose(); + const Vector3d ti = vPRi->estimate().Get_P(); + const Matrix3d Rcb = vPRcb->estimate().Get_RotMatrix(); + const Vector3d tcb = vPRcb->estimate().Get_P(); + + // point inverse depth in reference KF + double rho = vIDP->estimate(); + if(rho<1e-6) + { + std::cerr<<"2. rho = "< Maux; + Maux.setZero(); + Maux(0,0) = fx; + Maux(0,1) = 0; + Maux(0,2) = -x/z*fx; + Maux(1,0) = 0; + Maux(1,1) = fy; + Maux(1,2) = -y/z*fy; + Matrix Jpi = Maux/z; + + // 1. J_e_rho, 2x1 + Vector3d J_pi_rho = Rcic0*(-d*P0); + _jacobianOplus[0] = -Jpi * J_pi_rho; + + // 2. J_e_pr0, 2x6 + Matrix3d J_pi_t0 = Rcb*RiT; + Matrix3d J_pi_r0 = -Rcic0*SO3::hat(P0-tcb)*Rcb; + Matrix J_pi_pr0; + J_pi_pr0.topLeftCorner(3,3) = J_pi_t0; + J_pi_pr0.topRightCorner(3,3) = J_pi_r0; + _jacobianOplus[1] = -Jpi * J_pi_pr0; + + // 3. J_e_pri, 2x6 + Matrix3d J_pi_ti = -Rcb*RiT; + Vector3d taux = RiT* ( R0*Rcb.transpose()*(P0 - tcb) + t0 - ti ); + Matrix3d J_pi_ri = Rcb*SO3::hat(taux); + Matrix J_pi_pri; + J_pi_pri.topLeftCorner(3,3) = J_pi_ti; + J_pi_pri.topRightCorner(3,3) = J_pi_ri; + _jacobianOplus[2] = -Jpi * J_pi_pri; + + // 4. 
J_e_prcb, 2x6 + Matrix3d J_pi_tcb = Matrix3d::Identity() - Rcic0; + Matrix3d J_pi_rcb = - SO3::hat( Rcic0*(P0-tcb) )*Rcb + + Rcic0* SO3::hat( P0-tcb )*Rcb + - Rcb* SO3::hat( RiT*(t0-ti) ); + Matrix J_pi_prcb; + J_pi_prcb.topLeftCorner(3,3) = J_pi_tcb; + J_pi_prcb.topRightCorner(3,3) = J_pi_rcb; + _jacobianOplus[3] = -Jpi * J_pi_prcb; +} + +void EdgeNavStatePRV::computeError() +{ + // + const VertexNavStatePR* vPRi = static_cast(_vertices[0]); + const VertexNavStatePR* vPRj = static_cast(_vertices[1]); + const VertexNavStateV* vVi = static_cast(_vertices[2]); + const VertexNavStateV* vVj = static_cast(_vertices[3]); + const VertexNavStateBias* vBiasi = static_cast(_vertices[4]); + + // terms need to computer error in vertex i, except for bias error + const NavState& NSPRi = vPRi->estimate(); + const Vector3d Pi = NSPRi.Get_P(); + const Sophus::SO3 Ri = NSPRi.Get_R(); + + const NavState& NSVi = vVi->estimate(); + const Vector3d Vi = NSVi.Get_V(); + // Bias from the bias vertex + const NavState& NSBiasi = vBiasi->estimate(); + const Vector3d dBgi = NSBiasi.Get_dBias_Gyr(); + const Vector3d dBai = NSBiasi.Get_dBias_Acc(); + + // terms need to computer error in vertex j, except for bias error + const NavState& NSPRj = vPRj->estimate(); + const Vector3d Pj = NSPRj.Get_P(); + const Sophus::SO3 Rj = NSPRj.Get_R(); + + const NavState& NSVj = vVj->estimate(); + const Vector3d Vj = NSVj.Get_V(); + + // IMU Preintegration measurement + const IMUPreintegrator& M = _measurement; + const double dTij = M.getDeltaTime(); // Delta Time + const double dT2 = dTij*dTij; + const Vector3d dPij = M.getDeltaP(); // Delta Position pre-integration measurement + const Vector3d dVij = M.getDeltaV(); // Delta Velocity pre-integration measurement + const Sophus::SO3 dRij = Sophus::SO3(M.getDeltaR()); // Delta Rotation pre-integration measurement + + // tmp variable, transpose of Ri + const Sophus::SO3 RiT = Ri.inverse(); + // residual error of Delta Position measurement + const Vector3d rPij = RiT*(Pj - Pi - Vi*dTij - 0.5*GravityVec*dT2) + - (dPij + M.getJPBiasg()*dBgi + M.getJPBiasa()*dBai); // this line includes correction term of bias change. 
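+    // (Editor's note, added for clarity; these comments only restate what this function computes.)
+    // Stacked 9-dof residual of this edge, in position / rotation / velocity order:
+    //   rPij   = Ri^T * (Pj - Pi - Vi*dTij - 0.5*GravityVec*dTij^2) - (dPij + JPBiasg*dBgi + JPBiasa*dBai)
+    //   rPhiij = Log( (dRij * Exp(JRBiasg*dBgi))^-1 * Ri^T * Rj )
+    //   rVij   = Ri^T * (Vj - Vi - GravityVec*dTij) - (dVij + JVBiasg*dBgi + JVBiasa*dBai)
+    // where the J*Bias* matrices are the preintegration Jacobians that correct the measured deltas for the bias change dBgi/dBai.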
+ // residual error of Delta Velocity measurement + const Vector3d rVij = RiT*(Vj - Vi - GravityVec*dTij) + - (dVij + M.getJVBiasg()*dBgi + M.getJVBiasa()*dBai); //this line includes correction term of bias change + // residual error of Delta Rotation measurement + const Sophus::SO3 dR_dbg = Sophus::SO3::exp(M.getJRBiasg()*dBgi); + const Sophus::SO3 rRij = (dRij * dR_dbg).inverse() * RiT * Rj; + const Vector3d rPhiij = rRij.log(); + + + Vector9d err; // typedef Matrix ErrorVector; ErrorVector _error; D=9 + err.setZero(); + + // 9-Dim error vector order: + // position-velocity-rotation + // rPij - rPhiij - rVij + err.segment<3>(0) = rPij; // position error + err.segment<3>(3) = rPhiij; // rotation phi error + err.segment<3>(6) = rVij; // velocity error + + _error = err; +} + +void EdgeNavStatePRV::linearizeOplus() +{ + // + const VertexNavStatePR* vPRi = static_cast(_vertices[0]); + const VertexNavStatePR* vPRj = static_cast(_vertices[1]); + const VertexNavStateV* vVi = static_cast(_vertices[2]); + const VertexNavStateV* vVj = static_cast(_vertices[3]); + const VertexNavStateBias* vBiasi = static_cast(_vertices[4]); + + // terms need to computer error in vertex i, except for bias error + const NavState& NSPRi = vPRi->estimate(); + const Vector3d Pi = NSPRi.Get_P(); + const Matrix3d Ri = NSPRi.Get_RotMatrix(); + + const NavState& NSVi = vVi->estimate(); + const Vector3d Vi = NSVi.Get_V(); + // Bias from the bias vertex + const NavState& NSBiasi = vBiasi->estimate(); + const Vector3d dBgi = NSBiasi.Get_dBias_Gyr(); + //const Vector3d dBai = NSBiasi.Get_dBias_Acc(); + + // terms need to computer error in vertex j, except for bias error + const NavState& NSPRj = vPRj->estimate(); + const Vector3d Pj = NSPRj.Get_P(); + const Matrix3d Rj = NSPRj.Get_RotMatrix(); + + const NavState& NSVj = vVj->estimate(); + const Vector3d Vj = NSVj.Get_V(); + + // IMU Preintegration measurement + const IMUPreintegrator& M = _measurement; + const double dTij = M.getDeltaTime(); // Delta Time + const double dT2 = dTij*dTij; + + // some temp variable + //Matrix3d I3x3 = Matrix3d::Identity(); // I_3x3 + Matrix3d O3x3 = Matrix3d::Zero(); // 0_3x3 + Matrix3d RiT = Ri.transpose(); // Ri^T + Matrix3d RjT = Rj.transpose(); // Rj^T + Vector3d rPhiij = _error.segment<3>(3); // residual of rotation, rPhiij + Matrix3d JrInv_rPhi = Sophus::SO3::JacobianRInv(rPhiij); // inverse right jacobian of so3 term #rPhiij# + Matrix3d J_rPhi_dbg = M.getJRBiasg(); // jacobian of preintegrated rotation-angle to gyro bias i + + // 1. + // increment is the same as Forster 15'RSS + // pi = pi + dpi, pj = pj + dpj + // Ri = Ri*Exp(dphi_i), Rj = Rj*Exp(dphi_j) + // vi = vi + dvi, vj = vj + dvj + // Note: the optimized bias term is the 'delta bias' + // dBgi = dBgi + dbgi_update, dBgj = dBgj + dbgj_update + // dBai = dBai + dbai_update, dBaj = dBaj + dbaj_update + + // 2. + // 9-Dim error vector order in PVR: + // position-velocity-rotation + // rPij - rPhiij - rVij + // Jacobian row order: + // J_rPij_xxx + // J_rPhiij_xxx + // J_rVij_xxx + + // 3. + // order in 'update_' in PR + // Vertex_i : dPi, dPhi_i + // Vertex_j : dPj, dPhi_j + // 6-Dim error vector order in Bias: + // dBiasg_i - dBiasa_i + + // Jacobians: + // dP/dPR0, dP/dPR1, dP/dV0, dP/dV1, dP/dBias0 + // dR/dPR0, dR/dPR1, dR/dV0, dR/dV1, dR/dBias0 + // dV/dPR0, dV/dPR1, dV/dV0, dV/dV1, dV/dBias0 + + // 4. 
PR0 & V0 + // For Vertex_PR_i, J [dP;dR;dV] / [dP0 dR0] + Matrix JPRi; + JPRi.setZero(); + // J_rPij_xxx_i for Vertex_PR_i + JPRi.block<3,3>(0,0) = - RiT; //J_rP_dpi + JPRi.block<3,3>(0,3) = Sophus::SO3::hat( RiT*(Pj-Pi-Vi*dTij-0.5*GravityVec*dT2) ); //J_rP_dPhi_i + // J_rPhiij_xxx_i for Vertex_PR_i + Matrix3d ExprPhiijTrans = Sophus::SO3::exp(rPhiij).inverse().matrix(); + Matrix3d JrBiasGCorr = Sophus::SO3::JacobianR(J_rPhi_dbg*dBgi); + JPRi.block<3,3>(3,0) = O3x3; //dpi + JPRi.block<3,3>(3,3) = - JrInv_rPhi * RjT * Ri; //dphi_i + // J_rVij_xxx_i for Vertex_PVR_i + JPRi.block<3,3>(6,0) = O3x3; //dpi + JPRi.block<3,3>(6,3) = Sophus::SO3::hat( RiT*(Vj-Vi-GravityVec*dTij) ); //dphi_i + + // For Vertex_V_i, J [dP;dR;dV] / dV0 + Matrix JVi; + JVi.setZero(); + JVi.block<3,3>(0,0) = - RiT*dTij; //J_rP_dvi + JVi.block<3,3>(3,0) = O3x3; //rR_dvi + JVi.block<3,3>(6,0) = - RiT; //rV_dvi + + // 5. PR1 & V1 + // For Vertex_PR_j, J [dP;dR;dV] / [dP1 dR1] + Matrix JPRj; + JPRj.setZero(); + // J_rPij_xxx_j for Vertex_PR_j + JPRj.block<3,3>(0,0) = RiT; //rP_dpj + JPRj.block<3,3>(0,3) = O3x3; //rP_dphi_j + // J_rPhiij_xxx_j for Vertex_PR_j + JPRj.block<3,3>(3,0) = O3x3; //rR_dpj + JPRj.block<3,3>(3,3) = JrInv_rPhi; //rR_dphi_j + // J_rVij_xxx_j for Vertex_PR_j + JPRj.block<3,3>(6,0) = O3x3; //rV_dpj + JPRj.block<3,3>(6,3) = O3x3; //rV_dphi_j + + // For Vertex_V_i, J [dP;dR;dV] / dV1 + Matrix JVj; + JVj.setZero(); + JVj.block<3,3>(0,0) = O3x3; //rP_dvj + JVj.block<3,3>(3,0) = O3x3; //rR_dvj + JVj.block<3,3>(6,0) = RiT; //rV_dvj + + // 6. + // For Vertex_Bias_i + Matrix JBiasi; + JBiasi.setZero(); + + // 5.1 + // J_rPij_xxx_j for Vertex_Bias_i + JBiasi.block<3,3>(0,0) = - M.getJPBiasg(); //J_rP_dbgi + JBiasi.block<3,3>(0,3) = - M.getJPBiasa(); //J_rP_dbai + + // J_rPhiij_xxx_j for Vertex_Bias_i + JBiasi.block<3,3>(3,0) = - JrInv_rPhi * ExprPhiijTrans * JrBiasGCorr * J_rPhi_dbg; //dbg_i + JBiasi.block<3,3>(3,3) = O3x3; //dba_i + + // J_rVij_xxx_j for Vertex_Bias_i + JBiasi.block<3,3>(6,0) = - M.getJVBiasg(); //dbg_i + JBiasi.block<3,3>(6,3) = - M.getJVBiasa(); //dba_i + + // Evaluate _jacobianOplus + _jacobianOplus[0] = JPRi; + _jacobianOplus[1] = JPRj; + _jacobianOplus[2] = JVi; + _jacobianOplus[3] = JVj; + _jacobianOplus[4] = JBiasi; +} + +void EdgeNavStatePRPointXYZ::linearizeOplus() +{ + const VertexSBAPointXYZ* vPoint = static_cast(_vertices[0]); + const VertexNavStatePR* vNavState = static_cast(_vertices[1]); + + const NavState& ns = vNavState->estimate(); + Matrix3d Rwb = ns.Get_RotMatrix(); + Vector3d Pwb = ns.Get_P(); + const Vector3d& Pw = vPoint->estimate(); + + Matrix3d Rcb = Rbc.transpose(); + Vector3d Pc = Rcb * Rwb.transpose() * (Pw - Pwb) - Rcb * Pbc; + + double x = Pc[0]; + double y = Pc[1]; + double z = Pc[2]; + + // Jacobian of camera projection + Matrix Maux; + Maux.setZero(); + Maux(0,0) = fx; + Maux(0,1) = 0; + Maux(0,2) = -x/z*fx; + Maux(1,0) = 0; + Maux(1,1) = fy; + Maux(1,2) = -y/z*fy; + Matrix Jpi = Maux/z; + + // error = obs - pi( Pc ) + // Pw <- Pw + dPw, for Point3D + // Rwb <- Rwb*exp(dtheta), for NavState.R + // Pwb <- Pwb + dPwb, for NavState.P + + // Jacobian of error w.r.t Pw + _jacobianOplusXi = - Jpi * Rcb * Rwb.transpose(); + + // Jacobian of Pc/error w.r.t dPwb + //Matrix3d J_Pc_dPwb = -Rcb*Rbw; + Matrix JdPwb = - Jpi * (-Rcb*Rwb.transpose()); + // Jacobian of Pc/error w.r.t dRwb + Vector3d Paux = Rcb*Rwb.transpose()*(Pw-Pwb); + //Matrix3d J_Pc_dRwb = Sophus::SO3::hat(Paux) * Rcb; + Matrix JdRwb = - Jpi * (Sophus::SO3::hat(Paux) * Rcb); + + // Jacobian of Pc w.r.t 
NavState PR + // order in 'update_': dP, dPhi + Matrix JNavState = Matrix::Zero(); + JNavState.block<2,3>(0,0) = JdPwb; + JNavState.block<2,3>(0,3) = JdRwb; + + // Jacobian of error w.r.t NavState + _jacobianOplusXj = JNavState; +} + +void EdgeNavStatePRPointXYZOnlyPose::linearizeOplus() +{ + const VertexNavStatePR* vNSPR = static_cast(_vertices[0]); + + const NavState& ns = vNSPR->estimate(); + Matrix3d Rwb = ns.Get_RotMatrix(); + Vector3d Pwb = ns.Get_P(); + //const Vector3d& Pw = vPoint->estimate(); + + Matrix3d Rcb = Rbc.transpose(); + Vector3d Pc = Rcb * Rwb.transpose() * (Pw - Pwb) - Rcb * Pbc; + + double x = Pc[0]; + double y = Pc[1]; + double z = Pc[2]; + + // Jacobian of camera projection + Matrix Maux; + Maux.setZero(); + Maux(0,0) = fx; + Maux(0,1) = 0; + Maux(0,2) = -x/z*fx; + Maux(1,0) = 0; + Maux(1,1) = fy; + Maux(1,2) = -y/z*fy; + Matrix Jpi = Maux/z; + + // error = obs - pi( Pc ) + // Pw <- Pw + dPw, for Point3D + // Rwb <- Rwb*exp(dtheta), for NavState.R + // Pwb <- Pwb + dPwb, for NavState.P + + // Jacobian of Pc/error w.r.t dPwb + //Matrix3d J_Pc_dPwb = -Rcb*Rbw; + Matrix JdPwb = - Jpi * (-Rcb*Rwb.transpose()); + // Jacobian of Pc/error w.r.t dRwb + Vector3d Paux = Rcb*Rwb.transpose()*(Pw-Pwb); + //Matrix3d J_Pc_dRwb = Sophus::SO3::hat(Paux) * Rcb; + Matrix JdRwb = - Jpi * (Sophus::SO3::hat(Paux) * Rcb); + + // Jacobian of Pc w.r.t NavStatePR + // order in 'update_': dP, dPhi + Matrix JNavState = Matrix::Zero(); + JNavState.block<2,3>(0,0) = JdPwb; + JNavState.block<2,3>(0,3) = JdRwb; + + // Jacobian of error w.r.t NavStatePR + _jacobianOplusXi = JNavState; +} + + +void EdgeNavStatePriorPRVBias::computeError() +{ + const VertexNavStatePR* vNSPR = static_cast(_vertices[0]); + const VertexNavStateV* vNSV = static_cast(_vertices[1]); + const VertexNavStateBias* vNSBias = static_cast(_vertices[2]); + const NavState& nsPRest = vNSPR->estimate(); + const NavState& nsVest = vNSV->estimate(); + const NavState& nsBiasest = vNSBias->estimate(); + const NavState& nsprior = _measurement; + + // P V R bg+dbg ba+dba + Vector15d err = Vector15d::Zero(); + + // PVR terms + // eP = P_prior - P_est + err.segment<3>(0) = nsprior.Get_P() - nsPRest.Get_P(); + // eR = log(R_prior^-1 * R_est) + err.segment<3>(3) = (nsprior.Get_R().inverse() * nsPRest.Get_R()).log(); + // eV = V_prior - V_est + err.segment<3>(6) = nsprior.Get_V() - nsVest.Get_V(); + + // Bias terms + // eB = Bias_prior - Bias_est + // err_bg = (bg_prior+dbg_prior) - (bg+dbg) + err.segment<3>(9) = (nsprior.Get_BiasGyr() + nsprior.Get_dBias_Gyr()) - (nsBiasest.Get_BiasGyr() + nsBiasest.Get_dBias_Gyr()); + // err_ba = (ba_prior+dba_prior) - (ba+dba) + err.segment<3>(12) = (nsprior.Get_BiasAcc() + nsprior.Get_dBias_Acc()) - (nsBiasest.Get_BiasAcc() + nsBiasest.Get_dBias_Acc()); + + + _error = err; + +} + +void EdgeNavStatePriorPRVBias::linearizeOplus() +{ + // Estimated NavState + //const VertexNavStatePR* vNSPR = static_cast(_vertices[0]); + //const VertexNavStateV* vNSV = static_cast(_vertices[1]); + //const VertexNavStateBias* vNSBias = static_cast(_vertices[2]); + //const NavState& nsPRest = vNSPR->estimate(); + //const NavState& nsVest = vNSV->estimate(); + //const NavState& nsBiasest = vNSBias->estimate(); + //const NavState& nsprior = _measurement; + + Matrix _jacobianOplusPR = Matrix::Zero(); + _jacobianOplusPR.block<3,3>(0,0) = - Matrix3d::Identity();//nsPRest.Get_RotMatrix(); + _jacobianOplusPR.block<3,3>(3,3) = Sophus::SO3::JacobianRInv( _error.segment<3>(3) ); + Matrix _jacobianOplusV = Matrix::Zero(); + 
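The two PR blocks just filled follow directly from the error defined in computeError() above and from the world-frame increment convention used throughout this patch (p <- p + \delta p, R <- R\,\mathrm{Exp}(\delta\phi)). A short derivation:

    e_P = p_{prior} - p_{est},\ \ p_{est} \leftarrow p_{est} + \delta p \;\Rightarrow\; \partial e_P / \partial \delta p = -I_3
    e_R = \mathrm{Log}(R_{prior}^{-1} R_{est}),\ \ R_{est} \leftarrow R_{est}\,\mathrm{Exp}(\delta\phi)
        \Rightarrow\; e_R(\delta\phi) = \mathrm{Log}\big(\mathrm{Exp}(e_R)\,\mathrm{Exp}(\delta\phi)\big) \approx e_R + J_r^{-1}(e_R)\,\delta\phi \;\Rightarrow\; \partial e_R / \partial \delta\phi = J_r^{-1}(e_R)

which is exactly the JacobianRInv block above; the velocity and bias errors are plain differences, so the blocks filled next are -I_3 as well.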
_jacobianOplusV.block<3,3>(6,6) = - Matrix3d::Identity(); + +// Matrix _jacobianOplusPR = Matrix::Zero(); +// _jacobianOplusPR.block<3,3>(0,0) = ( nsprior.Get_R().inverse() * nsPRest.Get_R() ).matrix(); +// _jacobianOplusPR.block<3,3>(3,3) = Sophus::SO3::JacobianRInv( _error.segment<3>(3) ); +// Matrix _jacobianOplusV = Matrix::Zero(); +// _jacobianOplusV.block<3,3>(6,0) = Matrix3d::Identity(); + + Matrix _jacobianOplusBias = Matrix::Zero(); + _jacobianOplusBias.block<3,3>(9,0) = - Matrix3d::Identity(); + _jacobianOplusBias.block<3,3>(12,3) = - Matrix3d::Identity(); + + + _jacobianOplus[0] = _jacobianOplusPR; + _jacobianOplus[1] = _jacobianOplusV; + _jacobianOplus[2] = _jacobianOplusBias; +} + void EdgeNavStatePVR::computeError() { // @@ -101,7 +603,7 @@ void EdgeNavStatePVR::linearizeOplus() double dT2 = dTij*dTij; // some temp variable - Matrix3d I3x3 = Matrix3d::Identity(); // I_3x3 + //Matrix3d I3x3 = Matrix3d::Identity(); // I_3x3 Matrix3d O3x3 = Matrix3d::Zero(); // 0_3x3 Matrix3d RiT = Ri.transpose(); // Ri^T Matrix3d RjT = Rj.transpose(); // Rj^T @@ -111,7 +613,7 @@ void EdgeNavStatePVR::linearizeOplus() // 1. // increment is the same as Forster 15'RSS - // pi = pi + Ri*dpi, pj = pj + Rj*dpj + // pi = pi + dpi, pj = pj + dpj // vi = vi + dvi, vj = vj + dvj // Ri = Ri*Exp(dphi_i), Rj = Rj*Exp(dphi_j) // Note: the optimized bias term is the 'delta bias' @@ -141,7 +643,7 @@ void EdgeNavStatePVR::linearizeOplus() // 4.1 // J_rPij_xxx_i for Vertex_PVR_i - JPVRi.block<3,3>(0,0) = - I3x3; //J_rP_dpi + JPVRi.block<3,3>(0,0) = - RiT; //J_rP_dpi JPVRi.block<3,3>(0,3) = - RiT*dTij; //J_rP_dvi //JPVRi.block<3,3>(0,6) = SO3Calc::skew( RiT*(Pj-Pi-Vi*dTij-0.5*GravityVec*dT2) ); //J_rP_dPhi_i JPVRi.block<3,3>(0,6) = Sophus::SO3::hat( RiT*(Pj-Pi-Vi*dTij-0.5*GravityVec*dT2) ); //J_rP_dPhi_i @@ -171,7 +673,7 @@ void EdgeNavStatePVR::linearizeOplus() // 5.1 // J_rPij_xxx_j for Vertex_PVR_j - JPVRj.block<3,3>(0,0) = RiT*Rj; //dpj + JPVRj.block<3,3>(0,0) = RiT; //dpj JPVRj.block<3,3>(0,3) = O3x3; //dvj JPVRj.block<3,3>(0,6) = O3x3; //dphi_j @@ -249,17 +751,6 @@ void EdgeNavStateBias::linearizeOplus() _jacobianOplusXi = - Matrix::Identity(); _jacobianOplusXj = Matrix::Identity(); -// _jacobianOplusXi.block<3,3>(9,9) = - I3x3; //dbg_i -// _jacobianOplusXi.block<3,3>(9,12)= O3x3; //dba_i -// _jacobianOplusXi.block<3,3>(12,9) = O3x3; //dbg_i -// _jacobianOplusXi.block<3,3>(12,12)= -I3x3; //dba_i -// // 5.5 -// // J_rBiasGi_xxx_j for Vertex_Bias_j -// _jacobianOplusXj.block<3,3>(9,9) = I3x3; //dbg_j -// _jacobianOplusXj.block<3,3>(9,12)= O3x3; //dba_j -// // J_rBiasAi_xxx_j for Vertex_Bias_j -// _jacobianOplusXj.block<3,3>(12,9) = O3x3; //dbg_j -// _jacobianOplusXj.block<3,3>(12,12)= I3x3; //dba_j } void EdgeNavStatePVRPointXYZ::linearizeOplus() @@ -293,14 +784,14 @@ void EdgeNavStatePVRPointXYZ::linearizeOplus() // error = obs - pi( Pc ) // Pw <- Pw + dPw, for Point3D // Rwb <- Rwb*exp(dtheta), for NavState.R - // Pwb <- Pwb + Rwb*dPwb, for NavState.P + // Pwb <- Pwb + dPwb, for NavState.P // Jacobian of error w.r.t Pw _jacobianOplusXi = - Jpi * Rcb * Rwb.transpose(); // Jacobian of Pc/error w.r.t dPwb //Matrix3d J_Pc_dPwb = -Rcb; - Matrix JdPwb = - Jpi * (-Rcb); + Matrix JdPwb = - Jpi * (-Rcb*Rwb.transpose()); // Jacobian of Pc/error w.r.t dRwb Vector3d Paux = Rcb*Rwb.transpose()*(Pw-Pwb); //Matrix3d J_Pc_dRwb = Sophus::SO3::hat(Paux) * Rcb; @@ -349,11 +840,11 @@ void EdgeNavStatePVRPointXYZOnlyPose::linearizeOplus() // error = obs - pi( Pc ) // Pw <- Pw + dPw, for Point3D // Rwb <- Rwb*exp(dtheta), for 
NavState.R - // Pwb <- Pwb + Rwb*dPwb, for NavState.P + // Pwb <- Pwb + dPwb, for NavState.P // Jacobian of Pc/error w.r.t dPwb //Matrix3d J_Pc_dPwb = -Rcb; - Matrix JdPwb = - Jpi * (-Rcb); + Matrix JdPwb = - Jpi * (-Rcb*Rwb.transpose()); // Jacobian of Pc/error w.r.t dRwb Vector3d Paux = Rcb*Rwb.transpose()*(Pw-Pwb); //Matrix3d J_Pc_dRwb = Sophus::SO3::hat(Paux) * Rcb; @@ -414,12 +905,12 @@ void EdgeNavStatePriorPVRBias::computeError() void EdgeNavStatePriorPVRBias::linearizeOplus() { // Estimated NavState - const VertexNavStatePVR* vPVR = static_cast(_vertices[0]); + //const VertexNavStatePVR* vPVR = static_cast(_vertices[0]); //const VertexNavStateBias* vBias = static_cast(_vertices[1]); - const NavState& nsPVRest = vPVR->estimate(); + //const NavState& nsPVRest = vPVR->estimate(); _jacobianOplusXi = Matrix::Zero(); - _jacobianOplusXi.block<3,3>(0,0) = - nsPVRest.Get_RotMatrix(); + _jacobianOplusXi.block<3,3>(0,0) = - Matrix3d::Identity();//nsPVRest.Get_RotMatrix(); _jacobianOplusXi.block<3,3>(3,3) = - Matrix3d::Identity(); _jacobianOplusXi.block<3,3>(6,6) = Sophus::SO3::JacobianRInv( _error.segment<3>(6) ); @@ -477,7 +968,7 @@ void EdgeNavStatePrior::linearizeOplus() { // 1. // increment is the same as Forster 15'RSS - // pi = pi + Ri*dpi + // pi = pi + dpi // vi = vi + dvi // Ri = Ri*Exp(dphi_i) // Note: the optimized bias term is the 'delta bias' @@ -485,16 +976,16 @@ void EdgeNavStatePrior::linearizeOplus() // dBai = dBai + dbai_update // Estimated NavState - const VertexNavState* v = static_cast(_vertices[0]); - const NavState& nsest = v->estimate(); + //const VertexNavState* v = static_cast(_vertices[0]); + //const NavState& nsest = v->estimate(); -// _jacobianOplusXi.block<3,3>(0,0) = nsest.Get_RotMatrix(); +// _jacobianOplusXi.block<3,3>(0,0) = Matrix3d::Identity();//nsest.Get_RotMatrix(); // _jacobianOplusXi.block<3,3>(3,3) = Matrix3d::Identity(); // _jacobianOplusXi.block<3,3>(6,6) = Sophus::SO3::JacobianLInv( _error.segment<3>(6) ) * nsest.Get_RotMatrix(); // _jacobianOplusXi.block<3,3>(9,9) = Matrix3d::Identity(); // _jacobianOplusXi.block<3,3>(12,12) = Matrix3d::Identity(); - _jacobianOplusXi.block<3,3>(0,0) = - nsest.Get_RotMatrix(); + _jacobianOplusXi.block<3,3>(0,0) = - Matrix3d::Identity();//nsest.Get_RotMatrix(); _jacobianOplusXi.block<3,3>(3,3) = - Matrix3d::Identity(); _jacobianOplusXi.block<3,3>(6,6) = Sophus::SO3::JacobianRInv( _error.segment<3>(6) ); _jacobianOplusXi.block<3,3>(9,9) = - Matrix3d::Identity(); @@ -504,282 +995,6 @@ void EdgeNavStatePrior::linearizeOplus() //std::cout<<"prior edge jacobian: "<() -{} - -void VertexGravityW::oplusImpl(const double* update_) -{ - Eigen::Map update(update_); - Vector3d update3 = Vector3d::Zero(); - update3.head(2) = update; - Sophus::SO3 dR = Sophus::SO3::exp(update3); - _estimate = dR*_estimate; -} - -/** - * @brief EdgeNavStateGw::EdgeNavStateGw - */ -EdgeNavStateGw::EdgeNavStateGw() : BaseMultiEdge<15, IMUPreintegrator>() -{ - resize(3); -} - -void EdgeNavStateGw::computeError() -{ - const VertexNavState* vi = static_cast(_vertices[0]); - const VertexNavState* vj = static_cast(_vertices[1]); - const VertexGravityW* vg = static_cast(_vertices[2]); - - Vector3d GravityVec = vg->estimate(); - - // terms need to computer error in vertex i, except for bias error - const NavState& NSi = vi->estimate(); - Vector3d Pi = NSi.Get_P(); - Vector3d Vi = NSi.Get_V(); - //Matrix3d Ri = NSi.Get_RotMatrix(); - Sophus::SO3 Ri = NSi.Get_R(); - Vector3d dBgi = NSi.Get_dBias_Gyr(); - Vector3d dBai = NSi.Get_dBias_Acc(); - - // terms need 
to computer error in vertex j, except for bias error - const NavState& NSj = vj->estimate(); - Vector3d Pj = NSj.Get_P(); - Vector3d Vj = NSj.Get_V(); - //Matrix3d Rj = NSj.Get_RotMatrix(); - Sophus::SO3 Rj = NSj.Get_R(); - - // IMU Preintegration measurement - const IMUPreintegrator& M = _measurement; - double dTij = M.getDeltaTime(); // Delta Time - double dT2 = dTij*dTij; - Vector3d dPij = M.getDeltaP(); // Delta Position pre-integration measurement - Vector3d dVij = M.getDeltaV(); // Delta Velocity pre-integration measurement - //Matrix3d dRij = M.getDeltaR(); // Delta Rotation pre-integration measurement - Sophus::SO3 dRij = Sophus::SO3(M.getDeltaR()); - - // tmp variable, transpose of Ri - //Matrix3d RiT = Ri.transpose(); - Sophus::SO3 RiT = Ri.inverse(); - // residual error of Delta Position measurement - Vector3d rPij = RiT*(Pj - Pi - Vi*dTij - 0.5*GravityVec*dT2) - - (dPij + M.getJPBiasg()*dBgi + M.getJPBiasa()*dBai); // this line includes correction term of bias change. - // residual error of Delta Velocity measurement - Vector3d rVij = RiT*(Vj - Vi - GravityVec*dTij) - - (dVij + M.getJVBiasg()*dBgi + M.getJVBiasa()*dBai); //this line includes correction term of bias change - // residual error of Delta Rotation measurement - //Matrix3d dR_dbg = Sophus::SO3::exp(M.Get_J_Phi_Biasg()*dBgi).matrix(); - //Matrix3d rRij = (dRij * dR_dbg).transpose() * Ri.transpose()*Rj; - //Vector3d rPhiij = Sophus::SO3(rRij).log(); - Sophus::SO3 dR_dbg = Sophus::SO3::exp(M.getJRBiasg()*dBgi); - Sophus::SO3 rRij = (dRij * dR_dbg).inverse() * RiT * Rj; - Vector3d rPhiij = rRij.log(); - - // residual error of Gyroscope's bias, Forster 15'RSS - Vector3d rBiasG = (NSj.Get_BiasGyr() + NSj.Get_dBias_Gyr()) - - (NSi.Get_BiasGyr() + NSi.Get_dBias_Gyr()); - - // residual error of Accelerometer's bias, Forster 15'RSS - Vector3d rBiasA = (NSj.Get_BiasAcc() + NSj.Get_dBias_Acc()) - - (NSi.Get_BiasAcc() + NSi.Get_dBias_Acc()); - - Vector15d err; // typedef Matrix ErrorVector; ErrorVector _error; D=15 - err.setZero(); - // 15-Dim error vector order: - // position-velocity-rotation-deltabiasGyr_i-deltabiasAcc_i - // rPij - rVij - rPhiij - rBiasGi - rBiasAi - err.segment<3>(0) = rPij; // position error - err.segment<3>(3) = rVij; // velocity error - err.segment<3>(6) = rPhiij; // rotation phi error - err.segment<3>(9) = rBiasG; // bias gyro error - err.segment<3>(12) = rBiasA; // bias acc error - - _error = err; -} - -void EdgeNavStateGw::linearizeOplus() -{ - const VertexNavState* vi = static_cast(_vertices[0]); - const VertexNavState* vj = static_cast(_vertices[1]); - const VertexGravityW* vg = static_cast(_vertices[2]); - - Vector3d GravityVec = vg->estimate(); - - - // terms need to computer error in vertex i, except for bias error - const NavState& NSi = vi->estimate(); - Vector3d Pi = NSi.Get_P(); - Vector3d Vi = NSi.Get_V(); - Matrix3d Ri = NSi.Get_RotMatrix(); - Vector3d dBgi = NSi.Get_dBias_Gyr(); - // Vector3d dBai = NSi.Get_dBias_Acc(); - - // terms need to computer error in vertex j, except for bias error - const NavState& NSj = vj->estimate(); - Vector3d Pj = NSj.Get_P(); - Vector3d Vj = NSj.Get_V(); - Matrix3d Rj = NSj.Get_RotMatrix(); - - // IMU Preintegration measurement - const IMUPreintegrator& M = _measurement; - double dTij = M.getDeltaTime(); // Delta Time - double dT2 = dTij*dTij; - - // some temp variable - Matrix3d I3x3 = Matrix3d::Identity(); // I_3x3 - Matrix3d O3x3 = Matrix3d::Zero(); // 0_3x3 - Matrix3d RiT = Ri.transpose(); // Ri^T - Matrix3d RjT = Rj.transpose(); // Rj^T - Vector3d rPhiij 
= _error.segment<3>(6); // residual of rotation, rPhiij - //Matrix3d JrInv_rPhi = SO3Calc::JacobianRInv(rPhiij); // inverse right jacobian of so3 term #rPhiij# - Matrix3d JrInv_rPhi = Sophus::SO3::JacobianRInv(rPhiij); // inverse right jacobian of so3 term #rPhiij# - Matrix3d J_rPhi_dbg = M.getJRBiasg(); // jacobian of preintegrated rotation-angle to gyro bias i - - // 1. - // increment is the same as Forster 15'RSS - // pi = pi + Ri*dpi, pj = pj + Rj*dpj - // vi = vi + dvi, vj = vj + dvj - // Ri = Ri*Exp(dphi_i), Rj = Rj*Exp(dphi_j) - // Note: the optimized bias term is the 'delta bias' - // dBgi = dBgi + dbgi_update, dBgj = dBgj + dbgj_update - // dBai = dBai + dbai_update, dBaj = dBaj + dbaj_update - - // 2. - // 15-Dim error vector order: - // position-velocity-rotation-deltabiasGyr_i-deltabiasAcc_i - // rPij - rVij - rPhiij - rBiasGi - rBiasAi - // Jacobian row order: - // J_rPij_xxx - // J_rVij_xxx - // J_rPhiij_xxx - // J_rBiasGi_xxx - // J_rBiasAi_xxx - - // 3. - // order in 'update_' - // Vertex_i : dPi, dVi, dPhi_i, dBiasGyr_i, dBiasAcc_i, - // Vertex_j : dPj, dVj, dPhi_j, dBiasGyr_j, dBiasAcc_j - // Jacobian column order: - // J_xxx_dPi, J_xxx_dVi, J_xxx_dPhi_i, J_xxx_dBiasGyr_i, J_xxx_dBiasAcc_i - // J_xxx_dPj, J_xxx_dVj, J_xxx_dPhi_j, J_xxx_dBiasGyr_j, J_xxx_dBiasAcc_j - - // 4. - // For Vertex_i - Matrix _jacobianOplusXi; - _jacobianOplusXi.setZero(); - - // 4.1 - // J_rPij_xxx_i for Vertex_i - _jacobianOplusXi.block<3,3>(0,0) = - I3x3; //J_rP_dpi - _jacobianOplusXi.block<3,3>(0,3) = - RiT*dTij; //J_rP_dvi - //_jacobianOplusXi.block<3,3>(0,6) = SO3Calc::skew( RiT*(Pj-Pi-Vi*dTij-0.5*GravityVec*dT2) ); //J_rP_dPhi_i - _jacobianOplusXi.block<3,3>(0,6) = Sophus::SO3::hat( RiT*(Pj-Pi-Vi*dTij-0.5*GravityVec*dT2) ); //J_rP_dPhi_i - _jacobianOplusXi.block<3,3>(0,9) = - M.getJPBiasg(); //J_rP_dbgi - _jacobianOplusXi.block<3,3>(0,12)= - M.getJPBiasa(); //J_rP_dbai - - // 4.2 - // J_rVij_xxx_i for Vertex_i - _jacobianOplusXi.block<3,3>(3,0) = O3x3; //dpi - _jacobianOplusXi.block<3,3>(3,3) = - RiT; //dvi - //_jacobianOplusXi.block<3,3>(3,6) = SO3Calc::skew( RiT*(Vj-Vi-GravityVec*dTij) ); //dphi_i - _jacobianOplusXi.block<3,3>(3,6) = Sophus::SO3::hat( RiT*(Vj-Vi-GravityVec*dTij) ); //dphi_i - _jacobianOplusXi.block<3,3>(3,9) = - M.getJVBiasg(); //dbg_i - _jacobianOplusXi.block<3,3>(3,12)= - M.getJVBiasa(); //dba_i - - // 4.3 - // J_rPhiij_xxx_i for Vertex_i - //Matrix3d ExprPhiijTrans = SO3Calc::Expmap(rPhiij).transpose(); //Exp( rPhi )^T - //Matrix3d JrBiasGCorr = SO3Calc::JacobianR(J_rPhi_dbg*dBgi); //Jr( M.J_rPhi_bg * dBgi ) - Matrix3d ExprPhiijTrans = Sophus::SO3::exp(rPhiij).inverse().matrix(); - Matrix3d JrBiasGCorr = Sophus::SO3::JacobianR(J_rPhi_dbg*dBgi); - _jacobianOplusXi.block<3,3>(6,0) = O3x3; //dpi - _jacobianOplusXi.block<3,3>(6,3) = O3x3; //dvi - _jacobianOplusXi.block<3,3>(6,6) = - JrInv_rPhi * RjT * Ri; //dphi_i - _jacobianOplusXi.block<3,3>(6,9) = - JrInv_rPhi * ExprPhiijTrans * JrBiasGCorr * J_rPhi_dbg; //dbg_i - _jacobianOplusXi.block<3,3>(6,12)= O3x3; //dba_i - - // 4.4 - // J_rBiasGi_xxx_i for Vertex_i - _jacobianOplusXi.block<3,3>(9,0) = O3x3; //dpi - _jacobianOplusXi.block<3,3>(9,3) = O3x3; //dvi - _jacobianOplusXi.block<3,3>(9,6) = O3x3; //dphi_i - _jacobianOplusXi.block<3,3>(9,9) = - I3x3; //dbg_i - _jacobianOplusXi.block<3,3>(9,12)= O3x3; //dba_i - - // 4.5 - // J_rBiasAi_xxx_i for Vertex_i - _jacobianOplusXi.block<3,3>(12,0) = O3x3; //dpi - _jacobianOplusXi.block<3,3>(12,3) = O3x3; //dvi - _jacobianOplusXi.block<3,3>(12,6) = O3x3; //dphi_i - 
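Both the PRV edge retained above and the EdgeNavStateGw code being removed here lean on Sophus::SO3::JacobianR / JacobianRInv. For reference, with \theta = \lVert\phi\rVert, the closed forms of the SO(3) right Jacobian and its inverse are:

    J_r(\phi)      = I - \frac{1-\cos\theta}{\theta^2}\,[\phi]_\times + \frac{\theta-\sin\theta}{\theta^3}\,[\phi]_\times^2
    J_r^{-1}(\phi) = I + \tfrac{1}{2}[\phi]_\times + \Big(\frac{1}{\theta^2} - \frac{1+\cos\theta}{2\,\theta\,\sin\theta}\Big)[\phi]_\times^2

with the small-angle limits J_r \approx I - \tfrac{1}{2}[\phi]_\times and J_r^{-1} \approx I + \tfrac{1}{2}[\phi]_\times (implementations typically switch to these limits for tiny \theta).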
_jacobianOplusXi.block<3,3>(12,9) = O3x3; //dbg_i - _jacobianOplusXi.block<3,3>(12,12)= -I3x3; //dba_i - - // 5. - // For Vertex_j - Matrix _jacobianOplusXj; - _jacobianOplusXj.setZero(); - - // 5.1 - // J_rPij_xxx_j for Vertex_j - _jacobianOplusXj.block<3,3>(0,0) = RiT*Rj; //dpj - _jacobianOplusXj.block<3,3>(0,3) = O3x3; //dvj - _jacobianOplusXj.block<3,3>(0,6) = O3x3; //dphi_j - _jacobianOplusXj.block<3,3>(0,9) = O3x3; //dbg_j, all zero - _jacobianOplusXj.block<3,3>(0,12)= O3x3; //dba_j, all zero - - // 5.2 - // J_rVij_xxx_j for Vertex_j - _jacobianOplusXj.block<3,3>(3,0) = O3x3; //dpj - _jacobianOplusXj.block<3,3>(3,3) = RiT; //dvj - _jacobianOplusXj.block<3,3>(3,6) = O3x3; //dphi_j - _jacobianOplusXj.block<3,3>(3,9) = O3x3; //dbg_j, all zero - _jacobianOplusXj.block<3,3>(3,12)= O3x3; //dba_j, all zero - - // 5.3 - // J_rPhiij_xxx_j for Vertex_j - _jacobianOplusXj.block<3,3>(6,0) = O3x3; //dpj - _jacobianOplusXj.block<3,3>(6,3) = O3x3; //dvj - _jacobianOplusXj.block<3,3>(6,6) = JrInv_rPhi; //dphi_j - _jacobianOplusXj.block<3,3>(6,9) = O3x3; //dbg_j, all zero - _jacobianOplusXj.block<3,3>(6,12)= O3x3; //dba_j, all zero - - // 5.4 - // J_rBiasGi_xxx_j for Vertex_j - _jacobianOplusXj.block<3,3>(9,0) = O3x3; //dpj - _jacobianOplusXj.block<3,3>(9,3) = O3x3; //dvj - _jacobianOplusXj.block<3,3>(9,6) = O3x3; //dphi_j - _jacobianOplusXj.block<3,3>(9,9) = I3x3; //dbg_j - _jacobianOplusXj.block<3,3>(9,12)= O3x3; //dba_j - - // 5.5 - // J_rBiasAi_xxx_j for Vertex_j - _jacobianOplusXj.block<3,3>(12,0) = O3x3; //dpj - _jacobianOplusXj.block<3,3>(12,3) = O3x3; //dvj - _jacobianOplusXj.block<3,3>(12,6) = O3x3; //dphi_j - _jacobianOplusXj.block<3,3>(12,9) = O3x3; //dbg_j - _jacobianOplusXj.block<3,3>(12,12)= I3x3; //dba_j - - - // Gravity in world - Matrix Jgw; - Jgw.setZero(); - Matrix3d GwHat = Sophus::SO3::hat(GravityVec); - - Jgw.block<3,2>(0,0) = RiT*0.5*dT2*GwHat.block<3,2>(0,0); //rPij - Jgw.block<3,2>(3,0) = RiT*dTij*GwHat.block<3,2>(0,0); //rVij - //Jgw.block<3,2>(3,0) = ; //rRij - //Jgw.block<3,2>(3,0) = 0; //rBg - //Jgw.block<3,2>(3,0) = 0; //rBa - - // evaluate - _jacobianOplus[0] = _jacobianOplusXi; - _jacobianOplus[1] = _jacobianOplusXj; - _jacobianOplus[2] = Jgw; -} - - /** * @brief VertexNavState::VertexNavState */ @@ -798,7 +1013,7 @@ void VertexNavState::oplusImpl(const double* update_) // 2. // the same as Forster 15'RSS - // pi = pi + Ri*dpi, pj = pj + Rj*dpj + // pi = pi + dpi, pj = pj + dpj // vi = vi + dvi, vj = vj + dvj // Ri = Ri*Exp(dphi_i), Rj = Rj*Exp(dphi_j) // Note: the optimized bias term is the 'delta bias' @@ -948,7 +1163,7 @@ void EdgeNavState::linearizeOplus() // 1. 
// increment is the same as Forster 15'RSS - // pi = pi + Ri*dpi, pj = pj + Rj*dpj + // pi = pi + dpi, pj = pj + dpj // vi = vi + dvi, vj = vj + dvj // Ri = Ri*Exp(dphi_i), Rj = Rj*Exp(dphi_j) // Note: the optimized bias term is the 'delta bias' @@ -980,7 +1195,7 @@ void EdgeNavState::linearizeOplus() // 4.1 // J_rPij_xxx_i for Vertex_i - _jacobianOplusXi.block<3,3>(0,0) = - I3x3; //J_rP_dpi + _jacobianOplusXi.block<3,3>(0,0) = - RiT;//I3x3; //J_rP_dpi _jacobianOplusXi.block<3,3>(0,3) = - RiT*dTij; //J_rP_dvi //_jacobianOplusXi.block<3,3>(0,6) = SO3Calc::skew( RiT*(Pj-Pi-Vi*dTij-0.5*GravityVec*dT2) ); //J_rP_dPhi_i _jacobianOplusXi.block<3,3>(0,6) = Sophus::SO3::hat( RiT*(Pj-Pi-Vi*dTij-0.5*GravityVec*dT2) ); //J_rP_dPhi_i @@ -1030,7 +1245,7 @@ void EdgeNavState::linearizeOplus() // 5.1 // J_rPij_xxx_j for Vertex_j - _jacobianOplusXj.block<3,3>(0,0) = RiT*Rj; //dpj + _jacobianOplusXj.block<3,3>(0,0) = RiT;//*Rj; //dpj _jacobianOplusXj.block<3,3>(0,3) = O3x3; //dvj _jacobianOplusXj.block<3,3>(0,6) = O3x3; //dphi_j _jacobianOplusXj.block<3,3>(0,9) = O3x3; //dbg_j, all zero @@ -1112,14 +1327,14 @@ void EdgeNavStatePointXYZ::linearizeOplus() // error = obs - pi( Pc ) // Pw <- Pw + dPw, for Point3D // Rwb <- Rwb*exp(dtheta), for NavState.R - // Pwb <- Pwb + Rwb*dPwb, for NavState.P + // Pwb <- Pwb + dPwb, for NavState.P // Jacobian of error w.r.t Pw _jacobianOplusXi = - Jpi * Rcb * Rwb.transpose(); // Jacobian of Pc/error w.r.t dPwb //Matrix3d J_Pc_dPwb = -Rcb; - Matrix JdPwb = - Jpi * (-Rcb); + Matrix JdPwb = - Jpi * (-Rcb*Rwb.transpose()); // Jacobian of Pc/error w.r.t dRwb Vector3d Paux = Rcb*Rwb.transpose()*(Pw-Pwb); //Matrix3d J_Pc_dRwb = Sophus::SO3::hat(Paux) * Rcb; @@ -1168,11 +1383,11 @@ void EdgeNavStatePointXYZOnlyPose::linearizeOplus() // error = obs - pi( Pc ) // Pw <- Pw + dPw, for Point3D // Rwb <- Rwb*exp(dtheta), for NavState.R - // Pwb <- Pwb + Rwb*dPwb, for NavState.P + // Pwb <- Pwb + dPwb, for NavState.P // Jacobian of Pc/error w.r.t dPwb //Matrix3d J_Pc_dPwb = -Rcb; - Matrix JdPwb = - Jpi * (-Rcb); + Matrix JdPwb = - Jpi * (-Rcb*Rwb.transpose()); // Jacobian of Pc/error w.r.t dRwb Vector3d Paux = Rcb*Rwb.transpose()*(Pw-Pwb); //Matrix3d J_Pc_dRwb = Sophus::SO3::hat(Paux) * Rcb; diff --git a/src/IMU/g2otypes.h b/src/IMU/g2otypes.h index 2dfa8af..4de90d0 100644 --- a/src/IMU/g2otypes.h +++ b/src/IMU/g2otypes.h @@ -16,6 +16,334 @@ namespace g2o using namespace ORB_SLAM2; +/* + * IDP, (I)nverse (d)epth vertex for a map(p)oint + */ +class VertexIDP : public BaseVertex<1, double> +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + VertexIDP() : BaseVertex<1, double>(){} + bool read(std::istream &is) {return true;} + bool write(std::ostream &os) const {return true;} + + virtual void setToOriginImpl() { + _estimate = 1; + } + + virtual void oplusImpl(const double* update_) { + _estimate += update_[0]; + if(_estimate < 1e-6) _estimate = 1e-6; //todo + } + +}; + +/* + * Edge of reprojection error in one frame. 
+ * Vertex 0: mappoint IDP + * Veretx 1: reference KF PR + * Vertex 2: current frame PR + * Vertex 3: extrinsic pose Tbc(or Tcb) + */ +class EdgePRIDP : public BaseMultiEdge<2, Vector2d> +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + EdgePRIDP() : BaseMultiEdge<2, Vector2d>() { + resize(4); + } + bool read(std::istream& is) {return true;} + bool write(std::ostream& os) const {return true;} + void computeError(); + virtual void linearizeOplus(); + + void SetParams(double x, double y, double fx_, double fy_, double cx_, double cy_) { + refnormxy[0] = x; refnormxy[1] = y; + fx = fx_; fy = fy_; cx = cx_; cy = cy_; + } + + inline Vector2d project2d(const Vector3d& v) const { + Vector2d res; + res(0) = v(0)/v(2); + res(1) = v(1)/v(2); + return res; + } + Vector2d cam_project(const Vector3d & trans_xyz) const { + Vector2d proj = project2d(trans_xyz); + Vector2d res; + res[0] = proj[0]*fx + cx; + res[1] = proj[1]*fy + cy; + return res; + } + bool isDepthPositive() { + Vector3d Pc = computePc(); + return Pc(2)>0.01; + } + + Vector3d computePc() ; + +protected: + // [x,y] in normalized image plane in reference KF + double refnormxy[2]; + double fx,fy,cx,cy; +}; + +class VertexNavStatePR : public BaseVertex<6, NavState> +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + VertexNavStatePR() : BaseVertex<6, NavState>(){} + + bool read(std::istream& is) {return true;} + + bool write(std::ostream& os) const {return true;} + + virtual void setToOriginImpl() { + _estimate = NavState(); + } + + virtual void oplusImpl(const double* update_) { + Eigen::Map update(update_); + _estimate.IncSmallPR(update); + } +}; + +class VertexNavStateV : public BaseVertex<3, NavState> +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + VertexNavStateV() : BaseVertex<3, NavState>(){} + + bool read(std::istream& is) {return true;} + + bool write(std::ostream& os) const {return true;} + + virtual void setToOriginImpl() { + _estimate = NavState(); + } + + virtual void oplusImpl(const double* update_) { + Eigen::Map update(update_); + _estimate.IncSmallV(update); + } +}; + +/* + * Connect 5 vertex: PR0, V0, bias0 and PR1, V1 + * Vertex 0: PR0 + * Vertex 1: PR1 + * Vertex 2: V0 + * Vertex 3: V1 + * Vertex 4: bias0 + * + * Error order: error_P, error_R, error_V + * different from PVR edge + * + */ +class EdgeNavStatePRV : public BaseMultiEdge<9, IMUPreintegrator> +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + EdgeNavStatePRV() : BaseMultiEdge<9, IMUPreintegrator>() { + resize(5); + } + + bool read(std::istream& is) {return true;} + + bool write(std::ostream& os) const {return true;} + + void computeError(); + + virtual void linearizeOplus(); + + void SetParams(const Vector3d& gw) { + GravityVec = gw; + } + +protected: + // Gravity vector in 'world' frame + Vector3d GravityVec; +}; + +class EdgeNavStatePRPointXYZ : public BaseBinaryEdge<2, Vector2d, VertexSBAPointXYZ, VertexNavStatePR> +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + EdgeNavStatePRPointXYZ() : BaseBinaryEdge<2, Vector2d, VertexSBAPointXYZ, VertexNavStatePR>() {} + + bool read(std::istream& is) {return true;} + + bool write(std::ostream& os) const {return true;} + + void computeError() { + Vector3d Pc = computePc(); + Vector2d obs(_measurement); + + _error = obs - cam_project(Pc); + } + + bool isDepthPositive() { + Vector3d Pc = computePc(); + return Pc(2)>0.0; + } + + Vector3d computePc() { + const VertexSBAPointXYZ* vPoint = static_cast(_vertices[0]); + const VertexNavStatePR* vNavState = static_cast(_vertices[1]); + + const NavState& ns = vNavState->estimate(); + 
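The computePc() helper being defined here (its body follows) maps a world point into the camera frame through the IMU body frame using the camera-IMU extrinsics (Rbc, Pbc); spelled out, with R_{cb} = R_{bc}^T:

    P_b = R_{wb}^T (P_w - P_{wb}), \qquad P_c = R_{bc}^T (P_b - P_{bc}) = R_{cb} R_{wb}^T (P_w - P_{wb}) - R_{cb} P_{bc}

which is the expression used both here and in the corresponding linearizeOplus() implementations in g2otypes.cc above. The 2x3 Jacobian of the pinhole projection applied afterwards is

    \frac{\partial \pi(P_c)}{\partial P_c} = \frac{1}{z}\begin{bmatrix} f_x & 0 & -f_x\,x/z \\ 0 & f_y & -f_y\,y/z \end{bmatrix}, \qquad P_c = (x, y, z)^T

i.e. the Maux/z matrix built there; the error Jacobians then carry an extra minus sign because the error is obs - \pi(P_c).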
Matrix3d Rwb = ns.Get_RotMatrix(); + Vector3d Pwb = ns.Get_P(); + const Vector3d& Pw = vPoint->estimate(); + + Matrix3d Rcb = Rbc.transpose(); + Vector3d Pc = Rcb * Rwb.transpose() * (Pw - Pwb) - Rcb * Pbc; + + return Pc; + //Vector3d Pwc = Rwb*Pbc + Pwb; + //Matrix3d Rcw = (Rwb*Rbc).transpose(); + //Vector3d Pcw = -Rcw*Pwc; + //Vector3d Pc = Rcw*Pw + Pcw; + } + inline Vector2d project2d(const Vector3d& v) const { + Vector2d res; + res(0) = v(0)/v(2); + res(1) = v(1)/v(2); + return res; + } + Vector2d cam_project(const Vector3d & trans_xyz) const { + Vector2d proj = project2d(trans_xyz); + Vector2d res; + res[0] = proj[0]*fx + cx; + res[1] = proj[1]*fy + cy; + return res; + } + + // + virtual void linearizeOplus(); + + void SetParams(const double& fx_, const double& fy_, const double& cx_, const double& cy_, + const Matrix3d& Rbc_, const Vector3d& Pbc_) { + fx = fx_; + fy = fy_; + cx = cx_; + cy = cy_; + Rbc = Rbc_; + Pbc = Pbc_; + } + +protected: + // Camera intrinsics + double fx, fy, cx, cy; + // Camera-IMU extrinsics + Matrix3d Rbc; + Vector3d Pbc; +}; + +class EdgeNavStatePRPointXYZOnlyPose : public BaseUnaryEdge<2, Vector2d, VertexNavStatePR> +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + EdgeNavStatePRPointXYZOnlyPose(){} + + bool read(std::istream& is){return true;} + + bool write(std::ostream& os) const{return true;} + + void computeError() { + Vector3d Pc = computePc(); + Vector2d obs(_measurement); + + _error = obs - cam_project(Pc); + } + + bool isDepthPositive() { + Vector3d Pc = computePc(); + return Pc(2)>0.0; + } + + Vector3d computePc() { + const VertexNavStatePR* vNSPR = static_cast(_vertices[0]); + + const NavState& ns = vNSPR->estimate(); + Matrix3d Rwb = ns.Get_RotMatrix(); + Vector3d Pwb = ns.Get_P(); + //const Vector3d& Pw = vPoint->estimate(); + + Matrix3d Rcb = Rbc.transpose(); + Vector3d Pc = Rcb * Rwb.transpose() * (Pw - Pwb) - Rcb * Pbc; + + return Pc; + //Vector3d Pwc = Rwb*Pbc + Pwb; + //Matrix3d Rcw = (Rwb*Rbc).transpose(); + //Vector3d Pcw = -Rcw*Pwc; + //Vector3d Pc = Rcw*Pw + Pcw; + } + inline Vector2d project2d(const Vector3d& v) const { + Vector2d res; + res(0) = v(0)/v(2); + res(1) = v(1)/v(2); + return res; + } + Vector2d cam_project(const Vector3d & trans_xyz) const { + Vector2d proj = project2d(trans_xyz); + Vector2d res; + res[0] = proj[0]*fx + cx; + res[1] = proj[1]*fy + cy; + return res; + } + + // + virtual void linearizeOplus(); + + void SetParams(const double& fx_, const double& fy_, const double& cx_, const double& cy_, + const Matrix3d& Rbc_, const Vector3d& Pbc_, const Vector3d& Pw_) { + fx = fx_; + fy = fy_; + cx = cx_; + cy = cy_; + Rbc = Rbc_; + Pbc = Pbc_; + Pw = Pw_; + } + +protected: + // Camera intrinsics + double fx, fy, cx, cy; + // Camera-IMU extrinsics + Matrix3d Rbc; + Vector3d Pbc; + // Point position in world frame + Vector3d Pw; +}; + +/* + * Vertex order: PR, V, Bias + * Error order: dP, dV, dBg, dBa + */ +class EdgeNavStatePriorPRVBias : public BaseMultiEdge<15, NavState> +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + EdgeNavStatePriorPRVBias() : BaseMultiEdge<15, NavState>() {resize(3);} + + bool read(std::istream &is){return true;} + + bool write(std::ostream &os) const{return true;} + + void computeError(); + + virtual void linearizeOplus(); + +}; + + /** * @brief The VertexNavStatePVR class */ @@ -324,46 +652,6 @@ class EdgeNavStatePrior : public BaseUnaryEdge<15, NavState, VertexNavState> }; -/** - * @brief The VertexGravityW class - */ -class VertexGravityW : public BaseVertex<2, Vector3d> -{ -public: - 
EIGEN_MAKE_ALIGNED_OPERATOR_NEW - - VertexGravityW(); - - bool read(std::istream& is){return true;} - - bool write(std::ostream& os) const{return true;} - - virtual void setToOriginImpl() { - _estimate = Vector3d(0,0,9.81); - } - - virtual void oplusImpl(const double *update_); -}; - -/** - * @brief The EdgeNavStateGw class - */ -class EdgeNavStateGw : public BaseMultiEdge<15, IMUPreintegrator> -{ -public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - - EdgeNavStateGw(); - - bool read(std::istream& is){return true;} - - bool write(std::ostream& os) const{return true;} - - void computeError(); - - virtual void linearizeOplus(); -}; - /** * @brief The EdgeNavState class * Edge between NavState_i and NavState_j, vertex[0]~i, vertex[1]~j diff --git a/src/KeyFrame.cc b/src/KeyFrame.cc index 2089b2b..c56a9b5 100644 --- a/src/KeyFrame.cc +++ b/src/KeyFrame.cc @@ -228,7 +228,7 @@ void KeyFrame::ComputePreInt(void) // Test log if(dt < 0) { - cerr<mTimeStamp<<" vs "<isBad()) continue; - map observations = pMP->GetObservations(); + mapMapPointObs/*map*/ observations = pMP->GetObservations(); - for(map::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + for(mapMapPointObs/*map*/::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { if(mit->first->mnId==mnId) continue; @@ -754,7 +754,7 @@ void KeyFrame::SetBadFlag() mpMap->EraseKeyFrame(this); } mpKeyFrameDB->erase(this); - //cerr<<"KeyFrame "< -#include "IMU/configparam.h" #include "Converter.h" namespace ORB_SLAM2 { - +using namespace std; //------------------------------------------------------------------------------------------- //------------------------------------------------------------------------------------------- //------------------------------------------------------------------------------------------- +class KeyFrameInit +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + double mTimeStamp; + KeyFrameInit* mpPrevKeyFrame; + cv::Mat Twc; + IMUPreintegrator mIMUPreInt; + std::vector mvIMUData; + Vector3d bg; + + + KeyFrameInit(KeyFrame& kf): + mTimeStamp(kf.mTimeStamp), mpPrevKeyFrame(NULL), Twc(kf.GetPoseInverse().clone()), + mIMUPreInt(kf.GetIMUPreInt()), mvIMUData(kf.GetVectorIMUData()), bg(0,0,0) + { + } + + void ComputePreInt(void) + { + if(mpPrevKeyFrame == NULL) + { + return; + } + else + { + // Reset pre-integrator first + mIMUPreInt.reset(); + + if(mvIMUData.empty()) + return; + + // remember to consider the gap between the last KF and the first IMU + { + const IMUData& imu = mvIMUData.front(); + double dt = std::max(0., imu._t - mpPrevKeyFrame->mTimeStamp); + mIMUPreInt.update(imu._g - bg,imu._a ,dt); // Acc bias not considered here + } + // integrate each imu + for(size_t i=0; i lock(mMutexUpdatingInitPoses); + return mbUpdatingInitPoses; +} + +void LocalMapping::SetUpdatingInitPoses(bool flag) +{ + unique_lock lock(mMutexUpdatingInitPoses); + mbUpdatingInitPoses = flag; +} KeyFrame* LocalMapping::GetMapUpdateKF() { @@ -86,6 +155,36 @@ cv::Mat LocalMapping::GetGravityVec() return mGravityVec.clone(); } +cv::Mat LocalMapping::GetRwiInit() +{ + return mRwiInit.clone(); +} + +void LocalMapping::VINSInitThread() +{ + unsigned long initedid = 0; + cerr<<"start VINSInitThread"< 2) + if(!GetVINSInited() && mpCurrentKeyFrame->mnId > initedid) + { + initedid = mpCurrentKeyFrame->mnId; + + bool tmpbool = TryInitVIO(); + if(tmpbool) + { + //SetFirstVINSInited(true); + //SetVINSInited(true); + break; + } + } + usleep(3000); + if(isFinished()) + break; + } +} + bool LocalMapping::TryInitVIO(void) 
{ if(mpMap->KeyFramesInMap()<=mnLocalWindowSize) @@ -93,10 +192,10 @@ bool LocalMapping::TryInitVIO(void) static bool fopened = false; static ofstream fgw,fscale,fbiasa,fcondnum,ftime,fbiasg; + string tmpfilepath = ConfigParam::getTmpFilePath(); if(!fopened) { // Need to modify this to correct path - string tmpfilepath = ConfigParam::getTmpFilePath(); fgw.open(tmpfilepath+"gw.txt"); fscale.open(tmpfilepath+"scale.txt"); fbiasa.open(tmpfilepath+"biasa.txt"); @@ -119,6 +218,8 @@ bool LocalMapping::TryInitVIO(void) fbiasg< vScaleGravityKF = mpMap->GetAllKeyFrames(); int N = vScaleGravityKF.size(); + KeyFrame* pNewestKF = vScaleGravityKF[N-1]; + vector vTwc; + vector vIMUPreInt; + // Store initialization-required KeyFrame data + vector vKFInit; + + for(int i=0;iGetPoseInverse()); + vIMUPreInt.push_back(pKF->GetIMUPreInt()); + KeyFrameInit* pkfi = new KeyFrameInit (*pKF); + if(i!=0) + { + pkfi->mpPrevKeyFrame = vKFInit[i-1]; + } + vKFInit.push_back(pkfi); + } + + SetFlagCopyInitKFs(false); // Step 1. // Try to compute initial gyro bias, using optimization with Gauss-Newton - Vector3d bgest = Optimizer::OptimizeInitialGyroBias(vScaleGravityKF); + Vector3d bgest = Optimizer::OptimizeInitialGyroBias(vTwc,vIMUPreInt); + //Vector3d bgest = Optimizer::OptimizeInitialGyroBias(vScaleGravityKF); // Update biasg and pre-integration in LocalWindow. Remember to reset back to zero - for(vector::const_iterator vit=vScaleGravityKF.begin(), vend=vScaleGravityKF.end(); vit!=vend; vit++) + for(int i=0;iSetNavStateBiasGyr(bgest); + vKFInit[i]->bg = bgest; } - for(vector::const_iterator vit=vScaleGravityKF.begin(), vend=vScaleGravityKF.end(); vit!=vend; vit++) + for(int i=0;iComputePreInt(); + vKFInit[i]->ComputePreInt(); } // Solve A*x=B for x=[s,gw] 4x1 vector @@ -155,24 +287,21 @@ bool LocalMapping::TryInitVIO(void) // Approx Scale and Gravity vector in 'world' frame (first KF's camera frame) for(int i=0; iGetIMUPreInt().getDeltaTime(); - double dt23 = pKF3->GetIMUPreInt().getDeltaTime(); + double dt12 = pKF2->mIMUPreInt.getDeltaTime(); + double dt23 = pKF3->mIMUPreInt.getDeltaTime(); // Pre-integrated measurements - cv::Mat dp12 = Converter::toCvMat(pKF2->GetIMUPreInt().getDeltaP()); - cv::Mat dv12 = Converter::toCvMat(pKF2->GetIMUPreInt().getDeltaV()); - cv::Mat dp23 = Converter::toCvMat(pKF3->GetIMUPreInt().getDeltaP()); - // Test log - if(dt12!=pKF2->mTimeStamp-pKF1->mTimeStamp) cerr<<"dt12!=pKF2->mTimeStamp-pKF1->mTimeStamp"<mTimeStamp-pKF2->mTimeStamp) cerr<<"dt23!=pKF3->mTimeStamp-pKF2->mTimeStamp"<mIMUPreInt.getDeltaP()); + cv::Mat dv12 = Converter::toCvMat(pKF2->mIMUPreInt.getDeltaV()); + cv::Mat dp23 = Converter::toCvMat(pKF3->mIMUPreInt.getDeltaP()); // Pose of camera in world frame - cv::Mat Twc1 = pKF1->GetPoseInverse(); - cv::Mat Twc2 = pKF2->GetPoseInverse(); - cv::Mat Twc3 = pKF3->GetPoseInverse(); + cv::Mat Twc1 = vTwc[i].clone();//pKF1->GetPoseInverse(); + cv::Mat Twc2 = vTwc[i+1].clone();//pKF2->GetPoseInverse(); + cv::Mat Twc3 = vTwc[i+2].clone();//pKF3->GetPoseInverse(); // Position of camera center cv::Mat pc1 = Twc1.rowRange(0,3).col(3); cv::Mat pc2 = Twc2.rowRange(0,3).col(3); @@ -263,23 +392,23 @@ bool LocalMapping::TryInitVIO(void) for(int i=0; iGetIMUPreInt().getDeltaTime(); - double dt23 = pKF3->GetIMUPreInt().getDeltaTime(); + double dt12 = pKF2->mIMUPreInt.getDeltaTime(); + double dt23 = pKF3->mIMUPreInt.getDeltaTime(); // Pre-integrated measurements - cv::Mat dp12 = Converter::toCvMat(pKF2->GetIMUPreInt().getDeltaP()); - cv::Mat dv12 = 
Converter::toCvMat(pKF2->GetIMUPreInt().getDeltaV()); - cv::Mat dp23 = Converter::toCvMat(pKF3->GetIMUPreInt().getDeltaP()); - cv::Mat Jpba12 = Converter::toCvMat(pKF2->GetIMUPreInt().getJPBiasa()); - cv::Mat Jvba12 = Converter::toCvMat(pKF2->GetIMUPreInt().getJVBiasa()); - cv::Mat Jpba23 = Converter::toCvMat(pKF3->GetIMUPreInt().getJPBiasa()); + cv::Mat dp12 = Converter::toCvMat(pKF2->mIMUPreInt.getDeltaP()); + cv::Mat dv12 = Converter::toCvMat(pKF2->mIMUPreInt.getDeltaV()); + cv::Mat dp23 = Converter::toCvMat(pKF3->mIMUPreInt.getDeltaP()); + cv::Mat Jpba12 = Converter::toCvMat(pKF2->mIMUPreInt.getJPBiasa()); + cv::Mat Jvba12 = Converter::toCvMat(pKF2->mIMUPreInt.getJVBiasa()); + cv::Mat Jpba23 = Converter::toCvMat(pKF3->mIMUPreInt.getJPBiasa()); // Pose of camera in world frame - cv::Mat Twc1 = pKF1->GetPoseInverse(); - cv::Mat Twc2 = pKF2->GetPoseInverse(); - cv::Mat Twc3 = pKF3->GetPoseInverse(); + cv::Mat Twc1 = vTwc[i].clone();//pKF1->GetPoseInverse(); + cv::Mat Twc2 = vTwc[i+1].clone();//pKF2->GetPoseInverse(); + cv::Mat Twc3 = vTwc[i+2].clone();//pKF3->GetPoseInverse(); // Position of camera center cv::Mat pc1 = Twc1.rowRange(0,3).col(3); cv::Mat pc2 = Twc2.rowRange(0,3).col(3); @@ -367,6 +496,12 @@ bool LocalMapping::TryInitVIO(void) // <<(t3-t0)/cv::getTickFrequency()*1000<<" "<mTimeStamp<<" " <mTimeStamp; } - if(mpCurrentKeyFrame->mTimeStamp - mnStartTime >= 15.0) + if(pNewestKF->mTimeStamp - mnStartTime >= ConfigParam::GetVINSInitTime()) { bVIOInited = true; } - // When failed. Or when you're debugging. - // Reset biasg to zero, and re-compute imu-preintegrator. - if(!bVIOInited) - { - for(vector::const_iterator vit=vScaleGravityKF.begin(), vend=vScaleGravityKF.end(); vit!=vend; vit++) - { - KeyFrame* pKF = *vit; - pKF->SetNavStateBiasGyr(Vector3d::Zero()); - pKF->SetNavStateBiasAcc(Vector3d::Zero()); - pKF->SetNavStateDeltaBg(Eigen::Vector3d::Zero()); - pKF->SetNavStateDeltaBa(Eigen::Vector3d::Zero()); - } - for(vector::const_iterator vit=vScaleGravityKF.begin(), vend=vScaleGravityKF.end(); vit!=vend; vit++) - { - KeyFrame* pKF = *vit; - pKF->ComputePreInt(); - } - } - else + if(bVIOInited) { // Set NavState , scale and bias for all KeyFrames // Scale @@ -410,65 +527,326 @@ bool LocalMapping::TryInitVIO(void) mnVINSInitScale = s_; // gravity vector in world frame cv::Mat gw = Rwi_*GI; - mGravityVec = gw; + mGravityVec = gw.clone(); Vector3d gweig = Converter::toVector3d(gw); + mRwiInit = Rwi_.clone(); - for(vector::const_iterator vit=vScaleGravityKF.begin(), vend=vScaleGravityKF.end(); vit!=vend; vit++) + // Update NavState for the KeyFrames not in vScaleGravityKF + // Update Tcw-type pose for these KeyFrames, need mutex lock + if(ConfigParam::GetRealTimeFlag()) { - KeyFrame* pKF = *vit; - // Position and rotation of visual SLAM - cv::Mat wPc = pKF->GetPoseInverse().rowRange(0,3).col(3); // wPc - cv::Mat Rwc = pKF->GetPoseInverse().rowRange(0,3).colRange(0,3); // Rwc - // Set position and rotation of navstate - cv::Mat wPb = scale*wPc + Rwc*pcb; - pKF->SetNavStatePos(Converter::toVector3d(wPb)); - pKF->SetNavStateRot(Converter::toMatrix3d(Rwc*Rcb)); - // Update bias of Gyr & Acc - pKF->SetNavStateBiasGyr(bgest); - pKF->SetNavStateBiasAcc(dbiasa_eig); - // Set delta_bias to zero. (only updated during optimization) - pKF->SetNavStateDeltaBg(Eigen::Vector3d::Zero()); - pKF->SetNavStateDeltaBa(Eigen::Vector3d::Zero()); - // Step 4. 
- // compute velocity - if(pKF != vScaleGravityKF.back()) + // Stop local mapping + RequestStop(); + + // Wait until Local Mapping has effectively stopped + while(!isStopped() && !isFinished()) { - KeyFrame* pKFnext = pKF->GetNextKeyFrame(); - // IMU pre-int between pKF ~ pKFnext - const IMUPreintegrator& imupreint = pKFnext->GetIMUPreInt(); - // Time from this(pKF) to next(pKFnext) - double dt = imupreint.getDeltaTime(); // deltaTime - cv::Mat dp = Converter::toCvMat(imupreint.getDeltaP()); // deltaP - cv::Mat Jpba = Converter::toCvMat(imupreint.getJPBiasa()); // J_deltaP_biasa - cv::Mat wPcnext = pKFnext->GetPoseInverse().rowRange(0,3).col(3); // wPc next - cv::Mat Rwcnext = pKFnext->GetPoseInverse().rowRange(0,3).colRange(0,3); // Rwc next - - cv::Mat vel = - 1./dt*( scale*(wPc - wPcnext) + (Rwc - Rwcnext)*pcb + Rwc*Rcb*(dp + Jpba*dbiasa_) + 0.5*gw*dt*dt ); - Eigen::Vector3d veleig = Converter::toVector3d(vel); - pKF->SetNavStateVel(veleig); + usleep(1000); } - else + } + + SetUpdatingInitPoses(true); + { + unique_lock lock(mpMap->mMutexMapUpdate); + + int cnt=0; + for(vector::const_iterator vit=vScaleGravityKF.begin(), vend=vScaleGravityKF.end(); vit!=vend; vit++,cnt++) + { + KeyFrame* pKF = *vit; + if(pKF->isBad()) continue; + if(pKF!=vScaleGravityKF[cnt]) cerr<<"pKF!=vScaleGravityKF[cnt], id: "<mnId<<" != "<mnId<GetPoseInverse().rowRange(0,3).col(3); // wPc + cv::Mat Rwc = pKF->GetPoseInverse().rowRange(0,3).colRange(0,3); // Rwc + // Set position and rotation of navstate + cv::Mat wPb = scale*wPc + Rwc*pcb; + pKF->SetNavStatePos(Converter::toVector3d(wPb)); + pKF->SetNavStateRot(Converter::toMatrix3d(Rwc*Rcb)); + // Update bias of Gyr & Acc + pKF->SetNavStateBiasGyr(bgest); + pKF->SetNavStateBiasAcc(dbiasa_eig); + // Set delta_bias to zero. (only updated during optimization) + pKF->SetNavStateDeltaBg(Eigen::Vector3d::Zero()); + pKF->SetNavStateDeltaBa(Eigen::Vector3d::Zero()); + // Step 4. + // compute velocity + if(pKF != vScaleGravityKF.back()) + { + KeyFrame* pKFnext = pKF->GetNextKeyFrame(); + if(!pKFnext) cerr<<"pKFnext is NULL, cnt="<mnId<<" != "<mnId<GetIMUPreInt(); + // Time from this(pKF) to next(pKFnext) + double dt = imupreint.getDeltaTime(); // deltaTime + cv::Mat dp = Converter::toCvMat(imupreint.getDeltaP()); // deltaP + cv::Mat Jpba = Converter::toCvMat(imupreint.getJPBiasa()); // J_deltaP_biasa + cv::Mat wPcnext = pKFnext->GetPoseInverse().rowRange(0,3).col(3); // wPc next + cv::Mat Rwcnext = pKFnext->GetPoseInverse().rowRange(0,3).colRange(0,3); // Rwc next + + cv::Mat vel = - 1./dt*( scale*(wPc - wPcnext) + (Rwc - Rwcnext)*pcb + Rwc*Rcb*(dp + Jpba*dbiasa_) + 0.5*gw*dt*dt ); + Eigen::Vector3d veleig = Converter::toVector3d(vel); + pKF->SetNavStateVel(veleig); + } + else + { + cerr<<"-----------here is the last KF in vScaleGravityKF------------"<GetPrevKeyFrame(); + if(!pKFprev) cerr<<"pKFprev is NULL, cnt="<mnId<<" != "<mnId<GetIMUPreInt(); + double dt = imupreint_prev_cur.getDeltaTime(); + Eigen::Matrix3d Jvba = imupreint_prev_cur.getJVBiasa(); + Eigen::Vector3d dv = imupreint_prev_cur.getDeltaV(); + // + Eigen::Vector3d velpre = pKFprev->GetNavState().Get_V(); + Eigen::Matrix3d rotpre = pKFprev->GetNavState().Get_RotMatrix(); + Eigen::Vector3d veleig = velpre + gweig*dt + rotpre*( dv + Jvba*dbiasa_eig ); + pKF->SetNavStateVel(veleig); + } + } + + // Re-compute IMU pre-integration at last. Should after usage of pre-int measurements. 
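The vel expression above (a variant of it, without the scale factor, appears again further down for keyframes created during initialization) comes from rearranging the preintegrated position equation after substituting the camera-to-body conversion p_{wb} = s\,p_{wc} + R_{wc}\,p_{cb}, with s the estimated scale, g_w the recovered gravity and \delta b_a the estimated accelerometer bias:

    p_{b,j} = p_{b,i} + v_i\,\Delta t + \tfrac{1}{2} g_w \Delta t^2 + R_{wb,i}\big(\Delta\tilde{p}_{ij} + J^{\Delta p}_{b_a}\,\delta b_a\big)
    \Rightarrow\; v_i = -\frac{1}{\Delta t}\Big[ s\,(p_{c,i} - p_{c,j}) + (R_{wc,i} - R_{wc,j})\,p_{cb} + R_{wc,i} R_{cb}\big(\Delta\tilde{p}_{ij} + J^{\Delta p}_{b_a}\,\delta b_a\big) + \tfrac{1}{2} g_w \Delta t^2 \Big]

with \Delta t the preintegration time between the two keyframes. Once all velocities are set, the pre-integrations are re-computed in the loop below, since the old measurements have now been consumed.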
+ for(vector::const_iterator vit=vScaleGravityKF.begin(), vend=vScaleGravityKF.end(); vit!=vend; vit++) + { + KeyFrame* pKF = *vit; + if(pKF->isBad()) continue; + pKF->ComputePreInt(); + } + + // Update poses (multiply metric scale) + vector mspKeyFrames = mpMap->GetAllKeyFrames(); + for(std::vector::iterator sit=mspKeyFrames.begin(), send=mspKeyFrames.end(); sit!=send; sit++) + { + KeyFrame* pKF = *sit; + cv::Mat Tcw = pKF->GetPose(); + cv::Mat tcw = Tcw.rowRange(0,3).col(3)*scale; + tcw.copyTo(Tcw.rowRange(0,3).col(3)); + pKF->SetPose(Tcw); + } + vector mspMapPoints = mpMap->GetAllMapPoints(); + for(std::vector::iterator sit=mspMapPoints.begin(), send=mspMapPoints.end(); sit!=send; sit++) { - // If this is the last KeyFrame, no 'next' KeyFrame exists - KeyFrame* pKFprev = pKF->GetPrevKeyFrame(); - const IMUPreintegrator& imupreint_prev_cur = pKF->GetIMUPreInt(); - double dt = imupreint_prev_cur.getDeltaTime(); - Eigen::Matrix3d Jvba = imupreint_prev_cur.getJVBiasa(); - Eigen::Vector3d dv = imupreint_prev_cur.getDeltaV(); - // - Eigen::Vector3d velpre = pKFprev->GetNavState().Get_V(); - Eigen::Matrix3d rotpre = pKFprev->GetNavState().Get_RotMatrix(); - Eigen::Vector3d veleig = velpre + gweig*dt + rotpre*( dv + Jvba*dbiasa_eig ); - pKF->SetNavStateVel(veleig); + MapPoint* pMP = *sit; + //pMP->SetWorldPos(pMP->GetWorldPos()*scale); + pMP->UpdateScale(scale); } + std::cout<GetNextKeyFrame(); + + // Update bias of Gyr & Acc + pKF->SetNavStateBiasGyr(bgest); + pKF->SetNavStateBiasAcc(dbiasa_eig); + // Set delta_bias to zero. (only updated during optimization) + pKF->SetNavStateDeltaBg(Eigen::Vector3d::Zero()); + pKF->SetNavStateDeltaBa(Eigen::Vector3d::Zero()); + }while(pKF!=mpCurrentKeyFrame); + + // step2. re-compute pre-integration + pKF = pNewestKF; + do + { + pKF = pKF->GetNextKeyFrame(); + + pKF->ComputePreInt(); + }while(pKF!=mpCurrentKeyFrame); + + // step3. 
update pos/rot + pKF = pNewestKF; + do + { + pKF = pKF->GetNextKeyFrame(); + + // Update rot/pos + // Position and rotation of visual SLAM + cv::Mat wPc = pKF->GetPoseInverse().rowRange(0,3).col(3); // wPc + cv::Mat Rwc = pKF->GetPoseInverse().rowRange(0,3).colRange(0,3); // Rwc + cv::Mat wPb = wPc + Rwc*pcb; + pKF->SetNavStatePos(Converter::toVector3d(wPb)); + pKF->SetNavStateRot(Converter::toMatrix3d(Rwc*Rcb)); + + //pKF->SetNavState(); + + if(pKF != mpCurrentKeyFrame) + { + KeyFrame* pKFnext = pKF->GetNextKeyFrame(); + // IMU pre-int between pKF ~ pKFnext + const IMUPreintegrator& imupreint = pKFnext->GetIMUPreInt(); + // Time from this(pKF) to next(pKFnext) + double dt = imupreint.getDeltaTime(); // deltaTime + cv::Mat dp = Converter::toCvMat(imupreint.getDeltaP()); // deltaP + cv::Mat Jpba = Converter::toCvMat(imupreint.getJPBiasa()); // J_deltaP_biasa + cv::Mat wPcnext = pKFnext->GetPoseInverse().rowRange(0,3).col(3); // wPc next + cv::Mat Rwcnext = pKFnext->GetPoseInverse().rowRange(0,3).colRange(0,3); // Rwc next + + cv::Mat vel = - 1./dt*( (wPc - wPcnext) + (Rwc - Rwcnext)*pcb + Rwc*Rcb*(dp + Jpba*dbiasa_) + 0.5*gw*dt*dt ); + Eigen::Vector3d veleig = Converter::toVector3d(vel); + pKF->SetNavStateVel(veleig); + } + else + { + // If this is the last KeyFrame, no 'next' KeyFrame exists + KeyFrame* pKFprev = pKF->GetPrevKeyFrame(); + const IMUPreintegrator& imupreint_prev_cur = pKF->GetIMUPreInt(); + double dt = imupreint_prev_cur.getDeltaTime(); + Eigen::Matrix3d Jvba = imupreint_prev_cur.getJVBiasa(); + Eigen::Vector3d dv = imupreint_prev_cur.getDeltaV(); + // + Eigen::Vector3d velpre = pKFprev->GetNavState().Get_V(); + Eigen::Matrix3d rotpre = pKFprev->GetNavState().Get_RotMatrix(); + Eigen::Vector3d veleig = velpre + gweig*dt + rotpre*( dv + Jvba*dbiasa_eig ); + pKF->SetNavStateVel(veleig); + } + + }while(pKF!=mpCurrentKeyFrame); + + } + + std::cout<::const_iterator vit=vScaleGravityKF.begin(), vend=vScaleGravityKF.end(); vit!=vend; vit++) + if(ConfigParam::GetRealTimeFlag()) { - KeyFrame* pKF = *vit; - pKF->ComputePreInt(); + Release(); } + + // Run global BA after inited + unsigned long nGBAKF = mpCurrentKeyFrame->mnId; + //Optimizer::GlobalBundleAdjustmentNavState(mpMap,mGravityVec,10,NULL,nGBAKF,false); + Optimizer::GlobalBundleAdjustmentNavStatePRV(mpMap,mGravityVec,10,NULL,nGBAKF,false); + cerr<<"finish global BA after vins init"< lock(mpMap->mMutexMapUpdate); + + // Correct keyframes starting at map first keyframe + list lpKFtoCheck(mpMap->mvpKeyFrameOrigins.begin(),mpMap->mvpKeyFrameOrigins.end()); + + while(!lpKFtoCheck.empty()) + { + KeyFrame* pKF = lpKFtoCheck.front(); + const set sChilds = pKF->GetChilds(); + cv::Mat Twc = pKF->GetPoseInverse(); + for(set::const_iterator sit=sChilds.begin();sit!=sChilds.end();sit++) + { + KeyFrame* pChild = *sit; + if(pChild->mnBAGlobalForKF!=nGBAKF) + { + cerr<<"correct KF after gBA in VI init: "<mnId<GetPose()*Twc; + pChild->mTcwGBA = Tchildc*pKF->mTcwGBA;//*Tcorc*pKF->mTcwGBA; + pChild->mnBAGlobalForKF=nGBAKF; + + // Set NavStateGBA and correct the P/V/R + pChild->mNavStateGBA = pChild->GetNavState(); + cv::Mat TwbGBA = Converter::toCvMatInverse(cvTbc*pChild->mTcwGBA); + Matrix3d RwbGBA = Converter::toMatrix3d(TwbGBA.rowRange(0,3).colRange(0,3)); + Vector3d PwbGBA = Converter::toVector3d(TwbGBA.rowRange(0,3).col(3)); + Matrix3d Rw1 = pChild->mNavStateGBA.Get_RotMatrix(); + Vector3d Vw1 = pChild->mNavStateGBA.Get_V(); + Vector3d Vw2 = RwbGBA*Rw1.transpose()*Vw1; // bV1 = bV2 ==> Rwb1^T*wV1 = Rwb2^T*wV2 ==> wV2 = Rwb2*Rwb1^T*wV1 + 
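The Vw2 line just above applies the pose correction from global BA to the stored velocity while preserving the body-frame velocity, which does not depend on the world-frame pose:

    {}^{b}v = R_{wb}^T\,{}^{w}v \ \text{unchanged} \;\Rightarrow\; R_{wb,GBA}^T\,{}^{w}v_{new} = R_{wb,old}^T\,{}^{w}v_{old} \;\Rightarrow\; {}^{w}v_{new} = R_{wb,GBA}\,R_{wb,old}^T\,{}^{w}v_{old}

exactly as the inline comment states; the corrected position, rotation and velocity are then written back into mNavStateGBA below.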
pChild->mNavStateGBA.Set_Pos(PwbGBA); + pChild->mNavStateGBA.Set_Rot(RwbGBA); + pChild->mNavStateGBA.Set_Vel(Vw2); + } + lpKFtoCheck.push_back(pChild); + } + + pKF->mTcwBefGBA = pKF->GetPose(); + //pKF->SetPose(pKF->mTcwGBA); + pKF->mNavStateBefGBA = pKF->GetNavState(); + pKF->SetNavState(pKF->mNavStateGBA); + pKF->UpdatePoseFromNS(cvTbc); + + lpKFtoCheck.pop_front(); + + } + + // Correct MapPoints + const vector vpMPs = mpMap->GetAllMapPoints(); + + for(size_t i=0; iisBad()) + continue; + + if(pMP->mnBAGlobalForKF==nGBAKF) + { + // If optimized by Global BA, just update + pMP->SetWorldPos(pMP->mPosGBA); + } + else + { + // Update according to the correction of its reference keyframe + KeyFrame* pRefKF = pMP->GetReferenceKeyFrame(); + + if(pRefKF->mnBAGlobalForKF!=nGBAKF) + continue; + + // Map to non-corrected camera + cv::Mat Rcw = pRefKF->mTcwBefGBA.rowRange(0,3).colRange(0,3); + cv::Mat tcw = pRefKF->mTcwBefGBA.rowRange(0,3).col(3); + cv::Mat Xc = Rcw*pMP->GetWorldPos()+tcw; + + // Backproject using corrected camera + cv::Mat Twc = pRefKF->GetPoseInverse(); + cv::Mat Rwc = Twc.rowRange(0,3).colRange(0,3); + cv::Mat twc = Twc.rowRange(0,3).col(3); + + pMP->SetWorldPos(Rwc*Xc+twc); + } + } + + cout << "Map updated!" << endl; + + // Map updated, set flag for Tracking + SetMapUpdateFlagInTracking(true); + + // Release LocalMapping + + Release(); + } + } + + SetFlagInitGBAFinish(true); + } + + for(int i=0;iGetPrevKeyFrame()!=NULL) + { + pKF0 = pKF0->GetPrevKeyFrame(); + mlLocalKeyFrames.push_front(pKF0); + } + } } void LocalMapping::DeleteBadInLocalWindow(void) @@ -517,6 +904,10 @@ LocalMapping::LocalMapping(Map *pMap, const float bMonocular, ConfigParam* pPara mbVINSInited = false; mbFirstTry = true; mbFirstVINSInited = false; + + mbUpdatingInitPoses = false; + mbCopyInitKFs = false; + mbInitGBAFinish = false; } void LocalMapping::SetLoopCloser(LoopClosing* pLoopCloser) @@ -572,21 +963,26 @@ void LocalMapping::Run() } else { - Optimizer::LocalBundleAdjustmentNavState(mpCurrentKeyFrame,mlLocalKeyFrames,&mbAbortBA, mpMap, mGravityVec, this); + //Optimizer::LocalBundleAdjustmentNavStatePRV(mpCurrentKeyFrame,mlLocalKeyFrames,&mbAbortBA, mpMap, mGravityVec, this); + Optimizer::LocalBAPRVIDP(mpCurrentKeyFrame,mlLocalKeyFrames,&mbAbortBA, mpMap, mGravityVec, this); } } - // Try to initialize VIO, if not inited - if(!GetVINSInited()) + // Visual-Inertial initialization for non-realtime mode + if(!ConfigParam::GetRealTimeFlag()) { - bool tmpbool = TryInitVIO(); - SetVINSInited(tmpbool); - if(tmpbool) + // Try to initialize VIO, if not inited + if(!GetVINSInited()) { - // Update map scale - mpMap->UpdateScale(mnVINSInitScale); - // Set initialization flag - SetFirstVINSInited(true); + bool tmpbool = TryInitVIO(); + SetVINSInited(tmpbool); + if(tmpbool) + { + // Update map scale + mpMap->UpdateScale(mnVINSInitScale); + // Set initialization flag + SetFirstVINSInited(true); + } } } @@ -595,7 +991,8 @@ void LocalMapping::Run() KeyFrameCulling(); } - mpLoopCloser->InsertKeyFrame(mpCurrentKeyFrame); + if(GetFlagInitGBAFinish()) + mpLoopCloser->InsertKeyFrame(mpCurrentKeyFrame); } else if(Stop()) { @@ -1147,6 +1544,14 @@ void LocalMapping::InterruptBA() void LocalMapping::KeyFrameCulling() { + + if(ConfigParam::GetRealTimeFlag()) + { + if(GetFlagCopyInitKFs()) + return; + } + SetFlagCopyInitKFs(true); + // Check redundant keyframes (only local keyframes) // A keyframe is considered redundant if the 90% of the MapPoints it sees, are seen // in at least other 3 keyframes (in the same or finer scale) @@ 
-1176,31 +1581,27 @@ void LocalMapping::KeyFrameCulling() // Note, the KF just out of Local is similarly considered as Local KeyFrame* pPrevKF = pKF->GetPrevKeyFrame(); KeyFrame* pNextKF = pKF->GetNextKeyFrame(); - if(pPrevKF && pNextKF) + if(pPrevKF && pNextKF && !GetVINSInited()) { - double timegap=0.5; - if(GetVINSInited()) - timegap = 3; + if(fabs(pNextKF->mTimeStamp - pPrevKF->mTimeStamp) > /*0.2*/0.5) + continue; + } + // Don't drop the KF before current KF + if(pKF->GetNextKeyFrame() == mpCurrentKeyFrame) + continue; + if(pKF->mTimeStamp >= mpCurrentKeyFrame->mTimeStamp - 0.11) + continue; - // Test log - if(pOldestLocalKF->isBad()) cerr<<"pOldestLocalKF is bad, check 1. id: "<mnId<isBad()) cerr<<"pPrevLocalKF is bad, check 1. id: "<mnId<isBad()) cerr<<"pNewestLocalKF is bad, check 1. id: "<mnId<mTimeStamp < mpCurrentKeyFrame->mTimeStamp - 4.0) + timegap = 3.01; - if(pKF->mnId >= pOldestLocalKF->mnId) - { - timegap = 0.1; // third tested, good - if(GetVINSInited()) - timegap = 0.5; - // Test log - if(pKF->mnId >= pNewestLocalKF->mnId) - cerr<<"Want to cull Newer KF than LocalWindow? id/currentKFid:"<mnId<<"/"<mnId<mTimeStamp - pPrevKF->mTimeStamp) > timegap) continue; } - const vector vpMapPoints = pKF->GetMapPointMatches(); int nObs = 3; @@ -1224,9 +1625,9 @@ void LocalMapping::KeyFrameCulling() if(pMP->Observations()>thObs) { const int &scaleLevel = pKF->mvKeysUn[i].octave; - const map observations = pMP->GetObservations(); + const mapMapPointObs/*map*/ observations = pMP->GetObservations(); int nObs=0; - for(map::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + for(mapMapPointObs/*map*/::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; if(pKFi==pKF) @@ -1252,6 +1653,8 @@ void LocalMapping::KeyFrameCulling() if(nRedundantObservations>0.9*nMPs) pKF->SetBadFlag(); } + + SetFlagCopyInitKFs(false); } cv::Mat LocalMapping::SkewSymmetricMatrix(const cv::Mat &v) diff --git a/src/LoopClosing.cc b/src/LoopClosing.cc index a02ba8d..657b122 100644 --- a/src/LoopClosing.cc +++ b/src/LoopClosing.cc @@ -431,6 +431,7 @@ void LoopClosing::CorrectLoop() // If a Global Bundle Adjustment is running, abort it if(isRunningGBA()) { + cout<<"Abort last global BA..."< lock(mMutexGBA); mbStopGBA = true; @@ -441,6 +442,7 @@ void LoopClosing::CorrectLoop() mpThreadGBA->detach(); delete mpThreadGBA; } + cout<<"Last global BA aborted."<InformNewBigChange(); // Add loop edge mpMatchedKF->AddLoopEdge(mpCurrentKF); @@ -668,11 +671,12 @@ void LoopClosing::ResetIfRequested() void LoopClosing::RunGlobalBundleAdjustment(unsigned long nLoopKF) { + std::chrono::steady_clock::time_point begin= std::chrono::steady_clock::now(); cout << "Starting Global Bundle Adjustment" << endl; int idx = mnFullBAIdx; //Optimizer::GlobalBundleAdjustemnt(mpMap,10,&mbStopGBA,nLoopKF,false); - Optimizer::GlobalBundleAdjustmentNavState(mpMap,mpLocalMapper->GetGravityVec(),10,&mbStopGBA,nLoopKF,false); + Optimizer::GlobalBundleAdjustmentNavStatePRV(mpMap,mpLocalMapper->GetGravityVec(),10,&mbStopGBA,nLoopKF,false); // Update all MapPoints and KeyFrames // Local Mapping was active during BA, that means that there might be new keyframes @@ -778,6 +782,8 @@ void LoopClosing::RunGlobalBundleAdjustment(unsigned long nLoopKF) } } + mpMap->InformNewBigChange(); + mpLocalMapper->Release(); cout << "Map updated!" 
<< endl; @@ -789,6 +795,9 @@ void LoopClosing::RunGlobalBundleAdjustment(unsigned long nLoopKF) mbFinishedGBA = true; mbRunningGBA = false; } + + std::chrono::steady_clock::time_point end= std::chrono::steady_clock::now(); + std::cout << "globalBA Time consumption = " << std::chrono::duration_cast(end - begin).count() < &vpMPs) mvpReferenceMapPoints = vpMPs; } +void Map::InformNewBigChange() +{ + unique_lock lock(mMutexMap); + mnBigChangeIdx++; +} + +int Map::GetLastBigChangeIdx() +{ + unique_lock lock(mMutexMap); + return mnBigChangeIdx; +} + vector Map::GetAllKeyFrames() { unique_lock lock(mMutexMap); diff --git a/src/MapPoint.cc b/src/MapPoint.cc index 9a99bab..70464e9 100644 --- a/src/MapPoint.cc +++ b/src/MapPoint.cc @@ -29,6 +29,12 @@ namespace ORB_SLAM2 long unsigned int MapPoint::nNextId=0; mutex MapPoint::mGlobalMutex; + +bool cmpKeyFrameId::operator() (const KeyFrame* a, const KeyFrame* b) const { + return a->mnId < b->mnId; +} + + MapPoint::MapPoint(const cv::Mat &Pos, KeyFrame *pRefKF, Map* pMap): mnFirstKFid(pRefKF->mnId), mnFirstFrame(pRefKF->mnFrameId), nObs(0), mnTrackReferenceForFrame(0), mnLastFrameSeen(0), mnBALocalForKF(0), mnFuseCandidateForKF(0), mnLoopPointForKF(0), mnCorrectedByKF(0), @@ -130,9 +136,26 @@ void MapPoint::EraseObservation(KeyFrame* pKF) mObservations.erase(pKF); + KeyFrame* pKFrefnew = NULL; + for(auto ob : mObservations) + { + KeyFrame* pkfi = ob.first; + if(!pkfi->isBad()) + { + pKFrefnew = pkfi; + break; + } + } + if(mpRefKF==pKF) + { mpRefKF=mObservations.begin()->first; + if(!pKFrefnew) + mpRefKF = pKFrefnew; + else + bBad = true; + } // If only 2 observations or less, discard point if(nObs<=2) bBad=true; @@ -143,7 +166,7 @@ void MapPoint::EraseObservation(KeyFrame* pKF) SetBadFlag(); } -map MapPoint::GetObservations() +mapMapPointObs/*map*/ MapPoint::GetObservations() { unique_lock lock(mMutexFeatures); return mObservations; @@ -157,7 +180,7 @@ int MapPoint::Observations() void MapPoint::SetBadFlag() { - map obs; + mapMapPointObs/*map*/ obs; { unique_lock lock1(mMutexFeatures); unique_lock lock2(mMutexPos); @@ -165,7 +188,7 @@ void MapPoint::SetBadFlag() obs = mObservations; mObservations.clear(); } - for(map::iterator mit=obs.begin(), mend=obs.end(); mit!=mend; mit++) + for(mapMapPointObs/*map*/::iterator mit=obs.begin(), mend=obs.end(); mit!=mend; mit++) { KeyFrame* pKF = mit->first; pKF->EraseMapPointMatch(mit->second); @@ -187,7 +210,7 @@ void MapPoint::Replace(MapPoint* pMP) return; int nvisible, nfound; - map obs; + mapMapPointObs/*map*/ obs; { unique_lock lock1(mMutexFeatures); unique_lock lock2(mMutexPos); @@ -199,7 +222,7 @@ void MapPoint::Replace(MapPoint* pMP) mpReplaced = pMP; } - for(map::iterator mit=obs.begin(), mend=obs.end(); mit!=mend; mit++) + for(mapMapPointObs/*map*/::iterator mit=obs.begin(), mend=obs.end(); mit!=mend; mit++) { // Replace measurement in keyframe KeyFrame* pKF = mit->first; @@ -251,7 +274,7 @@ void MapPoint::ComputeDistinctiveDescriptors() // Retrieve all observed descriptors vector vDescriptors; - map observations; + mapMapPointObs/*map*/ observations; { unique_lock lock1(mMutexFeatures); @@ -265,7 +288,7 @@ void MapPoint::ComputeDistinctiveDescriptors() vDescriptors.reserve(observations.size()); - for(map::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + for(mapMapPointObs/*map*/::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKF = mit->first; @@ -336,7 +359,7 @@ bool MapPoint::IsInKeyFrame(KeyFrame *pKF) void 
MapPoint::UpdateNormalAndDepth() { - map observations; + mapMapPointObs/*map*/ observations; KeyFrame* pRefKF; cv::Mat Pos; { @@ -354,7 +377,7 @@ void MapPoint::UpdateNormalAndDepth() cv::Mat normal = cv::Mat::zeros(3,1,CV_32F); int n=0; - for(map::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + for(mapMapPointObs/*map*/::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKF = mit->first; cv::Mat Owi = pKF->GetCameraCenter(); diff --git a/src/Optimizer.cc b/src/Optimizer.cc index 018a97f..0fc907a 100644 --- a/src/Optimizer.cc +++ b/src/Optimizer.cc @@ -47,6 +47,1369 @@ namespace ORB_SLAM2 //------------------------------------------------------------------------------------------- //------------------------------------------------------------------------------------------- +void Optimizer::LocalBAPRVIDP(KeyFrame *pCurKF, const std::list &lLocalKeyFrames, bool* pbStopFlag, Map* pMap, cv::Mat& gw, LocalMapping* pLM) +{ + // Check current KeyFrame in local window + if(pCurKF != lLocalKeyFrames.back()) + cerr<<"pCurKF != lLocalKeyFrames.back. check"<::const_iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + KeyFrame* pKFi = *lit; + if(!pKFi) cerr<<"!pKFi. why??????"<mnBALocalForKF = pCurKF->mnId; + } + + // Local MapPoints seen in Local KeyFrames + list lLocalMapPoints; + for(list::const_iterator lit=lLocalKeyFrames.begin() , lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + vector vpMPs = (*lit)->GetMapPointMatches(); + for(vector::iterator vit=vpMPs.begin(), vend=vpMPs.end(); vit!=vend; vit++) + { + MapPoint* pMP = *vit; + if(pMP) + if(!pMP->isBad()) + if(pMP->mnBALocalForKF!=pCurKF->mnId) + { + lLocalMapPoints.push_back(pMP); + pMP->mnBALocalForKF=pCurKF->mnId; + } + } + } + + // Fixed Keyframes. Keyframes that see Local MapPoints but that are not Local Keyframes + list lFixedCameras; + // Add the KeyFrame before local window. 
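// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original patch; assumes Eigen, helper name
// is mine): the inverse-depth parametrization that LocalBAPRVIDP optimizes.
// Each map point is reduced to a single scalar rho = 1/depth in its reference
// keyframe, while the normalized image coordinates (xn, yn) of its observation
// there stay fixed.  The world position is recovered from rho and the
// reference pose (Rwr, twr), which is what the vRefNormXY / VertexIDP
// bookkeeping further below prepares.
// ---------------------------------------------------------------------------
#include <Eigen/Dense>

Eigen::Vector3d WorldFromInverseDepth(double xn, double yn, double rho,
                                      const Eigen::Matrix3d& Rwr,
                                      const Eigen::Vector3d& twr)
{
    // Back-project the unit-plane point and scale by the depth 1/rho.
    Eigen::Vector3d Pr(xn / rho, yn / rho, 1.0 / rho);
    // Express it in the world frame.
    return Rwr * Pr + twr;
}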
+ KeyFrame* pKFPrevLocal = lLocalKeyFrames.front()->GetPrevKeyFrame(); + if(pKFPrevLocal) + { + // Test log + if(pKFPrevLocal->isBad()) cerr<<"KeyFrame before local window is bad?"<mnBAFixedForKF==pCurKF->mnId) cerr<<"KeyFrame before local, has been added to lFixedKF?"<mnBALocalForKF==pCurKF->mnId) cerr<<"KeyFrame before local, has been added to lLocalKF?"<mnBAFixedForKF = pCurKF->mnId; + if(!pKFPrevLocal->isBad()) + lFixedCameras.push_back(pKFPrevLocal); + else + cerr<<"pKFPrevLocal is Bad?"<::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) + { + mapMapPointObs/*map*/ observations = (*lit)->GetObservations(); + for(mapMapPointObs/*map*/::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + { + KeyFrame* pKFi = mit->first; + + if(pKFi->mnBALocalForKF!=pCurKF->mnId && pKFi->mnBAFixedForKF!=pCurKF->mnId) + { + pKFi->mnBAFixedForKF=pCurKF->mnId; + if(!pKFi->isBad()) + lFixedCameras.push_back(pKFi); + } + } + } + + // Setup optimizer + g2o::SparseOptimizer optimizer; + g2o::BlockSolverX::LinearSolverType * linearSolver; + + linearSolver = new g2o::LinearSolverEigen(); + //linearSolver = new g2o::LinearSolverCholmod(); + + g2o::BlockSolverX * solver_ptr = new g2o::BlockSolverX(linearSolver); + + g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(solver_ptr); + //g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr); + optimizer.setAlgorithm(solver); +//optimizer.setVerbose(true); + if(pbStopFlag) + optimizer.setForceStopFlag(pbStopFlag); + + int maxKFid = 0; + + // Extrinsic vertex + int ExtrinsicVertexId = KeyFrame::nNextId*3 + MapPoint::nNextId + 1; + { + g2o::VertexNavStatePR * vTcb = new g2o::VertexNavStatePR(); + NavState tmpNSTcb; + tmpNSTcb.Set_Pos(Pcb); + tmpNSTcb.Set_Rot(Rcb); + vTcb->setEstimate(tmpNSTcb); + vTcb->setId(ExtrinsicVertexId); + vTcb->setFixed(true); //todo + optimizer.addVertex(vTcb); + } + + // Set Local KeyFrame vertices + for(list::const_iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + KeyFrame* pKFi = *lit; + if(!pKFi) cerr<<"!pKFi when setting local KF vertex???"<mnId*3; + // Vertex of PR/V + { + g2o::VertexNavStatePR * vNSPR = new g2o::VertexNavStatePR(); + vNSPR->setEstimate(pKFi->GetNavState()); + vNSPR->setId(idKF); + vNSPR->setFixed(false); + optimizer.addVertex(vNSPR); + + g2o::VertexNavStateV * vNSV = new g2o::VertexNavStateV(); + vNSV->setEstimate(pKFi->GetNavState()); + vNSV->setId(idKF+1); + vNSV->setFixed(false); + optimizer.addVertex(vNSV); + } + // Vertex of Bias + { + g2o::VertexNavStateBias * vNSBias = new g2o::VertexNavStateBias(); + vNSBias->setEstimate(pKFi->GetNavState()); + vNSBias->setId(idKF+2); + vNSBias->setFixed(false); + optimizer.addVertex(vNSBias); + } + + if(idKF+2>maxKFid) + maxKFid=idKF+2; + + } + + // Set Fixed KeyFrame vertices. Including the pKFPrevLocal. 
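// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original patch; function names are mine):
// the vertex id layout used by LocalBAPRVIDP.  Every keyframe owns three
// consecutive graph ids (pose/rotation, velocity, bias), map points are placed
// after the largest keyframe id, and the single camera-IMU extrinsic vertex
// gets an id above everything else so no two vertices can collide.
// ---------------------------------------------------------------------------
inline long IdPR(long kfId)                  { return 3 * kfId;     }  // NavState P,R
inline long IdV(long kfId)                   { return 3 * kfId + 1; }  // NavState V
inline long IdBias(long kfId)                { return 3 * kfId + 2; }  // gyro/acc bias
inline long IdPoint(long mpId, long maxKFid) { return mpId + maxKFid + 1; }
inline long IdExtrinsic(long nextKFid, long nextMPid) { return 3 * nextKFid + nextMPid + 1; }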
+ for(list::iterator lit=lFixedCameras.begin(), lend=lFixedCameras.end(); lit!=lend; lit++) + { + KeyFrame* pKFi = *lit; + int idKF = pKFi->mnId*3; + // For common fixed KeyFrames, only add PR vertex + { + g2o::VertexNavStatePR * vNSPR = new g2o::VertexNavStatePR(); + vNSPR->setEstimate(pKFi->GetNavState()); + vNSPR->setId(idKF); + vNSPR->setFixed(true); + optimizer.addVertex(vNSPR); + } + // For Local-Window-Previous KeyFrame, add V and Bias vertex + if(pKFi == pKFPrevLocal) + { + g2o::VertexNavStateV * vNSV = new g2o::VertexNavStateV(); + vNSV->setEstimate(pKFi->GetNavState()); + vNSV->setId(idKF+1); + vNSV->setFixed(true); //todo + optimizer.addVertex(vNSV); + + g2o::VertexNavStateBias * vNSBias = new g2o::VertexNavStateBias(); + vNSBias->setEstimate(pKFi->GetNavState()); + vNSBias->setId(idKF+2); + vNSBias->setFixed(true); //todo. true or false + optimizer.addVertex(vNSBias); + } + + if(idKF+2>maxKFid) + maxKFid=idKF+2; + + } + + // Edges between KeyFrames in Local Window + // and + // Edges between 1st KeyFrame of Local Window and its previous (fixed)KeyFrame - pKFPrevLocal + vector vpEdgesNavStatePRV; + vector vpEdgesNavStateBias; + // Use chi2inv() in MATLAB to compute the value corresponding to 0.95/0.99 prob. w.r.t 15DOF: 24.9958/30.5779 + // 12.592/16.812 for 0.95/0.99 6DoF + // 16.919/21.666 for 0.95/0.99 9DoF + //const float thHuberNavState = sqrt(30.5779); + const float thHuberNavStatePRV = sqrt(/*1e4*/100*21.666); + const float thHuberNavStateBias = sqrt(/*1e4*/100*16.812); + // Inverse covariance of bias random walk + + Matrix InvCovBgaRW = Matrix::Identity(); + InvCovBgaRW.topLeftCorner(3,3) = Matrix3d::Identity()/IMUData::getGyrBiasRW2(); // Gyroscope bias random walk, covariance INVERSE + InvCovBgaRW.bottomRightCorner(3,3) = Matrix3d::Identity()/IMUData::getAccBiasRW2(); // Accelerometer bias random walk, covariance INVERSE + + for(list::const_iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + KeyFrame* pKF1 = *lit; // Current KF, store the IMU pre-integration between previous-current + if(!pKF1) cerr<<"pKF1"<GetPrevKeyFrame(); // Previous KF + + if(!pKF1 || !pKF0) cerr<<"pKF1="<<(size_t)pKF1<<", pKF0="<<(size_t)pKF0<setVertex(0, dynamic_cast(optimizer.vertex(3*pKF0->mnId))); + epvr->setVertex(1, dynamic_cast(optimizer.vertex(3*pKF1->mnId))); + epvr->setVertex(2, dynamic_cast(optimizer.vertex(3*pKF0->mnId+1))); + epvr->setVertex(3, dynamic_cast(optimizer.vertex(3*pKF1->mnId+1))); + epvr->setVertex(4, dynamic_cast(optimizer.vertex(3*pKF0->mnId+2))); + epvr->setMeasurement(pKF1->GetIMUPreInt()); + + //Matrix9d InvCovPVR = pKF1->GetIMUPreInt().getCovPVPhi().inverse(); + Matrix9d CovPRV = pKF1->GetIMUPreInt().getCovPVPhi(); + CovPRV.col(3).swap(CovPRV.col(6)); + CovPRV.col(4).swap(CovPRV.col(7)); + CovPRV.col(5).swap(CovPRV.col(8)); + CovPRV.row(3).swap(CovPRV.row(6)); + CovPRV.row(4).swap(CovPRV.row(7)); + CovPRV.row(5).swap(CovPRV.row(8)); + epvr->setInformation(CovPRV.inverse()); + + epvr->SetParams(GravityVec); + + g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + epvr->setRobustKernel(rk); + rk->setDelta(thHuberNavStatePRV); + + optimizer.addEdge(epvr); + vpEdgesNavStatePRV.push_back(epvr); + } + // Bias edge + { + g2o::EdgeNavStateBias * ebias = new g2o::EdgeNavStateBias(); + ebias->setVertex(0, dynamic_cast(optimizer.vertex(3*pKF0->mnId+2))); + ebias->setVertex(1, dynamic_cast(optimizer.vertex(3*pKF1->mnId+2))); + ebias->setMeasurement(pKF1->GetIMUPreInt()); + + 
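// The bias change between consecutive keyframes is modelled as a random walk,
// whose covariance grows linearly with the elapsed time.  The information set
// on the next statement is therefore the per-second inverse covariance scaled
// by the time gap,
//     Information = diag(1/sigma_bg^2 * I3, 1/sigma_ba^2 * I3) / (t_j - t_i),
// which is what InvCovBgaRW / pKF1->GetIMUPreInt().getDeltaTime() evaluates to.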
ebias->setInformation(InvCovBgaRW/pKF1->GetIMUPreInt().getDeltaTime()); + + g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + ebias->setRobustKernel(rk); + rk->setDelta(thHuberNavStateBias); + + optimizer.addEdge(ebias); + vpEdgesNavStateBias.push_back(ebias); + } + + } + + // Set MapPoint vertices + const int nExpectedSize = (lLocalKeyFrames.size()+lFixedCameras.size())*lLocalMapPoints.size(); + + //vector vpEdgesMono; + vector vpEdgesMono; + vpEdgesMono.reserve(nExpectedSize); + + vector vpEdgeKFMono; + vpEdgeKFMono.reserve(nExpectedSize); + + vector vpMapPointEdgeMono; + vpMapPointEdgeMono.reserve(nExpectedSize); + + const float thHuberMono = sqrt(5.991); + + vector > vRefNormXY; + vRefNormXY.resize(lLocalMapPoints.size()); + vector vMPGood; + vMPGood.resize(lLocalMapPoints.size(), false); + int mpcnt=0; + + for(list::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++, mpcnt++) + { + MapPoint* pMP = *lit; + + Vector3d Pw = Converter::toVector3d(pMP->GetWorldPos()); + KeyFrame* pRefKF = pMP->GetReferenceKeyFrame(); + if(!pRefKF) + cerr<<("!pRefKF???"); + if(/*!sLocalFixKFs.count(pRefKF) */pRefKF->isBad()) + { + cerr<mnId<<" pRefKF not in local or fixed window, bBad="<isBad()<GetRotation()); + Vector3d tcw = Converter::toVector3d(pRefKF->GetTranslation()); + Vector3d Pc = Rcw*Pw+tcw; + double dc = Pc[2]; //depth + if(dc < 0.01) + { + std::cerr<<"depth < 0.01, = "<mnId+maxKFid+1; + g2o::VertexIDP* vPoint = new g2o::VertexIDP(); + vPoint->setEstimate(1.0/dc); + vPoint->setId(mpVertexId); + vPoint->setMarginalized(true); + //optimizer.addVertex(vPoint); + + mapMapPointObs/*map*/ observations = pMP->GetObservations(); + // compute normalized image coordinate [x,y] + if(!observations.count(pRefKF)) + cerr<<"!observations.count(pRefKF), why???"<mnId<<" refKFid: "<mvKeysUn[kpIdxInRefKF]; + double normx = (kpRefUn.pt.x-pRefKF->cx)/pRefKF->fx; + double normy = (kpRefUn.pt.y-pRefKF->cy)/pRefKF->fy; + vRefNormXY[mpcnt] << normx, normy; + + // g2o::VertexSBAPointXYZ* vPoint = new g2o::VertexSBAPointXYZ(); + // vPoint->setEstimate(Pw); + // int id = pMP->mnId+maxKFid+1; + // vPoint->setId(id); + // vPoint->setMarginalized(true); + // optimizer.addVertex(vPoint); + + bool baddmpvertex = false; + + // Set edges between KeyFrame and MapPoint + for(mapMapPointObs/*map*/::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + { + KeyFrame* pKFi = mit->first; + if(pKFi == pRefKF) + { + continue; // no edge for reference keyframe + } + + if(!pKFi->isBad()) + { + const cv::KeyPoint &kpUn = pKFi->mvKeysUn[mit->second]; + + // Monocular observation + if(pKFi->mvuRight[mit->second]<0) + { + // There may be no available observation. 
So when there's observation then add vertex of mappoint + if(!baddmpvertex) + { + baddmpvertex = true; + optimizer.addVertex(vPoint); + vMPGood[mpcnt] = true; + } + + // 0-idp, 1-refPR, 2-curPR, 3-TcbPR + g2o::EdgePRIDP* e = new g2o::EdgePRIDP(); + e->setVertex(0, dynamic_cast(optimizer.vertex(mpVertexId))); + e->setVertex(1, dynamic_cast(optimizer.vertex(3*pRefKF->mnId))); + e->setVertex(2, dynamic_cast(optimizer.vertex(3*pKFi->mnId))); + e->setVertex(3, dynamic_cast(optimizer.vertex(ExtrinsicVertexId))); + e->SetParams(normx, normy, pRefKF->fx, pRefKF->fy, pRefKF->cx, pRefKF->cy); + + //g2o::EdgeNavStatePRPointXYZ* e = new g2o::EdgeNavStatePRPointXYZ(); + //e->setVertex(0, dynamic_cast(optimizer.vertex(id))); + //e->setVertex(1, dynamic_cast(optimizer.vertex(3*pKFi->mnId))); + //e->SetParams(pKFi->fx,pKFi->fy,pKFi->cx,pKFi->cy,Rcb.transpose(),-Rcb.transpose()*Pcb); + + Eigen::Matrix obs; + obs << kpUn.pt.x, kpUn.pt.y; + e->setMeasurement(obs); + + const float &invSigma2 = pKFi->mvInvLevelSigma2[kpUn.octave]; + e->setInformation(Eigen::Matrix2d::Identity()*invSigma2); + + g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + e->setRobustKernel(rk); + rk->setDelta(thHuberMono); + + optimizer.addEdge(e); + + vpEdgesMono.push_back(e); + vpEdgeKFMono.push_back(pKFi); + vpMapPointEdgeMono.push_back(pMP); + } + else + { + // Test log + cerr<<"Stereo not supported yet, why here?? check."<isBad()) + continue; + + if(e->chi2()>5.991 || !e->isDepthPositive() || static_cast(e->vertex(0))->estimate()<2e-6) + { + e->setLevel(1); + //cerr<<"e->chi2(): "<chi2()<<", inverse depth rho: "<(e->vertex(0))->estimate()<setRobustKernel(0); + } + + // Optimize again without the outliers + optimizer.initializeOptimization(0); + optimizer.optimize(10); + + } + + // + vector > vToErase; + vToErase.reserve(vpEdgesMono.size()); + + double PosePointchi2=0; + // Check inlier observations + for(size_t i=0, iend=vpEdgesMono.size(); iisBad()) + continue; + + if(e->chi2()>5.991 || !e->isDepthPositive() || static_cast(e->vertex(0))->estimate()<2e-6 || e->level()!=0) + { + KeyFrame* pKFi = vpEdgeKFMono[i]; + vToErase.push_back(make_pair(pKFi,pMP)); + } + + PosePointchi2 += e->chi2(); + } + +// // Debug log +// // Check inlier observations +// int tmpcnt2=0; +// for(size_t i=0, iend=vpEdgesNavStatePRV.size(); ichi2()>21.666) +// { +// cout<<"2 PRVedge "<chi2()<<". "; +// tmpcnt2++; +// } +// } +// //cout<chi2()>16.812) +// { +// cout<<"2 Bias edge "<chi2()<<". 
"; +// tmpcnt2++; +// } +// } +// if(tmpcnt2>0) +// cout< lock(pMap->mMutexMapUpdate); + + if(!vToErase.empty()) + { + for(size_t i=0;iEraseMapPointMatch(pMPi); + pMPi->EraseObservation(pKFi); + } + } + + //Keyframes + for(list::const_iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + KeyFrame* pKFi = *lit; + g2o::VertexNavStatePR* vNSPR = static_cast(optimizer.vertex(3*pKFi->mnId)); + g2o::VertexNavStateV* vNSV = static_cast(optimizer.vertex(3*pKFi->mnId+1)); + g2o::VertexNavStateBias* vNSBias = static_cast(optimizer.vertex(3*pKFi->mnId+2)); + // In optimized navstate, bias not changed, delta_bias not zero, should be added to bias + const NavState& optPRns = vNSPR->estimate(); + const NavState& optVns = vNSV->estimate(); + const NavState& optBiasns = vNSBias->estimate(); + NavState primaryns = pKFi->GetNavState(); + // Update NavState + pKFi->SetNavStatePos(optPRns.Get_P()); + pKFi->SetNavStateRot(optPRns.Get_R()); + pKFi->SetNavStateVel(optVns.Get_V()); + //if(optBiasns.Get_dBias_Acc().norm()<1e-2 && optBiasns.Get_BiasGyr().norm()<1e-4) + //{ + pKFi->SetNavStateDeltaBg(optBiasns.Get_dBias_Gyr()); + pKFi->SetNavStateDeltaBa(optBiasns.Get_dBias_Acc()); + //} + + // Update pose Tcw + pKFi->UpdatePoseFromNS(ConfigParam::GetMatTbc()); + + // Test log + if( (primaryns.Get_BiasGyr() - optPRns.Get_BiasGyr()).norm() > 1e-6 || (primaryns.Get_BiasGyr() - optBiasns.Get_BiasGyr()).norm() > 1e-6 ) + cerr<<"gyr bias change in optimization?"< 1e-6 || (primaryns.Get_BiasAcc() - optBiasns.Get_BiasAcc()).norm() > 1e-6 ) + cerr<<"acc bias change in optimization?"<::const_iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++, mpcnt++) + { + MapPoint* pMP = *lit; + if(!pMP) cerr<<("!pMP????"); + KeyFrame* pRefKF = pMP->GetReferenceKeyFrame(); + if(!pRefKF) cerr<<("!pRefKF???"); + if(/*!sLocalFixKFs.count(pRefKF)*/pRefKF->isBad()) + { + continue; + } + if(!vMPGood[mpcnt]) + continue; + //g2o::VertexSBAPointXYZ* vPoint = static_cast(optimizer.vertex(pMP->mnId+maxKFid+1)); + g2o::VertexIDP* vPoint = static_cast(optimizer.vertex(pMP->mnId+maxKFid+1)); + const Vector2d& refXY = vRefNormXY[mpcnt]; + Vector3d Pref; + Pref << refXY[0], refXY[1], 1.0; + double rho = vPoint->estimate(); + //if(rho<1e-4) {cerr<<"rho="<GetRotation().t(); // Rwr = Rrw^T + cv::Mat twr = pRefKF->GetCameraCenter(); // twr = - Rrw^T * trw + + pMP->SetWorldPos(Rwr*Converter::toCvMat(Pref) + twr); // point coordinate in world frame + pMP->UpdateNormalAndDepth(); + } + + // update Tcb + { + g2o::VertexNavStatePR* vTcb = static_cast(optimizer.vertex(ExtrinsicVertexId)); + Matrix3d Rcb = vTcb->estimate().Get_RotMatrix(); + Vector3d tcb = vTcb->estimate().Get_P(); + if(!vTcb->fixed()) + cerr<<"opt Rcb/tcb:"<SetMapUpdateFlagInTracking(true); + } + +} + +void Optimizer::GlobalBundleAdjustmentNavStatePRV(Map* pMap, const cv::Mat& gw, int nIterations, bool* pbStopFlag, const unsigned long nLoopKF, const bool bRobust) +{ + vector vpKFs = pMap->GetAllKeyFrames(); + vector vpMP = pMap->GetAllMapPoints(); + + // Extrinsics + Matrix4d Tbc = ConfigParam::GetEigTbc(); + Matrix3d Rbc = Tbc.topLeftCorner(3,3); + Vector3d Pbc = Tbc.topRightCorner(3,1); + // Gravity vector in world frame + Vector3d GravityVec = Converter::toVector3d(gw); + + vector vbNotIncludedMP; + vbNotIncludedMP.resize(vpMP.size()); + + g2o::SparseOptimizer optimizer; + g2o::BlockSolverX::LinearSolverType * linearSolver; + + linearSolver = new g2o::LinearSolverEigen(); + //linearSolver = new g2o::LinearSolverCholmod(); + + 
g2o::BlockSolverX * solver_ptr = new g2o::BlockSolverX(linearSolver); + + g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr); + //g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(solver_ptr); + optimizer.setAlgorithm(solver); + + if(pbStopFlag) + optimizer.setForceStopFlag(pbStopFlag); + + long unsigned int maxKFid = 0; + + // Set KeyFrame vertices + for(size_t i=0; iisBad()) + continue; + + // PR + g2o::VertexNavStatePR * vNSPR = new g2o::VertexNavStatePR(); + vNSPR->setEstimate(pKF->GetNavState()); + vNSPR->setId(pKF->mnId*3); + vNSPR->setFixed(pKF->mnId==0); + optimizer.addVertex(vNSPR); + // V + g2o::VertexNavStateV * vNSV = new g2o::VertexNavStateV(); + vNSV->setEstimate(pKF->GetNavState()); + vNSV->setId(pKF->mnId*3+1); + vNSV->setFixed(false); + optimizer.addVertex(vNSV); + // Bias + g2o::VertexNavStateBias * vNSBias = new g2o::VertexNavStateBias(); + vNSBias->setEstimate(pKF->GetNavState()); + vNSBias->setId(pKF->mnId*3+2); + vNSBias->setFixed(pKF->mnId==0); + optimizer.addVertex(vNSBias); + + if(pKF->mnId*3+2>maxKFid) + maxKFid=pKF->mnId*3+2; + } + + // Add NavState PRV/Bias edges + const float thHuberNavStatePRV = sqrt(100*21.666); + const float thHuberNavStateBias = sqrt(100*16.812); + // Inverse covariance of bias random walk + + Matrix InvCovBgaRW = Matrix::Identity(); + InvCovBgaRW.topLeftCorner(3,3) = Matrix3d::Identity()/IMUData::getGyrBiasRW2(); // Gyroscope bias random walk, covariance INVERSE + InvCovBgaRW.bottomRightCorner(3,3) = Matrix3d::Identity()/IMUData::getAccBiasRW2(); // Accelerometer bias random walk, covariance INVERSE + + + for(size_t i=0; iisBad()) + { + cout<<"pKF is bad in gBA, id "<mnId<GetPrevKeyFrame(); + if(!pKF0) + { + if(pKF1->mnId!=0) cerr<<"Previous KeyFrame is NULL?"<setVertex(0, dynamic_cast(optimizer.vertex(3*pKF0->mnId))); + epvr->setVertex(1, dynamic_cast(optimizer.vertex(3*pKF1->mnId))); + epvr->setVertex(2, dynamic_cast(optimizer.vertex(3*pKF0->mnId+1))); + epvr->setVertex(3, dynamic_cast(optimizer.vertex(3*pKF1->mnId+1))); + epvr->setVertex(4, dynamic_cast(optimizer.vertex(3*pKF0->mnId+2))); + epvr->setMeasurement(pKF1->GetIMUPreInt()); + + //Matrix9d InvCovPVR = pKF1->GetIMUPreInt().getCovPVPhi().inverse(); + Matrix9d CovPRV = pKF1->GetIMUPreInt().getCovPVPhi(); + CovPRV.col(3).swap(CovPRV.col(6)); + CovPRV.col(4).swap(CovPRV.col(7)); + CovPRV.col(5).swap(CovPRV.col(8)); + CovPRV.row(3).swap(CovPRV.row(6)); + CovPRV.row(4).swap(CovPRV.row(7)); + CovPRV.row(5).swap(CovPRV.row(8)); + epvr->setInformation(CovPRV.inverse()); + + epvr->SetParams(GravityVec); + + if(bRobust) + { + g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + epvr->setRobustKernel(rk); + rk->setDelta(thHuberNavStatePRV); + } + + optimizer.addEdge(epvr); + } + // Bias edge + { + g2o::EdgeNavStateBias * ebias = new g2o::EdgeNavStateBias(); + ebias->setVertex(0, dynamic_cast(optimizer.vertex(3*pKF0->mnId+2))); + ebias->setVertex(1, dynamic_cast(optimizer.vertex(3*pKF1->mnId+2))); + ebias->setMeasurement(pKF1->GetIMUPreInt()); + + ebias->setInformation(InvCovBgaRW/pKF1->GetIMUPreInt().getDeltaTime()); + + if(bRobust) + { + g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + ebias->setRobustKernel(rk); + rk->setDelta(thHuberNavStateBias); + } + + optimizer.addEdge(ebias); + } + + } + + const float thHuber2D = sqrt(5.99); + + // Set MapPoint vertices + for(size_t i=0; iisBad()) + continue; + g2o::VertexSBAPointXYZ* vPoint = new g2o::VertexSBAPointXYZ(); + 
vPoint->setEstimate(Converter::toVector3d(pMP->GetWorldPos())); + const int id = pMP->mnId+maxKFid+1; + vPoint->setId(id); + vPoint->setMarginalized(true); + optimizer.addVertex(vPoint); + + const mapMapPointObs/*map*/ observations = pMP->GetObservations(); + + int nEdges = 0; + //SET EDGES + for(mapMapPointObs/*map*/::const_iterator mit=observations.begin(); mit!=observations.end(); mit++) + { + + KeyFrame* pKF = mit->first; + if(pKF->isBad() || 3*pKF->mnId>maxKFid) + continue; + + nEdges++; + + const cv::KeyPoint &kpUn = pKF->mvKeysUn[mit->second]; + + if(pKF->mvuRight[mit->second]<0) + { + Eigen::Matrix obs; + obs << kpUn.pt.x, kpUn.pt.y; + + g2o::EdgeNavStatePRPointXYZ* e = new g2o::EdgeNavStatePRPointXYZ(); + + e->setVertex(0, dynamic_cast(optimizer.vertex(id))); + e->setVertex(1, dynamic_cast(optimizer.vertex(3*pKF->mnId))); + e->setMeasurement(obs); + const float &invSigma2 = pKF->mvInvLevelSigma2[kpUn.octave]; + e->setInformation(Eigen::Matrix2d::Identity()*invSigma2); + + if(bRobust) + { + g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + e->setRobustKernel(rk); + rk->setDelta(thHuber2D); + } + + e->SetParams(pKF->fx,pKF->fy,pKF->cx,pKF->cy,Rbc,Pbc); + + optimizer.addEdge(e); + } + else + { + cerr<<"Stereo not supported"<isBad()) + continue; + //g2o::VertexSE3Expmap* vSE3 = static_cast(optimizer.vertex(pKF->mnId)); + //g2o::SE3Quat SE3quat = vSE3->estimate(); + g2o::VertexNavStatePR* vNSPR = static_cast(optimizer.vertex(3*pKF->mnId)); + g2o::VertexNavStateV* vNSV = static_cast(optimizer.vertex(3*pKF->mnId+1)); + g2o::VertexNavStateBias* vNSBias = static_cast(optimizer.vertex(3*pKF->mnId+2)); + const NavState& nspr = vNSPR->estimate(); + const NavState& nsv = vNSV->estimate(); + const NavState& nsbias = vNSBias->estimate(); + NavState ns_recov = nspr; + ns_recov.Set_Pos(nspr.Get_P()); + ns_recov.Set_Rot(nspr.Get_R()); + ns_recov.Set_Vel(nsv.Get_V()); + ns_recov.Set_DeltaBiasGyr(nsbias.Get_dBias_Gyr()); + ns_recov.Set_DeltaBiasAcc(nsbias.Get_dBias_Acc()); + + if(nLoopKF==0) + { + //pKF->SetPose(Converter::toCvMat(SE3quat)); + pKF->SetNavState(ns_recov); + pKF->UpdatePoseFromNS(ConfigParam::GetMatTbc()); + } + else + { + pKF->mNavStateGBA = ns_recov; + + pKF->mTcwGBA.create(4,4,CV_32F); + //Converter::toCvMat(SE3quat).copyTo(pKF->mTcwGBA); + cv::Mat Twb_ = cv::Mat::eye(4,4,CV_32F); + Converter::toCvMat(pKF->mNavStateGBA.Get_RotMatrix()).copyTo(Twb_.rowRange(0,3).colRange(0,3)); + Converter::toCvMat(pKF->mNavStateGBA.Get_P()).copyTo(Twb_.rowRange(0,3).col(3)); + cv::Mat Twc_ = Twb_*ConfigParam::GetMatTbc(); + pKF->mTcwGBA = Converter::toCvMatInverse(Twc_); + + pKF->mnBAGlobalForKF = nLoopKF; + } + + } + + //Points + for(size_t i=0; iisBad()) + continue; + g2o::VertexSBAPointXYZ* vPoint = static_cast(optimizer.vertex(pMP->mnId+maxKFid+1)); + + if(nLoopKF==0) + { + pMP->SetWorldPos(Converter::toCvMat(vPoint->estimate())); + pMP->UpdateNormalAndDepth(); + } + else + { + pMP->mPosGBA.create(3,1,CV_32F); + Converter::toCvMat(vPoint->estimate()).copyTo(pMP->mPosGBA); + pMP->mnBAGlobalForKF = nLoopKF; + } + } + +} + +void Optimizer::LocalBundleAdjustmentNavStatePRV(KeyFrame *pCurKF, const std::list &lLocalKeyFrames, bool* pbStopFlag, Map* pMap, cv::Mat& gw, LocalMapping* pLM) +{ + // Check current KeyFrame in local window + if(pCurKF != lLocalKeyFrames.back()) + cerr<<"pCurKF != lLocalKeyFrames.back. check"<::const_iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + KeyFrame* pKFi = *lit; + if(!pKFi) cerr<<"!pKFi. 
why??????"<mnBALocalForKF = pCurKF->mnId; + } + + // Local MapPoints seen in Local KeyFrames + list lLocalMapPoints; + for(list::const_iterator lit=lLocalKeyFrames.begin() , lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + vector vpMPs = (*lit)->GetMapPointMatches(); + for(vector::iterator vit=vpMPs.begin(), vend=vpMPs.end(); vit!=vend; vit++) + { + MapPoint* pMP = *vit; + if(pMP) + if(!pMP->isBad()) + if(pMP->mnBALocalForKF!=pCurKF->mnId) + { + lLocalMapPoints.push_back(pMP); + pMP->mnBALocalForKF=pCurKF->mnId; + } + } + } + + // Fixed Keyframes. Keyframes that see Local MapPoints but that are not Local Keyframes + list lFixedCameras; + // Add the KeyFrame before local window. + KeyFrame* pKFPrevLocal = lLocalKeyFrames.front()->GetPrevKeyFrame(); + if(pKFPrevLocal) + { + // Test log + if(pKFPrevLocal->isBad()) cerr<<"KeyFrame before local window is bad?"<mnBAFixedForKF==pCurKF->mnId) cerr<<"KeyFrame before local, has been added to lFixedKF?"<mnBALocalForKF==pCurKF->mnId) cerr<<"KeyFrame before local, has been added to lLocalKF?"<mnBAFixedForKF = pCurKF->mnId; + if(!pKFPrevLocal->isBad()) + lFixedCameras.push_back(pKFPrevLocal); + else + cerr<<"pKFPrevLocal is Bad?"<::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) + { + mapMapPointObs/*map*/ observations = (*lit)->GetObservations(); + for(mapMapPointObs/*map*/::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + { + KeyFrame* pKFi = mit->first; + + if(pKFi->mnBALocalForKF!=pCurKF->mnId && pKFi->mnBAFixedForKF!=pCurKF->mnId) + { + pKFi->mnBAFixedForKF=pCurKF->mnId; + if(!pKFi->isBad()) + lFixedCameras.push_back(pKFi); + } + } + } + + // Setup optimizer + g2o::SparseOptimizer optimizer; + g2o::BlockSolverX::LinearSolverType * linearSolver; + + linearSolver = new g2o::LinearSolverEigen(); + //linearSolver = new g2o::LinearSolverCholmod(); + + g2o::BlockSolverX * solver_ptr = new g2o::BlockSolverX(linearSolver); + + //g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(solver_ptr); + g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr); + optimizer.setAlgorithm(solver); + + if(pbStopFlag) + optimizer.setForceStopFlag(pbStopFlag); + + int maxKFid = 0; + + // Set Local KeyFrame vertices + for(list::const_iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + KeyFrame* pKFi = *lit; + if(!pKFi) cerr<<"!pKFi when setting local KF vertex???"<mnId*3; + // Vertex of PR/V + { + g2o::VertexNavStatePR * vNSPR = new g2o::VertexNavStatePR(); + vNSPR->setEstimate(pKFi->GetNavState()); + vNSPR->setId(idKF); + vNSPR->setFixed(false); + optimizer.addVertex(vNSPR); + + g2o::VertexNavStateV * vNSV = new g2o::VertexNavStateV(); + vNSV->setEstimate(pKFi->GetNavState()); + vNSV->setId(idKF+1); + vNSV->setFixed(false); + optimizer.addVertex(vNSV); + } + // Vertex of Bias + { + g2o::VertexNavStateBias * vNSBias = new g2o::VertexNavStateBias(); + vNSBias->setEstimate(pKFi->GetNavState()); + vNSBias->setId(idKF+2); + vNSBias->setFixed(false); + optimizer.addVertex(vNSBias); + } + + if(idKF+2>maxKFid) + maxKFid=idKF+2; + } + + // Set Fixed KeyFrame vertices. Including the pKFPrevLocal. 
+ for(list::iterator lit=lFixedCameras.begin(), lend=lFixedCameras.end(); lit!=lend; lit++) + { + KeyFrame* pKFi = *lit; + int idKF = pKFi->mnId*3; + // For common fixed KeyFrames, only add PR vertex + { + g2o::VertexNavStatePR * vNSPR = new g2o::VertexNavStatePR(); + vNSPR->setEstimate(pKFi->GetNavState()); + vNSPR->setId(idKF); + vNSPR->setFixed(true); + optimizer.addVertex(vNSPR); + } + // For Local-Window-Previous KeyFrame, add V and Bias vertex + if(pKFi == pKFPrevLocal) + { + g2o::VertexNavStateV * vNSV = new g2o::VertexNavStateV(); + vNSV->setEstimate(pKFi->GetNavState()); + vNSV->setId(idKF+1); + vNSV->setFixed(true); + optimizer.addVertex(vNSV); + + g2o::VertexNavStateBias * vNSBias = new g2o::VertexNavStateBias(); + vNSBias->setEstimate(pKFi->GetNavState()); + vNSBias->setId(idKF+2); + vNSBias->setFixed(true); + optimizer.addVertex(vNSBias); + } + + if(idKF+2>maxKFid) + maxKFid=idKF+2; + } + + // Edges between KeyFrames in Local Window + // and + // Edges between 1st KeyFrame of Local Window and its previous (fixed)KeyFrame - pKFPrevLocal + vector vpEdgesNavStatePRV; + vector vpEdgesNavStateBias; + // Use chi2inv() in MATLAB to compute the value corresponding to 0.95/0.99 prob. w.r.t 15DOF: 24.9958/30.5779 + // 12.592/16.812 for 0.95/0.99 6DoF + // 16.919/21.666 for 0.95/0.99 9DoF + //const float thHuberNavState = sqrt(30.5779); + const float thHuberNavStatePRV = sqrt(100*21.666); + const float thHuberNavStateBias = sqrt(100*16.812); + // Inverse covariance of bias random walk + + Matrix InvCovBgaRW = Matrix::Identity(); + InvCovBgaRW.topLeftCorner(3,3) = Matrix3d::Identity()/IMUData::getGyrBiasRW2(); // Gyroscope bias random walk, covariance INVERSE + InvCovBgaRW.bottomRightCorner(3,3) = Matrix3d::Identity()/IMUData::getAccBiasRW2(); // Accelerometer bias random walk, covariance INVERSE + + for(list::const_iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + KeyFrame* pKF1 = *lit; // Current KF, store the IMU pre-integration between previous-current + if(!pKF1) cerr<<"pKF1"<GetPrevKeyFrame(); // Previous KF + + if(!pKF1 || !pKF0) cerr<<"pKF1="<<(size_t)pKF1<<", pKF0="<<(size_t)pKF0<setVertex(0, dynamic_cast(optimizer.vertex(3*pKF0->mnId))); + epvr->setVertex(1, dynamic_cast(optimizer.vertex(3*pKF1->mnId))); + epvr->setVertex(2, dynamic_cast(optimizer.vertex(3*pKF0->mnId+1))); + epvr->setVertex(3, dynamic_cast(optimizer.vertex(3*pKF1->mnId+1))); + epvr->setVertex(4, dynamic_cast(optimizer.vertex(3*pKF0->mnId+2))); + epvr->setMeasurement(pKF1->GetIMUPreInt()); + + //Matrix9d InvCovPVR = pKF1->GetIMUPreInt().getCovPVPhi().inverse(); + Matrix9d CovPRV = pKF1->GetIMUPreInt().getCovPVPhi(); + CovPRV.col(3).swap(CovPRV.col(6)); + CovPRV.col(4).swap(CovPRV.col(7)); + CovPRV.col(5).swap(CovPRV.col(8)); + CovPRV.row(3).swap(CovPRV.row(6)); + CovPRV.row(4).swap(CovPRV.row(7)); + CovPRV.row(5).swap(CovPRV.row(8)); + epvr->setInformation(CovPRV.inverse()); + + epvr->SetParams(GravityVec); + + g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + epvr->setRobustKernel(rk); + rk->setDelta(thHuberNavStatePRV); + + optimizer.addEdge(epvr); + vpEdgesNavStatePRV.push_back(epvr); + } + // Bias edge + { + g2o::EdgeNavStateBias * ebias = new g2o::EdgeNavStateBias(); + ebias->setVertex(0, dynamic_cast(optimizer.vertex(3*pKF0->mnId+2))); + ebias->setVertex(1, dynamic_cast(optimizer.vertex(3*pKF1->mnId+2))); + ebias->setMeasurement(pKF1->GetIMUPreInt()); + + ebias->setInformation(InvCovBgaRW/pKF1->GetIMUPreInt().getDeltaTime()); + + g2o::RobustKernelHuber* rk = 
new g2o::RobustKernelHuber; + ebias->setRobustKernel(rk); + rk->setDelta(thHuberNavStateBias); + + optimizer.addEdge(ebias); + vpEdgesNavStateBias.push_back(ebias); + } + +// // Test log +// if(pKF1->GetIMUPreInt().getDeltaTime() < 1e-3) +// { +// cerr<<"IMU pre-integrator delta time between 2 KFs too small: "<GetIMUPreInt().getDeltaTime()<mnId<<","<mnId< vpEdgesMono; + vpEdgesMono.reserve(nExpectedSize); + + vector vpEdgeKFMono; + vpEdgeKFMono.reserve(nExpectedSize); + + vector vpMapPointEdgeMono; + vpMapPointEdgeMono.reserve(nExpectedSize); + + const float thHuberMono = sqrt(5.991); + + for(list::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) + { + MapPoint* pMP = *lit; + g2o::VertexSBAPointXYZ* vPoint = new g2o::VertexSBAPointXYZ(); + vPoint->setEstimate(Converter::toVector3d(pMP->GetWorldPos())); + int id = pMP->mnId+maxKFid+1; + vPoint->setId(id); + vPoint->setMarginalized(true); + optimizer.addVertex(vPoint); + + cv::Mat Pw = pMP->GetWorldPos(); + + const mapMapPointObs/*map*/ observations = pMP->GetObservations(); + + // Set edges between KeyFrame and MapPoint + for(mapMapPointObs/*map*/::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + { + KeyFrame* pKFi = mit->first; + + if(!pKFi->isBad()) + { + const cv::KeyPoint &kpUn = pKFi->mvKeysUn[mit->second]; + + // Monocular observation + if(pKFi->mvuRight[mit->second]<0) + { + Eigen::Matrix obs; + obs << kpUn.pt.x, kpUn.pt.y; + + g2o::EdgeNavStatePRPointXYZ* e = new g2o::EdgeNavStatePRPointXYZ(); + + e->setVertex(0, dynamic_cast(optimizer.vertex(id))); + e->setVertex(1, dynamic_cast(optimizer.vertex(3*pKFi->mnId))); + e->setMeasurement(obs); + const float &invSigma2 = pKFi->mvInvLevelSigma2[kpUn.octave]; + + e->setInformation(Eigen::Matrix2d::Identity()*invSigma2); + + g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + e->setRobustKernel(rk); + rk->setDelta(thHuberMono); + + e->SetParams(pKFi->fx,pKFi->fy,pKFi->cx,pKFi->cy,Rbc,Pbc); + + optimizer.addEdge(e); + vpEdgesMono.push_back(e); + vpEdgeKFMono.push_back(pKFi); + vpMapPointEdgeMono.push_back(pMP); + } + else + { + // Test log + cerr<<"Stereo not supported yet, why here?? check."<isBad()) + continue; + + if(e->chi2()>5.991 || !e->isDepthPositive()) + { + e->setLevel(1); + } + + e->setRobustKernel(0); + } + +// // Check inlier observations +// int tmpcnt=0; +// for(size_t i=0, iend=vpEdgesNavStatePRV.size(); ichi2()>21.666) +// { +// //e->setLevel(1); +// cout<<"1 PRVedge "<chi2()<<". "; +// tmpcnt++; +// } +// //e->setRobustKernel(0); +// } +// //cout<chi2()>16.812) +// { +// //e->setLevel(1); +// cout<<"1 Bias edge "<chi2()<<". "; +// tmpcnt++; +// } +// //e->setRobustKernel(0); +// } +// if(tmpcnt>0) +// cout< > vToErase; + vToErase.reserve(vpEdgesMono.size()); + + double PosePointchi2=0; + // Check inlier observations + for(size_t i=0, iend=vpEdgesMono.size(); iisBad()) + continue; + + if(e->chi2()>5.991 || !e->isDepthPositive()) + { + KeyFrame* pKFi = vpEdgeKFMono[i]; + vToErase.push_back(make_pair(pKFi,pMP)); + } + + PosePointchi2 += e->chi2(); + } + +// // Debug log +// // Check inlier observations +// int tmpcnt2=0; +// for(size_t i=0, iend=vpEdgesNavStatePRV.size(); ichi2()>21.666) +// { +// cout<<"2 PRVedge "<chi2()<<". "; +// tmpcnt2++; +// } +// } +// //cout<chi2()>16.812) +// { +// cout<<"2 Bias edge "<chi2()<<". 
"; +// tmpcnt2++; +// } +// } +// if(tmpcnt2>0) +// cout< lock(pMap->mMutexMapUpdate); + + if(!vToErase.empty()) + { + for(size_t i=0;iEraseMapPointMatch(pMPi); + pMPi->EraseObservation(pKFi); + } + } + + //Keyframes + for(list::const_iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++) + { + KeyFrame* pKFi = *lit; + g2o::VertexNavStatePR* vNSPR = static_cast(optimizer.vertex(3*pKFi->mnId)); + g2o::VertexNavStateV* vNSV = static_cast(optimizer.vertex(3*pKFi->mnId+1)); + g2o::VertexNavStateBias* vNSBias = static_cast(optimizer.vertex(3*pKFi->mnId+2)); + // In optimized navstate, bias not changed, delta_bias not zero, should be added to bias + const NavState& optPRns = vNSPR->estimate(); + const NavState& optVns = vNSV->estimate(); + const NavState& optBiasns = vNSBias->estimate(); + NavState primaryns = pKFi->GetNavState(); + // Update NavState + pKFi->SetNavStatePos(optPRns.Get_P()); + pKFi->SetNavStateRot(optPRns.Get_R()); + pKFi->SetNavStateVel(optVns.Get_V()); + //if(optBiasns.Get_dBias_Acc().norm()<1e-2 && optBiasns.Get_BiasGyr().norm()<1e-4) + //{ + pKFi->SetNavStateDeltaBg(optBiasns.Get_dBias_Gyr()); + pKFi->SetNavStateDeltaBa(optBiasns.Get_dBias_Acc()); + //} + + // Update pose Tcw + pKFi->UpdatePoseFromNS(ConfigParam::GetMatTbc()); + + // Test log + if( (primaryns.Get_BiasGyr() - optPRns.Get_BiasGyr()).norm() > 1e-6 || (primaryns.Get_BiasGyr() - optBiasns.Get_BiasGyr()).norm() > 1e-6 ) + cerr<<"gyr bias change in optimization?"< 1e-6 || (primaryns.Get_BiasAcc() - optBiasns.Get_BiasAcc()).norm() > 1e-6 ) + cerr<<"acc bias change in optimization?"<::const_iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) + { + MapPoint* pMP = *lit; + g2o::VertexSBAPointXYZ* vPoint = static_cast(optimizer.vertex(pMP->mnId+maxKFid+1)); + pMP->SetWorldPos(Converter::toCvMat(vPoint->estimate())); + pMP->UpdateNormalAndDepth(); + } + + if(pLM) + { + pLM->SetMapUpdateFlagInTracking(true); + } + +} + void Optimizer::GlobalBundleAdjustmentNavState(Map* pMap, const cv::Mat& gw, int nIterations, bool* pbStopFlag, const unsigned long nLoopKF, const bool bRobust) { vector vpKFs = pMap->GetAllKeyFrames(); @@ -182,11 +1545,11 @@ void Optimizer::GlobalBundleAdjustmentNavState(Map* pMap, const cv::Mat& gw, int vPoint->setMarginalized(true); optimizer.addVertex(vPoint); - const map observations = pMP->GetObservations(); + const mapMapPointObs/*map*/ observations = pMP->GetObservations(); int nEdges = 0; //SET EDGES - for(map::const_iterator mit=observations.begin(); mit!=observations.end(); mit++) + for(mapMapPointObs/*map*/::const_iterator mit=observations.begin(); mit!=observations.end(); mit++) { KeyFrame* pKF = mit->first; @@ -1012,8 +2375,8 @@ void Optimizer::LocalBundleAdjustmentNavState(KeyFrame *pCurKF, const std::list< // Covisible KeyFrames for(list::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) { - map observations = (*lit)->GetObservations(); - for(map::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + mapMapPointObs/*map*/ observations = (*lit)->GetObservations(); + for(mapMapPointObs/*map*/::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; @@ -1107,8 +2470,8 @@ void Optimizer::LocalBundleAdjustmentNavState(KeyFrame *pCurKF, const std::list< // 12.592/16.812 for 0.95/0.99 6DoF // 16.919/21.666 for 0.95/0.99 9DoF //const float thHuberNavState = sqrt(30.5779); - const float thHuberNavStatePVR = sqrt(21.666); - const 
float thHuberNavStateBias = sqrt(16.812); + const float thHuberNavStatePVR = sqrt(100*21.666); + const float thHuberNavStateBias = sqrt(100*16.812); // Inverse covariance of bias random walk Matrix InvCovBgaRW = Matrix::Identity(); InvCovBgaRW.topLeftCorner(3,3) = Matrix3d::Identity()/IMUData::getGyrBiasRW2(); // Gyroscope bias random walk, covariance INVERSE @@ -1197,10 +2560,12 @@ void Optimizer::LocalBundleAdjustmentNavState(KeyFrame *pCurKF, const std::list< vPoint->setMarginalized(true); optimizer.addVertex(vPoint); - const map observations = pMP->GetObservations(); + cv::Mat Pw = pMP->GetWorldPos(); + + const mapMapPointObs/*map*/ observations = pMP->GetObservations(); // Set edges between KeyFrame and MapPoint - for(map::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + for(mapMapPointObs/*map*/::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; @@ -1553,6 +2918,69 @@ Vector3d Optimizer::OptimizeInitialGyroBias(const std::vector &vpKFs return vBgEst->estimate(); } +Vector3d Optimizer::OptimizeInitialGyroBias(const vector& vTwc, const vector& vImuPreInt) +{ + int N = vTwc.size(); if(vTwc.size()!=vImuPreInt.size()) cerr<<"vTwc.size()!=vImuPreInt.size()"<(); + + g2o::BlockSolverX * solver_ptr = new g2o::BlockSolverX(linearSolver); + + g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(solver_ptr); + //g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr); + optimizer.setAlgorithm(solver); + + // Add vertex of gyro bias, to optimizer graph + g2o::VertexGyrBias * vBiasg = new g2o::VertexGyrBias(); + vBiasg->setEstimate(Eigen::Vector3d::Zero()); + vBiasg->setId(0); + optimizer.addVertex(vBiasg); + + // Add unary edges for gyro bias vertex + //for(std::vector::const_iterator lit=vpKFs.begin(), lend=vpKFs.end(); lit!=lend; lit++) + for(int i=0; isetVertex(0, dynamic_cast(optimizer.vertex(0))); + // measurement is not used in EdgeGyrBias + eBiasg->dRbij = imupreint.getDeltaR(); + eBiasg->J_dR_bg = imupreint.getJRBiasg(); + eBiasg->Rwbi = Rwci*Rcb; + eBiasg->Rwbj = Rwcj*Rcb; + //eBiasg->setInformation(Eigen::Matrix3d::Identity()); + eBiasg->setInformation(imupreint.getCovPVPhi().bottomRightCorner(3,3).inverse()); + optimizer.addEdge(eBiasg); + } + + // It's actualy a linear estimator, so 1 iteration is enough. 
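// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original patch; assumes Eigen, helper
// names are mine): why a single iteration suffices in OptimizeInitialGyroBias.
// To first order the residual of each consecutive keyframe pair (i, j) is
//     r_ij(bg) ~= Log(dRij^T * Rwbi^T * Rwbj) - J_dR_bg * bg,
// i.e. linear in the gyro bias bg, so one Gauss-Newton step coincides with the
// closed-form linear least-squares solution sketched below.  dRij and J_dR_bg
// are the pre-integrated rotation and its bias Jacobian between the two
// keyframes; Rwbi, Rwbj are the body (IMU) rotations, i.e. Rwc * Rcb as above.
// ---------------------------------------------------------------------------
#include <vector>
#include <Eigen/Dense>
#include <Eigen/Geometry>

static Eigen::Vector3d LogSO3(const Eigen::Matrix3d& R)   // axis * angle
{
    Eigen::AngleAxisd aa(R);
    return aa.angle() * aa.axis();
}

Eigen::Vector3d SolveGyroBias(const std::vector<Eigen::Matrix3d>& vRwb,
                              const std::vector<Eigen::Matrix3d>& vdRij,    // size N-1
                              const std::vector<Eigen::Matrix3d>& vJdRbg)   // size N-1
{
    Eigen::Matrix3d H = Eigen::Matrix3d::Zero();
    Eigen::Vector3d b = Eigen::Vector3d::Zero();
    for(size_t i = 0; i + 1 < vRwb.size(); ++i)
    {
        const Eigen::Vector3d e = LogSO3(vdRij[i].transpose() *
                                         vRwb[i].transpose() * vRwb[i+1]);
        const Eigen::Matrix3d& J = vJdRbg[i];
        H += J.transpose() * J;       // unit information, for simplicity
        b += J.transpose() * e;
    }
    return H.ldlt().solve(b);         // gyro bias that best explains the rotation drift
}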
+ //optimizer.setVerbose(true); + optimizer.initializeOptimization(); + optimizer.optimize(1); + + g2o::VertexGyrBias * vBgEst = static_cast(optimizer.vertex(0)); + + return vBgEst->estimate(); +} + void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, const std::list &lLocalKeyFrames, bool* pbStopFlag, Map* pMap, LocalMapping* pLM) { // Check current KeyFrame in local window @@ -1604,8 +3032,8 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, const std::list // Covisible KeyFrames for(list::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) { - map observations = (*lit)->GetObservations(); - for(map::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + mapMapPointObs/*map*/ observations = (*lit)->GetObservations(); + for(mapMapPointObs/*map*/::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; @@ -1694,10 +3122,10 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, const std::list vPoint->setMarginalized(true); optimizer.addVertex(vPoint); - const map observations = pMP->GetObservations(); + const mapMapPointObs/*map*/ observations = pMP->GetObservations(); //Set edges - for(map::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + for(mapMapPointObs/*map*/::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; @@ -1961,11 +3389,11 @@ void Optimizer::BundleAdjustment(const vector &vpKFs, const vectorsetMarginalized(true); optimizer.addVertex(vPoint); - const map observations = pMP->GetObservations(); + const mapMapPointObs/*map*/ observations = pMP->GetObservations(); int nEdges = 0; //SET EDGES - for(map::const_iterator mit=observations.begin(); mit!=observations.end(); mit++) + for(mapMapPointObs/*map*/::const_iterator mit=observations.begin(); mit!=observations.end(); mit++) { KeyFrame* pKF = mit->first; @@ -2369,8 +3797,8 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap list lFixedCameras; for(list::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) { - map observations = (*lit)->GetObservations(); - for(map::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + mapMapPointObs/*map*/ observations = (*lit)->GetObservations(); + for(mapMapPointObs/*map*/::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; @@ -2459,10 +3887,10 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap vPoint->setMarginalized(true); optimizer.addVertex(vPoint); - const map observations = pMP->GetObservations(); + const mapMapPointObs/*map*/ observations = pMP->GetObservations(); //Set edges - for(map::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) + for(mapMapPointObs/*map*/::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; diff --git a/src/System.cc b/src/System.cc index b6ae421..bbbfdd0 100644 --- a/src/System.cc +++ b/src/System.cc @@ -148,7 +148,7 @@ cv::Mat System::TrackMonoVI(const cv::Mat &im, const std::vector &vimu, //------------------------------------------------------------------------------------------- System::System(const string &strVocFile, const string &strSettingsFile, const eSensor sensor, - const bool bUseViewer):mSensor(sensor),mbReset(false),mbActivateLocalizationMode(false), + const bool 
bUseViewer):mSensor(sensor), mpViewer(static_cast(NULL)), mbReset(false),mbActivateLocalizationMode(false), mbDeactivateLocalizationMode(false) { // Output welcome message @@ -220,11 +220,12 @@ System::System(const string &strVocFile, const string &strSettingsFile, const eS mptLoopClosing = new thread(&ORB_SLAM2::LoopClosing::Run, mpLoopCloser); //Initialize the Viewer thread and launch - mpViewer = new Viewer(this, mpFrameDrawer,mpMapDrawer,mpTracker,strSettingsFile); if(bUseViewer) + { + mpViewer = new Viewer(this, mpFrameDrawer,mpMapDrawer,mpTracker,strSettingsFile); mptViewer = new thread(&Viewer::Run, mpViewer); - - mpTracker->SetViewer(mpViewer); + mpTracker->SetViewer(mpViewer); + } //Set pointers between threads mpTracker->SetLocalMapper(mpLocalMapper); @@ -235,6 +236,12 @@ System::System(const string &strVocFile, const string &strSettingsFile, const eS mpLoopCloser->SetTracker(mpTracker); mpLoopCloser->SetLocalMapper(mpLocalMapper); + + if(ConfigParam::GetRealTimeFlag()) + { + //Thread for VINS initialization + mptLocalMappingVIOInit = new thread(&ORB_SLAM2::LocalMapping::VINSInitThread,mpLocalMapper); + } } cv::Mat System::TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight, const double ×tamp) @@ -279,7 +286,13 @@ cv::Mat System::TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight, const } } - return mpTracker->GrabImageStereo(imLeft,imRight,timestamp); + cv::Mat Tcw = mpTracker->GrabImageStereo(imLeft,imRight,timestamp); + + unique_lock lock2(mMutexState); + mTrackingState = mpTracker->mState; + mTrackedMapPoints = mpTracker->mCurrentFrame.mvpMapPoints; + mTrackedKeyPointsUn = mpTracker->mCurrentFrame.mvKeysUn; + return Tcw; } cv::Mat System::TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const double ×tamp) @@ -324,7 +337,13 @@ cv::Mat System::TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const doub } } - return mpTracker->GrabImageRGBD(im,depthmap,timestamp); + cv::Mat Tcw = mpTracker->GrabImageRGBD(im,depthmap,timestamp); + + unique_lock lock2(mMutexState); + mTrackingState = mpTracker->mState; + mTrackedMapPoints = mpTracker->mCurrentFrame.mvpMapPoints; + mTrackedKeyPointsUn = mpTracker->mCurrentFrame.mvKeysUn; + return Tcw; } cv::Mat System::TrackMonocular(const cv::Mat &im, const double ×tamp) @@ -369,7 +388,14 @@ cv::Mat System::TrackMonocular(const cv::Mat &im, const double ×tamp) } } - return mpTracker->GrabImageMonocular(im,timestamp); + cv::Mat Tcw = mpTracker->GrabImageMonocular(im,timestamp); + + unique_lock lock2(mMutexState); + mTrackingState = mpTracker->mState; + mTrackedMapPoints = mpTracker->mCurrentFrame.mvpMapPoints; + mTrackedKeyPointsUn = mpTracker->mCurrentFrame.mvKeysUn; + + return Tcw; } void System::ActivateLocalizationMode() @@ -384,6 +410,19 @@ void System::DeactivateLocalizationMode() mbDeactivateLocalizationMode = true; } +bool System::MapChanged() +{ + static int n=0; + int curn = mpMap->GetLastBigChangeIdx(); + if(n lock(mMutexReset); @@ -394,7 +433,6 @@ void System::Shutdown() { mpLocalMapper->RequestFinish(); mpLoopCloser->RequestFinish(); - mpViewer->RequestFinish(); // Wait until all thread have effectively stopped while(!mpLocalMapper->isFinished() || !mpLoopCloser->isFinished() || @@ -403,7 +441,8 @@ void System::Shutdown() usleep(5000); } - pangolin::BindToContext("ORB-SLAM2: Map Viewer"); + if(mpViewer) + pangolin::BindToContext("ORB-SLAM2: Map Viewer"); } void System::SaveTrajectoryTUM(const string &filename) @@ -558,4 +597,22 @@ void System::SaveTrajectoryKITTI(const string &filename) cout << endl << 
"trajectory saved!" << endl; } +int System::GetTrackingState() +{ + unique_lock lock(mMutexState); + return mTrackingState; +} + +vector System::GetTrackedMapPoints() +{ + unique_lock lock(mMutexState); + return mTrackedMapPoints; +} + +vector System::GetTrackedKeyPointsUn() +{ + unique_lock lock(mMutexState); + return mTrackedKeyPointsUn; +} + } //namespace ORB_SLAM diff --git a/src/Tracking.cc b/src/Tracking.cc index 6723233..2171314 100644 --- a/src/Tracking.cc +++ b/src/Tracking.cc @@ -236,7 +236,7 @@ bool Tracking::TrackLocalMapWithIMU(bool bMapUpdated) IMUPreintegrator imupreint = GetIMUPreIntSinceLastKF(&mCurrentFrame, mpLastKeyFrame, mvIMUSinceLastKF); // Test log - if(mpLocalMapper->GetFirstVINSInited() && !bMapUpdated) cerr<<"1-FirstVinsInit, but not bMapUpdated. shouldn't"<GetFirstVINSInited() && !bMapUpdated) cerr<<"1-FirstVinsInit, but not bMapUpdated. shouldn't"< 1e-6) cerr<<"TrackLocalMapWithIMU current Frame dBias acc not zero"< 1e-6) cerr<<"TrackLocalMapWithIMU current Frame dBias gyr not zero"<GetFirstVINSInited() || bMapUpdated) { - if(mpLocalMapper->GetFirstVINSInited() && !bMapUpdated) cerr<<"2-FirstVinsInit, but not bMapUpdated. shouldn't"<GetFirstVINSInited() && !bMapUpdated) cerr<<"2-FirstVinsInit, but not bMapUpdated. shouldn't"<20; } - return nmatchesMap>=10; + return nmatchesMap>=/*10*/6; } @@ -498,7 +498,7 @@ cv::Mat Tracking::GrabImageMonoVI(const cv::Mat &im, const std::vector Tracking::Tracking(System *pSys, ORBVocabulary* pVoc, FrameDrawer *pFrameDrawer, MapDrawer *pMapDrawer, Map *pMap, KeyFrameDatabase* pKFDB, const string &strSettingPath, const int sensor,ConfigParam* pParams): mState(NO_IMAGES_YET), mSensor(sensor), mbOnlyTracking(false), mbVO(false), mpORBVocabulary(pVoc), - mpKeyFrameDB(pKFDB), mpInitializer(static_cast(NULL)), mpSystem(pSys), + mpKeyFrameDB(pKFDB), mpInitializer(static_cast(NULL)), mpSystem(pSys), mpViewer(NULL), mpFrameDrawer(pFrameDrawer), mpMapDrawer(pMapDrawer), mpMap(pMap), mnLastRelocFrameId(0) { mbCreateNewKFAfterReloc = false; @@ -1494,6 +1494,13 @@ bool Tracking::NeedNewKeyFrame() if(mbOnlyTracking) return false; + // While updating initial poses + if(mpLocalMapper->GetUpdatingInitPoses()) + { + cerr<<"mpLocalMapper->GetUpdatingInitPoses, no new KF"<isStopped() || mpLocalMapper->stopRequested()) return false; @@ -1546,12 +1553,13 @@ bool Tracking::NeedNewKeyFrame() double timegap = 0.1; if(mpLocalMapper->GetVINSInited()) - timegap = 0.4; + timegap = 0.5; //const bool cTimeGap = (fabs(mCurrentFrame.mTimeStamp - mpLastKeyFrame->mTimeStamp)>=0.3 && mnMatchesInliers>15); - const bool cTimeGap = (fabs(mCurrentFrame.mTimeStamp - mpLastKeyFrame->mTimeStamp)>=timegap && mnMatchesInliers>15); + const bool cTimeGap = ((mCurrentFrame.mTimeStamp - mpLastKeyFrame->mTimeStamp)>=timegap) && bLocalMappingIdle && mnMatchesInliers>15; // Condition 1a: More than "MaxFrames" have passed from last keyframe insertion - const bool c1a = mCurrentFrame.mnId>=mnLastKeyFrameId+mMaxFrames; + //const bool c1a = mCurrentFrame.mnId>=mnLastKeyFrameId+mMaxFrames; + const bool c1a = mCurrentFrame.mTimeStamp>=mpLastKeyFrame->mTimeStamp+3.0; // Condition 1b: More than "MinFrames" have passed and Local Mapping is idle const bool c1b = (mCurrentFrame.mnId>=mnLastKeyFrameId+mMinFrames && bLocalMappingIdle); //Condition 1c: tracking is weak @@ -1705,10 +1713,8 @@ void Tracking::SearchLocalPoints() if(pMP->isBad()) continue; // Project (this fills MapPoint variables for matching) - if(mCurrentFrame.isInFrustum(pMP,0.5)) { - pMP->IncreaseVisible(); nToMatch++; } @@ 
-1774,8 +1780,8 @@ void Tracking::UpdateLocalKeyFrames() MapPoint* pMP = mCurrentFrame.mvpMapPoints[i]; if(!pMP->isBad()) { - const map observations = pMP->GetObservations(); - for(map::const_iterator it=observations.begin(), itend=observations.end(); it!=itend; it++) + const mapMapPointObs/*map*/ observations = pMP->GetObservations(); + for(mapMapPointObs/*map*/::const_iterator it=observations.begin(), itend=observations.end(); it!=itend; it++) keyframeCounter[it->first]++; } else @@ -2062,11 +2068,14 @@ bool Tracking::Relocalization() void Tracking::Reset() { - mpViewer->RequestStop(); cout << "System Reseting" << endl; - while(!mpViewer->isStopped()) - usleep(3000); + if(mpViewer) + { + mpViewer->RequestStop(); + while(!mpViewer->isStopped()) + usleep(3000); + } // Reset Local Mapping cout << "Reseting Local Mapper..."; @@ -2101,7 +2110,8 @@ void Tracking::Reset() mlFrameTimes.clear(); mlbLost.clear(); - mpViewer->Release(); + if(mpViewer) + mpViewer->Release(); } void Tracking::ChangeCalibration(const string &strSettingPath) diff --git a/src/Viewer.cc b/src/Viewer.cc index 92fa846..dec3204 100644 --- a/src/Viewer.cc +++ b/src/Viewer.cc @@ -28,7 +28,7 @@ namespace ORB_SLAM2 Viewer::Viewer(System* pSystem, FrameDrawer *pFrameDrawer, MapDrawer *pMapDrawer, Tracking *pTracking, const string &strSettingPath): mpSystem(pSystem), mpFrameDrawer(pFrameDrawer),mpMapDrawer(pMapDrawer), mpTracker(pTracking), - mbFinishRequested(false), mbFinished(true), mbStopped(false), mbStopRequested(false) + mbFinishRequested(false), mbFinished(true), mbStopped(true), mbStopRequested(false) { cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ); @@ -54,6 +54,7 @@ Viewer::Viewer(System* pSystem, FrameDrawer *pFrameDrawer, MapDrawer *pMapDrawer void Viewer::Run() { mbFinished = false; + mbStopped = false; pangolin::CreateWindowAndBind("ORB-SLAM2: Map Viewer",1024,768);
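// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original patch): what the mapMapPointObs
// type used throughout these hunks amounts to.  The comparator body is the one
// added in MapPoint.cc above; the typedef itself presumably lives in a header
// that is not part of this diff.  Keying observations by keyframe id instead
// of by raw KeyFrame* (as the plain map<KeyFrame*,size_t> did) makes the
// iteration order over observations independent of pointer addresses.
// ---------------------------------------------------------------------------
#include <map>
#include <cstddef>

class KeyFrame;   // ORB-SLAM2 type, defined in KeyFrame.h

struct cmpKeyFrameId
{
    bool operator()(const KeyFrame* a, const KeyFrame* b) const;  // a->mnId < b->mnId
};

typedef std::map<KeyFrame*, std::size_t, cmpKeyFrameId> mapMapPointObs;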