NDEVR
API Documentation
block_solver.hpp
1// g2o - General Graph Optimization
2// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright notice,
10// this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
17// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
18// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27#include "sparse_optimizer.h"
28#include <Eigen/LU>
29#include "macros.h"
30#include "misc.h"
31
32namespace NDEVR
33{
34
35using namespace Eigen;
36
37template <typename Traits>
// Default constructor: delegates to Solver() and zero-initializes the block
// bookkeeping, with the Schur complement enabled by default.
// NOTE(review): this is a doc-listing extraction; original lines 38, 42 and 44
// were dropped (presumably the BlockSolver<Traits>::BlockSolver() signature
// and the _numLandmarks / _sizeLandmarks initializers) -- confirm against the
// full header before relying on this listing.
39 : Solver()
40{
41 _numPoses=0;
43 _sizePoses=0;
45 _doSchur=true;
46}
47
48template <typename Traits>
// Resizes all internal matrices and vectors for the given problem size:
// the solution/rhs vectors (via resizeVector), the Schur scratch buffers
// (_coefficients, _bschur) when _doSchur is set, and the block Hessians
// (_Hpp, _Hschur, _Hll, _Hpl) plus their CCS views, using the block index
// buffers prepared in 'scratch'.
// NOTE(review): original line 49 (the signature -- per the member index,
// void resize(IndexScratch& scratch, int totalDim); the body refers to the
// size parameter as 's') was dropped by the extraction -- confirm.
50{
51
52 resizeVector(s);
53
54 if (_doSchur) {
55 // the following two are only used in schur
56 assert(_sizePoses > 0 && "allocating with wrong size");
57 _coefficients.setSize(s);
58 _bschur.setSize(_sizePoses);
59 }
60
61 _Hpp = PoseHessianType(scratch.block_pose_indices.begin(), scratch.block_pose_indices.begin(), scratch.block_pose_indices.size(), scratch.block_pose_indices.size());
62 if (_doSchur) {
63 _Hschur = PoseHessianType(scratch.block_pose_indices.begin(), scratch.block_pose_indices.begin(), scratch.block_pose_indices.size(), scratch.block_pose_indices.size());
64 _Hll = LandmarkHessianType(scratch.block_landmark_indices.begin(), scratch.block_landmark_indices.begin(), scratch.block_landmark_indices.size(), scratch.block_landmark_indices.size());
// NOTE(review): original line 65 is missing from the extraction; in the
// upstream g2o code this position constructs _DInvSchur from the landmark
// block indices -- verify against the full header.
66 _Hpl = PoseLandmarkHessianType(scratch.block_pose_indices.begin(), scratch.block_landmark_indices.begin(), scratch.block_pose_indices.size(), scratch.block_landmark_indices.size());
67 _HplCCS = SparseBlockMatrixCCS<PoseLandmarkMatrixType>(_Hpl.rowBlockIndices(), _Hpl.colBlockIndices());
// _HschurTransposedCCS views _Hschur with row/column index buffers swapped,
// i.e. it is the transposed CCS pattern of the Schur complement.
68 _HschurTransposedCCS = SparseBlockMatrixCCS<PoseMatrixType>(_Hschur.colBlockIndices(), _Hschur.rowBlockIndices());
69#ifdef G2O_OPENMP
// NOTE(review): 'numPoseBlocks' is not declared in the visible listing --
// presumably scratch.block_pose_indices.size(); confirm.
70 _coefficientsMutex.resize(numPoseBlocks);
71#endif
72 }
73}
74
75template <typename Traits>
// Builds the sparse block structure of the system from the optimizer's graph:
// pass 1 assigns Hessian column offsets and counts pose/landmark blocks;
// resize() then allocates the matrices; pass 2 maps each vertex's diagonal
// block; pass 3 walks the active edges and allocates/maps the off-diagonal
// blocks in _Hpp, _Hll and _Hpl (upper triangle only); finally, when _doSchur
// is set, the fill-in pattern of the Schur complement _Hschur is collected in
// a hash-map lookup and frozen. 'zeroBlocks' additionally clears each newly
// allocated block. Returns true on success.
// NOTE(review): doc-listing extraction -- original line 76 (the signature,
// per the member index: bool buildStructure(bool zeroBlocks,
// IndexScratch& scratch)) was dropped, as were several lines flagged below.
77{
78 uint04 sparseDim = 0;
79 scratch.block_pose_indices.clear();
80 scratch.block_landmark_indices.clear();
81 _numPoses = 0;
82 _numLandmarks = 0;
83 _sizePoses = 0;
// NOTE(review): original line 84 is missing -- presumably the matching
// _sizeLandmarks = 0; reset. Confirm.
85 const uint04 size = solver.optimizer.indexMapping().size();
86 for (uint04 i = 0; i < size; ++i)
87 {
88 OptimizableGraph::OGVertex* v = solver.optimizer.indexMapping()[i];
89 int dim = v->dimension();
90 if (!v->marginalized())
91 {
// Pose (non-marginalized) vertex: record its cumulative column offset and
// the running end-of-block index used later by resize().
92 v->setColInHessian(_sizePoses);
93 _sizePoses += dim;
94 scratch.block_pose_indices.add(_sizePoses);
95 ++_numPoses;
96 }
97 else
98 {
// NOTE(review): original lines 99 and 101-102 are missing from the
// extraction; by symmetry with the pose branch they presumably set the
// landmark's column in the Hessian, append to
// scratch.block_landmark_indices and increment _numLandmarks -- confirm.
100 _sizeLandmarks += dim;
103 }
104 sparseDim += dim;
105 }
106 resize(scratch, sparseDim);
107
108 // allocate the diagonal on Hpp and Hll
109 uint04 poseIdx = 0;
110 uint04 landmarkIdx = 0;
111 for (uint04 i = 0; i < solver.optimizer.indexMapping().size(); ++i)
112 {
113 OptimizableGraph::OGVertex* v = solver.optimizer.indexMapping()[i];
114 if (!v->marginalized())
115 {
116 //assert(poseIdx == v.hessianIndex());
// Allocate (create-if-absent) the diagonal block and hand the vertex a
// direct pointer into the block's storage.
117 PoseMatrixType* m = _Hpp.block(poseIdx, poseIdx, true);
118 if (zeroBlocks)
119 m->setZero();
120 v->mapHessianMemory(m->data());
121 ++poseIdx;
122 }
123 else
124 {
125 LandmarkMatrixType* m = _Hll.block(landmarkIdx, landmarkIdx, true);
126 if (zeroBlocks)
127 m->setZero();
128 v->mapHessianMemory(m->data());
129 ++landmarkIdx;
130 }
131 }
132 assert(poseIdx == _numPoses && landmarkIdx == _numLandmarks);
133
134 // temporary structures for building the pattern of the Schur complement
135 SparseBlockMatrixHashMap<PoseMatrixType>* schurMatrixLookup = 0;
136 if (_doSchur) {
137 schurMatrixLookup = new SparseBlockMatrixHashMap<PoseMatrixType>(_Hschur.rowBlockIndices(), _Hschur.colBlockIndices());
138 schurMatrixLookup->columns().setSize(_Hschur.blockCols().size());
139 }
140
141 // here we assume that the landmark indices start after the pose ones
142 // create the structure in Hpp, Hll and in Hpl
143 for (uint04 i = 0; i < solver.optimizer.activeEdges().size(); i++)
144 {
145 OptimizableGraph::OGEdge* e = solver.optimizer.activeEdges()[i];
146
147 for (uint04 viIdx = 0; viIdx < e->vertexCount(); ++viIdx)
148 {
// NOTE(review): original line 149 is missing -- it presumably fetched
// v1 = e->vertex(viIdx) (cast to OptimizableGraph::OGVertex*); confirm.
150 int ind1 = v1->hessianIndex();
// hessianIndex() == -1 marks a vertex not present in the linearized
// parameter vector (e.g. fixed); such vertices contribute no block.
151 if (ind1 == -1)
152 continue;
153 int indexV1Bak = ind1;
154 for (uint04 vjIdx = viIdx + 1; vjIdx < e->vertexCount(); ++vjIdx)
155 {
// NOTE(review): original line 156 is missing -- presumably
// v2 = e->vertex(vjIdx); confirm.
157 int ind2 = v2->hessianIndex();
158 if (ind2 == -1)
159 continue;
160 ind1 = indexV1Bak;
161 bool transposedBlock = ind1 > ind2;
162 if (transposedBlock) // make sure, we allocate the upper triangle block
163 std::swap(ind1, ind2);
164 if (!v1->marginalized() && !v2->marginalized()) {
165 PoseMatrixType* m = _Hpp.block(ind1, ind2, true);
166 if (zeroBlocks)
167 m->setZero();
168 e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
169 if (_Hschur.cols() > 0) {// assume this is only needed in case we solve with the schur complement
170 schurMatrixLookup->addBlock(ind1, ind2);
171 }
172 }
173 else if (v1->marginalized() && v2->marginalized()) {
174 // RAINER hmm.... should we ever reach this here????
// Landmark Hessian indices continue after the pose indices, hence the
// -_numPoses shift into _Hll's block coordinates.
175 LandmarkMatrixType* m = _Hll.block(ind1 - _numPoses, ind2 - _numPoses, true);
176 if (zeroBlocks)
177 m->setZero();
178 e->mapHessianMemory(m->data(), viIdx, vjIdx, false);
179 }
180 else
181 {
182 if (v1->marginalized())
183 {
184 PoseLandmarkMatrixType* m = _Hpl.block(v2->hessianIndex(), v1->hessianIndex() - _numPoses, true);
185 if (zeroBlocks)
186 m->setZero();
187 e->mapHessianMemory(m->data(), viIdx, vjIdx, true); // transpose the block before writing to it
188 }
189 else
190 {
191 PoseLandmarkMatrixType* m = _Hpl.block(v1->hessianIndex(), v2->hessianIndex() - _numPoses, true);
192 if (zeroBlocks)
193 m->setZero();
194 e->mapHessianMemory(m->data(), viIdx, vjIdx, false); // directly the block
195 }
196 }
197 }
198 }
199 }
200
201 if (!_doSchur)
202 return true;
203
204 _DInvSchur.diagonal().resize(landmarkIdx);
205 _Hpl.fillSparseBlockMatrixCCS(_HplCCS);
206
// Add the Schur fill-in: for each marginalized vertex, every pair of
// non-marginalized vertices reachable through its edges gets an (upper
// triangular) block in the Schur complement pattern.
207 for (uint04 i = 0; i < solver.optimizer.indexMapping().size(); ++i) {
208 OptimizableGraph::OGVertex* v = solver.optimizer.indexMapping()[i];
209 if (v->marginalized())
210 {
211 const Set<HyperGraph::HGEdge*>& vedges = v->edges();
212 for (auto it1 = vedges.begin(); it1 != vedges.end(); ++it1)
213 {
214 for (uint04 i = 0; i < (*it1)->vertexCount(); ++i)
215 {
// NOTE(review): original line 216 is missing -- presumably
// v1 = (*it1)->vertex(i); confirm.
217 if (v1->hessianIndex() == -1 || v1 == v)
218 continue;
219 for (auto it2 = vedges.begin(); it2 != vedges.end(); ++it2) {
220 for (uint04 j = 0; j < (*it2)->vertexCount(); ++j)
221 {
// NOTE(review): original line 222 is missing -- presumably
// v2 = (*it2)->vertex(j); confirm.
223 if (v2->hessianIndex() == -1 || v2 == v)
224 continue;
225 int i1 = v1->hessianIndex();
226 int i2 = v2->hessianIndex();
227 if (i1 <= i2) {
228 schurMatrixLookup->addBlock(i1, i2);
229 }
230 }
231 }
232 }
233 }
234 }
235 }
236
237 _Hschur.takePatternFromHash(*schurMatrixLookup);
238 delete schurMatrixLookup;
239 _Hschur.fillSparseBlockMatrixCCSTransposed(_HschurTransposedCCS);
240
241 return true;
242}
243
244template <typename Traits>
// Incrementally extends the block structure after new vertices/edges were
// added (online mode): appends diagonal blocks to _Hpp for each new
// non-marginalized vertex, grows the rhs/solution vectors, then allocates and
// maps the off-diagonal _Hpp blocks for the new edges. Marginalized vertices
// are not supported on this path (abort()). Returns true on success.
// NOTE(review): doc-listing extraction -- original line 245 (the signature,
// per the member index: bool updateStructure(const
// Buffer<HyperGraph::HGVertex*>& vset, const Set<HyperGraph::HGVertex*>&
// edges)) was dropped, as were the fetch lines flagged below.
246{
247 for (auto vit : vset)
248 {
// NOTE(review): original line 249 is missing -- presumably the cast of
// 'vit' to OptimizableGraph::OGVertex* v; confirm.
250 int dim = v->dimension();
251 if (! v->marginalized())
252 {
252 // (extraction note: the numbers at line starts are the original
// listing's line numbers, not code)
253 v->setColInHessian(_sizePoses);
254 _sizePoses+=dim;
// Grow _Hpp's block index buffers and add an empty block column for the
// new vertex before allocating its diagonal block.
255 _Hpp.rowBlockIndices().add(_sizePoses);
256 _Hpp.colBlockIndices().add(_sizePoses);
257 _Hpp.blockCols().add(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
258 ++_numPoses;
259 int ind = v->hessianIndex();
260 PoseMatrixType* m = _Hpp.block(ind, ind, true);
261 v->mapHessianMemory(m->data());
262 }
263 else
264 {
265 //std::cerr << "updateStructure(): Schur not supported" << std::endl;
266 abort();
267 }
268 }
269 resizeVector(_sizePoses + _sizeLandmarks);
270
271 for (auto it = edges.begin(); it != edges.end(); ++it)
272 {
// NOTE(review): original line 273 is missing -- presumably the cast of
// '*it' to an OptimizableGraph::OGEdge* e; confirm.
274
275 for (uint04 viIdx = 0; viIdx < e->vertexCount(); ++viIdx)
276 {
// NOTE(review): original line 277 is missing -- presumably
// v1 = e->vertex(viIdx); confirm.
278 int ind1 = v1->hessianIndex();
279 int indexV1Bak = ind1;
280 if (ind1 == -1)
281 continue;
282 for (uint04 vjIdx = viIdx + 1; vjIdx < e->vertexCount(); ++vjIdx)
283 {
// NOTE(review): original line 284 is missing -- presumably
// v2 = e->vertex(vjIdx); confirm.
285 int ind2 = v2->hessianIndex();
286 if (ind2 == -1)
287 continue;
288 ind1 = indexV1Bak;
289 bool transposedBlock = ind1 > ind2;
290 if (transposedBlock) // make sure, we allocate the upper triangular block
291 std::swap(ind1, ind2);
292
293 if (! v1->marginalized() && !v2->marginalized())
294 {
295 PoseMatrixType* m = _Hpp.block(ind1, ind2, true);
296 e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
297 } else {
298 //std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
299 }
300 }
301 }
302
303 }
304
305 return true;
306}
307
308
309/*
310template <typename Traits>
311bool BlockSolver<Traits>::computeMarginals(SparseBlockMatrix<MatrixXd>& spinv, const Buffer<std::pair<int, int> >& blockIndices)
312{
313 bool ok = solver.solvePattern(spinv, blockIndices, *_Hpp);
314
315 return ok;
316}*/
317
318template <typename Traits>
// Adds the damping factor 'lambda' (Levenberg-Marquardt style) to the
// diagonal of every pose and landmark diagonal block. When 'backup' is true,
// the pre-damping diagonals are saved so restoreDiagonal() can undo the
// change. Always returns true.
319bool BlockSolver<Traits>::setLambda(g_type lambda, bool backup)
320{
321 if (backup) {
322 _diagonalBackupPose.resize(_numPoses);
// NOTE(review): original line 323 is missing from the extraction --
// presumably _diagonalBackupLandmark.resize(_numLandmarks); (consistent
// with the size assertion in restoreDiagonal()) -- confirm.
324 }
325# ifdef G2O_OPENMP
326//# pragma omp parallel for default (shared) if (_numPoses > 100)
327# endif
328 for (uint04 i = 0; i < _numPoses; ++i)
329 {
// Diagonal blocks are at (i, i); block() without the create flag assumes
// they were allocated by buildStructure().
330 PoseMatrixType *b=_Hpp.block(i,i);
331 if (backup)
332 _diagonalBackupPose[i] = b->diagonal();
333 b->diagonal().array() += lambda;
334 }
335# ifdef G2O_OPENMP
336//# pragma omp parallel for default (shared) if (_numLandmarks > 100)
337# endif
338 for (uint04 i = 0; i < _numLandmarks; ++i)
339 {
340 LandmarkMatrixType *b=_Hll.block(i,i);
341 if (backup)
342 _diagonalBackupLandmark[i] = b->diagonal();
343 b->diagonal().array() += lambda;
344 }
345 return true;
346}
347
348template <typename Traits>
// Restores the diagonals of all pose and landmark Hessian blocks from the
// backups taken by setLambda(..., backup=true), undoing the damping.
// Asserts that backup sizes match the current block counts.
// NOTE(review): original line 349 (the signature -- per the member index,
// void restoreDiagonal()) was dropped by the extraction.
350{
351 lib_assert(_diagonalBackupPose.size() == _numPoses, "Mismatch in dimensions");
352 lib_assert(_diagonalBackupLandmark.size() == _numLandmarks, "Mismatch in dimensions");
353 for (uint04 i = 0; i < _numPoses; ++i) {
354 PoseMatrixType *b=_Hpp.block(i,i);
355 b->diagonal() = _diagonalBackupPose[i];
356 }
357 for (uint04 i = 0; i < _numLandmarks; ++i) {
358 LandmarkMatrixType *b=_Hll.block(i,i);
359 b->diagonal() = _diagonalBackupLandmark[i];
360 }
361}
362
363template <typename Traits>
// Initializes the block solver for a new optimization run. In batch mode
// (online == false) the block Hessians are cleared first; in online mode the
// existing structure is kept. Delegates to the underlying linear solver's
// init() and always returns true.
// NOTE(review): original line 364 (the signature -- per the member index,
// bool init(bool online = false)) was dropped by the extraction.
365{
366 if (! online) {
367 _Hpp.clear();
368 _Hpl.clear();
369 _Hll.clear();
370 }
371 solver.init();
372 return true;
373}
374
375} // end namespace
void resize(IndexScratch &scratch, int totalDim)
Resizes all internal matrices and vectors for the given problem size.
Buffer< g_type > _bschur
Right-hand side for the Schur complement system.
SparseBlockMatrixCCS< PoseLandmarkMatrixType > _HplCCS
CCS view of the pose-landmark Hessian.
Buffer< g_type > _coefficients
Coefficient buffer for Schur elimination.
LinearSolverType solver
The underlying linear solver.
SparseBlockMatrix< PoseMatrixType > _Hpp
Pose-pose Hessian block.
bool _doSchur
Whether to use the Schur complement.
SparseBlockMatrix< LandmarkMatrixType > _Hll
Landmark-landmark Hessian block.
SparseBlockMatrixCCS< PoseMatrixType > _HschurTransposedCCS
Transposed CCS view of the Schur complement.
bool buildStructure(bool zeroBlocks, IndexScratch &scratch)
Builds the sparse block matrix structure from the graph.
SparseBlockMatrix< PoseLandmarkMatrixType > _Hpl
Pose-landmark Hessian block.
bool updateStructure(const Buffer< HyperGraph::HGVertex * > &vset, const Set< HyperGraph::HGVertex * > &edges)
Updates the structure after vertices or edges have changed.
SparseBlockMatrix< PoseMatrixType > _Hschur
Schur complement of the Hessian.
Buffer< PoseVectorType > _diagonalBackupPose
Backup of pose Hessian diagonal.
bool setLambda(g_type lambda, bool backup=false)
Adds a damping factor to the diagonal of the Hessian.
void restoreDiagonal()
Restores the Hessian diagonal from the backup.
uint04 _sizeLandmarks
Total scalar dimensions of poses and landmarks.
BlockSolver()
Default constructor.
bool init(bool online=false)
Initializes the block solver.
SparseBlockMatrixDiagonal< LandmarkMatrixType > _DInvSchur
Inverse diagonal of the landmark Hessian.
Buffer< LandmarkVectorType > _diagonalBackupLandmark
Backup of landmark Hessian diagonal.
uint04 _numLandmarks
Number of pose and landmark blocks.
The equivalent of std::vector but with a bit more control.
Definition Buffer.hpp:58
void add(t_type &&object)
Adds object to the end of the buffer.
Definition Buffer.hpp:190
virtual const HGVertex * vertex(uint04 i) const =0
Returns a const pointer to the i-th vertex.
virtual uint04 vertexCount() const =0
Returns the number of vertices connected by this edge.
const Buffer< HGEdge * > & edges() const
returns the set of hyper-edges that are leaving/entering this vertex
Definition hyper_graph.h:41
Base edge class for the optimizable graph, adding error computation and robust kernels.
virtual void mapHessianMemory(g_type *d, int i, int j, bool rowMajor)=0
maps the internal matrix to some external memory location, you need to provide the memory before call...
A general case Vertex for optimization.
virtual void mapHessianMemory(g_type *d)=0
maps the internal matrix to some external memory location
void setColInHessian(int c)
set the column of this vertex in the Hessian
virtual sint04 dimension() const =0
dimension of the estimated state belonging to this node
int hessianIndex() const
temporary index of this node in the parameter vector obtained from linearization
bool marginalized() const
true => this node is marginalized out during the optimization
Container that stores unique elements in no particular order, and which allows for fast retrieval or i...
Definition Set.h:59
g_type * b()
return b, the right hand side of the system
Definition solver.h:33
void resizeVector(uint04 sx)
Resizes the solution and right-hand side vectors.
Definition solver.h:45
Solver()
Default constructor.
Definition solver.h:19
Sparse matrix which uses blocks.
Sparse matrix which uses blocks on the diagonal.
Sparse matrix which uses blocks based on hash structures.
MatrixType * addBlock(int r, uint04 c, bool zeroBlock=false)
add a block to the pattern, return a pointer to the added block
const Buffer< SparseColumn > & columns() const
the block matrices per block-column
some general case utility functions
The primary namespace for the NDEVR SDK.
uint32_t uint04
-Defines an alias representing a 4 byte, unsigned integer -Can represent exact integer values 0 throu...
Scratch buffers for block index assignments during structure building.
Buffer< int > block_pose_indices
Block indices for pose vertices.
Buffer< int > block_landmark_indices
Block indices for landmark vertices.