NDEVR
API Documentation
optimization_algorithm_dogleg.h
1// g2o - General Graph Optimization
2// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright notice,
10// this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
17// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
18// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27#ifndef G2O_OPTIMIZATION_ALGORITHM_DOGLEG_H
28#define G2O_OPTIMIZATION_ALGORITHM_DOGLEG_H
29
30#include "optimization_algorithm_with_hessian.h"
31
32namespace NDEVR
33{
34
35 class BlockSolverBase;
36
// NOTE(review): this block is a documentation-extraction artifact. Every code line
// below carries a stale, fused line number from the original header, and several
// original lines were dropped by the extractor (the class declaration, the
// constructor signature, member declarations, and every early-return statement).
// The code text is preserved byte-for-byte here; the comments only annotate intent
// and flag the gaps. Recover the dropped lines from the upstream g2o
// OptimizationAlgorithmDogleg before attempting to compile this file.
//
// Implements Powell's dogleg method: each iteration blends a steepest-descent
// (Cauchy) step and a Gauss-Newton step inside a trust region of radius _delta.
40	 template<class t_solver>
// NOTE(review): the class-declaration line is missing here -- presumably
// `class OptimizationAlgorithmDogleg : public OptimizationAlgorithmWithHessian<t_solver>`
// given the qualified member accesses used throughout the method bodies below.
42	 {
43	 public:
// Identifier of the kind of step taken in an iteration (stored in _lastStep,
// queried via lastStep(), stringified by stepType2Str()).
45	 enum {
46	 STEP_UNDEFINED,
// STEP_SD: steepest-descent step; STEP_GN: full Gauss-Newton step;
// STEP_DL: blended dogleg step on the segment between the two.
47	 STEP_SD, STEP_GN, STEP_DL
48	 };
49	
50	 public:
// NOTE(review): the constructor signature line is missing -- this brace opens the
// body of the OptimizationAlgorithmDogleg constructor, which seeds the
// trust-region and damping defaults. Some initializations (e.g. of
// _maxTrialsAfterFailure, documented in the API trailer) also appear dropped.
56	 {
57	 _userDeltaInit = 1e4;       // user-specified initial trust-region radius
59	 _initialLambda = 1e-7;      // initial damping to enforce a PD Hessian
60	 _lamdbaFactor = 10.;        // (sic: "lamdba") multiplicative damping adjustment
62	 _lastStep = STEP_UNDEFINED; // no step taken yet
64	 }
66	
// Performs one dogleg iteration: builds the linear system, computes the
// steepest-descent and Gauss-Newton steps, blends them inside the trust region,
// applies the update, and accepts or rejects it based on the gain ratio.
// NOTE(review): the `OptimizationAlgorithmWithHessian::template solver.…` spelling
// used below is not valid C++ as written (base template arguments are omitted and
// `template` is misapplied); this looks like extraction damage as well.
73	 virtual OptimizationAlgorithm::SolverResult solve(int iteration, bool online, int* blockPoseIndices, int* blockLandmarkIndices)
74	 {
75	 lib_assert(OptimizationAlgorithmWithHessian<t_solver>::_optimizer, "_optimizer not set");
76	 lib_assert(OptimizationAlgorithmWithHessian<t_solver>::solver.optimizer() == OptimizationAlgorithmWithHessian<t_solver>::_optimizer, "underlying linear solver operates on different graph");
77	
78	
79	 if (iteration == 0 && !online)
80	 { // built up the CCS structure, here due to easy time measure
81	 bool ok = OptimizationAlgorithmWithHessian<t_solver>::template solver.buildStructure(false, blockPoseIndices, blockLandmarkIndices);
82	 if (!ok)
// NOTE(review): the failure branch body is missing here -- upstream g2o returns
// OptimizationAlgorithm::Fail at this point; confirm against the original file.
84	
85	 // init some members to the current size of the problem
// NOTE(review): several lines are missing here -- upstream g2o resizes _hsd,
// _hdl and _auxVector to solver.vectorSize() and seeds _delta from
// _userDeltaInit and _currentLambda from _initialLambda. Without them the
// vectors used below are never sized.
92	 }
93	 OptimizationAlgorithmWithHessian::template solver.optimizer.computeActiveErrors();
94	
// chi2 of the current (pre-step) state; used for the gain ratio below.
95	 g_type currentChi = OptimizationAlgorithmWithHessian::template optimizer.activeRobustChi2();
96	
97	 OptimizationAlgorithmWithHessian::template solver.buildSystem();
98	
// b is a read-only Eigen view over the solver's right-hand-side vector (-gradient).
99	 Eigen::VectorX<g_type>::ConstMapType b(OptimizationAlgorithmWithHessian::template solver.b(), OptimizationAlgorithmWithHessian::template solver.vectorSize());
100	
101	 // compute alpha
// alpha = ||b||^2 / (b^T H b): the exact line-search step length along b,
// computed via _auxVector = H * b.
102	 _auxVector.setZero();
103	 OptimizationAlgorithmWithHessian::template solver.multiplyHessian(_auxVector.data(), OptimizationAlgorithmWithHessian::template solver.b());
104	 g_type bNormSquared = b.squaredNorm();
105	 g_type alpha = bNormSquared / _auxVector.dot(b);
106	
// steepest-descent (Cauchy) step
107	 _hsd = alpha * b;
108	 g_type hsdNorm = _hsd.norm();
109	 g_type hgnNorm = -1.;   // sentinel: Gauss-Newton step not yet computed
110	
111	 bool solvedGaussNewton = false;
112	 bool goodStep = false;
113	 int& numTries = _lastNumTries;
114	 numTries = 0;
115	 do {
116	 ++numTries;
117	
// The Gauss-Newton step only needs to be solved once per outer iteration; the
// retry loop below only re-evaluates the trust-region blending and acceptance.
118	 if (!solvedGaussNewton) {
119	 const g_type minLambda = 1e-12;
120	 const g_type maxLambda = 1e3;
121	 solvedGaussNewton = true;
122	 // apply a damping factor to enforce positive definite Hessian, if the matrix appeared
123	 // to be not positive definite in at least one iteration before.
124	 // We apply a damping factor to obtain a PD matrix.
125	 bool solverOk = false;
126	 while (!solverOk) {
128	 OptimizationAlgorithmWithHessian::template solver.setLambda(_currentLambda, true); // add _currentLambda to the diagonal
129	 solverOk = OptimizationAlgorithmWithHessian::template solver.solve();
131	 OptimizationAlgorithmWithHessian::template solver.restoreDiagonal();
134	 // simple strategy to control the damping factor
135	 if (solverOk) {
// success: relax the damping (but never below minLambda)
136	 _currentLambda = getMax(minLambda, _currentLambda / (0.5 * _lamdbaFactor));
137	 }
138	 else {
// NOTE(review): a line is missing here -- upstream g2o multiplies
// _currentLambda by _lamdbaFactor before this clamp, and breaks out of the
// retry loop once maxLambda is reached. As extracted, a persistent solve
// failure would loop forever; confirm against the original file.
140	 if (_currentLambda > maxLambda) {
141	 _currentLambda = maxLambda;
143	 }
144	 }
145	 }
146	 }
147	 if (!solverOk) {
// NOTE(review): the branch body is missing -- upstream g2o returns
// OptimizationAlgorithm::Fail here when damping could not rescue the solve.
149	 }
150	 hgnNorm = Eigen::VectorX<g_type>::ConstMapType(OptimizationAlgorithmWithHessian::template solver.x(), OptimizationAlgorithmWithHessian::template solver.vectorSize()).norm();
151	 }
152	
// hgn: read-only view over the Gauss-Newton step stored in the solver.
153	 Eigen::VectorX<g_type>::ConstMapType hgn(OptimizationAlgorithmWithHessian::template solver.x(), OptimizationAlgorithmWithHessian::template solver.vectorSize());
154	 lib_assert(hgnNorm >= 0., "Norm of the GN step is not computed");
155	
// Classic dogleg step selection:
//  - GN step inside the trust region -> take it whole;
//  - SD step already outside         -> take the truncated SD step;
//  - otherwise                       -> blend along the segment hsd -> hgn so the
//                                       result lies exactly on the trust-region boundary.
156	 if (hgnNorm < _delta) {
157	 _hdl = hgn;
158	 _lastStep = STEP_GN;
159	 }
160	 else if (hsdNorm > _delta) {
161	 _hdl = _delta / hsdNorm * _hsd;
162	 _lastStep = STEP_SD;
163	 }
164	 else {
165	 _auxVector = hgn - _hsd; // b - a
166	 g_type c = _hsd.dot(_auxVector);
167	 g_type bmaSquaredNorm = _auxVector.squaredNorm();
// beta solves ||hsd + beta*(hgn - hsd)||^2 == _delta^2; the two algebraically
// equivalent forms below are chosen by the sign of c for numerical stability.
168	 g_type beta;
169	 if (c <= 0.)
170	 beta = (-c + sqrt(c * c + bmaSquaredNorm * (_delta * _delta - _hsd.squaredNorm()))) / bmaSquaredNorm;
171	 else {
172	 g_type hsdSqrNorm = _hsd.squaredNorm();
173	 beta = (_delta * _delta - hsdSqrNorm) / (c + sqrt(c * c + bmaSquaredNorm * (_delta * _delta - hsdSqrNorm)));
174	 }
175	 assert(beta > 0. && beta < 1 && "Error while computing beta");
176	 _hdl = _hsd + beta * (hgn - _hsd);
177	 _lastStep = STEP_DL;
178	 assert(_hdl.norm() < _delta + 1e-5 && "Computed step does not correspond to the trust region");
179	 }
180	
181	 // compute the linear gain
// predicted chi2 decrease of the linearized model: 2*b^T*hdl - hdl^T*H*hdl.
182	 _auxVector.setZero();
183	 OptimizationAlgorithmWithHessian::template solver.multiplyHessian(_auxVector.data(), _hdl.data());
184	 g_type linearGain = -1 * (_auxVector.dot(_hdl)) + 2 * (b.dot(_hdl));
185	
186	 // apply the update and see what happens
// push() snapshots the vertex estimates so a bad step can be rolled back.
187	 OptimizationAlgorithmWithHessian::template solver.optimizer.push();
188	 OptimizationAlgorithmWithHessian::template solver.optimizer.update(_hdl.data());
189	 OptimizationAlgorithmWithHessian::template solver.optimizer.computeActiveErrors();
190	 g_type newChi = OptimizationAlgorithmWithHessian::template optimizer.activeRobustChi2();
191	 g_type nonLinearGain = currentChi - newChi;
// guard against division by (near) zero in the gain ratio
192	 if (fabs(linearGain) < 1e-12)
193	 linearGain = 1e-12;
// rho: ratio of actual to predicted decrease -- the trust-region quality measure.
194	 g_type rho = nonLinearGain / linearGain;
195	 //cerr << PVAR(nonLinearGain) << " " << PVAR(linearGain) << " " << PVAR(rho) << endl;
196	 if (rho > 0) { // step is good and will be accepted
197	 OptimizationAlgorithmWithHessian::template solver.optimizer.discardTop();
198	 goodStep = true;
199	 }
200	 else { // recover previous state
201	 OptimizationAlgorithmWithHessian::template solver.optimizer.pop();
202	 }
203	
204	 // update trust region based on the step quality
205	 if (rho > 0.75)
206	 _delta = std::max(_delta, 3. * _hdl.norm());
207	 else if (rho < 0.25)
208	 _delta *= 0.5;
209	 } while (!goodStep && numTries < _maxTrialsAfterFailure);
210	 if (numTries == _maxTrialsAfterFailure || !goodStep)
// NOTE(review): the return statements are missing here -- upstream g2o returns
// OptimizationAlgorithm::Terminate on exhausted trials and
// OptimizationAlgorithm::OK otherwise; as extracted the function has no return.
213	 }
214	
// Prints the current dogleg state (trust region, step type, trials, damping).
217	 virtual void printVerbose(std::ostream& os) const
218	 {
219	 os
220	 << "\t Delta= " << _delta
221	 << "\t step= " << stepType2Str(_lastStep)
222	 << "\t tries= " << _lastNumTries;
// NOTE(review): a guard line appears dropped before the lambda print -- upstream
// g2o only prints lambda when the Hessian was not PD in all iterations
// (`if (!_wasPDInAllIterations)`); the API trailer still documents that member.
224	 os << "\t lambda= " << _currentLambda;
225	 }
226	
// Kind of the last step taken (one of the step-type enum values above).
228	 int lastStep() const { return _lastStep;}
// Current trust-region radius _delta.
230	 g_type trustRegion() const { return _delta;}
231	
// Maps a step-type enum value to a human-readable name for logging.
233	 static const char* stepType2Str(int stepType)
234	 {
235	 switch (stepType) {
236	 case STEP_SD: return "Descent";
237	 case STEP_GN: return "GN";
238	 case STEP_DL: return "Dogleg";
239	 default: return "Undefined";
240	 }
241	 }
242	
243	 protected:
// NOTE(review): most member declarations were dropped by the extractor; per the
// API trailer this section also declared _userDeltaInit, _currentLambda,
// _initialLambda, _lamdbaFactor, _wasPDInAllIterations, _maxTrialsAfterFailure,
// _lastStep and _lastNumTries, all of which are referenced above.
248	
249	 Eigen::VectorX<g_type> _hsd;        // steepest-descent (Cauchy) step
250	 Eigen::VectorX<g_type> _hdl;        // final blended dogleg step
251	 Eigen::VectorX<g_type> _auxVector;  // scratch vector for Hessian products
252	
254	 g_type _delta;                      // trust-region radius
258	 };
259
260} // end namespace
261
262#endif
Implementation of Powell's Dogleg Algorithm.
virtual void printVerbose(std::ostream &os) const
Prints verbose information about the current Dogleg state.
g_type _userDeltaInit
User-specified initial trust region radius.
int _lastStep
Type of the step taken by the algorithm.
OptimizationAlgorithmDogleg()
construct the Dogleg algorithm, which will use the given Solver for solving the linearized system.
g_type trustRegion() const
return the diameter of the trust region
g_type _currentLambda
The damping factor to force positive definite matrix.
bool _wasPDInAllIterations
Whether the matrix was positive definite in all iterations.
int _maxTrialsAfterFailure
Maximum number of trials before declaring failure.
virtual OptimizationAlgorithm::SolverResult solve(int iteration, bool online, int *blockPoseIndices, int *blockLandmarkIndices)
Performs a single Dogleg iteration.
Eigen::VectorX< g_type > _hsd
steepest descent step
static const char * stepType2Str(int stepType)
convert the step type into a human-readable string
g_type _lamdbaFactor
Multiplicative factor for adjusting lambda.
Eigen::VectorX< g_type > _auxVector
auxiliary vector used as scratch space for Hessian products and other intermediate results
int _lastNumTries
Number of tries in the last iteration.
int lastStep() const
return the type of the last step taken by the algorithm
g_type _initialLambda
Initial damping factor for positive-definite enforcement.
Eigen::VectorX< g_type > _hdl
final dogleg step
t_solver solver
The underlying solver instance.
SparseOptimizer & optimizer()
Returns a mutable reference to the underlying sparse optimizer.
SolverResult
Result codes returned by the solver after each iteration.
@ Fail
Iteration failed (e.g., singular matrix).
@ OK
Iteration succeeded, continue.
@ Terminate
Optimization should stop (converged or no improvement).
The primary namespace for the NDEVR SDK.
constexpr t_type getMax(const t_type &left, const t_type &right)
Finds the max of the given arguments using the > operator. The only requirement is that t_type have > ...
t_type sqrt(const t_type &value)