/*
 * FunctionApproximation.hpp
 *
 *  Created on: 02.10.2012
 *      Author: heber
 */

#ifndef FUNCTIONAPPROXIMATION_HPP_
#define FUNCTIONAPPROXIMATION_HPP_

// include config.h
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <vector>

#include "FunctionApproximation/FunctionModel.hpp"

class TrainingData;

/** This class encapsulates the solution to approximating a high-dimensional
 * function, represented by two vectors of tuples (input variables and
 * function output), via a model function that is manipulated by a set of
 * parameters.
 *
 * \note For this reason the input and output dimensions have to be given in
 * the constructor, since these are fixed parameters of the problem as a
 * whole: usually, a different input dimension means we have a completely
 * different problem (and hence we may as well construct a new instance of
 * this class).
 *
 * The "training data", i.e. the two sets of input and output values, is
 * supplied separately.
 *
 * The problem is then as follows: a given high-dimensional function, the
 * "model", is supplied, and we have to fit this function via its set of
 * variable parameters. This fitting procedure is executed via a
 * Levenberg-Marquardt algorithm as implemented in the
 * <a href="http://www.ics.forth.gr/~lourakis/levmar/index.html">LevMar</a>
 * package.
 *
 * \section FunctionApproximation-details Details on the inner workings.
 *
 *  FunctionApproximation::operator() is the main function that performs the
 *  non-linear regression. It consists of the following steps:
 *  -# hand the given (initial) parameters over to the model.
 *  -# convert the output vector to a format suitable for levmar.
 *  -# allocate memory for levmar to work in.
 *  -# depending on whether the model is constrained or not and whether we
 *   have a derivative, make use of the appropriate levmar functions with the
 *   prepared parameters.
 *  -# free the memory and give some final information.
 *
 *  levmar needs to evaluate the model. To this end, FunctionApproximation has
 *  two functions whose signatures match the one required by the levmar
 *  package. Hence,
 *  -# FunctionApproximation::LevMarCallback()
 *  -# FunctionApproximation::LevMarDerivativeCallback()
 *  are used as callbacks by levmar only.
 *  These hand the current set of parameters over to the model, bind
 *  FunctionApproximation::evaluate() and
 *  FunctionApproximation::evaluateDerivative(), respectively, and execute
 *  FunctionModel::operator() or FunctionModel::parameter_derivative(),
 *  respectively.
 *
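 *  A minimal usage sketch (the construction of the TrainingData instance and
 *  of the concrete FunctionModel implementation happens elsewhere and is only
 *  assumed here):
 *  \code
 *  // 'data' and 'model' are assumed to have been set up beforehand
 *  FunctionApproximation approximator(data, model);
 *  approximator();   // fit the model's parameters, jacobian via finite differences
 *  \endcode
 *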
 */
class FunctionApproximation
{
public:
  //!> typedef for a vector of input arguments
  typedef std::vector<FunctionModel::arguments_t> inputs_t;
  //!> typedef for a vector of lists of input arguments
  typedef std::vector<FunctionModel::list_of_arguments_t> filtered_inputs_t;
  //!> typedef for a vector of output values
  typedef std::vector<FunctionModel::results_t> outputs_t;
public:
  /** Constructor of the class FunctionApproximation.
   *
   * \param _data container with tuples of (input, output) values
   * \param _model FunctionModel to use in the approximation
   */
  FunctionApproximation(
      const TrainingData &_data,
      FunctionModel &_model);

  /** Constructor of the class FunctionApproximation.
   *
   * \param _input_dimension input dimension for this function approximation
   * \param _output_dimension output dimension for this function approximation
   * \param _model FunctionModel to use in the approximation
   */
  FunctionApproximation(
      const size_t &_input_dimension,
      const size_t &_output_dimension,
      FunctionModel &_model) :
    input_dimension(_input_dimension),
    output_dimension(_output_dimension),
    model(_model)
  {}
  /** Destructor for class FunctionApproximation.
   *
   */
  ~FunctionApproximation()
  {}

  /** Setter for the training data to be used.
   *
   * \param input vector of input tuples, needs to be of
   *        FunctionApproximation::input_dimension size
   * \param output vector of output tuples, needs to be of
   *        FunctionApproximation::output_dimension size
   */
  void setTrainingData(const filtered_inputs_t &input, const outputs_t &output);

  /** Setter for the model function to be used in the approximation.
   *
   * \param _model FunctionModel to use in the approximation
   */
  void setModelFunction(FunctionModel &_model);

  /** This enum steers whether we use finite differences or
   * FunctionModel::parameter_derivative to calculate the jacobian.
   *
   */
  enum JacobianMode {
    FiniteDifferences,
    ParameterDerivative,
    MAXMODE
  };

  /** This starts the fitting process, resulting in the parameters of
   * the model function being optimized with respect to the given training
   * data.
   *
   * \param mode whether to use finite differences or the parameter derivative
   *        in calculating the jacobian
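   *
   * A brief call sketch (the \a approximator instance is assumed to have been
   * constructed with training data and a model beforehand):
   * \code
   * approximator(FunctionApproximation::ParameterDerivative);
   * \endcode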
   */
  void operator()(const enum JacobianMode mode = FiniteDifferences);

  /** Evaluates the model function for each pair of training tuples and returns
   * the output of the function as a vector.
   *
   * This function has a signature compatible with the one required by the
   * LevMar package (with double precision).
   *
   * \param *p array of parameters for the model function of dimension \a m
   * \param *x array of result values of dimension \a n
   * \param m parameter dimension
   * \param n output dimension
   * \param *data additional data, unused here
   */
  void evaluate(double *p, double *x, int m, int n, void *data);

  /** Evaluates the parameter derivative of the model function for each pair of
   * training tuples and returns the output of the function as a vector.
   *
   * This function has a signature compatible with the one required by the
   * LevMar package (with double precision).
   *
   * \param *p array of parameters for the model function of dimension \a m
   * \param *jac on output, jacobian matrix of result values of dimension \a n times \a m
   * \param m parameter dimension
   * \param n output dimension
   * \param *data additional data, unused here
   */
  void evaluateDerivative(double *p, double *jac, int m, int n, void *data);

  /** This function checks whether the parameter derivative of the FunctionModel
   * has been correctly implemented by validating it against finite differences.
   *
   * We use LevMar's dlevmar_chkjac() function.
   *
   * \return true - gradients are ok (>0.5), false - else
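   *
   * A typical (assumed) usage pattern is to validate the analytical derivatives
   * before requesting them for the fit:
   * \code
   * if (approximator.checkParameterDerivatives())
   *   approximator(FunctionApproximation::ParameterDerivative);
   * else
   *   approximator(FunctionApproximation::FiniteDifferences);
   * \endcode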
   */
  bool checkParameterDerivatives();

private:
  //!> levmar-compatible callback that forwards to evaluate()
  static void LevMarCallback(double *p, double *x, int m, int n, void *data);

  //!> levmar-compatible callback that forwards to evaluateDerivative()
  static void LevMarDerivativeCallback(double *p, double *x, int m, int n, void *data);

  //!> hands the given parameter array \a p of dimension \a m over to the model
  void prepareModel(double *p, int m);

  //!> prepares the parameter array \a p of dimension \a m in a format suitable for levmar
  void prepareParameters(double *&p, int &m) const;

  //!> prepares the output array \a x of dimension \a n in a format suitable for levmar
  void prepareOutput(double *&x, int &n) const;

private:
  //!> input dimension (is fixed from construction)
  const size_t input_dimension;
  //!> output dimension (is fixed from construction)
  const size_t output_dimension;

  //!> current input set of training data
  filtered_inputs_t input_data;
  //!> current output set of training data
  outputs_t output_data;

  //!> the model function to be used in the high-dimensional approximation
  FunctionModel &model;
};

#endif /* FUNCTIONAPPROXIMATION_HPP_ */