// AutomaticDifferentiation/examples/gradient_vector_var.cpp
//
// Example: evaluating f, f', f'' analytically and via forward-mode
// automatic differentiation (scalar Dual and Eigen vector types).

#include "../AutomaticDifferentiation.hpp"
#include <iostream>
#include <iomanip>
#include <Eigen/Dense>
using std::cout;
using std::endl;
using std::setw;
#define PRINT_VAR(x) std::cout << #x << "\t= " << std::setprecision(16) << (x) << std::endl
#define PRINT_DUAL(x) std::cout << #x << "\t= " << std::fixed << std::setprecision(4) << std::setw(10) << (x).a << ", " << std::setw(10) << (x).b << std::endl
// Test function f(x) = 1 + x + x^2 + 1/x + log(x).
// Generic in T so it can be evaluated with plain doubles, Dual numbers,
// or element-wise vector types alike.
template<typename T>
T f(const T & x)
{
    const T poly = 1 + x + x*x;   // polynomial part
    const T recip = 1/x;          // singular at x = 0
    return poly + recip + log(x); // log(x) requires x > 0 for real results
}
// Analytic first derivative of f:
//   df/dx = 1 + 2x - 1/x^2 + 1/x
// Used as the reference against which the AD results are compared.
template<typename T>
T df(const T & x)
{
    // x*x instead of pow(x, 2): avoids a transcendental-capable pow call
    // for a small integer exponent, and only requires operator* on T.
    return 2*x + 1 + 1.0/x - 1/(x*x);
}
// Analytic second derivative of f:
//   d2f/dx2 = 2 - 1/x^2 + 2/x^3
// Reference value for the "Analytical" section printed by main().
template<typename T>
T ddf(const T & x)
{
    // Explicit products instead of pow(x, 2) / pow(x, 3): cheaper for
    // small integer exponents and works for any T with operator*.
    const T x2 = x*x;
    return 2 - 1/x2 + 2/(x2*x);
}
// Generate a function-object type named GradF that evaluates f and its
// gradient (macro from AutomaticDifferentiation.hpp; used as `GradF gradf;`
// in main below — presumably built on the Dual type, confirm in the header).
CREATE_GRAD_FUNCTION_OBJECT(f, GradF);
int main()
{
cout.precision(16);
double xdbl = 1.5;
{
cout << "Analytical\n";
cout << "f(x) = " << f(xdbl) << endl;
cout << "df(x)/dx = " << df(xdbl) << endl;
cout << "d²f(x)/dx² = " << ddf(xdbl) << endl;
}
// 1st derivative forward
{
using Fd = Dual<double>;
Fd x = xdbl;
x.diff(0);
Fd y = f(x);
cout << "\nForward\n";
cout << "f(x) = " << y.a << endl;
cout << "df(x)/dx = " << y.d(0) << endl;
}
// first derivative using the gradient functor
{
GradFunc gradf(f<Dual<double>>, xdbl);
double fx, dfdx;
gradf.get_f_grad(xdbl, fx, dfdx);
cout << "\nForward using gradient function object\n";
cout << "f(x) = " << fx << endl;
cout << "df(x)/dx = " << dfdx << endl;
cout << "df(x)/dx = " << gradf(xdbl) << endl;
}
// using a vector type
{
// using vtype = std::valarray<double>;
using vtype = Eigen::Array3d;
GradF gradf;
// vtype x(3, xdbl);
vtype x(xdbl/2, xdbl, xdbl*2);
vtype fx, dfx;
gradf.get_f_grad(x, fx, dfx);
// Dual<vtype> X(x);
// X.diff(0,1);
// Dual<vtype> Fx = f(X);
// fx = Fx.x();
// dfx = Fx.d(0);
PRINT_VAR(x);
PRINT_VAR(fx);
PRINT_VAR(dfx);
// also, using a single dual<vector_type> in a R^n -> R function to get the full gradient does not work : no operator[] in Dual, and adding it does not seem to work either...
}
return 0;
}