// Demo / smoke-test driver for the AutomaticDifferentiation dual-number
// library: computes f, f', f'' analytically and compares against
// forward-mode AD results (scalar duals, a gradient functor, and a
// vectorized evaluation).
//
// NOTE(review): this file was recovered from a copy in which every `<...>`
// token had been stripped (empty #include targets, missing template
// parameter lists).  The include targets and template arguments below are
// reconstructions — confirm each against AutomaticDifferentiation.hpp.
#include "../AutomaticDifferentiation.hpp"
#include <iostream>  // std::cout, std::endl
#include <iomanip>   // std::setw, std::setprecision, std::fixed
#include <cmath>     // log, pow (double overloads)

using std::cout;
using std::endl;
using std::setw;

// Print a variable's name and value at full double precision.
#define PRINT_VAR(x) \
    std::cout << #x << "\t= " << std::setprecision(16) << (x) << std::endl

// Print a dual number's value part (.a) and derivative part (.b) side by side.
#define PRINT_DUAL(x)                                                       \
    std::cout << #x << "\t= " << std::fixed << std::setprecision(4)         \
              << std::setw(10) << (x).a << ", " << std::setw(10) << (x).b   \
              << std::endl

// f(x) = 1 + x + x^2 + 1/x + log(x).
// Templated so it can be instantiated with double as well as Dual types
// (log/pow are expected to be overloaded for Dual by the project header).
template <typename T>
T f(const T& x)
{
    return 1 + x + x*x + 1/x + log(x);
}

// Analytical first derivative: f'(x) = 1 + 2x + 1/x - 1/x^2.
template <typename T>
T df(const T& x)
{
    return 2*x + 1 + 1.0/x - 1/pow(x, 2);
}

// Analytical second derivative: f''(x) = 2 - 1/x^2 + 2/x^3.
template <typename T>
T ddf(const T& x)
{
    return 2 - 1/pow(x, 2) + 2/pow(x, 3);
}

// Generates a gradient function object named GradF wrapping f
// (macro provided by AutomaticDifferentiation.hpp).
CREATE_GRAD_FUNCTION_OBJECT(f, GradF);

int main()
{
    cout.precision(16);
    double xdbl = 1.5;

    // Reference values from the hand-derived formulas above.
    {
        cout << "Analytical\n";
        cout << "f(x) = " << f(xdbl) << endl;
        cout << "df(x)/dx = " << df(xdbl) << endl;
        cout << "d²f(x)/dx² = " << ddf(xdbl) << endl;
    }

    // 1st derivative, forward mode, using a single dual number directly.
    {
        using Fd = Dual<double, 1>;  // TODO(review): confirm Dual's template
                                     // parameters (value type, #directions)
        Fd x = xdbl;
        x.diff(0);  // seed derivative slot 0 with dx/dx = 1
        Fd y = f(x);
        cout << "\nForward\n";
        cout << "f(x) = " << y.a << endl;
        cout << "df(x)/dx = " << y.d(0) << endl;
    }

    // First derivative using the gradient functor wrapper.
    {
        // NOTE(review): reconstructed from the stripped original
        // `GradFunc gradf(f>, xdbl);` — verify GradFunc's template
        // arguments and constructor signature against the header.
        GradFunc<double> gradf(f<Dual<double, 1>>, xdbl);
        double fx, dfdx;
        gradf.get_f_grad(xdbl, fx, dfdx);
        cout << "\nForward using gradient function object\n";
        cout << "f(x) = " << fx << endl;
        cout << "df(x)/dx = " << dfdx << endl;
        cout << "df(x)/dx = " << gradf(xdbl) << endl;
    }

    // Element-wise evaluation over a vector type.
    {
        // using vtype = std::valarray<double>;
        using vtype = Eigen::Array3d;  // NOTE(review): Eigen is presumably
                                       // pulled in by the project header —
                                       // confirm
        GradF gradf;  // NOTE(review): may have had stripped template args
        // vtype x(3, xdbl);  // valarray ctor: three copies of xdbl
        vtype x(xdbl/2, xdbl, xdbl*2);
        vtype fx, dfx;
        gradf.get_f_grad(x, fx, dfx);
        // Manual equivalent using one Dual over the whole vector:
        // Dual<vtype, 1> X(x);
        // X.diff(0,1);
        // Dual<vtype, 1> Fx = f(X);
        // fx = Fx.x();
        // dfx = Fx.d(0);
        PRINT_VAR(x);
        PRINT_VAR(fx);
        PRINT_VAR(dfx);
        // also, using a single dual in a R^n -> R function to get the full
        // gradient does not work : no operator[] in Dual, and adding it
        // does not seem to work either...
    }

    return 0;
}