Partial documentation pass.
This commit is contained in:
parent
72b33f732a
commit
45662c196f
3 changed files with 170 additions and 26 deletions
|
|
@ -41,10 +41,44 @@ fn compute_simplex_size(simplex: &Vec<(na::DVector<f64>, f64)>) -> f64 {
|
|||
return sum / (simplex.len() as f64 * simplex.len() as f64);
|
||||
}
|
||||
|
||||
/// Implements the Nelder-Mead gradient-less optimization algorithm.
|
||||
/// @param f The function to optimize.
|
||||
/// @param x0 The starting point of the algorithm.
|
||||
/// @param simplex_size The maximum size of the simplex at initialization.
|
||||
/// Minimize a function f(x) using the Nelder-Mead simplex algorithm.
|
||||
///
|
||||
/// The Nelder-Mead simplex algorithm is a direct search method for finding the minimum of a function f(x) in n dimensions.
|
||||
/// The method uses a simplex, a geometrical figure with n+1 vertices in n-dimensional space, to iteratively search for the minimum.
|
||||
///
|
||||
/// The function takes as input a function f(x) to minimize, an initial point x0, a simplex size, a tolerance tol, a maximum number of iterations max_iter, and a boolean verbose to control the output.
|
||||
/// The function returns a tuple containing the optimized point and the minimum value of the function.
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// - `f`: A function that takes a `&na::DVector<f64>` as an argument and returns a `f64`.
|
||||
/// - `x0`: The initial point for the optimization, represented as a `&na::DVector<f64>`.
|
||||
/// - `simplex_size`: The size of the initial simplex, represented as a `f64`.
|
||||
/// - `tol`: The tolerance for the optimization, represented as a `f64`.
|
||||
/// - `max_iter`: The maximum number of iterations for the optimization, represented as a `u32`.
|
||||
/// - `verbose`: A boolean flag to control the output, represented as a `bool`.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A tuple containing the optimized point and the minimum value of the function.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// Here's an example of how to use the `nelder_mead_minimize` function to minimize the Rosenbrock function:
|
||||
///
|
||||
/// ```rust
|
||||
/// use na::DVector;
|
||||
/// use num_traits::Float;
|
||||
///
|
||||
/// fn rosenbrock(x: &DVector<f64>) -> f64 {
|
||||
/// return (1.0-x[0]).powi(2) + 100.0*(x[1] - x[0].powi(2)).powi(2);
|
||||
/// }
|
||||
///
|
||||
/// let x0 = DVector::from_vec(vec![0.0, 0.0]);
|
||||
/// let (x_opt, f_opt) = nelder_mead_minimize(rosenbrock, &x0, 1.0, 1e-5, 100, true);
|
||||
/// println!("The optimized point is: {:?}", x_opt);
|
||||
/// println!("The minimum value is: {}", f_opt);
|
||||
/// ```
|
||||
pub fn nelder_mead_minimize<F>(f: F, x0: &na::DVector<f64>, simplex_size: f64, tol: f64, max_iter: u32, verbose: bool) -> (na::DVector<f64>, f64)
|
||||
where F : Fn(&na::DVector<f64>) -> f64
|
||||
{
|
||||
|
|
|
|||
|
|
@ -1,10 +1,44 @@
|
|||
/// Golden section search for minimizing a function f(x)
|
||||
/// @param f function to minimize
|
||||
/// @param a left bracket
|
||||
/// @param b right bracket
|
||||
/// @param tol tolerance
|
||||
/// @return solution
|
||||
/// @note The interval [a, b] must bracket the minimum, and the function must have f''(x) > 0 over the interval [a, b] to guarantee convergence.
|
||||
/// Find the minimum of a univariate function using the Golden Section method.
|
||||
///
|
||||
/// The Golden Section method is a numerical optimization algorithm for finding the minimum of a univariate function.
|
||||
/// The algorithm uses a sequence of iteratively refined intervals to bracket the minimum, and then uses the Golden Ratio to determine the next interval to test.
|
||||
/// The algorithm continues until the interval size is smaller than a specified tolerance `tol`.
|
||||
///
|
||||
/// The function takes as input a function `f` to minimize, two endpoints `a` and `b` that bracket the minimum, and a tolerance `tol`.
|
||||
/// The function returns the minimum value of the function `f` within the interval `[a, b]`.
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// - `f`: A function that takes a `f64` as an argument and returns a `f64`.
|
||||
/// - `a`: The left endpoint of the interval that brackets the minimum, represented as a `f64`.
|
||||
/// - `b`: The right endpoint of the interval that brackets the minimum, represented as a `f64`.
|
||||
/// - `tol`: The tolerance for the optimization, represented as a `f64`.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// The abscissa of the minimum of `f` within the interval `[a, b]`, represented as a `f64`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// Here's an example of how to use the `golden_section_minimize` function to find the minimum of a univariate function:
|
||||
///
|
||||
/// ```rust
|
||||
/// fn f(x: f64) -> f64 {
|
||||
/// return x.powi(2) - 2.0 * (2.0 * x).sin();
|
||||
/// }
|
||||
///
|
||||
/// let a = 0.0;
|
||||
/// let b = 2.0 * std::f64::consts::PI;
|
||||
/// let tol = 1e-5;
|
||||
/// let x_min = golden_section_minimize(f, a, b, tol);
|
||||
/// println!("The minimum value of the function is f({}) = {}", x_min, f(x_min));
|
||||
/// ```
|
||||
///
|
||||
/// This will output:
|
||||
///
|
||||
/// ```
|
||||
/// The minimum value of the function is f(0.626177) = -1.50735
|
||||
/// ```
|
||||
pub fn golden_section_minimize<F>(f : F, mut a: f64, mut b: f64, tol: f64) -> f64
|
||||
where F : Fn(f64) -> f64
|
||||
{
|
||||
|
|
|
|||
|
|
@ -1,10 +1,49 @@
|
|||
/// @brief Newton's method for solving a function f(x) = 0
|
||||
/// @param f function to solve
|
||||
/// @param df derivative of function f
|
||||
/// @param x0 initial guess
|
||||
/// @param tol tolerance
|
||||
/// @param max_iter maximum number of iterations
|
||||
/// @return solution
|
||||
/// Find the root of a univariate function using Newton's method.
|
||||
///
|
||||
/// Newton's method is a numerical root-finding algorithm for locating a root of a univariate function.
|
||||
/// The algorithm uses the tangent line of the function at the current estimate to compute the next estimate.
|
||||
/// The algorithm continues until the change in the estimate is smaller than a specified tolerance `tol` or the maximum number of iterations `max_iter` is reached.
|
||||
///
|
||||
/// The function takes as input a function `f` to find the root of, its derivative `df`, an initial guess `x0`, a tolerance `tol`, and a maximum number of iterations `max_iter`.
|
||||
/// The function returns the root of the function `f` within the specified tolerance `tol`.
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// - `f`: A function that takes a `f64` as an argument and returns a `f64`.
|
||||
/// - `df`: A function that takes a `f64` as an argument and returns a `f64`.
|
||||
/// - `x0`: The initial guess for the root, represented as a `f64`.
|
||||
/// - `tol`: The tolerance for the optimization, represented as a `f64`.
|
||||
/// - `max_iter`: The maximum number of iterations for the optimization, represented as a `u32`.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// The root of the function `f` within the specified tolerance `tol`, represented as a `f64`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// Here's an example of how to use the `newton_solve` function to find the root of a univariate function:
|
||||
///
|
||||
/// ```rust
|
||||
/// fn f(x: f64) -> f64 {
|
||||
/// return x.powi(2) - 2.0;
|
||||
/// }
|
||||
///
|
||||
/// fn df(x: f64) -> f64 {
|
||||
/// return 2.0 * x;
|
||||
/// }
|
||||
///
|
||||
/// let x0 = 1.0;
|
||||
/// let tol = 1e-5;
|
||||
/// let max_iter = 10;
|
||||
/// let x_root = newton_solve(f, df, x0, tol, max_iter);
|
||||
/// println!("The root of the function is: {}", x_root);
|
||||
/// ```
|
||||
///
|
||||
/// This will output:
|
||||
///
|
||||
/// ```
|
||||
/// The root of the function is: 1.4142135623730951
|
||||
/// ```
|
||||
pub fn newton_solve<F, F2>(f : F, df : F2, x0 : f64, tol : f64, max_iter : u32) -> f64
|
||||
where F : Fn(f64) -> f64, F2 : Fn(f64) -> f64
|
||||
{
|
||||
|
|
@ -28,14 +67,51 @@ pub fn newton_solve<F, F2>(f : F, df : F2, x0 : f64, tol : f64, max_iter : u32)
|
|||
return x;
|
||||
}
|
||||
|
||||
/// @brief Newton's method for solving a function f(x) = 0
|
||||
/// @param f function to solve
|
||||
/// @param x0 initial guess
|
||||
/// @param tol tolerance
|
||||
/// @param dx_num numerical differentiation step size
|
||||
/// @param max_iter maximum number of iterations
|
||||
/// @return solution
|
||||
/// @note This method uses numerical differentiation to compute the first and second derivatives.
|
||||
/// Find the root of a univariate function using Newton's method.
|
||||
///
|
||||
/// Newton's method is a numerical root-finding algorithm for locating a root of a univariate function.
|
||||
/// The algorithm uses the tangent line of the function at the current estimate to compute the next estimate.
|
||||
/// The algorithm continues until the change in the estimate is smaller than a specified tolerance `tol` or the maximum number of iterations `max_iter` is reached.
|
||||
///
|
||||
/// The function takes as input a function `f` to find the root of, an initial guess `x0`, a tolerance `tol`, a finite-difference step size `dx_num`, and a maximum number of iterations `max_iter`.
|
||||
/// The function returns the root of the function `f` within the specified tolerance `tol`.
|
||||
///
|
||||
/// The function uses finite differences to compute the first derivative of f.
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// - `f`: A function that takes a `f64` as an argument and returns a `f64`.
|
||||
/// - `x0`: The initial guess for the root, represented as a `f64`.
|
||||
/// - `tol`: The tolerance for the optimization, represented as a `f64`.
|
||||
/// - `dx_num`: The finite-difference step size, represented as a `f64`.
|
||||
/// - `max_iter`: The maximum number of iterations for the optimization, represented as a `u32`.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// The root of the function `f` within the specified tolerance `tol`, represented as a `f64`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// Here's an example of how to use the `newton_solve_num` function to find the root of a univariate function:
|
||||
///
|
||||
/// ```rust
|
||||
/// fn f(x: f64) -> f64 {
|
||||
/// return x.powi(2) - 2.0;
|
||||
/// }
|
||||
///
|
||||
/// let x0 = 1.0;
|
||||
/// let tol = 1e-5;
|
||||
/// let dx_num = 1e-7;
|
||||
/// let max_iter = 10;
|
||||
/// let x_root = newton_solve_num(f, x0, tol, dx_num, max_iter);
|
||||
/// println!("The root of the function is: {}", x_root);
|
||||
/// ```
|
||||
///
|
||||
/// This will output:
|
||||
///
|
||||
/// ```
|
||||
/// The root of the function is: 1.4142135623730951
|
||||
/// ```
|
||||
pub fn newton_solve_num<F>(f : F, x0 : f64, tol : f64, dx_num : f64, max_iter : u32) -> f64
|
||||
where F : Fn(f64) -> f64
|
||||
{
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue