Diffstat (limited to 'lib/Numeric/GSL/Minimization.hs')
-rw-r--r--  lib/Numeric/GSL/Minimization.hs  34
1 file changed, 27 insertions, 7 deletions
diff --git a/lib/Numeric/GSL/Minimization.hs b/lib/Numeric/GSL/Minimization.hs
index a1d009b..b95765f 100644
--- a/lib/Numeric/GSL/Minimization.hs
+++ b/lib/Numeric/GSL/Minimization.hs
@@ -2,14 +2,14 @@
 -----------------------------------------------------------------------------
 {- |
 Module      :  Numeric.GSL.Minimization
-Copyright   :  (c) Alberto Ruiz 2006
+Copyright   :  (c) Alberto Ruiz 2006-9
 License     :  GPL-style
 
 Maintainer  :  Alberto Ruiz (aruiz at um dot es)
 Stability   :  provisional
 Portability :  uses ffi
 
-Minimization of a multidimensional function Minimization of a multidimensional function using some of the algorithms described in:
+Minimization of a multidimensional function using some of the algorithms described in:
 
 <http://www.gnu.org/software/gsl/manual/html_node/Multidimensional-Minimization.html>
 
@@ -17,6 +17,7 @@ Minimization of a multidimensional function Minimization of a multidimensional f
 -----------------------------------------------------------------------------
 module Numeric.GSL.Minimization (
     minimizeConjugateGradient,
+    minimizeVectorBFGS2,
     minimizeNMSimplex
 ) where
 
@@ -132,7 +133,7 @@ The path to the solution can be graphically shown by means of:
 
 @'Graphics.Plot.mplot' $ drop 2 ('toColumns' p)@
 
 -}
 minimizeConjugateGradient ::
     Double -- ^ initial step size
     -> Double -- ^ minimization parameter
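The trajectory matrix p mentioned above makes it easy to see how the optimizer moves. A minimal usage sketch (the quadratic f, its gradient df, and all numeric arguments are illustrative assumptions, not part of this patch):

    -- minimize f(x,y) = 10(x-1)^2 + 20(y-2)^2 + 30, starting from (5,7)
    f  [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
    df [x,y] = [20*(x-1), 40*(y-2)]

    (sol, path) = minimizeConjugateGradient 1e-2 1e-4 1e-3 30 f df [5,7]
    -- sol should approach [1,2]; each row of path is one iteration, so the
    -- trajectory can be plotted with:  mplot $ drop 2 (toColumns path)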
@@ -142,7 +143,27 @@ minimizeConjugateGradient ::
     -> ([Double] -> [Double]) -- ^ gradient
     -> [Double] -- ^ starting point
     -> ([Double], Matrix Double) -- ^ solution vector, and the optimization trajectory followed by the algorithm
-minimizeConjugateGradient istep minimpar tol maxit f df xi = unsafePerformIO $ do
+minimizeConjugateGradient = minimizeWithDeriv 0
+
+{- | Taken from the GSL manual:
+
+The vector Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm. This is a quasi-Newton method which builds up an approximation to the second derivatives of the function f using the difference between successive gradient vectors. By combining the first and second derivatives the algorithm is able to take Newton-type steps towards the function minimum, assuming quadratic behavior in that region.
+
+The bfgs2 version of this minimizer is the most efficient version available, and is a faithful implementation of the line minimization scheme described in Fletcher's Practical Methods of Optimization, Algorithms 2.6.2 and 2.6.4. It supersedes the original bfgs routine and requires substantially fewer function and gradient evaluations. The user-supplied tolerance tol corresponds to the parameter \sigma used by Fletcher. A value of 0.1 is recommended for typical use (larger values correspond to less accurate line searches).
+-}
+minimizeVectorBFGS2 ::
+    Double -- ^ initial step size
+    -> Double -- ^ minimization parameter tol
+    -> Double -- ^ desired precision of the solution (gradient test)
+    -> Int -- ^ maximum number of iterations allowed
+    -> ([Double] -> Double) -- ^ function to minimize
+    -> ([Double] -> [Double]) -- ^ gradient
+    -> [Double] -- ^ starting point
+    -> ([Double], Matrix Double) -- ^ solution vector, and the optimization trajectory followed by the algorithm
+minimizeVectorBFGS2 = minimizeWithDeriv 1
+
+
+minimizeWithDeriv method istep minimpar tol maxit f df xi = unsafePerformIO $ do
     let xiv = fromList xi
         n = dim xiv
         f' = f . toList
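The new minimizeVectorBFGS2 shares the argument list (and the trajectory result) of minimizeConjugateGradient; only the method code passed to minimizeWithDeriv differs. A hypothetical call, reusing the f and df of the earlier sketch, with 0.1 as the tol value the quoted manual text recommends for typical use:

    (sol', path') = minimizeVectorBFGS2 1e-2 0.1 1e-3 30 f df [5,7]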
@@ -151,7 +172,7 @@ minimizeConjugateGradient istep minimpar tol maxit f df xi = unsafePerformIO $ d
     dfp <- mkVecVecfun (aux_vTov df')
     rawpath <- withVector xiv $ \xiv' ->
                    createMIO maxit (n+2)
-                       (c_minimizeConjugateGradient fp dfp istep minimpar tol (fi maxit) // xiv')
+                       (c_minimizeWithDeriv method fp dfp istep minimpar tol (fi maxit) // xiv')
                        "minimizeDerivV"
     let it = round (rawpath @@> (maxit-1,0))
         path = takeRows it rawpath
@@ -160,9 +181,8 @@ minimizeConjugateGradient istep minimpar tol maxit f df xi = unsafePerformIO $ d
     freeHaskellFunPtr dfp
     return (sol,path)
 
-
 foreign import ccall "gsl-aux.h minimizeWithDeriv"
-    c_minimizeConjugateGradient :: FunPtr (CInt -> Ptr Double -> Double)
+    c_minimizeWithDeriv :: CInt -> FunPtr (CInt -> Ptr Double -> Double)
                            -> FunPtr (CInt -> Ptr Double -> Ptr Double -> IO ())
                            -> Double -> Double -> Double -> CInt
                            -> TVM
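The leading CInt added to c_minimizeWithDeriv selects the GSL algorithm on the C side; within this patch 0 is the conjugate-gradient method and 1 is vector BFGS2, as the two partial applications above show. Purely as an illustrative sketch (these names are not in the patch or in gsl-aux.h), the two codes could be given readable Haskell names:

    import Foreign.C.Types (CInt)

    -- hypothetical helper mirroring the 0/1 codes used in this patch
    data MinimizeMethod = ConjugateGradient | VectorBFGS2

    methodCode :: MinimizeMethod -> CInt
    methodCode ConjugateGradient = 0
    methodCode VectorBFGS2       = 1

    -- e.g. minimizeWithDeriv (methodCode VectorBFGS2) istep minimpar tol maxit f df xi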