Diffstat (limited to 'lib/Numeric/GSL/Minimization.hs')
-rw-r--r--  lib/Numeric/GSL/Minimization.hs | 208
1 file changed, 92 insertions(+), 116 deletions(-)
diff --git a/lib/Numeric/GSL/Minimization.hs b/lib/Numeric/GSL/Minimization.hs
index b95765f..048c717 100644
--- a/lib/Numeric/GSL/Minimization.hs
+++ b/lib/Numeric/GSL/Minimization.hs
@@ -13,12 +13,49 @@ Minimization of a multidimensional function using some of the algorithms describ
 
 <http://www.gnu.org/software/gsl/manual/html_node/Multidimensional-Minimization.html>
 
+The example in the GSL manual:
+
+@
+
+f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
+
+main = do
+    let (s,p) = minimize NMSimplex2 1E-2 30 [1,1] f [5,7]
+    print s
+    print p
+
+\> main
+[0.9920430849306288,1.9969168063253182]
+ 0.000 512.500 1.130 6.500 5.000
+ 1.000 290.625 1.409 5.250 4.000
+ 2.000 290.625 1.409 5.250 4.000
+ 3.000 252.500 1.409 5.500 1.000
+ ...
+22.000 30.001 0.013 0.992 1.997
+23.000 30.001 0.008 0.992 1.997
+@
+
+The path to the solution can be graphically shown by means of:
+
+@'Graphics.Plot.mplot' $ drop 3 ('toColumns' p)@
+
+Taken from the GSL manual:
+
+The vector Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm is a quasi-Newton method which builds up an approximation to the second derivatives of the function f using the difference between successive gradient vectors. By combining the first and second derivatives, the algorithm is able to take Newton-type steps towards the function minimum, assuming quadratic behavior in that region.
+
+The bfgs2 version of this minimizer is the most efficient version available, and is a faithful implementation of the line minimization scheme described in Fletcher's Practical Methods of Optimization, Algorithms 2.6.2 and 2.6.4. It supersedes the original bfgs routine and requires substantially fewer function and gradient evaluations. The user-supplied tolerance tol corresponds to the parameter \sigma used by Fletcher. A value of 0.1 is recommended for typical use (larger values correspond to less accurate line searches).
+
+The nmsimplex2 version is a new O(N) implementation of the earlier O(N^2) nmsimplex minimiser. It calculates the size of the simplex as the RMS distance of each vertex from the center, rather than the mean distance, which has the advantage of allowing a linear update.
+
 -}
+
 -----------------------------------------------------------------------------
 module Numeric.GSL.Minimization (
+    minimize, MinimizeMethod(..),
+    minimizeD, MinimizeMethodD(..),
+    minimizeNMSimplex,
     minimizeConjugateGradient,
-    minimizeVectorBFGS2,
-    minimizeNMSimplex
+    minimizeVectorBFGS2
 ) where
 
 
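The optimization path p returned by minimize has n+3 columns (iteration number, function value, simplex size, then the n coordinates), while minimizeD further down produces n+2 (no size column); hence drop 3 in the plotting line above versus drop 2 in the derivative-based examples. A minimal sketch that just inspects the path shape, assuming the module as patched here (plotting with 'Graphics.Plot.mplot' additionally requires gnuplot, so this only counts columns):

@
import Numeric.GSL.Minimization
import Data.Packed.Matrix (cols)

f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30

main :: IO ()
main = do
    let (s, p) = minimize NMSimplex2 1E-2 30 [1,1] f [5,7]
    print s         -- solution, close to [1,2]
    print (cols p)  -- 5 columns here: iteration, f value, size, x, y
@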
@@ -27,68 +64,67 @@ import Data.Packed.Matrix
 import Foreign
 import Foreign.C.Types(CInt)
 
+------------------------------------------------------------------------
+
+{-# DEPRECATED minimizeNMSimplex "use minimize NMSimplex2 eps maxit sizes f xi" #-}
+minimizeNMSimplex f xi szs eps maxit = minimize NMSimplex eps maxit szs f xi
+
+{-# DEPRECATED minimizeConjugateGradient "use minimizeD ConjugateFR eps maxit step tol f g xi" #-}
+minimizeConjugateGradient step tol eps maxit f g xi = minimizeD ConjugateFR eps maxit step tol f g xi
+
+{-# DEPRECATED minimizeVectorBFGS2 "use minimizeD VectorBFGS2 eps maxit step tol f g xi" #-}
+minimizeVectorBFGS2 step tol eps maxit f g xi = minimizeD VectorBFGS2 eps maxit step tol f g xi
+
 -------------------------------------------------------------------------
 
-{- | The method of Nelder and Mead, implemented by /gsl_multimin_fminimizer_nmsimplex/. The gradient of the function is not required. This is the example in the GSL manual:
+data MinimizeMethod = NMSimplex
+                    | NMSimplex2
+                    deriving (Enum,Eq,Show)
+
+-- | Minimization without derivatives.
+minimize :: MinimizeMethod
+         -> Double                    -- ^ desired precision of the solution (size test)
+         -> Int                       -- ^ maximum number of iterations allowed
+         -> [Double]                  -- ^ sizes of the initial search box
+         -> ([Double] -> Double)      -- ^ function to minimize
+         -> [Double]                  -- ^ starting point
+         -> ([Double], Matrix Double) -- ^ solution vector and optimization path
+
+minimize method = minimizeGen (fi (fromEnum method))
+
+data MinimizeMethodD = ConjugateFR
+                     | ConjugatePR
+                     | VectorBFGS
+                     | VectorBFGS2
+                     | SteepestDescent
+                     deriving (Enum,Eq,Show)
+
+-- | Minimization with derivatives.
+minimizeD :: MinimizeMethodD
+          -> Double                    -- ^ desired precision of the solution (gradient test)
+          -> Int                       -- ^ maximum number of iterations allowed
+          -> Double                    -- ^ size of the first trial step
+          -> Double                    -- ^ tol (precise meaning depends on method)
+          -> ([Double] -> Double)      -- ^ function to minimize
+          -> ([Double] -> [Double])    -- ^ gradient
+          -> [Double]                  -- ^ starting point
+          -> ([Double], Matrix Double) -- ^ solution vector and optimization path
+
+minimizeD method = minimizeDGen (fi (fromEnum method))
 
-@minimize f xi = minimizeNMSimplex f xi (replicate (length xi) 1) 1e-2 100
-\ --
-f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
-\ --
-main = do
-    let (s,p) = minimize f [5,7]
-    print s
-    print p
-\ --
-\> main
-[0.9920430849306285,1.9969168063253164]
- 0. 512.500 1.082 6.500 5.
- 1. 290.625 1.372 5.250 4.
- 2. 290.625 1.372 5.250 4.
- 3. 252.500 1.372 5.500 1.
- 4. 101.406 1.823 2.625 3.500
- 5. 101.406 1.823 2.625 3.500
- 6. 60. 1.823 0. 3.
- 7. 42.275 1.303 2.094 1.875
- 8. 42.275 1.303 2.094 1.875
- 9. 35.684 1.026 0.258 1.906
-10. 35.664 0.804 0.588 2.445
-11. 30.680 0.467 1.258 2.025
-12. 30.680 0.356 1.258 2.025
-13. 30.539 0.285 1.093 1.849
-14. 30.137 0.168 0.883 2.004
-15. 30.137 0.123 0.883 2.004
-16. 30.090 0.100 0.958 2.060
-17. 30.005 6.051e-2 1.022 2.004
-18. 30.005 4.249e-2 1.022 2.004
-19. 30.005 4.249e-2 1.022 2.004
-20. 30.005 2.742e-2 1.022 2.004
-21. 30.005 2.119e-2 1.022 2.004
-22. 30.001 1.530e-2 0.992 1.997
-23. 30.001 1.259e-2 0.992 1.997
-24. 30.001 7.663e-3 0.992 1.997@
+-------------------------------------------------------------------------
 
-The path to the solution can be graphically shown by means of:
 
-@'Graphics.Plot.mplot' $ drop 3 ('toColumns' p)@
 
--}
-minimizeNMSimplex :: ([Double] -> Double) -- ^ function to minimize
-                  -> [Double]             -- ^ starting point
-                  -> [Double]             -- ^ sizes of the initial search box
-                  -> Double               -- ^ desired precision of the solution
-                  -> Int                  -- ^ maximum number of iterations allowed
-                  -> ([Double], Matrix Double)
-                     -- ^ solution vector, and the optimization trajectory followed by the algorithm
-minimizeNMSimplex f xi sz tol maxit = unsafePerformIO $ do
+minimizeGen method eps maxit sz f xi = unsafePerformIO $ do
     let xiv = fromList xi
         szv = fromList sz
         n = dim xiv
     fp <- mkVecfun (iv (f.toList))
     rawpath <- ww2 withVector xiv withVector szv $ \xiv' szv' ->
                     createMIO maxit (n+3)
-                              (c_minimizeNMSimplex fp tol (fi maxit) // xiv' // szv')
-                              "minimizeNMSimplex"
+                              (c_minimize method fp eps (fi maxit) // xiv' // szv')
+                              "minimize"
     let it = round (rawpath @@> (maxit-1,0))
         path = takeRows it rawpath
         [sol] = toLists $ dropRows (it-1) path
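The fi (fromEnum method) calls above are the entire dispatch mechanism: the derived Enum instances number the constructors in declaration order, and that integer selects the minimizer on the C side, so the enumeration in gsl-aux.h must list the algorithms in exactly the same order. A quick sanity check, assuming the module as patched:

@
import Numeric.GSL.Minimization

main :: IO ()
main = do
    print (map fromEnum [NMSimplex, NMSimplex2])      -- [0,1]
    print (map fromEnum [ConjugateFR, ConjugatePR,
                         VectorBFGS, VectorBFGS2,
                         SteepestDescent])            -- [0,1,2,3,4]
@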
@@ -97,73 +133,13 @@ minimizeNMSimplex f xi sz tol maxit = unsafePerformIO $ do
 
 
 foreign import ccall "gsl-aux.h minimize"
-    c_minimizeNMSimplex:: FunPtr (CInt -> Ptr Double -> Double) -> Double -> CInt -> TVVM
+    c_minimize:: CInt -> FunPtr (CInt -> Ptr Double -> Double) -> Double -> CInt -> TVVM
 
 ----------------------------------------------------------------------------------
 
-{- | The Fletcher-Reeves conjugate gradient algorithm /gsl_multimin_fminimizer_conjugate_fr/. This is the example in the GSL manual:
 
-@minimize = minimizeConjugateGradient 1E-2 1E-4 1E-3 30
-f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
-\ --
-df [x,y] = [20*(x-1), 40*(y-2)]
-\ --
-main = do
-    let (s,p) = minimize f df [5,7]
-    print s
-    print p
-\ --
-\> main
-[1.0,2.0]
- 0. 687.848 4.996 6.991
- 1. 683.555 4.989 6.972
- 2. 675.013 4.974 6.935
- 3. 658.108 4.944 6.861
- 4. 625.013 4.885 6.712
- 5. 561.684 4.766 6.415
- 6. 446.467 4.528 5.821
- 7. 261.794 4.053 4.632
- 8. 75.498 3.102 2.255
- 9. 67.037 2.852 1.630
-10. 45.316 2.191 1.762
-11. 30.186 0.869 2.026
-12. 30. 1. 2.@
-
-The path to the solution can be graphically shown by means of:
-
-@'Graphics.Plot.mplot' $ drop 2 ('toColumns' p)@
 
--}
-minimizeConjugateGradient ::
-       Double                     -- ^ initial step size
-    -> Double                     -- ^ minimization parameter
-    -> Double                     -- ^ desired precision of the solution (gradient test)
-    -> Int                        -- ^ maximum number of iterations allowed
-    -> ([Double] -> Double)       -- ^ function to minimize
-    -> ([Double] -> [Double])     -- ^ gradient
-    -> [Double]                   -- ^ starting point
-    -> ([Double], Matrix Double)  -- ^ solution vector, and the optimization trajectory followed by the algorithm
-minimizeConjugateGradient = minimizeWithDeriv 0
-
-{- | Taken from the GSL manual:
-
-The vector Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm. This is a quasi-Newton method which builds up an approximation to the second derivatives of the function f using the difference between successive gradient vectors. By combining the first and second derivatives the algorithm is able to take Newton-type steps towards the function minimum, assuming quadratic behavior in that region.
-
-The bfgs2 version of this minimizer is the most efficient version available, and is a faithful implementation of the line minimization scheme described in Fletcher's Practical Methods of Optimization, Algorithms 2.6.2 and 2.6.4. It supercedes the original bfgs routine and requires substantially fewer function and gradient evaluations. The user-supplied tolerance tol corresponds to the parameter \sigma used by Fletcher. A value of 0.1 is recommended for typical use (larger values correspond to less accurate line searches).
--}
-minimizeVectorBFGS2 ::
-       Double                     -- ^ initial step size
-    -> Double                     -- ^ minimization parameter tol
-    -> Double                     -- ^ desired precision of the solution (gradient test)
-    -> Int                        -- ^ maximum number of iterations allowed
-    -> ([Double] -> Double)       -- ^ function to minimize
-    -> ([Double] -> [Double])     -- ^ gradient
-    -> [Double]                   -- ^ starting point
-    -> ([Double], Matrix Double)  -- ^ solution vector, and the optimization trajectory followed by the algorithm
-minimizeVectorBFGS2 = minimizeWithDeriv 1
-
-
-minimizeWithDeriv method istep minimpar tol maxit f df xi = unsafePerformIO $ do
+minimizeDGen method eps maxit istep tol f df xi = unsafePerformIO $ do
     let xiv = fromList xi
         n = dim xiv
         f' = f . toList
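For reference, the conjugate-gradient example removed above translates to the new interface as follows. The parameter values are those of the removed documentation; note the reordering from (step, tol, eps, maxit) to (eps, maxit, step, tol), which is exactly what the deprecated wrappers perform:

@
import Numeric.GSL.Minimization

f  [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
df [x,y] = [20*(x-1), 40*(y-2)]

main :: IO ()
main = do
    -- old: minimizeConjugateGradient 1E-2 1E-4 1E-3 30 f df [5,7]
    let (s, _path) = minimizeD ConjugateFR 1E-3 30 1E-2 1E-4 f df [5,7]
    print s  -- close to [1.0,2.0], as in the removed example output
@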
@@ -172,7 +148,7 @@ minimizeWithDeriv method istep minimpar tol maxit f df xi = unsafePerformIO $ do
     dfp <- mkVecVecfun (aux_vTov df')
     rawpath <- withVector xiv $ \xiv' ->
                     createMIO maxit (n+2)
-                              (c_minimizeWithDeriv method fp dfp istep minimpar tol (fi maxit) // xiv')
+                              (c_minimizeWithDeriv method fp dfp istep tol eps (fi maxit) // xiv')
                               "minimizeDerivV"
     let it = round (rawpath @@> (maxit-1,0))
         path = takeRows it rawpath
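Both minimizeGen and minimizeDGen recover their results from the raw output matrix in the same way: the C wrapper is expected to store the number of iterations actually performed at position (maxit-1, 0). A sketch of that extraction as a hypothetical standalone helper (trimPath is not part of the module):

@
import Data.Packed.Matrix

-- Keep only the rows the solver actually filled; the last of them
-- (still carrying the bookkeeping columns) is the solution row.
trimPath :: Int -> Matrix Double -> ([Double], Matrix Double)
trimPath maxit rawpath = (sol, path)
  where
    it    = round (rawpath @@> (maxit-1, 0))  -- iteration count written by C
    path  = takeRows it rawpath               -- discard unused rows
    [sol] = toLists (dropRows (it-1) path)    -- last used row
@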
@@ -181,7 +157,7 @@ minimizeWithDeriv method istep minimpar tol maxit f df xi = unsafePerformIO $ do
     freeHaskellFunPtr dfp
     return (sol,path)
 
-foreign import ccall "gsl-aux.h minimizeWithDeriv"
+foreign import ccall "gsl-aux.h minimizeD"
     c_minimizeWithDeriv :: CInt -> FunPtr (CInt -> Ptr Double -> Double)
                         -> FunPtr (CInt -> Ptr Double -> Ptr Double -> IO ())
                         -> Double -> Double -> Double -> CInt
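The FunPtr arguments in these signatures are created by GHC "wrapper" imports (mkVecfun and mkVecVecfun in this module) and must be released with freeHaskellFunPtr after the foreign call, as both drivers do above. A self-contained sketch of the pattern; mkCallback is an illustrative stand-in, not the module's own definition:

@
{-# LANGUAGE ForeignFunctionInterface #-}
import Foreign
import Foreign.C.Types (CInt)

-- A "wrapper" import turns a Haskell closure into a C function pointer
-- of the same shape as the objective-function callbacks above.
foreign import ccall "wrapper"
    mkCallback :: (CInt -> Ptr Double -> Double)
               -> IO (FunPtr (CInt -> Ptr Double -> Double))

main :: IO ()
main = do
    fp <- mkCallback (\_ _ -> 0)  -- dummy objective, illustration only
    -- ... fp would be passed to a call such as c_minimize ...
    freeHaskellFunPtr fp          -- wrappers leak unless freed explicitly
@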