Diffstat (limited to 'packages/hmatrix/src/Numeric/GSL/Minimization.hs')
-rw-r--r--  packages/hmatrix/src/Numeric/GSL/Minimization.hs  222
1 file changed, 0 insertions, 222 deletions
diff --git a/packages/hmatrix/src/Numeric/GSL/Minimization.hs b/packages/hmatrix/src/Numeric/GSL/Minimization.hs
deleted file mode 100644
index 056d463..0000000
--- a/packages/hmatrix/src/Numeric/GSL/Minimization.hs
+++ /dev/null
@@ -1,222 +0,0 @@
{- |
Module      : Numeric.GSL.Minimization
Copyright   : (c) Alberto Ruiz 2006-9
License     : GPL
Maintainer  : Alberto Ruiz
Stability   : provisional

Minimization of a multidimensional function using some of the algorithms described in:

<http://www.gnu.org/software/gsl/manual/html_node/Multidimensional-Minimization.html>

The example in the GSL manual:

@
f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30

main = do
    let (s,p) = minimize NMSimplex2 1E-2 30 [1,1] f [5,7]
    print s
    print p
@

>>> main
[0.9920430849306288,1.9969168063253182]
 0.000  512.500  1.130  6.500  5.000
 1.000  290.625  1.409  5.250  4.000
 2.000  290.625  1.409  5.250  4.000
 3.000  252.500  1.409  5.500  1.000
 ...
22.000   30.001  0.013  0.992  1.997
23.000   30.001  0.008  0.992  1.997

The path to the solution can be graphically shown by means of:

@'Graphics.Plot.mplot' $ drop 3 ('toColumns' p)@

Taken from the GSL manual:

The vector Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm is a quasi-Newton method which builds up an approximation to the second derivatives of the function f using the difference between successive gradient vectors. By combining the first and second derivatives the algorithm is able to take Newton-type steps towards the function minimum, assuming quadratic behavior in that region.

The bfgs2 version of this minimizer is the most efficient version available, and is a faithful implementation of the line minimization scheme described in Fletcher's Practical Methods of Optimization, Algorithms 2.6.2 and 2.6.4. It supersedes the original bfgs routine and requires substantially fewer function and gradient evaluations. The user-supplied tolerance tol corresponds to the parameter \sigma used by Fletcher. A value of 0.1 is recommended for typical use (larger values correspond to less accurate line searches).

The nmsimplex2 version is a new O(N) implementation of the earlier O(N^2) nmsimplex minimiser. It calculates the size of simplex as the rms distance of each vertex from the center rather than the mean distance, which has the advantage of allowing a linear update.

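A sketch of the same example solved with one of the derivative-based minimizers.
All parameter values below (precision, iteration limit, step size, tolerance) are
only illustrative; 0.1 is the tolerance recommended above for 'VectorBFGS2':

@
f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30

df [x,y] = [20*(x-1), 40*(y-2)]

main = do
    let (s,p) = minimizeD VectorBFGS2 1E-3 30 1E-2 0.1 f df [5,7]
    print s
@
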
-}


module Numeric.GSL.Minimization (
    minimize, minimizeV, MinimizeMethod(..),
    minimizeD, minimizeVD, MinimizeMethodD(..),
    uniMinimize, UniMinimizeMethod(..),

    minimizeNMSimplex,
    minimizeConjugateGradient,
    minimizeVectorBFGS2
) where


import Data.Packed
import Numeric.GSL.Internal

import Foreign.Ptr(Ptr, FunPtr, freeHaskellFunPtr)
import Foreign.C.Types
import System.IO.Unsafe(unsafePerformIO)

------------------------------------------------------------------------

{-# DEPRECATED minimizeNMSimplex "use minimize NMSimplex2 eps maxit sizes f xi" #-}
minimizeNMSimplex f xi szs eps maxit = minimize NMSimplex eps maxit szs f xi

{-# DEPRECATED minimizeConjugateGradient "use minimizeD ConjugateFR eps maxit step tol f g xi" #-}
minimizeConjugateGradient step tol eps maxit f g xi = minimizeD ConjugateFR eps maxit step tol f g xi

{-# DEPRECATED minimizeVectorBFGS2 "use minimizeD VectorBFGS2 eps maxit step tol f g xi" #-}
minimizeVectorBFGS2 step tol eps maxit f g xi = minimizeD VectorBFGS2 eps maxit step tol f g xi

-------------------------------------------------------------------------

data UniMinimizeMethod = GoldenSection
                       | BrentMini
                       | QuadGolden
                       deriving (Enum, Eq, Show, Bounded)
-- | One-dimensional minimization.

uniMinimize :: UniMinimizeMethod        -- ^ The method used.
            -> Double                   -- ^ desired precision of the solution
            -> Int                      -- ^ maximum number of iterations allowed
            -> (Double -> Double)       -- ^ function to minimize
            -> Double                   -- ^ guess for the location of the minimum
            -> Double                   -- ^ lower bound of search interval
            -> Double                   -- ^ upper bound of search interval
            -> (Double, Matrix Double)  -- ^ solution and optimization path

uniMinimize method epsrel maxit fun xmin xl xu = uniMinimizeGen (fi (fromEnum method)) fun xmin xl xu epsrel maxit

uniMinimizeGen m f xmin xl xu epsrel maxit = unsafePerformIO $ do
    fp <- mkDoublefun f
    rawpath <- createMIO maxit 4
                         (c_uniMinize m fp epsrel (fi maxit) xmin xl xu)
                         "uniMinimize"
    let it = round (rawpath @@> (maxit-1,0))
        path = takeRows it rawpath
        [sol] = toLists $ dropRows (it-1) path
    freeHaskellFunPtr fp
    return (sol !! 1, path)


foreign import ccall safe "uniMinimize"
    c_uniMinize :: CInt -> FunPtr (Double -> Double) -> Double -> CInt -> Double -> Double -> Double -> TM Res

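{- Usage sketch (illustrative values, not part of the original documentation):
   minimize a shifted parabola with Brent's method, starting from the guess 3
   inside the bracket (0,5).  The reported solution should lie near the true
   minimum at x = 2, and the returned matrix records the iterates.

@
(xm, pathm) = uniMinimize BrentMini 1E-6 100 (\x -> (x-2)^2 + 1) 3 0 5
@
-}
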
data MinimizeMethod = NMSimplex
                    | NMSimplex2
                    deriving (Enum,Eq,Show,Bounded)

-- | Minimization without derivatives
minimize :: MinimizeMethod
         -> Double                    -- ^ desired precision of the solution (size test)
         -> Int                       -- ^ maximum number of iterations allowed
         -> [Double]                  -- ^ sizes of the initial search box
         -> ([Double] -> Double)      -- ^ function to minimize
         -> [Double]                  -- ^ starting point
         -> ([Double], Matrix Double) -- ^ solution vector and optimization path

-- | Minimization without derivatives (vector version)
minimizeV :: MinimizeMethod
          -> Double                        -- ^ desired precision of the solution (size test)
          -> Int                           -- ^ maximum number of iterations allowed
          -> Vector Double                 -- ^ sizes of the initial search box
          -> (Vector Double -> Double)     -- ^ function to minimize
          -> Vector Double                 -- ^ starting point
          -> (Vector Double, Matrix Double) -- ^ solution vector and optimization path

minimize method eps maxit sz f xi = v2l $ minimizeV method eps maxit (fromList sz) (f.toList) (fromList xi)
    where v2l (v,m) = (toList v, m)

-- Apply two continuation-passing wrappers (such as 'vec') and pass both
-- results to the final continuation.
ww2 w1 o1 w2 o2 f = w1 o1 $ \a1 -> w2 o2 $ \a2 -> f a1 a2

minimizeV method eps maxit szv f xiv = unsafePerformIO $ do
    let n = dim xiv
    fp <- mkVecfun (iv f)
    rawpath <- ww2 vec xiv vec szv $ \xiv' szv' ->
                   createMIO maxit (n+3)
                       (c_minimize (fi (fromEnum method)) fp eps (fi maxit) // xiv' // szv')
                       "minimize"
    let it = round (rawpath @@> (maxit-1,0))
        path = takeRows it rawpath
        sol = flatten $ dropColumns 3 $ dropRows (it-1) path
    freeHaskellFunPtr fp
    return (sol, path)


foreign import ccall safe "gsl-aux.h minimize"
    c_minimize :: CInt -> FunPtr (CInt -> Ptr Double -> Double) -> Double -> CInt -> TV(TV(TM Res))

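{- Usage sketch of the vector interface (illustrative values); this mirrors
   the list-based example in the module header:

@
g :: Vector Double -> Double
g v = let [x,y] = toList v in 10*(x-1)^2 + 20*(y-2)^2 + 30

(sv, pv) = minimizeV NMSimplex2 1E-2 100 (fromList [1,1]) g (fromList [5,7])
@
-}
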
----------------------------------------------------------------------------------


data MinimizeMethodD = ConjugateFR
                     | ConjugatePR
                     | VectorBFGS
                     | VectorBFGS2
                     | SteepestDescent
                     deriving (Enum,Eq,Show,Bounded)

-- | Minimization with derivatives.
minimizeD :: MinimizeMethodD
          -> Double                    -- ^ desired precision of the solution (gradient test)
          -> Int                       -- ^ maximum number of iterations allowed
          -> Double                    -- ^ size of the first trial step
          -> Double                    -- ^ tol (precise meaning depends on method)
          -> ([Double] -> Double)      -- ^ function to minimize
          -> ([Double] -> [Double])    -- ^ gradient
          -> [Double]                  -- ^ starting point
          -> ([Double], Matrix Double) -- ^ solution vector and optimization path

-- | Minimization with derivatives (vector version)
minimizeVD :: MinimizeMethodD
           -> Double                           -- ^ desired precision of the solution (gradient test)
           -> Int                              -- ^ maximum number of iterations allowed
           -> Double                           -- ^ size of the first trial step
           -> Double                           -- ^ tol (precise meaning depends on method)
           -> (Vector Double -> Double)        -- ^ function to minimize
           -> (Vector Double -> Vector Double) -- ^ gradient
           -> Vector Double                    -- ^ starting point
           -> (Vector Double, Matrix Double)   -- ^ solution vector and optimization path

minimizeD method eps maxit istep tol f df xi = v2l $ minimizeVD
        method eps maxit istep tol (f.toList) (fromList.df.toList) (fromList xi)
    where v2l (v,m) = (toList v, m)


minimizeVD method eps maxit istep tol f df xiv = unsafePerformIO $ do
    let n = dim xiv
        f' = f
        df' = (checkdim1 n . df)
    fp <- mkVecfun (iv f')
    dfp <- mkVecVecfun (aux_vTov df')
    rawpath <- vec xiv $ \xiv' ->
                   createMIO maxit (n+2)
                       (c_minimizeD (fi (fromEnum method)) fp dfp istep tol eps (fi maxit) // xiv')
                       "minimizeD"
    let it = round (rawpath @@> (maxit-1,0))
        path = takeRows it rawpath
        sol = flatten $ dropColumns 2 $ dropRows (it-1) path
    freeHaskellFunPtr fp
    freeHaskellFunPtr dfp
    return (sol,path)

foreign import ccall safe "gsl-aux.h minimizeD"
    c_minimizeD :: CInt
                -> FunPtr (CInt -> Ptr Double -> Double)
                -> FunPtr (TV (TV Res))
                -> Double -> Double -> Double -> CInt
                -> TV (TM Res)

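{- Usage sketch of the vector interface with derivatives (the step size and
   tolerance below are illustrative, not recommended settings):

@
h :: Vector Double -> Double
h v = let [x,y] = toList v in 10*(x-1)^2 + 20*(y-2)^2 + 30

dh :: Vector Double -> Vector Double
dh v = let [x,y] = toList v in fromList [20*(x-1), 40*(y-2)]

(sd, pd) = minimizeVD ConjugateFR 1E-3 30 1E-2 1E-4 h dh (fromList [5,7])
@
-}
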
---------------------------------------------------------------------

checkdim1 n v
    | dim v == n = v
    | otherwise = error $ "Error: "++ show n
        ++ " components expected in the result of the gradient supplied to minimizeD"