author     Alberto Ruiz <aruiz@um.es>    2009-06-08 07:53:34 +0000
committer  Alberto Ruiz <aruiz@um.es>    2009-06-08 07:53:34 +0000
commit     34de6154086224a0e9f774bd8a2ab804d78e8a10 (patch)
tree       788b49f8889a166db2fb61365afedf7ac2c29bf6 /lib/Numeric
parent     7697c6dc27fd0d9601728af576e8d7b9d1c800ee (diff)
included all GSL minimization methods
Diffstat (limited to 'lib/Numeric')
-rw-r--r--  lib/Numeric/GSL/Minimization.hs    | 208
-rw-r--r--  lib/Numeric/GSL/gsl-aux.c          |  19
-rw-r--r--  lib/Numeric/GSL/gsl-aux.h          |  10
-rw-r--r--  lib/Numeric/LinearAlgebra/Tests.hs |   8
4 files changed, 109 insertions, 136 deletions
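
In short: the per-algorithm entry points are replaced by two generic drivers, minimize (derivative-free) and minimizeD (gradient-based), each selecting a GSL algorithm through an enumeration. A minimal usage sketch against the new interface (argument order taken from the signatures added below; rows comes from hmatrix's Data.Packed.Matrix):

import Numeric.GSL.Minimization
import Data.Packed.Matrix (rows)

-- the quadratic test function used throughout this patch
f :: [Double] -> Double
f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30

df :: [Double] -> [Double]
df [x,y] = [20*(x-1), 40*(y-2)]

main :: IO ()
main = do
    -- derivative-free: method, size tolerance, max iterations, initial box, f, start
    let (s1, p1) = minimize NMSimplex2 1E-2 30 [1,1] f [5,7]
    print s1
    print (rows p1)    -- iterations recorded in the optimization path
    -- gradient-based: method, gradient tolerance, max iterations, first step, tol, f, df, start
    let (s2, _p2) = minimizeD ConjugateFR 1E-3 30 1E-2 1E-4 f df [5,7]
    print s2
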
diff --git a/lib/Numeric/GSL/Minimization.hs b/lib/Numeric/GSL/Minimization.hs
index b95765f..048c717 100644
--- a/lib/Numeric/GSL/Minimization.hs
+++ b/lib/Numeric/GSL/Minimization.hs
@@ -13,12 +13,49 @@ Minimization of a multidimensional function using some of the algorithms describ
 
 <http://www.gnu.org/software/gsl/manual/html_node/Multidimensional-Minimization.html>
 
+The example in the GSL manual:
+
+@
+
+f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
+
+main = do
+    let (s,p) = minimize NMSimplex2 1E-2 30 [1,1] f [5,7]
+    print s
+    print p
+
+\> main
+[0.9920430849306288,1.9969168063253182]
+ 0.000 512.500 1.130 6.500 5.000
+ 1.000 290.625 1.409 5.250 4.000
+ 2.000 290.625 1.409 5.250 4.000
+ 3.000 252.500 1.409 5.500 1.000
+ ...
+22.000  30.001 0.013 0.992 1.997
+23.000  30.001 0.008 0.992 1.997
+@
+
+The path to the solution can be graphically shown by means of:
+
+@'Graphics.Plot.mplot' $ drop 3 ('toColumns' p)@
+
+Taken from the GSL manual:
+
+The vector Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm is a quasi-Newton method which builds up an approximation to the second derivatives of the function f using the difference between successive gradient vectors. By combining the first and second derivatives the algorithm is able to take Newton-type steps towards the function minimum, assuming quadratic behavior in that region.
+
+The bfgs2 version of this minimizer is the most efficient version available, and is a faithful implementation of the line minimization scheme described in Fletcher's Practical Methods of Optimization, Algorithms 2.6.2 and 2.6.4. It supercedes the original bfgs routine and requires substantially fewer function and gradient evaluations. The user-supplied tolerance tol corresponds to the parameter \sigma used by Fletcher. A value of 0.1 is recommended for typical use (larger values correspond to less accurate line searches).
+
+The nmsimplex2 version is a new O(N) implementation of the earlier O(N^2) nmsimplex minimiser. It calculates the size of simplex as the rms distance of each vertex from the center rather than the mean distance, which has the advantage of allowing a linear update.
+
 -}
+
 -----------------------------------------------------------------------------
 module Numeric.GSL.Minimization (
+    minimize, MinimizeMethod(..),
+    minimizeD, MinimizeMethodD(..),
+    minimizeNMSimplex,
     minimizeConjugateGradient,
-    minimizeVectorBFGS2,
-    minimizeNMSimplex
+    minimizeVectorBFGS2
 ) where
 
 
@@ -27,68 +64,67 @@ import Data.Packed.Matrix
 import Foreign
 import Foreign.C.Types(CInt)
 
+------------------------------------------------------------------------
+
+{-# DEPRECATED minimizeNMSimplex "use minimize NMSimplex2 eps maxit sizes f xi" #-}
+minimizeNMSimplex f xi szs eps maxit = minimize NMSimplex eps maxit szs f xi
+
+{-# DEPRECATED minimizeConjugateGradient "use minimizeD ConjugateFR eps maxit step tol f g xi" #-}
+minimizeConjugateGradient step tol eps maxit f g xi = minimizeD ConjugateFR eps maxit step tol f g xi
+
+{-# DEPRECATED minimizeVectorBFGS2 "use minimizeD VectorBFGS2 eps maxit step tol f g xi" #-}
+minimizeVectorBFGS2 step tol eps maxit f g xi = minimizeD VectorBFGS2 eps maxit step tol f g xi
+
 -------------------------------------------------------------------------
 
-{- | The method of Nelder and Mead, implemented by /gsl_multimin_fminimizer_nmsimplex/. The gradient of the function is not required. This is the example in the GSL manual:
+data MinimizeMethod = NMSimplex
+                    | NMSimplex2
+                    deriving (Enum,Eq,Show)
+
+-- | Minimization without derivatives.
+minimize :: MinimizeMethod
+         -> Double                    -- ^ desired precision of the solution (size test)
+         -> Int                       -- ^ maximum number of iterations allowed
+         -> [Double]                  -- ^ sizes of the initial search box
+         -> ([Double] -> Double)      -- ^ function to minimize
+         -> [Double]                  -- ^ starting point
+         -> ([Double], Matrix Double) -- ^ solution vector and optimization path
+
+minimize method = minimizeGen (fi (fromEnum method))
+
+data MinimizeMethodD = ConjugateFR
+                     | ConjugatePR
+                     | VectorBFGS
+                     | VectorBFGS2
+                     | SteepestDescent
+                     deriving (Enum,Eq,Show)
+
+-- | Minimization with derivatives.
+minimizeD :: MinimizeMethodD
+          -> Double                    -- ^ desired precision of the solution (gradient test)
+          -> Int                       -- ^ maximum number of iterations allowed
+          -> Double                    -- ^ size of the first trial step
+          -> Double                    -- ^ tol (precise meaning depends on method)
+          -> ([Double] -> Double)      -- ^ function to minimize
+          -> ([Double] -> [Double])    -- ^ gradient
+          -> [Double]                  -- ^ starting point
+          -> ([Double], Matrix Double) -- ^ solution vector and optimization path
+
+minimizeD method = minimizeDGen (fi (fromEnum method))
 
-@minimize f xi = minimizeNMSimplex f xi (replicate (length xi) 1) 1e-2 100
-\ --
-f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
-\ --
-main = do
-    let (s,p) = minimize f [5,7]
-    print s
-    print p
-\ --
-\> main
-[0.9920430849306285,1.9969168063253164]
- 0. 512.500 1.082 6.500 5.
- 1. 290.625 1.372 5.250 4.
- 2. 290.625 1.372 5.250 4.
- 3. 252.500 1.372 5.500 1.
- 4. 101.406 1.823 2.625 3.500
- 5. 101.406 1.823 2.625 3.500
- 6. 60. 1.823 0. 3.
- 7. 42.275 1.303 2.094 1.875
- 8. 42.275 1.303 2.094 1.875
- 9. 35.684 1.026 0.258 1.906
-10. 35.664 0.804 0.588 2.445
-11. 30.680 0.467 1.258 2.025
-12. 30.680 0.356 1.258 2.025
-13. 30.539 0.285 1.093 1.849
-14. 30.137 0.168 0.883 2.004
-15. 30.137 0.123 0.883 2.004
-16. 30.090 0.100 0.958 2.060
-17. 30.005 6.051e-2 1.022 2.004
-18. 30.005 4.249e-2 1.022 2.004
-19. 30.005 4.249e-2 1.022 2.004
-20. 30.005 2.742e-2 1.022 2.004
-21. 30.005 2.119e-2 1.022 2.004
-22. 30.001 1.530e-2 0.992 1.997
-23. 30.001 1.259e-2 0.992 1.997
-24. 30.001 7.663e-3 0.992 1.997@
+-------------------------------------------------------------------------
 
-The path to the solution can be graphically shown by means of:
 
-@'Graphics.Plot.mplot' $ drop 3 ('toColumns' p)@
 
--}
-minimizeNMSimplex :: ([Double] -> Double) -- ^ function to minimize
-                  -> [Double]             -- ^ starting point
-                  -> [Double]             -- ^ sizes of the initial search box
-                  -> Double               -- ^ desired precision of the solution
-                  -> Int                  -- ^ maximum number of iterations allowed
-                  -> ([Double], Matrix Double)
-                     -- ^ solution vector, and the optimization trajectory followed by the algorithm
-minimizeNMSimplex f xi sz tol maxit = unsafePerformIO $ do
+minimizeGen method eps maxit sz f xi = unsafePerformIO $ do
     let xiv = fromList xi
         szv = fromList sz
         n = dim xiv
     fp <- mkVecfun (iv (f.toList))
     rawpath <- ww2 withVector xiv withVector szv $ \xiv' szv' ->
                 createMIO maxit (n+3)
-                         (c_minimizeNMSimplex fp tol (fi maxit) // xiv' // szv')
-                         "minimizeNMSimplex"
+                         (c_minimize method fp eps (fi maxit) // xiv' // szv')
+                         "minimize"
     let it = round (rawpath @@> (maxit-1,0))
         path = takeRows it rawpath
         [sol] = toLists $ dropRows (it-1) path
@@ -97,73 +133,13 @@ minimizeNMSimplex f xi sz tol maxit = unsafePerformIO $ do
 
 
 foreign import ccall "gsl-aux.h minimize"
-    c_minimizeNMSimplex:: FunPtr (CInt -> Ptr Double -> Double) -> Double -> CInt -> TVVM
+    c_minimize:: CInt -> FunPtr (CInt -> Ptr Double -> Double) -> Double -> CInt -> TVVM
 
 ----------------------------------------------------------------------------------
 
-{- | The Fletcher-Reeves conjugate gradient algorithm /gsl_multimin_fminimizer_conjugate_fr/. This is the example in the GSL manual:
 
-@minimize = minimizeConjugateGradient 1E-2 1E-4 1E-3 30
-f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
-\ --
-df [x,y] = [20*(x-1), 40*(y-2)]
-\ --
-main = do
-    let (s,p) = minimize f df [5,7]
-    print s
-    print p
-\ --
-\> main
-[1.0,2.0]
- 0. 687.848 4.996 6.991
- 1. 683.555 4.989 6.972
- 2. 675.013 4.974 6.935
- 3. 658.108 4.944 6.861
- 4. 625.013 4.885 6.712
- 5. 561.684 4.766 6.415
- 6. 446.467 4.528 5.821
- 7. 261.794 4.053 4.632
- 8. 75.498 3.102 2.255
- 9. 67.037 2.852 1.630
-10. 45.316 2.191 1.762
-11. 30.186 0.869 2.026
-12. 30. 1. 2.@
-
-The path to the solution can be graphically shown by means of:
-
-@'Graphics.Plot.mplot' $ drop 2 ('toColumns' p)@
 
--}
-minimizeConjugateGradient ::
-       Double -- ^ initial step size
-    -> Double -- ^ minimization parameter
-    -> Double -- ^ desired precision of the solution (gradient test)
-    -> Int    -- ^ maximum number of iterations allowed
-    -> ([Double] -> Double)      -- ^ function to minimize
-    -> ([Double] -> [Double])    -- ^ gradient
-    -> [Double]                  -- ^ starting point
-    -> ([Double], Matrix Double) -- ^ solution vector, and the optimization trajectory followed by the algorithm
-minimizeConjugateGradient = minimizeWithDeriv 0
-
-{- | Taken from the GSL manual:
-
-The vector Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm. This is a quasi-Newton method which builds up an approximation to the second derivatives of the function f using the difference between successive gradient vectors. By combining the first and second derivatives the algorithm is able to take Newton-type steps towards the function minimum, assuming quadratic behavior in that region.
-
-The bfgs2 version of this minimizer is the most efficient version available, and is a faithful implementation of the line minimization scheme described in Fletcher's Practical Methods of Optimization, Algorithms 2.6.2 and 2.6.4. It supercedes the original bfgs routine and requires substantially fewer function and gradient evaluations. The user-supplied tolerance tol corresponds to the parameter \sigma used by Fletcher. A value of 0.1 is recommended for typical use (larger values correspond to less accurate line searches).
--}
-minimizeVectorBFGS2 ::
-       Double -- ^ initial step size
-    -> Double -- ^ minimization parameter tol
-    -> Double -- ^ desired precision of the solution (gradient test)
-    -> Int    -- ^ maximum number of iterations allowed
-    -> ([Double] -> Double)      -- ^ function to minimize
-    -> ([Double] -> [Double])    -- ^ gradient
-    -> [Double]                  -- ^ starting point
-    -> ([Double], Matrix Double) -- ^ solution vector, and the optimization trajectory followed by the algorithm
-minimizeVectorBFGS2 = minimizeWithDeriv 1
-
-
-minimizeWithDeriv method istep minimpar tol maxit f df xi = unsafePerformIO $ do
+minimizeDGen method eps maxit istep tol f df xi = unsafePerformIO $ do
     let xiv = fromList xi
         n = dim xiv
         f' = f . toList
@@ -172,7 +148,7 @@ minimizeWithDeriv method istep minimpar tol maxit f df xi = unsafePerformIO $ do
     dfp <- mkVecVecfun (aux_vTov df')
     rawpath <- withVector xiv $ \xiv' ->
                 createMIO maxit (n+2)
-                        (c_minimizeWithDeriv method fp dfp istep minimpar tol (fi maxit) // xiv')
+                        (c_minimizeWithDeriv method fp dfp istep tol eps (fi maxit) // xiv')
                         "minimizeDerivV"
     let it = round (rawpath @@> (maxit-1,0))
         path = takeRows it rawpath
@@ -181,7 +157,7 @@ minimizeWithDeriv method istep minimpar tol maxit f df xi = unsafePerformIO $ do
     freeHaskellFunPtr dfp
     return (sol,path)
 
-foreign import ccall "gsl-aux.h minimizeWithDeriv"
+foreign import ccall "gsl-aux.h minimizeD"
     c_minimizeWithDeriv :: CInt -> FunPtr (CInt -> Ptr Double -> Double)
                         -> FunPtr (CInt -> Ptr Double -> Ptr Double -> IO ())
                         -> Double -> Double -> Double -> CInt
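
The wrappers pass fi (fromEnum method) across the FFI and the C side decodes the same integer in a switch, so the constructor order of the method types is load-bearing. Spelled out (methodCode is a hypothetical helper, not part of the patch):

import Numeric.GSL.Minimization (MinimizeMethodD(..))

-- Hypothetical helper making the FFI encoding explicit:
-- 'minimizeD method = minimizeDGen (fi (fromEnum method))' sends this code to C.
methodCode :: MinimizeMethodD -> Int
methodCode = fromEnum

-- ConjugateFR     -> 0 -> gsl_multimin_fdfminimizer_conjugate_fr
-- ConjugatePR     -> 1 -> gsl_multimin_fdfminimizer_conjugate_pr
-- VectorBFGS      -> 2 -> gsl_multimin_fdfminimizer_vector_bfgs
-- VectorBFGS2     -> 3 -> gsl_multimin_fdfminimizer_vector_bfgs2
-- SteepestDescent -> 4 -> gsl_multimin_fdfminimizer_steepest_descent
-- (likewise NMSimplex -> 0, NMSimplex2 -> 1 for the derivative-free driver)
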
diff --git a/lib/Numeric/GSL/gsl-aux.c b/lib/Numeric/GSL/gsl-aux.c
index c6b052f..2ecfb51 100644
--- a/lib/Numeric/GSL/gsl-aux.c
+++ b/lib/Numeric/GSL/gsl-aux.c
@@ -369,7 +369,7 @@ double only_f_aux_min(const gsl_vector*x, void *pars) {
 }
 
 // this version returns info about intermediate steps
-int minimize(double f(int, double*), double tolsize, int maxit,
+int minimize(int method, double f(int, double*), double tolsize, int maxit,
              KRVEC(xi), KRVEC(sz), RMAT(sol)) {
     REQUIRES(xin==szn && solr == maxit && solc == 3+xin,BAD_SIZE);
     DEBUGMSG("minimizeList (nmsimplex)");
@@ -388,7 +388,11 @@ int minimize(double f(int, double*), double tolsize, int maxit,
     // Starting point
     KDVVIEW(xi);
     // Minimizer nmsimplex, without derivatives
-    T = gsl_multimin_fminimizer_nmsimplex;
+    switch(method) {
+        case 0 : {T = gsl_multimin_fminimizer_nmsimplex; break; }
+        case 1 : {T = gsl_multimin_fminimizer_nmsimplex2; break; }
+        default: ERROR(BAD_CODE);
+    }
     s = gsl_multimin_fminimizer_alloc (T, my_func.n);
     gsl_multimin_fminimizer_set (s, &my_func, V(xi), V(sz));
     do {
@@ -458,9 +462,9 @@ void fdf_aux_min(const gsl_vector * x, void * pars, double * f, gsl_vector * g)
 }
 
 
-int minimizeWithDeriv(int method, double f(int, double*), void df(int, double*, double*),
+int minimizeD(int method, double f(int, double*), void df(int, double*, double*),
               double initstep, double minimpar, double tolgrad, int maxit,
               KRVEC(xi), RMAT(sol)) {
     REQUIRES(solr == maxit && solc == 2+xin,BAD_SIZE);
     DEBUGMSG("minimizeWithDeriv (conjugate_fr)");
     gsl_multimin_function_fdf my_func;
@@ -482,7 +486,10 @@ int minimizeWithDeriv(int method, double f(int, double*), void df(int, double*,
     // conjugate gradient fr
     switch(method) {
         case 0 : {T = gsl_multimin_fdfminimizer_conjugate_fr; break; }
-        case 1 : {T = gsl_multimin_fdfminimizer_vector_bfgs2; break; }
+        case 1 : {T = gsl_multimin_fdfminimizer_conjugate_pr; break; }
+        case 2 : {T = gsl_multimin_fdfminimizer_vector_bfgs; break; }
+        case 3 : {T = gsl_multimin_fdfminimizer_vector_bfgs2; break; }
+        case 4 : {T = gsl_multimin_fdfminimizer_steepest_descent; break; }
         default: ERROR(BAD_CODE);
     }
     s = gsl_multimin_fdfminimizer_alloc (T, my_func.n);
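
Note how results travel back: the C loop records one row per iteration in the preallocated maxit-row matrix, and the Haskell wrapper reads the iteration count from rawpath @@> (maxit-1,0), then slices with takeRows/dropRows. Post-processing the trimmed path is ordinary matrix work; for instance (a sketch; per the docs above each row holds the iteration number, the function value, for the simplex driver the simplex size, then the coordinates):

import Data.Packed.Matrix (Matrix, toLists)

-- Sketch: the final recorded step of an optimization path returned
-- by minimize/minimizeD (the row the solution vector is taken from).
lastStep :: Matrix Double -> [Double]
lastStep path = last (toLists path)
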
diff --git a/lib/Numeric/GSL/gsl-aux.h b/lib/Numeric/GSL/gsl-aux.h
index c9fd546..881d0d0 100644
--- a/lib/Numeric/GSL/gsl-aux.h
+++ b/lib/Numeric/GSL/gsl-aux.h
@@ -38,13 +38,3 @@ int integrate_qags(double f(double,void*), double a, double b, double prec, int
 
 int polySolve(KRVEC(a), CVEC(z));
 
-int minimize(double f(int, double*), double tolsize, int maxit,
-             KRVEC(xi), KRVEC(sz), RMAT(sol));
-
-int minimizeWithDeriv(int method, double f(int, double*), void df(int, double*, double*),
-              double initstep, double minimpar, double tolgrad, int maxit,
-              KRVEC(xi), RMAT(sol));
-
-int root(int method, void f(int, double*, int, double*),
-         double epsabs, int maxit,
-         KRVEC(xi), RMAT(sol));
diff --git a/lib/Numeric/LinearAlgebra/Tests.hs b/lib/Numeric/LinearAlgebra/Tests.hs
index 83f581f..464ed27 100644
--- a/lib/Numeric/LinearAlgebra/Tests.hs
+++ b/lib/Numeric/LinearAlgebra/Tests.hs
@@ -109,13 +109,13 @@ expmTest2 = expm nd2 :~15~: (2><2)
 
 ---------------------------------------------------------------------
 
-minimizationTest = TestList [ utest "minimization conj grad" (minim1 f df [5,7] ~~ [1,2])
-                            , utest "minimization bg2" (minim2 f df [5,7] ~~ [1,2])
+minimizationTest = TestList [ utest "minimization conjugatefr" (minim1 f df [5,7] ~~ [1,2])
+                            , utest "minimization nmsimplex2" (minim2 f [5,7] == 24)
                             ]
     where f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
           df [x,y] = [20*(x-1), 40*(y-2)]
-          minim1 g dg ini = fst $ minimizeConjugateGradient 1E-2 1E-4 1E-3 30 g dg ini
-          minim2 g dg ini = fst $ minimizeVectorBFGS2 1E-2 1E-2 1E-3 30 g dg ini
+          minim1 g dg ini = fst $ minimizeD ConjugateFR 1E-3 30 1E-2 1E-4 g dg ini
+          minim2 g ini = rows $ snd $ minimize NMSimplex2 1E-2 30 [1,1] g ini
 
 ---------------------------------------------------------------------
 
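
The rewritten simplex test checks convergence indirectly, by the number of recorded steps: with these parameters NMSimplex2 stops after 24 rows (iterations 0 through 23, matching the documentation example above). A standalone sketch of the same check:

import Numeric.GSL.Minimization (minimize, MinimizeMethod(..))
import Data.Packed.Matrix (rows)

-- Sketch: the new nmsimplex2 test as a self-contained predicate.
nmsimplex2Steps :: Bool
nmsimplex2Steps = rows (snd (minimize NMSimplex2 1E-2 30 [1,1] f [5,7])) == 24
  where f [x,y] = 10*(x-1)^2 + 20*(y-2)^2 + 30
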