From: <te...@us...> - 2009-03-24 17:14:54
Revision: 5635
          http://octave.svn.sourceforge.net/octave/?rev=5635&view=rev
Author:   tealev
Date:     2009-03-24 17:14:47 +0000 (Tue, 24 Mar 2009)

Log Message:
-----------
Non-linear conjugate gradient method and examples.

Modified Paths:
--------------
    trunk/octave-forge/main/optim/INDEX

Added Paths:
-----------
    trunk/octave-forge/main/optim/inst/__bracket_min.m
    trunk/octave-forge/main/optim/inst/__poly_2_extrema.m
    trunk/octave-forge/main/optim/inst/__semi_bracket.m
    trunk/octave-forge/main/optim/inst/brent_line_min.m
    trunk/octave-forge/main/optim/inst/cg_min.m
    trunk/octave-forge/main/optim/inst/cg_min_example_1.m
    trunk/octave-forge/main/optim/inst/cg_min_example_2.m
    trunk/octave-forge/main/optim/inst/cg_min_example_3.m

Modified: trunk/octave-forge/main/optim/INDEX
===================================================================
--- trunk/octave-forge/main/optim/INDEX	2009-03-24 09:42:13 UTC (rev 5634)
+++ trunk/octave-forge/main/optim/INDEX	2009-03-24 17:14:47 UTC (rev 5635)
@@ -9,6 +9,7 @@
  fmins
  adsmax mdsmax nmsmax
  bfgsmin samin battery
  fminsearch
+ cg_min
 Data fitting
  expfit expdemo wpolyfit wpolyfitdemo
@@ -34,3 +35,4 @@
  bfgsmin_example
  rosenbrock
  samin_example
+ cg_min_test_1 cg_min_test_2 cg_min_test_3

Added: trunk/octave-forge/main/optim/inst/__bracket_min.m
===================================================================
--- trunk/octave-forge/main/optim/inst/__bracket_min.m	                        (rev 0)
+++ trunk/octave-forge/main/optim/inst/__bracket_min.m	2009-03-24 17:14:47 UTC (rev 5635)
@@ -0,0 +1,30 @@
+## [a, b, ga, gb, nev] = __bracket_min (f, dx, narg, args)
+##
+## Find an interval [a, b] containing a local minimum of the function
+## g : h in reals ---> f (x+h*dx)   where x = args{narg}
+##
+## a < b.
+## nev is the number of function evaluations.
+
+## Author : Etienne Grossmann <et...@is...>
+## Modified by: Levente Torok <Tor...@gm...>
+## This software is distributed under the terms of the GPL
+
+function [a, b, ga, gb, n] = __bracket_min (f, dx, narg, args)
+
+[a,b, ga,gb, n] = __semi_bracket (f, dx, 0, narg, args);
+
+if a != 0, return; end
+
+[a2,b2, ga2,gb2, n2] = __semi_bracket (f, -dx, 0, narg, args);
+
+n += n2;
+
+if a2 == 0,
+  a = -b2; ga = gb2;
+else
+  a = -b2;
+  b = -a2;
+  ga = gb2;
+  gb = ga2;
+end

Added: trunk/octave-forge/main/optim/inst/__poly_2_extrema.m
===================================================================
--- trunk/octave-forge/main/optim/inst/__poly_2_extrema.m	                        (rev 0)
+++ trunk/octave-forge/main/optim/inst/__poly_2_extrema.m	2009-03-24 17:14:47 UTC (rev 5635)
@@ -0,0 +1,39 @@
+## ex = __poly_2_extrema (l, f) - Extremum of a 1-var deg-2 polynomial
+##
+## l  : 3 : variable values
+## f  : 3 : f(i) = value of the polynomial at l(i)
+##
+## ex : 1 : Value at which f reaches an extremum
+##
+## Assuming that f(i) = a*l(i)^2 + b*l(i) + c = P(l(i)) for some a, b, c,
+## ex is the extremum of the polynomial P.
+##
+function ex = __poly_2_extrema (l, f)
+
+### This somewhat helps if the solution is very close to one of the points.
+[f,i] = sort (f);
+l = l(i);
+
+m = (l(2) - l(1))/(l(3) - l(1));
+d = (2*(f(1)*(m-1)+f(2)-f(3)*m));
+if abs (d) < eps,
+  printf ("__poly_2_extrema : divisor is small (solution at infinity)\n");
+  printf ("%8.3e %8.3e %8.3e, %8.3e %8.3e\n",\
+          f(1), diff (f), diff (sort (l)));
+
+  ex = (2*(l(1)>l(2))-1)*inf;
+  ## keyboard
+else
+  ex = ((l(3) - l(1))*((f(1)*(m^2-1) + f(2) - f(3)*m^2))) / d ;
+
+  ## Not an improvement
+  # n = ((l(2)+l(3))*(l(2)-l(3)) + 2*(l(3)-l(2))*l(1)) / (l(3)-l(1))^2 ;
+  # ex = ((l(3) - l(1))*((f(1)*n + f(2) - f(3)*m^2))) / \
+  #      (2*(f(1)*(m-1)+f(2)-f(3)*m));
+  # if ex != ex0,
+  #   ex - ex0
+  # end
+  ex = l(1) + ex;
+end
\ No newline at end of file
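As a quick numerical illustration of the helper above (this snippet is editorial, not part of the commit): sampling a parabola at three abscissae should recover its vertex.

  # Illustrative check: f(x) = (x-2)^2 + 1 has its extremum at x = 2.
  l  = [0, 1, 3];
  f  = (l-2).^2 + 1;
  ex = __poly_2_extrema (l, f)     # expected: ex is approximately 2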
Added: trunk/octave-forge/main/optim/inst/__semi_bracket.m
===================================================================
--- trunk/octave-forge/main/optim/inst/__semi_bracket.m	                        (rev 0)
+++ trunk/octave-forge/main/optim/inst/__semi_bracket.m	2009-03-24 17:14:47 UTC (rev 5635)
@@ -0,0 +1,38 @@
+## [a, b, ga, gb, nev] = __semi_bracket (f, dx, a, narg, args)
+##
+## Find an interval containing a local minimum of the function
+## g : h in [a, inf[ ---> f (x+h*dx)   where x = args{narg}
+##
+## The local minimum may be at a.
+## a < b.
+## nev is the number of function evaluations.
+
+## Author : Etienne Grossmann <et...@is...>
+## Modified by: Levente Torok <Tor...@gm...>
+## This software is distributed under the terms of the GPL
+
+function [a,b,ga,gb,n] = __semi_bracket (f, dx, a, narg, args)
+
+step = 1;
+
+x = args{narg};
+args{narg} = x+a*dx; ga = feval (f, args);
+b = a + step;
+args{narg} = x+b*dx; gb = feval (f, args);
+n = 2;
+
+if gb >= ga, return; end
+
+while 1,
+
+  c = b + step;
+  args{narg} = x+c*dx; gc = feval (f, args);
+  n++;
+
+  if gc >= gb,                  # ga >= gb <= gc
+    gb = gc; b = c;
+    return;
+  end
+
+  step *= 2;
+  a = b; b = c; ga = gb; gb = gc;
+end
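The two bracketing helpers above expect the objective to take a single cell array of arguments, with the minimized argument at position narg. A minimal hypothetical sketch of bracketing a 1-D minimum along a direction (the quadcost function and all values below are made up for illustration, not part of the commit):

  # Objective taking a cell array; args{1} is the point being optimized.
  function v = quadcost (args)
    x0 = [3; -1];
    v  = sumsq (args{1} - x0);
  endfunction

  args = { [0; 0] };                       # start at the origin
  dx   = [1; 0];                           # search direction
  [a, b, ga, gb, nev] = __bracket_min ("quadcost", dx, 1, args);
  # a < b and the minimum of h -> quadcost ({args{1}+h*dx}) lies in [a, b]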
Added: trunk/octave-forge/main/optim/inst/brent_line_min.m
===================================================================
--- trunk/octave-forge/main/optim/inst/brent_line_min.m	                        (rev 0)
+++ trunk/octave-forge/main/optim/inst/brent_line_min.m	2009-03-24 17:14:47 UTC (rev 5635)
@@ -0,0 +1,220 @@
+## -*- texinfo -*-
+## @deftypefn {Function File} {[@var{s},@var{v},@var{nev}]} brent_line_min ( @var{f},@var{dx},@var{args},@var{ctl} )
+## Line minimization of f along dx
+##
+## Finds the minimum of f on the line @math{ x0 + dx*w | a < w < b } by
+## bracketing. a and b are passed through argument ctl.
+##
+## @subheading Arguments
+## @itemize @bullet
+## @item @var{f}   : string : Name of function. Must return a real value
+## @item @var{dx}  : RxC    : Direction along which @var{f} is minimized
+## @item @var{args} : cell : Arguments passed to f, or RxC : f's only argument. x0 must be at @var{args}@{ @var{ctl}(2) @}
+## @item @var{ctl} : 5 : (optional) Control variables, described below.
+## @end itemize
+##
+## @subheading Returned values
+## @itemize @bullet
+## @item @var{s}   : 1 : Minimum is at x0 + s*dx
+## @item @var{v}   : 1 : Value of f at x0 + s*dx
+## @item @var{nev} : 1 : Number of function evaluations
+## @end itemize
+##
+## @subheading Control Variables
+## @itemize @bullet
+## @item @var{ctl}(1) : Upper bound for error on s                  Default=10*eps
+## @item @var{ctl}(2) : Position of minimized argument in args      Default= 1
+## @item @var{ctl}(3) : Maximum number of function evaluations      Default= inf
+## @item @var{ctl}(4) : a                                           Default=-inf
+## @item @var{ctl}(5) : b                                           Default= inf
+## @end itemize
+##
+## Default values will be used if ctl is not passed or if nan values are
+## given.
+## @end deftypefn

+function [s,gs,nev] = brent_line_min (f, dx, args, ctl)
+
+verbose = 0;
+
+seps = sqrt (eps);
+
+                                # Default control variables
+tol = 10*eps;                   # sqrt (eps);
+narg = 1;
+maxev = inf;
+a = -inf;
+b = inf;
+
+if nargin >= 4,                 # Read arguments
+  if !isnan (ctl(1)), tol = ctl(1); end
+  if length (ctl)>=2 && !isnan (ctl(2)), narg = ctl(2); end
+  if length (ctl)>=3 && !isnan (ctl(3)), maxev = ctl(3); end
+  if length (ctl)>=4 && !isnan (ctl(4)), a = ctl(4); end
+  if length (ctl)>=5 && !isnan (ctl(5)), b = ctl(5); end
+end                             # Otherwise, use defaults, def'd above
+
+if a>b, tmp=a; a=b; b=tmp; end
+
+if narg > length (args),
+  printf ("brent_line_min : narg==%i > length (args)==%i",\
+          narg, length (args));
+  keyboard
+end
+
+if ! iscell (args),
+  args = {args};
+endif
+
+x = args{narg};
+
+[R,C] = size (x);
+N = R*C;                        # Size of argument
+
+gs = feval (f, args); gs0 = gs;
+nev = 1;
+                                # Initial value
+s = 0;
+
+if all (dx==0), return; end     # Trivial case
+
+                                # If any of the bounds is infinite, find
+                                # finite bounds that bracket the minimum
+if !isfinite (a) || !isfinite (b),
+  if !isfinite (a) && !isfinite (b),
+    [a,b, ga,gb, n] = __bracket_min (f, dx, narg, args);
+  elseif !isfinite (a),
+    [a,b, ga,gb, n] = __semi_bracket (f, -dx, -b, narg, args);
+    tmp = a;  a = -b;  b = -tmp;
+    tmp = ga; ga = gb; gb = tmp;
+  else
+    [a,b, ga,gb, n] = __semi_bracket (f, dx, a, narg, args);
+  end
+  nev += n;
+else
+  args{narg} = x+a*dx; ga = feval (f, args);
+  args{narg} = x+b*dx; gb = feval (f, args);
+  nev += 2;
+end                             # End of finding bracket for minimum
+
+if a > b,                       # Check assumptions
+  printf ("brent_line_min : a > b\n");
+  keyboard
+end
+
+s = 0.5*(a+b);
+args{narg} = x + s*dx; gs = feval (f, args);
+nev++;
+
+if verbose,
+  printf ("[a,s,b]=[%.3e,%.3e,%.3e], [ga,gs,gb]=[%.3e,%.3e,%.3e]\n",\
+          a,s,b,ga,gs,gb);
+end
+
+maxerr = 2*tol;
+
+while ( b-a > maxerr ) && nev < maxev,
+
+  if gs > ga && gs > gb,        # Check assumptions
+    printf ("brent_line_min : gs > ga && gs > gb\n");
+    keyboard
+  end
+
+  if ga == gb && gb == gs, break; end
+
+                                # Don't trust __poly_2_extrema for glued points
+                                # (see test_poly_2_ex).
+
+  ## if min (b-s, s-a) > 10*seps,
+
+                                # If s is not glued to a or b and the three
+                                # points do not look linear, take the
+                                # extremum of the interpolating parabola
+  ## mydet = sum (l([2 3 1]).*f([3 1 2])-l([3 1 2]).*f([2 3 1]))
+  mydet = sum ([s b a].*[gb ga gs] - [b a s].*[gs gb ga]);
+  t = nan;
+  if min (b-s, s-a) > 10*seps && abs (mydet) > 10*seps,
+    t = __poly_2_extrema ([a,s,b], [ga, gs, gb]);
+  end
+  if isfinite (t) && t < b && t > a,
+                                # Keep the parabolic estimate t
+
+    ## if t>=b || t<=a,
+    ##   printf ("brent_line_min : t is not in ]a,b[\n");
+    ##   keyboard
+    ## end
+
+                                # Otherwise, reduce the biggest of the
+                                # intervals, but not too much
+  elseif s-a > b-s,
+    t = max (0.5*(a+s), s-100*seps);
+  else
+    t = min (0.5*(s+b), s+100*seps);
+  end
+
+  if abs (t-s) < 0.51*maxerr,
+    #sayif (verbose, "ungluing t and s\n");
+    t = s + (1 - 2*(s-a > b-s))*0.49*maxerr ;
+  end
+
+  if a > s || s > b,            # Check assumptions
+    printf ("brent_line_min : a > s || s > b\n");
+    keyboard
+  end
+
+  xt = args;
+  args{narg} = x+t*dx;
+  gt = feval (f, args);
+  nev++;
+
+  if verbose,
+    printf ("t = %.3e, gt = %.3e\n",t,gt);
+  end
+
+  if t<s,                       # New point is in ]a,s[
+
+    if gt > ga + seps,          # Check assumptions
+      printf ("brent_line_min : gt > ga\n");
+      keyboard
+    end
+
+    if gt < gs,
+      b = s; gb = gs;
+      s = t; gs = gt;
+    else
+      a = t; ga = gt;
+    end
+
+  else                          # New point is in ]s,b[
+
+    if gt > gb + seps,          # Check assumptions
+      printf ("brent_line_min : gt > gb\n");
+      keyboard
+    end
+
+    if gt < gs,
+      a = s; ga = gs;
+      s = t; gs = gt;
+    else
+      b = t; gb = gt;
+    end
+  end
+
+  if verbose,
+    printf ("[a,s,b]=[%.3e,%.3e,%.3e], [ga,gs,gb]=[%.3e,%.3e,%.3e]\n",\
+            a,s,b,ga,gs,gb);
+  end
+  ## keyboard
+  ## [b-a, maxerr]
+end
+
+s2 = 0.5*(a+b);
+args{narg} = x + s2*dx; gs2 = feval (f, args);
+nev++;
+
+if gs2 < gs,
+  s = s2; gs = gs2;
+end
+
+if gs > gs0,
+  printf ("brent_line_min : goes uphill by %8.3e\n",gs-gs0);
+  keyboard
+end
\ No newline at end of file
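Reusing the hypothetical quadcost objective from the earlier sketch, a line minimization with default controls might look like this (illustrative only, not part of the commit):

  # Minimize quadcost along dx starting from args{1}; ctl left at defaults.
  args = { [0; 0] };
  dx   = [1; 0];
  [s, v, nev] = brent_line_min ("quadcost", dx, args);
  # s should be close to 3 (the minimizer of (h-3)^2 + 1 along dx), v close to 1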
Added: trunk/octave-forge/main/optim/inst/cg_min.m
===================================================================
--- trunk/octave-forge/main/optim/inst/cg_min.m	                        (rev 0)
+++ trunk/octave-forge/main/optim/inst/cg_min.m	2009-03-24 17:14:47 UTC (rev 5635)
@@ -0,0 +1,202 @@
+## -*- texinfo -*-
+## @deftypefn {Function File} {[@var{x0},@var{v},@var{nev}]} cg_min ( @var{f},@var{df},@var{args},@var{ctl} )
+## NonLinear Conjugate Gradient method to minimize function @var{f}.
+##
+## @subheading Arguments
+## @itemize @bullet
+## @item @var{f}   : string : Name of function. Returns a real value
+## @item @var{df}  : string : Name of f's derivative. Returns a (R*C) x 1 vector
+## @item @var{args}: cell   : Arguments passed to f.@*
+## @item @var{ctl} : 5-vec  : (Optional) Control variables, described below
+## @end itemize
+##
+## @subheading Returned values
+## @itemize @bullet
+## @item @var{x0}  : matrix : Local minimum of f
+## @item @var{v}   : real   : Value of f at x0
+## @item @var{nev} : 1 x 2  : Number of evaluations of f and of df
+## @end itemize
+##
+## @subheading Control Variables
+## @itemize @bullet
+## @item @var{ctl}(1) : 1 or 2 : Select stopping criterion amongst :
+## @item @var{ctl}(1)==0 : Default value
+## @item @var{ctl}(1)==1 : Stopping criterion : Stop search when the value doesn't
+## improve, as tested by @math{ ctl(2) > Deltaf/max(|f(x)|,1) }
+## where Deltaf is the decrease in f observed in the last iteration
+## (each iteration consists of R*C line searches).
+## @item @var{ctl}(1)==2 : Stopping criterion : Stop search when updates are small,
+## as tested by @math{ ctl(2) > max @{ dx(i)/max(|x(i)|,1) | i in 1..N @}}
+## where dx is the change in x that occurred in the last iteration.
+## @item @var{ctl}(2) : Threshold used in stopping tests.            Default=10*eps
+## @item @var{ctl}(2)==0 : Default value
+## @item @var{ctl}(3) : Position of the minimized argument in args   Default=1
+## @item @var{ctl}(3)==0 : Default value
+## @item @var{ctl}(4) : Maximum number of function evaluations       Default=inf
+## @item @var{ctl}(4)==0 : Default value
+## @item @var{ctl}(5) : Type of optimization:
+## @item @var{ctl}(5)==1 : "Fletcher-Reeves" method
+## @item @var{ctl}(5)==2 : "Polak-Ribiere" method (Default)
+## @item @var{ctl}(5)==3 : "Hestenes-Stiefel" method
+## @end itemize
+##
+## @var{ctl} may have length smaller than 5. Default values will be used if ctl is
+## not passed or if nan values are given.
+## @subheading Example:
+##
+## function r=df( l ) b=[1;0;-1]; r = -( 2*l@{1@} - 2*b + rand(size(l@{1@}))); endfunction @*
+## function r=ff( l ) b=[1;0;-1]; r = (l@{1@}-b)' * (l@{1@}-b); endfunction @*
+## ll = @{ [10; 2; 3] @}; @*
+## ctl(5) = 3; @*
+## [x0,v,nev]=cg_min( "ff", "df", ll, ctl ) @*
+##
+## Comment: In general, the BFGS method seems to perform better in many cases, but requires more computation per iteration.
+## @seealso{ bfgsmin, http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient }
+## @end deftypefn

+## Author : Etienne Grossmann <et...@is...>
+## Modified by: Levente Torok <Tor...@gm...>
+## This software is distributed under the terms of the GPL

+function [x,v,nev] = cg_min (f, dfn, args, ctl)
+
+verbose = 0;
+
+crit = 1;                       # Default control variables
+tol = 10*eps;
+narg = 1;
+maxev = inf;
+method = 2;
+
+if nargin >= 4,                 # Read arguments
+  if !isnan (ctl(1)) && ctl(1) ~= 0, crit = ctl(1); end
+  if length (ctl)>=2 && !isnan (ctl(2)) && ctl(2) ~= 0, tol = ctl(2); end
+  if length (ctl)>=3 && !isnan (ctl(3)) && ctl(3) ~= 0, narg = ctl(3); end
+  if length (ctl)>=4 && !isnan (ctl(4)) && ctl(4) ~= 0, maxev = ctl(4); end
+  if length (ctl)>=5 && !isnan (ctl(5)) && ctl(5) ~= 0, method= ctl(5); end
+end
+
+if iscell (args),               # List of arguments
+  x = args{narg};
+else                            # Single argument
+  x = args;
+  args = {args};
+end
+
+if narg > length (args),        # Check
+  error ("cg_min : narg==%i, length (args)==%i\n",
+         narg, length (args));
+end
+
+[R, C] = size(x);
+N = R*C;
+x = reshape (x,N,1) ;
+
+nev = [0, 0];
+
+v = feval (f, args);
+nev(1)++;
+
+dxn_1 = -feval (dfn, args);
+dxn = dxn_1;
+lxn = dxn_1;
+nev(2)++;
+
+done = 0;
+
+## TEMP
+## tb = ts = zeros (1,100);
+
+                                # Control params for line search
+ctlb = [10*sqrt(eps), narg, maxev];
+if crit == 2, ctlb(1) = tol; end
+
+x0 = x;
+v0 = v;
+
+nline = 0;
+while nev(1) <= maxev ,
+  ## xprev = x ;
+  ctlb(3) = maxev - nev(1);     # Update # of evals
+
+                                ## wiki alg 4.
+  [alpha, vnew, nev0] = brent_line_min (f, dxn, args, ctlb);
+
+  nev += nev0;
+                                ## wiki alg 5.
+  x = x + alpha * dxn;
+
+  if nline >= N,
+    if crit == 1,
+      done = tol > (v0 - vnew) / max (1, abs (v0));
+    else
+      done = tol > norm ((x-x0)(:));
+    end
+    nline = 1;
+    x0 = x;
+    v0 = vnew;
+  else
+    nline++;
+  end
+
+  if done || nev(1) >= maxev, return; end
+
+  if vnew > v + eps ,
+    printf("cg_min: step increased cost function\n");
+    keyboard
+  end
+
+  # if abs(1-(x-xprev)'*dxn/norm(dxn)/norm(x-xprev))>1000*eps,
+  #   printf("cg_min: step is not in the right direction\n");
+  #   keyboard
+  # end
+
+                                # update x at the narg'th position of the args cell array
+  args{narg} = reshape (x, R, C);
+
+  v = feval (f, args);
+  nev(1)++;
+
+  if verbose, printf("cg_min : nev=%4i, v=%8.3g\n",nev(1),v); end
+
+                                ## wiki alg 1:
+  dxn = -feval (dfn, args);
+  nev(2)++;
+
+                                # wiki alg 2:
+  switch method
+
+    case 1                      # Fletcher-Reeves method
+      nu = dxn' * dxn;
+      de = dxn_1' * dxn_1;
+
+    case 2                      # Polak-Ribiere method
+      nu = (dxn-dxn_1)' * dxn;
+      de = dxn_1' * dxn_1;
+
+    case 3                      # Hestenes-Stiefel method
+      nu = (dxn-dxn_1)' * dxn;
+      de = (dxn-dxn_1)' * lxn;
+
+    otherwise
+      error ("cg_min: unknown method");
+
+  endswitch
+
+  if nu == 0,
+    return
+  endif
+
+  if de == 0,
+    error ("cg_min: numerical instability");
+  endif
+
+  beta = nu / de;
+  beta = max (0, beta);
+                                ## wiki alg 3. update dxn, lxn, point
+  dxn_1 = dxn;
+  dxn = dxn_1 + beta*lxn;
+  lxn = dxn;
+
+end
+
+if verbose, printf ("cg_min: Too many evaluations!\n"); end
+
+endfunction
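For an interactive feel of cg_min beyond the docstring example (illustrative sketch only, not part of the commit; qf and dqf are made-up names), a convex quadratic with an exact gradient converges to its known minimizer under the default Polak-Ribiere updates:

  # Quadratic cost and its exact gradient; the argument arrives as a cell array.
  function v = qf (l)
    A = diag ([1 5 25]); b = [1; 0; -1];
    v = (l{1}-b)' * A * (l{1}-b);
  endfunction
  function g = dqf (l)
    A = diag ([1 5 25]); b = [1; 0; -1];
    g = 2 * A * (l{1}-b);
  endfunction

  [xmin, fmin, nev] = cg_min ("qf", "dqf", { [10; 10; 10] })
  # xmin should be close to [1; 0; -1] and fmin close to 0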
Added: trunk/octave-forge/main/optim/inst/cg_min_example_1.m
===================================================================
--- trunk/octave-forge/main/optim/inst/cg_min_example_1.m	                        (rev 0)
+++ trunk/octave-forge/main/optim/inst/cg_min_example_1.m	2009-03-24 17:14:47 UTC (rev 5635)
@@ -0,0 +1,83 @@
+## ok = cg_min_example_1 - Test that cg_min works
+##
+## Defines some simple functions and verifies that calling
+## cg_min on them returns the correct minimum.
+##
+## Sets 'ok' to 1 if success, 0 otherwise

+## The name of the optimizing function
+optim_func = "cg_min";
+
+ok = 1;
+
+if ! exist ("verbose"), verbose = 0; end
+
+if 0,
+  P = 10+floor(30*rand(1)) ;    # Nparams
+  R = P+floor(30*rand(1)) ;     # Nobses
+else
+  P = 15;
+  R = 20;                       # must have R >= P
+end
+
+noise = 0 ;
+global obsmat ;
+obsmat = randn(R,P) ;
+global truep ;
+truep = randn(P,1) ;
+xinit = randn(P,1) ;
+
+global obses ;
+obses = obsmat*truep ;
+if noise, obses = addnoise(obses,noise); end
+
+function s = msq(x)
+  try
+    s = mean(x(find(!isnan(x))).^2);
+  catch
+    s = nan;
+  end
+endfunction
+
+function v = ff(x)
+  global obsmat;
+  global obses;
+  v = msq( obses - obsmat*x{1} ) + 1 ;
+endfunction
+
+function dv = dff(x)
+  global obsmat;
+  global obses;
+  er = -obses + obsmat*x{1} ;
+  dv = 2*er'*obsmat / rows(obses) ;
+  dv = dv';
+  ## dv = 2*er'*obsmat ;
+endfunction
+
+printf("going to test : %s\n",optim_func);
+if verbose,
+  printf ("Nparams = P = %i, Nobses = R = %i\n",P,R);
+end
+
+tic;
+[xlev,vlev,nlev] = feval(optim_func, "ff", "dff", xinit) ;
+toc;
+
+if max (abs(xlev-truep)) > 100*sqrt (eps),
+  if verbose,
+    printf ("Error is too big : %8.3g\n", max (abs (xlev-truep)));
+  end
+  ok = 0;
+end
+
+if verbose,
+  printf (" Costs : init=%8.3g, final=%8.3g, best=%8.3g\n",\
+          ff({xinit}), vlev, ff({truep}));
+end
+
+if (ok) printf("All tests ok\n"); endif
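Since the problem generated by the script above is noiseless linear least squares, its solution can also be obtained directly; a hypothetical extra check (editorial, not in the commit) could compare the two answers:

  # Sanity check: compare cg_min's answer with the normal-equations solution.
  xls = obsmat \ obses;                  # least-squares solution of obsmat*x = obses
  printf ("max |xlev - xls| = %8.3g\n", max (abs (xlev - xls)));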
Added: trunk/octave-forge/main/optim/inst/cg_min_example_2.m
===================================================================
--- trunk/octave-forge/main/optim/inst/cg_min_example_2.m	                        (rev 0)
+++ trunk/octave-forge/main/optim/inst/cg_min_example_2.m	2009-03-24 17:14:47 UTC (rev 5635)
@@ -0,0 +1,57 @@
+##
+## Test cg_min on a randomly scaled quadratic form
+optim_func = "cg_min";
+printf("going to test : %s\n",optim_func);
+
+N = 1+floor(30*rand(1)) ;
+global truemin ;
+truemin = randn(N,1) ;
+global offset ;
+offset = 100*randn(1) ;
+global metric ;
+metric = randn(2*N,N) ;
+metric = metric'*metric ;
+
+if N>1,
+  [u,d,v] = svd(metric);
+  d = (0.1+[0:(1/(N-1)):1]).^2 ;
+  metric = u*diag(d)*u' ;
+end
+
+function v = testfunc(x)
+  global offset ;
+  global truemin ;
+  global metric ;
+  v = sum((x{1}-truemin)'*metric*(x{1}-truemin)) + offset ;
+end
+
+function df = dtestf(x)
+  global truemin ;
+  global metric ;
+  df = metric' * 2*(x{1}-truemin);
+end
+
+xinit = 10*randn(N,1);
+
+[x,v,niter] = feval(optim_func, "testfunc","dtestf", xinit) ;
+## [x,v,niter] = conjgrad_min("testfunc","dtestf",xinit) ;
+printf("cg_min_example_2\n");
+
+if any(abs( x-truemin ) > 100*sqrt(eps) ) ,
+  printf("NOT OK 1\n");
+else
+  printf("OK 1\n");
+end
+
+if v-offset > 1e-8 ,
+  printf("NOT OK 2\n");
+else
+  printf("OK 2\n");
+end
+
+printf("nev=%d N=%d errx=%8.3g errv=%8.3g\n",...
+       niter(1),N,max(abs( x-truemin )),v-offset);
Added: trunk/octave-forge/main/optim/inst/cg_min_example_3.m
===================================================================
--- trunk/octave-forge/main/optim/inst/cg_min_example_3.m	                        (rev 0)
+++ trunk/octave-forge/main/optim/inst/cg_min_example_3.m	2009-03-24 17:14:47 UTC (rev 5635)
@@ -0,0 +1,110 @@
+## ok = cg_min_example_3 - Test that cg_min works with extra
+## arguments
+##
+## Defines some simple functions and verifies that calling
+## cg_min on them returns the correct minimum.
+##
+## Sets 'ok' to 1 if success, 0 otherwise

+## The name of the optimizing function
+optim_func = "cg_min";
+
+ok = 1;
+verbose = true;
+
+if ! exist ("verbose"), verbose = 0; end
+if 0,
+  P = 10+floor(30*rand(1)) ;    # Nparams
+  R = P+floor(30*rand(1)) ;     # Nobses
+else
+  P = 2;
+  R = 3;
+end
+
+noise = 0 ;
+obsmat = randn(R,P) ;
+
+truep = randn(P,1) ;
+xinit = randn(P,1) ;
+
+global obses ;
+obses = obsmat*truep ;
+
+function s = msq(x)
+  try
+    s = mean(x(find(!isnan(x))).^2);
+  catch
+    s = nan;
+  end
+endfunction
+
+function [v,stddev] = addnoise(u,db)
+
+  if ! isfinite (db), v = u; stddev = 0; return; end
+
+  if prod(size(u)) == 0,
+    error("addnoise called with empty signal");
+  end
+  n = randn(size(u));
+
+  dbn = 1;
+
+  ## dbu = cov(u(:));
+  dbu = msq( u(:) ) - mean( u(find(!isnan(u(:)))) )^2 ;
+
+  stddev = 10^(-db/20)*sqrt(dbu) ;
+
+  v = u+n*stddev;
+  % noislev(u,v)
+endfunction
+
+if noise, obses = addnoise(obses,noise); end
+
+extra = {obsmat, obses};
+
+function v = ff( xx )
+  x = xx{1};
+  obsmat = xx{2};
+  obses = xx{3};
+  v = msq( obses - obsmat*x ) + 1 ;
+endfunction
+
+function dv = dff( xx )
+  x = xx{1};
+  obsmat = xx{2};
+  obses = xx{3};
+  er = -obses + obsmat*x ;
+  dv = 2*er'*obsmat / rows(obses) ;
+  dv = dv';
+  ## dv = 2*er'*obsmat ;
+endfunction
+
+printf( "going to test : %s\n",optim_func);
+if verbose,
+  printf ("Nparams = P = %i, Nobses = R = %i\n",P,R);
+end
+
+tic;
+## [xlev,vlev,nlev] = feval (optim_func, "ff", "dff", xinit, "extra", extra) ;
+x = {xinit, obsmat, obses};
+[xlev,vlev,nlev] = feval \
+    (optim_func, "ff", "dff", x);
+toc;
+
+if max (abs(xlev-truep)) > 100*sqrt (eps),
+  if verbose,
+    printf ("Error is too big : %8.3g\n", max (abs (xlev-truep)));
+  end
+  ok = 0;
+end
+
+if verbose,
+  xinit_ = {xinit,obsmat,obses};
+  xtrue_ = {truep,obsmat,obses};
+  printf (" Costs : init=%8.3g, final=%8.3g, best=%8.3g\n",\
+          ff(xinit_), vlev, ff(xtrue_));
+end
+
+if (ok)
+  printf("All tests ok\n");
+endif