最优化——FR共轭梯度法matlab程序
2012-12-31 15:57
155 次查看
% cg.m
function [x, output] = cg(fun, dfun, x0)
% CG  Fletcher-Reeves nonlinear conjugate gradient method.
%   fun  : character variable (or function handle), name of the function
%          computing the objective f(x)
%   dfun : character variable (or function handle), name of the function
%          computing the gradient of the objective
%   x0   : real column vector, initial point
%   x    : real vector, computed solution
%   output: structure with fields
%          fval      - objective value at the final point
%          iteration - number of iterations performed
%          funcount  - number of objective-function evaluations
%          gnorm     - norm of the gradient at the final point
%
% Requires the external line-search routine lines.m.
%
% Step 1: initialization
%
epsi = 1.0e-3;                  % termination tolerance on ||g||
k = 0;                          % iteration counter
funcN = 0;                      % objective-function evaluation counter
rho = 0.01; l = 0.15; u = 0.85; % line-search parameters
x = x0;
f = feval(fun, x);
funcN = funcN + 1;
n = length(x0);
%
% Step 2: check convergence condition
%
g = feval(dfun, x);
while norm(g) > epsi && k <= 150
%
% Step 3: form search direction; restart with steepest descent
%         every n+1 iterations (and on the very first iteration,
%         where gold/dold are not yet defined)
%
iterm = mod(k, n + 1) + 1;
if iterm == 1
d = -g;
gd = g'*d;
else
beta = (g'*g)/(gold'*gold);     % Fletcher-Reeves coefficient
d = -g + beta*dold;
gd = g'*d;
if gd >= 0.0                    % not a descent direction: fall back to -g
d = -g; gd = g'*d;
end
end
%
% Step 4: line search (external routine lines.m)
%
alpha_0 = 1.0;
[alpha, funcNk, exitflag] = lines(fun, rho, l, u, alpha_0, f, gd, x, d);
funcN = funcN + funcNk;
if exitflag == -1
break;
end
%
% Step 5: compute new point
%
s = alpha*d; x = x + s;
f = feval(fun, x);
funcN = funcN + 1;
gold = g;
dold = d;
g = feval(dfun, x);
% Increment k exactly once per loop; the original code incremented it
% twice, which inflated the iteration count and broke the restart cycle.
k = k + 1;
end
%
% Step 6: output
%
output.fval = f;
output.iteration = k;
output.funcount = funcN;
output.gnorm = norm(g);
end
测试
% f1.m
function f = f1(x)
% F1  Rosenbrock-type test objective: (x2 - x1^2)^2 + (1 - x1)^2.
%   Minimum f = 0 at x = [1; 1].
t1 = x(2) - x(1)^2;   % curvature residual
t2 = 1 - x(1);        % linear residual
f = t1^2 + t2^2;
end
% df1.m
function g = df1(x)
% DF1  Analytic gradient of the test objective f1.
%   g(1) = -4*x1*(x2 - x1^2) - 2*(1 - x1), g(2) = 2*(x2 - x1^2).
t = x(2) - x(1)^2;
g = [-4*x(1)*t - 2*(1 - x(1)); 2*t];
end
% Command
x0=[-1.9;2];
x=cg('f1', 'df1', x0)
结果
x =
1.0007
1.0019
function [x, output] = cg(fun, dfun, x0)
% CG  Unconstrained minimization by the Fletcher-Reeves conjugate
%     gradient method with periodic steepest-descent restarts.
%   fun  : character variable (or function handle), objective f(x)
%   dfun : character variable (or function handle), gradient of f
%   x0   : real column vector, initial point
%   x    : real vector, computed solution
%   output: structure with fields fval (final objective value),
%           iteration (iterations performed), funcount (objective
%           evaluations), gnorm (gradient norm at the final point)
%
% Requires the external line-search routine lines.m.
%
% Step 1: initialization
%
epsi = 1.0e-3;                  % termination tolerance on ||g||
k = 0;                          % iteration counter
funcN = 0;                      % objective-function evaluation counter
rho = 0.01; l = 0.15; u = 0.85; % line-search parameters
x = x0;
f = feval(fun, x);
funcN = funcN + 1;
n = length(x0);
%
% Step 2: check convergence condition
%
g = feval(dfun, x);
while norm(g) > epsi && k <= 150
%
% Step 3: form search direction; take a steepest-descent step every
%         n+1 iterations and on the first iteration (gold/dold are
%         undefined until one step has completed)
%
iterm = mod(k, n + 1) + 1;
if iterm == 1
d = -g;
gd = g'*d;
else
beta = (g'*g)/(gold'*gold);     % Fletcher-Reeves coefficient
d = -g + beta*dold;
gd = g'*d;
if gd >= 0.0                    % safeguard: ensure descent direction
d = -g; gd = g'*d;
end
end
%
% Step 4: line search (external routine lines.m)
%
alpha_0 = 1.0;
[alpha, funcNk, exitflag] = lines(fun, rho, l, u, alpha_0, f, gd, x, d);
funcN = funcN + funcNk;
if exitflag == -1
break;
end
%
% Step 5: compute new point
%
s = alpha*d; x = x + s;
f = feval(fun, x);
funcN = funcN + 1;
gold = g;
dold = d;
g = feval(dfun, x);
% Single increment per loop; the published version incremented k twice,
% double-counting iterations and desynchronizing the restart schedule.
k = k + 1;
end
%
% Step 6: output
%
output.fval = f;
output.iteration = k;
output.funcount = funcN;
output.gnorm = norm(g);
end
测试
% f1.m
function f = f1(x)
% F1  Rosenbrock-style test function with unit coupling coefficient.
%   f(x) = (x2 - x1^2)^2 + (1 - x1)^2, minimized at x = [1; 1].
r = [x(2) - x(1)^2; 1 - x(1)];  % residual vector
f = r' * r;                     % sum of squared residuals
end
% df1.m
function g = df1(x)
% DF1  Gradient of the test objective f1, component by component.
g = zeros(2, 1);
g(1) = 4*(x(1)^3 - x(1)*x(2)) + 2*x(1) - 2;  % df/dx1
g(2) = 2*(x(2) - x(1)^2);                    % df/dx2
end
% Command
x0=[-1.9;2];
x=cg('f1', 'df1', x0)
结果
x =
1.0007
1.0019
相关文章推荐
- 共轭梯度法程序/最优化-无约束共轭梯度法程序
- 共轭梯度法解线性方程组(Matlab程序)
- 最优化-约束/无约束共轭梯度法程序(c++)
- 用MATLAB实现FR共轭梯度法求解实例
- Matlab实现FR共轭梯度法
- 最优化——拟牛顿方法matlab程序
- 最优化——线搜索matlab程序
- matlab仿真二维光子晶体最简程序
- 求两个二维矩阵的root mean square(matlab程序)
- 使用Java程序调用Matlab
- matlab中调用c/c++程序
- Matlab调用C程序
- matlab练习程序(Arnold图像置乱)
- 如何分析matlab程序的主要效率问题
- matlab练习程序(meanshift图像聚类)
- matlab程序调试方法
- 决策树分类——matlab程序
- 最优化中单纯形法的matlab举例
- matlab练习程序(图像水平/竖直移动)
- matlab里实现最大值最小值最优化代码,实现【0,1】归一化代码