LWR: The Locally Weighted Linear Regression Algorithm
Source: http://blog.csdn.net/tianguokaka/article/details/14227187
After watching the third lecture of the machine learning course, I implemented the LWR algorithm.
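For reference, the weighting scheme the code below implements is the standard LWR setup from that lecture: each training point receives a Gaussian weight relative to the query point, and the parameters are fit to minimize the weighted squared error. In my notation (not from the original post):

$$
w^{(i)} = \exp\!\left(-\frac{\lVert x^{(i)} - x \rVert^2}{2\tau^2}\right),
\qquad
J(\theta) = \frac{1}{2} \sum_{i=1}^{m} w^{(i)} \left( y^{(i)} - \theta^{\top} x^{(i)} \right)^2
$$

The code uses tau = 6 (the 2*36 inside weightValue) and truncates any weight below 0.02 to zero, so distant points drop out of the update entirely.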
#include <iostream>
#include <cstdio>    // printf
#include <cmath>     // pow, exp
#include <cstdlib>   // system
using namespace std;

const int Number = 6;               // number of training samples
const int Dimension = 3;            // feature dimension (leading bias term included)
const float learningRate = 0.001f;
const float errorThr = 1;           // cost threshold for stopping
const int MAX = 1000;               // maximum number of iterations

typedef struct Data {
    float vectorComponent[Dimension];
} vectorData;

// Training inputs: each row is (1, x, y); the leading 1 is the bias term.
vectorData x[Number] = {
    /* {1,1,4},
       {1,2,5},
       {1,5,1},
       {1,4,2}, */
    {1,1,1},
    {1,1,3},
    {1,1,2},
    {1,2,3},
    {1,2,1},
    {1,2,2},
};
// Training targets: z = x^2 + y^2 evaluated at each row above.
float y[Number] = {2, 10, 5, 13, 5, 8};

// LWR (locally weighted regression)
// Gaussian kernel weight between training point xi and query point x,
// with bandwidth tau = 6 (so 2*tau^2 = 72). Weights below 0.02 are
// truncated to zero so distant points drop out entirely.
float weightValue(vectorData xi, vectorData x) {
    float weight = 0.0;
    for (int i = 0; i < Dimension; i++) {
        weight += pow(xi.vectorComponent[i] - x.vectorComponent[i], 2);
    }
    float tempWeight = exp(-(weight / (2 * 36)));
    if (tempWeight < 0.02)
        tempWeight = 0.0;
    return tempWeight;
}

// Dot product of two vectors.
float multiPly(vectorData x1, vectorData x2) {
    float temp = 0.0;
    for (int i = 0; i < Dimension; i++) {
        temp += x1.vectorComponent[i] * x2.vectorComponent[i];
    }
    return temp;
}

vectorData addVectorData(vectorData x1, vectorData x2) {
    vectorData temp;
    for (int i = 0; i < Dimension; i++)
        temp.vectorComponent[i] = x1.vectorComponent[i] + x2.vectorComponent[i];
    return temp;
}

vectorData minusVectorData(vectorData x1, vectorData x2) {
    vectorData temp;
    for (int i = 0; i < Dimension; i++)
        temp.vectorComponent[i] = x1.vectorComponent[i] - x2.vectorComponent[i];
    return temp;
}

vectorData numberMultiVectorData(float para, vectorData x1) {
    vectorData temp;
    for (int i = 0; i < Dimension; i++)
        temp.vectorComponent[i] = x1.vectorComponent[i] * para;
    return temp;
}

// Weighted squared-error cost J = (1/2) * sum_i w_i * (y_i - h(x_i))^2,
// where h(x_i) sums the contributions of all parameter vectors.
float costFunction(vectorData parameter[], vectorData inputData[], float inputResultData[], vectorData object) {
    float costValue = 0.0;
    float tempValue = 0.0;
    for (int i = 0; i < Number; i++) {
        tempValue = 0.0;
        // consider all the parameters although most of them are zero
        for (int j = 0; j < Number; j++)
            tempValue += multiPly(parameter[j], inputData[i]);
        costValue += weightValue(inputData[i], object) * pow(inputResultData[i] - tempValue, 2);
    }
    return costValue / 2;
}

int LocallyWeightedRegression(vectorData parameter[], vectorData inputData[], float resultData[], vectorData objectVector) {
    float tempValue = 0.0;
    float errorCost = 0.0;
    float weightedValue = 0.0;
    errorCost = costFunction(parameter, inputData, resultData, objectVector);
    if (errorCost < errorThr)
        return 1;
    for (int iteration = 0; iteration < MAX; iteration++) {
        // one stochastic pass over the training set
        for (int i = 0; i < Number; i++) {
            // calculate h(x) for the current sample
            weightedValue = weightValue(inputData[i], objectVector);
            tempValue = 0.0;
            for (int j = 0; j < Number; j++)
                tempValue += multiPly(parameter[j], inputData[i]);
            // update the parameters by stochastic gradient descent,
            // scaling each step by the sample's kernel weight
            printf("the next parameter is ");
            for (int ii = 0; ii < Number; ii++) {
                parameter[ii] = addVectorData(parameter[ii],
                    numberMultiVectorData(weightedValue * learningRate * (resultData[i] - tempValue), inputData[i]));
                if (multiPly(parameter[ii], parameter[ii]) != 0) {
                    for (int jj = 0; jj < Dimension; jj++) {
                        printf("%f ", parameter[ii].vectorComponent[jj]);
                    }
                }
            }
            printf("\n");
            errorCost = costFunction(parameter, inputData, resultData, objectVector);
            printf("error cost is %f\n", errorCost);
            if (errorCost < errorThr)
                break;
        } // end of one stochastic pass
    } // end when the iteration count reaches MAX
    // predict the value at the query point
    float resultValue = 0.0;
    for (int i = 0; i < Number; i++) {
        resultValue += weightValue(inputData[i], objectVector) * multiPly(parameter[i], objectVector);
    }
    printf("result value is %f \n", resultValue);
    return 1;
}

int testLWA() {
    vectorData objectData = {1, 1.5, 1.5};   // query point (bias, x, y)
    vectorData localParameter[Number] = {};  // all parameters start at zero
    LocallyWeightedRegression(localParameter, x, y, objectData);
    return 1;
}

int main() {
    // leftover calls from earlier experiments (not defined in this file):
    // DescendAlgorithm(parameter,x,y);
    // system("pause");
    // clearParameter(parameter);
    // Stochastic(parameter,x,y);
    // float ForTestData[] = {1,10,20};
    // testData(ForTestData);
    testLWA();
    system("pause");
    return 0;
}
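For anyone building this: it is a single translation unit, so a plain compile along the lines of g++ lwr.cpp -o lwr should suffice (the file name is arbitrary); note that system("pause") is Windows-specific and can be removed on other platforms.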
The target function used to generate the data is the surface z = x^2 + y^2. The query point is (1.5, 1.5), and the program outputs 5.124; the true value is 1.5^2 + 1.5^2 = 4.5.
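The code above fits the parameters by stochastic gradient descent. LWR also has a closed-form solution, theta = (X^T W X)^{-1} X^T W y, which is handy as a cross-check. Below is a minimal self-contained sketch on the same six data points; the variable names and the small Gaussian-elimination solver are mine, not from the original post:

#include <cstdio>
#include <cmath>

const int N = 6;  // samples
const int D = 3;  // features (bias included)

int main() {
    // Same training set as above: rows of (1, x, y), targets z = x^2 + y^2.
    double X[N][D] = {{1,1,1},{1,1,3},{1,1,2},{1,2,3},{1,2,1},{1,2,2}};
    double y[N]    = {2, 10, 5, 13, 5, 8};
    double q[D]    = {1, 1.5, 1.5};  // query point
    double tau     = 6.0;            // same bandwidth as above

    // Gaussian weights w_i = exp(-||x_i - q||^2 / (2 tau^2)).
    double w[N];
    for (int i = 0; i < N; i++) {
        double d2 = 0;
        for (int j = 0; j < D; j++)
            d2 += (X[i][j] - q[j]) * (X[i][j] - q[j]);
        w[i] = exp(-d2 / (2 * tau * tau));
    }

    // Build the normal equations: A = X^T W X (3x3), b = X^T W y (3).
    double A[D][D] = {}, b[D] = {};
    for (int i = 0; i < N; i++)
        for (int r = 0; r < D; r++) {
            b[r] += w[i] * X[i][r] * y[i];
            for (int c = 0; c < D; c++)
                A[r][c] += w[i] * X[i][r] * X[i][c];
        }

    // Solve A * theta = b by Gaussian elimination with partial pivoting.
    for (int col = 0; col < D; col++) {
        int piv = col;
        for (int r = col + 1; r < D; r++)
            if (fabs(A[r][col]) > fabs(A[piv][col])) piv = r;
        for (int c = 0; c < D; c++) {
            double t = A[col][c]; A[col][c] = A[piv][c]; A[piv][c] = t;
        }
        double t = b[col]; b[col] = b[piv]; b[piv] = t;
        for (int r = col + 1; r < D; r++) {
            double f = A[r][col] / A[col][col];
            for (int c = col; c < D; c++) A[r][c] -= f * A[col][c];
            b[r] -= f * b[col];
        }
    }
    double theta[D];
    for (int r = D - 1; r >= 0; r--) {
        theta[r] = b[r];
        for (int c = r + 1; c < D; c++) theta[r] -= A[r][c] * theta[c];
        theta[r] /= A[r][r];
    }

    // Prediction at the query point: theta^T q.
    double pred = 0;
    for (int j = 0; j < D; j++) pred += theta[j] * q[j];
    printf("closed-form LWR prediction at (1.5, 1.5): %f\n", pred);
    return 0;
}

With tau = 6 all six weights are close to 1, so the weighted fit is nearly an ordinary least-squares plane; it predicts about 5.17 at (1.5, 1.5), in the same range as the 5.124 reached by gradient descent. Both differ from the true 4.5 because a linear hypothesis can only approximate the quadratic surface, and tau = 6 is wide relative to this data.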