
[Pattern Classification] Training a Three-Layer Neural Network to Classify 3-D Gaussian-Distributed Data

2017-10-27 12:04
neuralNetwork.m

clear;
% Draw 1000 samples from each of four 3-D Gaussian distributions, label
% the class-1 samples by their posterior under the four densities, and
% train a 3-3-4 network (sigmoid hidden layer, softmax output) on them.

% mean [0 0 0]
MU1 = [0 0 0];
% covariance matrix: identity
SIGMA1 = [ 1 0 0; 0 1 0; 0 0 1 ];
DATA1 = mvnrnd(MU1, SIGMA1, 1000);

% optional: visualize the class-1 samples
% scatter3(DATA1(:, 1), DATA1(:, 2), DATA1(:, 3))

% mean [0 1 0]
MU2 = [0 1 0];
% covariance matrix (non-diagonal)
SIGMA2 = [ 1 0 1; 0 2 2; 1 2 5 ];
DATA2 = mvnrnd(MU2, SIGMA2, 1000);

% mean [-1 0 1]
MU3 = [-1 0 1];
% covariance matrix (diagonal)
SIGMA3 = [ 2 0 0; 0 6 0; 0 0 1 ];
DATA3 = mvnrnd(MU3, SIGMA3, 1000);

% mean [0 0.5 1]
MU4 = [0 0.5 1];
% covariance matrix (diagonal)
SIGMA4 = [ 2 0 0; 0 1 0; 0 0 3 ];
DATA4 = mvnrnd(MU4, SIGMA4, 1000);

% Evaluate each class-1 sample's density under the four distributions
Y1 = probability(DATA1, MU1, SIGMA1);
Y2 = probability(DATA1, MU2, SIGMA2);
Y3 = probability(DATA1, MU3, SIGMA3);
Y4 = probability(DATA1, MU4, SIGMA4);
Y = [Y1 Y2 Y3 Y4];
Z = sum(Y, 2);
% Normalize each row to posterior probabilities over the four classes
% (implicit expansion; use bsxfun(@rdivide, Y, Z) before R2016b)
Y = Y ./ Z;
% Bayes label: index of the largest posterior in each row
[LABEL, LABELIDX] = max(Y, [], 2);
% Weight matrix between the input layer and the hidden layer
W1 = 2*rand(3)-1;
% Weight matrix between the activated hidden layer and the output layer
W2 = 2*rand(3, 4)-1;
% Scalar biases B1, B2, shared across the neurons of each layer
B1 = 2*rand(1)-1;
B2 = 2*rand(1)-1;

learningrate = 0.01;
for m = 1:50
    for i = 1:length(DATA1)
        % Forward pass through the 3-3-4 network, softmax on the output
        NEURON1 = DATA1(i, :) * W1 + B1;
        NEURON1_ACTIVE = sigmoid(NEURON1);
        NEURON2 = NEURON1_ACTIVE * W2 + B2;
        OUTPUT = softmax(NEURON2);

        % Backward pass: output error against the posterior targets,
        % chained through the elementwise softmax derivative
        loss = OUTPUT - Y(i, :);
        softmaxloss = softmax_loss(NEURON2) .* loss;
        % Gradient of W2: outer product of hidden activations and output error
        W2_LOSS = NEURON1_ACTIVE' * softmaxloss;
        % Backpropagate the error to the hidden layer
        NEURON_ACTIVE_LOSS = softmaxloss * W2';
        NEURON1_LOSS = sigmoid_loss(NEURON1) .* NEURON_ACTIVE_LOSS;
        % Gradient of W1: outer product of the input and the hidden error
        W1_LOSS = DATA1(i, :)' * NEURON1_LOSS;

        % Gradient-descent updates; each scalar bias gradient is the
        % sum of the corresponding layer's errors
        B1 = B1 - sum(NEURON1_LOSS, 2) * learningrate;
        W1 = W1 - W1_LOSS * learningrate;
        B2 = B2 - sum(softmaxloss) * learningrate;
        W2 = W2 - W2_LOSS * learningrate;
    end
end
% Evaluate training accuracy against the Bayes labels
value = 0;
for i = 1:length(DATA1)
    % Forward pass
    NEURON1 = DATA1(i, :) * W1 + B1;
    NEURON1_ACTIVE = sigmoid(NEURON1);
    NEURON2 = NEURON1_ACTIVE * W2 + B2;
    OUTPUT = softmax(NEURON2);
    [tLABEL, tLABELIDX] = max(OUTPUT, [], 2);
    if tLABELIDX == LABELIDX(i)
        value = value + 1;
    end
end
RATE = value / length(DATA1);
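
The per-sample evaluation loop can also be written as one vectorized pass; a minimal sketch, assuming the trained W1, W2, B1, B2 and the Bayes labels LABELIDX are still in the workspace (implicit expansion, R2016b+):

% Vectorized forward pass over all 1000 samples
H = sigmoid(DATA1 * W1 + B1);        % 1000x3 hidden activations
Z = H * W2 + B2;                     % 1000x4 output scores
E = exp(Z - max(Z, [], 2));          % shift each row by its max for stability
P = E ./ sum(E, 2);                  % row-wise softmax
[~, PRED] = max(P, [], 2);           % predicted class per sample
RATE_VEC = mean(PRED == LABELIDX);   % should match RATE from the loop above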


probability.m

function output = probability( X, MU, SIGMA )
%PROBABILITY Multivariate normal density of each row of X
%   Returns a column vector with the N(MU, SIGMA) density evaluated at
%   every sample (row) of X. Requires the Statistics and Machine
%   Learning Toolbox (like mvnrnd above).

output = mvnpdf(X, MU, SIGMA);

end
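
Since mvnpdf accepts the whole sample matrix at once, the posterior matrix in neuralNetwork.m can be built in two lines; a minimal sketch, assuming the MU/SIGMA variables defined above:

Y = [ mvnpdf(DATA1, MU1, SIGMA1), mvnpdf(DATA1, MU2, SIGMA2), ...
      mvnpdf(DATA1, MU3, SIGMA3), mvnpdf(DATA1, MU4, SIGMA4) ];
Y = Y ./ sum(Y, 2);   % row-normalize to posterior probabilities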


sigmoid.m

function output = sigmoid( x )
%SIGMOID Logistic sigmoid 1/(1+exp(-x)), applied elementwise
output = 1./(1+exp(-x));
end


sigmoid_loss.m

function output = sigmoid_loss( x )
%SIGMOID_LOSS Derivative of the logistic sigmoid, applied elementwise
%   sigmoid'(x) = sigmoid(x)*(1 - sigmoid(x)) = exp(-x)/(1 + exp(-x))^2
output = exp(-x) ./ (1+exp(-x)).^2;

end
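
A quick central finite-difference check confirms that sigmoid_loss matches the slope of sigmoid; a minimal sketch (the test points and step size are arbitrary choices, not from the original post):

x = randn(1, 5);
h = 1e-6;
numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h);
max(abs(numeric - sigmoid_loss(x)))   % on the order of 1e-10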


softmax.m

function output = softmax( x )
%SOFTMAX Softmax of a row vector
%   Subtracting max(x) before exponentiating avoids overflow and does
%   not change the result: softmax is invariant to constant shifts.
e = exp(x - max(x));
output = e / sum(e);
end
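
Two quick sanity checks on softmax: the output sums to 1, and adding a constant to every input leaves the result unchanged (the property the max(x) shift relies on). A minimal sketch:

x = randn(1, 4);
sum(softmax(x))                          % 1, up to roundoff
max(abs(softmax(x) - softmax(x + 50)))   % shift invariance: ~0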


softmax_loss.m

function output = softmax_loss( x )
%SOFTMAX_LOSS Diagonal of the softmax Jacobian, applied elementwise
%   d softmax(x)_i / d x_i = s_i*(1 - s_i) = s_i - s_i^2
s = exp(x - max(x));
s = s / sum(s);
output = s - s.^2;

end
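
The same finite-difference idea verifies that softmax_loss returns the diagonal of the softmax Jacobian, i.e. that perturbing x(i) changes softmax(x)(i) by s_i*(1 - s_i); a minimal sketch:

x = randn(1, 4);
h = 1e-6;
sl = softmax_loss(x);
for i = 1:4
    e = zeros(1, 4);
    e(i) = h;
    d = (softmax(x + e) - softmax(x - e)) / (2 * h);
    fprintf('i=%d  analytic=%.8f  numeric=%.8f\n', i, sl(i), d(i));
end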