
Design and Implementation of a Two-Layer Neural Network

2017-12-08 15:12
       Over the past few days I have been studying the design principles behind shallow networks (neural networks with a single hidden layer). To reinforce my understanding, here is a summary.
       To keep the idea simple, assume a two-layer network whose hidden layer has only four hidden units, as in the figure below:

[Figure: a two-layer network with a four-unit hidden layer feeding a single output unit]
       Like a single neuron, the network works in two passes: forward propagation and backward propagation. The backward pass just involves a few more derivative formulas, so those are what this post mainly derives.

1. Forward propagation

       Suppose there are m input examples, each with n input features, so that X is an n-by-m matrix. Each hidden unit has its own column vector w and bias b, so W1 is an n-by-4 matrix and B1 is a column vector of length 4. Then Z1 = np.dot(W1.T, X) + B1 is a 4-by-m matrix, and Y1_hat = sigmoid(Z1) gives the hidden units' outputs. From there the problem reduces to a single neuron that takes Y1_hat as its input: Z2 = np.dot(w2.T, Y1_hat) + b2, and y2_hat = sigmoid(Z2) is the final output.
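       As a quick sanity check (my own sketch, not from the original post; the sizes n = 2, m = 400, and 4 hidden units match the example implemented below), the shapes work out as follows:

import numpy as np

# Minimal shape check of the forward pass described above
# (random placeholder values; sizes match the example below).
n, m, hidden = 2, 400, 4
X = np.random.randn(n, m)              # one column per example
W1 = np.random.randn(n, hidden) * 0.01
B1 = np.zeros((hidden, 1))
w2 = np.random.randn(hidden, 1) * 0.01
b2 = 0.0

Z1 = np.dot(W1.T, X) + B1              # (4, m)
Y1_hat = 1.0 / (1 + np.exp(-Z1))       # hidden activations, (4, m)
Z2 = np.dot(w2.T, Y1_hat) + b2         # (1, m)
y2_hat = 1.0 / (1 + np.exp(-Z2))       # final outputs, (1, m)
print(Z1.shape, Y1_hat.shape, Z2.shape, y2_hat.shape)
# -> (4, 400) (4, 400) (1, 400) (1, 400)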

2. Backward propagation

       Based on the loss function defined for the neural-network binary classification problem, the partial-derivative formulas for each parameter are derived below for a single example.
       Second-layer backward pass. With the single-example cross-entropy loss $L = -\left(y\log\hat{y}_2 + (1-y)\log(1-\hat{y}_2)\right)$ and the sigmoid derivative $\sigma'(z) = \sigma(z)(1-\sigma(z))$, the chain rule gives

$$dz_2 = \frac{\partial L}{\partial z_2} = \hat{y}_2 - y,\qquad dw_2 = \frac{\partial L}{\partial w_2} = \hat{y}_1\,dz_2,\qquad db_2 = \frac{\partial L}{\partial b_2} = dz_2$$

       First-layer backward pass. Propagating $dz_2$ back through $w_2$ and the sigmoid (where $\odot$ denotes element-wise multiplication and $x$ is the example's feature vector):

$$dz_1 = \frac{\partial L}{\partial z_1} = (w_2\,dz_2)\odot \hat{y}_1\odot(1-\hat{y}_1),\qquad dW_1 = \frac{\partial L}{\partial W_1} = x\,dz_1^{T},\qquad db_1 = \frac{\partial L}{\partial b_1} = dz_1$$
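       These formulas can be checked numerically against a centered finite difference of the loss. The following sketch is my own addition (toy values for a single example, verifying $db_2 = dz_2$); none of the variable values come from the post:

import numpy as np

# Numerical sanity check of db2 = dz2 for a single toy example
# (hypothetical values, my own addition; not from the original post).
np.random.seed(0)
x = np.random.randn(2, 1)            # one example with 2 features
W1 = np.random.randn(2, 4) * 0.01    # first-layer weights
b1 = np.zeros((4, 1))
w2 = np.random.randn(4, 1) * 0.01    # second-layer weights
b2 = 0.0
y = 1.0                              # the example's label

def loss(b2_val):
    # forward pass, then single-example cross-entropy loss
    y1 = 1.0 / (1 + np.exp(-(np.dot(W1.T, x) + b1)))
    y2 = 1.0 / (1 + np.exp(-(np.dot(w2.T, y1) + b2_val)))
    return (-(y * np.log(y2) + (1 - y) * np.log(1 - y2))).item()

eps = 1e-6
numeric = (loss(b2 + eps) - loss(b2 - eps)) / (2 * eps)  # finite difference

y1 = 1.0 / (1 + np.exp(-(np.dot(W1.T, x) + b1)))
y2 = 1.0 / (1 + np.exp(-(np.dot(w2.T, y1) + b2)))
analytic = (y2 - y).item()           # db2 = dz2 = y2_hat - y
print(numeric, analytic)             # the two should agree closely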


3. Code implementation

        Based on the ideas above, here is a Python implementation, along with an example to observe the prediction quality.
import numpy as np
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets

def train_model(learning_rate=0.1):
    train_data_x, train_data_y = load_planar_dataset()  # load the dataset

    row_count = 2         # number of input features per example
    examples_count = 400  # number of examples
    hidden_count = 4      # number of hidden units

    # Initialize the hidden-layer weights. They must not all be zero:
    # an all-zero initialization makes the network behave like a
    # single-layer one. The 0.01 scale is an empirical value.
    weight1 = np.random.randn(row_count, hidden_count) * 0.01
    b1 = np.zeros((hidden_count, 1))       # biases can be initialized to zero
    weight2 = np.zeros((hidden_count, 1))  # second-layer weights
    b2 = 0

    # iterate
    for i in range(10000):
        z1 = np.dot(weight1.T, train_data_x) + b1  # compute z1
        y1_hat = sigmoid(z1)                       # compute y1_hat
        z2 = np.dot(weight2.T, y1_hat) + b2        # compute z2
        y2_hat = sigmoid(z2)                       # compute y2_hat

        # cross-entropy loss averaged over the examples
        loss = -np.sum(train_data_y * np.log(y2_hat) +
                       (1 - train_data_y) * np.log(1 - y2_hat)) / examples_count
        if loss == 0:
            break
        print('%d:%f' % (i, loss))

        dz2 = y2_hat - train_data_y   # dL/dz2
        dw2 = np.dot(y1_hat, dz2.T)   # dL/dw2
        db2 = np.sum(dz2)
        dw2 = dw2 * 1.0 / examples_count
        db2 = db2 * 1.0 / examples_count

        dz1 = np.dot(weight2, dz2) * (y1_hat * (1 - y1_hat))  # dL/dz1
        dw1 = np.dot(train_data_x, dz1.T)                     # dL/dw1
        db1 = np.sum(dz1, axis=1, keepdims=True)
        dw1 = dw1 * 1.0 / examples_count
        db1 = db1 * 1.0 / examples_count

        weight2 = weight2 - learning_rate * dw2  # update w2
        b2 = b2 - learning_rate * db2            # update b2
        weight1 = weight1 - learning_rate * dw1  # update w1
        b1 = b1 - learning_rate * db1            # update b1

    return weight1, b1, weight2, b2

def predict_data(weight1, b1, weight2, b2):
    test_data_x, test_data_y = load_planar_dataset()  # load the dataset
    examples_count = 400  # number of examples

    z1 = np.dot(weight1.T, test_data_x) + b1
    y1_hat = sigmoid(z1)
    z2 = np.dot(weight2.T, y1_hat) + b2  # compute z2
    y2_hat = sigmoid(z2)                 # compute y2_hat

    prediction = np.round(y2_hat)  # threshold the outputs at 0.5
    print('correct rate:', np.sum(prediction == test_data_y) * 1.0 / examples_count)
    return prediction

if __name__ == '__main__':
    train_data_x, train_data_y = load_planar_dataset()         # load the dataset
    weight1, b1, weight2, b2 = train_model(learning_rate=1.2)  # train the parameters
    prediction = predict_data(weight1, b1, weight2, b2)        # predict with the trained parameters
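        One more note: plot_decision_boundary is imported at the top of the script but never called. A possible way to visualize the learned boundary is sketched below; the forward wrapper is a hypothetical helper of mine, not part of the original script:

import matplotlib.pyplot as plt

def forward(weight1, b1, weight2, b2, x):
    # repeat the trained network's forward pass and threshold at 0.5
    y1_hat = sigmoid(np.dot(weight1.T, x) + b1)
    return np.round(sigmoid(np.dot(weight2.T, y1_hat) + b2))

# plot_decision_boundary feeds the model an (m, 2) grid, so transpose it
plot_decision_boundary(lambda x: forward(weight1, b1, weight2, b2, x.T),
                       train_data_x, train_data_y.ravel())
plt.show()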



          In the end, setting the hidden layer to 4 or 5 units gives the best classification result, roughly 89.5% accuracy. The "planar_utils" module referenced here is the dataset helper provided with the course; I only used its dataset, but its other function definitions are well written too, so the whole file is included below for reference.

import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model

def plot_decision_boundary(model, X, y):
    # Set min and max values and give it some padding
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)

def sigmoid(x):
    """
    Compute the sigmoid of x

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(x)
    """
    s = 1 / (1 + np.exp(-x))
    return s

def load_planar_dataset():
    np.random.seed(1)
    m = 400  # number of examples
    N = int(m / 2)  # number of points per class
    D = 2  # dimensionality
    X = np.zeros((m, D))  # data matrix where each row is a single example
    Y = np.zeros((m, 1), dtype='uint8')  # labels vector (0 for red, 1 for blue)
    a = 4  # maximum ray of the flower

    for j in range(2):
        ix = range(N * j, N * (j + 1))
        t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2  # theta
        r = a * np.sin(4 * t) + np.random.randn(N) * 0.2  # radius
        X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        Y[ix] = j

    X = X.T
    Y = Y.T

    return X, Y

def load_extra_datasets():
    N = 200
    noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
    noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
    blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
    gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)
    no_structure = np.random.rand(N, 2), np.random.rand(N, 2)

    return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure
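        A usage note of my own (not from the post): load_extra_datasets returns data in scikit-learn's (examples, features) layout, so a dataset would need to be transposed before being fed to the network above, for example:

# Hypothetical example: swap in one of the extra datasets.
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
X, Y = noisy_moons        # X: (200, 2), Y: (200,)
X = X.T                   # -> (2, 200): one column per example
Y = Y.reshape(1, -1)      # -> (1, 200): row vector of labels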