Andrew Ng's Coursera Deep Learning Course, DeepLearning.ai Programming Assignment —— Gradients_check (2-1.3)
2017-11-09 19:36
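Gradient checking compares the analytic gradient computed by backpropagation against a numerical estimate. Two formulas are used throughout, and the commented code below refers to them as formula (1) and formula (2): the two-sided gradient estimate, and the relative difference between the analytic and estimated gradients.

$$\text{gradapprox} = \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2\varepsilon} \tag{1}$$

$$\text{difference} = \frac{\lVert \text{grad} - \text{gradapprox} \rVert_2}{\lVert \text{grad} \rVert_2 + \lVert \text{gradapprox} \rVert_2} \tag{2}$$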
Gradient_check.py:
import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
from testCases import gradient_check_n_test_case
def gradient_check(x, theta, epsilon=1e-7):
    J = x * theta
    dtheta = x
    gradapprox = (x * (theta + epsilon) - x * (theta - epsilon)) / (2 * epsilon)
    grad = dtheta
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator
    if difference < epsilon:
        print("the gradient is correct")
    else:
        print("the gradient is not so ideal")
    return J, difference

x, theta = 2, 4
J, difference = gradient_check(x, theta)
print("difference = " + str(difference))
def forward_propagation_n(X, Y, parameters):
    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]
    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)
    cost = (-1.0 / m) * np.sum(Y * np.log(A3) + (1 - Y) * np.log(1 - A3))
    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    return cost, cache
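The cost computed by forward_propagation_n is the averaged binary cross-entropy over the m examples:

$$J = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log a^{[3](i)} + \left(1 - y^{(i)}\right) \log\left(1 - a^{[3](i)}\right) \right]$$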
def backward_propagation_n(X, Y, cache):
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    m = X.shape[1]
    dZ3 = A3 - Y
    dW3 = (1.0 / m) * np.dot(dZ3, A2.T)
    db3 = (1.0 / m) * np.sum(dZ3, axis=1, keepdims=True)
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(Z2 > 0))
    dW2 = (1.0 / m) * np.dot(dZ2, A1.T)
    db2 = (1.0 / m) * np.sum(dZ2, axis=1, keepdims=True)
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(Z1 > 0))
    dW1 = (1.0 / m) * np.dot(dZ1, X.T)
    db1 = (1.0 / m) * np.sum(dZ1, axis=1, keepdims=True)
    grads = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
             "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
             "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return grads
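The starting point dZ3 = A3 - Y follows from combining the sigmoid output layer with the cross-entropy cost; per example, the chain rule collapses to:

$$\frac{\partial J}{\partial z^{[3]}} = \left( -\frac{y}{a^{[3]}} + \frac{1 - y}{1 - a^{[3]}} \right) \cdot a^{[3]}\left(1 - a^{[3]}\right) = a^{[3]} - y$$

The ReLU layers simply pass the gradient through where the pre-activation is positive and zero it elsewhere, which is what the np.int64(Z > 0) masks implement.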
def implement_gradient_check_n(X, Y, grads, parameters, epsilon=1e-6):
    parameters_value, _ = dictionary_to_vector(parameters)
    grads = gradients_to_vector(grads)
    num_parameters = parameters_value.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))
    for i in range(num_parameters):
        thetaplus = np.copy(parameters_value)
        thetaplus[i][0] = thetaplus[i][0] + epsilon
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))
        thetaminus = np.copy(parameters_value)
        thetaminus[i][0] = thetaminus[i][0] - epsilon
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus))
        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)
    difference = np.linalg.norm(grads - gradapprox) / (np.linalg.norm(grads) + np.linalg.norm(gradapprox))
    if difference < 1e-6:
        print("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
    else:
        print("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
    return difference

X, Y, parameters = gradient_check_n_test_case()
print(parameters)
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
print(gradients)
difference = implement_gradient_check_n(X, Y, gradients, parameters)
gc_utils.py:
import numpy as np

def sigmoid(x):
    """
    Compute the sigmoid of x

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(x)
    """
    s = 1 / (1 + np.exp(-x))
    return s

def relu(x):
    """
    Compute the relu of x

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- relu(x)
    """
    s = np.maximum(0, x)
    return s

def dictionary_to_vector(parameters):
    """
    Roll all our parameters dictionary into a single vector satisfying our specific required shape.
    """
    keys = []
    count = 0
    for key in ["W1", "b1", "W2", "b2", "W3", "b3"]:
        # flatten parameter
        new_vector = np.reshape(parameters[key], (-1, 1))
        keys = keys + [key] * new_vector.shape[0]
        if count == 0:
            theta = new_vector
        else:
            theta = np.concatenate((theta, new_vector), axis=0)
        count = count + 1
    return theta, keys

def vector_to_dictionary(theta):
    """
    Unroll all our parameters dictionary from a single vector satisfying our specific required shape.
    """
    parameters = {}
    parameters["W1"] = theta[:20].reshape((5, 4))
    parameters["b1"] = theta[20:25].reshape((5, 1))
    parameters["W2"] = theta[25:40].reshape((3, 5))
    parameters["b2"] = theta[40:43].reshape((3, 1))
    parameters["W3"] = theta[43:46].reshape((1, 3))
    parameters["b3"] = theta[46:47].reshape((1, 1))
    return parameters

def gradients_to_vector(gradients):
    """
    Roll all our gradients dictionary into a single vector satisfying our specific required shape.
    """
    count = 0
    for key in ["dW1", "db1", "dW2", "db2", "dW3", "db3"]:
        # flatten parameter
        new_vector = np.reshape(gradients[key], (-1, 1))
        if count == 0:
            theta = new_vector
        else:
            theta = np.concatenate((theta, new_vector), axis=0)
        count = count + 1
    return theta
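As a quick illustration of the two reshaping helpers, here is a minimal round-trip sketch (the random parameter values are illustrative; the shapes are the ones hardcoded in vector_to_dictionary):

import numpy as np
from gc_utils import dictionary_to_vector, vector_to_dictionary

# Illustrative parameters matching the (5,4)/(5,1)/(3,5)/(3,1)/(1,3)/(1,1) shapes
# that vector_to_dictionary expects.
params = {"W1": np.random.randn(5, 4), "b1": np.zeros((5, 1)),
          "W2": np.random.randn(3, 5), "b2": np.zeros((3, 1)),
          "W3": np.random.randn(1, 3), "b3": np.zeros((1, 1))}
theta, keys = dictionary_to_vector(params)
print(theta.shape)                                  # (47, 1): 20 + 5 + 15 + 3 + 3 + 1 parameters
restored = vector_to_dictionary(theta)
print(np.allclose(params["W2"], restored["W2"]))    # True: the two helpers are inverses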
def gradient_check(x, theta, epsilon=1e-7):           # 1-D gradient check
    J = x * theta                                     # the function itself
    dtheta = x                                        # derivative of J with respect to theta
    gradapprox = (x * (theta + epsilon) - x * (theta - epsilon)) / (2 * epsilon)  # gradient estimate, formula (1) above
    grad = dtheta
    numerator = np.linalg.norm(grad - gradapprox)     # np.linalg.norm() computes the L2 norm
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator              # formula (2) above
    if difference < epsilon:                          # error below epsilon means the gradient is correct
        print("the gradient is correct")
    else:
        print("the gradient is not so ideal")
    return J, difference

x, theta = 2, 4
J, difference = gradient_check(x, theta)
print("difference = " + str(difference))
Expected output:
the gradient is correct
difference = 2.91933588329e-10
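A difference on the order of 1e-10 is expected here. Taylor-expanding J around theta shows why the two-sided estimate of formula (1) is preferred: its error is O(epsilon^2), whereas the one-sided estimate (J(theta + epsilon) - J(theta)) / epsilon only achieves O(epsilon):

$$\frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2\varepsilon} = J'(\theta) + \frac{\varepsilon^2}{6} J'''(\theta) + O(\varepsilon^4)$$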
def forward_propagation_n(X, Y, parameters):
    """
    Implements the forward propagation (and computes the cost) presented in Figure 3.

    Arguments:
    X -- training set for m examples
    Y -- labels for m examples
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                  W1 -- weight matrix of shape (5, 4)
                  b1 -- bias vector of shape (5, 1)
                  W2 -- weight matrix of shape (3, 5)
                  b2 -- bias vector of shape (3, 1)
                  W3 -- weight matrix of shape (1, 3)
                  b3 -- bias vector of shape (1, 1)

    Returns:
    cost -- the cost function (logistic cost for one example)
    """
    # retrieve parameters
    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Cost
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1. / m * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)

    return cost, cache
def backward_propagation_n(X, Y, cache):
    """
    Implement the backward propagation presented in figure 2.

    Arguments:
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- A dictionary with the gradients of the cost with respect to each parameter,
                 activation and pre-activation variables.
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    dZ3 = A3 - Y
    dW3 = 1. / m * np.dot(dZ3, A2.T)
    db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))    # equivalent to the Z2 > 0 mask above, since A2 = relu(Z2)
    dW2 = 1. / m * np.dot(dZ2, A1.T)
    db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))    # likewise equivalent to Z1 > 0
    dW1 = 1. / m * np.dot(dZ1, X.T)
    db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
def implement_gradient_check_n(X, Y, grads, parameters, epsilon=1e-6):  # n-dimensional gradient check
    parameters_value, _ = dictionary_to_vector(parameters)  # reshape the parameters dictionary into a vector
    grads = gradients_to_vector(grads)
    num_parameters = parameters_value.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))
    for i in range(num_parameters):
        thetaplus = np.copy(parameters_value)             # copy parameters_value into thetaplus
        thetaplus[i][0] = thetaplus[i][0] + epsilon       # add epsilon to the i-th entry
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))  # compute the cost
        thetaminus = np.copy(parameters_value)
        thetaminus[i][0] = thetaminus[i][0] - epsilon
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus))  # likewise, the cost after subtracting epsilon from the i-th entry
        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)  # gradient estimate
    difference = np.linalg.norm(grads - gradapprox) / (np.linalg.norm(grads) + np.linalg.norm(gradapprox))
    if difference < 1e-6:
        print("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
    else:
        print("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
    return difference

X, Y, parameters = gradient_check_n_test_case()
print(parameters)
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
print(gradients)
difference = implement_gradient_check_n(X, Y, gradients, parameters)
Expected output:
Your backward propagation works perfectly fine! difference = 8.26588224678e-09
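To see the failure branch in action, one can deliberately corrupt an analytic gradient and re-run the checker (a hypothetical snippet, reusing the variables defined above):

# Hypothetical sanity check (not part of the assignment): corrupt one entry of
# the analytic gradients and confirm the checker reports a large difference.
gradients["dW3"][0, 0] += 1.0
difference = implement_gradient_check_n(X, Y, gradients, parameters)
# Expected: "There is a mistake in the backward propagation!" with a difference
# far above 1e-6.

Keep in mind that the checker runs two full forward propagations per parameter, so it is a debugging tool to run occasionally, not something to call at every training iteration.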