
[CS231n@Stanford] Assignment1-Q2 (Python): SVM Implementation

2016-09-07
linear_svm.py

import numpy as np
from random import shuffle

def svm_loss_naive(W, X, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    # compute the loss and the gradient
    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    for i in xrange(num_train):
        scores = X[i].dot(W)
        correct_class_score = scores[y[i]]
        for j in xrange(num_classes):
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1  # note delta = 1
            if margin > 0:
                loss += margin
                # Each violated margin adds X[i] to the gradient of the
                # offending class column and subtracts it from the correct
                # class column.
                dW[:, j] += X[i].T
                dW[:, y[i]] -= X[i].T

    # Right now the loss is a sum over all training examples, but we want it
    # to be an average instead so we divide by num_train.
    loss /= num_train

    # Add regularization to the loss.
    loss += 0.5 * reg * np.sum(W * W)

    #############################################################################
    # TODO:                                                                     #
    # Compute the gradient of the loss function and store it in dW.             #
    # Rather than first computing the loss and then computing the derivative,   #
    # it may be simpler to compute the derivative at the same time that the     #
    # loss is being computed. As a result you may need to modify some of the    #
    # code above to compute the gradient.                                       #
    #############################################################################
    # Average the accumulated gradient over the batch and add the gradient of
    # the regularization term.
    dW = dW / num_train + reg * W

    return loss, dW


def svm_loss_vectorized(W, X, y, reg):
    """
    Structured SVM loss function, vectorized implementation.

    Inputs and outputs are the same as svm_loss_naive.
    """
    loss = 0.0
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    #############################################################################
    # TODO:                                                                     #
    # Implement a vectorized version of the structured SVM loss, storing the    #
    # result in loss.                                                           #
    #############################################################################
    num_train = X.shape[0]
    num_classes = W.shape[1]
    scores = X.dot(W)  # N by C

    # Pull out each example's correct-class score and broadcast it.
    scores_correct = scores[np.arange(num_train), y]
    scores_correct = np.reshape(scores_correct, (num_train, 1))

    margins = scores - scores_correct + 1.0  # note delta = 1
    margins[np.arange(num_train), y] = 0.0   # correct class contributes no loss
    margins[margins <= 0] = 0.0              # hinge: max(0, margin)

    loss = np.sum(margins)
    loss /= num_train
    loss += 0.5 * reg * np.sum(W * W)
    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################

    #############################################################################
    # TODO:                                                                     #
    # Implement a vectorized version of the gradient for the structured SVM     #
    # loss, storing the result in dW.                                           #
    #                                                                           #
    # Hint: Instead of computing the gradient from scratch, it may be easier    #
    # to reuse some of the intermediate values that you used to compute the     #
    # loss.                                                                     #
    #############################################################################
    # Reuse margins as a coefficient matrix: 1 for every violated margin, and
    # in each correct-class entry, minus the number of violations in that row.
    margins[margins > 0] = 1.0
    margins[np.arange(num_train), y] = -np.sum(margins, axis=1)
    dW += np.dot(X.T, margins) / num_train + reg * W  # D by C
    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################

    return loss, dW
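
A quick way to sanity-check the two implementations against each other is to run both on a small random problem and compare their outputs. A minimal sketch (the shapes D=10, C=3, N=5 and the regularization value 0.5 are arbitrary assumptions, not values from the assignment):

import numpy as np
from linear_svm import svm_loss_naive, svm_loss_vectorized

# Tiny random problem, purely for a quick consistency check.
np.random.seed(0)
W = 0.001 * np.random.randn(10, 3)
X = np.random.randn(5, 10)
y = np.random.randint(3, size=5)

loss_naive, grad_naive = svm_loss_naive(W, X, y, 0.5)
loss_vec, grad_vec = svm_loss_vectorized(W, X, y, 0.5)

# Both implementations should agree up to floating-point error.
print 'loss difference: %e' % abs(loss_naive - loss_vec)
print 'gradient difference: %e' % np.linalg.norm(grad_naive - grad_vec, ord='fro')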

linear_classifier.py
import numpy as np
from linear_svm import *
from softmax import *


class LinearClassifier(object):

    def __init__(self):
        self.W = None

    def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
              batch_size=200, verbose=False):
        """
        Train this linear classifier using stochastic gradient descent.

        Inputs:
        - X: A numpy array of shape (N, D) containing training data; there are N
          training samples each of dimension D.
        - y: A numpy array of shape (N,) containing training labels; y[i] = c
          means that X[i] has label 0 <= c < C for C classes.
        - learning_rate: (float) learning rate for optimization.
        - reg: (float) regularization strength.
        - num_iters: (integer) number of steps to take when optimizing
        - batch_size: (integer) number of training examples to use at each step.
        - verbose: (boolean) If true, print progress during optimization.

        Outputs:
        A list containing the value of the loss function at each training iteration.
        """
        num_train, dim = X.shape
        num_classes = np.max(y) + 1  # assume y takes values 0...K-1 where K is number of classes
        if self.W is None:
            # lazily initialize W
            self.W = 0.001 * np.random.randn(dim, num_classes)

        # Run stochastic gradient descent to optimize W
        loss_history = []
        for it in xrange(num_iters):
            X_batch = None
            y_batch = None

            #########################################################################
            # TODO:                                                                 #
            # Sample batch_size elements from the training data and their           #
            # corresponding labels to use in this round of gradient descent.        #
            # Store the data in X_batch and their corresponding labels in           #
            # y_batch; after sampling X_batch should have shape (batch_size, dim)   #
            # and y_batch should have shape (batch_size,)                           #
            #                                                                       #
            # Hint: Use np.random.choice to generate indices. Sampling with         #
            # replacement is faster than sampling without replacement.              #
            #########################################################################
            # Note: the hint recommends replace=True for speed; sampling without
            # replacement also works, just more slowly.
            sample_index = np.random.choice(num_train, batch_size, replace=False)
            X_batch = X[sample_index, :]
            y_batch = y[sample_index]
            #########################################################################
            #                         END OF YOUR CODE                              #
            #########################################################################

            # evaluate loss and gradient
            loss, grad = self.loss(X_batch, y_batch, reg)
            loss_history.append(loss)

            # perform parameter update
            #########################################################################
            # TODO:                                                                 #
            # Update the weights using the gradient and the learning rate.          #
            #########################################################################
            self.W -= learning_rate * grad
            #########################################################################
            #                         END OF YOUR CODE                              #
            #########################################################################

            if verbose and it % 100 == 0:
                print 'iteration %d / %d: loss %f' % (it, num_iters, loss)

        return loss_history

    def predict(self, X):
        """
        Use the trained weights of this linear classifier to predict labels for
        data points.

        Inputs:
        - X: A numpy array of shape (N, D) containing data; each row is a
          D-dimensional point.

        Returns:
        - y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
          array of length N, and each element is an integer giving the predicted
          class.
        """
        y_pred = np.zeros(X.shape[0])
        ###########################################################################
        # TODO:                                                                   #
        # Implement this method. Store the predicted labels in y_pred.            #
        ###########################################################################
        scores = X.dot(self.W)
        y_pred = np.argmax(scores, axis=1)
        ###########################################################################
        #                           END OF YOUR CODE                              #
        ###########################################################################
        return y_pred

    def loss(self, X_batch, y_batch, reg):
        """
        Compute the loss function and its derivative.
        Subclasses will override this.

        Inputs:
        - X_batch: A numpy array of shape (N, D) containing a minibatch of N
          data points; each point has dimension D.
        - y_batch: A numpy array of shape (N,) containing labels for the minibatch.
        - reg: (float) regularization strength.

        Returns: A tuple containing:
        - loss as a single float
        - gradient with respect to self.W; an array of the same shape as W
        """
        pass


class LinearSVM(LinearClassifier):
    """ A subclass that uses the Multiclass SVM loss function """

    def loss(self, X_batch, y_batch, reg):
        return svm_loss_vectorized(self.W, X_batch, y_batch, reg)


class Softmax(LinearClassifier):
    """ A subclass that uses the Softmax + Cross-entropy loss function """

    def loss(self, X_batch, y_batch, reg):
        return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
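
With both files in place, the classifier can be exercised end to end. A minimal sketch on synthetic data, just to show the call sequence (the shapes and hyperparameters below are illustrative assumptions; the assignment itself uses CIFAR-10):

import numpy as np
from linear_classifier import LinearSVM

# Synthetic stand-in for CIFAR-10: 500 examples of dimension 3073
# (3072 pixels plus a bias dimension), 10 classes. Purely illustrative.
np.random.seed(1)
X_toy = np.random.randn(500, 3073)
y_toy = np.random.randint(10, size=500)

svm = LinearSVM()
loss_history = svm.train(X_toy, y_toy, learning_rate=1e-7, reg=5e4,
                         num_iters=300, batch_size=200, verbose=True)

# On real data the loss should trend downward over the iterations;
# random labels like these will not learn anything meaningful.
print 'final loss: %f' % loss_history[-1]
print 'training accuracy: %f' % np.mean(svm.predict(X_toy) == y_toy)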

Code to complete in svm.ipynb

# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from linear_classifier import LinearSVM

# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-5]
regularization_strengths = [5e4, 1e5]

# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1    # The highest validation accuracy that we have seen so far.
best_svm = None  # The LinearSVM object that achieved the highest validation rate.

################################################################################
# TODO:                                                                        #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the      #
# training set, compute its accuracy on the training and validation sets, and  #
# store these numbers in the results dictionary. In addition, store the best   #
# validation accuracy in best_val and the LinearSVM object that achieves this  #
# accuracy in best_svm.                                                        #
#                                                                              #
# Hint: You should use a small value for num_iters as you develop your         #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation   #
# code with a larger value for num_iters.                                      #
################################################################################
iters = 2000
for lr in learning_rates:
    for reg in regularization_strengths:
        svm = LinearSVM()
        svm.train(X_train, y_train, learning_rate=lr, reg=reg, num_iters=iters)

        y_train_pred = svm.predict(X_train)
        acc_train = np.mean(y_train == y_train_pred)

        y_val_pred = svm.predict(X_val)
        acc_val = np.mean(y_val == y_val_pred)

        results[(lr, reg)] = (acc_train, acc_val)

        # Keep the model with the best validation accuracy seen so far.
        if best_val < acc_val:
            best_val = acc_val
            best_svm = svm
################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy)

print 'best validation accuracy achieved during cross-validation: %f' % best_val

Output:

lr 1.000000e-07 reg 5.000000e+04 train accuracy: 0.370367 val accuracy: 0.375000
lr 1.000000e-07 reg 1.000000e+05 train accuracy: 0.354571 val accuracy: 0.364000
lr 5.000000e-05 reg 5.000000e+04 train accuracy: 0.100265 val accuracy: 0.087000
lr 5.000000e-05 reg 1.000000e+05 train accuracy: 0.100265 val accuracy: 0.087000
best validation accuracy achieved during cross-validation: 0.375000
linear SVM on raw pixels final test set accuracy: 0.369000
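
The final line above comes from evaluating best_svm on the test split. A minimal sketch of that step, assuming X_test and y_test were prepared earlier in the notebook:

# Evaluate the best svm found during validation on the test set.
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print 'linear SVM on raw pixels final test set accuracy: %f' % test_accuracy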


