
TensorFlow Learning -- MNIST: Getting Started and Going Further (with the input_data.py script)

Build the softmax regression model:

y = softmax(Wx + b)

The loss is measured with the cross-entropy:

CrossEntropy = -∑ y' log y

where y' denotes the true label. Since true labels are usually one-hot encoded (a 1 at the position of the correct class and 0 elsewhere), only the true class's term survives the sum, so the loss can be written as:

CrossEntropy = -y' log y

That is:

cross_entropy = -tf.reduce_sum(y_*tf.log(y))


During training, a mini-batch of 100 data points is randomly drawn from the training set at each step.
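To make the formulas concrete, here is a minimal NumPy sketch (the logit values are made up for illustration) of how the one-hot label collapses the cross-entropy sum to a single -log term:

import numpy as np

# Toy softmax output for one sample over 3 classes
logits = np.array([2.0, 1.0, 0.1])
probs = np.exp(logits) / np.sum(np.exp(logits))  # approx. [0.66, 0.24, 0.10]
# One-hot true label: class 0
y_true = np.array([1.0, 0.0, 0.0])
# The sum reduces to -log(probability assigned to the true class)
cross_entropy = -np.sum(y_true * np.log(probs))  # == -np.log(probs[0]), approx. 0.42
print(cross_entropy)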

Getting Started with MNIST

Code & comments:

#!/usr/bin/python
# coding:utf-8

import tensorflow as tf
import input_data
# Load the data
mnist = input_data.read_data_sets('Mnist_data', one_hot=True)

# x is not a specific value but a placeholder:
# it can hold any number of MNIST images, each flattened into a 784-dimensional vector
x = tf.placeholder("float", [None, 784])
# A Variable is a modifiable tensor
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# y = softmax(Wx + b)
y = tf.nn.softmax(tf.matmul(x, W)+b)
# Add another placeholder for the true labels
y_ = tf.placeholder("float", [None, 10])
# Compute the cross-entropy
# tf.reduce_sum sums over all elements of the tensor
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
# Minimize the cross-entropy with a learning rate of 0.01
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# Launch the model in a Session
sess = tf.Session()
# Initialize the variables
init = tf.initialize_all_variables()
sess.run(init)

# Run the training loop 1000 times
for i in range(1000):
    # Randomly draw a mini-batch of 100 points from the training data
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # Run train_step, feeding the batch in place of the placeholders
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# Check whether each prediction matches the true label; returns a list of booleans
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# Cast the booleans to floats and take the mean:
# e.g. [True, False, True, True] becomes [1, 0, 1, 1], which averages to 0.75
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Evaluate the learned model's accuracy on the test set
print sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})


Output:

result:0.9149


MNIST: Going Further

#!/usr/bin/python
# coding:utf-8

import tensorflow as tf
import input_data
# Load the data
mnist = input_data.read_data_sets('Mnist_data', one_hot=True)

# Define placeholders for the input images x and the output classes y_
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
# Define the weights W and bias b as Variables, initialized to zero vectors
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Class prediction
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Cross-entropy loss
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
# Train the model:
# minimize the cross-entropy by steepest gradient descent with step size 0.01
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# Run the graph in an interactive session
sess = tf.InteractiveSession()
# Initialize the variables
sess.run(tf.initialize_all_variables())
# Load 50 training samples per iteration, then run train_step once,
# feeding the training batch into x and y_ via feed_dict
for i in range(1000):
    # next_batch returns a tuple: element 0 is the samples, element 1 the labels
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

# Evaluate the model
# Use tf.equal to check whether each prediction matches the true label
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# Cast the booleans to floats (1 for correct, 0 for wrong) and take the mean
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})


Output:

0.9092
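The main difference from the beginner version is tf.InteractiveSession, which installs itself as the default session on construction, so Tensor.eval() and Operation.run() can be called without naming a session. A minimal sketch of the equivalence:

# With a plain Session, the session object is used explicitly:
sess = tf.Session()
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))

# With an InteractiveSession, eval() picks up the default session:
sess = tf.InteractiveSession()
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))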


Related APIs

tf.reduce_sum computes the sum of elements across dimensions of a tensor.

def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):


Reduces input_tensor along the dimensions given in axis. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is true, the reduced dimensions are instead retained with length 1. If axis is not given, all dimensions are reduced and a tensor with a single element is returned.
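A quick example of these options (tensor values made up for illustration; keep_dims is the TF 1.x spelling used by this signature):

import tensorflow as tf

x = tf.constant([[1, 1, 1],
                 [1, 1, 1]])
with tf.Session() as sess:
    print(sess.run(tf.reduce_sum(x)))           # 6: all dimensions reduced
    print(sess.run(tf.reduce_sum(x, axis=0)))   # [2 2 2]: summed down the rows
    print(sess.run(tf.reduce_sum(x, axis=1)))   # [3 3]: summed across the columns
    print(sess.run(tf.reduce_sum(x, axis=1, keep_dims=True)))  # [[3] [3]]: rank preserved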

Appendix: the input_data.py script

The input_data.py script automatically downloads and reads the dataset.

input_data.py:

#!/usr/bin/python
# coding:utf-8

# Functions for downloading and reading MNIST data
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tensorflow.python.platform
import numpy
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'

# Download the data from Yann LeCun's website unless it is already present
def maybe_download(filename, work_directory):
    if not os.path.exists(work_directory):
        os.mkdir(work_directory)
    filepath = os.path.join(work_directory, filename)
    # If the file does not exist at the given path, download it from the source site
    if not os.path.exists(filepath):
        filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    return filepath
def _read32(bytestream):
    # MNIST files store integers as big-endian 32-bit unsigned values
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
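# For reference, a sketch of the IDX header layout these readers assume
# (field values shown are illustrative, for the MNIST training images file):
# the first 16 bytes of an images file are four big-endian uint32 fields,
# read by _read32 in turn: magic (2051), image count (60000), rows (28), cols (28).
# Label files carry only a magic (2049) and an item count before the label bytes.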

# Extract the images into a 4-D uint8 numpy array [index, y, x, depth]
def extract_images(filename):
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' % (magic, filename))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data

# Convert class labels from scalars to one-hot vectors
def dense_to_one_hot(labels_dense, num_classes=10):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
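# For example (illustrative): dense_to_one_hot(numpy.array([2, 0]), num_classes=3)
# sets flat indices [0*3+2, 1*3+0] = [2, 3] of the zero matrix to 1, giving
#   [[0., 0., 1.],
#    [1., 0., 0.]]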

# Extract the labels into a 1-D uint8 numpy array [index]
def extract_labels(filename, one_hot=False):
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' % (magic, filename))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return dense_to_one_hot(labels)
        return labels

# The DataSet class.
# The one_hot arg is used only when fake_data is true.
# `dtype` can be `uint8`, leaving the input in `[0, 255]`, or `float32`, rescaling it to `[0, 1]`.
class DataSet(object):
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=tf.float32):
        dtype = tf.as_dtype(dtype).base_dtype
        if dtype not in (tf.uint8, tf.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
            self._num_examples = images.shape[0]
            # Reshape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == tf.float32:
                # Convert from [0, 255] to [0.0, 1.0]
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    # Return the next `batch_size` examples from this data set
    def next_batch(self, batch_size, fake_data=False):
        if fake_data:
            fake_image = [1] * 784
            if self.one_hot:
                fake_label = [1] + [0] * 9
            else:
                fake_label = 0
            return [fake_image for _ in xrange(batch_size)], [fake_label for _ in xrange(batch_size)]
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        # Finished an epoch
        if self._index_in_epoch > self._num_examples:
            # Shuffle the data
            self._epochs_completed += 1
            perm = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start the next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]

# Read the data sets
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):
    class DataSets(object):
        pass
    data_sets = DataSets()
    # If fake_data is true, return empty data sets
    if fake_data:
        def fake():
            return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
        data_sets.train = fake()
        data_sets.validation = fake()
        data_sets.test = fake()
        return data_sets
    # File names of the training and test data
    TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
    TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
    TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
    TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
    VALIDATION_SIZE = 5000
    # Download (if necessary) and read the training and test data
    local_file = maybe_download(TRAIN_IMAGES, train_dir)
    train_images = extract_images(local_file)
    local_file = maybe_download(TRAIN_LABELS, train_dir)
    train_labels = extract_labels(local_file, one_hot=one_hot)
    local_file = maybe_download(TEST_IMAGES, train_dir)
    test_images = extract_images(local_file)
    local_file = maybe_download(TEST_LABELS, train_dir)
    test_labels = extract_labels(local_file, one_hot=one_hot)
    # Use the first 5000 training examples for validation
    validation_images = train_images[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    # Use everything after the first 5000 for training
    train_images = train_images[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]
    # Build the train, validation and test data sets
    data_sets.train = DataSet(train_images, train_labels, dtype=dtype)
    data_sets.validation = DataSet(validation_images, validation_labels, dtype=dtype)
    data_sets.test = DataSet(test_images, test_labels, dtype=dtype)
    return data_sets
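A quick usage sketch of the script (assuming, as in the scripts above, that the MNIST files live in or will be downloaded to a local Mnist_data directory):

import input_data

mnist = input_data.read_data_sets('Mnist_data', one_hot=True)
# The train / validation / test splits hold 55000 / 5000 / 10000 examples
print(mnist.train.num_examples)
# next_batch returns an (images, labels) tuple with shapes (100, 784) and (100, 10)
images, labels = mnist.train.next_batch(100)
print(images.shape)
print(labels.shape)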