# -*- coding: utf-8 -*-
'''
Created on 2018-04-19
@author: user
Source: https://github.com/mnielsen/neural-networks-and-deep-learning
@summary: MLP (multilayer perceptron) with sigmoid neurons
A module to implement the stochastic gradient descent learning
algorithm for a feedforward neural network.  Gradients are calculated
using backpropagation.  Note that I have focused on making the code
simple, easily readable, and easily modifiable.  It is not optimized,
and omits many desirable features.
'''

#### Libraries
# Standard library
import random
import time
# Third-party libraries
import numpy as np
from PIL import Image

# Local module
import mnist_loader


class Network(object):

    def __init__(self, sizes):
        """The list ``sizes`` contains the number of neurons in the
        respective layers of the network.  For example, if the list
        was [2, 3, 1] then it would be a three-layer network, with the
        first layer containing 2 neurons, the second layer 3 neurons,
        and the third layer 1 neuron.  The biases and weights for the
        network are initialized randomly, using a Gaussian
        distribution with mean 0, and variance 1.  Note that the first
        layer is assumed to be an input layer, and by convention we
        won't set any biases for those neurons, since biases are only
        ever used in computing the outputs from later layers."""
        self.num_layers = len(sizes)
        self.sizes = sizes
        # Randomly initialize biases and weights
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    # Forward pass
    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Train the neural network using mini-batch stochastic
        gradient descent.  The ``training_data`` is a list of tuples
        ``(x, y)`` representing the training inputs and the desired
        outputs.  The other non-optional parameters are
        self-explanatory.  If ``test_data`` is provided then the
        network will be evaluated against the test data after each
        epoch, and partial progress printed out.  This is useful for
        tracking progress, but slows things down substantially."""
        if test_data: n_test = len(test_data)
        n = len(training_data)
        for j in xrange(epochs):
            random.shuffle(training_data)
            # Split the training data into mini-batches
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in xrange(0, n, mini_batch_size)]
            # Apply one update per mini-batch until the whole data set has been used
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print "Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test)
            else:
                print "Epoch {0} complete".format(j)

    def update_mini_batch(self, mini_batch, eta):
        """Update the network's weights and biases by applying
        gradient descent using backpropagation to a single mini batch.
        The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
        is the learning rate."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # For each example in the mini-batch, call backprop to obtain
        # the partial derivatives of the cost with respect to each parameter
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x] # list to store all the activations, layer by layer
        zs = [] # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book.  Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on.  It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        for l in xrange(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        """Return the number of test inputs for which the neural
        network outputs the correct result. Note that the neural
        network's output is assumed to be the index of whichever
        neuron in the final layer has the highest activation."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives \partial C_x /
        \partial a for the output activations."""
        return (output_activations-y)


#### Miscellaneous functions
def sigmoid(z):
    """The sigmoid function."""
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z)*(1-sigmoid(z))


if __name__ == "__main__":
    start = time.clock()
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    # Display the first training image
    I = training_data[0][0]
    I.resize((28, 28))
    im = Image.fromarray((I*255).astype('uint8'))
    im.show()
    # Train the MLP
    net = Network([784, 30, 10])
    net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
    end = time.clock()
    print('finish all in %s' % str(end - start))
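For reference, this is the math the two key methods above implement, assuming the quadratic cost, sigmoid activations, a mini-batch of size $m$, and learning rate $\eta$. `update_mini_batch` performs the averaged gradient step

$$ w \to w - \frac{\eta}{m}\sum_x \frac{\partial C_x}{\partial w}, \qquad b \to b - \frac{\eta}{m}\sum_x \frac{\partial C_x}{\partial b} $$

and `backprop` computes each per-example gradient with the standard recurrences

$$ \delta^L = (a^L - y)\odot\sigma'(z^L), \qquad \delta^l = \big((w^{l+1})^T\delta^{l+1}\big)\odot\sigma'(z^l) $$

$$ \frac{\partial C_x}{\partial b^l} = \delta^l, \qquad \frac{\partial C_x}{\partial w^l} = \delta^l\,(a^{l-1})^T $$

where $(a^L - y)$ is exactly what `cost_derivative` returns and $\odot$ denotes the element-wise product.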
# -*- coding: utf-8 -*-
'''
Created on 2018-04-19
@author: user
'''
"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data.  For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``.  In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""#### Libraries
# Standard library
import cPickle
import gzip

# Third-party libraries
import numpy as np


def load_data():
    """Return the MNIST data as a tuple containing the training data,
    the validation data, and the test data.

    The ``training_data`` is returned as a tuple with two entries.
    The first entry contains the actual training images.  This is a
    numpy ndarray with 50,000 entries.  Each entry is, in turn, a
    numpy ndarray with 784 values, representing the 28 * 28 = 784
    pixels in a single MNIST image.

    The second entry in the ``training_data`` tuple is a numpy ndarray
    containing 50,000 entries.  Those entries are just the digit
    values (0...9) for the corresponding images contained in the first
    entry of the tuple.

    The ``validation_data`` and ``test_data`` are similar, except
    each contains only 10,000 images.

    This is a nice data format, but for use in neural networks it's
    helpful to modify the format of the ``training_data`` a little.
    That's done in the wrapper function ``load_data_wrapper()``, see
    below."""
    f = gzip.open('./data/mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = cPickle.load(f)
    f.close()
    return (training_data, validation_data, test_data)

def load_data_wrapper():
    """Return a tuple containing ``(training_data, validation_data,
    test_data)``. Based on ``load_data``, but the format is more
    convenient for use in our implementation of neural networks.

    In particular, ``training_data`` is a list containing 50,000
    2-tuples ``(x, y)``.  ``x`` is a 784-dimensional numpy.ndarray
    containing the input image.  ``y`` is a 10-dimensional
    numpy.ndarray representing the unit vector corresponding to the
    correct digit for ``x``.

    ``validation_data`` and ``test_data`` are lists containing 10,000
    2-tuples ``(x, y)``.  In each case, ``x`` is a 784-dimensional
    numpy.ndarray containing the input image, and ``y`` is the
    corresponding classification, i.e., the digit values (integers)
    corresponding to ``x``.

    Obviously, this means we're using slightly different formats for
    the training data and the validation / test data.  These formats
    turn out to be the most convenient for use in our neural network
    code."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return (training_data, validation_data, test_data)

def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the jth
    position and zeroes elsewhere.  This is used to convert a digit
    (0...9) into a corresponding desired output from the neural
    network."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
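The loader can be smoke-tested on its own before training. Below is a minimal sketch (it assumes Python 2.7 and that `./data/mnist.pkl.gz` is in place, matching the path hard-coded in `load_data` above):

# Minimal smoke test for mnist_loader (Python 2.7; assumes ./data/mnist.pkl.gz exists)
import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

x, y = training_data[0]
print "training pairs:", len(training_data)   # 50000
print "image shape:", x.shape                 # (784, 1) column vector
print "label shape:", y.shape                 # (10, 1) one-hot vector

# validation/test labels are plain digits, not one-hot vectors
tx, ty = test_data[0]
print "test image shape:", tx.shape, "label:", ty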
