
"""network.py~~~~~~~~~~

A module to implement the stochastic gradient descent learningalgorithm for a feedforward neural network. Gradients are calculatedusing backpropagation. Note that I have focused on making the codesimple, easily readable, and easily modifiable. It is not optimized,and omits many desirable features."""

#### Libraries
# Standard library
import random

# Third-party libraries
import numpy as np

class Network(object):

    def __init__(self, sizes):
        """The list ``sizes`` contains the number of neurons in the
        respective layers of the network.  For example, if the list
        was [2, 3, 1] then it would be a three-layer network, with the
        first layer containing 2 neurons, the second layer 3 neurons,
        and the third layer 1 neuron.  The biases and weights for the
        network are initialized randomly, using a Gaussian
        distribution with mean 0, and variance 1.  Note that the first
        layer is assumed to be an input layer, and by convention we
        won't set any biases for those neurons, since biases are only
        ever used in computing the outputs from later layers."""
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Train the neural network using mini-batch stochastic
        gradient descent.  The ``training_data`` is a list of tuples
        ``(x, y)`` representing the training inputs and the desired
        outputs.  The other non-optional parameters are
        self-explanatory.  If ``test_data`` is provided then the
        network will be evaluated against the test data after each
        epoch, and partial progress printed out.  This is useful for
        tracking progress, but slows things down substantially."""
        if test_data: n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Update the network's weights and biases by applying
        gradient descent using backpropagation to a single mini batch.
        The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
        is the learning rate."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book.  Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on.  It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        """Return the number of test inputs for which the neural
        network outputs the correct result.  Note that the neural
        network's output is assumed to be the index of whichever
        neuron in the final layer has the highest activation."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives \partial C_x /
        \partial a for the output activations."""
        return (output_activations-y)

#### Miscellaneous functions
def sigmoid(z):
    """The sigmoid function."""
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z)*(1-sigmoid(z))
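The listing above is self-contained except for the data. In the book it is driven by a separate mnist_loader module (not included here), where each training input x is a 784x1 numpy column vector, each training target y is a 10x1 one-hot column vector, and test_data pairs each x with the integer digit label so that evaluate can compare it against np.argmax. As a minimal sketch of the calling convention, here is a hypothetical toy run on the [2, 3, 1] network from the __init__ docstring; the dataset and hyperparameters below are illustrative and not part of the original post:

import numpy as np

# Toy XOR-style dataset: inputs and targets are column vectors,
# which is the shape feedforward and backprop expect.
training_data = [
    (np.array([[0.0], [0.0]]), np.array([[0.0]])),
    (np.array([[0.0], [1.0]]), np.array([[1.0]])),
    (np.array([[1.0], [0.0]]), np.array([[1.0]])),
    (np.array([[1.0], [1.0]]), np.array([[0.0]])),
]

net = Network([2, 3, 1])   # the three-layer example from the docstring
net.SGD(training_data, epochs=1000, mini_batch_size=4, eta=3.0)

# After training, the output for input (0, 1) should drift toward 1.0,
# though convergence is not guaranteed on so small a network.
print(net.feedforward(np.array([[0.0], [1.0]])))

Note that evaluate is skipped here: with a single output neuron, np.argmax always returns 0, so that method only makes sense for one-neuron-per-class outputs such as the ten digit classes of MNIST.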
