'''
# 2018-06-25, Monday, week 26, day 176 of the year. SZ
Handwriting recognition program, file 1:
This program uses the LeNet-5 convolutional neural network model.
It defines the forward-propagation process and the network parameters; the inference
function can be called directly for both training and testing.
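A minimal usage sketch (shapes follow the constants defined below; the placeholder
name is illustrative):
    images = tf.placeholder(tf.float32, [100, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
    logits = inference(images, train=True, regularizer=None)  # shape (100, NUM_LABELS)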
NUM_LABELS = 10 is the number of labels; regularizer is the regularization function,
and a variable's attributes include its dimensions (shape).
tf.get_variable creates the filter's weight and bias variables. In the shape
[CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP], CONV1_SIZE is the filter size,
NUM_CHANNELS is the depth of the current layer, and CONV1_DEEP is the filter depth.
tf.nn.conv2d implements the convolutional forward propagation:
input_tensor is the node matrix of the current layer (a four-dimensional matrix whose
first dimension indexes the images in a batch and whose remaining three dimensions
form one node matrix),
conv1_weights holds the convolution-layer weights,
strides=[1,1,1,1] gives the stride in each dimension,
padding='SAME' selects all-zero padding; 'VALID' adds no padding. With stride 1,
all-zero padding keeps the forward-propagation result the same size as the current
layer's matrix.
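For example (a minimal sketch; the tensor names are illustrative only):
    x = tf.placeholder(tf.float32, [1, 28, 28, 1])
    w = tf.get_variable('w_demo', [5, 5, 1, 32])
    same = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')    # shape (1, 28, 28, 32)
    valid = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='VALID')  # shape (1, 24, 24, 32)
With 'SAME', output side = ceil(28 / 1) = 28; with 'VALID', output side = ceil((28 - 5 + 1) / 1) = 24.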
conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1,1,1,1], padding='SAME')
tf.nn.bias_add adds the bias term to every node.
tf.truncated_normal_initializer draws random values from a truncated normal
distribution (seed, a Python integer, creates the random seed; see tf.set_random_seed).
tf.nn.relu() is the activation function that removes linearity.
tf.variable_scope isolates each layer's variables in its own namespace, so names only
need to be unique within the current layer and clashes across layers are not a concern.
pool1 = tf.nn.max_pool(relu1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
implements the forward propagation of a max-pooling layer: ksize gives the filter size
and strides the step size (the first and fourth elements of both must be 1), and
padding gives the fill method.
The layer's result then gets the activation function and bias term: f(Wx + b), where
f(x) is the activation function and b is the bias term.
When every neuron's output passes through a nonlinear function, the whole model is no
longer linear; this nonlinear function is the activation function.
Three common activation functions: ReLU, Sigmoid, and tanh.
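In TensorFlow these are tf.nn.relu, tf.sigmoid and tf.tanh; a minimal sketch (x, W
and b are illustrative):
    a = tf.nn.relu(tf.matmul(x, W) + b)   # ReLU: max(0, Wx + b)
    a = tf.sigmoid(tf.matmul(x, W) + b)   # Sigmoid: 1 / (1 + exp(-(Wx + b)))
    a = tf.tanh(tf.matmul(x, W) + b)      # tanh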
'''
import tensorflow as tf

# Node counts for the input layer, the output layer and the first hidden layer
INPUT_NODE = 784    # 28*28 pixels per image
OUTPUT_NODE = 10    # ten output nodes, one per classification result (digits 0-9)
IMAGE_SIZE = 28
NUM_CHANNELS = 1    # 1 for grayscale images, 3 for color; the depth of the current layer
NUM_LABELS = 10     # number of labels

# Size and depth of the first convolutional layer
CONV1_DEEP = 32     # filter depth
CONV1_SIZE = 5      # filter size

# Size and depth of the second convolutional layer
CONV2_DEEP = 64
CONV2_SIZE = 5

# Number of nodes in the fully connected layer
FC_SIZE = 512

# Forward propagation of the convolutional network. Dropout improves robustness and
# prevents overfitting; it is applied only during training, and the `train` parameter
# distinguishes the training process from the test process.
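# A minimal dropout illustration (a sketch; names illustrative): during training, each
# element is kept with probability keep_prob and scaled by 1/keep_prob so the expected
# sum is unchanged, e.g.
#   fc = tf.nn.dropout(fc, 0.5)   # roughly half the activations become 0, the rest double
# At test time dropout is skipped entirely.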
def inference(input_tensor, train, regularizer):
    # Layer 1: convolution. Declare the variables and implement forward propagation.
    # Input is the 28*28*1 raw image matrix; with all-zero padding the output is 28*28*32.
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            'weight', [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # CONV1_DEEP is the filter depth, which is also the depth of the next layer's node matrix.
        conv1_biases = tf.get_variable(
            'bias', [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        # 5x5 filter of depth 32, stride 1, all-zero padding.
        # With all-zero padding: output side length = input side length / stride = 28 / 1 = 28.
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # Layer 2: max pooling. Filter side length 2, stride 2, all-zero padding.
    # Input is the previous output 28*28*32; output is 14*14*32.
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Layer 3: convolution. Output is a 14*14*64 matrix.
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable(
            'weight', [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            'bias', [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        # 5x5 filter of depth 64, stride 1, all-zero padding.
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Layer 4: max pooling. Filter 2*2, stride 2*2; output is a 7*7*64 matrix.
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Convert the pooling output into the input format of the fully connected layer:
    # flatten the multi-dimensional matrix into a vector.
    pool_shape = pool2.get_shape().as_list()  # the dimensions, including the batch size
    # Length of the flattened vector = width * height * depth; pool_shape[0] is the batch size.
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    # Reshape the layer-4 output into a batch of vectors.
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    # Layer 5: fully connected. Input is the flattened vector of length 7*7*64 = 3136;
    # output is a vector of length 512. Dropout is introduced here: during training it
    # randomly sets part of the layer's outputs to 0 to avoid overfitting.
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            'weight', [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable('bias', [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # Layer 6: fully connected. Input is the length-512 vector; output has length 10.
    # Passing this output through softmax gives the final classification result.
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            'weight', [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    # Return the output of layer 6.
    return logit

########################################### The training part follows ###########################################
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import os
import numpy as np

BATCH_SIZE = 100  # number of training examples in one batch
LEARNING_RATE_BASE = 0.8      # initial learning rate
LEARNING_RATE_DECAY = 0.99    # learning-rate decay rate
REGULARIZATION_RATE = 0.0001  # regularization coefficient
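# For reference, tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE) adds
# REGULARIZATION_RATE * sum(w^2) / 2 to the loss for each regularized weight matrix,
# penalizing large weights to curb overfitting.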
TRAINING_STEPS = 30000        # number of training steps
MOVING_AVERAGE_DECAY = 0.99   # moving-average decay rate; controls how fast the model
                              # updates and makes it more robust on test data
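# For reference, the moving average keeps a shadow copy of each variable updated as
#   shadow = decay * shadow + (1 - decay) * variable
# (when num_updates is supplied, decay = min(decay, (1 + num_updates) / (10 + num_updates))).
# A minimal sketch (names illustrative):
#   v = tf.Variable(0.0)
#   ema = tf.train.ExponentialMovingAverage(0.99, num_updates=global_step)
#   maintain_op = ema.apply([v])  # running maintain_op after each step updates the shadow value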
MODEL_SAVE_PATH = 'D:\\ST\\Python_work\\program\\手写识别'
MODEL_NAME = "mnist_model"def train(mnist):# 定义输入输出placeholder。placeholder定义了一个位置,程序运行时候给这个位置提供数据。这个机制提供输入数据,输入为思维矩阵x = tf.placeholder(tf.float32, [ BATCH_SIZE,                             #第一维表示一个batch中样例的个数mnist_inference.IMAGE_SIZE,          #第二,三维表示图片的尺寸mnist_inference.IMAGE_SIZE,mnist_inference.NUM_CHANNELS],        #第四维表示图片深度,黑白图片,深度1,彩色图片,深度3name='x-input')#把输入训练数据格式调整为一个四维矩阵,并把调整后的数据传入sess.run的过程reshaped_xs = np.reshape(xs, (BATCH_SIZE,                           #第一维表示一个batch中样例的个数mnist_inference.IMAGE_SIZE,          #第二,三维表示图片的尺寸mnist_inference.IMAGE_SIZE,mnist_inference.NUM_CHANNELS,         #第四维表示图片深度,黑白图片,深度1,彩色图片,深度3))y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE) #L2范数正则化y = mnist_inference.inference(x, regularizer) #预测值global_step = tf.Variable(0, trainable=False) #定义存储训练轮数的变量# 定义损失函数、学习率、滑动平均操作以及训练过程。variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)variables_averages_op = variable_averages.apply(tf.trainable_variables())cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))cross_entropy_mean = tf.reduce_mean(cross_entropy)loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses')) #总损失=交叉熵损失和正则化损失learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, #基础学习率global_step,        #当前迭代轮数mnist.train.num_examples / BATCH_SIZE, #过完所有训练数据需要的迭代次数LEARNING_RATE_DECAY,                  #学习率衰减速度staircase=True)train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)#每次循环需要通过反向传播来更新参数,又要更新参数的每一个滑动平均值with tf.control_dependencies([train_step, variables_averages_op]):train_op = tf.no_op(name='train')# 初始化TensorFlow持久化类。saver = tf.train.Saver()#初始化会话,开始训练过程with tf.Session() as sess:tf.global_variables_initializer().run()for i in range(TRAINING_STEPS):xs, ys = mnist.train.next_batch(BATCH_SIZE)_, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})if i % 1000 == 0: #每1000轮输出一次损失,保存模型,实现持久化print("After %d training step(s), loss on training batch is %g." % (step, loss_value))saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step) #def main(argv=None):mnist = input_data.read_data_sets('D:\\ST\\Python_work\\program\\手写识别', one_hot=True)train(mnist)if __name__ == '__main__':main()'''Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting D:\ST\Python_work\program\手写识别\train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting D:\ST\Python_work\program\手写识别\train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting D:\ST\Python_work\program\手写识别\t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting D:\ST\Python_work\program\手写识别\t10k-labels-idx1-ubyte.gz
2018-06-25 19:14:55.952000: I C:\tf_jenkins\home\workspace\rel-win\M\windows\PY\35\tensorflow\core\platform\cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
After 1 training step(s), loss on training batch is 2.96337.
After 1001 training step(s), loss on training batch is 0.21122.
After 2001 training step(s), loss on training batch is 0.195296.
After 3001 training step(s), loss on training batch is 0.147966.
After 4001 training step(s), loss on training batch is 0.121113.
After 5001 training step(s), loss on training batch is 0.104925.
After 6001 training step(s), loss on training batch is 0.0969063.
After 7001 training step(s), loss on training batch is 0.0967676.
After 8001 training step(s), loss on training batch is 0.0805094.
After 9001 training step(s), loss on training batch is 0.0758026.
After 10001 training step(s), loss on training batch is 0.0662473.
After 11001 training step(s), loss on training batch is 0.0667674.
After 12001 training step(s), loss on training batch is 0.0615224.
After 13001 training step(s), loss on training batch is 0.0548805.
After 14001 training step(s), loss on training batch is 0.0576472.
After 15001 training step(s), loss on training batch is 0.0558432.
After 16001 training step(s), loss on training batch is 0.050817.
After 17001 training step(s), loss on training batch is 0.04974.
After 18001 training step(s), loss on training batch is 0.0424435.
After 19001 training step(s), loss on training batch is 0.0423194.
After 20001 training step(s), loss on training batch is 0.0413847.
After 21001 training step(s), loss on training batch is 0.0433296.
After 22001 training step(s), loss on training batch is 0.0370582.
After 23001 training step(s), loss on training batch is 0.0422068.
After 24001 training step(s), loss on training batch is 0.0377206.
After 25001 training step(s), loss on training batch is 0.0377879.
After 26001 training step(s), loss on training batch is 0.0397268.
After 27001 training step(s), loss on training batch is 0.035891.
After 28001 training step(s), loss on training batch is 0.0405907.
After 29001 training step(s), loss on training batch is 0.0337722.
[Finished in 479.9s]
'''

########################################### My problem code, training part (my code 2) ###########################################
'''
Source code:
https://github.com/caicloud/tensorflow-tutorial/tree/master/Deep_Learning_with_TensorFlow
# 2018-06-25, Monday, week 26, day 176 of the year. SZ
Handwriting recognition program, file 2:
defines the training process of the neural network.
'''
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the constants and forward-propagation function defined in mnist_inference.py.
import mnist_inference

# Configure the network's hyperparameters.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
# Path and file name for saving the model
MODEL_SAVE_PATH = 'D:\\ST\\Python_work\\program\\手写识别'
MODEL_NAME = 'model.ckpt'

def train(mnist):
    # Define the input and output placeholders.
    # Note: this feeds a flat 784-dimensional input and calls inference with two
    # arguments, which matches a fully connected mnist_inference rather than the
    # LeNet-5 inference(input_tensor, train, regularizer) defined in file 1.
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Use the forward-propagation process defined in mnist_inference.py directly.
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, learning rate, moving-average op and training op.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # The model is not evaluated on validation data during training; validation
        # and testing are done by a separate program.
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            # Save the model every 1000 steps.
            if i % 1000 == 0:
                # Print the current loss.
                print('After %d training step(s), loss on training batch is %g' % (step, loss_value))
                # Save the model; the number of training steps is appended to the file name.
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets('D:\\ST\\Python_work\\program\\手写识别', one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()

########################################### Evaluation part (instructor's code 3) ###########################################
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import mnist_train

# Interval between evaluations: every EVAL_INTERVAL_SECS seconds, load the newest
# model and measure its accuracy on the validation data.
EVAL_INTERVAL_SECS = 50

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        # Prepare the validation set.
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        # Predicted values: compute the forward-propagation result with the shared function.
        y = mnist_inference.inference(x, None)

        # Compute the accuracy from the forward result; tf.argmax(y, 1) gives the
        # predicted class of each input example.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model by renaming variables, so the forward propagation does not
        # need to call the moving-average functions to obtain the averaged values.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                # MODEL_SAVE_PATH = 'D:\\ST\\Python_work\\program\\手写识别'
                # get_checkpoint_state finds the newest model file in the directory
                # through the checkpoint file.
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                # print(ckpt)  # prints None: the file is not found, even though I can see it on disk.
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Get the number of training steps at save time from the file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets('D:\\ST\\Python_work\\program\\手写识别', one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    main()

'''
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting D:\ST\Python_work\program\手写识别\train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting D:\ST\Python_work\program\手写识别\t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting D:\ST\Python_work\program\手写识别\t10k-labels-idx1-ubyte.gz
2018-06-25 19:35:27.932000: I C:\tf_jenkins\home\workspace\rel-win\M\windows\PY\35\tensorflow\core\platform\cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
No checkpoint file found
[Finished in 19.6s]
'''

########################################### My problem code, evaluation part (my own code) ###########################################
'''
# 2018-06-25, Monday, week 26, day 176 of the year. SZ
Handwriting recognition program, file 3:
defines the evaluation process that checks the trained model on the validation set.
'''
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the constants and functions defined in mnist_inference.py and mnist_train.py.
import mnist_inference
import mnist_train

# Every EVAL_INTERVAL_SECS seconds, load the newest model and measure its accuracy
# on the validation set.
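# For reference, variables_to_restore() returns a map from shadow-variable names to
# the original variables, e.g. {'v/ExponentialMovingAverage': v}, so that Saver
# restores the moving-averaged values into the ordinary variables. A minimal sketch
# (names illustrative):
#   v = tf.Variable(0.0, name='v')
#   ema = tf.train.ExponentialMovingAverage(0.99)
#   saver = tf.train.Saver(ema.variables_to_restore())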
EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output format.
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        # Call the function from the other file to compute the forward result.
        y = mnist_inference.inference(x, None)

        # Use the forward result to compute the accuracy.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model by renaming variables, so the forward propagation defined in
        # mnist_inference.py can be reused unchanged without calling the moving-average
        # functions to obtain the averaged values.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Every EVAL_INTERVAL_SECS seconds, recompute the accuracy to track how it
        # changes during training.
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state() finds the newest model file in the
                # directory through the checkpoint file.
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Get the number of training steps at save time from the file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print('After %s training steps, validation accuracy = %g' % (global_step, accuracy_score))
                else:
                    print('no checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets('D:\\ST\\Python_work\\program\\手写识别', one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
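'''
A possible cause of the "No checkpoint file found" result in the log above:
tf.train.get_checkpoint_state returns None unless the directory contains the
`checkpoint` index file written by saver.save, so a mismatch between
MODEL_SAVE_PATH and the directory actually written to is a likely culprit.
A quick check (path illustrative):
    print(tf.train.latest_checkpoint('D:\\ST\\Python_work\\program\\手写识别'))
If this prints None, the directory has no readable checkpoint index.
'''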