import os

import numpy as np

import matplotlib.pyplot as plt

from PIL import Image, ImageChops

from skimage import color,data,transform,io

# Walk every class folder under F:\data\flowers, split each class 75/25
# into train/test, resize every jpg to 64x64, add one randomly-rotated
# copy of each training image as augmentation, and dump the four arrays
# to disk with np.save.
fileList = os.listdir("F:\\data\\flowers")

trainDataList = []
trianLabel = []  # NOTE(review): "trian" typo kept — later code loads "G:\\trianLabel.npy"
testDataList = []
testLabel = []

for j in range(len(fileList)):
    data = os.listdir("F:\\data\\flowers\\" + fileList[j])
    # One shuffle fully randomizes the split; the original shuffled
    # int(len(data)*0.25) times in a while-loop, which added nothing.
    np.random.shuffle(data)
    testNum = int(len(data) * 0.25)
    # Slice explicitly so a class with fewer than 4 files (testNum == 0)
    # keeps everything in the training set; the original data[:-0]/data[-0:]
    # slices would have sent the whole class to the test set instead.
    trainData = np.array(data[:len(data) - testNum])
    testData = np.array(data[len(data) - testNum:])
    for name in trainData:
        if name[-3:] == "jpg":
            image = io.imread("F:\\data\\flowers\\" + fileList[j] + "\\" + name)
            image = transform.resize(image, (64, 64))
            trainDataList.append(image)
            trianLabel.append(int(j))
            # augmentation: append one extra copy rotated by a random angle
            angle = np.random.randint(-90, 90)
            image = transform.rotate(image, angle)
            image = transform.resize(image, (64, 64))
            trainDataList.append(image)
            trianLabel.append(int(j))
    for name in testData:
        if name[-3:] == "jpg":
            image = io.imread("F:\\data\\flowers\\" + fileList[j] + "\\" + name)
            image = transform.resize(image, (64, 64))
            testDataList.append(image)
            testLabel.append(int(j))

print("图片数据读取完了...")
print(np.shape(trainDataList))
print(np.shape(trianLabel))
print(np.shape(testDataList))
print(np.shape(testLabel))

print("正在写磁盘...")
np.save("G:\\trainDataList", trainDataList)
np.save("G:\\trianLabel", trianLabel)
np.save("G:\\testDataList", testDataList)
np.save("G:\\testLabel", testLabel)
print("数据处理完了...")

import numpy as np

from keras.utils import to_categorical

# One-hot encode the integer class labels written by the preprocessing
# step and overwrite the .npy label files in place.

# training labels: (n,) int ids -> (n, num_classes) one-hot rows
trainLabel = np.load("G:\\trianLabel.npy")
trainLabel_encoded = to_categorical(trainLabel)
np.save("G:\\trianLabel", trainLabel_encoded)

# test labels, same treatment
testLabel = np.load("G:\\testLabel.npy")
testLabel_encoded = to_categorical(testLabel)
np.save("G:\\testLabel", testLabel_encoded)

print("转码类别写盘完了...")

import random

import numpy as np

# Load the training arrays, shuffle samples and labels in unison through a
# shared index permutation, and write the shuffled arrays back over the
# originals.
trainDataList = np.load("G:\\trainDataList.npy")
trianLabel = np.load("G:\\trianLabel.npy")
print("数据加载完了...")

trainIndex = list(range(len(trianLabel)))
random.shuffle(trainIndex)

# Indexing both arrays with the same permutation keeps image i paired
# with label i.
trainData = [trainDataList[idx] for idx in trainIndex]
trainClass = [trianLabel[idx] for idx in trainIndex]
print("训练数据shuffle完了...")

np.save("G:\\trainDataList", trainData)
np.save("G:\\trianLabel", trainClass)
print("训练数据写盘完毕...")

import random

import numpy as np

# Shuffle the test set the same way as the training set: permute indices,
# reindex data and labels together, and overwrite the files on disk.
testDataList = np.load("G:\\testDataList.npy")
testLabel = np.load("G:\\testLabel.npy")

testIndex = list(range(len(testLabel)))
random.shuffle(testIndex)

# Same permutation for both lists keeps image/label pairs aligned.
testData = [testDataList[idx] for idx in testIndex]
testClass = [testLabel[idx] for idx in testIndex]
print("测试数据shuffle完了...")

np.save("G:\\testDataList", testData)
np.save("G:\\testLabel", testClass)
print("测试数据写盘完毕...")

# coding: utf-8
# NOTE(review): a coding declaration only takes effect on the first line of a
# file — this fragment was clearly pasted from a separate module.

import tensorflow as tf

from random import shuffle

# Hyper-parameters of the flower CNN: 64x64 RGB input, 5 output classes.
INPUT_NODE = 64*64

OUT_NODE = 5

IMAGE_SIZE = 64

NUM_CHANNELS = 3

NUM_LABELS = 5

# Size and depth of the first convolutional layer.
CONV1_DEEP = 16

CONV1_SIZE = 5

# Size and depth of the second convolutional layer.
CONV2_DEEP = 32

CONV2_SIZE = 5

# Number of nodes in the fully connected layer.
FC_SIZE = 512

def inference(input_tensor, train, regularizer):
    """Forward pass of the flower CNN (TensorFlow 1.x graph mode).

    Args:
        input_tensor: float32 tensor of shape [batch, 64, 64, 3].
        train: truthy -> apply dropout(0.5) after each hidden FC layer.
        regularizer: if not None, L2 penalties (scale 0.03) on the FC
            weights are added to the 'losses' collection. Only its
            None-ness is checked; the value itself is never used.

    Returns:
        Unscaled logits tensor of shape [batch, NUM_LABELS].
    """
    # --- conv block 1: 5x5x3 kernels -> CONV1_DEEP feature maps ---
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.Variable(tf.random_normal([CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS,CONV1_DEEP],stddev=0.1),name='weight')
        tf.summary.histogram('convLayer1/weights1', conv1_weights)
        # Fix: the original wrapped the bias twice — tf.Variable(tf.Variable(...)) —
        # creating a redundant nested variable.
        conv1_biases = tf.Variable(tf.random_normal([CONV1_DEEP]),name="bias")
        tf.summary.histogram('convLayer1/bias1', conv1_biases)
        conv1 = tf.nn.conv2d(input_tensor,conv1_weights,strides=[1,1,1,1],padding='SAME')
        tf.summary.histogram('convLayer1/conv1', conv1)
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))
        tf.summary.histogram('ConvLayer1/relu1', relu1)

    # --- 2x2 max-pool, stride 2 ---
    with tf.variable_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
        tf.summary.histogram('ConvLayer1/pool1', pool1)

    # --- conv block 2: 5x5xCONV1_DEEP kernels -> CONV2_DEEP feature maps ---
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.Variable(tf.random_normal([CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],stddev=0.1),name='weight')
        tf.summary.histogram('convLayer2/weights2', conv2_weights)
        conv2_biases = tf.Variable(tf.random_normal([CONV2_DEEP]),name="bias")
        tf.summary.histogram('convLayer2/bias2', conv2_biases)
        conv2 = tf.nn.conv2d(pool1,conv2_weights,strides=[1,1,1,1],padding='SAME')
        tf.summary.histogram('convLayer2/conv2', conv2)
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2,conv2_biases))
        tf.summary.histogram('ConvLayer2/relu2', relu2)

    # --- 2x2 max-pool, stride 2 ---
    with tf.variable_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
        tf.summary.histogram('ConvLayer2/pool2', pool2)

    # Flatten [batch, h, w, c] -> [batch, h*w*c] for the fully-connected layers.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1]*pool_shape[2]*pool_shape[3]
    reshaped = tf.reshape(pool2,[-1,nodes])

    # --- fully connected layer 1: nodes -> FC_SIZE ---
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.Variable(tf.random_normal([nodes,FC_SIZE],stddev=0.1),name='weight')
        if regularizer is not None:
            tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.03)(fc1_weights))
        fc1_biases = tf.Variable(tf.random_normal([FC_SIZE]),name="bias")
        fc1 = tf.nn.relu(tf.matmul(reshaped,fc1_weights)+fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1,0.5)

    # --- fully connected layer 2: FC_SIZE -> 64 ---
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.Variable(tf.random_normal([FC_SIZE,64],stddev=0.1),name="weight")
        if regularizer is not None:
            tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.03)(fc2_weights))
        fc2_biases = tf.Variable(tf.random_normal([64]),name="bias")
        fc2 = tf.nn.relu(tf.matmul(fc1,fc2_weights)+fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2,0.5)

    # --- output layer: 64 -> NUM_LABELS raw logits (no activation; the loss
    # applies softmax itself) ---
    with tf.variable_scope('layer7-fc3'):
        fc3_weights = tf.Variable(tf.random_normal([64,NUM_LABELS],stddev=0.1),name="weight")
        if regularizer is not None:
            tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.03)(fc3_weights))
        fc3_biases = tf.Variable(tf.random_normal([NUM_LABELS]),name="bias")
        logit = tf.matmul(fc2,fc3_weights)+fc3_biases

    return logit

import time

import keras

import numpy as np

from keras.utils import np_utils  # NOTE(review): imported but unused here

# ---- training driver: mini-batch Adam training of the CNN built by
# inference(), with TensorBoard summaries, then a batched test pass ----
X = np.load("G:\\trainDataList.npy")
Y = np.load("G:\\trianLabel.npy")
print(np.shape(X))
print(np.shape(Y))
print(np.shape(testData))
print(np.shape(testLabel))

batch_size = 10
n_classes = 5
epochs = 16  # number of full passes over the training set
learning_rate = 1e-4
batch_num = int(np.shape(X)[0]/batch_size)
dropout = 0.75  # NOTE(review): unused — inference() hard-codes dropout 0.5
# keep_prob = tf.placeholder(tf.float32)

x = tf.placeholder(tf.float32,[None,64,64,3])
y = tf.placeholder(tf.float32,[None,n_classes])

# Load the test set and fix the evaluation batch size.
test_X = np.load("G:\\testDataList.npy")
test_Y = np.load("G:\\testLabel.npy")
back = 64
ro = int(len(test_X)/back)

# Build the graph. NOTE(review): the string "regularizer" merely enables the
# L2 branch (any non-None value would); train=1 keeps dropout active even
# when evaluating test accuracy below — TODO confirm that is intended.
pred = inference(x,1,"regularizer")
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,labels=y))
# Any one of these three optimizers may be used; Adam is active.
optimizer = tf.train.AdamOptimizer(1e-4).minimize(cost)
# train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
# train_step = tf.train.MomentumOptimizer(0.001,0.9).minimize(cost)

# Compare the predicted class against the one-hot ground truth.
correct_pred = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))
merged = tf.summary.merge_all()
init = tf.global_variables_initializer()

start_time = time.time()
with tf.Session() as sess:
    sess.run(init)
    # TensorBoard graph/summary output
    writer = tf.summary.FileWriter('F:/Flower_graph', sess.graph)
    for i in range(epochs):
        for j in range(batch_num):
            # Sliding window over the training set, wrapping via modulo.
            offset = (j * batch_size) % (Y.shape[0] - batch_size)
            batch_data = X[offset:(offset + batch_size), :]
            batch_labels = Y[offset:(offset + batch_size), :]
            sess.run(optimizer, feed_dict={x:batch_data,y:batch_labels})
            result = sess.run(merged, feed_dict={x:batch_data,y:batch_labels})
            writer.add_summary(result, i)
        # Report loss/accuracy on the last mini-batch of the epoch.
        loss,acc = sess.run([cost,accuracy],feed_dict={x:batch_data,y:batch_labels})
        print("Epoch:", '%04d' % (i+1),"cost=", "{:.9f}".format(loss),"Training accuracy","{:.5f}".format(acc*100))
    writer.close()
    print("########################训练结束,下面开始测试###################")
    for i in range(ro):
        s = i*back
        e = s+back
        test_accuracy = sess.run(accuracy,feed_dict={x:test_X[s:e],y:test_Y[s:e]})
        print("step:%d test accuracy = %.4f%%" % (i,test_accuracy*100))
    # NOTE(review): "Final" accuracy is just the last batch's, not an average.
    print("Final test accuracy = %.4f%%" % (test_accuracy*100))

end_time = time.time()
print('Times:',(end_time-start_time))
print('Optimization Completed')

........................................

import os

import numpy as np

from scipy import ndimage

from skimage import color,data,transform,io

# Random shift/brightness augmentation fragment.
# NOTE(review): `transImageGray` is not defined anywhere in this file — this
# snippet assumes a float grayscale image from an earlier step and will raise
# NameError if run as-is.

# candidate shift offsets, in pixels: -3..2
move=np.arange(-3,3,1)

moveIndex = np.random.randint(len(move))

# candidate fill brightness values for pixels exposed by the shift
lightStrong=np.arange(0.01,3,0.1)

lightStrongIndex = np.random.randint(len(lightStrong))

# shift the image by a random offset, filling exposed pixels with a random brightness
moveImage=ndimage.shift(transImageGray,move[moveIndex],cval=lightStrong[lightStrongIndex])

# clamp to the valid [0, 1] range of a float image
moveImage[moveImage>1.0]=1.0

from numpy import array

from numpy import argmax

from keras.utils import to_categorical

from sklearn.preprocessing import LabelEncoder

# Demo: one-hot encode a list of integer class ids with keras'
# to_categorical, then recover the first id via argmax.
data=[1, 3, 2, 0, 3, 2, 2, 1, 0, 1]

data=array(data)

print(data)

# each integer becomes an indicator row
encoded = to_categorical(data)

print(encoded)

# argmax of a one-hot row recovers the original integer
inverted = argmax(encoded[0])

print(inverted)

import numpy as np

from numpy import argmax

# Demo: manual integer- and one-hot-encoding of a string over a fixed
# alphabet, then decoding the first character back.
data = 'hello world'
print(len(data))

# 26 lowercase letters plus the space character -> 27 symbols
alphabet = 'abcdefghijklmnopqrstuvwxyz '

# forward and reverse lookup tables between characters and integer ids
char_to_int = dict((c, i) for i, c in enumerate(alphabet))
print(char_to_int)
int_to_char = dict((i, c) for i, c in enumerate(alphabet))
print(int_to_char)

# integer-encode the message, one id per character
integer_encoded = [char_to_int[char] for char in data]
print(integer_encoded)

# one-hot encode: each id becomes a 27-wide indicator row
onehot_encoded = list()
for value in integer_encoded:
    letter = [0 for _ in range(len(alphabet))]
    letter[value] = 1
    onehot_encoded.append(letter)
print(np.shape(onehot_encoded))
print(onehot_encoded)

# decode the first one-hot row back to its character
inverted = int_to_char[argmax(onehot_encoded[0])]
print(inverted)

from numpy import array

from numpy import argmax

from sklearn.preprocessing import LabelEncoder

from sklearn.preprocessing import OneHotEncoder

# Demo: two-step one-hot encoding of string categories with scikit-learn.
data = ['cold', 'cold', 'warm', 'cold', 'hot', 'hot', 'warm', 'cold', 'warm', 'hot']

values = array(data)

print(values)

# step 1: map each string category to an integer id
label_encoder = LabelEncoder()

integer_encoded = label_encoder.fit_transform(values)

print(integer_encoded)

# step 2: OneHotEncoder needs a 2-D column, so reshape (n,) -> (n, 1)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)

print(integer_encoded)

# NOTE(review): `sparse=False` was renamed `sparse_output` in newer
# scikit-learn releases — confirm against the installed version.
onehot_encoder = OneHotEncoder(sparse=False)

onehot_encoded = onehot_encoder.fit_transform(integer_encoded)

print(onehot_encoded)

# invert: argmax gives the integer id, inverse_transform maps it back to the string
inverted = label_encoder.inverse_transform([argmax(onehot_encoded[0, :])])

print(inverted)

from numpy import array

from numpy import argmax

from keras.utils import to_categorical

# Demo: combine sklearn's LabelEncoder with keras' to_categorical.
# NOTE(review): LabelEncoder is not imported in this fragment — it relies on
# the `from sklearn.preprocessing import LabelEncoder` from a snippet above.
data = ['cold', 'cold', 'warm', 'cold', 'hot', 'hot', 'warm', 'cold', 'warm', 'hot']

values = array(data)

print(values)

# map string categories to integer ids
label_encoder = LabelEncoder()

integer_encoded = label_encoder.fit_transform(values)

print(integer_encoded)

## for plain integer labels the encoder step is unnecessary:

#data=[1, 3, 2, 0, 3, 2, 2, 1, 0, 1]

#data=array(data)

#print(data)

# one hot encode the integer ids

encoded = to_categorical(integer_encoded)

print(encoded)

# argmax of the first one-hot row recovers its integer id
inverted = argmax(encoded[0])

print(inverted)

import os

import numpy as np

import matplotlib.pyplot as plt

from scipy import ndimage

from skimage import color,data,transform,io

# Demo: load one daisy image, display it, then shift it by a random
# (x, y) pixel offset and display the result.
# NOTE(review): skimage.data.imread was deprecated and later removed —
# io.imread is the supported call; confirm against the installed version.
image = data.imread("F:\\data\\flowers\\daisy\\5547758_eea9edfd54_n.jpg")

io.imshow(image)

plt.show()

# random shift offsets in pixels, each drawn from [-100, 100)
x = np.random.randint(-100,100)

print(x)

y = np.random.randint(-100,100)

print(y)

# shift axis 0 by x and axis 1 by y, leave the channel axis (offset 0)
# untouched; pixels exposed by the shift are filled with 0.5
moveImage=ndimage.shift(image,(x,y,0),cval=0.5)

io.imshow(moveImage)

plt.show()

python识别花草_吴裕雄 python神经网络 花朵图片识别(9)相关推荐

  1. python识别花草_吴裕雄 python神经网络 花朵图片识别(10)

    import os import numpy as np import matplotlib.pyplot as plt from PIL import Image, ImageChops from ...

  2. python医疗系统设计_吴裕雄 python 人工智能——智能医疗系统后台用户复诊模块简约版代码展示...

    #复诊 importsysimportosimporttimeimportoperatorimportcx_Oracleimportnumpy as npimportpandas as pdimpor ...

  3. python识别花草_荐 【python】TensorFlow框架下CNN神经网络的花卉识别系统

    from skimage import io,transform import glob import os import tensorflow as tf import numpy as np im ...

  4. python预测疾病_吴裕雄--天生自然python机器学习:使用Logistic回归从疝气病症预测病马的死亡率...

    ,除了部分指标主观和难以测量外,该数据还存在一个问题,数据集中有 30%的值是缺失的.下面将首先介绍如何处理数据集中的数据缺失问题,然 后 再 利 用 Logistic回 归 和随机梯度上升算法来预测 ...

  5. cnn图像二分类 python_人工智能Keras图像分类器(CNN卷积神经网络的图片识别篇)...

    上期文章我们分享了人工智能Keras图像分类器(CNN卷积神经网络的图片识别的训练模型),本期我们使用预训练模型对图片进行识别:Keras CNN卷积神经网络模型训练 导入第三方库 from kera ...

  6. mysql 查询 系统字段 自然日_吴裕雄--天生自然python数据清洗与数据可视化:MYSQL、MongoDB数据库连接与查询、爬取天猫连衣裙数据保存到MongoDB...

    本博文使用的数据库是MySQL和MongoDB数据库.安装MySQL可以参照我的这篇博文:https://www.cnblogs.com/tszr/p/12112777.html 其中操作Mysql使 ...

  7. python人工智能图像识别_人工智能之Python人脸识别技术,人人都能做识别!

    原标题:人工智能之Python人脸识别技术,人人都能做识别! 作者丨Python小哥哥 https://www.jianshu.com/p/dce1498ef0ee 一.环境搭建 1.系统环境 Ubu ...

  8. 网易图灵学院python公开课_图灵学院 Python全系列教程全栈工程师 python视频教程下载...

    大家怎么说? 老师很好,我认为,若想学好python,应该多练.多想.多看.学习资料不能仅限于老师给定的这些内容,这些毕竟是入门资料 老师讲的真不错,对于我们这种小白来说 也比较容易懂,虽然有些时候自 ...

  9. 文科生自学python要多久_怎么自学python,大概要多久?

    都让开!本人文科生,自学Python 2年半,作为一个曾经完全0基础,啥都不懂纯靠自学学会python的文科生,有一些不成熟的小建议可以分享一下. 首先不要觉着编程难,只要你认识26个英文字母,有一点 ...

最新文章

  1. leetcode 91. Decode Ways | 91. 解码方法(动态规划)
  2. 整整240套Axure原型设计元件库 组件库 控件库分享
  3. 【PCB设计工具】在线 mil到mm单位转换、mm到mils换算
  4. 最新最全的微信小程序入门学习教程,微信小程序零基础入门到精通
  5. 让手机可以边打电话边上网
  6. 63%的农村孩子没上高中,中国如何跨越中等收入陷阱?
  7. YOLOv5基础知识点——卷积神经网络
  8. 未来的量子计算机算圆周率吗,圆周率的诡异现象,圆周率算尽了会怎么样
  9. bzoj1375 双调路径
  10. 【机器学习入门】决策树算法(四):CART算法(Classification and Regression Tree)
  11. javascript中in用法介绍
  12. 【数据处理】格式化数据
  13. 地理坐标系介绍:国家2000、西安80、WGS84、火星GCJ02、百度BD09
  14. 投屏软件 支持android 4,投屏神器app手机版-投屏神器下载 2.3.4 安卓版 - 河东软件园...
  15. Adobe DPS Storefront开发
  16. pe盘启动MySQL_【转】老毛桃WINPE U盘版 制作全攻略
  17. 维生素B3的全球与中国市场2022-2028年:技术、参与者、趋势、市场规模及占有率研究报告
  18. 聊聊如何度过寒冬(个人篇)
  19. android资料转移到iphone,怎么将安卓手机数据资料转到iPhone上
  20. micropython是什么意思_MicroPython能做什么

热门文章

  1. WAIC 2021 | 第四范式戴文渊:AI决策助力金融转型走向质变
  2. C# 委托与事件应用,不同窗体之间通信
  3. 如何获取RGB图像的单色图像
  4. 高级搜索题集(夏天的风分类)
  5. Oracle 时间处理
  6. C/C++课程设计题目[2023-03-06]
  7. 旱地粮油作物如何防灾减灾
  8. 万泽云库平台怎么样,靠谱么
  9. loading linux img2a,科学网—GAMIT学习中遇到的问题 - 陈超的博文
  10. 远距离遥控智能驱鸟器