[Python] A Flower Recognition System Using a CNN under the TensorFlow Framework
from skimage import io, transform
import glob
import os
import time
import numpy as np
import tensorflow as tf

os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # hide all GPUs and force CPU execution
# Read the flower images: one sub-directory per class under `path`
def read_img(path):
    cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    imgs = []
    labels = []
    for idx, folder in enumerate(cate):
        print('reading the dir: %s' % folder)
        for im in glob.glob(folder + '/*.jpg'):
            img = io.imread(im)
            img = transform.resize(img, (w, h))  # w and h are set in __main__ below
            imgs.append(img)
            labels.append(idx)  # the directory index serves as the integer label
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)
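read_img expects one sub-directory per class under path (e.g. flowers/daisy/*.jpg, flowers/roses/*.jpg, ...) and uses the directory index as the label. A minimal usage sketch — the path matches the settings used later in this post, and the shapes shown are illustrative:

w, h = 100, 100  # read_img() resizes every image to (w, h); it reads these globals
data, label = read_img('D:/python/workspace/flower/data/flowers/')
print(data.shape, label.shape)  # e.g. (N, 100, 100, 3) and (N,)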
def inference(input_tensor, train, regularizer):
    """Forward pass: four conv + max-pool blocks followed by three fully connected layers."""
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight", [5, 5, 3, 32],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable("weight", [5, 5, 32, 64],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    with tf.variable_scope("layer5-conv3"):
        conv3_weights = tf.get_variable("weight", [3, 3, 64, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
    with tf.name_scope("layer6-pool3"):
        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    with tf.variable_scope("layer7-conv4"):
        conv4_weights = tf.get_variable("weight", [3, 3, 128, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv4_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))
    with tf.name_scope("layer8-pool4"):
        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    nodes = 6 * 6 * 128  # a 100x100 input shrinks to 6x6 after four 2x2 pools
    reshaped = tf.reshape(pool4, [-1, nodes])
    with tf.variable_scope('layer9-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, 1024],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    with tf.variable_scope('layer10-fc2'):
        fc2_weights = tf.get_variable("weight", [1024, 512],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.5)
    with tf.variable_scope('layer11-fc3'):
        fc3_weights = tf.get_variable("weight", [512, 5],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc3_weights))
        fc3_biases = tf.get_variable("bias", [5], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases
    return logit
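The flattened size nodes = 6*6*128 follows from the 100x100 input: every SAME-padded convolution preserves the spatial size, and each 2x2 VALID max-pool floors it by half, so 100 -> 50 -> 25 -> 12 -> 6. A quick sanity check of that arithmetic:

size = 100                      # input width/height; all convolutions use SAME padding
for _ in range(4):              # four 2x2 VALID max-pools
    size = size // 2            # VALID pooling floors the division (25 -> 12)
print(size, size * size * 128)  # 6, 4608 -- matches nodes = 6*6*128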
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    """Yield the data in batches of `batch_size`; a trailing partial batch is dropped."""
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]
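A hypothetical call, just to show the generator contract (the arrays and batch size are illustrative; note that a final batch smaller than batch_size is silently dropped):

xs = np.random.rand(10, 4).astype(np.float32)  # 10 samples, 4 features each
ys = np.arange(10, dtype=np.int32)
for xb, yb in minibatches(xs, ys, batch_size=4, shuffle=True):
    print(xb.shape, yb.shape)  # (4, 4) (4,) -- printed twice; the last 2 samples are dropped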
def semantic_alignment(src_feature, tgt_feature, src_label, tgt_label, num_classes=2):
    """Semantic alignment loss between a source and a target domain.

    Args:
        src_feature: features from the source domain
        tgt_feature: features from the target domain
        src_label:   source labels (one-hot encoded)
        tgt_label:   target labels (one-hot encoded)
        num_classes: the number of classes (e.g., 2)
    Returns:
        semantic_loss: the distance between the per-class centroids of the two domains.
    Note: this helper is not called by the training script below.
    """
    source_result = tf.argmax(src_label, 1)  # source class indices
    target_result = tf.argmax(tgt_label, 1)  # target class indices
    ones = tf.ones_like(src_feature)  # all-ones tensor with the same shape as the source features
    # Debug prints for shape inspection
    print('ones', ones.shape)
    print('source_result', source_result.shape)
    print('target_result', target_result.shape)
    # Per-class sample counts in the current batch
    current_source_count = tf.unsorted_segment_sum(ones, source_result, num_classes)
    current_target_count = tf.unsorted_segment_sum(ones, target_result, num_classes)
    # Clamp the counts to at least 1 so an empty class does not cause division by zero
    current_positive_source_count = tf.maximum(current_source_count, tf.ones_like(current_source_count))
    current_positive_target_count = tf.maximum(current_target_count, tf.ones_like(current_target_count))
    # Per-class feature centroids for each domain
    current_source_centroid = tf.divide(
        tf.unsorted_segment_sum(data=src_feature, segment_ids=source_result, num_segments=num_classes),
        current_positive_source_count)
    current_target_centroid = tf.divide(
        tf.unsorted_segment_sum(data=tgt_feature, segment_ids=target_result, num_segments=num_classes),
        current_positive_target_count)
    # Mean squared distance between the centroids of the two domains
    semantic_loss = tf.reduce_mean(tf.square(current_source_centroid - current_target_centroid))
    return semantic_loss
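A hypothetical call, assuming two batches of 512-dimensional features with one-hot labels (all placeholder names and shapes here are illustrative, not part of the training script):

src_f = tf.placeholder(tf.float32, [None, 512])
tgt_f = tf.placeholder(tf.float32, [None, 512])
src_y = tf.placeholder(tf.float32, [None, 5])  # one-hot labels
tgt_y = tf.placeholder(tf.float32, [None, 5])
sa_loss = semantic_alignment(src_f, tgt_f, src_y, tgt_y, num_classes=5)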
if __name__ == '__main__':
    # Dataset directory
    path = 'D:/python/workspace/flower/data/flowers/'
    # Where the trained model is saved
    model_path = 'D:/python/workspace/flower/model/model.ckpt'
    # Test-set directory (not used by the training script below)
    path1 = 'D:/python/workspace/flower/test1/'
    # Resize every image to 100x100 RGB
    w = 100
    h = 100
    c = 3
    data, label = read_img(path)
    # Shuffle the samples
    num_example = data.shape[0]
    arr = np.arange(num_example)
    np.random.shuffle(arr)
    data = data[arr]
    label = label[arr]
    # Split into a training set and a validation set
    ratio = 0.8
    s = int(num_example * ratio)  # np.int is deprecated; the built-in int behaves the same here
    x_train = data[:s]
    y_train = label[:s]
    x_val = data[s:]
    y_val = label[s:]
    # ----------------- Build the network -----------------
    # Placeholders
    x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
    y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')
    regularizer = tf.contrib.layers.l2_regularizer(0.0001)
    # train=False here, so the dropout branches inside inference() are never active
    logits = inference(x, False, regularizer)
    # Small trick: multiply logits by 1 and give the result a name, so the output
    # tensor can be fetched by name ('logits_eval') when the model is restored later
    b = tf.constant(value=1, dtype=tf.float32)
    logits_eval = tf.multiply(logits, b, name='logits_eval')
    # Average the per-example cross-entropy into a scalar, and add the L2 penalties
    # that inference() collected (otherwise they would sit unused in 'losses')
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_)
    loss = tf.reduce_mean(cross_entropy) + tf.add_n(tf.get_collection('losses'))
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Train and validate; n_epoch can be set higher for better accuracy
    n_epoch = 10
    batch_size = 64
    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epoch):
        start_time = time.time()
        # Training
        train_loss, train_acc, n_batch = 0, 0, 0
        for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
            _, err, ac = sess.run([train_op, loss, acc], feed_dict={x: x_train_a, y_: y_train_a})
            train_loss += err
            train_acc += ac
            n_batch += 1
        print("%d epoch" % epoch)
        print("   train loss: %f" % (train_loss / n_batch))
        print("   train acc: %f" % (train_acc / n_batch))
        # Validation
        val_loss, val_acc, n_batch = 0, 0, 0
        for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
            err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
            val_loss += err
            val_acc += ac
            n_batch += 1
        print("   validation loss: %f" % (val_loss / n_batch))
        print("   validation acc: %f" % (val_acc / n_batch))
        print("============================================================")
    saver.save(sess, model_path)
    sess.close()
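Once training has finished, the named 'logits_eval' tensor is what makes the saved checkpoint easy to reuse. A minimal restore-and-predict sketch, as a separate hypothetical script (the image path is illustrative; the checkpoint path matches model_path above):

import numpy as np
import tensorflow as tf
from skimage import io, transform

with tf.Session() as sess:
    # Rebuild the graph from the .meta file and load the trained weights
    saver = tf.train.import_meta_graph('D:/python/workspace/flower/model/model.ckpt.meta')
    saver.restore(sess, 'D:/python/workspace/flower/model/model.ckpt')
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name('x:0')                 # the input placeholder named above
    logits = graph.get_tensor_by_name('logits_eval:0')  # the named output tensor
    # Preprocess one test image the same way read_img() does
    img = transform.resize(io.imread('D:/python/workspace/flower/test1/rose.jpg'), (100, 100))
    pred = sess.run(logits, feed_dict={x: np.asarray([img], np.float32)})
    print('predicted class index:', np.argmax(pred, 1)[0])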