Details to be filled in later.

import cv2
import os
import sys
import numpy as np
import tensorflow as tf

char_table = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
              'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '川', '鄂', '赣', '甘', '贵',
              '桂', '黑', '沪', '冀', '津', '京', '吉', '辽', '鲁', '蒙', '闽', '宁', '青', '琼', '陕', '苏', '晋',
              '皖', '湘', '新', '豫', '渝', '粤', '云', '藏', '浙']


def hist_image(img):
    # histogram equalization on a grayscale image
    assert img.ndim == 2
    hist = [0 for i in range(256)]
    img_h, img_w = img.shape[0], img.shape[1]

    for row in range(img_h):
        for col in range(img_w):
            hist[img[row, col]] += 1
    p = [hist[n] / (img_w * img_h) for n in range(256)]
    p1 = np.cumsum(p)
    for row in range(img_h):
        for col in range(img_w):
            v = img[row, col]
            img[row, col] = p1[v] * 255
    return img


def find_board_area(img):
    # rough plate-area search based on horizontal and vertical projections
    assert img.ndim == 2
    img_h, img_w = img.shape[0], img.shape[1]
    top, bottom, left, right = 0, img_h, 0, img_w
    flag = False
    h_proj = [0 for i in range(img_h)]
    v_proj = [0 for i in range(img_w)]

    for row in range(round(img_h * 0.5), round(img_h * 0.8), 3):
        for col in range(img_w):
            if img[row, col] == 255:
                h_proj[row] += 1
        if flag == False and h_proj[row] > 12:
            flag = True
            top = row
        if flag == True and row > top + 8 and h_proj[row] < 12:
            bottom = row
            flag = False

    for col in range(round(img_w * 0.3), img_w, 1):
        for row in range(top, bottom, 1):
            if img[row, col] == 255:
                v_proj[col] += 1
        if flag == False and (v_proj[col] > 10 or v_proj[col] - v_proj[col - 1] > 5):
            left = col
            break
    return left, top, 120, bottom - top - 10


def verify_scale(rotate_rect):
    error = 0.4
    aspect = 4  # 4.7272
    min_area = 10 * (10 * aspect)
    max_area = 150 * (150 * aspect)
    min_aspect = aspect * (1 - error)
    max_aspect = aspect * (1 + error)
    theta = 30

    # if the width or height is 0 this is not a valid rectangle, return False immediately
    if rotate_rect[1][0] == 0 or rotate_rect[1][1] == 0:
        return False

    r = rotate_rect[1][0] / rotate_rect[1][1]
    r = max(r, 1 / r)
    area = rotate_rect[1][0] * rotate_rect[1][1]
    if area > min_area and area < max_area and r > min_aspect and r < max_aspect:
        # the tilt angle of the rectangle must not exceed theta
        if ((rotate_rect[1][0] < rotate_rect[1][1] and rotate_rect[2] >= -90 and rotate_rect[2] < -(90 - theta)) or
                (rotate_rect[1][1] < rotate_rect[1][0] and rotate_rect[2] > -theta and rotate_rect[2] <= 0)):
            return True
    return False


def img_Transform(car_rect, image):
    # rectify a rotated plate rectangle with an affine transform
    img_h, img_w = image.shape[:2]
    rect_w, rect_h = car_rect[1][0], car_rect[1][1]
    angle = car_rect[2]

    return_flag = False
    if car_rect[2] == 0:
        return_flag = True
    if car_rect[2] == -90 and rect_w < rect_h:
        rect_w, rect_h = rect_h, rect_w
        return_flag = True
    if return_flag:
        car_img = image[int(car_rect[0][1] - rect_h / 2):int(car_rect[0][1] + rect_h / 2),
                        int(car_rect[0][0] - rect_w / 2):int(car_rect[0][0] + rect_w / 2)]
        return car_img

    car_rect = (car_rect[0], (rect_w, rect_h), angle)
    box = cv2.boxPoints(car_rect)

    heigth_point = right_point = [0, 0]
    left_point = low_point = [car_rect[0][0], car_rect[0][1]]
    for point in box:
        if left_point[0] > point[0]:
            left_point = point
        if low_point[1] > point[1]:
            low_point = point
        if heigth_point[1] < point[1]:
            heigth_point = point
        if right_point[0] < point[0]:
            right_point = point

    if left_point[1] <= right_point[1]:  # positive angle
        new_right_point = [right_point[0], heigth_point[1]]
        pts1 = np.float32([left_point, heigth_point, right_point])
        pts2 = np.float32([left_point, heigth_point, new_right_point])  # only the character height needs to change
        M = cv2.getAffineTransform(pts1, pts2)
        dst = cv2.warpAffine(image, M, (round(img_w * 2), round(img_h * 2)))
        car_img = dst[int(left_point[1]):int(heigth_point[1]), int(left_point[0]):int(new_right_point[0])]
    elif left_point[1] > right_point[1]:  # negative angle
        new_left_point = [left_point[0], heigth_point[1]]
        pts1 = np.float32([left_point, heigth_point, right_point])
        pts2 = np.float32([new_left_point, heigth_point, right_point])  # only the character height needs to change
        M = cv2.getAffineTransform(pts1, pts2)
        dst = cv2.warpAffine(image, M, (round(img_w * 2), round(img_h * 2)))
        car_img = dst[int(right_point[1]):int(heigth_point[1]), int(new_left_point[0]):int(right_point[0])]

    return car_img


def pre_process(orig_img):
    gray_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray_img', gray_img)

    blur_img = cv2.blur(gray_img, (3, 3))
    cv2.imshow('blur', blur_img)

    sobel_img = cv2.Sobel(blur_img, cv2.CV_16S, 1, 0, ksize=3)
    sobel_img = cv2.convertScaleAbs(sobel_img)
    cv2.imshow('sobel', sobel_img)

    hsv_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2HSV)
    h, s, v = hsv_img[:, :, 0], hsv_img[:, :, 1], hsv_img[:, :, 2]
    # yellow hue range [26, 34], blue hue range [100, 124]
    blue_img = (((h > 26) & (h < 34)) | ((h > 100) & (h < 124))) & (s > 70) & (v > 70)
    blue_img = blue_img.astype('float32')

    mix_img = np.multiply(sobel_img, blue_img)
    cv2.imshow('mix', mix_img)

    mix_img = mix_img.astype(np.uint8)
    ret, binary_img = cv2.threshold(mix_img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    cv2.imshow('binary', binary_img)

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 5))
    close_img = cv2.morphologyEx(binary_img, cv2.MORPH_CLOSE, kernel)
    cv2.imshow('close', close_img)
    return close_img


# Flood fill the candidate plate region: it repairs contours that the previous step
# may have distorted, and it also helps reject regions that are not plates.
def verify_color(rotate_rect, src_image):
    img_h, img_w = src_image.shape[:2]
    mask = np.zeros(shape=[img_h + 2, img_w + 2], dtype=np.uint8)
    connectivity = 4  # 4-connectivity: neighbours within [loDiff, upDiff] of the seed are painted new_value; 8-connectivity is also possible
    loDiff, upDiff = 30, 30
    new_value = 255
    flags = connectivity
    flags |= cv2.FLOODFILL_FIXED_RANGE  # compare each pixel against the seed pixel; without this flag it is compared with its neighbour
    flags |= new_value << 8
    flags |= cv2.FLOODFILL_MASK_ONLY  # fill the mask image instead of modifying the original image

    rand_seed_num = 5000   # number of random seed candidates to generate
    valid_seed_num = 200   # keep at most valid_seed_num valid seeds out of the rand_seed_num candidates
    adjust_param = 0.1
    box_points = cv2.boxPoints(rotate_rect)
    box_points_x = [n[0] for n in box_points]
    box_points_x.sort(reverse=False)
    adjust_x = int((box_points_x[2] - box_points_x[1]) * adjust_param)
    col_range = [box_points_x[1] + adjust_x, box_points_x[2] - adjust_x]
    box_points_y = [n[1] for n in box_points]
    box_points_y.sort(reverse=False)
    adjust_y = int((box_points_y[2] - box_points_y[1]) * adjust_param)
    row_range = [box_points_y[1] + adjust_y, box_points_y[2] - adjust_y]

    # If the seed range found above is very narrow horizontally or vertically,
    # place the random seeds along the diagonals of the rotated rectangle instead.
    if (col_range[1] - col_range[0]) / (box_points_x[3] - box_points_x[0]) < 0.4 \
            or (row_range[1] - row_range[0]) / (box_points_y[3] - box_points_y[0]) < 0.4:
        points_row = []
        points_col = []
        for i in range(2):
            pt1, pt2 = box_points[i], box_points[i + 2]
            x_adjust, y_adjust = int(adjust_param * (abs(pt1[0] - pt2[0]))), int(adjust_param * (abs(pt1[1] - pt2[1])))
            if pt1[0] <= pt2[0]:
                pt1[0], pt2[0] = pt1[0] + x_adjust, pt2[0] - x_adjust
            else:
                pt1[0], pt2[0] = pt1[0] - x_adjust, pt2[0] + x_adjust
            if pt1[1] <= pt2[1]:
                pt1[1], pt2[1] = pt1[1] + y_adjust, pt2[1] - y_adjust
            else:
                pt1[1], pt2[1] = pt1[1] - y_adjust, pt2[1] + y_adjust
            temp_list_x = [int(x) for x in np.linspace(pt1[0], pt2[0], int(rand_seed_num / 2))]
            temp_list_y = [int(y) for y in np.linspace(pt1[1], pt2[1], int(rand_seed_num / 2))]
            points_col.extend(temp_list_x)
            points_row.extend(temp_list_y)
    else:
        points_row = np.random.randint(row_range[0], row_range[1], size=rand_seed_num)
        points_col = np.linspace(col_range[0], col_range[1], num=rand_seed_num).astype(np.uint16)

    points_row = np.array(points_row)
    points_col = np.array(points_col)
    hsv_img = cv2.cvtColor(src_image, cv2.COLOR_BGR2HSV)
    h, s, v = hsv_img[:, :, 0], hsv_img[:, :, 1], hsv_img[:, :, 2]

    # Flood fill from the randomly generated seeds one by one; ideally the whole plate gets filled
    flood_img = src_image.copy()
    seed_cnt = 0
    for i in range(rand_seed_num):
        rand_index = np.random.choice(rand_seed_num, 1, replace=False)[0]
        row, col = int(points_row[rand_index]), int(points_col[rand_index])
        # the seed must lie on a plate background colour
        if (((h[row, col] > 26) & (h[row, col] < 34)) | ((h[row, col] > 100) & (h[row, col] < 124))) & (s[row, col] > 70) & (v[row, col] > 70):
            cv2.floodFill(src_image, mask, (col, row), (255, 255, 255), (loDiff,) * 3, (upDiff,) * 3, flags)
            cv2.circle(flood_img, center=(col, row), radius=2, color=(0, 0, 255), thickness=2)
            seed_cnt += 1
            if seed_cnt >= valid_seed_num:
                break

    # ====================== debug only ====================== #
    show_seed = np.random.uniform(1, 100, 1).astype(np.uint16)
    cv2.imshow('floodfill' + str(show_seed), flood_img)
    cv2.imshow('flood_mask' + str(show_seed), mask)
    # ====================== debug only ====================== #

    # Collect the filled pixels on the mask and compute their minimum-area bounding rectangle
    mask_points = []
    for row in range(1, img_h + 1):
        for col in range(1, img_w + 1):
            if mask[row, col] != 0:
                mask_points.append((col - 1, row - 1))
    mask_rotateRect = cv2.minAreaRect(np.array(mask_points))
    if verify_scale(mask_rotateRect):
        return True, mask_rotateRect
    else:
        return False, mask_rotateRect


# Plate localization
def locate_carPlate(orig_img, pred_image):
    carPlate_list = []
    temp1_orig_img = orig_img.copy()  # debug copy
    temp2_orig_img = orig_img.copy()  # debug copy
    # cv2.findContours returns 3 values in OpenCV 3.x and 2 values in 4.x; handle both
    contours_result = cv2.findContours(pred_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours_result[0] if len(contours_result) == 2 else contours_result[1]
    for i, contour in enumerate(contours):
        cv2.drawContours(temp1_orig_img, contours, i, (0, 255, 255), 2)
        # minimum-area bounding rectangle of the contour
        rotate_rect = cv2.minAreaRect(contour)
        # judge by area and aspect ratio whether the rectangle could be a plate
        if verify_scale(rotate_rect):
            ret, rotate_rect2 = verify_color(rotate_rect, temp2_orig_img)
            if ret == False:
                continue
            # rectify the plate position
            car_plate = img_Transform(rotate_rect2, temp2_orig_img)
            car_plate = cv2.resize(car_plate, (car_plate_w, car_plate_h))  # resize to prepare for the CNN plate classifier
            # ======================== debug visualization ======================== #
            box = cv2.boxPoints(rotate_rect2)
            for k in range(4):
                n1, n2 = k % 4, (k + 1) % 4
                cv2.line(temp1_orig_img, (int(box[n1][0]), int(box[n1][1])),
                         (int(box[n2][0]), int(box[n2][1])), (255, 0, 0), 2)
            cv2.imshow('opencv_' + str(i), car_plate)
            # ======================== debug visualization ======================== #
            carPlate_list.append(car_plate)

    cv2.imshow('contour', temp1_orig_img)
    return carPlate_list


# Horizontal (left-to-right) character segmentation
def horizontal_cut_chars(plate):
    char_addr_list = []
    area_left, area_right, char_left, char_right = 0, 0, 0, 0
    img_w = plate.shape[1]

    # count the edge pixels in one column of the plate
    def getColSum(img, col):
        sum = 0
        for i in range(img.shape[0]):
            sum += round(img[i, col] / 255)
        return sum

    sum = 0
    for col in range(img_w):
        sum += getColSum(plate, col)
    # a column must exceed this threshold to be considered part of a character region
    col_limit = 0  # round(0.5*sum/img_w)
    # the character width is also restricted
    charWid_limit = [round(img_w / 12), round(img_w / 5)]
    is_char_flag = False

    for i in range(img_w):
        colValue = getColSum(plate, i)
        if colValue > col_limit:
            if is_char_flag == False:
                area_right = round((i + char_right) / 2)
                area_width = area_right - area_left
                char_width = char_right - char_left
                if (area_width > charWid_limit[0]) and (area_width < charWid_limit[1]):
                    char_addr_list.append((area_left, area_right, char_width))
                char_left = i
                area_left = round((char_left + char_right) / 2)
                is_char_flag = True
        else:
            if is_char_flag == True:
                char_right = i - 1
                is_char_flag = False
    # manually close the last unfinished character segment
    if area_right < char_left:
        area_right, char_right = img_w, img_w
        area_width = area_right - area_left
        char_width = char_right - char_left
        if (area_width > charWid_limit[0]) and (area_width < charWid_limit[1]):
            char_addr_list.append((area_left, area_right, char_width))
    return char_addr_list


def get_chars(car_plate):
    img_h, img_w = car_plate.shape[:2]
    h_proj_list = []  # list of horizontal projection runs
    h_temp_len, v_temp_len = 0, 0
    h_startIndex, h_end_index = 0, 0  # indices of the current projection run
    h_proj_limit = [0.2, 0.8]  # rows whose white-pixel ratio is below 20% or above 80% are filtered out
    char_imgs = []

    # Project the binary plate onto the Y axis and record the lengths of consecutive runs;
    # there may be more than one run.
    h_count = [0 for i in range(img_h)]
    for row in range(img_h):
        temp_cnt = 0
        for col in range(img_w):
            if car_plate[row, col] == 255:
                temp_cnt += 1
        h_count[row] = temp_cnt
        if temp_cnt / img_w < h_proj_limit[0] or temp_cnt / img_w > h_proj_limit[1]:
            if h_temp_len != 0:
                h_end_index = row - 1
                h_proj_list.append((h_startIndex, h_end_index))
                h_temp_len = 0
            continue
        if temp_cnt > 0:
            if h_temp_len == 0:
                h_startIndex = row
                h_temp_len = 1
            else:
                h_temp_len += 1
        else:
            if h_temp_len > 0:
                h_end_index = row - 1
                h_proj_list.append((h_startIndex, h_end_index))
                h_temp_len = 0

    # manually close the last run
    if h_temp_len != 0:
        h_end_index = img_h - 1
        h_proj_list.append((h_startIndex, h_end_index))

    # pick the longest run; it must cover more than 50% of the cropped plate height
    h_maxIndex, h_maxHeight = 0, 0
    for i, (start, end) in enumerate(h_proj_list):
        if h_maxHeight < (end - start):
            h_maxHeight = (end - start)
            h_maxIndex = i
    if h_maxHeight / img_h < 0.5:
        return char_imgs
    chars_top, chars_bottom = h_proj_list[h_maxIndex][0], h_proj_list[h_maxIndex][1]

    plates = car_plate[chars_top:chars_bottom + 1, :]
    cv2.imwrite('./carIdentityData/opencv_output/car.jpg', car_plate)
    cv2.imwrite('./carIdentityData/opencv_output/plate.jpg', plates)
    char_addr_list = horizontal_cut_chars(plates)

    for i, addr in enumerate(char_addr_list):
        char_img = car_plate[chars_top:chars_bottom + 1, addr[0]:addr[1]]
        char_img = cv2.resize(char_img, (char_w, char_h))
        char_imgs.append(char_img)
    return char_imgs


def extract_char(car_plate):
    gray_plate = cv2.cvtColor(car_plate, cv2.COLOR_BGR2GRAY)
    ret, binary_plate = cv2.threshold(gray_plate, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    char_img_list = get_chars(binary_plate)
    return char_img_list


def cnn_select_carPlate(plate_list, model_path):
    if len(plate_list) == 0:
        return False, plate_list
    g1 = tf.Graph()
    sess1 = tf.Session(graph=g1)
    with sess1.as_default():
        with sess1.graph.as_default():
            model_dir = os.path.dirname(model_path)
            saver = tf.train.import_meta_graph(model_path)
            saver.restore(sess1, tf.train.latest_checkpoint(model_dir))
            graph = tf.get_default_graph()
            net1_x_place = graph.get_tensor_by_name('x_place:0')
            net1_keep_place = graph.get_tensor_by_name('keep_place:0')
            net1_out = graph.get_tensor_by_name('out_put:0')

            input_x = np.array(plate_list)
            net_outs = tf.nn.softmax(net1_out)
            preds = tf.argmax(net_outs, 1)  # predicted class
            probs = tf.reduce_max(net_outs, reduction_indices=[1])  # prediction probability
            pred_list, prob_list = sess1.run([preds, probs],
                                             feed_dict={net1_x_place: input_x, net1_keep_place: 1.0})
            # keep the candidate plate with the highest probability
            result_index, result_prob = -1, 0.
            for i, pred in enumerate(pred_list):
                if pred == 1 and prob_list[i] > result_prob:
                    result_index, result_prob = i, prob_list[i]
            if result_index == -1:
                return False, plate_list[0]
            else:
                return True, plate_list[result_index]


def cnn_recongnize_char(img_list, model_path):
    g2 = tf.Graph()
    sess2 = tf.Session(graph=g2)
    text_list = []

    if len(img_list) == 0:
        return text_list
    with sess2.as_default():
        with sess2.graph.as_default():
            model_dir = os.path.dirname(model_path)
            saver = tf.train.import_meta_graph(model_path)
            saver.restore(sess2, tf.train.latest_checkpoint(model_dir))
            graph = tf.get_default_graph()
            net2_x_place = graph.get_tensor_by_name('x_place:0')
            net2_keep_place = graph.get_tensor_by_name('keep_place:0')
            net2_out = graph.get_tensor_by_name('out_put:0')

            data = np.array(img_list)
            # digits, letters and Chinese characters: the argmax of the 67-dim output is the prediction
            net_out = tf.nn.softmax(net2_out)
            preds = tf.argmax(net_out, 1)
            my_preds = sess2.run(preds, feed_dict={net2_x_place: data, net2_keep_place: 1.0})

            for i in my_preds:
                text_list.append(char_table[i])
            return text_list


if __name__ == '__main__':
    cur_dir = sys.path[0]
    car_plate_w, car_plate_h = 136, 36
    char_w, char_h = 20, 20
    plate_model_path = os.path.join(cur_dir, './carIdentityData/model/plate_recongnize/model.ckpt-540.meta')
    char_model_path = os.path.join(cur_dir, './carIdentityData/model/char_recongnize/model.ckpt-510.meta')

    img = cv2.imread('./carIdentityData/test/4.jpg')

    # preprocessing
    pred_img = pre_process(img)

    # plate localization
    car_plate_list = locate_carPlate(img, pred_img)

    # CNN plate filtering
    ret, car_plate = cnn_select_carPlate(car_plate_list, plate_model_path)
    if ret == False:
        print("No license plate detected")
        sys.exit(-1)
    cv2.imshow('cnn_plate', car_plate)

    # character extraction
    char_img_list = extract_char(car_plate)
    num = 0
    for plate_chars in char_img_list:
        cv2.imwrite('./carIdentityData/opencv_output/char' + str(num) + '.jpg', plate_chars)
        num = num + 1

    # CNN character recognition
    text = cnn_recongnize_char(char_img_list, char_model_path)
    print(text)

    cv2.waitKey(0)
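The pipeline above was written against the TensorFlow 1.x API (tf.placeholder, tf.Session, tf.train.Saver, tf.train.import_meta_graph); the checkpoints it loads are produced by the two training scripts that follow (the character CNN and the plate CNN). If only TensorFlow 2.x is available, a minimal compatibility sketch is shown below. It assumes the standard compat.v1 shim and is not part of the original code; it simply replaces the plain tensorflow import at the top of each of the three scripts.

# Minimal sketch, assuming TensorFlow 2.x is installed:
# the compat.v1 module keeps the TF1-style placeholder/Session/Saver calls working.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()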
import sys
import os
import numpy as np
import cv2
import tensorflow as tf
from sklearn.model_selection import train_test_split

numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
alphbets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T','U', 'V', 'W', 'X', 'Y', 'Z']
chinese = ['zh_cuan', 'zh_e', 'zh_gan', 'zh_gan1', 'zh_gui', 'zh_gui1', 'zh_hei', 'zh_hu', 'zh_ji', 'zh_jin',
           'zh_jing', 'zh_jl', 'zh_liao', 'zh_lu', 'zh_meng', 'zh_min', 'zh_ning', 'zh_qing', 'zh_qiong',
           'zh_shan', 'zh_su', 'zh_sx', 'zh_wan', 'zh_xiang', 'zh_xin', 'zh_yu', 'zh_yu1', 'zh_yue', 'zh_yun',
           'zh_zang', 'zh_zhe']


class char_cnn_net:
    def __init__(self):
        self.dataset = numbers + alphbets + chinese
        self.dataset_len = len(self.dataset)
        self.img_size = 20
        self.y_size = len(self.dataset)
        self.batch_size = 100

        self.x_place = tf.placeholder(dtype=tf.float32, shape=[None, self.img_size, self.img_size], name='x_place')
        self.y_place = tf.placeholder(dtype=tf.float32, shape=[None, self.y_size], name='y_place')
        self.keep_place = tf.placeholder(dtype=tf.float32, name='keep_place')

    def cnn_construct(self):
        x_input = tf.reshape(self.x_place, shape=[-1, 20, 20, 1])

        cw1 = tf.Variable(tf.random_normal(shape=[3, 3, 1, 32], stddev=0.01), dtype=tf.float32)
        cb1 = tf.Variable(tf.random_normal(shape=[32]), dtype=tf.float32)
        conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x_input, filter=cw1, strides=[1, 1, 1, 1], padding='SAME'), cb1))
        conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv1 = tf.nn.dropout(conv1, self.keep_place)

        cw2 = tf.Variable(tf.random_normal(shape=[3, 3, 32, 64], stddev=0.01), dtype=tf.float32)
        cb2 = tf.Variable(tf.random_normal(shape=[64]), dtype=tf.float32)
        conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, filter=cw2, strides=[1, 1, 1, 1], padding='SAME'), cb2))
        conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv2 = tf.nn.dropout(conv2, self.keep_place)

        cw3 = tf.Variable(tf.random_normal(shape=[3, 3, 64, 128], stddev=0.01), dtype=tf.float32)
        cb3 = tf.Variable(tf.random_normal(shape=[128]), dtype=tf.float32)
        conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, filter=cw3, strides=[1, 1, 1, 1], padding='SAME'), cb3))
        conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv3 = tf.nn.dropout(conv3, self.keep_place)

        # after three 2x2 poolings a 20x20 input becomes 3x3x128
        conv_out = tf.reshape(conv3, shape=[-1, 3 * 3 * 128])

        fw1 = tf.Variable(tf.random_normal(shape=[3 * 3 * 128, 1024], stddev=0.01), dtype=tf.float32)
        fb1 = tf.Variable(tf.random_normal(shape=[1024]), dtype=tf.float32)
        fully1 = tf.nn.relu(tf.add(tf.matmul(conv_out, fw1), fb1))
        fully1 = tf.nn.dropout(fully1, self.keep_place)

        fw2 = tf.Variable(tf.random_normal(shape=[1024, 1024], stddev=0.01), dtype=tf.float32)
        fb2 = tf.Variable(tf.random_normal(shape=[1024]), dtype=tf.float32)
        fully2 = tf.nn.relu(tf.add(tf.matmul(fully1, fw2), fb2))
        fully2 = tf.nn.dropout(fully2, self.keep_place)

        fw3 = tf.Variable(tf.random_normal(shape=[1024, self.dataset_len], stddev=0.01), dtype=tf.float32)
        fb3 = tf.Variable(tf.random_normal(shape=[self.dataset_len]), dtype=tf.float32)
        fully3 = tf.add(tf.matmul(fully2, fw3), fb3, name='out_put')

        return fully3

    def train(self, data_dir, save_model_path):
        print('ready load train dataset')
        X, y = self.init_data(data_dir)
        print('success load ' + str(len(y)) + ' datas')
        train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=0)

        out_put = self.cnn_construct()
        predicts = tf.nn.softmax(out_put)
        predicts = tf.argmax(predicts, axis=1)
        actual_y = tf.argmax(self.y_place, axis=1)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(predicts, actual_y), dtype=tf.float32))
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out_put, labels=self.y_place))
        opt = tf.train.AdamOptimizer(learning_rate=0.001)
        train_step = opt.minimize(cost)

        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            step = 0
            saver = tf.train.Saver()
            while True:
                train_index = np.random.choice(len(train_x), self.batch_size, replace=False)
                train_randx = train_x[train_index]
                train_randy = train_y[train_index]
                _, loss = sess.run([train_step, cost],
                                   feed_dict={self.x_place: train_randx, self.y_place: train_randy,
                                              self.keep_place: 0.75})
                step += 1

                if step % 10 == 0:
                    test_index = np.random.choice(len(test_x), self.batch_size, replace=False)
                    test_randx = test_x[test_index]
                    test_randy = test_y[test_index]
                    acc = sess.run(accuracy, feed_dict={self.x_place: test_randx, self.y_place: test_randy,
                                                        self.keep_place: 1.0})
                    print(step, loss)
                    if step % 50 == 0:
                        print('accuracy:' + str(acc))
                    if step % 500 == 0:
                        saver.save(sess, save_model_path, global_step=step)
                    if acc > 0.99 and step > 500:
                        saver.save(sess, save_model_path, global_step=step)
                        break

    def test(self, x_images, model_path):
        text_list = []
        out_put = self.cnn_construct()
        predicts = tf.nn.softmax(out_put)
        predicts = tf.argmax(predicts, axis=1)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, model_path)
            preds = sess.run(predicts, feed_dict={self.x_place: x_images, self.keep_place: 1.0})
            for i in range(len(preds)):
                pred = preds[i].astype(int)
                text_list.append(self.dataset[pred])
            return text_list

    def list_all_files(self, root):
        files = []
        list = os.listdir(root)
        for i in range(len(list)):
            element = os.path.join(root, list[i])
            if os.path.isdir(element):
                temp_dir = os.path.split(element)[-1]
                if temp_dir in self.dataset:
                    files.extend(self.list_all_files(element))
            elif os.path.isfile(element):
                files.append(element)
        return files

    def init_data(self, dir):
        X = []
        y = []
        if not os.path.exists(dir):
            raise ValueError('training data directory not found')
        files = self.list_all_files(dir)

        for file in files:
            src_img = cv2.imread(file, cv2.COLOR_BGR2GRAY)
            if src_img.ndim == 3:
                continue
            resize_img = cv2.resize(src_img, (20, 20))
            X.append(resize_img)
            # full directory of the image file
            dir = os.path.dirname(file)
            # the parent directory name is the class label
            dir_name = os.path.split(dir)[-1]
            vector_y = [0 for i in range(len(self.dataset))]
            index_y = self.dataset.index(dir_name)
            vector_y[index_y] = 1
            y.append(vector_y)

        X = np.array(X)
        y = np.array(y).reshape(-1, self.dataset_len)
        return X, y

    def init_testData(self, dir):
        test_X = []
        if not os.path.exists(dir):
            raise ValueError('test data directory not found')
        files = self.list_all_files(dir)
        for file in files:
            src_img = cv2.imread(file, cv2.COLOR_BGR2GRAY)
            if src_img.ndim == 3:
                continue
            resize_img = cv2.resize(src_img, (20, 20))
            test_X.append(resize_img)
        test_X = np.array(test_X)
        return test_X


if __name__ == '__main__':
    cur_dir = sys.path[0]
    data_dir = os.path.join(cur_dir, 'carIdentityData/cnn_char_train')
    test_dir = os.path.join(cur_dir, 'carIdentityData/cnn_char_test')
    train_model_path = os.path.join(cur_dir, './carIdentityData/model/char_recongnize/model.ckpt')
    model_path = os.path.join(cur_dir, './carIdentityData/model/char_recongnize/model.ckpt-580')

    train_flag = 1
    net = char_cnn_net()

    if train_flag == 1:
        # train the model
        net.train(data_dir, train_model_path)
    else:
        # test
        test_X = net.init_testData(test_dir)
        text = net.test(test_X, model_path)
        print(text)
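In the character-CNN training script above, init_data() labels each 20x20 character image by the name of the folder it sits in, and list_all_files() only descends into folders whose names appear in the 67-entry dataset list (numbers + alphbets + chinese). The small sanity-check sketch below assumes the layout that code implies, i.e. one sub-folder per class name under carIdentityData/cnn_char_train; it is an inference from the code, not something stated elsewhere in the post. The plate-classifier training script follows after it.

# Hypothetical sanity check, assuming one sub-folder per class name under the training directory.
import os

data_dir = 'carIdentityData/cnn_char_train'
dataset = numbers + alphbets + chinese  # the 67 class names defined in the training script
present = [name for name in dataset if os.path.isdir(os.path.join(data_dir, name))]
missing = [name for name in dataset if name not in present]
counts = {name: len(os.listdir(os.path.join(data_dir, name))) for name in present}
print('classes found:', len(present), 'missing:', missing)
print('samples per class:', counts)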
import sys
import os
import numpy as np
import cv2
import tensorflow as tf
from sklearn.model_selection import train_test_split


class plate_cnn_net:
    def __init__(self):
        self.img_w, self.img_h = 136, 36
        self.y_size = 2
        self.batch_size = 100
        self.learn_rate = 0.001

        self.x_place = tf.placeholder(dtype=tf.float32, shape=[None, self.img_h, self.img_w, 3], name='x_place')
        self.y_place = tf.placeholder(dtype=tf.float32, shape=[None, self.y_size], name='y_place')
        self.keep_place = tf.placeholder(dtype=tf.float32, name='keep_place')

    def cnn_construct(self):
        x_input = tf.reshape(self.x_place, shape=[-1, self.img_h, self.img_w, 3])

        cw1 = tf.Variable(tf.random_normal(shape=[3, 3, 3, 32], stddev=0.01), dtype=tf.float32)
        cb1 = tf.Variable(tf.random_normal(shape=[32]), dtype=tf.float32)
        conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x_input, filter=cw1, strides=[1, 1, 1, 1], padding='SAME'), cb1))
        conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv1 = tf.nn.dropout(conv1, self.keep_place)

        cw2 = tf.Variable(tf.random_normal(shape=[3, 3, 32, 64], stddev=0.01), dtype=tf.float32)
        cb2 = tf.Variable(tf.random_normal(shape=[64]), dtype=tf.float32)
        conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, filter=cw2, strides=[1, 1, 1, 1], padding='SAME'), cb2))
        conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv2 = tf.nn.dropout(conv2, self.keep_place)

        cw3 = tf.Variable(tf.random_normal(shape=[3, 3, 64, 128], stddev=0.01), dtype=tf.float32)
        cb3 = tf.Variable(tf.random_normal(shape=[128]), dtype=tf.float32)
        conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, filter=cw3, strides=[1, 1, 1, 1], padding='SAME'), cb3))
        conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv3 = tf.nn.dropout(conv3, self.keep_place)

        # after three 2x2 poolings a 36x136 input becomes 5x17x128
        conv_out = tf.reshape(conv3, shape=[-1, 17 * 5 * 128])

        fw1 = tf.Variable(tf.random_normal(shape=[17 * 5 * 128, 1024], stddev=0.01), dtype=tf.float32)
        fb1 = tf.Variable(tf.random_normal(shape=[1024]), dtype=tf.float32)
        fully1 = tf.nn.relu(tf.add(tf.matmul(conv_out, fw1), fb1))
        fully1 = tf.nn.dropout(fully1, self.keep_place)

        fw2 = tf.Variable(tf.random_normal(shape=[1024, 1024], stddev=0.01), dtype=tf.float32)
        fb2 = tf.Variable(tf.random_normal(shape=[1024]), dtype=tf.float32)
        fully2 = tf.nn.relu(tf.add(tf.matmul(fully1, fw2), fb2))
        fully2 = tf.nn.dropout(fully2, self.keep_place)

        fw3 = tf.Variable(tf.random_normal(shape=[1024, self.y_size], stddev=0.01), dtype=tf.float32)
        fb3 = tf.Variable(tf.random_normal(shape=[self.y_size]), dtype=tf.float32)
        fully3 = tf.add(tf.matmul(fully2, fw3), fb3, name='out_put')

        return fully3

    def train(self, data_dir, model_save_path):
        print('ready load train dataset')
        X, y = self.init_data(data_dir)
        print('success load ' + str(len(y)) + ' datas')
        train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=0)

        out_put = self.cnn_construct()
        predicts = tf.nn.softmax(out_put)
        predicts = tf.argmax(predicts, axis=1)
        actual_y = tf.argmax(self.y_place, axis=1)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(predicts, actual_y), dtype=tf.float32))
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out_put, labels=self.y_place))
        opt = tf.train.AdamOptimizer(self.learn_rate)
        train_step = opt.minimize(cost)

        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            step = 0
            saver = tf.train.Saver()
            while True:
                train_index = np.random.choice(len(train_x), self.batch_size, replace=False)
                train_randx = train_x[train_index]
                train_randy = train_y[train_index]
                _, loss = sess.run([train_step, cost],
                                   feed_dict={self.x_place: train_randx, self.y_place: train_randy,
                                              self.keep_place: 0.75})
                step += 1
                print(step, loss)

                if step % 10 == 0:
                    test_index = np.random.choice(len(test_x), self.batch_size, replace=False)
                    test_randx = test_x[test_index]
                    test_randy = test_y[test_index]
                    acc = sess.run(accuracy, feed_dict={self.x_place: test_randx, self.y_place: test_randy,
                                                        self.keep_place: 1.0})
                    print('accuracy:' + str(acc))
                    if acc > 0.99 and step > 500:
                        saver.save(sess, model_save_path, global_step=step)
                        break

    def test(self, x_images, model_path):
        out_put = self.cnn_construct()
        predicts = tf.nn.softmax(out_put)
        probabilitys = tf.reduce_max(predicts, reduction_indices=[1])
        predicts = tf.argmax(predicts, axis=1)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, model_path)
            preds, probs = sess.run([predicts, probabilitys],
                                    feed_dict={self.x_place: x_images, self.keep_place: 1.0})
            return preds, probs

    def list_all_files(self, root):
        files = []
        list = os.listdir(root)
        for i in range(len(list)):
            element = os.path.join(root, list[i])
            if os.path.isdir(element):
                files.extend(self.list_all_files(element))
            elif os.path.isfile(element):
                files.append(element)
        return files

    def init_data(self, dir):
        X = []
        y = []
        if not os.path.exists(dir):
            raise ValueError('training data directory not found')
        files = self.list_all_files(dir)
        labels = [os.path.split(os.path.dirname(file))[-1] for file in files]

        for i, file in enumerate(files):
            src_img = cv2.imread(file)
            if src_img.ndim != 3:
                continue
            resize_img = cv2.resize(src_img, (136, 36))
            X.append(resize_img)
            # the 'has' sub-folder holds positive plate samples, everything else is negative
            y.append([[0, 1] if labels[i] == 'has' else [1, 0]])

        X = np.array(X)
        y = np.array(y).reshape(-1, 2)
        return X, y

    def init_testData(self, dir):
        test_X = []
        if not os.path.exists(dir):
            raise ValueError('test data directory not found')
        files = self.list_all_files(dir)
        for file in files:
            src_img = cv2.imread(file, cv2.COLOR_BGR2GRAY)
            if src_img.ndim != 3:
                continue
            resize_img = cv2.resize(src_img, (136, 36))
            test_X.append(resize_img)
        test_X = np.array(test_X)
        return test_X


if __name__ == '__main__':
    cur_dir = sys.path[0]
    data_dir = os.path.join(cur_dir, './carIdentityData/cnn_plate_train')
    test_dir = os.path.join(cur_dir, './carIdentityData/cnn_plate_test')
    train_model_path = os.path.join(cur_dir, './carIdentityData/model/plate_recongnize/model.ckpt')
    model_path = os.path.join(cur_dir, './carIdentityData/model/plate_recongnize/model.ckpt-520')

    train_flag = 1
    net = plate_cnn_net()

    if train_flag == 1:
        # train the model
        net.train(data_dir, train_model_path)
    else:
        # test
        test_X = net.init_testData(test_dir)
        preds, probs = net.test(test_X, model_path)
        for i in range(len(preds)):
            pred = preds[i].astype(int)
            prob = probs[i]
            if pred == 1:
                print('plate', prob)
            else:
                print('no', prob)
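Both training scripts save checkpoints with a step suffix (saver.save(..., global_step=step)), while the recognition script hard-codes model.ckpt-540.meta and model.ckpt-510.meta. The exact step depends on when accuracy first exceeds 0.99, so those paths may need editing after retraining. The small sketch below shows how tf.train.latest_checkpoint can resolve the newest checkpoint instead of hard-coding the step; this is a suggestion, not part of the original code.

# Sketch: resolve the newest checkpoint in a model directory instead of hard-coding the step.
import tensorflow.compat.v1 as tf

model_dir = './carIdentityData/model/plate_recongnize'
ckpt = tf.train.latest_checkpoint(model_dir)      # e.g. './carIdentityData/model/plate_recongnize/model.ckpt-540'
meta_path = ckpt + '.meta' if ckpt else None      # this is what tf.train.import_meta_graph expects
print(meta_path)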
