Python Facial Expression Recognition in a Qt Window


Running result (screenshot not included in this text version):

Main code:

# coding:utf-8
import sys
import cv2
import numpy as np
import tensorflow as tf                    # the demo() code below uses the TF 1.x API (tf.placeholder, tf.Session)
from untitled import Ui_Dialog             # UI class imported from the .py file converted from the Qt Designer .ui file
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QFileDialog    # added: QFileDialog is used below but was not imported in the original post
from PyQt5.QtGui import QPixmap            # added: QPixmap is used below but was not imported in the original post


class myWin(QtWidgets.QWidget, Ui_Dialog):
    def __init__(self):
        super(myWin, self).__init__()
        self.setupUi(self)

    def openFileButton(self):
        imgName, imgType = QFileDialog.getOpenFileName(self, "打开文件", "./", "files(*.*)")
        img = cv2.imread(imgName)
        cv2.imwrite("temp/original.jpg", img)
        height, width, pixels = img.shape
        print("width,height", width, height)
        print("self.label.width()", self.label.width())
        print("self.label.height()", self.label.height())
        # NOTE: the original post omits the if/elif logic that computes rwidth/rheight;
        # fitting the image to the label is used here as a stand-in.
        rwidth, rheight = self.label.width(), self.label.height()
        frame = cv2.resize(img, (int(rwidth), int(rheight)))
        print("rwidth-elif,rheight-elfi", rwidth, rheight)
        img2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # convert OpenCV's BGR image to RGB
        _image = QtGui.QImage(img2[:], img2.shape[1], img2.shape[0], img2.shape[1] * 3,
                              QtGui.QImage.Format_RGB888)
        jpg_out = QtGui.QPixmap(_image).scaled(rwidth, rheight)  # scale the pixmap
        self.label.setPixmap(jpg_out)                            # show the image in the label

    def saveFileButton(self):
        img = cv2.imread("temp/original.jpg")
        file_path = QFileDialog.getSaveFileName(self, "save file", "./save/test",
                                                "jpg files (*.jpg);;all files(*.*)")
        print(file_path[0])
        cv2.imwrite(file_path[0], img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def recogPerson(self):
        import os
        img = cv2.imread("temp/original.jpg")
        cv2.imwrite("save/recognPerson2.jpg", img)
        face_detect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        eye_detect = cv2.CascadeClassifier('haarcascade_eye.xml')
        # convert to grayscale
        gray = cv2.cvtColor(img, code=cv2.COLOR_BGR2GRAY)
        # detect faces: scale factor 1.3, at least 5 neighbours
        face_zone = face_detect.detectMultiScale(gray, 1.3, 5)
        # print('detected face info:\n', face_zone)
        l = len(face_zone)
        ints = 0
        # draw a rectangle and a circle around each detected face
        for x, y, w, h in face_zone:
            ints += 1
            if w < 1000:
                cv2.rectangle(img, pt1=(x, y), pt2=(x + w, y + h), color=[0, 0, 255], thickness=2)
                # circle around the face region; radius is half the face width
                cv2.circle(img, center=(x + w // 2, y + h // 2), radius=w // 2, color=[0, 255, 0], thickness=2)
            roi_face = gray[y:y + h, x:x + w]   # grayscale face region
            roi_color = img[y:y + h, x:x + w]   # colour face region
            eyes = eye_detect.detectMultiScale(roi_face)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        cv2.imwrite("save/recognPerson.jpg", img)
        # cv2.waitKey(0)
        # show the number of detected faces in the text box
        self.textEdit.setPlainText(str(ints))
        # self.textEdit.setPlainText('Hello PyQt5!\n单击按钮')
        rwidth, rheight = self.label_2.width(), self.label_2.height()   # see the note in openFileButton()
        frame = cv2.resize(img, (int(rwidth), int(rheight)))
        img2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
        _image = QtGui.QImage(img2[:], img2.shape[1], img2.shape[0], img2.shape[1] * 3,
                              QtGui.QImage.Format_RGB888)
        jpg_out = QtGui.QPixmap(_image).scaled(rwidth, rheight)  # scale the pixmap
        self.label_2.setPixmap(QPixmap(""))
        self.label_2.setPixmap(jpg_out)                          # show the annotated image

        # ---- emotion recognition ----
        CASC_PATH = './data/haarcascade_files/haarcascade_frontalface_default.xml'
        cascade_classifier = cv2.CascadeClassifier(CASC_PATH)
        EMOTIONS = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral']

        def format_image(image):
            if len(image.shape) > 2 and image.shape[2] == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = cascade_classifier.detectMultiScale(image, scaleFactor=1.3, minNeighbors=5)
            # no face found in the image
            if not len(faces) > 0:
                return None, None
            max_are_face = faces[0]
            for face in faces:
                if face[2] * face[3] > max_are_face[2] * max_are_face[3]:
                    max_are_face = face
            # crop the largest face; face_coor is (x, y, w, h)
            face_coor = max_are_face
            image = image[face_coor[1]:(face_coor[1] + face_coor[3]),
                          face_coor[0]:(face_coor[0] + face_coor[2])]   # rows use the height, columns the width
            # resize to the network input size
            try:
                image = cv2.resize(image, (48, 48), interpolation=cv2.INTER_CUBIC)
            except Exception:
                print("[+] Problem during resize")
                return None, None
            return image, face_coor

        def face_dect(image):
            """Detect faces in an image.
            :param image:
            :return: the cropped region of the largest face
            """
            if len(image.shape) > 2 and image.shape[2] == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = cascade_classifier.detectMultiScale(image, scaleFactor=1.3, minNeighbors=5)
            if not len(faces) > 0:
                return None
            max_face = faces[0]
            for face in faces:
                if face[2] * face[3] > max_face[2] * max_face[3]:
                    max_face = face
            face_image = image[max_face[1]:(max_face[1] + max_face[3]),
                               max_face[0]:(max_face[0] + max_face[2])]
            try:
                image = cv2.resize(face_image, (48, 48), interpolation=cv2.INTER_CUBIC) / 255.
            except Exception:
                print("[+] Problem during resize")
                return None
            return face_image

        def resize_image(image, size):
            try:
                image = cv2.resize(image, size, interpolation=cv2.INTER_CUBIC) / 255.
            except Exception:
                print("[+] Problem during resize")
                return None
            return image

        def draw_emotion():
            pass

        def demo(modelPath, showBox=False):
            # deepnn() and image_to_tensor() come from the project's CNN model module,
            # which is not included in the post (see the sketch after this listing).
            face_x = tf.placeholder(tf.float32, [None, 2304])   # 2304 = 48 * 48 grayscale pixels
            y_conv = deepnn(face_x)
            probs = tf.nn.softmax(y_conv)
            print("test3")
            saver = tf.train.Saver()
            ckpt = tf.train.get_checkpoint_state(modelPath)
            sess = tf.Session()
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Restore model success!\nNOTE: Press SPACE on keyboard to capture face.')
            feelings_faces = []
            for index, emotion in enumerate(EMOTIONS):
                feelings_faces.append(cv2.imread('./data/emojis/' + emotion + '.png', -1))
            # video_captor = cv2.VideoCapture(0)

            # NOTE: the posted snippet never assigns `tensor`; the commented-out loop below
            # shows it coming from image_to_tensor(detected_face). The face image saved by
            # recogPerson() is used here as a stand-in.
            detected_face, face_coor = format_image(cv2.imread("save/recognPerson.jpg"))
            tensor = image_to_tensor(detected_face)
            result = sess.run(probs, feed_dict={face_x: tensor})
            print("result: ", result)
            print("test4")

            # write each emotion and its probability to recv.txt
            if os.path.exists("recv.txt"):
                os.remove("recv.txt")
            if os.path.exists("recv2.txt"):
                os.remove("recv2.txt")
            for index, emotion in enumerate(EMOTIONS):
                rate = result[0][index]   # `rate` was left undefined in the original post
                file = open('recv.txt', 'a')
                file.write(str(emotion) + ": " + str(rate) + ", ")
                file.write(";")
                file.close()
            # reformat the result, one emotion per line, and show it in the window
            file5 = open("recv.txt", 'rt')
            contents = file5.read()
            data = contents.replace(";", ";\n")
            fin = open('recv2.txt', "wt")
            fin.write(data)
            fin.close()
            with open("recv2.txt", "r") as file2:
                path = file2.read()
                print("path ", path)
            self.textEdit_2.setPlainText(path)
            tf.reset_default_graph()

            # Original commented-out real-time webcam loop from the post:
            # emoji_face = []
            # result = None
            # while True:
            #     # ret, frame = video_captor.read()
            #     frame = cv2.imread("save/recognPerson.jpg")
            #     detected_face, face_coor = format_image(frame)
            #     if showBox:
            #         if face_coor is not None:
            #             [x, y, w, h] = face_coor
            #             cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            #     if cv2.waitKey(1) & 0xFF == ord(' '):
            #         if detected_face is not None:
            #             # cv2.imwrite('a.jpg', detected_face)
            #             tensor = image_to_tensor(detected_face)
            #             result = sess.run(probs, feed_dict={face_x: tensor})
            #             print("result: ", result)
            #     if result is not None:
            #         for index, emotion in enumerate(EMOTIONS):
            #             print("emotion: ", emotion)
            #             print("index: ", index)
            #             cv2.putText(frame, emotion, (10, index * 20 + 20), cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 255, 0), 1)
            #             cv2.rectangle(frame, (130, index * 20 + 10),
            #                           (130 + int(result[0][index] * 100), (index + 1) * 20 + 4),
            #                           (255, 0, 0), -1)
            #             emoji_face = feelings_faces[np.argmax(result[0])]
            #             print("int(result[0][index]: ", int(result[0][index] * 100))
            #         for c in range(0, 3):
            #             frame[200:320, 10:130, c] = emoji_face[:, :, c] * (emoji_face[:, :, 3] / 255.0) + \
            #                                         frame[200:320, 10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)
            #     cv2.imshow('face', frame)
            #     if cv2.waitKey(1) & 0xFF == ord('q'):
            #         break

        # this runs when the button slot is invoked, because __name__ refers to the
        # enclosing module, which is "__main__" when the script is launched directly
        if __name__ == '__main__':
            modelPath = "ckpt"
            demo(modelPath)


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    Widget = myWin()
    Widget.showMaximized()
    Widget.show()
    sys.exit(app.exec_())
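The listing calls two helpers that are not included in the post: deepnn, which builds the CNN mapping a flattened 48×48 grayscale face (2304 values, matching the [None, 2304] placeholder) to 7 emotion logits, and image_to_tensor, which converts the cropped face into that [1, 2304] input. Both come from the project's model module. The sketch below is only a hypothetical, minimal stand-in written against the TensorFlow 1.x API, with layer sizes assumed, so the snippet can be read end to end; it is not the author's actual network.

import numpy as np
import tensorflow as tf   # TF 1.x API, matching the listing above


def image_to_tensor(image):
    """Flatten a 48x48 grayscale face into the [1, 2304] float array fed to face_x."""
    return np.asarray(image, dtype=np.float32).reshape(1, 48 * 48)


def deepnn(x):
    """Hypothetical small CNN: two conv/pool blocks, one dense layer, 7 emotion logits."""
    x_image = tf.reshape(x, [-1, 48, 48, 1])
    conv1 = tf.layers.conv2d(x_image, 64, 5, padding='same', activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(conv1, 3, 2, padding='same')   # 48x48 -> 24x24
    conv2 = tf.layers.conv2d(pool1, 64, 3, padding='same', activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(conv2, 3, 2, padding='same')   # 24x24 -> 12x12
    flat = tf.reshape(pool2, [-1, 12 * 12 * 64])
    dense = tf.layers.dense(flat, 384, activation=tf.nn.relu)
    logits = tf.layers.dense(dense, 7)   # one logit per entry in EMOTIONS
    return logits

With stand-ins like these in place, demo("ckpt") expects a trained checkpoint under ckpt/ whose variables match whatever network was actually used for training; a mismatched deepnn definition will fail at saver.restore().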

Running result (screenshot not included in this text version):
