1.增加人脸识别模块

2.人脸识别整定优化,分离串口配置
3.改为使用原有uart_group_config.py文件实现配置连接,保持原有断线重连
This commit is contained in:
冯佳
2025-09-04 16:22:33 +08:00
parent 25b3cb7f2e
commit 79f733126b
101 changed files with 19115 additions and 326 deletions

View File

@ -1,281 +1,31 @@
# import sys
# import cv2
# import os
# import time
# from PyQt5.QtCore import QThread, pyqtSignal, QObject
# import face_recognition
# current_dir = os.path.dirname(__file__)
# share_code_dir = os.path.abspath(os.path.join(current_dir, '..', '..'))
# sys.path.append(share_code_dir)
# from print_color import *
# DEFAULT_VIDEO_SLEEP_MS = 20
# # 图像处理线程
# class ImageProcessingThread(QThread):
# processed_image_signal = pyqtSignal(object)
# def __init__(self):
# super().__init__()
# def run(self):
# while True:
# # 在这里添加图像处理代码
# # 这里暂时只是将图像传递给下一个线程
# time.sleep(0.01)
# self.processed_image_signal.emit(None)
# # 摄像头采集线程
# class CameraThread(QThread):
# image_signal = pyqtSignal(object)
# def __init__(self, camera_url, circuit_id: int = 0):
# super().__init__()
# self.camera_url = camera_url # 摄像头url地址, 整数 或者 字符串
# self.face_detection: bool = False
# if isinstance(camera_url, int):
# self.camera_url_str = str(camera_url)
# else:
# self.camera_url_str = camera_url
# self.cap: cv2.VideoCapture = None
# self.running: bool = True
# self.fps = 0
# self.cycle_limit = DEFAULT_VIDEO_SLEEP_MS
# self.cycle_ms = 0
# self.circuit_id = circuit_id # 摄像头对应的回路, 组合开关每一个回路都有对应的摄像头
# self.is_signal_connect = False
# self.is_emit = False
# self.set_video_cycle_ms(2000)
# def signal_connect(self, slot_func):
# self.image_signal.connect(slot_func)
# self.is_signal_connect = True
# def signal_disconnect(self):
# # 判断信号是否已连接
# if self.is_signal_connect:
# self.image_signal.disconnect()
# self.is_signal_connect = False
# def set_video_cycle_ms(self, cycle_ms: int = 10):
# if self.cycle_limit != cycle_ms:
# if cycle_ms <= DEFAULT_VIDEO_SLEEP_MS:
# self.cycle_limit = DEFAULT_VIDEO_SLEEP_MS
# else:
# self.cycle_limit = cycle_ms
# # 新建函数,改变函数延时方法
# def change_camera_url(self, camera_url: str):
# if self.circuit_id == camera_url:
# self.set_video_cycle_ms(DEFAULT_VIDEO_SLEEP_MS)
# else:
# self.set_video_cycle_ms(1000)
# def close(self):
# if self.cap == None: # 初始化一直未完成
# self.terminate()
# self.running = False
# def run(self):
# process_count = 0
# fps_time = 0
# while self.running == True:
# inform_msg = "cameral init start, url = " + self.camera_url_str
# print_inform_msg(inform_msg)
# try:
# if isinstance(self.camera_url, int):
# # 在 Windows 平台下,使用默认的 cv2.VideoCapture 接口
# self.cap = cv2.VideoCapture(self.camera_url, cv2.CAP_DSHOW)
# else:
# # 在 Linux 平台下,使用默认的 cv2.VideoCapture 接口
# self.cap = cv2.VideoCapture(self.camera_url)
# if self.cap != None:
# inform_msg = "cameral Init Success, url = " + self.camera_url_str
# self.is_emit = True
# print_inform_msg(inform_msg)
# else:
# inform_msg = "cameral connection timeout, url = " + self.camera_url_str
# print_inform_msg(inform_msg)
# except Exception as e:
# self.cap = None
# inform_msg = "cameral camera Init Fail, url = " + self.camera_url_str
# print_error_msg(inform_msg)
# base_time = time.time()
# while self.running and self.cap != None:
# # 延时20ms
# time.sleep(DEFAULT_VIDEO_SLEEP_MS / 1000)
# if self.cycle_ms + DEFAULT_VIDEO_SLEEP_MS < self.cycle_limit:
# self.cycle_ms += DEFAULT_VIDEO_SLEEP_MS
# self.cap.grab() # 抛弃多余的帧,保持最新帧
# continue
# else:
# self.cycle_ms = 0
# try:
# ret, frame = self.cap.read()
# cur_time = time.time()
# execution_time = cur_time - base_time
# base_time = cur_time
# process_count += 1
# fps_time += execution_time
# if fps_time >= 1:
# self.fps = process_count
# process_count = 0
# fps_time = 0
# if execution_time < 5:
# image_object: QObject = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# self.image_signal.emit(image_object)
# else: # 时间差大于5秒, 表示网络可能中断过, 退出并重新连接
# err_message = "cameral read timeout, url = " + self.camera_url_str
# print_error_msg(err_message)
# break
# except Exception as e:
# err_message = "cameral read timeout, url = " + self.camera_url_str
# self.is_emit = False
# print_error_msg(err_message)
# if self.running == True:
# time.sleep(2)
# break
# print_inform_msg("cameral connection End")
# time.sleep(0.01)
# if self.cap != None:
# self.cap.release()
# self.cap = None
# - * - coding:utf - 8 - * -
import sys
import cv2
import os
import time
import subprocess
import face_recognition
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from PyQt5.QtCore import QThread, pyqtSignal, QObject
from print_color import *
import face_recognition
current_dir = os.path.dirname(__file__)
share_code_dir = os.path.abspath(os.path.join(current_dir, '..', '..'))
sys.path.append(share_code_dir)
from print_color import *
DEFAULT_VIDEO_SLEEP_MS = 20
# 定义路径常量
npy_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'face_data/face_data.npy'))
image_folder_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'face_data'))
font_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'font/hanzi.ttc'))
def load_face_data():
    """Load known face encodings and sync them with the .jpg files on disk.

    Encodings are cached in ``face_data.npy``; any .jpg added to (or removed
    from) the face_data folder is encoded (or dropped) and the cache file is
    rewritten only when the image set actually changed (the previous version
    rewrote it on every call).

    Returns a dict mapping person name (image file stem) -> encoding list.
    """
    face_data = {}
    try:
        face_data = np.load(npy_file_path, allow_pickle=True).item()
        print_debug_msg(f"Loaded {len(face_data)} face encodings from {npy_file_path}.")
    except Exception as e:
        # Missing/corrupt cache: fall through and rebuild from the images.
        print_error_msg(f"Error loading face encodings: {e}")
    current_images = [f for f in os.listdir(image_folder_path) if f.endswith('.jpg')]
    current_image_set = set(os.path.splitext(f)[0] for f in current_images)
    known_image_set = set(face_data.keys())
    added_images = current_image_set - known_image_set
    removed_images = known_image_set - current_image_set
    for image_name in added_images:
        image_path = os.path.join(image_folder_path, f"{image_name}.jpg")
        try:
            image = face_recognition.load_image_file(image_path)
            face_encodings = face_recognition.face_encodings(image)
            if face_encodings:
                # Keep only the first face found in each reference photo.
                face_data[image_name] = face_encodings[0].tolist()
                print_debug_msg(f"Added encoding for {image_name}.")
            else:
                print_warning_msg(f"[WARNING] No face detected in {image_name}.jpg.")
        except Exception as e:
            print_error_msg(f"[ERROR] Error processing {image_name}.jpg: {e}")
    for removed_image in removed_images:
        del face_data[removed_image]
        print_debug_msg(f"Removed encoding for {removed_image}.")
    if added_images or removed_images:
        # Only touch the cache file when something actually changed.
        np.save(npy_file_path, face_data)
        print_debug_msg(f"Updated face data saved with {len(face_data)} entries.")
    return face_data
# Image processing thread: matches faces on the latest camera frame.
class ImageProcessingThread(QThread):
    processed_image_signal = pyqtSignal(object)

    def __init__(self, frame=None):
        """Load the known-face database and prepare recognition state.

        frame: optional initial frame; normally supplied later via
        set_frame() by the capture thread.
        """
        super().__init__()
        self.frame = frame  # latest frame handed over by the camera thread
        self.face_encodings = []
        self.face_names = []      # names matched in the last processed frame
        self.face_locations = []  # (top, right, bottom, left) boxes of that frame
        self.face_data = load_face_data()
        self.total_image_name = list(self.face_data.keys())
        self.total_face_encoding = [np.array(self.face_data[name]) for name in self.total_image_name]

    def process_frame(self, frame):
        """Locate all faces in *frame* and match them against known encodings."""
        self.face_locations = face_recognition.face_locations(frame)
        face_encodings = face_recognition.face_encodings(frame, self.face_locations)
        names = []
        for face_encoding in face_encodings:
            name = "Unknown"
            face_distances = face_recognition.face_distance(self.total_face_encoding, face_encoding)
            if face_distances.size > 0:
                min_distance = np.min(face_distances)
                best_match_index = np.argmin(face_distances)
                if min_distance < self.dynamic_tolerance():  # adaptive threshold
                    name = self.total_image_name[best_match_index]
            names.append(name)
        self.face_names = names
        return frame

    def dynamic_tolerance(self):
        """Adapt the match tolerance to the recent recognition error rate."""
        error_rate = self.calculate_error_rate()
        if error_rate > 0.1:
            return 0.5  # looser tolerance to reduce misses
        else:
            return 0.4  # moderate tolerance

    def calculate_error_rate(self):
        """Fraction of faces left unmatched; drives dynamic_tolerance()."""
        total_faces = len(self.total_face_encoding)
        unmatched_faces = sum(1 for name in self.face_names if name == "Unknown")
        return unmatched_faces / total_faces if total_faces > 0 else 0

    def set_frame(self, frame):
        """Hand the latest camera frame to this thread."""
        self.frame = frame

    def run(self):
        # Continuously process whatever the newest frame is; the producer
        # simply replaces self.frame, so stale frames are skipped naturally.
        while True:
            if self.frame is not None:
                self.process_frame(self.frame)
                self.processed_image_signal.emit(self.frame)
            time.sleep(0.01)  # cap the processing rate
# Camera capture thread: reads frames, overlays face-recognition results
# and emits annotated frames via image_signal. Reconnects on stream loss.
class CameraThread(QThread):
    image_signal = pyqtSignal(object)

    def __init__(self, camera_url, circuit_id: int = 0):
        """camera_url: int device index or stream URL string.
        circuit_id: circuit this camera belongs to (one camera per circuit)."""
        super().__init__()
        self.camera_url = camera_url
        self.face_detection: bool = True
        if isinstance(camera_url, int):
            self.camera_url_str = str(camera_url)
        else:
            self.camera_url_str = camera_url
        self.cap: cv2.VideoCapture = None
        self.running: bool = True
        self.fps = 0
        self.frame_count = 0
        self.fps_time = 0
        self.cycle_limit = DEFAULT_VIDEO_SLEEP_MS
        self.cycle_ms = 0
        self.circuit_id = circuit_id
        self.is_signal_connect = False
        self.is_emit = False
        self.set_video_cycle_ms(2000)

    def signal_connect(self, slot_func):
        """Connect image_signal to *slot_func* and remember the connection."""
        self.image_signal.connect(slot_func)
        self.is_signal_connect = True

    def signal_disconnect(self):
        """Disconnect image_signal if it is currently connected."""
        if self.is_signal_connect:
            self.image_signal.disconnect()
            self.is_signal_connect = False

    def set_video_cycle_ms(self, cycle_ms: int = 10):
        """Set the emit period, clamped to at least DEFAULT_VIDEO_SLEEP_MS."""
        if self.cycle_limit != cycle_ms:
            if cycle_ms <= DEFAULT_VIDEO_SLEEP_MS:
                self.cycle_limit = DEFAULT_VIDEO_SLEEP_MS
            else:
                self.cycle_limit = cycle_ms

    def change_camera_url(self, camera_url: str):
        """Run at full rate for the active circuit, throttle the others."""
        if self.circuit_id == camera_url:
            self.set_video_cycle_ms(DEFAULT_VIDEO_SLEEP_MS)
        else:
            self.set_video_cycle_ms(1000)

    def close(self):
        """Stop the thread; force-terminate if capture never initialised."""
        if self.cap is None:
            self.terminate()
        self.running = False

    def run(self):
        process_count = 0
        fps_time = 0
        # Background worker that runs face recognition on the latest frame.
        face_thread = ImageProcessingThread()
        face_thread.start()
        self.match_count = 0  # frames in which a known face was matched
        while self.running:
            inform_msg = "Camera init start, URL = " + self.camera_url_str
            print_inform_msg(inform_msg)
            try:
                self.cap = cv2.VideoCapture(self.camera_url)
                if self.cap.isOpened():
                    inform_msg = "Camera Init Success, URL = " + self.camera_url_str
                    self.is_emit = True
                    print_inform_msg(inform_msg)
                else:
                    inform_msg = "Camera connection timeout, URL = " + self.camera_url_str
                    print_error_msg(inform_msg)
            except Exception:
                self.cap = None
                inform_msg = "Camera Init Fail, URL = " + self.camera_url_str
                print_error_msg(inform_msg)
            base_time = time.time()
            while self.running and self.cap:
                time.sleep(DEFAULT_VIDEO_SLEEP_MS / 1000)
                if self.cycle_ms + DEFAULT_VIDEO_SLEEP_MS < self.cycle_limit:
                    self.cycle_ms += DEFAULT_VIDEO_SLEEP_MS
                    self.cap.grab()  # drop stale frames, keep the stream fresh
                    continue
                else:
                    self.cycle_ms = 0
                try:
                    ret, frame = self.cap.read()
                    if not ret:
                        break
                    face_thread.set_frame(frame)
                    cur_time = time.time()
                    execution_time = cur_time - base_time
                    base_time = cur_time
                    process_count += 1
                    fps_time += execution_time
                    if fps_time >= 1:  # recompute FPS once per second
                        self.fps = process_count
                        process_count = 0
                        fps_time = 0
                    if execution_time < 5:
                        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                        for (top, right, bottom, left), name in zip(face_thread.face_locations, face_thread.face_names):
                            if name != "Unknown":  # only count confirmed matches
                                self.match_count += 1
                                # Draw the (possibly Chinese) name with PIL,
                                # since cv2.putText cannot render CJK glyphs.
                                pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                                draw = ImageDraw.Draw(pil_image)
                                font = ImageFont.truetype(font_path, 15)
                                draw.text((left + 12, bottom + 12), name, font=font, fill=(255, 255, 255))
                                frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
                                cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
                            else:
                                name = "未在人脸库中,无操作权限"
                                pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                                draw = ImageDraw.Draw(pil_image)
                                font = ImageFont.truetype(font_path, 15)
                                draw.text((left + 12, bottom + 12), name, font=font, fill=(255, 255, 255))
                                frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
                                cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
                        self.image_signal.emit(frame)
                    else:
                        # More than 5 s between frames: the network probably
                        # dropped; leave the inner loop and reconnect.
                        err_message = "Camera read timeout, URL = " + self.camera_url_str
                        print_error_msg(err_message)
                        break
                except Exception:
                    err_message = "Camera read exception, URL = " + self.camera_url_str
                    self.is_emit = False
                    print_error_msg(err_message)
                    if self.running:
                        time.sleep(2)  # back off before reconnecting
                    break
            if self.cap:
                self.cap.release()
                self.cap = None
            print_inform_msg("Camera connection ended")
            time.sleep(0.01)
# - * - coding:utf - 8 - * -
# import sys
# import cv2
# import os
# import time
# import subprocess
# import face_recognition
# import numpy as np
# from PIL import Image, ImageDraw, ImageFont
# from PyQt5.QtCore import QThread, pyqtSignal, QObject
# from print_color import *
# current_dir = os.path.dirname(__file__)
# share_code_dir = os.path.abspath(os.path.join(current_dir, '..', '..'))
# sys.path.append(share_code_dir)
# DEFAULT_VIDEO_SLEEP_MS = 20
# # 定义路径常量
# npy_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'face_data/face_data.npy'))
# image_folder_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'face_data'))
# font_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'font/hanzi.ttc'))
# def load_face_data():
# face_data = {}
# try:
# face_data = np.load(npy_file_path, allow_pickle=True).item()
# print_debug_msg(f"Loaded {len(face_data)} face encodings from {npy_file_path}.")
# except Exception as e:
# print_error_msg(f"Error loading face encodings: {e}")
# current_images = [f for f in os.listdir(image_folder_path) if f.endswith('.jpg')]
# current_image_set = set(os.path.splitext(f)[0] for f in current_images)
# known_image_set = set(face_data.keys())
# added_images = current_image_set - known_image_set
# removed_images = known_image_set - current_image_set
# for image_name in added_images:
# image_path = os.path.join(image_folder_path, f"{image_name}.jpg")
# try:
# image = face_recognition.load_image_file(image_path)
# face_encodings = face_recognition.face_encodings(image)
# if face_encodings:
# face_data[image_name] = face_encodings[0].tolist()
# print_debug_msg(f"Added encoding for {image_name}.")
# else:
# print_warning_msg(f"[WARNING] No face detected in {image_name}.jpg.")
# except Exception as e:
# print_error_msg(f"[ERROR] Error processing {image_name}.jpg: {e}")
# for removed_image in removed_images:
# del face_data[removed_image]
# print_debug_msg(f"Removed encoding for {removed_image}.")
# np.save(npy_file_path, face_data)
# print_debug_msg(f"Updated face data saved with {len(face_data)} entries.")
# return face_data
# class ImageProcessingThread(QThread):
# processed_image_signal = pyqtSignal(object)
# def __init__(self, frame=None):
# super().__init__()
# self.frame = frame # 添加 frame 参数
# self.face_encodings = []
# self.face_names = []
# self.face_locations = []
# self.face_data = load_face_data()
# self.total_image_name = list(self.face_data.keys())
# self.total_face_encoding = [np.array(self.face_data[name]) for name in self.total_image_name]
# def process_frame(self, frame):
# """实时处理帧,匹配已知人脸"""
# self.face_locations = face_recognition.face_locations(frame)
# face_encodings = face_recognition.face_encodings(frame, self.face_locations)
# names = []
# for face_encoding in face_encodings:
# name = "Unknown"
# face_distances = face_recognition.face_distance(self.total_face_encoding, face_encoding)
# if face_distances.size > 0:
# min_distance = np.min(face_distances)
# best_match_index = np.argmin(face_distances)
# if min_distance < self.dynamic_tolerance(): # 使用动态容忍度
# name = self.total_image_name[best_match_index]
# names.append(name)
# self.face_names = names
# return frame
# def dynamic_tolerance(self):
# # 动态调整容忍度,可以基于一些规则来调整,如帧内错误率、识别准确率等
# error_rate = self.calculate_error_rate()
# if error_rate > 0.1:
# return 0.5 # 高容忍度以减少错误
# else:
# return 0.4 # 适中容忍度
# def calculate_error_rate(self):
# # 计算识别错误率,作为动态调整容忍度的依据
# total_faces = len(self.total_face_encoding)
# unmatched_faces = sum(1 for name in self.face_names if name == "Unknown")
# return unmatched_faces / total_faces if total_faces > 0 else 0
# def set_frame(self, frame):
# self.frame = frame
# def run(self):
# while True:
# if self.frame is not None:
# self.process_frame(self.frame)
# self.processed_image_signal.emit(self.frame)
# time.sleep(0.01) # 控制帧率
# # 摄像头采集线程
# class CameraThread(QThread):
# image_signal = pyqtSignal(object)
# def __init__(self, camera_url, circuit_id: int = 0):
# super().__init__()
# self.camera_url = camera_url
# self.face_detection: bool = True
# if isinstance(camera_url, int):
# self.camera_url_str = str(camera_url)
# else:
# self.camera_url_str = camera_url
# self.cap: cv2.VideoCapture = None
# self.running: bool = True
# self.fps = 0
# self.frame_count = 0
# self.fps_time = 0
# self.cycle_limit = DEFAULT_VIDEO_SLEEP_MS
# self.cycle_ms = 0
# self.circuit_id = circuit_id
# self.is_signal_connect = False
# self.is_emit = False
# self.set_video_cycle_ms(2000)
# def signal_connect(self, slot_func):
# self.image_signal.connect(slot_func)
# self.is_signal_connect = True
# def signal_disconnect(self):
# if self.is_signal_connect:
# self.image_signal.disconnect()
# self.is_signal_connect = False
# def set_video_cycle_ms(self, cycle_ms: int = 10):
# if self.cycle_limit != cycle_ms:
# if cycle_ms <= DEFAULT_VIDEO_SLEEP_MS:
# self.cycle_limit = DEFAULT_VIDEO_SLEEP_MS
# else:
# self.cycle_limit = cycle_ms
# def change_camera_url(self, camera_url: str):
# if self.circuit_id == camera_url:
# self.set_video_cycle_ms(DEFAULT_VIDEO_SLEEP_MS)
# else:
# self.set_video_cycle_ms(1000)
# def close(self):
# if self.cap is None:
# self.terminate()
# self.running = False
# def run(self):
# process_count = 0
# fps_time = 0
# face_thread = ImageProcessingThread()
# face_thread.start()
# self.match_count = 0 # 初始化匹配帧数计数器
# while self.running:
# inform_msg = "Camera init start, URL = " + self.camera_url_str
# print_inform_msg(inform_msg)
# try:
# self.cap = cv2.VideoCapture(self.camera_url) # 强制使用 BGR 格式
# if self.cap.isOpened():
# inform_msg = "Camera Init Success, URL = " + self.camera_url_str
# self.is_emit = True
# print_inform_msg(inform_msg)
# else:
# inform_msg = "Camera connection timeout, URL = " + self.camera_url_str
# print_error_msg(inform_msg)
# except Exception as e:
# self.cap = None
# inform_msg = "Camera Init Fail, URL = " + self.camera_url_str
# print_error_msg(inform_msg)
# base_time = time.time()
# while self.running and self.cap:
# time.sleep(DEFAULT_VIDEO_SLEEP_MS / 1000)
# if self.cycle_ms + DEFAULT_VIDEO_SLEEP_MS < self.cycle_limit:
# self.cycle_ms += DEFAULT_VIDEO_SLEEP_MS
# self.cap.grab()
# continue
# else:
# self.cycle_ms = 0
# try:
# ret, frame = self.cap.read()
# if not ret:
# break
# face_thread.set_frame(frame)
# cur_time = time.time()
# execution_time = cur_time - base_time
# base_time = cur_time
# process_count += 1
# fps_time += execution_time
# if fps_time >= 1: # 每隔1秒重新计算 FPS
# self.fps = process_count
# process_count = 0
# fps_time = 0
# if execution_time < 5:
# frame: QObject = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# for (top, right, bottom, left), name in zip(face_thread.face_locations, face_thread.face_names):
# if name != "Unknown": # 只绘制已匹配的结果
# self.match_count += 1 # 计数成功匹配的帧
# # cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
# # cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)
# # 将OpenCV图像转换为PIL图像格式注意要转换色彩空间OpenCV是BGRPIL是RGB
# pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
# draw = ImageDraw.Draw(pil_image)
# # 这里选择合适的中文字体文件路径,比如系统自带的宋体(不同系统路径可能有差异),并设置字体大小
# font = ImageFont.truetype(font_path, 15)
# # 在指定位置绘制中文
# draw.text((left + 12, bottom + 12), name, font=font, fill=(255, 255, 255))
# # 再将PIL图像转换回OpenCV图像格式
# frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
# cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
# else:
# name = "未在人脸库中,无操作权限"
# pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
# draw = ImageDraw.Draw(pil_image)
# #设置字体大小
# font = ImageFont.truetype(font_path, 15)
# # 在指定位置绘制中文
# draw.text((left + 12, bottom + 12), name, font=font, fill=(255, 255, 255))
# # 再将PIL图像转换回OpenCV图像格式
# frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
# cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
# self.image_signal.emit(frame)
# else:
# err_message = "Camera read timeout, URL = " + self.camera_url_str
# print_error_msg(err_message)
# break
# except Exception as e:
# err_message = "Camera read exception, URL = " + self.camera_url_str
# print_error_msg(err_message)
# if self.running:
# time.sleep(2)
# break
# if self.cap:
# self.cap.release()
# self.cap = None
# print_inform_msg("Camera connection ended")
# time.sleep(0.01)

View File

@ -0,0 +1,159 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import sys, os, io, csv, time, datetime
from typing import Callable, Optional
# PyQt5
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import Qt, QThread, pyqtSignal, QTimer, QSize
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import (
QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
QLabel, QPushButton, QComboBox, QTextEdit, QFileDialog, QMessageBox,
QGroupBox, QGridLayout, QDialog, QFormLayout, QSpinBox, QCheckBox,
QLineEdit, QTableWidget, QTableWidgetItem
)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import cv2
from PIL import Image
import serial
import serial.tools.list_ports
# ---------- 协议导入----------
from Shared_CODE.FaceRecognitionProtocol import (
build_reset, build_uvc_view, build_face_view, build_verify,
build_enroll_itg_single, build_delete_all, build_get_all_userid,build_delete_user,
MID_REPLY, MID_NOTE, CMD_ENROLL, CMD_ENROLL_ITG,
parse_reply, parse_note
)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
ui_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../Shared_UI'))
users_ui_file_path = os.path.join(ui_path, "users.ui")
verify_ui_file_path = os.path.join(ui_path, "verify.ui")
enrill_ui_file_path = os.path.join(ui_path, "enroll.ui")
CSV_FILE = os.path.join(ui_path, "users.csv")
# -------------------- CSV 工具 --------------------"
def load_users(csv_path: str = None):
    """Read the user table from a CSV file.

    csv_path: path to read; defaults to the module-level CSV_FILE when None
    (backward-compatible with the original zero-argument call).

    Returns a list of dicts with keys user_id, user_name, created_at.
    Rows with fewer than two columns are skipped; a missing file yields [].
    """
    path = CSV_FILE if csv_path is None else csv_path
    users = []
    try:
        with open(path, "r", encoding="utf-8") as f:
            reader = csv.reader(f)
            for row in reader:
                if len(row) >= 2:
                    user = {"user_id": row[0], "user_name": row[1]}
                    # Third column (creation timestamp) is optional.
                    user["created_at"] = row[2] if len(row) >= 3 else ""
                    users.append(user)
    except FileNotFoundError:
        pass  # no store yet: behave as an empty user table
    return users
def save_users_list(users, csv_path: str = None):
    """Overwrite the CSV store with *users*.

    users: iterable of dicts with user_id / user_name / created_at keys
    (missing keys are written as empty strings).
    csv_path: destination path; defaults to CSV_FILE when None
    (backward-compatible with the original single-argument call).
    """
    path = CSV_FILE if csv_path is None else csv_path
    with open(path, "w", newline="", encoding="utf-8") as f:
        w = csv.writer(f)
        for u in users:
            w.writerow([u.get("user_id", ""), u.get("user_name", ""), u.get("created_at", "")])
def save_user(user_id: int, user_name: str) -> bool:
    """Append one user to the CSV store.

    Returns False (and writes nothing) when the id already exists,
    True after a successful append. The creation timestamp is recorded
    as the current local time.
    """
    users = load_users()
    if any(str(entry["user_id"]) == str(user_id) for entry in users):
        return False  # duplicate id: keep the existing record untouched
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    users.append({
        "user_id": str(user_id),
        "user_name": user_name,
        "created_at": stamp,
    })
    save_users_list(users)
    return True
class UserManageDialog(QDialog):
    """User administration dialog.

    Lists the locally known users (CSV store) and forwards delete / query
    commands to the face module through *send_func*.
    """

    def __init__(self, parent, send_func):
        """send_func: callable taking a protocol frame (bytes) and writing
        it to the device link."""
        super().__init__(parent)
        uic.loadUi(users_ui_file_path, self)
        self.send_func = send_func
        self.btn_delete.clicked.connect(self.delete_selected)
        self.btn_refresh.clicked.connect(self.refresh)
        self.btn_get.clicked.connect(self.get_from_device)
        self.btn_del_all.clicked.connect(self.delete_all_users)
        self.refresh()

    def refresh(self):
        """Reload the table widget from the local CSV store."""
        users = load_users()
        self.table.setRowCount(len(users))
        for r, u in enumerate(users):
            self.table.setItem(r, 0, QTableWidgetItem(str(u.get("user_id", ""))))
            self.table.setItem(r, 1, QTableWidgetItem(u.get("user_name", "")))
            self.table.setItem(r, 2, QTableWidgetItem(u.get("created_at", "")))

    def delete_selected(self):
        """Delete the selected user on the device (best effort) and locally."""
        row = self.table.currentRow()
        if row < 0:
            QMessageBox.warning(self, "提示", "请选择要删除的用户")
            return
        uid = self.table.item(row, 0).text()
        uname = self.table.item(row, 1).text()
        try:
            self.send_func(build_delete_user(int(uid)))
        except Exception:
            # Best effort: a malformed id or a down device link must not
            # block removal of the local record. (Was a bare `except:`,
            # which also swallowed SystemExit/KeyboardInterrupt.)
            pass
        users = [u for u in load_users() if str(u["user_id"]) != uid]
        save_users_list(users)
        self.refresh()
        QMessageBox.information(self, "提示", f"用户 {uname}(ID={uid}) 已删除")

    def get_from_device(self):
        """Ask the device for all user ids it currently stores."""
        self.send_func(build_get_all_userid())

    def delete_all_users(self):
        """Request deletion of every user on the device and clear the CSV."""
        self.send_func(build_delete_all())
        save_users_list([])
        self.refresh()
        QMessageBox.information(self, "提示", "已请求删除所有用户并清空本地记录")
class VerifyDialog(QDialog):
    """Parameter dialog for a face-verify command."""

    def __init__(self, parent=None):
        super().__init__(parent)
        uic.loadUi(verify_ui_file_path, self)
        # Sensible defaults: no immediate power-down, 10 s timeout.
        self.cb_rightaway.setCurrentIndex(0)
        self.sb_timeout.setValue(10)
        self.btn_ok.clicked.connect(self.accept)

    def values(self):
        """Return (pd_rightaway, timeout) as currently chosen in the UI."""
        return self.cb_rightaway.currentIndex(), self.sb_timeout.value()
class EnrollItgSingleDialog(QDialog):
    """Parameter dialog for an integrated single-user enroll command."""

    def __init__(self, parent=None):
        super().__init__(parent)
        uic.loadUi(enrill_ui_file_path, self)
        self.cb_admin.setCurrentIndex(0)  # default to normal user
        self.btn_ok.clicked.connect(self.accept)

    def values(self):
        """Return (admin, user_name, face_dir bitmask, timeout, itg_param)."""
        admin_val = self.cb_admin.currentIndex()
        uname = self.le_name.text().strip()[:32]
        # Assemble the face-direction bitmask from the direction checkboxes.
        direction_bits = (
            (0x01 if self.chk_mid.isChecked() else 0)
            | (0x02 if self.chk_right.isChecked() else 0)
            | (0x04 if self.chk_left.isChecked() else 0)
            | (0x08 if self.chk_down.isChecked() else 0)
            | (0x10 if self.chk_up.isChecked() else 0)
        )
        face_dir = direction_bits or 0x01  # default to "middle" when none ticked
        timeout_val = self.sb_timeout.value()
        try:
            # base-0 int(): accepts decimal, 0x.., 0o.., 0b.. notations
            itg_val = int(self.le_itg.text().strip(), 0)
        except ValueError:
            itg_val = 0
        return admin_val, uname, face_dir, timeout_val, itg_val
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # UserManageDialog requires (parent, send_func); the original called it
    # with no arguments, raising TypeError. Use a no-op sender so the dialog
    # can be exercised without a serial device attached.
    dialog = UserManageDialog(None, lambda frame_bytes: None)
    dialog.exec_()
    sys.exit(0)

View File

@ -0,0 +1,382 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import struct
from dataclasses import dataclass
from typing import Optional, Tuple, List
SYNC = b"\xEF\xAA"
# Msg IDs
MID_REPLY = 0x00
MID_NOTE = 0x01
MID_IMAGE = 0x02
# Host->Module (Command) IDs (subset used by the tool)
CMD_RESET = 0x10
CMD_GET_STATUS = 0x11
CMD_VERIFY = 0x12
CMD_ENROLL = 0x13
CMD_DELETE_USER = 0x20
CMD_DELETE_ALL = 0x21
CMD_GET_USER_INFO = 0x22
CMD_FACE_RESET = 0x23
CMD_GET_ALL_USERID = 0x24
CMD_ENROLL_ITG = 0x26
CMD_GET_VERSION = 0x30
CMD_INIT_ENCRYPTION = 0x50
CMD_SET_RELEASE_KEY = 0x52
CMD_SET_DEBUG_KEY = 0x53
CMD_GET_SN = 0x93
CMD_READ_USB_UVC = 0xB0
CMD_SET_USB_UVC = 0xB1
CMD_FACE_VIEW = 0xB5
CMD_UVC_VIEW = 0xC0
CMD_UPGRADE_FW = 0xF6
CMD_ENROLL_WITH_PHOTO = 0xF7
CMD_LED_CONTROL = 0xF9
CMD_ENROLL_SINGLE = 0x1D
CMD_DEMO_MODE = 0xFE
# NOTE nids
NID_READY = 0x00
NID_FACE_STATE = 0x01
NID_UNKNOWNERROR = 0x02
NID_OTA_DONE = 0x03
NID_EYE_STATE = 0x04
# REPLY result codes (partial)
MR_SUCCESS = 0x00
MR_REJECTED = 0x01
MR_ABORTED = 0x02
MR_FAILED4_CAMERA = 0x04
MR_FAILED4_UNKNOWNREASON = 0x05
MR_FAILED4_INVALIDPARAM = 0x06
MR_FAILED4_NOMEMORY = 0x07
MR_FAILED4_UNKNOWNUSER = 0x08
MR_FAILED4_MAXUSER = 0x09
MR_FAILED4_FACEENROLLED = 0x0A
MR_FAILED4_LIVENESSCHECK = 0x0C
MR_FAILED4_TIMEOUT = 0x0D
MR_FAILED4_AUTHORIZATION = 0x0E
MR_FAILED4_READ_FILE = 0x13
MR_FAILED4_WRITE_FILE = 0x14
MR_FAILED4_NO_ENCRYPT = 0x15
MR_FAILED4_NO_RGBIMAGE = 0x17
MR_FAILED4_JPGPHOTO_LARGE = 0x18
MR_FAILED4_JPGPHOTO_SMALL = 0x19
# ---- 映射表 ----
# 指令名称映射指令ID -> 中文名称)
CMD_NAMES = {
CMD_RESET: "复位",
CMD_GET_STATUS: "获取状态",
CMD_VERIFY: "人脸验证",
CMD_LED_CONTROL: "LED控制",
CMD_ENROLL: "人脸录入(多帧)",
CMD_ENROLL_SINGLE: "人脸录入(单帧)",
CMD_ENROLL_ITG: "人脸录入(集成式)",
CMD_DELETE_USER: "删除单个用户",
CMD_DELETE_ALL: "删除所有用户",
CMD_GET_USER_INFO: "获取用户信息",
CMD_GET_ALL_USERID: "获取所有用户ID",
CMD_GET_VERSION: "获取版本信息",
CMD_INIT_ENCRYPTION: "初始化加密",
CMD_ENROLL_WITH_PHOTO: "照片录入注册",
}
# 结果码名称映射(结果码 -> 中文名称)
RESULT_NAMES = {
0x00: "成功",
0x01: "被拒绝",
0x02: "已中止",
0x04: "失败:相机异常",
0x05: "失败:未知错误",
0x06: "失败:参数无效",
0x07: "失败:内存不足",
0x08: "失败:用户不存在",
0x09: "失败:超过最大用户数",
0x0A: "失败:已录入该用户",
0x0C: "失败:活体检测未通过",
0x0D: "失败:超时",
0x0E: "失败:认证失败",
0x13: "失败:文件读取错误",
0x14: "失败:文件写入错误",
0x15: "失败:未启用加密",
0x17: "失败:无RGB图像",
0x18: "失败:JPG文件过大",
0x19: "失败:JPG文件过小",
}
# 通知状态名称映射(状态码 -> 中文名称)
NOTE_NAMES = {
0x00: "就绪",
0x01: "人脸状态",
0x02: "未知错误",
0x03: "OTA升级完成",
0x04: "眼睛状态",
}
# 帧头同步字
SYNC = b"\xEF\xAA"
def xor_checksum(data: bytes) -> int:
    """
    XOR checksum over *data* — the MsgID + Size + Data portion of a frame,
    excluding the 2-byte SYNC and the trailing checksum byte itself.
    Returns a value in 0..255.
    """
    result = 0
    for byte in data:
        result = (result ^ byte) & 0xFF
    return result
def pack_frame(msg_id: int, data: bytes = b"") -> bytes:
    """
    Build one protocol frame.

    Layout: SYNC(2) + MsgID(1) + Size(2, big-endian) + Data(N) + Chk(1),
    where Chk is the XOR of every byte from MsgID through the end of Data.
    """
    body = struct.pack(">BH", msg_id, len(data)) + data  # MsgID + Size + Data
    checksum = xor_checksum(body)
    return SYNC + body + bytes([checksum])
def unpack_frame(buf: bytes) -> Tuple[Optional[dict], int]:
    """
    Try to parse one frame out of the receive buffer.

    Returns (frame, consumed_bytes):
      - no complete frame yet -> (None, n) where the caller may discard the
        first n bytes (leading garbage) and keep waiting
      - success               -> ({"msg_id","size","data","raw"}, n)
      - checksum mismatch     -> ({"error": "checksum", "raw": ...}, n)

    Bug fix: consumed_bytes now counts from the START of `buf`, including
    any garbage before the SYNC word. The original returned only the frame
    length, which left the caller's buffer misaligned whenever noise
    preceded a frame. Also keeps a trailing half-SYNC byte instead of
    discarding it.
    """
    if len(buf) < 6:  # minimal frame: SYNC(2) + MsgID(1) + Size(2) + Chk(1)
        return None, 0
    idx = buf.find(SYNC)
    if idx == -1:
        # No sync word found. Keep the last byte in case it is the first
        # half of a SYNC split across two reads; drop everything else.
        if buf.endswith(SYNC[:1]):
            return None, len(buf) - 1
        return None, len(buf)
    if len(buf) - idx < 6:
        # Header incomplete: discard only the garbage prefix, keep waiting.
        return None, idx
    msg_id = buf[idx + 2]
    size = struct.unpack(">H", buf[idx + 3:idx + 5])[0]
    total = 2 + 1 + 2 + size + 1  # full frame length
    if len(buf) - idx < total:
        # Payload not fully received yet: discard only the garbage prefix.
        return None, idx
    frame = buf[idx: idx + total]
    content = frame[2:-1]  # MsgID + Size + Data (checksum coverage)
    chk = frame[-1]
    if xor_checksum(content) != chk:
        return {"error": "checksum", "raw": frame}, idx + total
    data = frame[5:-1]  # payload only
    return {
        "msg_id": msg_id,
        "size": size,
        "data": data,
        "raw": frame
    }, idx + total
# ------ Builders for key commands ------
def build_reset() -> bytes:
return pack_frame(CMD_RESET)
def build_get_status() -> bytes:
return pack_frame(CMD_GET_STATUS)
def build_led_control(state: int) -> bytes:
"""
构建LED控制指令帧(0xF9指令),符合校验位逻辑
:param state: LED状态值,0=灭,1=亮
"""
# 数据部分1字节状态值根据模组要求定义如0x00=灭0x01=亮)
data = struct.pack("B", state & 0xFF) # 确保是单字节
return pack_frame(CMD_LED_CONTROL, data) # CMD_LED_CONTROL = 0xF9
#pd_rightaway: int验证成功后是否立即断电0=不立即断电1=立即断电仅保留低8位有效通过&0xFF确保
#timeout: int验证超时时间单位范围通常为1-255秒仅保留低8位有效通过&0xFF确保
def build_verify(pd_rightaway: int = 0, timeout: int = 10) -> bytes:
data = struct.pack("BB", pd_rightaway & 0xFF, timeout & 0xFF)
return pack_frame(CMD_VERIFY, data)
def build_enroll(admin: int, user_name: str, face_dir: int, timeout: int = 10) -> bytes:
name_bytes = user_name.encode("utf-8")[:32]
name_bytes = name_bytes + bytes(32 - len(name_bytes))
data = struct.pack("B", admin & 0xFF) + name_bytes + struct.pack("BB", face_dir & 0xFF, timeout & 0xFF)
return pack_frame(CMD_ENROLL, data)
def build_enroll_single(admin: int, user_name: str, timeout: int = 10) -> bytes:
name_bytes = user_name.encode("utf-8")[:32]
name_bytes = name_bytes + bytes(32 - len(name_bytes))
# face_dir not used per manual for ENROLL_SINGLE
data = struct.pack("B", admin & 0xFF) + name_bytes + struct.pack("BB", 0x00, timeout & 0xFF)
return pack_frame(0x1D, data)
def build_enroll_itg_single(admin: int, user_name: str, face_dir: int, timeout: int, itg_param: int) -> bytes:
    """Build an ENROLL_ITG_SINGLE command frame.

    :param admin: 0 = normal user, 1 = administrator
    :param user_name: user name, UTF-8 encoded, truncated to and NUL-padded up to 32 bytes
    :param face_dir: face direction bitmask
    :param timeout: timeout in seconds (low byte)
    :param itg_param: ITG parameter, serialized as 4 bytes big-endian
    """
    name = user_name.encode("utf-8")[:32].ljust(32, b"\x00")
    # Layout: admin(1) + name(32) + face_dir(1) + timeout(1) + itg_param(4).
    payload = (
        bytes([admin & 0xFF])
        + name
        + bytes([face_dir & 0xFF, timeout & 0xFF])
        + struct.pack(">I", itg_param & 0xFFFFFFFF)
    )
    return pack_frame(CMD_ENROLL_ITG, payload)
def build_delete_user(user_id: int) -> bytes:
    """Build a DELETE_USER frame; the user id is sent as 2 bytes big-endian."""
    payload = bytes([(user_id >> 8) & 0xFF, user_id & 0xFF])
    return pack_frame(CMD_DELETE_USER, payload)
def build_delete_all() -> bytes:
    """Build a DELETE_ALL command frame (no payload)."""
    frame = pack_frame(CMD_DELETE_ALL)
    return frame
def build_get_user_info(user_id: int) -> bytes:
    """Build a GET_USER_INFO frame; the user id is sent as 2 bytes big-endian."""
    payload = bytes([(user_id >> 8) & 0xFF, user_id & 0xFF])
    return pack_frame(CMD_GET_USER_INFO, payload)
def build_get_all_userid() -> bytes:
    """Build a GET_ALL_USERID command frame (no payload)."""
    frame = pack_frame(CMD_GET_ALL_USERID)
    return frame
def build_get_version() -> bytes:
    """Build a GET_VERSION command frame (no payload)."""
    frame = pack_frame(CMD_GET_VERSION)
    return frame
def build_uvc_view(state: int) -> bytes:
    """Build a UVC_VIEW frame; state is sent as a single byte."""
    payload = bytes([state & 0xFF])
    return pack_frame(CMD_UVC_VIEW, payload)
def build_face_view(state: int) -> bytes:
    """Build a FACE_VIEW frame; state is sent as a single byte."""
    payload = bytes([state & 0xFF])
    return pack_frame(CMD_FACE_VIEW, payload)
def build_init_encryption(seed4: bytes, mode: int = 0) -> bytes:
    """Build an INIT_ENCRYPTION frame.

    :param seed4: exactly 4 seed bytes
    :param mode: encryption mode (only the low byte is sent)
    :raises ValueError: if ``seed4`` is not exactly 4 bytes long
    """
    if len(seed4) != 4:
        raise ValueError("seed must be 4 bytes")
    payload = seed4 + bytes([mode & 0xFF])
    return pack_frame(CMD_INIT_ENCRYPTION, payload)
def build_enroll_with_photo_begin(photo_size: int) -> bytes:
    """Build the first ENROLL_WITH_PHOTO frame.

    Per the manual: Seq = 0 (2 bytes), followed by the total photo size
    as a 4-byte big-endian integer.
    """
    payload = struct.pack(">HI", 0, photo_size)
    return pack_frame(CMD_ENROLL_WITH_PHOTO, payload)
def build_enroll_with_photo_chunk(seq: int, chunk: bytes) -> bytes:
    """Build a data frame for ENROLL_WITH_PHOTO.

    ``seq`` starts at 1 and increases per chunk; the MTU is 246 bytes.
    """
    header = struct.pack(">H", seq & 0xFFFF)
    return pack_frame(CMD_ENROLL_WITH_PHOTO, header + chunk)
# ---- Parsers (Reply / Note / Image) ----
def parse_reply(data: bytes) -> dict:
    """Parse the payload of a REPLY frame.

    :param data: payload bytes: echoed command id (1), result code (1),
        then command-specific extra data
    :return: dict with at least ``type``/``mid``/``result`` plus their
        human-readable names; extra keys depend on the echoed command.
        If the payload is shorter than 2 bytes, ``{"type":"REPLY","error":"short"}``
        is returned instead.
    """
    if len(data) < 2:
        return {"type":"REPLY","error":"short"}
    mid = data[0]
    result = data[1]
    rest = data[2:]
    # 初始化 info 字典 (base fields common to every reply)
    info = {
        "type":"REPLY",
        "mid": mid,
        "mid_name": CMD_NAMES.get(mid, f"0x{mid:02X}"),
        "result": result,
        "result_name": RESULT_NAMES.get(result, f"0x{result:02X}")
    }
    if mid == CMD_VERIFY and len(rest) >= 36:
        # Layout: user_id(2) + user_name(32, NUL-padded) + admin(1) + unlock_status(1)
        uid = (rest[0]<<8)|rest[1]
        name = rest[2:34].rstrip(b"\x00").decode("utf-8", errors="ignore")
        admin = rest[34]
        unlock = rest[35]
        info.update({"user_id": uid, "user_name": name, "admin": admin, "unlock_status": unlock})
    elif mid in (CMD_ENROLL, 0x1D, CMD_ENROLL_ITG) and len(rest) >= 3:
        # 0x1D is ENROLL_SINGLE (no named constant in this module).
        uid = (rest[0]<<8)|rest[1]
        face_dir = rest[2]
        info.update({"user_id": uid, "face_direction": face_dir})
    elif mid == CMD_GET_STATUS and len(rest) >= 1:
        status = rest[0]
        info.update({"status": status, "status_name": {
            0: "空闲",
            1: "录入中",
            2: "验证中"
        }.get(status, f"0x{status:02X}")})
    elif mid == CMD_GET_USER_INFO and len(rest) >= 35:
        uid = (rest[0]<<8)|rest[1]
        # Fix: names are enrolled as UTF-8 and NUL-padded to 32 bytes (see
        # build_enroll), so strip the padding and decode UTF-8 — same as the
        # CMD_VERIFY branch — instead of decoding the raw field as ASCII.
        name = rest[2:34].rstrip(b"\x00").decode("utf-8", errors="ignore")
        admin = rest[34]
        info.update({"user_id": uid, "user_name": name, "admin": admin})
    elif mid == CMD_GET_ALL_USERID and len(rest) >= 1:
        # First byte is the count, followed by count big-endian u16 ids.
        n = rest[0]
        ids = [(rest[i]<<8)|rest[i+1] for i in range(1, 1+2*n, 2) if i+1 < len(rest)]
        info.update({"count": n, "user_ids": ids})
    elif mid == CMD_GET_VERSION:
        info["version_str"] = rest.decode("ascii", errors="ignore")
    elif mid == CMD_LED_CONTROL and len(rest) >= 1:
        led_state = rest[0]
        info.update({"led_state": led_state, "led_state_name": {
            0: "",
            1: ""
        }.get(led_state, f"0x{led_state:02X}")})
    elif mid == CMD_ENROLL_WITH_PHOTO:
        # Seq (2 bytes) is always echoed; user id (2 bytes) follows on the
        # final acknowledgment only.
        if len(rest) >= 2:
            seq = (rest[0]<<8)|rest[1]
            info["seq"] = seq
        if len(rest) >= 6:
            uid = (rest[2]<<8)|rest[3]
            info["user_id"] = uid
    return info
def parse_note(data: bytes) -> dict:
    """Parse the payload of a NOTE frame.

    Returns a dict with ``type``/``nid``/``nid_name``; FACE_STATE notes
    additionally carry the face state, bounding box, and pose angles
    (eight signed 16-bit big-endian values).
    """
    if not data:
        return {"type":"NOTE","error":"short"}
    nid = data[0]
    body = data[1:]
    note = {"type":"NOTE","nid": nid, "nid_name": NOTE_NAMES.get(nid, f"0x{nid:02X}")}
    if nid == NID_FACE_STATE and len(body) >= 16:
        state, left, top, right, bottom, yaw, pitch, roll = struct.unpack(">8h", body[:16])
        note.update({
            "state": state,
            "left": left, "top": top, "right": right, "bottom": bottom,
            "yaw": yaw, "pitch": pitch, "roll": roll
        })
    return note
def parse_image(data: bytes) -> dict:
    """Wrap raw JPEG bytes from an IMAGE frame into a result dict."""
    result = {"type": "IMAGE"}
    result["jpeg"] = data
    return result