Face Recognition in Python: A Guide to Detection, Feature Extraction, Facial Landmark Drawing, and Liveness Detection

For how to install dlib, see my earlier post: https://blog.csdn.net/weixin_44634704/article/details/141332644

Environment:

python==3.8
opencv-python==4.11.0.86
face_recognition==1.3.0
dlib==19.24.6
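
To confirm the environment matches the versions listed above, it can help to print each library's version string. A minimal check (the exact output will of course depend on your installation):

import cv2
import dlib
import face_recognition

# Print the installed versions; they should roughly match the environment listed above
print("opencv-python:", cv2.__version__)
print("dlib:", dlib.__version__)
print("face_recognition:", face_recognition.__version__)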

Face Detection

import cv2
import face_recognition

# Read the face image
img = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
face_List = face_recognition.face_locations(img)  # Detect faces; returns a list of (top, right, bottom, left) tuples
print(face_List)

for x in face_List:  # Draw a bounding box for each face
    cv2.rectangle(img, (x[3], x[0]), (x[1], x[2]), (0, 255, 0), 2)
cv2.imshow("a", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Output: [(116, 306, 223, 199)]
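
face_locations returns each face as a (top, right, bottom, left) tuple, which is why the loop above indexes it as x[3], x[0], x[1], x[2]. Unpacking the tuple makes the same drawing code easier to read; a small equivalent sketch:

for (top, right, bottom, left) in face_List:
    cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)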

Face Cropping (Segmentation)

import cv2
import face_recognition

# Read the face image
img = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
face_List = face_recognition.face_locations(img)  # Detect faces; returns a list of (top, right, bottom, left) tuples
print(face_List)

for x in face_List:  # Crop each face and draw its bounding box
    qie_img = img[x[0]:x[2], x[3]:x[1]]  # Crop the face region: rows top:bottom, columns left:right
    cv2.rectangle(img, (x[3], x[0]), (x[1], x[2]), (0, 255, 0), 2)  # Draw the box after cropping so it does not end up inside the crop

cv2.imshow("a", qie_img)  # Note: only the crop of the last detected face is shown here
cv2.waitKey(0)
cv2.destroyAllWindows()
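
Because the loop above only keeps the last crop, a single face is displayed even if several are detected. A small sketch that instead saves every detected face to its own file (the face_1.jpg naming is just an example):

for i, (top, right, bottom, left) in enumerate(face_List):
    crop = img[top:bottom, left:right]        # crop the face region
    cv2.imwrite(f"face_{i + 1}.jpg", crop)    # e.g. face_1.jpg, face_2.jpg, ...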

Extracting the Face Encoding (Feature Vector)

import cv2
import face_recognition

img = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
# Extract the face encoding (feature vector) of the first detected face
face01 = face_recognition.face_encodings(img)[0]
print(face01)
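
The encoding returned by face_encodings is a 128-dimensional NumPy vector, and it is this vector that the comparison examples below operate on. A quick check:

print(face01.shape)  # (128,)
print(type(face01))  # <class 'numpy.ndarray'>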

Face Comparison (Euclidean Distance)

import cv2
import face_recognition
import numpy as np

# Read the first face image
img = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # face_recognition works on RGB images; OpenCV loads BGR by default
# Extract the face encoding
face01 = face_recognition.face_encodings(img)[0]
# Read the second (reference) face image
img2 = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)  # Convert the second image to RGB as well
face02 = face_recognition.face_encodings(img2)[0]
# Compute the Euclidean distance between the two encodings
v = np.linalg.norm(face01 - face02)
if v < 0.8:
    print("Same person")
else:
    print("Different people")

Converting the Distance to a Confidence Score

import cv2
import face_recognition
import numpy as np

def euclidean_distance_to_confidence(distance, max_distance):
    # Clamp the distance so it never exceeds max_distance
    distance = min(distance, max_distance)
    # Map the distance to a confidence score in [0, 1]
    confidence = 1 - (distance / max_distance)
    return confidence

# Read the first face image
img = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # face_recognition works on RGB images; OpenCV loads BGR by default
# Extract the face encoding
face01 = face_recognition.face_encodings(img)[0]
# Read the second (reference) face image
img2 = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)  # Convert the second image to RGB as well
face02 = face_recognition.face_encodings(img2)[0]

# Compute the Euclidean distance
v = np.linalg.norm(face01 - face02)
w = euclidean_distance_to_confidence(v, 1)  # Use 1 as the maximum distance
print(w)  # The smaller the distance, the higher the confidence
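
As a worked example of this mapping: with max_distance = 1, a distance of 0.35 gives a confidence of 1 - 0.35 / 1 = 0.65, and identical encodings (distance 0) give a confidence of 1.0.

print(euclidean_distance_to_confidence(0.35, 1))  # 0.65
print(euclidean_distance_to_confidence(0.0, 1))   # 1.0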

Face Comparison (Cosine Similarity)

import cv2
import face_recognition
import numpy as np


def cosine_similarity_to_confidence(similarity):
    # Map the cosine similarity from [-1, 1] to [0, 1]
    confidence = (similarity + 1) / 2
    return confidence


# Read the first face image
img = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # face_recognition works on RGB images; OpenCV loads BGR by default
# Extract the face encoding
face01 = face_recognition.face_encodings(img)[0]
# Read the second (reference) face image
img2 = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)  # Convert the second image to RGB as well
face02 = face_recognition.face_encodings(img2)[0]

# Cosine similarity: dot product divided by the product of the norms
dot_product = np.dot(face01, face02)
norm_face01 = np.linalg.norm(face01)
norm_face02 = np.linalg.norm(face02)
similarity = dot_product / (norm_face01 * norm_face02)
print("Cosine similarity:", similarity)
confidence = cosine_similarity_to_confidence(similarity)
print("Confidence:", confidence)

Drawing the Eyes, Mouth, Nose, and Face Contour

import cv2
import face_recognition
import numpy as np




# Read the face image
img = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # face_recognition works on RGB images; OpenCV loads BGR by default

# img = face_recognition.load_image_file("face.jpg")  # alternative: load_image_file already returns RGB
face_landmarks = face_recognition.face_landmarks(img)

# Draw the detected landmark groups
for landmarks in face_landmarks:
    print(landmarks)
    # Draw the eyes
    for eye in ('left_eye', 'right_eye'):
        pts = np.array(landmarks[eye], np.int32)
        cv2.polylines(img, [pts], True, (0, 255, 0), 2)
    # Draw the mouth (top lip)
    mouth_pts = np.array(landmarks['top_lip'], np.int32)
    cv2.polylines(img, [mouth_pts], True, (0, 0, 255), 2)
    # Draw the nose bridge
    nose_pts = np.array(landmarks['nose_bridge'], np.int32)
    cv2.polylines(img, [nose_pts], True, (255, 0, 0), 2)

    # Draw the face contour (chin line)
    chin_pts = np.array(landmarks['chin'], np.int32)
    cv2.polylines(img, [chin_pts], False, (255, 255, 0), 2)

cv2.imwrite("landmarks.jpg", img[:, :, ::-1])  # Convert RGB back to BGR before saving with OpenCV

The landmarks detected by face_recognition do not include ear points by default. In most common face-recognition scenarios the ear is a relatively weak and unstable feature, so the library does not detect dedicated ear landmarks.
If you do want to mark the ears, consider a more detailed landmark model, such as the 68-point or 81-point shape predictors available for dlib; the 81-point model adds extra points that the code below uses for the ear region.

import cv2
import dlib
import numpy as np

# Load dlib's face detector and the 81-point landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_81_face_landmarks.dat")

# Read the image
image = cv2.imread(r"C:\Users\123\Desktop\1.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect faces
faces = detector(gray)

for face in faces:
    # Detect the 81 landmarks for this face
    landmarks = predictor(gray, face)

    # Draw the eyes
    left_eye_points = [(landmarks.part(n).x, landmarks.part(n).y) for n in range(36, 42)]
    right_eye_points = [(landmarks.part(n).x, landmarks.part(n).y) for n in range(42, 48)]
    for eye_points in [left_eye_points, right_eye_points]:
        pts = np.array(eye_points, np.int32)
        cv2.polylines(image, [pts], True, (0, 255, 0), 2)

    # Draw the mouth
    mouth_points = [(landmarks.part(n).x, landmarks.part(n).y) for n in range(48, 60)]
    pts = np.array(mouth_points, np.int32)
    cv2.polylines(image, [pts], True, (0, 0, 255), 2)

    # Draw the nose
    nose_points = [(landmarks.part(n).x, landmarks.part(n).y) for n in range(27, 36)]
    pts = np.array(nose_points, np.int32)
    cv2.polylines(image, [pts], True, (255, 0, 0), 2)

    # Draw the face contour
    chin_points = [(landmarks.part(n).x, landmarks.part(n).y) for n in range(0, 17)]
    pts = np.array(chin_points, np.int32)
    cv2.polylines(image, [pts], False, (255, 255, 0), 2)

    # Draw the ears (extra points from the 81-point model)
    left_ear_points = [(landmarks.part(n).x, landmarks.part(n).y) for n in range(71, 75)]
    right_ear_points = [(landmarks.part(n).x, landmarks.part(n).y) for n in range(75, 79)]
    for ear_points in [left_ear_points, right_ear_points]:
        pts = np.array(ear_points, np.int32)
        cv2.polylines(image, [pts], False, (0, 255, 255), 2)

# Save the result
cv2.imwrite("landmarks.jpg", image)

Real-Time Drawing from the Camera

import numpy as np
import cv2
import face_recognition

# Open the camera
video_capture = cv2.VideoCapture(0)

while True:
    # Read one frame of video
    ret, frame = video_capture.read()
    if not ret:
        break

    # Convert the frame from BGR to RGB, since face_recognition expects RGB images
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # contiguous copy; some dlib builds reject the negative-stride view frame[:, :, ::-1]

    # Detect face landmarks
    face_landmarks_list = face_recognition.face_landmarks(rgb_frame)

    # for face_landmarks in face_landmarks_list:
    #     # Print every landmark group and its points
    #     for facial_feature in face_landmarks.keys():
    #         print(f"{facial_feature}: {face_landmarks[facial_feature]}")

    for face_landmarks in face_landmarks_list:
        # Draw the face contour (chin line)
        chin_points = face_landmarks['chin']
        pts = [tuple(point) for point in chin_points]
        pts = np.array(pts, np.int32)
        cv2.polylines(frame, [pts], False, (255, 255, 0), 2)

        # Draw the eyes
        for eye in ('left_eye', 'right_eye'):
            eye_points = face_landmarks[eye]
            pts = [tuple(point) for point in eye_points]
            pts = np.array(pts, np.int32)
            cv2.polylines(frame, [pts], True, (0, 255, 0), 2)

        # Draw the left and right eyebrows
        for eyebrow in ('left_eyebrow', 'right_eyebrow'):
            eyebrow_points = face_landmarks[eyebrow]
            eyebrow_pts = np.array(eyebrow_points, np.int32)
            cv2.polylines(frame, [eyebrow_pts], False, (255, 0, 255), 2)

        # Top lip
        mouth_pts = np.array(face_landmarks['top_lip'], np.int32)
        cv2.polylines(frame, [mouth_pts], True, (0, 0, 255), 2)

        # Bottom lip
        bottom_lip_points = face_landmarks['bottom_lip']
        bottom_lip_pts = np.array(bottom_lip_points, np.int32)
        cv2.polylines(frame, [bottom_lip_pts], True, (0, 0, 255), 2)

        # Draw the nose bridge landmarks
        nose_bridge_points = face_landmarks['nose_bridge']
        pts = [tuple(point) for point in nose_bridge_points]
        pts = np.array(pts, np.int32)
        cv2.polylines(frame, [pts], True, (255, 0, 0), 2)

        # Draw the nose tip
        nose_tip_points = face_landmarks['nose_tip']
        nose_tip_pts = np.array(nose_tip_points, np.int32)
        cv2.polylines(frame, [nose_tip_pts], True, (0, 255, 255), 2)

    # Show the result
    cv2.imshow('Video', frame)

    # Press 'q' to exit the loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and close all windows
video_capture.release()
cv2.destroyAllWindows()
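
Since each entry in face_landmarks_list is a dict mapping a feature name to its list of points, the per-feature drawing above can also be collapsed into one helper. A minimal sketch (draw_all_landmarks is an illustrative name; the single colour and the set of closed features are assumptions):

def draw_all_landmarks(frame, face_landmarks_list):
    # Features that form closed curves; the rest are drawn as open polylines
    closed = {'left_eye', 'right_eye', 'top_lip', 'bottom_lip'}
    for face_landmarks in face_landmarks_list:
        for feature, points in face_landmarks.items():
            pts = np.array(points, np.int32)
            cv2.polylines(frame, [pts], feature in closed, (0, 255, 0), 2)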

Liveness Detection

import cv2
import face_recognition
import numpy as np

# Compute the eye aspect ratio (EAR); each eye has 6 landmark points p1..p6
def eye_aspect_ratio(eye):
    A = np.linalg.norm(np.array(eye[1]) - np.array(eye[5]))  # vertical distance p2-p6
    B = np.linalg.norm(np.array(eye[2]) - np.array(eye[4]))  # vertical distance p3-p5
    C = np.linalg.norm(np.array(eye[0]) - np.array(eye[3]))  # horizontal distance p1-p4
    ear = (A + B) / (2.0 * C)  # EAR drops sharply when the eye closes
    return ear

# Compute the mouth aspect ratio (MAR)
def mouth_aspect_ratio(mouth):
    # Re-selected feature points; mouth is top_lip + bottom_lip (12 points each)
    top_mid = (np.array(mouth[2]) + np.array(mouth[3])) // 2
    bottom_mid = (np.array(mouth[10]) + np.array(mouth[11])) // 2
    left = np.array(mouth[0])   # left mouth corner
    right = np.array(mouth[6])  # right mouth corner
    A = np.linalg.norm(top_mid - bottom_mid)  # vertical lip distance
    C = np.linalg.norm(left - right)          # horizontal mouth width
    mar = A / C
    return mar

# Initialise state variables
EYE_AR_THRESH = 0.2  # Eye aspect ratio threshold
EYE_AR_CONSEC_FRAMES = 3  # Consecutive closed-eye frames needed to count a blink
COUNTER_EYE = 0  # Counter of consecutive closed-eye frames
TOTAL_EYE = 0  # Total number of blinks

MOUTH_AR_CONSEC_FRAMES = 3  # Consecutive open-mouth frames needed to count a mouth opening
COUNTER_MOUTH = 0  # Counter of consecutive open-mouth frames
TOTAL_MOUTH = 0  # Total number of mouth openings

PREVIOUS_LANDMARKS = None  # Landmarks from the previous frame
HEAD_MOTION_THRESH = 10  # Head motion threshold (total landmark displacement)
HEAD_MOTION = False  # Flag: has the head moved between frames

# Variables for the dynamic mouth threshold
INITIAL_FRAMES = 10  # Number of frames used to calibrate the initial threshold
initial_mar_values = []
MOUTH_AR_THRESH = None

# Open the camera
video_capture = cv2.VideoCapture(0)

frame_count = 0
while True:
    # Read one frame of video
    ret, frame = video_capture.read()
    if not ret:
        break

    # Convert the frame from BGR to RGB for face_recognition
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # contiguous copy; some dlib builds reject the negative-stride view frame[:, :, ::-1]

    # Detect face landmarks
    face_landmarks_list = face_recognition.face_landmarks(rgb_frame)

    for face_landmarks in face_landmarks_list:
        # Extract the left and right eye landmarks
        left_eye = face_landmarks['left_eye']
        right_eye = face_landmarks['right_eye']

        # Compute the aspect ratio of each eye
        left_ear = eye_aspect_ratio(left_eye)
        right_ear = eye_aspect_ratio(right_eye)

        # Average the two eye aspect ratios
        ear = (left_ear + right_ear) / 2.0

        # Detect blinks
        if ear < EYE_AR_THRESH:
            COUNTER_EYE += 1
        else:
            if COUNTER_EYE >= EYE_AR_CONSEC_FRAMES:
                TOTAL_EYE += 1
            COUNTER_EYE = 0

        # Extract the mouth landmarks (top_lip followed by bottom_lip)
        mouth = face_landmarks['top_lip'] + face_landmarks['bottom_lip']

        # 计算嘴巴的纵横比
        mar = mouth_aspect_ratio(mouth)
        print(f"嘴巴纵横比: {mar}")  # 打印嘴巴纵横比,用于调试

        # Calibrate the mouth aspect ratio threshold dynamically
        if frame_count < INITIAL_FRAMES:
            initial_mar_values.append(mar)
            if frame_count == INITIAL_FRAMES - 1:
                MOUTH_AR_THRESH = np.mean(initial_mar_values) * 1.2  # Set the threshold to 1.2x the calibration average
        else:
            # Detect mouth openings
            if mar > MOUTH_AR_THRESH:
                COUNTER_MOUTH += 1
            else:
                if COUNTER_MOUTH >= MOUTH_AR_CONSEC_FRAMES:
                    TOTAL_MOUTH += 1
                COUNTER_MOUTH = 0

        # Detect head motion
        if PREVIOUS_LANDMARKS is not None:
            current_landmarks = []
            for feature in face_landmarks.values():
                current_landmarks.extend(feature)
            current_landmarks = np.array(current_landmarks)
            previous_landmarks = []
            for feature in PREVIOUS_LANDMARKS.values():
                previous_landmarks.extend(feature)
            previous_landmarks = np.array(previous_landmarks)
            displacement = np.linalg.norm(current_landmarks - previous_landmarks)
            if displacement > HEAD_MOTION_THRESH:
                HEAD_MOTION = True
            else:
                HEAD_MOTION = False

        PREVIOUS_LANDMARKS = face_landmarks

        # Overlay the blink count, mouth-opening count, and head-motion status on the frame
        cv2.putText(frame, f"Blinks: {TOTAL_EYE}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, f"Opens: {TOTAL_MOUTH}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, f"Head Motion: {'Yes' if HEAD_MOTION else 'No'}", (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    # Show the result
    cv2.imshow('Video', frame)
    frame_count += 1

    # Press 'q' to exit the loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and close all windows
video_capture.release()
cv2.destroyAllWindows()
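
The script above only displays the counters; a simple way to turn them into a pass/fail liveness decision is to require a couple of the actions before the capture loop ends. A minimal sketch using the counters from the code above (the thresholds are illustrative assumptions, not part of the original logic):

# Example policy: at least 2 blinks plus either a mouth opening or head motion
is_live = TOTAL_EYE >= 2 and (TOTAL_MOUTH >= 1 or HEAD_MOTION)
print("Liveness check passed" if is_live else "Liveness check failed")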

Author: 像风一样的男人@
