Facial Expression Recognition Based on PyTorch
Project Overview
For uploaded images containing faces, as well as frames captured from a camera, the project first locates faces using OpenCV's haarcascade_frontalface_default classifier, then feeds each detected face to a trained model to recognize and label its expression.
Environment Setup
Installing Anaconda
I set up the environment with Anaconda. There are plenty of installation tutorials online that explain it better than I could, so install it first. You can refer to this article:
https://blog.csdn.net/tqlisno1/article/details/108908775
Setting Up the torch Environment
CPU Version
If your computer has no dedicated GPU, just install the default (CPU) build.
Go to the official PyTorch website, https://pytorch.org/, and select the options that match your machine.
Then copy the generated command, pip install torch torchvision torchaudio, and open the Anaconda Prompt.
Enter the command. Downloading directly can be very slow, so append a domestic (Chinese) mirror, for example -i https://pypi.tuna.tsinghua.edu.cn/simple, then press Enter and wait.
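For example, the full command with the mirror appended looks like this:

pip install torch torchvision torchaudio -i https://pypi.tuna.tsinghua.edu.cn/simple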
GPU Version
I recommend this author's article; it is really detailed: https://blog.csdn.net/qlkaicx/article/details/134577555
During installation you may hit a problem: for the GPU build of torch, you cannot simply append -i https://pypi.tuna.tsinghua.edu.cn/simple, or you will still end up with the CPU build (I tried it myself; presumably the mirror does not host the GPU wheels).
Instead, you can download the wheels directly. Note the URL in the command, https://download.pytorch.org/whl/cu118, and open it in a browser.
Find torch, torchvision, and torchaudio, and pick the builds that match your Python version and platform; I downloaded three matching cu118 wheels.
Then create a folder named whl on the D drive, copy the three files into it, and install them as shown below.
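A rough sketch of the install commands (the exact wheel filenames depend on the versions and Python build you downloaded; the names below are placeholders):

cd /d D:\whl
pip install torch-2.0.1+cu118-cp39-cp39-win_amd64.whl
pip install torchvision-0.15.2+cu118-cp39-cp39-win_amd64.whl
pip install torchaudio-2.0.2+cu118-cp39-cp39-win_amd64.whl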
That completes the environment setup.
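To confirm everything works, a quick check in Python:

import torch
print(torch.__version__)           # installed torch version
print(torch.cuda.is_available())   # should print True for the GPU build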
Project Contents
Dataset
I used the FER2013 dataset (after training I found the accuracy is only around 60%). I split it into a training set, a validation set, and a test set, i.e. train.csv, val.csv, and test.csv, all of which are shared via my network drive (link below).
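For reference, a minimal sketch of how such a split can be produced with pandas, assuming the original Kaggle fer2013.csv with its emotion, pixels, and Usage columns (Training / PublicTest / PrivateTest):

import pandas as pd

# Load the original Kaggle FER2013 file
data = pd.read_csv('fer2013.csv')

# The Usage column marks which split each row belongs to
data[data['Usage'] == 'Training'].to_csv('data/train.csv', index=False)
data[data['Usage'] == 'PublicTest'].to_csv('data/val.csv', index=False)
data[data['Usage'] == 'PrivateTest'].to_csv('data/test.csv', index=False)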
Data Preview
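Each row stores an emotion label (0 to 6) in the first column and the 48x48 = 2304 space-separated grayscale pixel values in the second, roughly like this (pixel values here are illustrative):

emotion,pixels
0,70 80 82 72 58 ...
2,151 150 147 155 148 ...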
Code Snippets
Model Definition
import torch.nn as nn

class EmotionCNN(nn.Module):
    def __init__(self):
        super(EmotionCNN, self).__init__()
        # Three conv blocks; each pooling step halves the 48x48 input: 48 -> 24 -> 12 -> 6
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(128 * 6 * 6, 256)
        self.fc2 = nn.Linear(256, 7)  # seven expression classes
        self.dropout = nn.Dropout(p=0.5)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.pool(self.relu(self.conv1(x)))
        x = self.pool(self.relu(self.conv2(x)))
        x = self.pool(self.relu(self.conv3(x)))
        x = x.view(-1, 128 * 6 * 6)  # flatten the 128 x 6 x 6 feature maps
        x = self.dropout(self.relu(self.fc1(x)))
        x = self.fc2(x)
        return x
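A quick shape check (not part of the original project, just a sanity test) confirms the 48x48 input size implied by the 128 * 6 * 6 flatten:

import torch
net = EmotionCNN()
dummy = torch.randn(1, 1, 48, 48)  # one grayscale 48x48 face
print(net(dummy).shape)            # expected: torch.Size([1, 7])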
Model Training
import model
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from PIL import Image
import numpy as np
import pandas as pd

# Custom dataset: each CSV row is (emotion label, space-separated 48x48 pixel string)
class FER2013Dataset(Dataset):
    def __init__(self, csv_file, transform=None):
        self.data = pd.read_csv(csv_file)
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        pixels = self.data.iloc[idx, 1]
        label = int(self.data.iloc[idx, 0])
        pixel_array = np.asarray([int(pixel) for pixel in pixels.split()]).reshape(48, 48).astype(np.uint8)
        if self.transform:
            image = Image.fromarray(pixel_array)
            image = self.transform(image)
        else:
            image = torch.tensor(pixel_array).unsqueeze(0).float()
        return image, label

# Data transforms
transform = transforms.Compose([
    transforms.Grayscale(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5], std=[0.5])
])

# Load the datasets
train_dataset = FER2013Dataset(csv_file='data/train.csv', transform=transform)
val_dataset = FER2013Dataset(csv_file='data/val.csv', transform=transform)
test_dataset = FER2013Dataset(csv_file='data/test.csv', transform=transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

def train(model, train_loader, criterion, optimizer, device):
    model.train()
    running_loss = 0.0
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    return running_loss / len(train_loader)

def validate(model, val_loader, criterion, device):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in val_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return 100 * correct / total

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.EmotionCNN().to(device)  # EmotionCNN is defined in model.py
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
num_epochs = 25
for epoch in range(num_epochs):
    train_loss = train(model, train_loader, criterion, optimizer, device)
    val_accuracy = validate(model, val_loader, criterion, device)
    print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {train_loss:.4f}, Validation Accuracy: {val_accuracy:.2f}%")
print('Finished Training')
test_accuracy = validate(model, test_loader, criterion, device)
print(f'Test Accuracy: {test_accuracy:.2f}%')
torch.save(model.state_dict(), 'emotion_cnn.pth')
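Since overall accuracy plateaus around 60%, a per-class breakdown can show which expressions the model confuses. A rough sketch reusing test_loader, model, and device from above:

from collections import Counter

correct = Counter()
total = Counter()
model.eval()
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        preds = model(images).argmax(dim=1)
        for t, p in zip(labels.tolist(), preds.tolist()):
            total[t] += 1
            correct[t] += int(t == p)

for cls in sorted(total):
    print(f"class {cls}: {100 * correct[cls] / total[cls]:.1f}%")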
Expression Recognition: Images
import torch
import cv2
import torchvision.transforms as transforms
from PIL import Image
import torch.nn as nn
import model

# Expression labels (FER2013 class order)
emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

class EmotionPredictor:
    def __init__(self, model_path='emotion_cnn.pth'):
        # Load the model
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = model.EmotionCNN().to(self.device)  # build the model and move it to the device
        self.model.load_state_dict(torch.load(model_path, map_location=self.device))  # load the trained weights
        self.model.eval()  # switch to evaluation mode
        # Data transforms (must match training)
        self.transform = transforms.Compose([
            transforms.Grayscale(),                      # convert to grayscale
            transforms.ToTensor(),                       # convert to a tensor
            transforms.Normalize(mean=[0.5], std=[0.5])  # normalize
        ])
        # Load the Haar cascade face detector
        self.face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    # Prediction function
    def predict_emotion(self, image):
        image = image.resize((48, 48))  # resize to the model's input size
        image_tensor = self.transform(image).unsqueeze(0).to(self.device)  # transform and move to the device
        with torch.no_grad():
            output = self.model(image_tensor)          # run inference
            _, predicted = torch.max(output.data, 1)   # take the highest-scoring class
        return emotion_labels[predicted.item()]        # return the predicted label

    # Annotate an image
    def annotate_image(self, input_image_path, output_image_path):
        image = cv2.imread(input_image_path)  # read the image in color
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # grayscale copy for face detection
        faces = self.face_cascade.detectMultiScale(gray, 1.1, 4)  # detect faces
        for (x, y, w, h) in faces:
            face_image = image[y:y + h, x:x + w]  # crop the face region
            face_pil = Image.fromarray(cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB))  # convert to a PIL image
            predicted_emotion = self.predict_emotion(face_pil)  # predict the expression
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(image, predicted_emotion, (x, y - 10), font, 1, (255, 0, 0), 2)  # label above the face
        cv2.imwrite(output_image_path, image)  # save the annotated image

# Example usage
if __name__ == '__main__':
    emotion_predictor = EmotionPredictor()  # create the predictor
    input_image_path = '722.jpg'            # input image path
    output_image_path = 'output_image.jpg'  # output image path
    emotion_predictor.annotate_image(input_image_path, output_image_path)  # annotate and save the result
Expression Recognition: Camera
import torch
from torchvision import transforms
from PIL import Image
import cv2
import model

# Expression labels (FER2013 class order)
emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

class EmotionRecognizer:
    def __init__(self, model_path, device=None):
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = model.EmotionCNN().to(self.device)
        self.model.load_state_dict(torch.load(model_path, map_location=self.device))
        self.model.eval()
        self.transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5], std=[0.5])
        ])

    def predict_emotion(self, image):
        image = image.resize((48, 48))
        image_tensor = self.transform(image).unsqueeze(0).to(self.device)
        with torch.no_grad():
            output = self.model(image_tensor)
            _, predicted = torch.max(output.data, 1)
        return emotion_labels[predicted.item()]

# Real-time expression recognition from the webcam
def recognize_emotion_from_camera(recognizer):
    cap = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Create the face detector once, outside the frame loop
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
        for (x, y, w, h) in faces:
            face = Image.fromarray(gray[y:y + h, x:x + w])
            emotion = recognizer.predict_emotion(face)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(frame, emotion, (x, y - 10), font, 0.9, (255, 0, 0), 2, cv2.LINE_AA)
        cv2.imshow('Emotion Recognition', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
            break
    cap.release()
    cv2.destroyAllWindows()

# Example usage
model_path = 'emotion_cnn.pth'
recognizer = EmotionRecognizer(model_path)
recognize_emotion_from_camera(recognizer)
Download Links
Link: https://pan.baidu.com/s/1EmlBDiWmL4WRU-_pjlwltA?pwd=91pq
Extraction code: 91pq
Author: wuhu321