feat: initial implementation of People Counter web app
- Add Flask application with MJPEG video streaming
- Implement OpenCV DNN face detection module
- Add zone-based entry/exit tracking with cooldown mechanism (a rough sketch follows this list)
- Create web interface with real-time WebSocket updates
- Add model download script and comprehensive README
- Include OpenCV DNN model files for face detection
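The zone-based counting logic itself lives elsewhere in this commit, not in the file shown below. As a rough illustration only, a boundary-crossing tracker with a cooldown could look like the sketch that follows; the class name ZoneTracker and the parameters zone_x and cooldown_s are assumptions for illustration, not names taken from this commit.

import time

class ZoneTracker:
    """Illustrative sketch (not from this commit): count entries/exits when the
    largest face's centre crosses a vertical boundary, with a cooldown that
    suppresses double counting."""

    def __init__(self, zone_x=320, cooldown_s=2.0):
        self.zone_x = zone_x          # x position of the boundary, in pixels (assumed)
        self.cooldown_s = cooldown_s  # minimum seconds between counted events (assumed)
        self.entries = 0
        self.exits = 0
        self._last_side = None
        self._last_event = 0.0

    def update(self, faces):
        """faces: list of (x, y, w, h, confidence) as returned by FaceDetector.detect_faces()."""
        if not faces:
            return
        # Follow only the largest detected face, for simplicity.
        x, y, w, h, _ = max(faces, key=lambda f: f[2] * f[3])
        side = "right" if x + w // 2 >= self.zone_x else "left"
        now = time.time()
        if (self._last_side is not None and side != self._last_side
                and now - self._last_event > self.cooldown_s):
            if side == "right":
                self.entries += 1   # crossed left to right: count as entry
            else:
                self.exits += 1     # crossed right to left: count as exit
            self._last_event = now
        self._last_side = side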
face_detector.py (new file, 118 lines)
@@ -0,0 +1,118 @@
"""
Face Detection Module using OpenCV DNN Face Detector

Uses pre-trained models for accurate face detection.
"""

import cv2
import numpy as np
import os


class FaceDetector:
    def __init__(self, model_dir="models", confidence_threshold=0.5):
        """
        Initialize the face detector with OpenCV DNN models.

        Args:
            model_dir: Directory containing the model files
            confidence_threshold: Minimum confidence for face detection (0.0-1.0)
        """
        self.confidence_threshold = confidence_threshold
        self.model_dir = model_dir

        # Paths to model files
        self.prototxt_path = os.path.join(model_dir, "deploy.prototxt")
        self.model_path = os.path.join(model_dir, "res10_300x300_ssd_iter_140000.caffemodel")

        # Load the DNN face detector
        self.net = None
        self._load_model()

    def _load_model(self):
        """Load the OpenCV DNN face detection model."""
        if not os.path.exists(self.prototxt_path):
            raise FileNotFoundError(
                f"Model prototxt file not found: {self.prototxt_path}\n"
                "Please download the model files first."
            )
        if not os.path.exists(self.model_path):
            raise FileNotFoundError(
                f"Model weights file not found: {self.model_path}\n"
                "Please download the model files first."
            )

        self.net = cv2.dnn.readNetFromCaffe(self.prototxt_path, self.model_path)

    def detect_faces(self, frame):
        """
        Detect faces in a frame.

        Args:
            frame: BGR image frame from OpenCV

        Returns:
            List of tuples (x, y, w, h, confidence) for each detected face,
            where (x, y) is the top-left corner, w and h are width and height
        """
        if self.net is None:
            return []

        # Get frame dimensions
        (h, w) = frame.shape[:2]

        # Create blob from frame (preprocessing for DNN)
        blob = cv2.dnn.blobFromImage(
            cv2.resize(frame, (300, 300)),
            1.0,
            (300, 300),
            (104.0, 177.0, 123.0)
        )

        # Pass blob through network
        self.net.setInput(blob)
        detections = self.net.forward()

        faces = []

        # Process detections
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            # Filter weak detections
            if confidence > self.confidence_threshold:
                # Get bounding box coordinates
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (x1, y1, x2, y2) = box.astype("int")

                # Ensure coordinates are within frame bounds
                x1 = max(0, x1)
                y1 = max(0, y1)
                x2 = min(w, x2)
                y2 = min(h, y2)

                # Convert to (x, y, w, h) format
                faces.append((x1, y1, x2 - x1, y2 - y1, confidence))

        return faces

    def draw_faces(self, frame, faces, color=(0, 255, 0), thickness=2):
        """
        Draw bounding boxes around detected faces.

        Args:
            frame: Frame to draw on
            faces: List of face detections from detect_faces()
            color: BGR color tuple for bounding boxes
            thickness: Line thickness

        Returns:
            Frame with bounding boxes drawn
        """
        result_frame = frame.copy()
        for (x, y, w, h, confidence) in faces:
            cv2.rectangle(result_frame, (x, y), (x + w, y + h), color, thickness)
            # Optionally draw confidence score
            label = f"{confidence:.2f}"
            cv2.putText(result_frame, label, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
        return result_frame
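A minimal usage sketch for this module (not part of the commit): it assumes the model files have already been downloaded into models/ and that a webcam is available at index 0.

import cv2
from face_detector import FaceDetector

# Hypothetical smoke test: detect faces in a single webcam frame.
detector = FaceDetector(model_dir="models", confidence_threshold=0.5)

cap = cv2.VideoCapture(0)  # assumption: default webcam at index 0
ok, frame = cap.read()
cap.release()

if ok:
    faces = detector.detect_faces(frame)
    print(f"Detected {len(faces)} face(s)")
    annotated = detector.draw_faces(frame, faces)
    cv2.imwrite("annotated.jpg", annotated)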