# -*- coding: utf-8 -*-
import cv2
from ultralytics import YOLO
import tkinter as tk
from tkinter import scrolledtext, messagebox
from PIL import ImageTk, Image as PILImage
import os
from datetime import datetime
import openpyxl
from openpyxl.drawing.image import Image as OpenpyxlImage
import pandas as pd
import matplotlib.pyplot as plt
import time
import math
import numpy as np
import json
import platform
import sys

try:
    from tkmacosx import Button
except ImportError:
    Button = tk.Button

# ===========================
# CONFIGURATION
# ===========================
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Models
MODEL_PERSON_PATH = os.path.join(BASE_DIR, 'models', 'yolov8n.pt')  # Reverted to Nano for speed
MODEL_FACE_PATH = os.path.join(BASE_DIR, 'models', 'yolov8n-face.pt')

# Thresholds
PERSON_CONF = 0.45  # Balance: 0.45 catches partial occlusions without too many ghost detections
FACE_CONF = 0.25    # Lowered to 0.25 to catch "hard" faces (profile / far away)

# Tracker
TRACK_IOU_MATCH = 0.45   # STRICTER (was 0.35) to avoid jumping to distant predictions
TRACK_MAX_MISSES = 10
TRACK_MIN_HITS = 3       # Lowered to 3 to pick up fast-moving people (was 5)

# Excel
today_date_str = datetime.now().strftime("%Y-%m-%d")
EXCEL_FILENAME = os.path.join(BASE_DIR, 'data', f"registro_personas_{today_date_str}.xlsx")

# Config persistence
CONFIG_FILENAME = os.path.join(BASE_DIR, 'config', "fluxus_config.json")

# UI colors
BG_COLOR = "#282c34"
TEXT_COLOR = "#abb2bf"
ACCENT_COLOR = "#61afef"

# Cross-platform video backend
V_BACKEND = cv2.CAP_ANY
if platform.system() == "Windows":
    V_BACKEND = cv2.CAP_DSHOW
elif platform.system() == "Darwin":  # macOS
    V_BACKEND = cv2.CAP_AVFOUNDATION
elif platform.system() == "Linux":
    V_BACKEND = cv2.CAP_V4L2
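
# Added note: cv2.CAP_ANY stays as the fallback on any other OS, letting OpenCV pick a
# backend automatically. If you need to confirm which backend was actually opened, recent
# OpenCV builds expose VideoCapture.getBackendName(); a minimal diagnostic sketch
# (not part of the original flow):
#
#   cap = cv2.VideoCapture(0, V_BACKEND)
#   if cap.isOpened():
#       print("Video backend:", cap.getBackendName())
#   cap.release()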

# ===========================
# UTILITY CLASSES
# ===========================

class Track:
    __slots__ = ("id", "box", "hits", "misses", "has_face", "face_buffer", "centroid", "prev_centroid", "last_crossing_time", "kf", "prediction", "line_side")

    def __init__(self, tid, box):
        self.id = tid
        self.box = box
        self.hits = 1
        self.misses = 0
        self.has_face = False
        self.face_buffer = 0
        self.centroid = self._calc_centroid(box)
        self.prev_centroid = None
        self.last_crossing_time = 0
        self.line_side = 0  # -1 = below the line, 0 = unknown, 1 = above the line

        # Kalman filter (state: [x, y, dx, dy])
        # TUNED FOR LOW FRAME RATES (10-20 FPS on a Raspberry Pi)
        self.kf = cv2.KalmanFilter(4, 2)

        # Measurement matrix: we only observe position (x, y)
        self.kf.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)

        # Transition matrix: x' = x + dx, y' = y + dy (constant-velocity model)
        self.kf.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)

        # PROCESS NOISE (Q): how quickly the velocity is allowed to change.
        # At low FPS there is more motion between frames -> more velocity uncertainty.
        # High values on dx, dy (indices 2, 3) allow quick changes of direction.
        self.kf.processNoiseCov = np.array([
            [1, 0, 0, 0],    # x: low uncertainty
            [0, 1, 0, 0],    # y: low uncertainty
            [0, 0, 25, 0],   # dx: HIGH uncertainty (can accelerate/brake sharply)
            [0, 0, 0, 25]    # dy: HIGH uncertainty
        ], np.float32) * 0.1  # global scale factor

        # MEASUREMENT NOISE (R): how noisy the YOLO detections are.
        # Low value  = we trust YOLO a lot (more reactive, less smoothing).
        # High value = we trust it less (smoother, ignores small jumps).
        self.kf.measurementNoiseCov = np.array([[5, 0], [0, 5]], np.float32)

        # Initialize the error covariance (P): start with medium uncertainty
        self.kf.errorCovPost = np.eye(4, dtype=np.float32) * 10

        # Initialize the state
        self.kf.statePre = np.array([[self.centroid[0]], [self.centroid[1]], [0], [0]], np.float32)
        self.kf.statePost = np.array([[self.centroid[0]], [self.centroid[1]], [0], [0]], np.float32)
        self.prediction = self.centroid
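
        # Added note: with the matrices above this is a standard constant-velocity Kalman
        # filter. A sketch of the math the cv2.KalmanFilter calls perform (for reference
        # only, not executed here):
        #
        #   predict:  state_pre  = F @ state_post              # F = transitionMatrix
        #             P_pre      = F @ P_post @ F.T + Q        # Q = processNoiseCov
        #   correct:  K          = P_pre @ H.T @ inv(H @ P_pre @ H.T + R)
        #             state_post = state_pre + K @ (z - H @ state_pre)
        #
        # where H = measurementMatrix, R = measurementNoiseCov and z = (cx, cy) is the
        # detector centroid passed to update_box() below.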

    def _calc_centroid(self, box):
        x1, y1, x2, y2 = box
        return ((x1 + x2) // 2, (y1 + y2) // 2)

    def predict(self):
        # Predict the next state
        p = self.kf.predict()
        self.prediction = (int(p[0].item()), int(p[1].item()))
        return self.prediction

    def update_box(self, box):
        self.prev_centroid = self.centroid
        self.box = box
        # Real measurement (detector box centre)
        measured_centroid = self._calc_centroid(box)

        # Kalman correction
        e = self.kf.correct(np.array([[np.float32(measured_centroid[0])], [np.float32(measured_centroid[1])]]))

        # Use the FILTERED position as ground truth (very stable)
        self.centroid = (int(e[0].item()), int(e[1].item()))


class Tracker:
    def __init__(self, iou_match=0.5, max_misses=10, min_hits=3):
        self.iou_match = iou_match
        self.max_misses = max_misses
        self.min_hits = min_hits
        self._tracks = []
        self._next_id = 1

    def update(self, dets):
        # 1. PREDICT: move every track according to its velocity (Kalman)
        for tr in self._tracks:
            tr.predict()

        # Greedily assign each track to its best-overlapping detection
        assigned = set()
        for tr in self._tracks:
            best_iou = 0.0
            best_j = -1
            for j, db in enumerate(dets):
                if j in assigned: continue
                v = iou(tr.box, db)
                if v > best_iou:
                    best_iou = v
                    best_j = j
            if best_j >= 0 and best_iou >= self.iou_match:
                tr.update_box(dets[best_j])
                tr.hits += 1
                tr.misses = 0
                assigned.add(best_j)
            else:
                tr.misses += 1

        # Unassigned detections become new tracks
        for j, db in enumerate(dets):
            if j not in assigned:
                t = Track(self._next_id, db)
                self._next_id += 1
                self._tracks.append(t)

        # Drop tracks that have been missing for too long
        self._tracks = [t for t in self._tracks if t.misses <= self.max_misses]

    def confirmed_tracks(self):
        # Allow tracks with up to 2 missed frames (brief occlusions)
        return [t for t in self._tracks if t.hits >= self.min_hits and t.misses <= 2]
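
    # Added note: the intended per-frame driving loop (App.update() does exactly this
    # further down) is, roughly:
    #
    #   tracker = Tracker(iou_match=TRACK_IOU_MATCH, max_misses=TRACK_MAX_MISSES, min_hits=TRACK_MIN_HITS)
    #   ...
    #   tracker.update(person_boxes)            # person_boxes: [[x1, y1, x2, y2], ...]
    #   for tr in tracker.confirmed_tracks():
    #       ...                                 # draw / count only confirmed tracks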

class LineCounter:
    def __init__(self):
        self.p1 = None  # (x, y)
        self.p2 = None  # (x, y)
        self.total_in = 0
        self.total_out = 0
        self.manual_offset = 0

    def set_line(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
        # Do not reset the counters

    def reset_counts(self):
        self.total_in = 0
        self.total_out = 0
        self.manual_offset = 0

    def set_manual_count(self, target_count):
        # FIXED: use an offset instead of modifying total_in.
        # This prevents "lowering the count" from inflating the entry counter.
        current_net = self.total_in - self.total_out
        self.manual_offset = target_count - current_net
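
        # Added worked example: if total_in = 5 and total_out = 2 (net 3) and the operator
        # corrects the count to 10, manual_offset becomes 10 - 3 = 7. The UI and the Excel
        # log then report total_in - total_out + manual_offset = 10, while the raw
        # entry/exit counters stay untouched.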

    def is_set(self):
        return self.p1 is not None and self.p2 is not None

    def check_crossing(self, track, calibration=None):
        if not self.is_set() or track.prev_centroid is None:
            return None

        # Determine the crossing points (screen-space or projected)
        A, B = self.p1, self.p2
        P_prev = track.prev_centroid
        P_curr = track.centroid

        # If calibration is active, project everything onto the virtual ground plane
        if calibration and calibration.active and calibration.matrix is not None:
            tA = calibration.transform_point(A)
            tB = calibration.transform_point(B)

            h_curr = track.box[3] - track.box[1]
            ground_offset = int(h_curr * 0.35)
            feet_prev = (track.prev_centroid[0], track.prev_centroid[1] + ground_offset)
            feet_curr = (track.centroid[0], track.centroid[1] + ground_offset)
            tP_prev = calibration.transform_point(feet_prev)
            tP_curr = calibration.transform_point(feet_curr)

            if tA is not None and tB is not None and tP_prev is not None and tP_curr is not None:
                A, B = tA, tB
                P_prev, P_curr = tP_prev, tP_curr

        # === HYBRID METHOD: segment intersection + side state ===

        # 1. Work out which side of the line the current point is on
        line_vec = (B[0] - A[0], B[1] - A[1])
        normal = (-line_vec[1], line_vec[0])
        normal_len = math.hypot(normal[0], normal[1])

        if normal_len < 1:
            return None

        to_curr = (P_curr[0] - A[0], P_curr[1] - A[1])
        signed_dist = (normal[0] * to_curr[0] + normal[1] * to_curr[1]) / normal_len
        current_side = 1 if signed_dist > 0 else -1

        # 2. Initialize the side if the track is new
        if track.line_side == 0:
            track.line_side = current_side
            return None  # First time seen, do not count

        # 3. Check segment intersection (original method - reliable)
        def ccw(A, B, C):
            return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])

        crossed = (ccw(A, P_prev, P_curr) != ccw(B, P_prev, P_curr)) and (ccw(A, B, P_prev) != ccw(A, B, P_curr))

        if not crossed:
            # No crossing, but refresh the side so the state stays current
            track.line_side = current_side
            return None

        # 4. THERE WAS A CROSSING - apply the anti-jitter filters

        # Temporal cooldown (avoids counting the same person several times)
        if time.time() - track.last_crossing_time < 1.5:  # 1.5 second cooldown
            return None

        # Minimum movement (avoids jitter from a static box)
        dist_moved = math.hypot(P_curr[0] - P_prev[0], P_curr[1] - P_prev[1])
        if dist_moved < 8:  # At least 8 pixels
            return None

        # 5. CROSSING CONFIRMED
        track.last_crossing_time = time.time()
        prev_side = track.line_side
        track.line_side = current_side

        # Decide the direction from the change of side
        if prev_side == -1 and current_side == 1:
            self.total_in += 1
            return "Entrada"
        elif prev_side == 1 and current_side == -1:
            self.total_out += 1
            return "Salida"

        return None
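
    # Added worked example of the crossing test above (values chosen for illustration):
    # with a horizontal line A=(0, 0), B=(10, 0) and a centroid moving from
    # P_prev=(5, -3) to P_curr=(5, 4):
    #   * normal = (0, 10), signed_dist = +4  -> current_side = +1 (previous frame gave -1)
    #   * the ccw() segment test reports an intersection, so crossed is True
    #   * the side flips -1 -> +1, which this class reports as "Entrada"
    # Whether that corresponds to people walking in or out of the room depends entirely on
    # how the operator draws the line on screen.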

# ===========================
# HELPERS
# ===========================
def iou(a, b):
    x1 = max(a[0], b[0]); y1 = max(a[1], b[1])
    x2 = min(a[2], b[2]); y2 = min(a[3], b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    if inter == 0: return 0.0
    area_a = max(0, a[2] - a[0]) * max(0, a[3] - a[1])
    area_b = max(0, b[2] - b[0]) * max(0, b[3] - b[1])
    return inter / float(area_a + area_b - inter + 1e-9)
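
# Added worked example: for a = [0, 0, 10, 10] and b = [5, 5, 15, 15] the intersection is
# 5 * 5 = 25 and the union is 100 + 100 - 25 = 175, so iou(a, b) ≈ 0.143. The 1e-9 term
# only guards against division by zero for degenerate boxes.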

def nms(boxes, iou_th=0.6):
    out = []
    boxes = sorted(boxes, key=lambda b: (b[2]-b[0])*(b[3]-b[1]), reverse=True)
    while boxes:
        base = boxes.pop(0)
        out.append(base)
        boxes = [b for b in boxes if iou(base, b) < iou_th]
    return out
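
# Added note: this is a greedy non-maximum suppression keyed on box AREA rather than on
# detector confidence (the boxes kept here no longer carry a score). The largest box in an
# overlapping cluster wins and everything overlapping it by iou >= iou_th is dropped.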

def ensure_excel():
    # Make sure the data directory exists before writing the workbook
    os.makedirs(os.path.dirname(EXCEL_FILENAME), exist_ok=True)
    if not os.path.exists(EXCEL_FILENAME):
        wb = openpyxl.Workbook()
        sh = wb.active
        sh.title = "Registro"
        sh.append(["Fecha", "Hora", "Evento", "Orientacion", "Total Dentro"])
        wb.save(EXCEL_FILENAME)


def log_to_excel(evento, orientacion, total_dentro, widget=None):
    now = datetime.now()
    fecha = now.strftime("%Y-%m-%d")
    hora = now.strftime("%H:%M:%S")

    try:
        wb = openpyxl.load_workbook(EXCEL_FILENAME)
        sh = wb.active
        sh.append([fecha, hora, evento, orientacion, total_dentro])
        try:
            wb.save(EXCEL_FILENAME)
        except PermissionError:
            # The workbook is probably open in Office - save a backup next to it instead
            backup_name = os.path.join(
                os.path.dirname(EXCEL_FILENAME),
                f"backup_{fecha}_{hora.replace(':', '-')}_{os.path.basename(EXCEL_FILENAME)}"
            )
            wb.save(backup_name)
            print(f"Excel abierto, guardado backup: {backup_name}")

        msg = f"[{hora}] {evento} ({orientacion}) - Total: {total_dentro}"
        if widget and widget.winfo_exists():
            widget.configure(state='normal')
            widget.insert(tk.END, msg + "\n")
            widget.see(tk.END)
            widget.configure(state='disabled')
        print(msg)
    except Exception as e:
        print(f"Error Excel: {e}")

def generate_graph():
    print("Iniciando generación de gráfico...")
    try:
        if not os.path.exists(EXCEL_FILENAME):
            print(f"Archivo Excel no encontrado: {EXCEL_FILENAME}")
            return

        # Read the data
        df = pd.read_excel(EXCEL_FILENAME)
        print(f"Datos leídos: {len(df)} filas.")
        if len(df) < 2:
            print("No hay suficientes datos para graficar (< 2 filas).")
            return

        # Convert the time column to datetime for the X axis
        df['Hora_DT'] = pd.to_datetime(df['Fecha'].astype(str) + ' ' + df['Hora'].astype(str))

        # Make sure the count column is numeric
        df['Total Dentro'] = pd.to_numeric(df['Total Dentro'], errors='coerce').fillna(0)

        # Build the chart
        plt.figure(figsize=(12, 6))
        plt.plot(df['Hora_DT'], df['Total Dentro'], marker='o', linestyle='-', color='#61afef', linewidth=2, markersize=4)

        plt.title(f"Afluencia de Personas - {today_date_str}", fontsize=14, fontweight='bold')
        plt.xlabel("Hora", fontsize=12)
        plt.ylabel("Personas Dentro", fontsize=12)
        plt.grid(True, linestyle='--', alpha=0.7)
        plt.xticks(rotation=45)

        # Force integer ticks on the Y axis
        from matplotlib.ticker import MaxNLocator
        plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))

        plt.tight_layout()

        # Save a temporary image
        img_path = "temp_graph.png"
        plt.savefig(img_path)
        plt.close()

        # Insert it into the Excel workbook
        wb = openpyxl.load_workbook(EXCEL_FILENAME)
        if "Resumen Gráfico" in wb.sheetnames:
            del wb["Resumen Gráfico"]
        ws = wb.create_sheet("Resumen Gráfico")

        img = OpenpyxlImage(img_path)
        ws.add_image(img, 'A1')
        wb.save(EXCEL_FILENAME)

        # Clean up the temporary image
        if os.path.exists(img_path):
            os.remove(img_path)

        print("Gráfica generada y guardada en Excel.")

    except Exception as e:
        print(f"Error generando gráfica: {e}")

class PerspectiveCalibration:
    def __init__(self):
        self.active = False
        self.points = []  # [(x, y), (x, y), (x, y), (x, y)]
        self.matrix = None
        self.width = 500   # Virtual metres * scale (e.g. 5 m * 100 px)
        self.height = 500
        self.dragging_idx = -1
        self.height_ref_p1 = None
        self.height_ref_p2 = None
        self.pixels_per_meter = 100  # Default value

    def set_default_points(self, w, h):
        # Default centred square
        cx, cy = w // 2, h // 2
        d = 100
        self.points = [
            (cx - d, cy - d),  # TL
            (cx + d, cy - d),  # TR
            (cx + d, cy + d),  # BR
            (cx - d, cy + d)   # BL
        ]
        self.update_matrix()

        # Initialize the height reference line (vertical by default)
        self.height_ref_p1 = (cx, cy - 50)
        self.height_ref_p2 = (cx, cy + 50)

    def update_matrix(self):
        if len(self.points) != 4: return
        pts1 = np.float32(self.points)
        pts2 = np.float32([[0, 0], [self.width, 0], [self.width, self.height], [0, self.height]])
        self.matrix = cv2.getPerspectiveTransform(pts1, pts2)

    def transform_point(self, point):
        if self.matrix is None: return None
        pts = np.float32([[point]]).reshape(-1, 1, 2)
        trans = cv2.perspectiveTransform(pts, self.matrix)
        return trans[0][0]  # (x, y) in the transformed plane
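
    # Added note: update_matrix() builds a homography that maps the four dragged image
    # points onto a width x height virtual top-down rectangle, so transform_point() sends
    # the first calibration corner (TL) to roughly (0, 0) and the third one (BR) to roughly
    # (self.width, self.height). LineCounter.check_crossing() uses this to compare foot
    # positions on that virtual ground plane instead of in raw pixels.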

    def handle_click(self, x, y):
        # Check whether the click is near one of the grid points
        for i, p in enumerate(self.points):
            if math.hypot(p[0] - x, p[1] - y) < 20:
                self.dragging_idx = i
                return True

        # Check whether the click is near the height reference line
        if self.height_ref_p1 and math.hypot(self.height_ref_p1[0] - x, self.height_ref_p1[1] - y) < 20:
            self.dragging_idx = 10  # Special ID for height point P1
            return True
        if self.height_ref_p2 and math.hypot(self.height_ref_p2[0] - x, self.height_ref_p2[1] - y) < 20:
            self.dragging_idx = 11  # Special ID for height point P2
            return True

        return False

    def handle_drag(self, x, y):
        if self.dragging_idx != -1:
            if self.dragging_idx == 10:
                self.height_ref_p1 = (x, y)
            elif self.dragging_idx == 11:
                self.height_ref_p2 = (x, y)
            else:
                self.points[self.dragging_idx] = (x, y)
            self.update_matrix()
            return True
        return False

    def handle_release(self):
        self.dragging_idx = -1

    def move_grid(self, dx, dy):
        self.points = [(x + dx, y + dy) for x, y in self.points]
        self.update_matrix()

    def scale_grid(self, factor):
        cx = sum(p[0] for p in self.points) / 4
        cy = sum(p[1] for p in self.points) / 4
        new_points = []
        for x, y in self.points:
            nx = cx + (x - cx) * factor
            ny = cy + (y - cy) * factor
            new_points.append((nx, ny))
        self.points = new_points
        self.update_matrix()

    def rotate_grid(self, angle_deg):
        cx = sum(p[0] for p in self.points) / 4
        cy = sum(p[1] for p in self.points) / 4
        rad = math.radians(angle_deg)
        cos_a = math.cos(rad)
        sin_a = math.sin(rad)
        new_points = []
        for x, y in self.points:
            tx, ty = x - cx, y - cy
            nx = cx + tx * cos_a - ty * sin_a
            ny = cy + tx * sin_a + ty * cos_a
            new_points.append((nx, ny))
        self.points = new_points
        self.update_matrix()

# ===========================
# UI & APP
# ===========================
class App:
    def __init__(self, window, window_title):
        self.window = window
        self.window.title(window_title)
        self.window.configure(bg=BG_COLOR)
        self.window.resizable(True, True)  # Allow resizing

        # Layout
        self.top_frame = tk.Frame(window, bg=BG_COLOR)
        self.top_frame.pack(fill="x", padx=10, pady=5)

        self.lbl_stats = tk.Label(self.top_frame, text="Dentro (Calc): 0 | Entradas: 0 | Salidas: 0", font=("Helvetica", 14, "bold"), bg=BG_COLOR, fg=ACCENT_COLOR)
        self.lbl_stats.pack(side="left")

        # Video frame - pack_propagate(False) so the content cannot change the frame size
        self.video_frame = tk.Frame(window, bg="black")
        self.video_frame.pack(fill="both", expand=True, padx=10, pady=5)
        self.video_frame.pack_propagate(False)
        self.video_frame.bind('<Configure>', self.on_resize)

        self.lbl_video = tk.Label(self.video_frame, bg="black")
        self.lbl_video.pack(fill="both", expand=True)
        self.lbl_video.bind("<Button-1>", self.on_video_click)
        self.lbl_video.bind("<B1-Motion>", self.on_video_drag)
        self.lbl_video.bind("<ButtonRelease-1>", self.on_video_release)

        self.controls_frame = tk.Frame(window, bg=BG_COLOR)
        self.controls_frame.pack(fill="x", padx=10, pady=10)

        self.btn_draw = Button(self.controls_frame, text="Dibujar Línea", command=self.start_drawing, bg=ACCENT_COLOR, fg="white", font=("Helvetica", 10, "bold"))
        self.btn_draw.pack(side="left", padx=5)

        self.btn_reset_line = Button(self.controls_frame, text="Borrar Línea", command=self.reset_line, bg="#e06c75", fg="white", font=("Helvetica", 10))
        self.btn_reset_line.pack(side="left", padx=5)

        self.btn_settings = Button(self.controls_frame, text="Ajustes Cámara", command=self.open_settings_window, bg="#e5c07b", fg="black", font=("Helvetica", 10))
        self.btn_settings.pack(side="left", padx=5)

        self.btn_720p = Button(self.controls_frame, text="720p", command=lambda: self.change_resolution(1280, 720), bg="#61afef", fg="white", font=("Helvetica", 10))
        self.btn_720p.pack(side="left", padx=5)

        self.btn_1080p = Button(self.controls_frame, text="1080p", command=lambda: self.change_resolution(1920, 1080), bg="#61afef", fg="white", font=("Helvetica", 10))
        self.btn_1080p.pack(side="left", padx=5)

        self.btn_correct = Button(self.controls_frame, text="Corregir Contador", command=self.open_correction_dialog, bg="#d19a66", fg="white", font=("Helvetica", 10, "bold"))
        self.btn_correct.pack(side="left", padx=5)

        self.btn_calib = Button(self.controls_frame, text="Calibrar 3D", command=self.toggle_calibration, bg="#98c379", fg="white", font=("Helvetica", 10, "bold"))
        self.btn_calib.pack(side="left", padx=5)

        # State
        self.calibration = PerspectiveCalibration()  # New calibration class

        self.btn_roi = Button(self.controls_frame, text="Reforzar Zona", command=self.start_roi_selection, bg="#c678dd", fg="white", font=("Helvetica", 10))
        self.btn_roi.pack(side="left", padx=5)

        self.btn_graph = Button(self.controls_frame, text="Generar Gráfico", command=generate_graph, bg="#98c379", fg="black", font=("Helvetica", 10))
        self.btn_graph.pack(side="left", padx=5)

        self.log_text = scrolledtext.ScrolledText(window, height=8, bg="#21252b", fg="white", font=("Courier", 9) if platform.system() != "Windows" else ("Consolas", 9))
        self.log_text.pack(fill="x", padx=10, pady=5)

        # Logic
        # Use the capture backend appropriate for the current operating system
        self.cap = cv2.VideoCapture(0, V_BACKEND)
        # Default to 1080p
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

        self.tracker = Tracker(iou_match=TRACK_IOU_MATCH, max_misses=TRACK_MAX_MISSES, min_hits=TRACK_MIN_HITS)
        self.line_counter = LineCounter()

        self.drawing_mode = False
        self.draw_points = []
        self.frame_count = 0  # Counter used to throttle per-frame work
        self.current_frame_size = None  # Initialized explicitly

        # Frame dimensions used for resizing
        self.frame_w = 640
        self.frame_h = 480

        # ROI ("Reforzar Zona")
        self.roi_mode = False
        self.roi_points = []
        self.roi_rects = []  # List of ROIs [(x, y, w, h), ...]

        self.changing_resolution = False  # Flag to avoid a crash while switching resolution

        self.person_conf = PERSON_CONF  # Dynamic sensitivity

        # Models
        self.load_models()

        # Load the saved configuration (if any)
        self.load_config()

        ensure_excel()
        self.update()

    def on_resize(self, event):
        # Track the available dimensions whenever the window size changes
        self.frame_w = event.width
        self.frame_h = event.height

    def load_models(self):
        print("Cargando modelos...")
        try:
            self.model_person = YOLO(MODEL_PERSON_PATH)
            self.model_face = YOLO(MODEL_FACE_PATH)
            print("Modelos cargados.")
        except Exception as e:
            messagebox.showerror("Error", f"No se pudieron cargar los modelos YOLO.\nAsegúrate de que {MODEL_PERSON_PATH} y {MODEL_FACE_PATH} estén en la carpeta.\n\nError: {e}")
            self.window.destroy()

    def save_config(self):
        """Save the current configuration (line, ROIs, calibration) to JSON."""
        config = {
            "line": {
                "p1": list(self.line_counter.p1) if self.line_counter.p1 else None,
                "p2": list(self.line_counter.p2) if self.line_counter.p2 else None,
            },
            "rois": [list(r) for r in self.roi_rects],
            "calibration": {
                "active": self.calibration.active,
                "points": [list(p) for p in self.calibration.points] if self.calibration.points else [],
                "height_ref_p1": list(self.calibration.height_ref_p1) if self.calibration.height_ref_p1 else None,
                "height_ref_p2": list(self.calibration.height_ref_p2) if self.calibration.height_ref_p2 else None,
            },
            "person_conf": self.person_conf,
        }
        try:
            # Make sure the config directory exists before writing
            os.makedirs(os.path.dirname(CONFIG_FILENAME), exist_ok=True)
            with open(CONFIG_FILENAME, 'w', encoding='utf-8') as f:
                json.dump(config, f, indent=2, ensure_ascii=False)
            print(f"Configuración guardada en {CONFIG_FILENAME}")
        except Exception as e:
            print(f"Error guardando configuración: {e}")
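
        # Added note: with the dictionary above, fluxus_config.json ends up shaped like
        # (illustrative values only):
        #
        #   {
        #     "line": {"p1": [320, 100], "p2": [320, 600]},
        #     "rois": [[50, 50, 200, 150]],
        #     "calibration": {"active": false, "points": [[220, 140], ...],
        #                     "height_ref_p1": [320, 190], "height_ref_p2": [320, 290]},
        #     "person_conf": 0.45
        #   }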

    def load_config(self):
        """Load the configuration from JSON if it exists."""
        if not os.path.exists(CONFIG_FILENAME):
            print("No se encontró archivo de configuración previo. Usando valores por defecto.")
            return

        try:
            with open(CONFIG_FILENAME, 'r', encoding='utf-8') as f:
                config = json.load(f)

            # Counting line
            if config.get("line"):
                p1 = config["line"].get("p1")
                p2 = config["line"].get("p2")
                if p1 and p2:
                    self.line_counter.set_line(tuple(p1), tuple(p2))
                    print(f"Línea cargada: {p1} -> {p2}")

            # ROIs
            if config.get("rois"):
                self.roi_rects = [tuple(r) for r in config["rois"]]
                print(f"ROIs cargadas: {len(self.roi_rects)}")

            # 3D calibration
            if config.get("calibration"):
                cal = config["calibration"]
                if cal.get("points") and len(cal["points"]) == 4:
                    self.calibration.points = [tuple(p) for p in cal["points"]]
                    self.calibration.update_matrix()
                    self.calibration.active = cal.get("active", False)
                if cal.get("height_ref_p1"):
                    self.calibration.height_ref_p1 = tuple(cal["height_ref_p1"])
                if cal.get("height_ref_p2"):
                    self.calibration.height_ref_p2 = tuple(cal["height_ref_p2"])
                print(f"Calibración 3D cargada. Activa: {self.calibration.active}")

            # Sensitivity
            if config.get("person_conf"):
                self.person_conf = config["person_conf"]

            print("Configuración cargada correctamente.")
        except Exception as e:
            print(f"Error cargando configuración: {e}")

    def start_drawing(self):
        self.drawing_mode = True
        self.roi_mode = False
        self.draw_points = []
        self.btn_draw.config(text="Haz clic en 2 puntos...", state="disabled")

    def start_roi_selection(self):
        self.roi_mode = True
        self.drawing_mode = False
        self.roi_points = []
        self.btn_roi.config(text="Marca esq. Sup-Izq y Inf-Der", state="disabled")

    def reset_line(self):
        self.line_counter.p1 = None
        self.line_counter.p2 = None
        self.draw_points = []
        self.roi_rects = []  # Clear every reinforced zone as well
        self.btn_draw.config(text="Dibujar Línea", state="normal")
        self.btn_roi.config(text="Reforzar Zona", state="normal")

    def set_camera_property(self, prop_id, value):
        if self.cap and self.cap.isOpened():
            try:
                self.cap.set(prop_id, float(value))
            except Exception:
                pass

    def open_settings_window(self):
        if hasattr(self, 'settings_window') and self.settings_window is not None and self.settings_window.winfo_exists():
            self.settings_window.lift()
            return

        self.settings_window = tk.Toplevel(self.window)
        self.settings_window.title("Control Avanzado de Cámara")
        self.settings_window.geometry("400x650")  # Taller to fit the extra controls
        self.settings_window.configure(bg=BG_COLOR)

        # === HELPER: slider initialized with the current camera value ===
        def add_control(label_text, prop_id, min_val, max_val, step=1, default=0, is_auto_chk=False, auto_prop_id=None):
            frame = tk.Frame(self.settings_window, bg=BG_COLOR)
            frame.pack(fill="x", padx=10, pady=5)

            # Label
            tk.Label(frame, text=label_text, bg=BG_COLOR, fg="white", width=15, anchor="w").pack(side="left")

            # Optional "Auto" checkbox
            if is_auto_chk and auto_prop_id is not None:
                var_auto = tk.IntVar()
                try:
                    val = self.cap.get(auto_prop_id)
                    # Logitech: usually 1=Manual, 3=Auto, or 0/1. The exact values depend heavily on the driver.
                    if val > 0: var_auto.set(1)
                except Exception: pass

                def toggle_auto():
                    # Logitech C920: autofocus is sometimes 0=On/1=Off, sometimes 1=Manual/3=Auto.
                    # Standard UVC: exposure uses 1=Manual, 3=Auto; focus uses 0=Auto off (manual), 1=Auto on.
                    v = var_auto.get()
                    # Generic toggle attempt
                    self.set_camera_property(auto_prop_id, 1 if v else 0)
                    # Re-enabling/disabling the slider is omitted for simplicity; the user can still force it.

                chk = tk.Checkbutton(frame, text="Auto", variable=var_auto, command=toggle_auto,
                                     bg=BG_COLOR, fg="white", selectcolor="#282c34", activebackground=BG_COLOR)
                chk.pack(side="right")

            # Slider
            is_supported = True
            try:
                curr = self.cap.get(prop_id)
                if curr == -1:
                    curr = default
                    is_supported = False
            except Exception:
                curr = default
                is_supported = False

            scale = tk.Scale(frame, from_=min_val, to=max_val, resolution=step, orient="horizontal",
                             command=lambda v: self.set_camera_property(prop_id, v),
                             fg="white", bg=BG_COLOR, highlightthickness=0, length=200)
            scale.set(curr)

            if not is_supported:
                scale.config(state='disabled', fg='#5c6370', label=f"{label_text} (N/A)")
            else:
                scale.pack(side="right", expand=True, fill="x")

            if not is_supported:
                tk.Label(frame, text="(No Soportado)", bg=BG_COLOR, fg="#5c6370", font=("Arial", 8)).pack(side="right", padx=5)

        # === CONTROLS ===
        tk.Label(self.settings_window, text="--- Imagen ---", bg=BG_COLOR, fg="#abb2bf").pack(pady=5)
        add_control("Brillo", cv2.CAP_PROP_BRIGHTNESS, 0, 255, 1, 128)
        add_control("Contraste", cv2.CAP_PROP_CONTRAST, 0, 255, 1, 128)
        add_control("Saturación", cv2.CAP_PROP_SATURATION, 0, 255, 1, 128)
        add_control("Nitidez", cv2.CAP_PROP_SHARPNESS, 0, 255, 1, 128)
        add_control("Gamma", cv2.CAP_PROP_GAMMA, 0, 500, 1, 100)

        tk.Label(self.settings_window, text="--- Exposición & Foco ---", bg=BG_COLOR, fg="#abb2bf").pack(pady=5)
        # Exposure: usually -13 to 0 (powers of two) on Logitech cameras; -6 is typical.
        # Auto exposure: 1=Manual, 3=Auto (standard UVC).
        # Trick: to disable auto exposure on a C920 you usually set CAP_PROP_AUTO_EXPOSURE to 1 (manual) or 0.25 (cv2 quirk).
        # Exposure handling is tricky in OpenCV + Windows; a -13..0 range is exposed here.
        add_control("Exposición", cv2.CAP_PROP_EXPOSURE, -13, 0, 1, -5)

        # Gain
        add_control("Ganancia", cv2.CAP_PROP_GAIN, 0, 255, 1, 0)

        # Focus: 0 to 255 (driver units), plus the autofocus toggle
        add_control("Foco (0=Inf)", cv2.CAP_PROP_FOCUS, 0, 255, 5, 0, is_auto_chk=True, auto_prop_id=cv2.CAP_PROP_AUTOFOCUS)

        tk.Label(self.settings_window, text="--- Detección ---", bg=BG_COLOR, fg="#abb2bf").pack(pady=5)

        # AI sensitivity slider (people)
        lbl_conf = tk.Label(self.settings_window, text=f"Sensibilidad Personas ({int(self.person_conf*100)}%)", bg=BG_COLOR, fg="white")
        lbl_conf.pack(pady=(5, 0))
        def update_conf(v):
            self.person_conf = float(v)
            lbl_conf.config(text=f"Sensibilidad Personas ({int(self.person_conf*100)}%)")
        s_conf = tk.Scale(self.settings_window, from_=0.1, to=1.0, resolution=0.05, orient="horizontal",
                          command=update_conf, fg="white", bg=BG_COLOR, highlightthickness=0, length=300)
        s_conf.set(self.person_conf)
        s_conf.pack(pady=5)

        # AI sensitivity slider (faces): lets the user "boost" face detection.
        # FACE_CONF is a module-level constant, so the callback updates it via `global`
        # instead of keeping a separate self.face_conf attribute.
        lbl_face = tk.Label(self.settings_window, text=f"Sensibilidad Cara ({int(FACE_CONF*100)}%)", bg=BG_COLOR, fg="white")
        lbl_face.pack(pady=(5, 0))
        def update_face_conf(v):
            global FACE_CONF
            FACE_CONF = float(v)
            lbl_face.config(text=f"Sensibilidad Cara ({int(FACE_CONF*100)}%)")

        s_face = tk.Scale(self.settings_window, from_=0.1, to=1.0, resolution=0.05, orient="horizontal",
                          command=update_face_conf, fg="white", bg=BG_COLOR, highlightthickness=0, length=300)
        s_face.set(FACE_CONF)
        s_face.pack(pady=5)

        tk.Label(self.settings_window, text="Nota: Desactiva 'Auto' para usar control manual.", bg=BG_COLOR, fg="#7f848e", font=("Arial", 8)).pack(pady=10)

    def change_resolution(self, width, height):
        print(f"Cambiando resolución a {width}x{height}...")
        self.changing_resolution = True

        # Release and reopen the capture so the change applies cleanly
        if self.cap.isOpened():
            self.cap.release()

        # Reopen with the appropriate backend
        self.cap = cv2.VideoCapture(0, V_BACKEND)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        # Verify what the driver actually applied
        w = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        h = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        print(f"Resolución actual: {int(w)}x{int(h)}")

        self.changing_resolution = False

    def toggle_calibration(self):
        self.calibration.active = not self.calibration.active
        if self.calibration.active:
            self.btn_calib.config(bg="#e5c07b", text="Terminar Calib.")
            if not self.calibration.points:
                # Initialize the points if they do not exist yet
                if self.current_frame_size is not None:
                    self.calibration.set_default_points(self.current_frame_size[0], self.current_frame_size[1])
            self.open_calibration_controls()
        else:
            self.btn_calib.config(bg="#98c379", text="Calibrar 3D")
            if hasattr(self, 'calib_window') and self.calib_window.winfo_exists():
                self.calib_window.destroy()

    def open_calibration_controls(self):
        if hasattr(self, 'calib_window') and self.calib_window is not None and self.calib_window.winfo_exists():
            self.calib_window.lift()
            return

        self.calib_window = tk.Toplevel(self.window)
        self.calib_window.title("Controles 3D")
        self.calib_window.geometry("300x400")
        self.calib_window.configure(bg=BG_COLOR)

        tk.Label(self.calib_window, text="Mover Malla", bg=BG_COLOR, fg="white").pack(pady=5)
        frm_move = tk.Frame(self.calib_window, bg=BG_COLOR)
        frm_move.pack()
        Button(frm_move, text="↑", command=lambda: self.calibration.move_grid(0, -10)).grid(row=0, column=1)
        Button(frm_move, text="←", command=lambda: self.calibration.move_grid(-10, 0)).grid(row=1, column=0)
        Button(frm_move, text="↓", command=lambda: self.calibration.move_grid(0, 10)).grid(row=1, column=1)
        Button(frm_move, text="→", command=lambda: self.calibration.move_grid(10, 0)).grid(row=1, column=2)

        tk.Label(self.calib_window, text="Rotar", bg=BG_COLOR, fg="white").pack(pady=5)
        frm_rot = tk.Frame(self.calib_window, bg=BG_COLOR)
        frm_rot.pack()
        Button(frm_rot, text="↺ -5°", command=lambda: self.calibration.rotate_grid(-5)).pack(side="left", padx=5)
        Button(frm_rot, text="↻ +5°", command=lambda: self.calibration.rotate_grid(5)).pack(side="left", padx=5)

        tk.Label(self.calib_window, text="Escalar", bg=BG_COLOR, fg="white").pack(pady=5)
        frm_scale = tk.Frame(self.calib_window, bg=BG_COLOR)
        frm_scale.pack()
        Button(frm_scale, text="-", command=lambda: self.calibration.scale_grid(0.9)).pack(side="left", padx=5)
        Button(frm_scale, text="+", command=lambda: self.calibration.scale_grid(1.1)).pack(side="left", padx=5)

        tk.Label(self.calib_window, text="Altura Referencia (m)", bg=BG_COLOR, fg="white").pack(pady=10)
        # A numeric height control would go here; for now the on-screen ruler is purely visual
        tk.Label(self.calib_window, text="(Usa la regla vertical en pantalla)", bg=BG_COLOR, fg="gray").pack()

    def open_correction_dialog(self):
        win = tk.Toplevel(self.window)
        win.title("Corregir Contador")
        win.geometry("300x200")
        win.configure(bg=BG_COLOR)

        tk.Label(win, text="Número Real de Personas Dentro:", bg=BG_COLOR, fg="white").pack(pady=10)

        entry = tk.Entry(win)
        entry.pack(pady=5)
        # Pre-fill with the current value
        current = self.line_counter.total_in - self.line_counter.total_out + self.line_counter.manual_offset
        entry.insert(0, str(current))

        def apply():
            try:
                val = int(entry.get())
                self.line_counter.set_manual_count(val)
                log_to_excel("Correccion Manual", "Manual", val, self.log_text)
                win.destroy()
            except ValueError:
                messagebox.showerror("Error", "Introduce un número válido")

        def reset_all():
            if messagebox.askyesno("Resetear", "¿Poner Entradas y Salidas a 0?"):
                self.line_counter.reset_counts()
                log_to_excel("Reset", "Manual", 0, self.log_text)
                win.destroy()

        Button(win, text="Aplicar", command=apply, bg=ACCENT_COLOR, fg="white").pack(pady=5)
        Button(win, text="Resetear Contadores a 0", command=reset_all, bg="#e06c75", fg="white").pack(pady=20)

    def get_img_coords(self, event):
        if self.current_frame_size is None: return 0, 0
        fw, fh = self.current_frame_size
        lw, lh = self.frame_w, self.frame_h
        scale = min(lw/fw, lh/fh)
        nw, nh = int(fw*scale), int(fh*scale)
        dx, dy = (lw-nw)//2, (lh-nh)//2
        x_img = int((event.x - dx) / scale)
        y_img = int((event.y - dy) / scale)
        return x_img, y_img
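
        # Added worked example: with a 1920x1080 frame shown in an 800x600 label,
        # scale = min(800/1920, 600/1080) ≈ 0.4167, the letterboxed image is 800x450 and
        # dy = (600 - 450) // 2 = 75; a click at label coordinates (400, 300) therefore
        # maps back to approximately (960, 540), i.e. the centre of the frame.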

    def on_video_drag(self, event):
        if not self.calibration.active: return
        x, y = self.get_img_coords(event)
        self.calibration.handle_drag(x, y)

    def on_video_release(self, event):
        if not self.calibration.active: return
        self.calibration.handle_release()

    def on_video_click(self, event):
        # Calibration gets priority
        if self.calibration.active:
            x, y = self.get_img_coords(event)
            if self.calibration.handle_click(x, y):
                return

        if not self.drawing_mode and not self.roi_mode: return
        if self.current_frame_size is None: return

        # Use the cached frame dimensions
        fw, fh = self.current_frame_size
        lw, lh = self.frame_w, self.frame_h

        # Compute scale and offsets (letterbox)
        scale = min(lw/fw, lh/fh)
        nw, nh = int(fw*scale), int(fh*scale)
        dx, dy = (lw-nw)//2, (lh-nh)//2

        # Coordinates relative to the image
        x_img = int((event.x - dx) / scale)
        y_img = int((event.y - dy) / scale)

        # Clamp
        x_img = max(0, min(fw-1, x_img))
        y_img = max(0, min(fh-1, y_img))

        if self.drawing_mode:
            self.draw_points.append((x_img, y_img))
            if len(self.draw_points) == 2:
                self.line_counter.set_line(self.draw_points[0], self.draw_points[1])
                self.drawing_mode = False
                self.btn_draw.config(text="Dibujar Línea", state="normal")
                print(f"Línea establecida: {self.draw_points}")

        elif self.roi_mode:
            self.roi_points.append((x_img, y_img))
            if len(self.roi_points) == 2:
                x1, y1 = self.roi_points[0]
                x2, y2 = self.roi_points[1]
                # Normalize the corner order
                rx, ry = min(x1, x2), min(y1, y2)
                rw, rh = abs(x2 - x1), abs(y2 - y1)

                if rw > 10 and rh > 10:
                    self.roi_rects.append((rx, ry, rw, rh))
                    print(f"ROI añadida: {(rx, ry, rw, rh)}")
                else:
                    print("ROI muy pequeña, ignorada.")

                self.roi_mode = False
                self.btn_roi.config(text="Reforzar Zona", state="normal")

    def update(self):
        if self.changing_resolution:
            self.window.after(50, self.update)
            return

        try:
            ret, frame = self.cap.read()
        except Exception as e:
            print(f"Error leyendo cámara: {e}")
            ret = False

        if not ret:
            # Try to reconnect the camera if the read failed
            print("Cámara desconectada o error. Intentando reconectar...")
            try:
                if self.cap.isOpened():
                    self.cap.release()
                self.cap = cv2.VideoCapture(0, V_BACKEND)
                self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
                self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
            except Exception:
                pass
            self.window.after(1000, self.update)  # Retry in one second
            return

        H, W = frame.shape[:2]
        self.current_frame_size = (W, H)

        # 1. Global detection
        results_p = self.model_person(frame, classes=[0], conf=self.person_conf, verbose=False)
        person_boxes = [list(map(int, b.xyxy[0])) for b in results_p[0].boxes]

        # Initialize the (global) face list
        face_boxes = []

        # 1.5 ROI detection (multiple zones) - highest priority (every frame)
        for roi_rect in self.roi_rects:
            rx, ry, rw, rh = roi_rect
            # Safe crop
            roi_frame = frame[ry:ry+rh, rx:rx+rw]
            if roi_frame.size > 0:
                # A) People inside the ROI
                results_roi = self.model_person(roi_frame, classes=[0], conf=self.person_conf, verbose=False)
                for b in results_roi[0].boxes:
                    bx1, by1, bx2, by2 = map(int, b.xyxy[0])
                    global_box = [bx1 + rx, by1 + ry, bx2 + rx, by2 + ry]
                    person_boxes.append(global_box)

                # B) Faces inside the ROI
                # Check faces only every 4 frames to avoid overload (orientation does not need full FPS)
                if (self.frame_count % 4 == 0):
                    results_roi_face = self.model_face(roi_frame, conf=FACE_CONF, verbose=False)
                    for b in results_roi_face[0].boxes:
                        bx1, by1, bx2, by2 = map(int, b.xyxy[0])
                        global_face_box = [bx1 + rx, by1 + ry, bx2 + rx, by2 + ry]
                        face_boxes.append(global_face_box)

        # Apply NMS to merge duplicates (global + ROI).
        # BALANCE: 0.55 merges parts of the same body while keeping two nearby people separate
        if len(person_boxes) > 0:
            # MINIMUM SIZE FILTER: discard very small detections (lone heads, arms, objects).
            # REDUCED so partially occluded people (carrying objects) are still detected.
            MIN_BOX_WIDTH = 30   # Was 40
            MIN_BOX_HEIGHT = 50  # Was 80 - people carrying boxes/planks can look shorter
            person_boxes = [b for b in person_boxes if (b[2]-b[0]) >= MIN_BOX_WIDTH and (b[3]-b[1]) >= MIN_BOX_HEIGHT]

            person_boxes = nms(person_boxes, iou_th=0.55)  # Balanced: merges parts, keeps people separate

        # 2. Tracking
        self.tracker.update(person_boxes)
        confirmed = self.tracker.confirmed_tracks()

        # 3. Orientation detection (EVERY frame on a powerful PC)
        if confirmed:
            # Detect faces over the whole frame (global)
            res_f = self.model_face(frame, conf=FACE_CONF, verbose=False)
            # Extend the existing list (it may already hold faces from the ROIs)
            for b in res_f[0].boxes:
                face_boxes.append(list(map(int, b.xyxy[0])))

            for tr in confirmed:
                # Buffer (hysteresis) logic to keep the face flag stable.
                # Decrement the buffer every frame.
                tr.face_buffer = max(0, tr.face_buffer - 1)

                found_face_this_frame = False

                # Check whether any face centre falls inside this person's box
                for fb in face_boxes:
                    # Face centre
                    fcx, fcy = (fb[0]+fb[2])//2, (fb[1]+fb[3])//2
                    if tr.box[0] <= fcx <= tr.box[2] and tr.box[1] <= fcy <= tr.box[3]:
                        found_face_this_frame = True
                        break

                if found_face_this_frame:
                    tr.face_buffer = 5  # ~0.5 s buffer to keep the orientation stable

                tr.has_face = (tr.face_buffer > 0)
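
                # Added note: the 5-frame buffer implements a small hysteresis - once a face
                # has been matched to this track, has_face stays True for the next 5 updates
                # even if the face detector misses a few frames, so the reported orientation
                # ("Frente"/"Espalda") does not flicker.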

        self.frame_count += 1

        # 4. Line logic and drawing
        if self.line_counter.is_set():
            p1, p2 = self.line_counter.p1, self.line_counter.p2
            cv2.line(frame, p1, p2, (0, 255, 255), 2)

            # Draw the arrow indicating the "Entrada" direction
            mx, my = (p1[0]+p2[0])//2, (p1[1]+p2[1])//2
            # Reverted to the original convention (L->R = down/inside)
            nx, ny = -(p2[1]-p1[1]), (p2[0]-p1[0])
            n_len = math.hypot(nx, ny)
            if n_len > 0:
                nx, ny = int(nx/n_len * 30), int(ny/n_len * 30)
                cv2.arrowedLine(frame, (mx, my), (mx+nx, my+ny), (0, 255, 0), 2, tipLength=0.3)
                cv2.putText(frame, "Entrada", (mx+nx, my+ny), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

            # Check for crossings
            for tr in confirmed:
                event = self.line_counter.check_crossing(tr, self.calibration)
                if event:
                    orientacion = "Frente" if tr.has_face else "Espalda"
                    total = self.line_counter.total_in - self.line_counter.total_out + self.line_counter.manual_offset  # Include the manual offset
                    log_to_excel(event, orientacion, total, self.log_text)

                    # Visual feedback
                    color = (0, 255, 0) if event == "Entrada" else (0, 0, 255)
                    cv2.circle(frame, tr.centroid, 10, color, -1)

        # 5. Draw the tracks
        for tr in confirmed:
            color = (0, 255, 0) if tr.has_face else (0, 165, 255)  # Green=face, orange=back
            label = f"ID {tr.id} {'(F)' if tr.has_face else '(E)'}"
            cv2.rectangle(frame, (tr.box[0], tr.box[1]), (tr.box[2], tr.box[3]), color, 2)
            cv2.putText(frame, label, (tr.box[0], tr.box[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            cv2.circle(frame, tr.centroid, 4, (255, 0, 0), -1)

            # Visualize the motion vector (Kalman prediction)
            if hasattr(tr, 'prediction'):
                # Draw a line towards where the filter predicts the track will go
                cv2.line(frame, tr.centroid, tr.prediction, (255, 255, 0), 2)
                cv2.circle(frame, tr.prediction, 3, (0, 255, 255), -1)

        # Draw the ROIs
        for i, roi_rect in enumerate(self.roi_rects):
            rx, ry, rw, rh = roi_rect
            cv2.rectangle(frame, (rx, ry), (rx+rw, ry+rh), (255, 0, 255), 2)
            cv2.putText(frame, f"Zona {i+1}", (rx, ry-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 1)

        # Draw the 3D calibration overlay
        if self.calibration.active and len(self.calibration.points) == 4:
            pts = np.array(self.calibration.points, np.int32)
            pts = pts.reshape((-1, 1, 2))
            cv2.polylines(frame, [pts], True, (0, 255, 255), 2)

            for i, p in enumerate(self.calibration.points):
                color = (0, 0, 255) if i == self.calibration.dragging_idx else (0, 255, 255)
                cv2.circle(frame, (int(p[0]), int(p[1])), 8, color, -1)
                cv2.putText(frame, str(i+1), (int(p[0])+10, int(p[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

            # Draw the height reference line
            if self.calibration.height_ref_p1 and self.calibration.height_ref_p2:
                hp1 = (int(self.calibration.height_ref_p1[0]), int(self.calibration.height_ref_p1[1]))
                hp2 = (int(self.calibration.height_ref_p2[0]), int(self.calibration.height_ref_p2[1]))
                cv2.line(frame, hp1, hp2, (255, 0, 0), 3)  # Blue (BGR)

                # Endpoint handles
                c1 = (0, 0, 255) if self.calibration.dragging_idx == 10 else (255, 0, 0)
                c2 = (0, 0, 255) if self.calibration.dragging_idx == 11 else (255, 0, 0)
                cv2.circle(frame, hp1, 6, c1, -1)
                cv2.circle(frame, hp2, 6, c2, -1)
                cv2.putText(frame, "Altura Ref.", (hp1[0]+10, hp1[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)

        # Update the stats label
        net_inside = self.line_counter.total_in - self.line_counter.total_out + self.line_counter.manual_offset
        self.lbl_stats.config(text=f"Dentro (Calc): {net_inside} | Entradas: {self.line_counter.total_in} | Salidas: {self.line_counter.total_out}")

        # Show the frame in Tkinter.
        # Use the cached label dimensions (self.frame_w, self.frame_h); this avoids the
        # feedback loop where the content keeps pushing the frame size.
        w_label = self.frame_w
        h_label = self.frame_h

        if w_label > 10 and h_label > 10:
            scale = min(w_label/W, h_label/H)
            nw, nh = int(W*scale), int(H*scale)
            frame_resized = cv2.resize(frame, (nw, nh))

            # Paste onto a black background (letterbox)
            bg = PILImage.new('RGB', (w_label, h_label), (0, 0, 0))
            img_pil = PILImage.fromarray(cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB))
            bg.paste(img_pil, ((w_label-nw)//2, (h_label-nh)//2))

            imgtk = ImageTk.PhotoImage(image=bg)
            self.lbl_video.imgtk = imgtk
            self.lbl_video.configure(image=imgtk)

        self.window.after(30, self.update)

    def on_closing(self):
        # Save the configuration before closing
        self.save_config()

        self.cap.release()
        self.window.destroy()
        generate_graph()


if __name__ == "__main__":
    root = tk.Tk()
    root.geometry("1000x700")
    app = App(root, "Sistema de Conteo de Personas - IA")
    root.protocol("WM_DELETE_WINDOW", app.on_closing)
    root.mainloop()