Functional electronic time clock with MinIO, face registration, and check-in.

This commit is contained in:
v 2025-05-05 15:48:32 -03:00
commit 39891a0447
34 changed files with 1218 additions and 0 deletions

10
.env Normal file
View File

@ -0,0 +1,10 @@
DEBUG=True
THRESHOLD=0.93
MINIO_ENDPOINT=minio:9000
MINIO_BUCKET=data
MINIO_ACCESS_KEY=admin
MINIO_SECRET_KEY=password
# Confidence bands for facial verification
CONFIDENCE_HIGH=0.95
CONFIDENCE_MEDIUM=0.85

4
.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
#
services/__pycache__/
__pycache__
routes/__pycache__

25
Dockerfile Normal file
View File

@ -0,0 +1,25 @@
FROM python:3.11-slim
RUN apt-get update && apt-get install -y \
build-essential \
cmake \
libboost-all-dev \
libopenblas-dev \
liblapack-dev \
libx11-dev \
libgtk-3-dev \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
EXPOSE 5006
# Dynamic mode: honors the DEBUG env var
CMD ["sh", "-c", "python app.py"]

34
Makefile Normal file
View File

@ -0,0 +1,34 @@
IMAGE_NAME=face-api
build:
docker build -t $(IMAGE_NAME) .
run:
docker run --rm -p 5006:5006 $(IMAGE_NAME)
up:
docker-compose up -d
devup:
docker-compose -f docker-compose.dev.yaml up --build
devdown:
docker-compose -f docker-compose.dev.yaml down
prdup:
docker-compose -f docker-compose.prd.yaml up --build
down:
docker-compose down
rebuild:
docker-compose down
docker-compose build
docker-compose up -d
clean:
docker system prune -f
logs:
docker-compose -f docker-compose.dev.yaml logs -f

13
app.py Normal file
View File

@ -0,0 +1,13 @@
from flask import Flask
from routes.face_routes import face_bp
import os
def create_app():
app = Flask(__name__)
app.register_blueprint(face_bp)
return app
if __name__ == "__main__":
debug_mode = os.getenv("DEBUG", "True").lower() == "true"
app = create_app()
app.run(host="0.0.0.0", port=5006, debug=debug_mode)

48
docker-compose.dev.yaml Normal file
View File

@ -0,0 +1,48 @@
version: '3.9'
services:
face-api:
build:
context: .
container_name: face-api-dev
ports:
- "5006:5006"
env_file:
- .env
environment:
DEBUG: ${DEBUG:-True}
THRESHOLD: ${THRESHOLD:-0.6}
MINIO_ENDPOINT: ${MINIO_ENDPOINT:-minio:9000}
MINIO_BUCKET: ${MINIO_BUCKET:-faces}
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-admin}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-password}
volumes:
- .:/app
depends_on:
- minio
restart: unless-stopped
networks:
- reconhecimento_net
minio:
image: minio/minio
container_name: minio
ports:
- "9002:9000"
- "9003:9001"
env_file:
- .env
environment:
MINIO_ROOT_USER: ${MINIO_ACCESS_KEY:-admin}
MINIO_ROOT_PASSWORD: ${MINIO_SECRET_KEY:-password}
volumes:
- minio_data:/data
command: server /data --console-address ":9001"
networks:
- reconhecimento_net
volumes:
minio_data:
networks:
reconhecimento_net:

13
docker-compose.prd.yaml Normal file
View File

@ -0,0 +1,13 @@
version: '3.9'
services:
face-api:
build:
context: .
container_name: face-api-prd
ports:
- "5006:5006"
environment:
- DEBUG=False
- THRESHOLD=0.6
restart: always

9
docker-compose.yaml Normal file
View File

@ -0,0 +1,9 @@
version: '3.9'
services:
face-api:
build: .
container_name: face-api
ports:
- "5006:5006"
restart: unless-stopped

BIN
docs/benchmark_Figure.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 78 KiB

View File

@ -0,0 +1,9 @@
model,avg_similarity_score,avg_duration_sec,avg_match_rate
dlib,0.7533,1.8977,1.0
deepface_arcface,0.7318,1.4837,0.8571
deepface_facenet,0.8284,1.8096,0.8571
deepface_facenet512,0.8038,1.7295,0.8571
deepface_vgg-face,0.6998,1.4428,0.8571
deepface_openface,0.7568,1.2806,0.1429
deepface_dlib,0.9705,0.9873,0.8571
deepface_sface,0.7606,0.921,0.8571

BIN
docs/image-1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 119 KiB

View File

@ -0,0 +1,11 @@
flask
face_recognition
# numpy
facenet-pytorch # mtcnn
torch
Pillow
minio
python-dotenv
deepface
tensorflow
tf-keras

View File

@ -0,0 +1,41 @@
import pandas as pd
import matplotlib.pyplot as plt
# Load the CSV saved by the benchmark
df = pd.read_csv("benchmark_results.csv")
fig, ax1 = plt.subplots(figsize=(12, 6))
df_sorted = df.sort_values("avg_duration_sec", ascending=True)
color = 'tab:blue'
ax1.set_xlabel('Modelo')
ax1.set_ylabel('Duração [blue] (s)', color=color)
ax1.bar(df_sorted["model"], df_sorted["avg_duration_sec"], color=color, alpha=0.6)
ax1.tick_params(axis='y', labelcolor=color)
plt.xticks(rotation=45)
# Second axis: accuracy
ax2 = ax1.twinx()
color = 'tab:green'
ax2.set_ylabel('Acurácia média (similarity)', color=color)
ax2.plot(df_sorted["model"], df_sorted["avg_similarity_score"], color=color, marker='o')
ax2.tick_params(axis='y', labelcolor=color)
plt.title("Média de Tempo e Acurácia por Modelo com 7 Imagens")
plt.tight_layout()
plt.grid(True)
plt.show()
'''
curl -X POST http://localhost:5006/benchmark_face_match \
-F "person_id=vitor" \
-F "images[]=@/home/v/Desktop/reconhecimento/imgs/a.jpg" \
-F "images[]=@/home/v/Desktop/reconhecimento/imgs/b.jpg" \
-F "images[]=@/home/v/Desktop/reconhecimento/imgs/c.jpg" \
-F "images[]=@/home/v/Desktop/reconhecimento/imgs/d.jpg" \
-F "images[]=@/home/v/Desktop/reconhecimento/imgs/e.jpg" \
-F "images[]=@/home/v/Desktop/reconhecimento/imgs/f.jpg" \
-F "images[]=@/home/v/Desktop/reconhecimento/imgs/g.jpg"
'''

BIN
imgs/a.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

BIN
imgs/aa.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 578 KiB

BIN
imgs/aaa.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 810 KiB

BIN
imgs/b.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 74 KiB

BIN
imgs/bb.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 365 KiB

BIN
imgs/bbb.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.0 MiB

BIN
imgs/c.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

BIN
imgs/ccc.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

BIN
imgs/d.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 184 KiB

BIN
imgs/e.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 176 KiB

BIN
imgs/f.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 82 KiB

BIN
imgs/g.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 162 KiB

BIN
imgs/h.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 77 KiB

BIN
imgs/i.jpeg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 539 KiB

BIN
imgs/j.jpeg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 546 KiB

207
readme.md Normal file
View File

@ -0,0 +1,207 @@
# Face Recognition API 🧠
API for comparing two face images and verifying whether they belong to the same person.
### Folder structure
```text
face-api/
├── app.py # Flask app; registers the routes
├── failed_faces/
├── imgs/ # test images
├── routes/
│ └── face_routes.py # Route definitions (separate module)
├── services/
│ └── face_service.py # Face comparison
├── config/
│ └── settings.py # Threshold
├── Dockerfile
├── docker-compose.yaml
├── Makefile
├── requirements.txt
└── README.md
```
# 🧠 Facial Recognition API with DeepFace Dlib
This is a minimal facial verification API built on the `deepface_dlib` model, selected after benchmarking performance and accuracy. It exposes simple, efficient endpoints to **register images** and **compare faces** with high accuracy and low response time.
---
## 📦 Technologies used
- [Python 3.11](https://www.python.org/)
- [Flask](https://flask.palletsprojects.com/)
- [DeepFace](https://github.com/serengil/deepface) with the Dlib backend
- [MinIO](https://min.io/) for image storage
- [face_recognition](https://github.com/ageitgey/face_recognition) for face validation
---
## 🚀 Running with Docker
```bash
make devup
```
---
## 🌐 Available endpoints
### ✅ `POST /register_face`
Registers an image of a person and stores the metadata (such as the positions of the detected faces) in MinIO.
**Parameters:**
- `person_id`: identifier of the person
- `image`: image file (`.jpg`, `.png`, etc.)
**Example:**
```bash
curl -X POST http://localhost:5006/register_face \
-F "person_id=fulano" \
-F "image=@imgs/aa.png"
```
**Response:**
```json
{
"image_path": "fulano/20250501-145258.jpg",
"person_id": "fulano",
"status": "Face cropped and saved",
"timestamp": "20250501-145258"
}
```
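For programmatic use, the same call can be made from Python. A minimal client sketch using `requests` (an assumption: `requests` is not listed in requirements.txt, and the API is assumed to be reachable at `localhost:5006`):
```python
import requests

API_URL = "http://localhost:5006"  # assumed local deployment

# Register a face for "fulano" using one of the test images
with open("imgs/aa.png", "rb") as f:
    resp = requests.post(
        f"{API_URL}/register_face",
        data={"person_id": "fulano"},
        files={"image": ("aa.png", f, "image/png")},
    )

resp.raise_for_status()
print(resp.json())  # image_path, person_id, status, timestamp
```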
MinIO example
![MinIO example](docs/image-1.png)
---
### ✅ `POST /checkin`
Compares two images and returns whether they show the same person, based on the `deepface_dlib` model. One image is already in storage ([aa.png](imgs/aa.png)) and the other is sent to the endpoint ([bb.png](imgs/bb.png)).
**Parameters:**
- `person_id`: fulano
- `image`: bb.png
**Example:**
```bash
curl -X POST http://localhost:5006/checkin \
-F "person_id=fulano" \
-F "image=@imgs/bb.jpg"
```
**Response:**
```json
{
"confidence": "high",
"duration_sec": 0.7338,
"match": true,
"person_id": "fulano",
"similarity_score": 0.9643,
"threshold": 0.6
}
```
**Or a response with a medium score:**
```json
{
"confidence": "medium",
"duration_sec": 0.4199,
"match": false,
"person_id": "fulano",
"similarity_score": 0.9136,
"threshold": 0.6
}
```
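The `confidence` field in these responses is derived from the similarity score and the `CONFIDENCE_HIGH` / `CONFIDENCE_MEDIUM` variables. The mapping below mirrors the logic in `routes/face_routes.py`, rewritten as a small helper for illustration:
```python
import os

def classify_confidence(similarity: float) -> str:
    """Map a similarity score to a confidence band (same thresholds as /checkin)."""
    confidence_high = float(os.getenv("CONFIDENCE_HIGH", 0.95))
    confidence_medium = float(os.getenv("CONFIDENCE_MEDIUM", 0.85))
    if similarity >= confidence_high:
        return "high"
    elif similarity >= confidence_medium:
        return "medium"
    return "low"

# With the defaults above, 0.9643 -> "high" and 0.9136 -> "medium",
# matching the two example responses shown earlier.
```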
---
### ✅ If `match = True`, it:
* ⏱ Saves the date/time of the check-in
* 📷 Saves the original image
* 😁 Crops and saves the detected face
* 📝 Generates and saves a .json with:
person, IP, response time, similarity, confidence and match status (see the example payload below)
  * NOTE: to keep the model useful over time, it is worth occasionally saving a new face image after a check-in and adding it to the person's registered-faces folder, so that changes in appearance are covered and the time clock stays accurate.
**If match = False:**
* Nothing happens; optionally, the images from the failed attempt could be stored to investigate fraud.
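For reference, the `metadata.json` stored on a successful check-in carries the fields below. Field names follow the dict built in `routes/face_routes.py`; the values shown here are illustrative only:
```python
# Illustrative payload; field names match the dict built in routes/face_routes.py.
metadata = {
    "person_id": "fulano",
    "timestamp": "2025-05-01 18:01:43",   # UTC, "%Y-%m-%d %H:%M:%S"
    "ip": "172.18.0.1",                   # request.remote_addr (example value)
    "confidence": "high",
    "similarity_score": 0.9643,
    "duration_sec": 0.7338,
    "match": True,
}
```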
---
## 📁 Expected structure in MinIO
When registering images with `person_id = fulano`, the files are saved under the `registred_faces` folder.
On check-in, files are saved under the `checkins` folder as follows:
```
data/
├── registred_faces/
│ └── ciclano/
│ └── 20240501-180123.jpg
│ └── fulano/
│ └── 20240501-180143.jpg
├── checkins/
│ └── fulano
│ └── data
│ └── hora
│ └── metadata.json
│ └── face.jpg
│ └── original.jpg
```
---
## ⚙️ `.env` environment variables
```env
DEBUG=True
THRESHOLD=0.93
MINIO_ENDPOINT=minio:9000
MINIO_BUCKET=data
MINIO_ACCESS_KEY=admin
MINIO_SECRET_KEY=password
# Confidence bands for facial verification
CONFIDENCE_HIGH=0.95
CONFIDENCE_MEDIUM=0.85
```
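The folder structure above mentions `config/settings.py`, which is not shown in this diff. A minimal sketch of how these variables could be centralized there with `python-dotenv` (the module path and names are assumptions based on the `.env` above):
```python
# config/settings.py — hypothetical loader for the variables defined in .env
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the project root, if present

DEBUG = os.getenv("DEBUG", "True").lower() == "true"
THRESHOLD = float(os.getenv("THRESHOLD", 0.93))

MINIO_ENDPOINT = os.getenv("MINIO_ENDPOINT", "minio:9000")
MINIO_BUCKET = os.getenv("MINIO_BUCKET", "data")
MINIO_ACCESS_KEY = os.getenv("MINIO_ACCESS_KEY", "admin")
MINIO_SECRET_KEY = os.getenv("MINIO_SECRET_KEY", "password")

# Confidence bands used by /checkin
CONFIDENCE_HIGH = float(os.getenv("CONFIDENCE_HIGH", 0.95))
CONFIDENCE_MEDIUM = float(os.getenv("CONFIDENCE_MEDIUM", 0.85))
```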
---
## 🛠️ Makefile commands
```bash
make devup # Start the stack with docker-compose.dev.yaml
make devdown # Stop the containers
make logs # Tail the logs
```
---
## 👨‍🔬 Benchmark and architecture decision
The `deepface_dlib` model was chosen after benchmarks comparing:
- Average time per verification
- Average similarity
- Match rate
See the chart of the averaged results across 7 different images; the green line is the accuracy.
![benchmark_Figure.png](docs/benchmark_Figure.png)
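Each row of the benchmark CSV is a plain mean over all (probe image, registered image) pairs evaluated for that model. A condensed sketch of the aggregation, mirroring the `/benchmark_face_match` route included in this commit:
```python
def summarize(stats: dict) -> dict:
    """stats holds per-model lists of similarities, durations and booleans (matches)."""
    return {
        "avg_similarity_score": round(sum(stats["similarities"]) / len(stats["similarities"]), 4),
        "avg_duration_sec": round(sum(stats["durations"]) / len(stats["durations"]), 4),
        "avg_match_rate": round(sum(stats["matches"]) / len(stats["matches"]), 4),
    }

# Example: an avg_match_rate of 0.1429 (deepface_openface) is consistent with
# 1 match out of 7 comparisons: round(1 / 7, 4) == 0.1429.
```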
### Next steps
* Check-in currently uses the most recently registered image; it could instead use all registered images and average the scores, or take the one with the best score (a tricky detail, since it can lead to false positives); see the sketch after this list.
* Add a model-training step after a number of positive check-ins, mapping the similarity score so that it stays above 0.95.
* Add support for running offline.
* Add a fraud-attempt warning.
* Implement a method that detects someone trying to clock in using a photo of the person's face.
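A minimal sketch of the first idea above (averaging over all registered images). This is not implemented in this commit; it reuses the `minio_client`, `BUCKET` and DeepFace calls already present in `routes/face_routes.py`:
```python
# Hypothetical helper: compare the probe against every registered image and average the similarity.
import numpy as np
from PIL import Image
from deepface import DeepFace
from services.storage_service import minio_client, BUCKET

def average_similarity(person_id: str, img_probe: np.ndarray) -> float:
    """Mean (1 - distance) over all registered images of person_id; raises if none exist."""
    prefix = f"registred_faces/{person_id}/"
    objects = [o for o in minio_client.list_objects(BUCKET, prefix=prefix, recursive=True)
               if o.object_name.endswith(".jpg")]
    if not objects:
        raise ValueError(f"No registered face found for '{person_id}'")
    scores = []
    for obj in objects:
        response = minio_client.get_object(BUCKET, obj.object_name)
        img_registered = np.array(Image.open(response).convert("RGB"))
        result = DeepFace.verify(img_probe, img_registered,
                                 model_name="Dlib", enforce_detection=False)
        scores.append(1 - result["distance"])
    return sum(scores) / len(scores)
```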

7
requirements.txt Normal file
View File

@ -0,0 +1,7 @@
flask
deepface
face_recognition
minio
Pillow
# numpy
tf-keras

287
routes/face_routes.py Normal file
View File

@ -0,0 +1,287 @@
import os
import time
import json
import logging
import numpy as np
from PIL import Image
import face_recognition
from deepface import DeepFace
from flask import Blueprint, request, jsonify
from services.face_service import compare_faces_service
from services.storage_service import minio_client, BUCKET
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
face_bp = Blueprint('face_bp', __name__)
threshold = float(os.getenv("THRESHOLD", 0.6))
# @face_bp.route('/compare_faces', methods=['POST'])
# def compare_faces():
# logging.info("🖼️ Recebendo imagens para comparação...")
# if 'image1' not in request.files or 'image2' not in request.files:
# return jsonify({"error": "Missing images. Keys should be 'image1' and 'image2'."}), 400
# image1 = request.files['image1']
# image2 = request.files['image2']
# try:
# result = compare_faces_service(image1, image2)
# return jsonify(result), 200
# except ValueError as e:
# return jsonify({"error": str(e)}), 400
# except Exception as e:
# return jsonify({"error": "Internal Server Error", "details": str(e)}), 500
@face_bp.route('/register_face', methods=['POST'])
def register_face():
from datetime import datetime
from io import BytesIO
person_id = request.form.get("person_id")
image_file = request.files.get("image")
if not person_id or not image_file:
return jsonify({"error": "Missing person_id or image"}), 400
try:
# Load the image as an RGB array
image_file.seek(0)
image = face_recognition.load_image_file(image_file)
# Detect the first face
face_locations = face_recognition.face_locations(image)
if not face_locations:
return jsonify({"error": "No face detected in image"}), 400
top, right, bottom, left = face_locations[0]
face_crop = image[top:bottom, left:right]
# Convert to PIL
face_pil = Image.fromarray(face_crop)
# Save to an in-memory buffer
buffer = BytesIO()
face_pil.save(buffer, format="JPEG")
buffer.seek(0)
# Generate a timestamp-based filename
timestamp = datetime.utcnow().strftime("%Y%m%d-%H%M%S")
image_filename = f"registred_faces/{person_id}/{timestamp}.jpg"
# Upload to MinIO
minio_client.put_object(
BUCKET,
image_filename,
buffer,
length=buffer.getbuffer().nbytes,
content_type="image/jpeg"
)
return jsonify({
"person_id": person_id,
"image_path": image_filename,
"timestamp": timestamp,
"status": "Face cropped and saved"
}), 200
except Exception as e:
logger.exception("❌ Erro ao registrar face")
return jsonify({"error": "Failed to register face", "details": str(e)}), 500
@face_bp.route("/checkin", methods=["POST"])
def checkin():
from datetime import datetime
from io import BytesIO
logger.info("📥 Início do registro de ponto (/checkin)")
person_id = request.form.get("person_id")
image_file = request.files.get("image")
ip_address = request.remote_addr
if not person_id or not image_file:
return jsonify({"error": "Missing person_id or image"}), 400
try:
img_probe = np.array(Image.open(image_file).convert("RGB"))
objects = list(minio_client.list_objects(BUCKET, prefix=f"registred_faces/{person_id}/", recursive=True))
image_objects = [obj for obj in objects if obj.object_name.endswith(".jpg")]
if not image_objects:
return jsonify({"error": f"No registered face found for '{person_id}'"}), 404
image_objects.sort(key=lambda x: x.object_name, reverse=True)
target_image_obj = image_objects[0]
logger.debug(f"🎯 Usando imagem registrada: {target_image_obj.object_name}")
response = minio_client.get_object(BUCKET, target_image_obj.object_name)
img_registered = np.array(Image.open(response).convert("RGB"))
t0 = time.time()
result = DeepFace.verify(
img_probe,
img_registered,
model_name="Dlib",
enforce_detection=False
)
duration = round(time.time() - t0, 4)
distance = result["distance"]
similarity = 1 - distance
threshold = float(os.getenv("THRESHOLD", 0.85))
confidence_high = float(os.getenv("CONFIDENCE_HIGH", 0.95))
confidence_medium = float(os.getenv("CONFIDENCE_MEDIUM", 0.85))
match = similarity >= threshold
if similarity >= confidence_high:
confidence = "high"
elif similarity >= confidence_medium:
confidence = "medium"
else:
confidence = "low"
if not match:
return jsonify({
"match": False,
"similarity_score": round(similarity, 4),
"confidence": confidence,
"message": "Face not recognized with sufficient confidence."
}), 401
# Crop the face with face_recognition
image_file.seek(0)
image_rgb = face_recognition.load_image_file(image_file)
locations = face_recognition.face_locations(image_rgb)
if not locations:
return jsonify({"error": "No face found to crop"}), 400
top, right, bottom, left = locations[0]
face_crop = image_rgb[top:bottom, left:right]
face_pil = Image.fromarray(face_crop)
# Organize by person/date/time
now = datetime.utcnow()
date_str = now.strftime("%Y-%m-%d")
time_str = now.strftime("%H-%M-%S")
path_prefix = f"checkins/{person_id}/{date_str}/{time_str}/"
original_name = f"{path_prefix}original.jpg"
face_name = f"{path_prefix}face.jpg"
json_name = f"{path_prefix}metadata.json"
# Upload original
image_file.seek(0)
minio_client.put_object(
BUCKET, original_name, image_file,
length=-1, part_size=10*1024*1024,
content_type="image/jpeg"
)
# Upload face
face_buffer = BytesIO()
face_pil.save(face_buffer, format="JPEG")
face_buffer.seek(0)
minio_client.put_object(
BUCKET, face_name, face_buffer,
length=face_buffer.getbuffer().nbytes,
content_type="image/jpeg"
)
# Upload JSON
data = {
"person_id": person_id,
"timestamp": now.strftime("%Y-%m-%d %H:%M:%S"),
"ip": ip_address,
"confidence": confidence,
"similarity_score": round(similarity, 4),
"duration_sec": duration,
"match": match
}
json_buffer = BytesIO(json.dumps(data).encode("utf-8"))
minio_client.put_object(
BUCKET, json_name, json_buffer,
length=json_buffer.getbuffer().nbytes,
content_type="application/json"
)
return jsonify(data), 200
except Exception as e:
logger.exception("❌ Erro ao processar check-in")
return jsonify({"error": str(e)}), 500
'''
Below is the endpoint that requires two images to be passed for comparison.
'''
# @face_bp.route("/verify_face_dlib", methods=["POST"])
# def verify_face_dlib():
# logger.info("🔍 Verificação facial usando deepface_dlib com imagem cadastrada")
# person_id = request.form.get("person_id")
# image_file = request.files.get("image")
# if not person_id or not image_file:
# return jsonify({"error": "Missing person_id or image"}), 400
# try:
# img_probe = np.array(Image.open(image_file).convert("RGB"))
# objects = list(minio_client.list_objects(BUCKET, prefix=f"{person_id}/", recursive=True))
# image_objects = [obj for obj in objects if obj.object_name.endswith(".jpg")]
# if not image_objects:
# return jsonify({"error": f"No registered face found for '{person_id}'"}), 404
# image_objects.sort(key=lambda x: x.object_name, reverse=True)
# target_image_obj = image_objects[0]
# logger.debug(f"🖼 Imagem cadastrada encontrada: {target_image_obj.object_name}")
# response = minio_client.get_object(BUCKET, target_image_obj.object_name)
# img_registered = np.array(Image.open(response).convert("RGB"))
# t0 = time.time()
# result = DeepFace.verify(
# img_probe,
# img_registered,
# model_name="Dlib",
# enforce_detection=False
# )
# duration = round(time.time() - t0, 4)
# distance = result["distance"]
# similarity = 1 - distance
# # Apply the THRESHOLD to the similarity
# threshold = float(os.getenv("THRESHOLD", 0.93)) # e.g. 0.85 = stricter matching
# confidence_high = float(os.getenv("CONFIDENCE_HIGH", 0.95))
# confidence_medium = float(os.getenv("CONFIDENCE_MEDIUM", 0.85))
# match = similarity >= threshold
# if similarity >= confidence_high:
# confidence = "high"
# elif similarity >= confidence_medium:
# confidence = "medium"
# else:
# confidence = "low"
# return jsonify({
# "person_id": person_id,
# "match": match,
# "similarity_score": round(similarity, 4),
# "threshold": threshold,
# "confidence": confidence,
# "duration_sec": duration
# }), 200
# except Exception as e:
# logger.exception("Erro na verificação facial")
# return jsonify({"error": str(e)}), 500

View File

@ -0,0 +1,412 @@
import os
import csv
import time
import logging
import numpy as np
from PIL import Image
from io import BytesIO
from minio import Minio
import face_recognition
from deepface import DeepFace
from facenet_pytorch import MTCNN
from flask import Blueprint, request, jsonify
from services.face_service import compare_faces_service
from services.storage_service import minio_client, BUCKET
from services.storage_service import upload_image_to_minio
logging.basicConfig(level=logging.INFO)
face_bp = Blueprint('face_bp', __name__)
mtcnn = MTCNN(image_size=160, margin=0)
logger = logging.getLogger(__name__)
threshold = float(os.getenv("THRESHOLD", 0.6))
@face_bp.route('/compare_faces', methods=['POST'])
def compare_faces():
logging.info("🖼️ Recebendo imagens para comparação...")
if 'image1' not in request.files or 'image2' not in request.files:
return jsonify({"error": "Missing images. Keys should be 'image1' and 'image2'."}), 400
image1 = request.files['image1']
image2 = request.files['image2']
try:
result = compare_faces_service(image1, image2)
return jsonify(result), 200
except ValueError as e:
return jsonify({"error": str(e)}), 400
except Exception as e:
return jsonify({"error": "Internal Server Error", "details": str(e)}), 500
@face_bp.route('/register_face', methods=['POST'])
def register_face():
person_id = request.form.get("person_id")
image_file = request.files.get("image")
if not person_id or not image_file:
return jsonify({"error": "Missing person_id or image"}), 400
# Save to MinIO
try:
image_path = upload_image_to_minio(image_file, person_id)
except Exception as e:
return jsonify({"error": "Failed to upload image", "details": str(e)}), 500
# Extract the face encoding
image_file.seek(0)
image = face_recognition.load_image_file(image_file)
encodings = face_recognition.face_encodings(image)
if not encodings:
return jsonify({"error": "No face detected in image"}), 400
# NOTE: the encoding could be stored in a database; here we just return it
encoding = encodings[0].tolist() # JSON serializable
return jsonify({
"person_id": person_id,
"image_path": image_path,
"face_encoding": encoding
}), 200
@face_bp.route('/compare_face_with_registered', methods=['POST'])
def compare_face_with_registered():
logger.info("🔍 Iniciando comparação com registros salvos")
person_id = request.form.get("person_id")
image_file = request.files.get("image")
if not person_id or not image_file:
logger.warning("⚠️ Requisição inválida: 'person_id' ou 'image' ausente")
return jsonify({"error": "Missing person_id or image"}), 400
logger.debug(f"📤 Person ID recebido: {person_id}")
# Process the received image
try:
logger.info("🧠 Carregando imagem enviada para extração facial")
image = face_recognition.load_image_file(image_file)
input_encoding = face_recognition.face_encodings(image)
if not input_encoding:
logger.warning("⚠️ Nenhuma face detectada na imagem de entrada")
return jsonify({"error": "No face found in input image"}), 400
input_encoding = input_encoding[0]
logger.debug("✅ Encoding da imagem enviada obtido com sucesso")
except Exception as e:
logger.exception("❌ Erro ao processar a imagem enviada")
return jsonify({"error": "Failed to process input image", "details": str(e)}), 500
# Fetch registered images from MinIO
try:
logger.info("📂 Buscando imagens registradas no MinIO para o usuário")
registered_objects = list(minio_client.list_objects(BUCKET, prefix=f"{person_id}/", recursive=True))
if not registered_objects:
logger.warning("⚠️ Nenhuma imagem registrada encontrada para este usuário")
return jsonify({"error": "No registered images found for this person_id"}), 404
except Exception as e:
logger.exception("❌ Erro ao listar objetos no MinIO")
return jsonify({"error": "Failed to access MinIO", "details": str(e)}), 500
matches = []
for obj in registered_objects:
try:
logger.debug(f"🔄 Comparando com imagem registrada: {obj.object_name}")
response = minio_client.get_object(BUCKET, obj.object_name)
buffer = BytesIO(response.read())
reg_image = face_recognition.load_image_file(buffer)
encodings = face_recognition.face_encodings(reg_image)
if not encodings:
logger.warning(f"⚠️ Nenhuma face encontrada em {obj.object_name}, ignorando")
continue
encoding = encodings[0]
distance = face_recognition.face_distance([encoding], input_encoding)[0]
similarity = 1 - distance
threshold = float(os.getenv("THRESHOLD", 0.6))
match_result = bool(distance <= threshold)
logger.debug(f"📏 Similaridade: {similarity:.4f} | Match: {match_result}")
matches.append({
"registered_image": str(obj.object_name),
"similarity_score": round(float(similarity), 4),
"match": match_result
})
except Exception as e:
logger.error(f"❌ Erro ao comparar com imagem {obj.object_name}: {str(e)}")
if not matches:
logger.warning("⚠️ Nenhuma comparação válida foi possível")
return jsonify({"error": "No valid registered faces found"}), 404
best_match = sorted(matches, key=lambda m: m["similarity_score"], reverse=True)[0]
logger.info(f"✅ Melhor match encontrado: {best_match['registered_image']} com score {best_match['similarity_score']}")
return jsonify(best_match), 200
@face_bp.route('/benchmark_face_match_working', methods=['POST'])
def benchmark_face_match_working():
logger.info("🔬 Iniciando benchmark facial")
person_id = request.form.get("person_id")
image_file = request.files.get("image")
if not person_id or not image_file:
return jsonify({"error": "Missing person_id or image"}), 400
try:
pil_input = Image.open(image_file).convert("RGB")
input_np = np.array(pil_input)
except Exception as e:
logger.exception("Erro ao carregar imagem")
return jsonify({"error": "Invalid image", "details": str(e)}), 400
try:
registered_objects = list(minio_client.list_objects(BUCKET, prefix=f"{person_id}/", recursive=True))
if not registered_objects:
return jsonify({"error": "No registered images found"}), 404
except Exception as e:
logger.exception("Erro ao acessar MinIO")
return jsonify({"error": "MinIO access error", "details": str(e)}), 500
deepface_models = [
"ArcFace", "Facenet", "Facenet512", "VGG-Face",
"OpenFace", "Dlib", "SFace" # Removido "DeepFace"
]
all_results = []
csv_rows = []
for obj in registered_objects:
logger.info(f"📂 Comparando com imagem: {obj.object_name}")
result_entry = {
"registered_image": str(obj.object_name),
"models": {}
}
try:
response = minio_client.get_object(BUCKET, obj.object_name)
buffer = BytesIO(response.read())
pil_registered = Image.open(buffer).convert("RGB")
reg_np = np.array(pil_registered)
except Exception as e:
logger.error(f"Erro ao carregar imagem: {e}")
continue
# Model 1: dlib
try:
t0 = time.time()
enc_input = face_recognition.face_encodings(input_np)
enc_reg = face_recognition.face_encodings(reg_np)
if enc_input and enc_reg:
dist = face_recognition.face_distance([enc_reg[0]], enc_input[0])[0]
sim = 1 - dist
match = dist <= threshold
duration = round(time.time() - t0, 4)
result_entry["models"]["dlib"] = {
"similarity_score": round(sim, 4),
"match": bool(match),
"duration_sec": duration
}
csv_rows.append(["dlib", sim, duration, match])
else:
raise ValueError("Encodings não encontrados")
except Exception as e:
result_entry["models"]["dlib"] = {"error": str(e)}
# DeepFace models
for model in deepface_models:
model_key = f"deepface_{model.lower()}"
try:
t0 = time.time()
analysis = DeepFace.verify(
np.array(pil_input),
np.array(pil_registered),
model_name=model,
enforce_detection=False
)
sim = 1 - analysis['distance']
match = analysis['verified']
duration = round(time.time() - t0, 4)
result_entry["models"][model_key] = {
"similarity_score": round(sim, 4),
"match": bool(match),
# "distance_metric": analysis.get("distance_metric", ""),
"distance_metric": analysis.get("distance_metric", "cosine"),
"duration_sec": duration
}
csv_rows.append([model_key, sim, duration, match])
except Exception as e:
result_entry["models"][model_key] = {"error": str(e)}
all_results.append(result_entry)
# Save the CSV
csv_path = "benchmark_results.csv"
try:
with open(csv_path, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(["model", "similarity_score", "duration_sec", "match"])
writer.writerows(csv_rows)
logger.info(f"✅ CSV salvo em {csv_path}")
except Exception as e:
logger.error(f"❌ Falha ao salvar CSV: {e}")
return jsonify(all_results), 200
@face_bp.route('/benchmark_face_match', methods=['POST'])
def benchmark_face_match():
import csv
logger.info("🧪 Iniciando benchmark com múltiplas imagens de entrada")
person_id = request.form.get("person_id")
image_files = request.files.getlist("images[]")
if not person_id or not image_files:
return jsonify({"error": "Missing person_id or images[]"}), 400
# Validate and convert the input images
input_images = []
for img_file in image_files:
try:
pil_img = Image.open(img_file).convert("RGB")
input_images.append(np.array(pil_img))
except Exception as e:
logger.warning(f"⚠️ Imagem inválida: {img_file.filename} | {str(e)}")
if not input_images:
return jsonify({"error": "No valid input images"}), 400
try:
registered_objects = list(minio_client.list_objects(BUCKET, prefix=f"{person_id}/", recursive=True))
if not registered_objects:
return jsonify({"error": "No registered images found"}), 404
except Exception as e:
logger.exception("❌ Erro ao acessar MinIO")
return jsonify({"error": "MinIO access error", "details": str(e)}), 500
deepface_models = [
"ArcFace", "Facenet", "Facenet512", "VGG-Face",
"OpenFace", "Dlib", "SFace"
]
all_model_stats = {}
# Initialize per-model statistics
for model in ["dlib", "mtcnn+dlib"] + [f"deepface_{m.lower()}" for m in deepface_models]:
all_model_stats[model] = {
"similarities": [],
"durations": [],
"matches": []
}
for input_np in input_images:
for obj in registered_objects:
try:
response = minio_client.get_object(BUCKET, obj.object_name)
buffer = BytesIO(response.read())
pil_registered = Image.open(buffer).convert("RGB")
reg_np = np.array(pil_registered)
except Exception as e:
logger.warning(f"❌ Erro ao carregar imagem registrada {obj.object_name}: {e}")
continue
# dlib
try:
t0 = time.time()
enc_input = face_recognition.face_encodings(input_np)
enc_reg = face_recognition.face_encodings(reg_np)
if enc_input and enc_reg:
dist = face_recognition.face_distance([enc_reg[0]], enc_input[0])[0]
sim = 1 - dist
match = dist <= threshold
duration = time.time() - t0
all_model_stats["dlib"]["similarities"].append(sim)
all_model_stats["dlib"]["durations"].append(duration)
all_model_stats["dlib"]["matches"].append(match)
except Exception as e:
logger.warning(f"⚠️ Dlib falhou: {e}")
# mtcnn + dlib
try:
t0 = time.time()
t_input = mtcnn(Image.fromarray(input_np))
t_reg = mtcnn(pil_registered)
if t_input is None or t_reg is None:
raise ValueError("MTCNN não detectou rosto")
arr_input = (t_input.permute(1,2,0).numpy()*255).astype(np.uint8)
arr_reg = (t_reg.permute(1,2,0).numpy()*255).astype(np.uint8)
enc_input = face_recognition.face_encodings(arr_input)
enc_reg = face_recognition.face_encodings(arr_reg)
if enc_input and enc_reg:
dist = face_recognition.face_distance([enc_reg[0]], enc_input[0])[0]
sim = 1 - dist
match = dist <= threshold
duration = time.time() - t0
all_model_stats["mtcnn+dlib"]["similarities"].append(sim)
all_model_stats["mtcnn+dlib"]["durations"].append(duration)
all_model_stats["mtcnn+dlib"]["matches"].append(match)
except Exception as e:
logger.warning(f"⚠️ mtcnn+dlib falhou: {e}")
# DeepFace models
for model in deepface_models:
model_key = f"deepface_{model.lower()}"
try:
t0 = time.time()
analysis = DeepFace.verify(
input_np,
reg_np,
model_name=model,
enforce_detection=False
)
sim = 1 - analysis['distance']
match = analysis['verified']
duration = time.time() - t0
all_model_stats[model_key]["similarities"].append(sim)
all_model_stats[model_key]["durations"].append(duration)
all_model_stats[model_key]["matches"].append(match)
except Exception as e:
logger.warning(f"⚠️ DeepFace ({model}) falhou: {e}")
# Compute averages
results = []
for model, stats in all_model_stats.items():
if stats["similarities"]:
avg_sim = sum(stats["similarities"]) / len(stats["similarities"])
avg_time = sum(stats["durations"]) / len(stats["durations"])
match_rate = sum(stats["matches"]) / len(stats["matches"])
results.append({
"model": model,
"avg_similarity_score": round(avg_sim, 4),
"avg_duration_sec": round(avg_time, 4),
"avg_match_rate": round(match_rate, 4)
})
# Save the CSV
csv_path = "benchmark_results.csv"
try:
with open(csv_path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["model", "avg_similarity_score", "avg_duration_sec", "avg_match_rate"])
writer.writeheader()
writer.writerows(results)
logger.info(f"✅ CSV salvo com {len(results)} modelos em {csv_path}")
except Exception as e:
logger.error(f"❌ Falha ao salvar CSV: {e}")
return jsonify(results), 200

53
services/face_service.py Normal file
View File

@ -0,0 +1,53 @@
import os
import logging
import numpy as np
from PIL import Image
from deepface import DeepFace
# Configure the logger
logging.basicConfig(
level=logging.DEBUG if os.getenv("DEBUG", "True").lower() == "true" else logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s"
)
logger = logging.getLogger(__name__)
threshold = float(os.getenv("THRESHOLD", 0.85))
def pil_to_numpy_rgb(image_file):
"""Converte qualquer imagem em numpy RGB"""
image_file.seek(0)
pil_image = Image.open(image_file).convert("RGB")
return np.array(pil_image)
def compare_faces_service(image1_file, image2_file):
"""
Compare two images using the DeepFace Dlib model.
Returns the similarity score, the match flag, and the threshold used.
"""
logger.info("📷 Iniciando comparação facial com DeepFace Dlib...")
try:
img1 = pil_to_numpy_rgb(image1_file)
img2 = pil_to_numpy_rgb(image2_file)
result = DeepFace.verify(
img1,
img2,
model_name="Dlib",
enforce_detection=False
)
similarity_score = 1 - result["distance"]
is_same_person = result["verified"]
logger.info(f"🔍 Match: {is_same_person} | Score: {similarity_score:.4f}")
return {
"match": bool(is_same_person),
"similarity_score": round(float(similarity_score), 4),
"threshold_used": threshold
}
except Exception as e:
logger.exception("❌ Erro na comparação facial")
raise ValueError(f"Erro na verificação: {str(e)}")

View File

@ -0,0 +1,35 @@
from minio import Minio
import os
from datetime import datetime
from io import BytesIO
# Configure the MinIO client
minio_client = Minio(
endpoint=os.getenv("MINIO_ENDPOINT", "minio:9000").replace("http://", ""),
access_key=os.getenv("MINIO_ACCESS_KEY", "admin"),
secret_key=os.getenv("MINIO_SECRET_KEY", "password"),
secure=False
)
BUCKET = os.getenv("MINIO_BUCKET", "data")
# Create the bucket if it does not exist
if not minio_client.bucket_exists(BUCKET):
minio_client.make_bucket(BUCKET)
def upload_image_to_minio(image_file, person_id):
now = datetime.now().strftime("%Y%m%d-%H%M%S")
filename = f"faces/{person_id}/{now}.jpg"
image_file.seek(0)
content = image_file.read()
buffer = BytesIO(content)
minio_client.put_object(
bucket_name=BUCKET,
object_name=filename,
data=buffer,
length=len(content),
content_type="image/jpeg"
)
return filename