mirror of
https://git.roussel.pro/telecom-paris/pact.git
synced 2026-02-09 10:30:17 +01:00
add MediaPipe Hands example
This commit is contained in:
@@ -5,7 +5,11 @@ ENV PYTHONDONTWRITEBYTECODE=1
|
|||||||
#Afficher les logs directement dans le terminal
|
#Afficher les logs directement dans le terminal
|
||||||
ENV PYTHONUNBUFFERED=1
|
ENV PYTHONUNBUFFERED=1
|
||||||
|
|
||||||
# Installation des dépendances
|
#Installation des dépendances de opencv (TODO: supprimer si plus besoin)
|
||||||
|
RUN apt-get update
|
||||||
|
RUN apt-get install ffmpeg libsm6 libxext6 -y
|
||||||
|
|
||||||
|
# Installation des dépendances python
|
||||||
COPY requirements.txt .
|
COPY requirements.txt .
|
||||||
RUN python -m pip install -r requirements.txt
|
RUN python -m pip install -r requirements.txt
|
||||||
|
|
||||||
|
|||||||
BIN
code/backend_reconaissance/__pycache__/hands.cpython-310.pyc
Normal file
BIN
code/backend_reconaissance/__pycache__/hands.cpython-310.pyc
Normal file
Binary file not shown.
45
code/backend_reconaissance/hands.py
Normal file
45
code/backend_reconaissance/hands.py
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
import os

import cv2
import mediapipe as mp

# MediaPipe drawing helpers and the Hands solution module.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands

# Webcam input. The device index is configurable through CAMERA_INDEX so the
# same code works when Docker maps a different host camera onto the container
# (see docker-compose: /dev/video3 -> /dev/video0). Default stays 0.
# cap = cv2.VideoCapture("dev/v4l/by-path/pci-0000:04:00.3-usb-0:4:1.0-video-index0")
cap = cv2.VideoCapture(int(os.getenv("CAMERA_INDEX", "0")))

# Hand-landmark detector. model_complexity=0 selects the lightest model;
# the 0.5 confidence thresholds are the MediaPipe defaults.
hands = mp_hands.Hands(
    model_complexity=0,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5)
|
||||||
|
|
||||||
|
def frame():
    """Grab one frame from the webcam and run MediaPipe hand detection on it.

    Reads a single frame from the module-level capture device ``cap``,
    runs the ``hands`` detector and draws any detected hand landmarks
    onto the frame.

    Returns the annotated BGR frame as a numpy array, or ``None`` when the
    capture device is closed or the frame could not be read.  (Previously
    the annotated image was computed and then discarded; returning it lets
    callers actually use the result.)
    """
    if not cap.isOpened():
        return None

    success, image = cap.read()
    if not success:
        # NOTE: when reading from a video file instead of a live camera,
        # the caller's loop should break here rather than skip the frame.
        print("Ignoring empty camera frame.")
        return None

    # To improve performance, mark the image as not writeable so MediaPipe
    # can pass it by reference.
    image.flags.writeable = False
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = hands.process(image)

    # Draw the hand annotations on the image (back in writeable BGR form).
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(
                image,
                hand_landmarks,
                mp_hands.HAND_CONNECTIONS,
                mp_drawing_styles.get_default_hand_landmarks_style(),
                mp_drawing_styles.get_default_hand_connections_style())

    # Flip the image horizontally for a selfie-view display.
    # cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))
    # (The former cv2.waitKey(5) ESC check was dead code: with imshow
    # commented out there is no window to poll, and it only added latency
    # in the headless container.)
    # cap.release()
    return image
||||||
@@ -3,6 +3,7 @@ import json
|
|||||||
import websockets
|
import websockets
|
||||||
import random
|
import random
|
||||||
import os
|
import os
|
||||||
|
import hands
|
||||||
|
|
||||||
class WebsocketServer:
|
class WebsocketServer:
|
||||||
def __init__(self,getEffects,port=os.getenv("PORT"),host=os.getenv("HOST")) -> None:
|
def __init__(self,getEffects,port=os.getenv("PORT"),host=os.getenv("HOST")) -> None:
|
||||||
@@ -18,6 +19,7 @@ class WebsocketServer:
|
|||||||
async def handler(self,websocket):
|
async def handler(self,websocket):
|
||||||
while True:
|
while True:
|
||||||
messages = self.getEffects()
|
messages = self.getEffects()
|
||||||
|
hands.frame()
|
||||||
await websocket.send(json.dumps(messages))
|
await websocket.send(json.dumps(messages))
|
||||||
await asyncio.sleep(1/30)
|
await asyncio.sleep(1/30)
|
||||||
|
|
||||||
|
|||||||
@@ -1,2 +1,4 @@
|
|||||||
websockets
|
websockets
|
||||||
requests
|
requests
|
||||||
|
opencv-python
|
||||||
|
mediapipe
|
||||||
@@ -79,6 +79,8 @@ services:
|
|||||||
build: ./backend_reconaissance
|
build: ./backend_reconaissance
|
||||||
container_name: backend_reconaissance
|
container_name: backend_reconaissance
|
||||||
restart: always
|
restart: always
|
||||||
|
devices:
|
||||||
|
- /dev/video3:/dev/video0
|
||||||
environment:
|
environment:
|
||||||
- PORT=5000
|
- PORT=5000
|
||||||
- HOST=backend_reconaissance
|
- HOST=backend_reconaissance
|
||||||
|
|||||||
@@ -3,6 +3,8 @@
|
|||||||
* Télécharger : `git clone https://github.com/umlaeute/v4l2loopback.git`
|
* Télécharger : `git clone https://github.com/umlaeute/v4l2loopback.git`
|
||||||
* Installer avec : `make` puis `sudo make install`
|
* Installer avec : `make` puis `sudo make install`
|
||||||
* activer le module : `sudo modprobe v4l2loopback devices=2`
|
* activer le module : `sudo modprobe v4l2loopback devices=2`
|
||||||
|
* Erreur possible : operation not permitted : il faut désactiver secure boot
|
||||||
|
* OU `apt update && apt install v4l2loopback-dkms v4l2loopback-utils`
|
||||||
* Faire looper la camera /dev/video0 sur les autres
|
* Faire looper la camera /dev/video0 sur les autres
|
||||||
* installer ffmpeg : `sudo apt-get install ffmpeg`
|
* installer ffmpeg : `sudo apt-get install ffmpeg`
|
||||||
* activer le loopback : `ffmpeg -f video4linux2 -i /dev/video0 -codec copy -f v4l2 /dev/video1 -codec copy -f v4l2 /dev/video2`
|
* activer le loopback : `ffmpeg -f video4linux2 -i /dev/video0 -codec copy -f v4l2 /dev/video1 -codec copy -f v4l2 /dev/video2`
|
||||||
|
|||||||
Reference in New Issue
Block a user