mirror of
https://git.roussel.pro/telecom-paris/pact.git
synced 2026-02-09 02:20:17 +01:00
avancement rapport
This commit is contained in:
27
code/backend_reconnaissance/.dockerignore
Normal file
27
code/backend_reconnaissance/.dockerignore
Normal file
@@ -0,0 +1,27 @@
|
||||
**/__pycache__
|
||||
**/.venv
|
||||
**/.classpath
|
||||
**/.dockerignore
|
||||
**/.env
|
||||
**/.git
|
||||
**/.gitignore
|
||||
**/.project
|
||||
**/.settings
|
||||
**/.toolstarget
|
||||
**/.vs
|
||||
**/.vscode
|
||||
**/*.*proj.user
|
||||
**/*.dbmdl
|
||||
**/*.jfm
|
||||
**/bin
|
||||
**/charts
|
||||
**/docker-compose*
|
||||
**/compose*
|
||||
**/Dockerfile*
|
||||
**/node_modules
|
||||
**/npm-debug.log
|
||||
**/obj
|
||||
**/secrets.dev.yaml
|
||||
**/values.dev.yaml
|
||||
LICENSE
|
||||
README.md
|
||||
20
code/backend_reconnaissance/Dockerfile
Normal file
20
code/backend_reconnaissance/Dockerfile
Normal file
@@ -0,0 +1,20 @@
|
||||
FROM python:3.8-slim

# Don't write .pyc files inside the container.
ENV PYTHONDONTWRITEBYTECODE=1
# Flush logs straight to the terminal (no stdout buffering).
ENV PYTHONUNBUFFERED=1

# OpenCV runtime dependencies (TODO: remove if no longer needed).
# update + install + cleanup in ONE layer: separate RUNs can reuse a stale
# apt cache, and purging the lists keeps the image small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 \
    && rm -rf /var/lib/apt/lists/*

# Set the working directory before copying so requirements.txt does not
# land in / — then install deps first to leverage Docker layer caching.
WORKDIR /app
COPY requirements.txt .
RUN python -m pip install --no-cache-dir -r requirements.txt

# Copy the application source.
COPY . /app

CMD ["python", "main.py"]
|
||||
BIN
code/backend_reconnaissance/__pycache__/hands.cpython-310.pyc
Normal file
BIN
code/backend_reconnaissance/__pycache__/hands.cpython-310.pyc
Normal file
Binary file not shown.
29
code/backend_reconnaissance/exemple_utilisation.py
Normal file
29
code/backend_reconnaissance/exemple_utilisation.py
Normal file
@@ -0,0 +1,29 @@
|
||||
"""Usage examples for the review backend HTTP API."""
import requests

# Example: posting a review captured at the kiosk (website or gesture).
kiosk_review = {
    "note": 8,
    "source": "borne",
    # Optional fields
    "auteur_age": 20,
    "notes_autre": '{"proprete":8,"calme":10}',
    "auteur_sexe": 'f',
    "commentaire": "Commentaire",
}

# res = requests.post("http://localhost:8080/add_review", data=kiosk_review)
# print(res.text)

# Example: posting a review found on social media.
social_review = {
    "auteur_nom": "michel",
    "source": "instagram",
    "note": 8,
    "date": "2022-12-24",
    # Optional fields
    "commentaire": "J'ai beaucoup aimé !",
    "lien": "https://instagram.com/si_insta_avait_des_liens_vers_des_commentaires_faudrait_le_mettre_ici",
    "auteur_lien": "https://instagram.com/michel",
}

response = requests.post("http://localhost:8080/add_social_review", data=social_review)
print(response.text)
|
||||
44
code/backend_reconnaissance/hands.py
Normal file
44
code/backend_reconnaissance/hands.py
Normal file
@@ -0,0 +1,44 @@
|
||||
"""Webcam hand tracking via MediaPipe Hands.

Opens the default webcam as a module-level side effect and exposes
frame(), which grabs one frame and runs hand-landmark detection on it.
"""
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands

# For webcam input: the capture device and the model are created once,
# at import time, and shared by every call to frame().
cap = cv2.VideoCapture(0)
hands = mp_hands.Hands(
    model_complexity=0,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5)


def frame():
    """Grab one webcam frame and run hand-landmark detection on it.

    Returns nothing; landmarks are only drawn onto the local image
    buffer (the display call is currently commented out).
    """
    if not cap.isOpened():
        return
    ok, image = cap.read()
    if not ok:
        print("Ignoring empty camera frame.")
        # If loading a video, use 'break' instead of 'continue'.
        return

    # To improve performance, optionally mark the image as not writeable to
    # pass by reference.
    image.flags.writeable = False
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = hands.process(image)

    # Draw the hand annotations on the image.
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(
                image,
                hand_landmarks,
                mp_hands.HAND_CONNECTIONS,
                mp_drawing_styles.get_default_hand_landmarks_style(),
                mp_drawing_styles.get_default_hand_connections_style())
    # Flip the image horizontally for a selfie-view display.
    # cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))
    if cv2.waitKey(5) & 0xFF == 27:  # ESC pressed
        return
    # cap.release()
|
||||
|
||||
33
code/backend_reconnaissance/main.py
Normal file
33
code/backend_reconnaissance/main.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import asyncio
|
||||
import json
|
||||
import websockets
|
||||
import random
|
||||
import os
|
||||
import hands
|
||||
import time
|
||||
|
||||
|
||||
class WebsocketServer:
    """Pushes gesture-effect payloads to every connected client at ~30 fps."""

    def __init__(self, getEffects, port=os.getenv("PORT"), host=os.getenv("HOST")) -> None:
        # Callable returning the current effects payload (JSON-serialisable).
        self.getEffects = getEffects
        # Bind address; NOTE(review): defaults are read from the environment
        # once, when the class is defined — confirm that is intended.
        self.host = host
        self.port = port

    async def run(self):
        """Start the websocket server and block forever."""
        async with websockets.serve(self.handler, self.host, self.port):
            await asyncio.Future()  # never resolves: serve until cancelled

    async def handler(self, websocket):
        """Per-connection loop: send the latest effects ~30 times per second."""
        while True:
            payload = self.getEffects()
            hands.frame()  # advance the camera/detection pipeline one tick
            await websocket.send(json.dumps(payload))
            await asyncio.sleep(1 / 30)
|
||||
|
||||
# Replace this with the function that fetches effects from the
# gesture-recognition module.
def getEffects():
    """Return a dummy effects payload with one randomly placed thumbs-up."""
    x = random.randint(0, 100)
    y = random.randint(0, 100)
    effect = {"type": "thumbs_up", "x": x, "y": y, "width": 50, "height": 50}
    return {"type": "effects", "effects": [effect]}
|
||||
|
||||
if __name__ == "__main__":
    # Only start the server when executed as a script: without this guard,
    # merely importing the module would open a socket and block forever
    # on asyncio.run().
    server = WebsocketServer(getEffects)
    asyncio.run(server.run())
|
||||
4
code/backend_reconnaissance/requirements.txt
Normal file
4
code/backend_reconnaissance/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
websockets
|
||||
requests
|
||||
opencv-python
|
||||
mediapipe
|
||||
Reference in New Issue
Block a user