From 0d5167db5796693aef94b05b17ce5ae8d99a2c75 Mon Sep 17 00:00:00 2001
From: Quentin Roussel
Date: Wed, 22 Mar 2023 22:10:58 +0100
Subject: [PATCH] Restructure the backend in preparation for integrating the
 upcoming modules
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 code/backend_reconnaissance/compteurDoigt.py | 98 -------------------
 .../exemple_utilisation.py                   | 29 ------
 code/backend_reconnaissance/hand_detector.py | 84 ++++++++++++++++
 code/backend_reconnaissance/hands.py         | 78 ---------------
 code/backend_reconnaissance/main.py          | 50 +---------
 code/backend_reconnaissance/manager.py       | 44 +++++++++
 code/backend_reconnaissance/network.py       | 31 ++++++
 .../reconnaissancePouce.py                   | 88 -----------------
 8 files changed, 164 insertions(+), 338 deletions(-)
 delete mode 100644 code/backend_reconnaissance/compteurDoigt.py
 delete mode 100644 code/backend_reconnaissance/exemple_utilisation.py
 create mode 100644 code/backend_reconnaissance/hand_detector.py
 delete mode 100644 code/backend_reconnaissance/hands.py
 create mode 100644 code/backend_reconnaissance/manager.py
 create mode 100644 code/backend_reconnaissance/network.py
 delete mode 100644 code/backend_reconnaissance/reconnaissancePouce.py

diff --git a/code/backend_reconnaissance/compteurDoigt.py b/code/backend_reconnaissance/compteurDoigt.py
deleted file mode 100644
index c0fbc1f..0000000
--- a/code/backend_reconnaissance/compteurDoigt.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import cv2
-import mediapipe as mp
-mp_drawing = mp.solutions.drawing_utils
-mp_drawing_styles = mp.solutions.drawing_styles
-mp_hands = mp.solutions.hands
-
-def prodScalaire(V1,V2):
-    return (V1[0]*V2[0]+V1[1]*V2[1])/((V1[0]**2+V1[1]**2)**(1/2)*(V2[0]**2+V2[1]**2)**(1/2))  # normalized dot product (cosine of the angle)
-
-# initialize the camera
-cap = cv2.VideoCapture(0)
-
-# initialize MediaPipe Hands
-with mp_hands.Hands(static_image_mode=False, max_num_hands=2, min_detection_confidence=0.5, min_tracking_confidence=0.5) as hands:
-
-    while cap.isOpened():
-
-        # read one frame from the video stream
-        ret, frame = cap.read()
-
-        # convert the image to RGB
-        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-        # detect the hands
-        results = hands.process(image)
-
-        # Draw the hand annotations on the image.
-        image.flags.writeable = True
-        if results.multi_hand_landmarks:
-            for hand_landmarks in results.multi_hand_landmarks:
-                mp_drawing.draw_landmarks(
-                    image,
-                    hand_landmarks,
-                    mp_hands.HAND_CONNECTIONS,
-                    mp_drawing_styles.get_default_hand_landmarks_style(),
-                    mp_drawing_styles.get_default_hand_connections_style())
-
-        # count the number of raised fingers
-        hand_landmarks = [0, 0]
-        finger_count = 0
-        if len(results.multi_hand_landmarks) > 0:
-            hand_landmarks[0] = results.multi_hand_landmarks[0]
-            V0 = [hand_landmarks[0].landmark[2].x - hand_landmarks[0].landmark[0].x, hand_landmarks[0].landmark[2].y - hand_landmarks[0].landmark[0].y]
-            V1 = [hand_landmarks[0].landmark[4].x - hand_landmarks[0].landmark[2].x, hand_landmarks[0].landmark[4].y - hand_landmarks[0].landmark[2].y]
-            if prodScalaire(V0,V1) > 0.905135675:
-                finger_count += 1
-            V0 = [hand_landmarks[0].landmark[8].x - hand_landmarks[0].landmark[6].x, hand_landmarks[0].landmark[8].y - hand_landmarks[0].landmark[6].y]
-            V1 = [hand_landmarks[0].landmark[6].x - hand_landmarks[0].landmark[0].x, hand_landmarks[0].landmark[6].y - hand_landmarks[0].landmark[0].y]
-            if prodScalaire(V0,V1) > 0:
-                finger_count += 1
-            V0 = [hand_landmarks[0].landmark[12].x - hand_landmarks[0].landmark[10].x, hand_landmarks[0].landmark[12].y - hand_landmarks[0].landmark[10].y]
-            V1 = [hand_landmarks[0].landmark[10].x - hand_landmarks[0].landmark[0].x, hand_landmarks[0].landmark[10].y - hand_landmarks[0].landmark[0].y]
-            if prodScalaire(V0,V1) > 0:
-                finger_count += 1
-            V0 = [hand_landmarks[0].landmark[20].x - hand_landmarks[0].landmark[18].x, hand_landmarks[0].landmark[20].y - hand_landmarks[0].landmark[18].y]
-            V1 = [hand_landmarks[0].landmark[18].x - hand_landmarks[0].landmark[0].x, hand_landmarks[0].landmark[18].y - hand_landmarks[0].landmark[0].y]
-            if prodScalaire(V0,V1) > 0:
-                finger_count += 1
-            V0 = [hand_landmarks[0].landmark[16].x - hand_landmarks[0].landmark[14].x, hand_landmarks[0].landmark[16].y - hand_landmarks[0].landmark[14].y]
-            V1 = [hand_landmarks[0].landmark[14].x - hand_landmarks[0].landmark[0].x, hand_landmarks[0].landmark[14].y - hand_landmarks[0].landmark[0].y]
-            if prodScalaire(V0,V1) > 0:
-                finger_count += 1
-
-        if len(results.multi_hand_landmarks) > 1:
-            hand_landmarks[1] = results.multi_hand_landmarks[1]
-            V0 = [hand_landmarks[1].landmark[2].x - hand_landmarks[1].landmark[0].x, hand_landmarks[1].landmark[2].y - hand_landmarks[1].landmark[0].y]
-            V1 = [hand_landmarks[1].landmark[4].x - hand_landmarks[1].landmark[2].x, hand_landmarks[1].landmark[4].y - hand_landmarks[1].landmark[2].y]
-            if prodScalaire(V0,V1) > 0.905135675:
-                finger_count += 1
-            V0 = [hand_landmarks[1].landmark[8].x - hand_landmarks[1].landmark[6].x, hand_landmarks[1].landmark[8].y - hand_landmarks[1].landmark[6].y]
-            V1 = [hand_landmarks[1].landmark[6].x - hand_landmarks[1].landmark[0].x, hand_landmarks[1].landmark[6].y - hand_landmarks[1].landmark[0].y]
-            if prodScalaire(V0,V1) > 0:
-                finger_count += 1
-            V0 = [hand_landmarks[1].landmark[12].x - hand_landmarks[1].landmark[10].x, hand_landmarks[1].landmark[12].y - hand_landmarks[1].landmark[10].y]
-            V1 = [hand_landmarks[1].landmark[10].x - hand_landmarks[1].landmark[0].x, hand_landmarks[1].landmark[10].y - hand_landmarks[1].landmark[0].y]
-            if prodScalaire(V0,V1) > 0:
-                finger_count += 1
-            V0 = [hand_landmarks[1].landmark[20].x - hand_landmarks[1].landmark[18].x, hand_landmarks[1].landmark[20].y - hand_landmarks[1].landmark[18].y]
-            V1 = [hand_landmarks[1].landmark[18].x - hand_landmarks[1].landmark[0].x, hand_landmarks[1].landmark[18].y - hand_landmarks[1].landmark[0].y]
-            if prodScalaire(V0,V1) > 0:
-                finger_count += 1
-            V0 = [hand_landmarks[1].landmark[16].x - hand_landmarks[1].landmark[14].x, hand_landmarks[1].landmark[16].y - hand_landmarks[1].landmark[14].y]
-            V1 = [hand_landmarks[1].landmark[14].x - hand_landmarks[1].landmark[0].x, hand_landmarks[1].landmark[14].y - hand_landmarks[1].landmark[0].y]
-            if prodScalaire(V0,V1) > 0:
-                finger_count += 1
-
-        # display the raised-finger count
-        cv2.putText(image, f"Finger count: {finger_count}", (10, 50),
-                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-
-        # display the video
-        cv2.imshow('Video', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
-        if cv2.waitKey(10) & 0xFF == ord('q'):
-            break
-
-# release the camera and other resources
-cap.release()
-cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/code/backend_reconnaissance/exemple_utilisation.py b/code/backend_reconnaissance/exemple_utilisation.py
deleted file mode 100644
index 638f13e..0000000
--- a/code/backend_reconnaissance/exemple_utilisation.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import requests
-# Example: adding a review from the kiosk (website or gesture)
-avis = {
-    "note": 8,
-    "source": "borne",
-    # Optional
-    "auteur_age": 20,
-    "notes_autre": '{"proprete":8,"calme":10}',
-    "auteur_sexe": 'f',
-    "commentaire": "Commentaire"
-}
-
-res = requests.post("http://localhost:8080/add_review", data=avis)
-# print(res.text)
-
-# Example: adding a review found on social media
-avis = {
-    "auteur_nom": "michel",
-    "source": "instagram",
-    "note": 8,
-    "date": "2022-12-24",
-    # Optional
-    "commentaire": "J'ai beaucoup aimé !",
-    "lien": "https://instagram.com/si_insta_avait_des_liens_vers_des_commentaires_faudrait_le_mettre_ici",
-    "auteur_lien": "https://instagram.com/michel",
-}
-
-# res = requests.post("http://localhost:8080/add_social_review", data=avis)
-print(res.text)
\ No newline at end of file
diff --git a/code/backend_reconnaissance/hand_detector.py b/code/backend_reconnaissance/hand_detector.py
new file mode 100644
index 0000000..3c21c06
--- /dev/null
+++ b/code/backend_reconnaissance/hand_detector.py
@@ -0,0 +1,84 @@
+import cv2
+import mediapipe as mp
+import numpy as np
+
+class HandDetector():
+    def __init__(self):
+        self.mp_drawing = mp.solutions.drawing_utils
+        self.mp_drawing_styles = mp.solutions.drawing_styles
+        self.mp_hands = mp.solutions.hands
+        self.cap = cv2.VideoCapture(0)
+        self.hands = self.mp_hands.Hands(
+            model_complexity=0,
+            min_detection_confidence=0.5,
+            min_tracking_confidence=0.5)
+        # Parameters
+        self.BUFFER_LENGTH = 30
+        self.DETECTION_THRESHOLD = 3/4
+
+        self.resultBuffer = []
+
+    def reconnaissancePouce(self, handLandmarks):
+        etatDuPouce = ["neutre", "thumbs_down", "thumbs_up"]
+        i = 0
+        j = 0
+        # if any of the four non-thumb fingers is extended, the pose is neutral
+        for cpt in range(0, 4):
+            V1 = [handLandmarks[(4*cpt)+6][0]-handLandmarks[(4*cpt)+5][0], handLandmarks[(4*cpt)+6][1]-handLandmarks[(4*cpt)+5][1]]
+            V2 = [handLandmarks[(4*cpt)+8][0]-handLandmarks[(4*cpt)+6][0], handLandmarks[(4*cpt)+8][1]-handLandmarks[(4*cpt)+6][1]]
+            j = np.dot(V1, V2)
+            if (j > 0):
+                return etatDuPouce[0]
+        # an extended thumb whose tip sits below its base means thumbs-down,
+        # above it means thumbs-up (image y grows downward)
+        V1 = [handLandmarks[4][0]-handLandmarks[1][0], handLandmarks[4][1]-handLandmarks[1][1]]
+        V2 = [handLandmarks[2][0]-handLandmarks[1][0], handLandmarks[2][1]-handLandmarks[1][1]]
+        if ((np.dot(V1, V2)) > 0 and (handLandmarks[4][1] > handLandmarks[2][1])):
+            i = 1
+        elif (np.dot(V1, V2) > 0 and handLandmarks[4][1] < handLandmarks[2][1]):
+            i = 2
+        return etatDuPouce[i]
+
+    def detect(self):
+        success, image = self.cap.read()
+        if not success:
+            return False
+        results = self.hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+        if not results.multi_hand_landmarks:
+            return False
+        handLandmarks = [[lm.x, lm.y] for lm in results.multi_hand_landmarks[0].landmark]
+        thumbState = self.reconnaissancePouce(handLandmarks)
+
+        # sliding vote buffer over the last BUFFER_LENGTH classifications
+        self.resultBuffer.append(thumbState)
+        if (len(self.resultBuffer) > self.BUFFER_LENGTH):
+            self.resultBuffer.pop(0)
+
+        thumbsUpCount = sum(map(lambda x: x == "thumbs_up", self.resultBuffer))
+        thumbsDownCount = sum(map(lambda x: x == "thumbs_down", self.resultBuffer))
+
+        print(thumbsUpCount, thumbsDownCount)
+
+        # a gesture becomes the final decision once it fills 3/4 of the buffer
+        if (thumbsUpCount > self.DETECTION_THRESHOLD * self.BUFFER_LENGTH):
+            result = "thumbs_up"
+        elif (thumbsDownCount > self.DETECTION_THRESHOLD * self.BUFFER_LENGTH):
+            result = "thumbs_down"
+        else:
+            result = False
+
+        if (thumbState != "neutre"):
+            return thumbState, handLandmarks[9], np.linalg.norm(np.array(handLandmarks[9]) - np.array(handLandmarks[0])), result
+        return False
diff --git a/code/backend_reconnaissance/hands.py b/code/backend_reconnaissance/hands.py
deleted file mode 100644
index dab097c..0000000
--- a/code/backend_reconnaissance/hands.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import cv2
-import mediapipe as mp
-import numpy as np
-
-mp_drawing = mp.solutions.drawing_utils
-mp_drawing_styles = mp.solutions.drawing_styles
-mp_hands = mp.solutions.hands
-cap = cv2.VideoCapture(0)
-hands = mp_hands.Hands(
-    model_complexity=0,
-    min_detection_confidence=0.5,
-    min_tracking_confidence=0.5)
-BUFFER_LENGTH = 30
-TH_FRACTION = 3/4
-resultBuffer = []
-
-def reconnaissancePouce(handLandmarks):
-    etatDuPouce=["neutre","thumbs_down","thumbs_up"]
-    i=0
-    j=0
-    for cpt in range (0,4):
-        V1=[handLandmarks[(4*cpt)+6][0]-handLandmarks[(4*cpt)+5][0],handLandmarks[(4*cpt)+6][1]-handLandmarks[(4*cpt)+5][1]]
-        V2=[handLandmarks[(4*cpt)+8][0]-handLandmarks[(4*cpt)+6][0],handLandmarks[(4*cpt)+8][1]-handLandmarks[(4*cpt)+6][1]]
-        j=np.dot(V1,V2)
-        if (j>0):
-            return etatDuPouce[0]
-    V1=[handLandmarks[4][0]-handLandmarks[1][0],handLandmarks[4][1]-handLandmarks[1][1]]
-    V2=[handLandmarks[2][0]-handLandmarks[1][0],handLandmarks[2][1]-handLandmarks[1][1]]
-    if((np.dot(V1,V2))>0 and (handLandmarks[4][1]>handLandmarks[2][1])):
-        i=1
-    elif(np.dot(V1,V2)>0 and handLandmarks[4][1]<handLandmarks[2][1]):
-        i=2
-    return etatDuPouce[i]
-
-def getThumbState():
-    success, image = cap.read()
-    if not success:
-        return False
-    results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
-    if not results.multi_hand_landmarks:
-        return False
-    handLandmarks = [[lm.x, lm.y] for lm in results.multi_hand_landmarks[0].landmark]
-    thumbState = reconnaissancePouce(handLandmarks)
-    resultBuffer.append(thumbState)
-    if(len(resultBuffer) > BUFFER_LENGTH):
-        resultBuffer.pop(0)
-
-    thumbsUpCount = sum(map(lambda x : x == "thumbs_up", resultBuffer))
-    thumbsDownCount = sum(map(lambda x : x == "thumbs_down", resultBuffer))
-
-    print(thumbsUpCount,thumbsDownCount)
-
-    if(thumbsUpCount > TH_FRACTION * BUFFER_LENGTH):
-        result = "thumbs_up"
-    elif(thumbsDownCount > TH_FRACTION * BUFFER_LENGTH):
-        result = "thumbs_down"
-    else:
-        result = False
-
-    if(thumbState != "neutre"):
-        return thumbState, handLandmarks[9], np.linalg.norm(np.array(handLandmarks[9]) - np.array(handLandmarks[0])), result
-    return False
diff --git a/code/backend_reconnaissance/main.py b/code/backend_reconnaissance/main.py
index 1f37631..e444f89 100644
--- a/code/backend_reconnaissance/main.py
+++ b/code/backend_reconnaissance/main.py
@@ -1,45 +1,5 @@
-import asyncio
-import json
-import math
-import websockets
-import random
-import os
-import time
-from hands import getThumbState
-
-
-class WebsocketServer:
-    def __init__(self, getEffects, port=os.getenv("PORT"), host=os.getenv("HOST")) -> None:
-        self.thumbResult = None
-        self.state = 0
-        self.host = host
-        self.port = port
-        self.getEffects = getEffects
-
-    async def run(self):
-        async with websockets.serve(self.handler, self.host, self.port):
-            await asyncio.Future()
-
-    async def handler(self, websocket):
-        while True:
-            if (self.state == 0):
-                messages, result = self.getEffects()
-                if (messages != False):
-                    if (result == False):
-                        await websocket.send(json.dumps(messages))
-                    else:
-                        self.thumbResult = result
-                        self.state = 1
-                        await websocket.send('{"type":"state","state":2}')
-
-def getEffects():
-    res = getThumbState()
-    if (res != False):
-        state, coords, size, result = res
-        return {"type": "effects", "effects": [{"type": state, "x": coords[0], "y": coords[1], "width": size, "height": size}]}, result
state, "x":coords[0], "y": coords[1], "width": size, "height": size}]}, result - else: - return False,False - -server = WebsocketServer(getEffects) -asyncio.run(server.run()) \ No newline at end of file +from manager import Manager +if __name__ == "__main__": + print("backend started") + m = Manager() + m.loop() \ No newline at end of file diff --git a/code/backend_reconnaissance/manager.py b/code/backend_reconnaissance/manager.py new file mode 100644 index 0000000..3d6d0be --- /dev/null +++ b/code/backend_reconnaissance/manager.py @@ -0,0 +1,44 @@ +from hand_detector import HandDetector +from network import WebsocketServer +import time + +class Manager(): + def __init__(self): + self.state = 0 + self.avis = { + "note": None, + "commentaire": None, + "notes_autres": {} + } + self.server = WebsocketServer(None) + self.server.start() + self.handDetector = HandDetector() + print("Backend ready") + + def loop(self): + while(True): + if(self.state == 0): + self.sleep() + if(self.state == 1): + self.camera() + + time.sleep(0.01) + + def sleep(self): + res = self.handDetector.detect() + if(res != False): + self.state = 1 + self.server.sendMessage({"type": "state", "state": 1}) + + def camera(self): + res = self.handDetector.detect() + if(res != False): + state, coords, size, finalDecision = res + self.server.sendMessage({"type": "effects", "effects": [{"type": state, "x":coords[0], "y": coords[1], "width": size, "height": size}]}) + if(finalDecision != False): + self.avis["note"] = 10 if finalDecision == "thumbs_up" else 0 + self.state = 2 + self.server.sendMessage({"type": "state", "state": 2}) + + + diff --git a/code/backend_reconnaissance/network.py b/code/backend_reconnaissance/network.py new file mode 100644 index 0000000..5a28fee --- /dev/null +++ b/code/backend_reconnaissance/network.py @@ -0,0 +1,31 @@ +import asyncio +import json +import os +import threading +import websockets + +class WebsocketServer(threading.Thread): + def __init__(self, onMessage, port=os.getenv("PORT"), host=os.getenv("HOST")): + threading.Thread.__init__(self) + self.host = host + self.port = port + self.messageQueue = [] + self.onMessage = onMessage + + def run(self): + print("server thread started") + asyncio.run(self.runServer()) + + async def runServer(self): + async with websockets.serve(self.handler, self.host, self.port): + await asyncio.Future() + + async def handler(self,websocket): + while True: + for msg in self.messageQueue: + await websocket.send(json.dumps(msg)) + self.messageQueue.pop(0) + await asyncio.sleep(0.01) + + def sendMessage(self,message): + self.messageQueue.append(message) \ No newline at end of file diff --git a/code/backend_reconnaissance/reconnaissancePouce.py b/code/backend_reconnaissance/reconnaissancePouce.py deleted file mode 100644 index 80d3894..0000000 --- a/code/backend_reconnaissance/reconnaissancePouce.py +++ /dev/null @@ -1,88 +0,0 @@ -import cv2 -import numpy as np -import mediapipe as mp -mp_drawing = mp.solutions.drawing_utils -mp_drawing_styles = mp.solutions.drawing_styles -mp_hands = mp.solutions.hands - -def prodScalaire(V1,V2): - return V1[0]*V2[0]+V1[1]*V2[1] - -def reconnaissancePouce(handLandmarks): - etatDuPouce=["neutre","baissé","levé"] - i=0 - j=0 - for cpt in range (0,4): - V1=[handLandmarks[(4*cpt)+6][0]-handLandmarks[(4*cpt)+5][0],handLandmarks[(4*cpt)+6][1]-handLandmarks[(4*cpt)+5][1]] - V2=[handLandmarks[(4*cpt)+8][0]-handLandmarks[(4*cpt)+6][0],handLandmarks[(4*cpt)+8][1]-handLandmarks[(4*cpt)+6][1]] - j=np.dot(V1,V2) - if (j>0): - return etatDuPouce[0] 
-    V1=[handLandmarks[4][0]-handLandmarks[1][0],handLandmarks[4][1]-handLandmarks[1][1]]
-    V2=[handLandmarks[2][0]-handLandmarks[1][0],handLandmarks[2][1]-handLandmarks[1][1]]
-    if((np.dot(V1,V2))>0 and (handLandmarks[4][1]>handLandmarks[2][1])):
-        i=1
-    elif(np.dot(V1,V2)>0 and handLandmarks[4][1]<handLandmarks[2][1]):
-        i=2
-    return etatDuPouce[i]
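
After this patch the frontend only consumes a one-way JSON feed from WebsocketServer: Manager pushes {"type": "state", "state": N} on each state transition (1 once a hand is detected, 2 once the review score is fixed) and {"type": "effects", "effects": [...]} frames while a gesture is being tracked. A minimal sketch of a consuming client, assuming the backend was launched with HOST=localhost and PORT=8765 (both are read from environment variables, so these concrete values are placeholders):

    import asyncio
    import json
    import websockets

    async def main():
        # host/port are assumptions; they must match the backend's HOST/PORT env vars
        async with websockets.connect("ws://localhost:8765") as ws:
            async for raw in ws:
                msg = json.loads(raw)
                if msg["type"] == "state":
                    print("state ->", msg["state"])
                elif msg["type"] == "effects":
                    for effect in msg["effects"]:
                        # e.g. "thumbs_up" with normalized position and size
                        print(effect["type"], effect["x"], effect["y"], effect["width"])

    asyncio.run(main())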