import cv2
import mediapipe as mp
import numpy as np

mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands

cap = cv2.VideoCapture(0)
hands = mp_hands.Hands(
    model_complexity=0,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5)

BUFFER_LENGTH = 30    # number of recent per-frame results kept for smoothing
TH_FRACTION = 3/4     # fraction of the buffer that must agree before a gesture is reported
resultBuffer = []


def reconnaissancePouce(handLandmarks):
    """Classify the thumb pose from the 21 hand landmarks given as [x, y] pairs."""
    etatDuPouce = ["neutre", "thumbs_down", "thumbs_up"]
    i = 0
    # The four non-thumb fingers must be folded: for each finger, compare the
    # MCP->PIP vector (V1) with the PIP->TIP vector (V2). A positive dot product
    # means the finger is extended, so no thumb gesture is being made.
    for cpt in range(0, 4):
        V1 = [handLandmarks[(4*cpt)+6][0] - handLandmarks[(4*cpt)+5][0],
              handLandmarks[(4*cpt)+6][1] - handLandmarks[(4*cpt)+5][1]]
        V2 = [handLandmarks[(4*cpt)+8][0] - handLandmarks[(4*cpt)+6][0],
              handLandmarks[(4*cpt)+8][1] - handLandmarks[(4*cpt)+6][1]]
        if np.dot(V1, V2) > 0:
            return etatDuPouce[0]
    # Thumb orientation: compare the CMC->TIP vector (V1) with the CMC->MCP
    # vector (V2); since image y grows downwards, the vertical position of the
    # thumb tip relative to its MCP joint decides between down and up.
    V1 = [handLandmarks[4][0] - handLandmarks[1][0], handLandmarks[4][1] - handLandmarks[1][1]]
    V2 = [handLandmarks[2][0] - handLandmarks[1][0], handLandmarks[2][1] - handLandmarks[1][1]]
    if np.dot(V1, V2) > 0 and handLandmarks[4][1] > handLandmarks[2][1]:
        i = 1
    elif np.dot(V1, V2) > 0 and handLandmarks[4][1] < handLandmarks[2][1]:
        i = 2
    return etatDuPouce[i]


def reconnaissanceGestes(handLandmarks):
    # Per-frame gesture smoothing (the function name and its first lines were
    # garbled in the source, so this header is assumed). The instantaneous thumb
    # state is pushed into a sliding buffer so that a gesture is only confirmed
    # once it has been stable for most of the last BUFFER_LENGTH frames.
    thumbState = reconnaissancePouce(handLandmarks)
    resultBuffer.append(thumbState)
    if len(resultBuffer) > BUFFER_LENGTH:
        resultBuffer.pop(0)
    thumbsUpCount = sum(map(lambda x: x == "thumbs_up", resultBuffer))
    thumbsDownCount = sum(map(lambda x: x == "thumbs_down", resultBuffer))
    print(thumbsUpCount, thumbsDownCount)
    if thumbsUpCount > TH_FRACTION * BUFFER_LENGTH:
        result = "thumbs_up"
    elif thumbsDownCount > TH_FRACTION * BUFFER_LENGTH:
        result = "thumbs_down"
    else:
        result = False
    if thumbState != "neutre":
        # Return the instantaneous state, a reference point (middle-finger MCP),
        # the hand scale (wrist to middle-finger MCP distance) and the smoothed result.
        return thumbState, handLandmarks[9], np.linalg.norm(np.array(handLandmarks[9]) - np.array(handLandmarks[0])), result
    return False
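

# --- Minimal capture-loop sketch (not part of the original listing) ---------
# This shows one way the objects set up above (cap, hands, the drawing helpers)
# could be wired to the gesture functions. It assumes handLandmarks is a plain
# list of [x, y] pairs built from MediaPipe's normalized landmarks, and that
# reconnaissanceGestes (reconstructed above) is the per-frame entry point.
if __name__ == "__main__":
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            continue
        # MediaPipe expects RGB input; OpenCV delivers BGR frames.
        results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                # Flatten the normalized landmarks into the [x, y] list format
                # used by reconnaissancePouce / reconnaissanceGestes.
                handLandmarks = [[lm.x, lm.y] for lm in hand_landmarks.landmark]
                gesture = reconnaissanceGestes(handLandmarks)
                if gesture:
                    print(gesture[0], gesture[3])
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing_styles.get_default_hand_landmarks_style(),
                    mp_drawing_styles.get_default_hand_connections_style())
        cv2.imshow('MediaPipe Hands', image)
        if cv2.waitKey(5) & 0xFF == 27:  # quit on Esc
            break
    cap.release()
    cv2.destroyAllWindows()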