import cv2
import numpy as np
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands


def prodScalaire(V1, V2):
    """Return the 2-D dot product of vectors V1 and V2."""
    return V1[0] * V2[0] + V1[1] * V2[1]


def reconnaissancePouce(handLandmarks):
    """Classify the thumb gesture from MediaPipe hand landmarks.

    Parameters
    ----------
    handLandmarks : list of [x, y]
        Normalized 2-D positions of the 21 MediaPipe hand landmarks
        (landmark 0 = wrist, 4 = thumb tip; finger i in 0..3 uses
        MCP = 4*i+5, PIP = 4*i+6, TIP = 4*i+8).

    Returns
    -------
    str
        "levé" (thumb up), "baissé" (thumb down), "neutre" when the fist
        is closed but the thumb direction is ambiguous, or "" when fewer
        than 21 landmarks are available or the hand is not a closed fist.
    """
    etatDuPouce = ["neutre", "baissé", "levé"]

    # Guard: without a full set of landmarks the indexing below would
    # raise IndexError (e.g. no hand detected this frame).
    if len(handLandmarks) < 21:
        return ""

    pts = np.asarray(handLandmarks, dtype=float)

    def _cos_arrondi(a, b):
        # Rounded cosine between two bone vectors: -1 means folded back
        # on itself, +1 means aligned, 0 otherwise.  Normalizing before
        # the dot product is required because raw landmark coordinates
        # are tiny, so an unnormalized dot product never reaches +/-1.
        na = np.linalg.norm(a)
        nb = np.linalg.norm(b)
        if na == 0.0 or nb == 0.0:
            return 0
        return int(round(float(np.dot(a, b)) / (na * nb)))

    # Sum the rounded cosines of (MCP->PIP, PIP->TIP) over the four
    # non-thumb fingers; -4 means every finger is folded back (fist).
    repli = 0
    for doigt in range(4):
        mcp = pts[4 * doigt + 5]
        pip = pts[4 * doigt + 6]
        tip = pts[4 * doigt + 8]
        repli += _cos_arrondi(pip - mcp, tip - pip)

    if repli != -4:
        return ""

    # Closed fist: compare wrist->thumb-tip with wrist->thumb-IP to see
    # whether the thumb points away from (+1) or toward (-1) the palm.
    # (The original subtracted Python lists here, which raises TypeError;
    # numpy arrays make the vector arithmetic valid.)
    orientation = _cos_arrondi(pts[4] - pts[0], pts[3] - pts[0])
    if orientation == 1:
        return etatDuPouce[2]   # "levé"
    if orientation == -1:
        return etatDuPouce[1]   # "baissé"
    return etatDuPouce[0]       # "neutre"


def main():
    """Grab webcam frames, track one or more hands, and overlay the thumb state."""
    cap = cv2.VideoCapture(0)
    with mp_hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as hands:
        while cap.isOpened():
            success, image = cap.read()
            if not success:
                print("Ignoring empty camera frame.")
                # If loading a video, use 'break' instead of 'continue'.
                continue

            # To improve performance, optionally mark the image as not
            # writeable to pass by reference.
            image.flags.writeable = False
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            results = hands.process(image)

            # Draw the hand annotations on the image and collect every
            # landmark's (x, y) position in a flat list.
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            handLandmarks = []
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    mp_drawing.draw_landmarks(
                        image,
                        hand_landmarks,
                        mp_hands.HAND_CONNECTIONS,
                        mp_drawing_styles.get_default_hand_landmarks_style(),
                        mp_drawing_styles.get_default_hand_connections_style())
                    for landmarks in hand_landmarks.landmark:
                        handLandmarks.append([landmarks.x, landmarks.y])

            # Empty list is safe: reconnaissancePouce returns "" for it.
            cv2.putText(image, reconnaissancePouce(handLandmarks), (50, 450),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)

            # Flip the image horizontally for a selfie-view display.
            cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))
            if cv2.waitKey(5) & 0xFF == 27:  # ESC quits
                break
    cap.release()


if __name__ == "__main__":
    main()