Files
Telereview/code/Interface_Lounes/reconnaissancePouce.py
2023-03-22 13:27:44 +01:00

88 lines
3.1 KiB
Python

import cv2
import numpy as np
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands
def prodScalaire(V1, V2):
    """Return the dot (scalar) product of two vectors.

    Generalized from the original 2-D-only implementation to vectors of any
    length (backward compatible for 2-D inputs).

    NOTE(review): this helper is never called in the visible file — the main
    gesture code uses ``np.dot`` instead. Kept for API compatibility.

    Parameters
    ----------
    V1, V2 : sequence of numbers
        Vectors of equal length.

    Returns
    -------
    number
        sum(V1[i] * V2[i]).
    """
    return sum(a * b for a, b in zip(V1, V2))
def reconnaissancePouce(handLandmarks):
    """Classify a thumb gesture from 21 MediaPipe hand landmarks.

    BUGFIX: the original read ``.x``/``.y`` attributes, but the caller in this
    file builds ``handLandmarks`` as a list of ``[x, y]`` pairs (see the main
    loop), so every call raised ``AttributeError``. Landmarks are now indexed
    with ``[0]``/``[1]``.

    Parameters
    ----------
    handLandmarks : sequence of [x, y] pairs
        At least the first 21 landmarks in MediaPipe Hands order
        (wrist = 0, thumb = 1-4, then index/middle/ring/pinky by fours).

    Returns
    -------
    str
        "neutre" when any of the four fingers is extended (or the thumb
        direction is ambiguous), "baissé" for thumb down, "levé" for thumb up.
    """
    etatDuPouce = ["neutre", "baissé", "levé"]

    # A finger counts as extended when its two phalanx segments point the
    # same way (positive dot product). One extended finger is enough to
    # reject the pose as neutral.
    for cpt in range(4):
        base = 4 * cpt
        V1 = [handLandmarks[base + 6][0] - handLandmarks[base + 5][0],
              handLandmarks[base + 6][1] - handLandmarks[base + 5][1]]
        V2 = [handLandmarks[base + 8][0] - handLandmarks[base + 6][0],
              handLandmarks[base + 8][1] - handLandmarks[base + 6][1]]
        if np.dot(V1, V2) > 0:
            return etatDuPouce[0]

    # Thumb direction: compare the tip (4) and the MCP joint (2) relative to
    # the CMC joint (1). Image y grows downward, so tip below joint = down.
    V1 = [handLandmarks[4][0] - handLandmarks[1][0],
          handLandmarks[4][1] - handLandmarks[1][1]]
    V2 = [handLandmarks[2][0] - handLandmarks[1][0],
          handLandmarks[2][1] - handLandmarks[1][1]]
    dot = np.dot(V1, V2)  # hoisted: original computed this twice
    if dot > 0 and handLandmarks[4][1] > handLandmarks[2][1]:
        return etatDuPouce[1]  # baissé (thumb down)
    if dot > 0 and handLandmarks[4][1] < handLandmarks[2][1]:
        return etatDuPouce[2]  # levé (thumb up)
    return etatDuPouce[0]
# Main capture loop: read webcam frames, detect hands with MediaPipe,
# overlay the landmarks and the recognized thumb gesture. Press Esc to quit.
cap = cv2.VideoCapture(0)
with mp_hands.Hands(
        model_complexity=0,
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5) as hands:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            # If loading a video, use 'break' instead of 'continue'.
            continue

        # To improve performance, mark the image as not writeable to pass
        # by reference; MediaPipe expects RGB input.
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = hands.process(image)

        # Draw the hand annotations on the image (back in BGR for OpenCV).
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        gesture = None
        if results.multi_hand_landmarks:
            # Original code tested multi_hand_landmarks twice with two
            # identical loops; both passes are merged here.
            handLandmarks = []
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing_styles.get_default_hand_landmarks_style(),
                    mp_drawing_styles.get_default_hand_connections_style())
                # Keep the (x, y) position of each landmark for the
                # gesture classifier.
                for landmark in hand_landmarks.landmark:
                    handLandmarks.append([landmark.x, landmark.y])
            gesture = reconnaissancePouce(handLandmarks)

        # Flip the image horizontally for a selfie-view display BEFORE
        # drawing the label, so the text is not mirrored (bugfix: the
        # original drew the text first, then flipped it).
        image = cv2.flip(image, 1)
        if gesture is not None:
            cv2.putText(image, gesture, (50, 450),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)
        cv2.imshow('MediaPipe Hands', image)
        if cv2.waitKey(5) & 0xFF == 27:  # Esc key
            break
cap.release()
cv2.destroyAllWindows()  # bugfix: window was never destroyed on exit
""" etatDuPouce=["neutre","baissé","levé"]
i=0
if results.multi_hand_landmarks:
if(results.multi_hand_landmarks.gestures.categories[0].categoryName==Thumb_Up):
cv2.putText(image, str(results.multi_hand_landmarks.gestures.categories[0].categoryName), (50, 450), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)
else:
cv2.putText(image, "raté", (50, 450), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)
"""