mirror of
https://git.roussel.pro/telecom-paris/pact.git
synced 2026-02-09 02:20:17 +01:00
Merge branch 'Interface_Lounes' of gitlab.enst.fr:pact/2022-2023/pact71 into quentin
This commit is contained in:
98
code/Interface_Lounes/compteurDoigt.py
Normal file
98
code/Interface_Lounes/compteurDoigt.py
Normal file
@@ -0,0 +1,98 @@
|
||||
import cv2
|
||||
import mediapipe as mp
|
||||
mp_drawing = mp.solutions.drawing_utils
|
||||
mp_drawing_styles = mp.solutions.drawing_styles
|
||||
mp_hands = mp.solutions.hands
|
||||
|
||||
def prodScalaire(V1, V2):
    """Return the cosine of the angle between the 2-D vectors V1 and V2.

    This is the dot product of the two vectors divided by the product of
    their Euclidean norms (normalised dot product), so the result lies
    in [-1, 1] for non-zero vectors.
    """
    dot = V1[0] * V2[0] + V1[1] * V2[1]
    norm1 = (V1[0] ** 2 + V1[1] ** 2) ** 0.5
    norm2 = (V2[0] ** 2 + V2[1] ** 2) ** 0.5
    return dot / (norm1 * norm2)
|
||||
|
||||
# Camera initialisation.
cap = cv2.VideoCapture(0)


def _compterDoigtsMain(hand):
    """Count the raised fingers of one detected hand.

    hand: one MediaPipe hand-landmark result (exposes a ``.landmark``
    sequence of points with ``.x`` / ``.y`` attributes).
    Returns an int between 0 and 5.
    """
    lm = hand.landmark
    nb = 0
    # Thumb: counted as raised when the MCP->tip segment stays nearly
    # aligned with the wrist->MCP segment (cosine > 0.905135675,
    # i.e. an angle of roughly 25 degrees — threshold kept from the
    # original tuning).
    v0 = [lm[2].x - lm[0].x, lm[2].y - lm[0].y]
    v1 = [lm[4].x - lm[2].x, lm[4].y - lm[2].y]
    if prodScalaire(v0, v1) > 0.905135675:
        nb += 1
    # Other fingers: counted as raised when the PIP->tip segment points
    # the same way as the wrist->PIP segment (positive normalised dot
    # product). Landmark pairs are (PIP, tip) per finger.
    for pip, tip in ((6, 8), (10, 12), (14, 16), (18, 20)):
        v0 = [lm[tip].x - lm[pip].x, lm[tip].y - lm[pip].y]
        v1 = [lm[pip].x - lm[0].x, lm[pip].y - lm[0].y]
        if prodScalaire(v0, v1) > 0:
            nb += 1
    return nb


# MediaPipe Hands initialisation.
with mp_hands.Hands(static_image_mode=False,
                    max_num_hands=2,
                    min_detection_confidence=0.5,
                    min_tracking_confidence=0.5) as hands:
    while cap.isOpened():
        # Read one video frame.
        ret, frame = cap.read()
        if not ret:
            # Bug fix: a failed grab used to reach cv2.cvtColor(None, ...)
            # and crash; skip the frame instead.
            continue

        # Convert the image to RGB for MediaPipe hand detection.
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = hands.process(image)

        # Draw the hand annotations and count the raised fingers of every
        # detected hand (previously the same counting code was duplicated
        # verbatim for hand 0 and hand 1).
        image.flags.writeable = True
        finger_count = 0
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing_styles.get_default_hand_landmarks_style(),
                    mp_drawing_styles.get_default_hand_connections_style())
                finger_count += _compterDoigtsMain(hand_landmarks)

        # Display the running finger count and the video stream
        # (converted back to BGR for OpenCV display).
        cv2.putText(image, f"Finger count: {finger_count}", (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Video', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

# Release the camera and the display resources.
cap.release()
cv2.destroyAllWindows()
|
||||
88
code/Interface_Lounes/reconnaissancePouce.py
Normal file
88
code/Interface_Lounes/reconnaissancePouce.py
Normal file
@@ -0,0 +1,88 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import mediapipe as mp
|
||||
mp_drawing = mp.solutions.drawing_utils
|
||||
mp_drawing_styles = mp.solutions.drawing_styles
|
||||
mp_hands = mp.solutions.hands
|
||||
|
||||
def prodScalaire(V1, V2):
    """Return the plain (non-normalised) 2-D dot product of V1 and V2.

    NOTE(review): unlike the same-named helper in compteurDoigt.py this
    version is not normalised; it is currently unused in this file (the
    code below uses np.dot directly).
    """
    x_part = V1[0] * V2[0]
    y_part = V1[1] * V2[1]
    return x_part + y_part
|
||||
|
||||
def reconnaissancePouce(handLandmarks):
    """Classify the thumb pose: "neutre", "baissé" or "levé".

    handLandmarks: list of [x, y] pairs filled from MediaPipe hand
    landmarks (21 entries for one hand; only the first hand's indices
    are read).

    Returns "neutre" as soon as any of the four non-thumb fingers looks
    extended (its PIP->tip segment continues the MCP->PIP segment);
    otherwise classifies the thumb from its direction relative to the
    base of the hand and the vertical position of its tip.
    """
    etatDuPouce = ["neutre", "baissé", "levé"]

    # A finger counts as extended when the PIP->tip segment points the
    # same way as the MCP->PIP segment (positive dot product). The four
    # fingers use the landmark triplets (5,6,8), (9,10,12), (13,14,16)
    # and (17,18,20).
    for base in (5, 9, 13, 17):
        seg1 = [handLandmarks[base + 1][0] - handLandmarks[base][0],
                handLandmarks[base + 1][1] - handLandmarks[base][1]]
        seg2 = [handLandmarks[base + 3][0] - handLandmarks[base + 1][0],
                handLandmarks[base + 3][1] - handLandmarks[base + 1][1]]
        if np.dot(seg1, seg2) > 0:
            return etatDuPouce[0]

    # Thumb direction: CMC(1)->tip(4) compared against CMC(1)->MCP(2).
    thumb = [handLandmarks[4][0] - handLandmarks[1][0],
             handLandmarks[4][1] - handLandmarks[1][1]]
    ref = [handLandmarks[2][0] - handLandmarks[1][0],
           handLandmarks[2][1] - handLandmarks[1][1]]
    if np.dot(thumb, ref) > 0:
        # Tip y greater than MCP y means thumb down (presumably image
        # coordinates with y growing downwards — TODO confirm).
        if handLandmarks[4][1] > handLandmarks[2][1]:
            return etatDuPouce[1]
        if handLandmarks[4][1] < handLandmarks[2][1]:
            return etatDuPouce[2]
    return etatDuPouce[0]
|
||||
|
||||
|
||||
# Camera initialisation.
cap = cv2.VideoCapture(0)

with mp_hands.Hands(
        model_complexity=0,
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5) as hands:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            # If loading a video, use 'break' instead of 'continue'.
            continue

        # To improve performance, mark the image as not writeable to
        # pass by reference while MediaPipe processes it.
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = hands.process(image)

        # Draw the hand annotations on the image.
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing_styles.get_default_hand_landmarks_style(),
                    mp_drawing_styles.get_default_hand_connections_style())

            # Collect the (x, y) position of every landmark of every
            # detected hand for the thumb classifier.
            handLandmarks = []
            for hand_landmarks in results.multi_hand_landmarks:
                for landmark in hand_landmarks.landmark:
                    handLandmarks.append([landmark.x, landmark.y])

            # Bug fix: only classify when landmarks exist — calling
            # reconnaissancePouce([]) on a frame with no hands raised
            # IndexError on the first landmark access.
            cv2.putText(image, reconnaissancePouce(handLandmarks), (50, 450),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)

        # Flip the image horizontally for a selfie-view display.
        cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))
        if cv2.waitKey(5) & 0xFF == 27:
            break

cap.release()
|
||||
|
||||
|
||||
|
||||
|
||||
""" etatDuPouce=["neutre","baissé","levé"]
|
||||
i=0
|
||||
|
||||
if results.multi_hand_landmarks:
|
||||
|
||||
if(results.multi_hand_landmarks.gestures.categories[0].categoryName==Thumb_Up):
|
||||
cv2.putText(image, str(results.multi_hand_landmarks.gestures.categories[0].categoryName), (50, 450), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)
|
||||
else:
|
||||
cv2.putText(image, "raté", (50, 450), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)
|
||||
"""
|
||||
Reference in New Issue
Block a user