Integration of image recognition into the kiosk interface

This commit is contained in:
Quentin Roussel
2023-03-22 14:39:56 +01:00
parent 15bc1c7714
commit d896767543
11 changed files with 131 additions and 156 deletions


@@ -1,44 +1,78 @@
import cv2
import mediapipe as mp
import numpy as np
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands
# For webcam input:
cap = cv2.VideoCapture(0)
hands = mp_hands.Hands(
    model_complexity=0,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5)
# Number of recent frames used to smooth the gesture classification.
BUFFER_LENGTH = 30
# Fraction of the buffer that must agree before a gesture is reported.
TH_FRACTION = 3/4
resultBuffer = []
def reconnaissancePouce(handLandmarks):
    # Classify the thumb gesture from the 21 hand landmarks ([x, y] pairs).
    etatDuPouce = ["neutre", "thumbs_down", "thumbs_up"]
    i = 0
    # If any of the four non-thumb fingers is extended (its two phalanx
    # vectors point the same way, i.e. positive dot product), the pose is
    # neutral: a gesture requires all four fingers to be folded.
    for cpt in range(0, 4):
        V1 = [handLandmarks[(4*cpt)+6][0] - handLandmarks[(4*cpt)+5][0],
              handLandmarks[(4*cpt)+6][1] - handLandmarks[(4*cpt)+5][1]]
        V2 = [handLandmarks[(4*cpt)+8][0] - handLandmarks[(4*cpt)+6][0],
              handLandmarks[(4*cpt)+8][1] - handLandmarks[(4*cpt)+6][1]]
        if np.dot(V1, V2) > 0:
            return etatDuPouce[0]
    # The thumb must be extended (positive dot product); its tip above or
    # below the thumb MCP joint decides between thumbs_up and thumbs_down
    # (image y grows downwards).
    V1 = [handLandmarks[4][0] - handLandmarks[1][0], handLandmarks[4][1] - handLandmarks[1][1]]
    V2 = [handLandmarks[2][0] - handLandmarks[1][0], handLandmarks[2][1] - handLandmarks[1][1]]
    if np.dot(V1, V2) > 0 and handLandmarks[4][1] > handLandmarks[2][1]:
        i = 1
    elif np.dot(V1, V2) > 0 and handLandmarks[4][1] < handLandmarks[2][1]:
        i = 2
    return etatDuPouce[i]

def frame():
    # Grab one frame, run the hand detector and draw the landmarks (debug view).
    if cap.isOpened():
        success, image = cap.read()
        if not success:
            # No frame available; skip this call.
            print("Ignoring empty camera frame.")
            return
        # To improve performance, optionally mark the image as not writeable to
        # pass by reference.
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = hands.process(image)
        # Draw the hand annotations on the image.
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing_styles.get_default_hand_landmarks_style(),
                    mp_drawing_styles.get_default_hand_connections_style())
        # Flip the image horizontally for a selfie-view display.
        # cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))
        if cv2.waitKey(5) & 0xFF == 27:
            return
    # cap.release()
def getThumbState():
    # Grab one frame and return the detected thumb gesture, smoothed over
    # the last BUFFER_LENGTH frames.
    if cap.isOpened():
        success, image = cap.read()
        if not success:
            # No frame available; skip this call.
            print("Ignoring empty camera frame.")
            return False
        # To improve performance, optionally mark the image as not writeable to
        # pass by reference.
        image.flags.writeable = False
        # MediaPipe expects RGB input while OpenCV delivers BGR.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = hands.process(image)
        # print(results)
        handLandmarks = []
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                # Fill list with x and y positions of each landmark.
                for landmarks in hand_landmarks.landmark:
                    handLandmarks.append([landmarks.x, landmarks.y])
            thumbState = reconnaissancePouce(handLandmarks)
            # Keep a sliding window of the last BUFFER_LENGTH classifications.
            resultBuffer.append(thumbState)
            if len(resultBuffer) > BUFFER_LENGTH:
                resultBuffer.pop(0)
            thumbsUpCount = sum(map(lambda x: x == "thumbs_up", resultBuffer))
            thumbsDownCount = sum(map(lambda x: x == "thumbs_down", resultBuffer))
            print(thumbsUpCount, thumbsDownCount)  # debug output
            # A gesture is confirmed only if it fills more than TH_FRACTION
            # of the buffer; otherwise the smoothed result is False.
            if thumbsUpCount > TH_FRACTION * BUFFER_LENGTH:
                result = "thumbs_up"
            elif thumbsDownCount > TH_FRACTION * BUFFER_LENGTH:
                result = "thumbs_down"
            else:
                result = False
            if thumbState != "neutre":
                # Return the instantaneous gesture, the palm-centre landmark,
                # an estimate of the hand size (palm-to-wrist distance) and
                # the smoothed result.
                return thumbState, handLandmarks[9], np.linalg.norm(np.array(handLandmarks[9]) - np.array(handLandmarks[0])), result
    return False
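
For context, a minimal sketch of how the kiosk interface might poll this module, assuming it is importable as reconnaissance_image; the module name, the polling loop and the gesture handling below are illustrative assumptions, not part of this commit.

import time
import reconnaissance_image as reco  # assumed module name for the file above

while True:
    detection = reco.getThumbState()
    if detection:
        thumbState, palmCenter, handSize, smoothedResult = detection
        if smoothedResult == "thumbs_up":
            print("Confirmed by thumbs up")    # e.g. validate the current screen
        elif smoothedResult == "thumbs_down":
            print("Rejected by thumbs down")   # e.g. cancel / go back
    time.sleep(1 / 30)  # roughly the camera frame rate

The smoothed result only fires once a gesture has filled more than TH_FRACTION of the BUFFER_LENGTH-frame window, so the interface does not react to a single noisy classification.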