Merge branch 'Interface_Lounes' of gitlab.enst.fr:pact/2022-2023/pact71 into quentin

This commit is contained in:
Quentin Roussel
2023-03-23 00:00:48 +01:00
2 changed files with 147 additions and 0 deletions

View File

@@ -0,0 +1,59 @@
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)

cap = cv2.VideoCapture(0)
# Initialize the face mesh model once, outside the capture loop
with mp_face_mesh.FaceMesh(
        max_num_faces=1,
        refine_landmarks=True,
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5) as face_mesh:
    while cap.isOpened():
        # Read one frame from the video stream
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            continue
        # Convert the image to RGB
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Process the image and extract the landmarks
        results = face_mesh.process(image)
        if results.multi_face_landmarks:
            landmarks = results.multi_face_landmarks[0]
            # Landmark indices for the corners of the eyes and the tip of the nose
            left_eye = [33, 133, 246, 161, 160, 159, 158, 157, 173, 133]
            right_eye = [362, 263, 373, 380, 381, 382, 384, 385, 386, 362]
            nose_tip = 4
            # Pixel distance between the eyes, and from the nose tip
            # to the midpoint between the eyes
            left_eye_x = landmarks.landmark[left_eye[0]].x * image.shape[1]
            right_eye_x = landmarks.landmark[right_eye[0]].x * image.shape[1]
            nose_x = landmarks.landmark[nose_tip].x * image.shape[1]
            eye_distance = abs(left_eye_x - right_eye_x)
            nose_distance = abs(nose_x - (left_eye_x + right_eye_x) / 2)
            # Crude heuristic: guess the gender from the eye/nose distance ratio
            if eye_distance > 1.5 * nose_distance:
                gender = "Female"
            else:
                gender = "Male"
            # Overlay the guess on the image
            cv2.putText(image, gender, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        # Display the video
        cv2.imshow('Video', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
# Release the camera and resources
cap.release()
cv2.destroyAllWindows()
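
The gender guess above reduces to one ratio test: compare the inter-eye pixel distance with the offset of the nose tip from the midpoint between the eyes. A minimal sketch of that test as a pure function, with made-up pixel coordinates (the 1.5 threshold comes from the script; the function name and sample values are illustrative only):

def guess_gender(left_eye_x, right_eye_x, nose_x, ratio=1.5):
    # Same heuristic as the script above, extracted for easy testing
    eye_distance = abs(left_eye_x - right_eye_x)
    nose_distance = abs(nose_x - (left_eye_x + right_eye_x) / 2)
    return "Female" if eye_distance > ratio * nose_distance else "Male"

# Hypothetical coordinates: eyes at x=200 and x=320, nose tip at x=262
# eye_distance = 120, nose_distance = |262 - 260| = 2, and 120 > 1.5 * 2
print(guess_gender(200, 320, 262))  # -> Female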

View File

@@ -0,0 +1,88 @@
import cv2
import numpy as np
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands

def prodScalaire(V1, V2):
    # 2-D dot product (unused below; np.dot is used instead)
    return V1[0]*V2[0] + V1[1]*V2[1]

def reconnaissancePouce(handLandmarks):
    # handLandmarks is a list of [x, y] pairs, one per MediaPipe hand landmark
    etatDuPouce = ["neutre", "baissé", "levé"]  # neutral, thumb down, thumb up
    i = 0
    # If any non-thumb finger is extended (positive dot product between its
    # two phalanx vectors), the gesture is neutral
    for cpt in range(0, 4):
        V1 = [handLandmarks[4*cpt+6][0] - handLandmarks[4*cpt+5][0],
              handLandmarks[4*cpt+6][1] - handLandmarks[4*cpt+5][1]]
        V2 = [handLandmarks[4*cpt+8][0] - handLandmarks[4*cpt+6][0],
              handLandmarks[4*cpt+8][1] - handLandmarks[4*cpt+6][1]]
        if np.dot(V1, V2) > 0:
            return etatDuPouce[0]
    # Thumb extended: compare the tip's y to that of the joint below it
    # (image y grows downward, so a larger y means the thumb points down)
    V1 = [handLandmarks[4][0] - handLandmarks[1][0],
          handLandmarks[4][1] - handLandmarks[1][1]]
    V2 = [handLandmarks[2][0] - handLandmarks[1][0],
          handLandmarks[2][1] - handLandmarks[1][1]]
    if np.dot(V1, V2) > 0 and handLandmarks[4][1] > handLandmarks[2][1]:
        i = 1
    elif np.dot(V1, V2) > 0 and handLandmarks[4][1] < handLandmarks[2][1]:
        i = 2
    return etatDuPouce[i]

cap = cv2.VideoCapture(0)
with mp_hands.Hands(
        model_complexity=0,
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5) as hands:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            # If loading a video, use 'break' instead of 'continue'.
            continue
        # To improve performance, optionally mark the image as not writeable to
        # pass by reference.
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = hands.process(image)
        # Draw the hand annotations on the image.
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing_styles.get_default_hand_landmarks_style(),
                    mp_drawing_styles.get_default_hand_connections_style())
            # Collect the (x, y) position of each landmark of the first hand
            handLandmarks = []
            for landmarks in results.multi_hand_landmarks[0].landmark:
                handLandmarks.append([landmarks.x, landmarks.y])
            cv2.putText(image, reconnaissancePouce(handLandmarks), (50, 450),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)
        # Flip the image horizontally for a selfie-view display.
        cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))
        if cv2.waitKey(5) & 0xFF == 27:
            break
cap.release()
cv2.destroyAllWindows()
""" etatDuPouce=["neutre","baissé","levé"]
i=0
if results.multi_hand_landmarks:
if(results.multi_hand_landmarks.gestures.categories[0].categoryName==Thumb_Up):
cv2.putText(image, str(results.multi_hand_landmarks.gestures.categories[0].categoryName), (50, 450), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)
else:
cv2.putText(image, "raté", (50, 450), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 10)
"""