Mirror of https://git.roussel.pro/telecom-paris/pact.git, synced 2026-02-09 02:20:17 +01:00
Code simplification
code/Interface_Lounes/determinationGenre.py (new file, 59 lines)
@@ -0,0 +1,59 @@
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh

drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
cap = cv2.VideoCapture(0)
with mp_face_mesh.FaceMesh(
        max_num_faces=1,
        refine_landmarks=True,
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5) as face_mesh:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            continue

        # Initialize the face mesh model
        face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.5)

        # Read a frame from the video
        ret, frame = cap.read()
        # Convert the image to RGB
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Process the image and extract the landmarks
        results = face_mesh.process(image)
        if results.multi_face_landmarks:
            landmarks = results.multi_face_landmarks[0]

            # Define the landmark indices for the corners of the eyes and the tip of the nose
            left_eye = [33, 133, 246, 161, 160, 159, 158, 157, 173, 133]
            right_eye = [362, 263, 373, 380, 381, 382, 384, 385, 386, 362]
            nose_tip = 4

            # Calculate the distance between the eyes and the nose tip
            left_eye_x = landmarks.landmark[left_eye[0]].x * image.shape[1]
            right_eye_x = landmarks.landmark[right_eye[0]].x * image.shape[1]
            nose_x = landmarks.landmark[nose_tip].x * image.shape[1]
            eye_distance = abs(left_eye_x - right_eye_x)
            nose_distance = abs(nose_x - (left_eye_x + right_eye_x) / 2)

            # Determine the gender based on the eye and nose distances
            if eye_distance > 1.5 * nose_distance:
                gender = "Female"
            else:
                gender = "Male"

            # Draw the gender label on the image
            cv2.putText(image, gender, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        # Display the video
        cv2.imshow('Video', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

# Release the camera and resources
cap.release()
cv2.destroyAllWindows()
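For reference, here is a worked instance of the ratio test used above, with made-up numbers (the 640-pixel frame width and the landmark x-values are purely illustrative and not taken from the commit):

# Hypothetical numbers: a 640-pixel-wide frame and normalized x-coordinates
# of 0.40, 0.60 and 0.52 for the two eye corners and the nose tip.
frame_width = 640
left_eye_x = 0.40 * frame_width    # 256.0
right_eye_x = 0.60 * frame_width   # 384.0
nose_x = 0.52 * frame_width        # 332.8

eye_distance = abs(left_eye_x - right_eye_x)                   # 128.0
nose_distance = abs(nose_x - (left_eye_x + right_eye_x) / 2)   # 12.8

# 128.0 > 1.5 * 12.8, so this sample face would be labelled "Female".
gender = "Female" if eye_distance > 1.5 * nose_distance else "Male"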
@@ -13,16 +13,16 @@ def reconnaissancePouce(handLandmarks):
     i=0
     j=0
     for cpt in range (0,4):
-        V1=[handLandmarks[(4*cpt)+6][0]-handLandmarks[(4*cpt)+5][0],handLandmarks[(4*cpt)+6][1]-handLandmarks[(4*cpt)+5][1]]
+        V1=[handLandmarks[(4*cpt)+6].x-handLandmarks[(4*cpt)+5].x,handLandmarks[(4*cpt)+6].y-handLandmarks[(4*cpt)+5].y]
-        V2=[handLandmarks[(4*cpt)+8][0]-handLandmarks[(4*cpt)+6][0],handLandmarks[(4*cpt)+8][1]-handLandmarks[(4*cpt)+6][1]]
+        V2=[handLandmarks[(4*cpt)+8].x-handLandmarks[(4*cpt)+6].x,handLandmarks[(4*cpt)+8].y-handLandmarks[(4*cpt)+6].y]
         j=np.dot(V1,V2)
         if (j>0):
             return etatDuPouce[0]
-    V1=[handLandmarks[4][0]-handLandmarks[1][0],handLandmarks[4][1]-handLandmarks[1][1]]
+    V1=[handLandmarks[4].x-handLandmarks[1].x,handLandmarks[4].y-handLandmarks[1].y]
-    V2=[handLandmarks[2][0]-handLandmarks[1][0],handLandmarks[2][1]-handLandmarks[1][1]]
+    V2=[handLandmarks[2].x-handLandmarks[1].x,handLandmarks[2].y-handLandmarks[1].y]
-    if((np.dot(V1,V2))>0 and (handLandmarks[4][1]>handLandmarks[2][1])):
+    if((np.dot(V1,V2))>0 and (handLandmarks[4].y>handLandmarks[2].y)):
         i=1
-    elif(np.dot(V1,V2)>0 and handLandmarks[4][1]<handLandmarks[2][1]):
+    elif(np.dot(V1,V2)>0 and handLandmarks[4].y<handLandmarks[2].y):
         i=2
     return etatDuPouce[i]
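The indexing change above ([0]/[1] replaced by .x/.y) suggests reconnaissancePouce is now handed MediaPipe's landmark objects directly instead of [x, y] lists. A minimal calling sketch under that assumption; the Hands setup and variable names below are not part of the commit, and reconnaissancePouce together with its etatDuPouce list are assumed to be defined in the same module:

import cv2
import mediapipe as mp

mp_hands = mp.solutions.hands

cap = cv2.VideoCapture(0)
with mp_hands.Hands(max_num_hands=1, min_detection_confidence=0.5) as hands:
    success, frame = cap.read()
    if success:
        results = hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        if results.multi_hand_landmarks:
            # Each entry of .landmark exposes .x/.y/.z attributes, which is
            # what the updated reconnaissancePouce expects.
            hand_landmarks = results.multi_hand_landmarks[0].landmark
            print(reconnaissancePouce(hand_landmarks))
cap.release()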