"""Webcam demo: overlay a (very rough) gender label derived from MediaPipe
Face Mesh landmarks onto the live camera feed.

NOTE(review): the eye-distance vs. nose-offset heuristic below has no
scientific basis for gender classification; it is kept only to preserve the
original script's behavior.  Press 'q' to quit.
"""
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)

# Landmark indices from the MediaPipe Face Mesh topology:
# eye-corner rings and the nose tip.
LEFT_EYE = [33, 133, 246, 161, 160, 159, 158, 157, 173, 133]
RIGHT_EYE = [362, 263, 373, 380, 381, 382, 384, 385, 386, 362]
NOSE_TIP = 4

cap = cv2.VideoCapture(0)
try:
    # Create the model ONCE via the context manager.  The original code
    # additionally rebuilt a brand-new FaceMesh on every loop iteration
    # (leaking instances, resetting tracking state, and silently using
    # different parameters) and called cap.read() twice per iteration,
    # discarding the first frame and never checking the second read.
    with mp_face_mesh.FaceMesh(
            max_num_faces=1,
            refine_landmarks=True,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as face_mesh:
        while cap.isOpened():
            # Single, checked frame grab per iteration.
            success, frame = cap.read()
            if not success:
                print("Ignoring empty camera frame.")
                continue

            # MediaPipe expects RGB; OpenCV delivers BGR.
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # Run the face-mesh model on this frame.
            results = face_mesh.process(image)

            if results.multi_face_landmarks:
                landmarks = results.multi_face_landmarks[0]

                # Horizontal pixel positions of one corner of each eye
                # and of the nose tip (landmark .x is normalized [0, 1]).
                left_eye_x = landmarks.landmark[LEFT_EYE[0]].x * image.shape[1]
                right_eye_x = landmarks.landmark[RIGHT_EYE[0]].x * image.shape[1]
                nose_x = landmarks.landmark[NOSE_TIP].x * image.shape[1]

                eye_distance = abs(left_eye_x - right_eye_x)
                # Offset of the nose tip from the midpoint between the eyes.
                nose_distance = abs(nose_x - (left_eye_x + right_eye_x) / 2)

                # Heuristic threshold carried over unchanged from the
                # original script (not a valid classifier -- see module doc).
                if eye_distance > 1.5 * nose_distance:
                    gender = "Female"
                else:
                    gender = "Male"

                cv2.putText(image, gender, (10, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            # Convert back to BGR for display.
            cv2.imshow('Video', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
finally:
    # Release the camera and GUI resources even if an exception occurred.
    cap.release()
    cv2.destroyAllWindows()