Begin implementing the additional criteria ratings (notes_autres) in the backend manager

Quentin Roussel
2023-05-04 00:23:59 +02:00
parent d16f543056
commit b709602be6
5 changed files with 222 additions and 108 deletions


@@ -5,116 +5,192 @@ import os
from dotenv import load_dotenv
load_dotenv()
camera_id = int(os.getenv("CAMERA_ID"))
mp_hands = mp.solutions.hands
cap = cv2.VideoCapture(camera_id)
def prodScalaire(V1,V2):
return (V1[0]*V2[0]+V1[1]*V2[1])/(np.sqrt(V1[0]**2+V1[1]**2)*np.sqrt(V2[0]**2+V2[1]**2))
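
This normalized dot product is simply the cosine of the angle between the two vectors. A minimal equivalent sketch using NumPy (the helper name is illustrative, not part of the commit):

import numpy as np

def cosine_between(v1, v2):
    # Same quantity as prodScalaire: cos(angle) = (v1 . v2) / (|v1| * |v2|)
    return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
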
class HandDetector():
def __init__(self):
self.camera_id = int(os.getenv("CAMERA_ID"))
self.mp_drawing = mp.solutions.drawing_utils
self.mp_drawing_styles = mp.solutions.drawing_styles
self.mp_hands = mp.solutions.hands
self.cap = cv2.VideoCapture(self.camera_id)
self.hands = self.mp_hands.Hands(
model_complexity=0,
min_detection_confidence=0.5,
min_tracking_confidence=0.5)
# Parameters
self.BUFFER_LENGTH = 60
self.DETECTION_THRESHOLD = 3/4
def __init__(self):
self.hands = mp_hands.Hands(
model_complexity=0,
min_detection_confidence=0.5,
min_tracking_confidence=0.5)
# Parameters
self.BUFFER_LENGTH = 200
self.DETECTION_THRESHOLD = 1/2
self.resultBuffer = []
self.resultBuffer = []
def reset(self):
self.resultBuffer = []
def reset(self):
self.resultBuffer = []
def reconnaissancePouce(self,handLandmarks):
etatDuPouce=["neutre","thumbs_down","thumbs_up"]
i=0
j=0
for cpt in range (0,4):
V1=[handLandmarks[(4*cpt)+5][0]-handLandmarks[(4*cpt)+0][0],handLandmarks[(4*cpt)+5][1]-handLandmarks[(4*cpt)+0][1]]
V2=[handLandmarks[(4*cpt)+8][0]-handLandmarks[(4*cpt)+5][0],handLandmarks[(4*cpt)+8][1]-handLandmarks[(4*cpt)+5][1]]
j1=np.dot(V1,V2)
V1=[handLandmarks[(4*cpt)+6][0]-handLandmarks[(4*cpt)+5][0],handLandmarks[(4*cpt)+6][1]-handLandmarks[(4*cpt)+5][1]]
V2=[handLandmarks[(4*cpt)+8][0]-handLandmarks[(4*cpt)+6][0],handLandmarks[(4*cpt)+8][1]-handLandmarks[(4*cpt)+6][1]]
j2=np.dot(V1,V2)
if (j1>0 and j2>0):
return etatDuPouce[0]
V1=[handLandmarks[4][0]-handLandmarks[1][0],handLandmarks[4][1]-handLandmarks[1][1]]
V2=[handLandmarks[2][0]-handLandmarks[1][0],handLandmarks[2][1]-handLandmarks[1][1]]
if((np.dot(V1,V2))>0 and (handLandmarks[4][1]>handLandmarks[2][1])):
i=1
elif(np.dot(V1,V2)>0 and handLandmarks[4][1]<handLandmarks[2][1]):
i=2
return etatDuPouce[i]
def analyse_pouce(self, handLandmarks):
etatDuPouce = ["neutre","thumbs_down","thumbs_up"]
i = 0
j = 0
for cpt in range (0,4):
V1=[handLandmarks[(4*cpt)+5][0]-handLandmarks[(4*cpt)+0][0],handLandmarks[(4*cpt)+5][1]-handLandmarks[(4*cpt)+0][1]]
V2=[handLandmarks[(4*cpt)+8][0]-handLandmarks[(4*cpt)+5][0],handLandmarks[(4*cpt)+8][1]-handLandmarks[(4*cpt)+5][1]]
j1=np.dot(V1,V2)
V1=[handLandmarks[(4*cpt)+6][0]-handLandmarks[(4*cpt)+5][0],handLandmarks[(4*cpt)+6][1]-handLandmarks[(4*cpt)+5][1]]
V2=[handLandmarks[(4*cpt)+8][0]-handLandmarks[(4*cpt)+6][0],handLandmarks[(4*cpt)+8][1]-handLandmarks[(4*cpt)+6][1]]
j2=np.dot(V1,V2)
if (j1>0 and j2>0):
return etatDuPouce[0]
V1=[handLandmarks[4][0]-handLandmarks[1][0],handLandmarks[4][1]-handLandmarks[1][1]]
V2=[handLandmarks[2][0]-handLandmarks[1][0],handLandmarks[2][1]-handLandmarks[1][1]]
if((np.dot(V1,V2))>0 and (handLandmarks[4][1]>handLandmarks[2][1])):
i=1
elif(np.dot(V1,V2)>0 and handLandmarks[4][1]<handLandmarks[2][1]):
i=2
return etatDuPouce[i]
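
Both versions of this routine rely on the same geometric test: a finger counts as extended when vectors taken along it point in roughly the same direction, i.e. their dot product is positive. If any non-thumb finger looks extended the gesture is reported as "neutre"; otherwise the thumb tip's vertical position relative to landmark 2 decides the result (image y grows downward, so a lower tip means thumbs_down). A minimal sketch of the core per-finger test, assuming MediaPipe's 21-landmark hand model (helper name is illustrative):

import numpy as np

def finger_extended(landmarks, mcp, pip, tip):
    # Segment vectors along the finger: MCP -> PIP and PIP -> tip
    v_prox = [landmarks[pip][0] - landmarks[mcp][0], landmarks[pip][1] - landmarks[mcp][1]]
    v_dist = [landmarks[tip][0] - landmarks[pip][0], landmarks[tip][1] - landmarks[pip][1]]
    # Positive dot product: both segments point the same way, so the finger is straight
    return np.dot(v_prox, v_dist) > 0
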
def detect(self):
if self.cap.isOpened():
success, image = self.cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
return False
def loop(self):
if cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
return False
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = self.hands.process(image)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = self.hands.process(image)
if results.multi_hand_landmarks:
handsPositions = []
for hand_landmarks in results.multi_hand_landmarks:
handLandmarks = []
# Fill list with x and y positions of each landmark
for landmarks in hand_landmarks.landmark:
handLandmarks.append([landmarks.x, landmarks.y])
# Add each hand's position to a list
handsPositions.append([self.reconnaissancePouce(handLandmarks), handLandmarks])
# Compute the result according to the positions of both hands
if(len(handsPositions) == 2):
if(handsPositions[0][0] == handsPositions[1][0]):
thumbState = handsPositions[0][0]
handLandmarks = handsPositions[0][1]
elif(handsPositions[0][0] == "neutre"):
thumbState = handsPositions[1][0]
handLandmarks = handsPositions[1][1]
elif(handsPositions[1][0] == "neutre"):
thumbState = handsPositions[0][0]
handLandmarks = handsPositions[0][1]
else:
thumbState = "neutre"
else:
thumbState = handsPositions[0][0]
handLandmarks = handsPositions[0][1]
if results.multi_hand_landmarks:
handsPositions = []
for hand_landmarks in results.multi_hand_landmarks:
handLandmarks = []
# Fill list with x and y positions of each landmark
for landmarks in hand_landmarks.landmark:
handLandmarks.append([landmarks.x, landmarks.y])
# Add each hand's position to a list
handsPositions.append([self.analyse_pouce(handLandmarks), handLandmarks])
# Compute the result according to the positions of both hands
if(len(handsPositions) == 2):
if(handsPositions[0][0] == handsPositions[1][0]):
thumbState = handsPositions[0][0]
handLandmarks = handsPositions[0][1]
elif(handsPositions[0][0] == "neutre"):
thumbState = handsPositions[1][0]
handLandmarks = handsPositions[1][1]
elif(handsPositions[1][0] == "neutre"):
thumbState = handsPositions[0][0]
handLandmarks = handsPositions[0][1]
else:
thumbState = "neutre"
else:
thumbState = handsPositions[0][0]
handLandmarks = handsPositions[0][1]
self.resultBuffer.append(thumbState)
if(len(self.resultBuffer) > self.BUFFER_LENGTH):
self.resultBuffer.pop(0)
thumbsUpCount = sum(map(lambda x : x == "thumbs_up", self.resultBuffer))
thumbsDownCount = sum(map(lambda x : x == "thumbs_down", self.resultBuffer))
self.resultBuffer.append(thumbState)
if(len(self.resultBuffer) > self.BUFFER_LENGTH):
self.resultBuffer.pop(0)
thumbsUpCount = sum(map(lambda x : x == "thumbs_up", self.resultBuffer))
thumbsDownCount = sum(map(lambda x : x == "thumbs_down", self.resultBuffer))
if(thumbsUpCount > self.DETECTION_THRESHOLD * self.BUFFER_LENGTH):
result = "thumbs_up"
elif(thumbsDownCount > self.DETECTION_THRESHOLD * self.BUFFER_LENGTH):
result = "thumbs_down"
else:
result = False
if(thumbsUpCount > self.DETECTION_THRESHOLD * self.BUFFER_LENGTH):
result = "thumbs_up"
elif(thumbsDownCount > self.DETECTION_THRESHOLD * self.BUFFER_LENGTH):
result = "thumbs_down"
else:
result = False
progress = 0
if thumbState == "thumbs_up":
progress = thumbsUpCount / (self.BUFFER_LENGTH * self.DETECTION_THRESHOLD)
elif thumbState == "thumbs_down":
progress = thumbsDownCount / (self.BUFFER_LENGTH * self.DETECTION_THRESHOLD)
if(thumbState != "neutre"):
return thumbState, handLandmarks[9], np.linalg.norm(np.array(handLandmarks[9]) - np.array(handLandmarks[0])), result, progress
return False
progress = 0
if thumbState == "thumbs_up":
progress = thumbsUpCount / (self.BUFFER_LENGTH * self.DETECTION_THRESHOLD)
elif thumbState == "thumbs_down":
progress = thumbsDownCount / (self.BUFFER_LENGTH * self.DETECTION_THRESHOLD)
if(thumbState != "neutre"):
return thumbState, handLandmarks[9], np.linalg.norm(np.array(handLandmarks[9]) - np.array(handLandmarks[0])), result, progress
return False
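
The result buffer above acts as a debouncer: a raw per-frame gesture only becomes a final decision once it accounts for more than DETECTION_THRESHOLD of the last BUFFER_LENGTH frames. A standalone sketch of that majority-vote idea (class and method names are illustrative; the defaults mirror the values in the newer __init__):

class GestureDebouncer:
    def __init__(self, buffer_length=200, threshold=0.5):
        self.buffer_length = buffer_length
        self.threshold = threshold
        self.buffer = []

    def push(self, label):
        # Keep a rolling window of the most recent per-frame labels
        self.buffer.append(label)
        if len(self.buffer) > self.buffer_length:
            self.buffer.pop(0)
        # Confirm the gesture only once it dominates the recent history
        if self.buffer.count(label) > self.threshold * self.buffer_length:
            return label
        return None
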
class FingerCountDetector():
def __init__(self):
self.BUFFER_LENGTH = 20
self.DETECTION_THRESHOLD = 1/2
self.hands = mp_hands.Hands(static_image_mode=False, max_num_hands=2, min_detection_confidence=0.5, min_tracking_confidence=0.5)
self.buffer = []
def reset(self):
self.buffer = []
def getResult(self):
stats = [0] * 10
for grade in self.buffer:
stats[grade-1] = stats[grade-1]+1
stats = [stat / self.BUFFER_LENGTH for stat in stats]
print(stats)
if max(stats) > self.DETECTION_THRESHOLD:
return stats.index(max(stats)) + 1
def prodScalaire(self,V1,V2):
return (V1[0]*V2[0]+V1[1]*V2[1])/((V1[0]**2+V1[1]**2)**(1/2)*(V2[0]**2+V2[1]**2)**(1/2)) # normalized dot product
# Computes the dot product between the two vectors formed by the landmark pairs (id0,id1) and (id2,id3) in the landmarks list
def prodScalaireDoigts(self,landmarks,id0,id1,id2,id3):
V0= [landmarks[id0].x - landmarks[id1].x, landmarks[id0].y - landmarks[id1].y]
V1= [landmarks[id2].x - landmarks[id3].x, landmarks[id2].y - landmarks[id3].y]
return self.prodScalaire(V0,V1)
# Returns the number of raised fingers for a given hand landmark list
def analyseMain(self,hand_landmarks):
finger_count = 0
pouce = self.prodScalaireDoigts(hand_landmarks,2,0,4,2)
index = self.prodScalaireDoigts(hand_landmarks,8,6,6,0)
majeur = self.prodScalaireDoigts(hand_landmarks,12,10,10,0)
annulaire= self.prodScalaireDoigts(hand_landmarks,16,14,14,0)
auriculaire = self.prodScalaireDoigts(hand_landmarks,20,18,18,0)
if pouce > 0.905135675:
finger_count += 1
if index > 0:
finger_count += 1
if majeur > 0:
finger_count += 1
if annulaire > 0:
finger_count += 1
if auriculaire > 0:
finger_count += 1
return finger_count
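
analyseMain counts a finger as raised when the normalized dot product between the two vectors passed to prodScalaireDoigts is positive; the thumb check uses a much stricter threshold (roughly cos 25°) instead of a simple sign test. One possible table-driven restatement of those checks, reusing the indices and thresholds from the diff (the table layout itself is illustrative, not part of the commit):

# (id0, id1, id2, id3, threshold) as passed to prodScalaireDoigts
FINGER_CHECKS = [
    (2, 0, 4, 2, 0.905135675),   # thumb: segments must be almost parallel
    (8, 6, 6, 0, 0.0),           # index
    (12, 10, 10, 0, 0.0),        # middle
    (16, 14, 14, 0, 0.0),        # ring
    (20, 18, 18, 0, 0.0),        # pinky
]

def count_raised_fingers(detector, hand_landmarks):
    return sum(
        1 for a, b, c, d, threshold in FINGER_CHECKS
        if detector.prodScalaireDoigts(hand_landmarks, a, b, c, d) > threshold
    )
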
def loop(self):
if cap.isOpened():
# read a frame from the camera
ret, frame = cap.read()
# convert the image to RGB
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# run hand detection
results = self.hands.process(image)
if results.multi_hand_landmarks:
# count the number of raised fingers
finger_count = 0
if len(results.multi_hand_landmarks) >0 :
finger_count += self.analyseMain(results.multi_hand_landmarks[0].landmark)
if len(results.multi_hand_landmarks) >1 :
finger_count += self.analyseMain(results.multi_hand_landmarks[1].landmark)
self.buffer.append(finger_count)
if(len(self.buffer) > self.BUFFER_LENGTH):
self.buffer.pop(0)
return self.getResult()
if __name__ == "__main__":
h = HandDetector()
while(1):
print(h.detect())
h = FingerCountDetector()
while(1):
print(h.loop())


@@ -1,4 +1,4 @@
from hand_detector import HandDetector
from hand_detector import HandDetector, FingerCountDetector
from audio_detector import record, analyze, test
from network import ApiClient, WebsocketServer
import time
@@ -20,10 +20,14 @@ class Manager():
self.server = WebsocketServer(None)
self.server.start()
self.handDetector = HandDetector()
self.fingerCountDetector = FingerCountDetector()
self.api = ApiClient()
self.timeLastChange = time.time()
self.isLastHandPacketEmpty = False
self.recongizer = sr.Recognizer()
#For step 2
self.criteria_list = self.api.get_criteria()
self.currentCriteria = None
print("Backend ready")
# Main loop
@@ -34,14 +38,16 @@ class Manager():
if(self.state == 1):
self.camera()
if(self.state == 2):
self.audio()
self.grade()
if(self.state == 3):
self.audio()
if(self.state == 4):
self.thankYou()
time.sleep(0.01)
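
With this commit the kiosk's state machine gains a dedicated grading step between the thumb rating and the audio comment. A hedged summary of the state numbers used in the dispatch above (the enum is purely illustrative, the code passes plain integers; state 0 is inferred from reset() and the sleep() handler):

from enum import IntEnum

class KioskState(IntEnum):
    SLEEP = 0      # standby, waiting for a hand to wake the kiosk (inferred)
    CAMERA = 1     # thumbs up / thumbs down rating
    GRADE = 2      # per-criterion finger-count grades (added in this commit)
    AUDIO = 3      # spoken comment recording
    THANK_YOU = 4  # send the review and reset
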
# Function executed while the kiosk is in standby; wakes the kiosk if a hand is detected
def sleep(self):
res = self.handDetector.detect()
res = self.handDetector.loop()
if(res != False):
self.state = 1
self.timeLastChange = time.time()
@@ -54,7 +60,7 @@ class Manager():
self.reset()
return
res = self.handDetector.detect()
res = self.handDetector.loop()
if(res != False):
state, coords, size, finalDecision, progress = res
self.server.sendMessage({"type": "effects", "effects": [{"type": "loading", "x":coords[0], "y": coords[1], "width": size, "height": size, "progress": progress},{"type": state, "x":coords[0], "y": coords[1], "width": size, "height": size}]})
@@ -68,6 +74,24 @@ class Manager():
self.server.sendMessage({"type":"effects","effects":[]})
self.isLastHandPacketEmpty = True
def grade(self):
if(self.currentCriteria == None):
self.currentCriteria = 0
self.server.sendMessage({"type":"new_criteria","criteria":self.criteria_list[self.currentCriteria]})
return
grade = self.fingerCountDetector.loop()
if(grade != None):
self.avis["notes_autres"][self.criteria_list[self.currentCriteria]] = grade
self.currentCriteria+=1
if(self.currentCriteria < len(self.criteria_list)):
self.server.sendMessage({"type":"new_criteria","criteria":self.criteria_list[self.currentCriteria]})
self.fingerCountDetector.reset()
else:
self.state = 3
self.server.sendMessage({"type": "state", "state": 3})
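
During this new grading step the backend announces each criterion to the frontend over the websocket and stores the confirmed finger count in avis["notes_autres"]. A sketch of the data that accumulates there (the criterion names are made up; the real list comes from ApiClient.get_criteria(), and the rest of the avis structure is not shown in this diff):

# Illustrative shape of the extra grades collected by grade()
avis = {
    "notes_autres": {
        "accueil": 8,     # grade confirmed by FingerCountDetector.loop()
        "proprete": 6,
    },
}
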
def audio(self):
time.sleep(3)
self.server.sendMessage({"type":"recording_started"})
@@ -96,8 +120,8 @@ class Manager():
# Move on to the next step
time.sleep(5)
self.state = 3
self.server.sendMessage({"type": "state", "state": 3})
self.state = 4
self.server.sendMessage({"type": "state", "state": 4})
def thankYou(self):
# Send the review to the API
@@ -111,6 +135,7 @@ class Manager():
def reset(self):
self.state = 0
self.currentCriteria = None
self.avis = self.defualtAvis
self.handDetector.reset()


@@ -41,7 +41,6 @@ class ApiClient():
self.port = port
def send(self,note,commentaire):
# Example: adding a review from the kiosk (web site or gesture)
avis = {
"note": note,
@@ -50,3 +49,11 @@ class ApiClient():
}
print(self.host, self.port)
return requests.post("http://"+self.host+":"+self.port+"/add_review", data=avis)
def get_criteria(self):
res = []
crits = json.loads(requests.get("http://"+self.host+":"+self.port+"/borne/get_criteres").text)
for c in crits:
res.append(c["nom"])
return res
print(ApiClient().get_criteria())
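
get_criteria assumes the /borne/get_criteres endpoint returns a JSON array of objects that each carry a nom field, and reduces it to a flat list of names. A small sketch of that parsing step with an illustrative response body (the values are made up; nom is the only field the code actually reads):

import json

# Illustrative response body for GET /borne/get_criteres
body = '[{"id": 1, "nom": "proprete"}, {"id": 2, "nom": "accueil"}]'
criteria = [c["nom"] for c in json.loads(body)]
print(criteria)  # ['proprete', 'accueil'] -- what the manager iterates over in grade()
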


@@ -64,7 +64,7 @@ const getReviewFromId = (id) => {
const getCriteres = () => {
return new Promise((resolve, reject) => {
let sql = `SELECT * FROM borne_criteres`;
conn.query(sql, [limit], (err, res) => {
conn.query(sql, (err, res) => {
if (err) {
reject(err);
} else {
@@ -237,4 +237,4 @@ export const handleGetStats = (req, res) => {
res.status(500).send("Error: " + err.message);
});
}
}
}