Merge branch 'quentin' of https://gitlab.telecom-paris.fr/pact/2022-2023/pact71 into quentin
Binary files not shown (new files: code/backend_reconnaissance/__pycache__/manager.cpython-38.pyc and code/backend_reconnaissance/__pycache__/network.cpython-38.pyc, among other binary changes).
@@ -119,7 +119,6 @@ def init_database():
        if not os.path.isfile(os.path.join(data_dir, word)):
            for file in os.listdir(os.path.join(data_dir, word)):
                if os.path.isfile(os.path.join(data_dir, word, file)):
                    print(word, os.path.join(data_dir, word, file))
                    words.append(word)
                    files.append(os.path.join(data_dir, word, file))
    return words, files
@@ -130,7 +129,23 @@ def get_word_metadata(word):
    return data[word]

# TODO: detect when no grade is given
def get_grade():
def record():
    sr = 44100  # sampling rate
    duration = 6  # recording duration in seconds
    filename = "recording"  # name of the file to record
    record_audio(filename, duration, sr)
    audio_query, sr = librosa.load(f'{filename}.wav', sr=sr)
    return audio_query, sr

def analyze(audio_query, sr):
    coupe_silence(audio_query)
    words, files = init_database()
    audio_train_list = [librosa.load(file, sr=sr)[0] for file in files]
    recognized_word_index = recognize_speech(audio_query, audio_train_list, sr)
    recognized_word = words[recognized_word_index]
    return get_word_metadata(recognized_word)

def test():
    sr = 44100  # sampling rate
    duration = 6  # recording duration in seconds
    filename = "recording"  # name of the file to record

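In this hunk, record() and analyze() replace the old get_grade(): record() captures six seconds of audio through a record_audio() helper that is not part of this hunk, then loads it back with librosa. As a rough idea of what such a helper could look like, here is a hedged sketch using sounddevice and soundfile; the project's actual record_audio() lives elsewhere in the file and may differ.

# Hypothetical sketch only: the real record_audio() in this module is not shown
# in the diff and may be implemented differently.
import sounddevice as sd
import soundfile as sf

def record_audio(filename, duration, sr):
    # Capture `duration` seconds of mono audio at `sr` Hz and wait for completion.
    audio = sd.rec(int(duration * sr), samplerate=sr, channels=1)
    sd.wait()
    # Write the buffer to <filename>.wav so librosa.load() can read it back.
    sf.write(f"{filename}.wav", audio, sr)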
@@ -1,13 +1,18 @@
import cv2
import mediapipe as mp
import numpy as np
import os
from dotenv import load_dotenv

load_dotenv()


class HandDetector():
    def __init__(self):
        self.camera_id = int(os.getenv("CAMERA_ID"))
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_drawing_styles = mp.solutions.drawing_styles
        self.mp_hands = mp.solutions.hands
        self.cap = cv2.VideoCapture(0)
        self.cap = cv2.VideoCapture(self.camera_id)
        self.hands = self.mp_hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.5,
@@ -51,8 +56,9 @@ class HandDetector():
        # To improve performance, optionally mark the image as not writeable to
        # pass by reference.
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = self.hands.process(image)
        # print(results)

        if results.multi_hand_landmarks:
            handsPositions = []
            for hand_landmarks in results.multi_hand_landmarks:

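The camera index is now read from a CAMERA_ID environment variable via python-dotenv instead of being hard-coded to 0. Note that int(os.getenv("CAMERA_ID")) raises a TypeError when the variable is unset; a small hedged variant with a fallback is sketched below (the default of 0 is an assumption, not part of the committed code).

import os

# Fall back to device 0 when CAMERA_ID is not defined in the environment or .env file.
camera_id = int(os.getenv("CAMERA_ID", "0"))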
@@ -1,5 +1,5 @@
from hand_detector import HandDetector
from audio_detector import get_grade
from audio_detector import record, analyze, test
from network import ApiClient, WebsocketServer
import time

@@ -57,23 +57,26 @@ class Manager():
            state, coords, size, finalDecision = res
            self.server.sendMessage({"type": "effects", "effects": [{"type": state, "x": coords[0], "y": coords[1], "width": size, "height": size}]})
            self.isLastHandPacketEmpty = False
            self.timeLastChange = time.time()
            if(finalDecision != False):
                self.avis["note"] = 10 if finalDecision == "thumbs_up" else 0
                self.state = 2
                self.timeLastChange = time.time()
                self.server.sendMessage({"type": "state", "state": 2})
        elif self.isLastHandPacketEmpty == False:
            self.server.sendMessage({"type": "effects", "effects": []})
            self.isLastHandPacketEmpty = True

    def audio(self):
        result = get_grade()
        audio_query, sr = record()
        self.server.sendMessage({"type": "recording_done"})
        result = analyze(audio_query, sr)
        # result = test()
        if(result != False):
            print("mot detecté : " + result["display"] + " avec une note de " + str(result["grade"]))
            self.server.sendMessage({"type": "new_grade", "word": result["display"]})
            self.avis["notes_autres"]["test"] = result["grade"]
        time.sleep(3)
        self.state = 3
        self.timeLastChange = time.time()
        self.server.sendMessage({"type": "state", "state": 3})

    def thankYou(self):

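For reference, these are the websocket message shapes Manager sends in this hunk; the field values below are illustrative examples only, not captured traffic.

# Illustrative payloads; the values are made up, only the keys come from the diff.
effects_msg   = {"type": "effects", "effects": [{"type": "thumbs_up", "x": 0.5, "y": 0.5, "width": 80, "height": 80}]}
state_msg     = {"type": "state", "state": 2}
recording_msg = {"type": "recording_done"}   # sent once recording ends, before analysis
grade_msg     = {"type": "new_grade", "word": "génial"}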
@@ -4,6 +4,9 @@ import json
import os
import threading
import websockets
from dotenv import load_dotenv

load_dotenv()

class WebsocketServer(threading.Thread):
    def __init__(self, onMessage, port=os.getenv("PORT"), host=os.getenv("HOST")):

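WebsocketServer now takes its port and host from PORT and HOST environment variables loaded with python-dotenv. A minimal standalone sketch of a comparable server with the websockets library (handler signature of websockets >= 10.1) is shown below; it is not the project's actual class, and the defaults are assumptions.

# Hedged sketch, not the project's WebsocketServer; defaults are assumptions.
import asyncio
import os
import websockets

async def handler(ws):
    # Print incoming messages; the real server hands them to an onMessage callback.
    async for message in ws:
        print("received:", message)

async def main():
    host = os.getenv("HOST", "localhost")
    port = int(os.getenv("PORT", "5000"))
    async with websockets.serve(handler, host, port):
        await asyncio.Future()  # run until cancelled

if __name__ == "__main__":
    asyncio.run(main())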
@@ -85,22 +85,22 @@ services:
  # # Kiosk backend: Python scripts for video and audio recognition
  # # They send info to the kiosk interface over websocket to update the UI quickly
  # # They update the reviews by making requests to the API
  backend_reconnaissance:
    build: ./backend_reconnaissance
    container_name: backend_reconnaissance
    restart: always
    devices:
      - /dev/video3:/dev/video0
      - /dev/snd:/dev/snd
    environment:
      - PORT=5000
      - HOST=backend_reconnaissance
      - API_HOST=reviews_api
      - API_PORT=8080
    ports:
      # This container is the websocket server whose client is the kiosk interface running in the browser
      - 5000:5000
    user: root
  # backend_reconnaissance:
  #   build: ./backend_reconnaissance
  #   container_name: backend_reconnaissance
  #   restart: always
  #   devices:
  #     - /dev/video3:/dev/video0
  #     - /dev/snd:/dev/snd
  #   environment:
  #     - PORT=5000
  #     - HOST=backend_reconnaissance
  #     - API_HOST=reviews_api
  #     - API_PORT=8080
  #   ports:
  #     # This container is the websocket server whose client is the kiosk interface running in the browser
  #     - 5000:5000
  #   user: root

  video_loopback:
    build: ./video_loopback

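The container passes PORT, HOST, API_HOST and API_PORT to the backend. A hedged sketch of how these could be consumed on the Python side follows; ApiClient's real constructor is not shown in this diff, so the URL scheme is an assumption.

import os

# Assumed consumption of the compose environment; only the variable names come from the diff.
ws_host = os.getenv("HOST", "backend_reconnaissance")
ws_port = int(os.getenv("PORT", "5000"))
api_base_url = f"http://{os.getenv('API_HOST', 'reviews_api')}:{os.getenv('API_PORT', '8080')}"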
@@ -7,16 +7,21 @@ class AudioPage {
    set enabled(isEnabled) {
        this.isEnabled = isEnabled;
        this.DOMElement.style.display = isEnabled ? "block" : "none";
        document.getElementById("grade").innerHTML = "";
        document.getElementById("audio_status").innerHTML = "Enregistrement...";
    }
    onRecordingDone() {
        if(this.isEnabled) {
            document.getElementById("audio_status").innerHTML = "Traitement...";
        }
    }

    setGrade(grade) {
        if(this.isEnabled) {
            document.getElementById("grade").innerHTML = grade.toString();
            document.getElementById("audio_status").innerHTML = grade;
        }
    }

    reset() {
        document.getElementById("grade").innerHTML = "";
        document.getElementById("audio_status").innerHTML = "Enregistrement...";
    }
}
@@ -1,5 +1,5 @@
class WebsocketClient {
    constructor(onNewEffects, onNewState, onNewGrade, onReset) {
    constructor(onNewEffects, onNewState, onNewGrade, onReset, onRecordingDone) {
        this.socket = new WebSocket("ws://localhost:5000");
        this.socket.addEventListener("open", (event) => {
            this.socket.send("connected");
@@ -13,10 +13,13 @@ class WebsocketClient {
            }else if(msg.type == "state") {
                onNewState(msg.state);
            }else if(msg.type == "new_grade") {
                onNewGrade(Number(msg.grade));
                onNewGrade(msg.word);
            }else if(msg.type == "reset") {
                onReset();
            }
            else if(msg.type == "recording_done") {
                onRecordingDone();
            }
        };
    }
}
@@ -21,6 +21,7 @@ class StateManager {
            (state) => this.setState(state),
            (grade) => this._audioPage.setGrade(grade),
            () => this.reset(),
            () => this._audioPage.onRecordingDone(),
        );

        this._sleepingPage.enabled = true;

@@ -35,17 +35,15 @@
            <div class="title">
                <h1>Dites-nous en plus</h1>
            </div>
            <p>Donnez une note sur 10 au critère suivant</p>
            <table>
                <tr>
                    <th>Critère</th>
                    <th>Note / 10</th>
                </tr>
                <tr>
                    <td>Calme</td>
                    <td><span id="grade"></span>/10</td>
                </tr>
            </table>
            <p>Comment avez-vous trouvé l'exposition ... ?</p>
            <p>Dites un mot parmi la liste suivante</p>
            <ul>
                <li>J'ai beaucoup aimé</li>
                <li>génial</li>
                <li>Ennuyant</li>
                <li>Nul</li>
            </ul>
            <p>Mot détecté : <span id="audio_status"></span></p>
        </div>
    </div>
    <div id="thank-you">