Ayushdavidkushwahaaaa committed on
Commit 7054162
1 Parent(s): 543abec

Create app.py

Files changed (1)
  1. app.py +74 -0
app.py ADDED
@@ -0,0 +1,74 @@
import warnings

import gradio as gr
import torch
from transformers import pipeline

warnings.filterwarnings('ignore')

MODEL_NAME = "openai/whisper-small"
BATCH_SIZE = 8

# Use the first GPU if one is available, otherwise fall back to the CPU.
device = 0 if torch.cuda.is_available() else "cpu"

# Speech-to-text pipeline (Whisper small), chunking long recordings into 30-second windows.
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device)

# Multilingual emotion classifier; return_all_scores=True yields a score for every emotion label.
emotion_classifier = pipeline("text-classification", model='MilaNLProc/xlm-emo-t', return_all_scores=True)

def transcribe(microphone, file_upload, task):
    output = ""
    if (microphone is not None) and (file_upload is not None):
        output = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )

    elif (microphone is None) and (file_upload is None):
        raise gr.Error("You have to either use the microphone or upload an audio file")

    file = microphone if microphone is not None else file_upload

    text = pipe(file, batch_size=BATCH_SIZE, generate_kwargs={"task": task})["text"]

    return output + text

def translate_and_classify(audio):
    text_result = transcribe(audio, None, "transcribe")
    # The classifier returns one list of {"label", "score"} dicts per input text.
    emotion_scores = emotion_classifier(text_result)
    detected_emotion = {item["label"]: item["score"] for item in emotion_scores[0]}
    return text_result, detected_emotion

with gr.Blocks() as demo:

    gr.Markdown(
        """ # Emotion Detection from Speech

        ##### Detection of anger, sadness, joy, fear in speech using OpenAI Whisper and XLM-RoBERTa

        """)

    with gr.Column():
        with gr.Tab("Record Audio"):
            # The 'source' argument is no longer supported, use 'sources' instead
            audio_input_r = gr.Audio(label='Record Audio Input', sources=["microphone"], type="filepath")
            transcribe_audio_r = gr.Button('Transcribe')

        with gr.Tab("Upload Audio as File"):
            # The 'source' argument is no longer supported, use 'sources' instead
            audio_input_u = gr.Audio(label='Upload Audio', sources=["upload"], type="filepath")
            transcribe_audio_u = gr.Button('Transcribe')

    with gr.Row():
        transcript_output = gr.Textbox(label="Transcription in the language of speech/audio", lines=3)
        emotion_output = gr.Label(label="Detected Emotion")

    transcribe_audio_r.click(translate_and_classify, inputs=audio_input_r, outputs=[transcript_output, emotion_output])
    transcribe_audio_u.click(translate_and_classify, inputs=audio_input_u, outputs=[transcript_output, emotion_output])

demo.launch(share=True)
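
For a quick sanity check of the same transcribe-then-classify flow outside the Gradio UI, a minimal sketch like the one below can be run on any short speech clip. It is not part of this commit: the model IDs match the app above, while the file path sample.wav is only a placeholder assumption.

# Standalone sanity check of the two-stage flow (a sketch, not part of the committed app.py).
# "sample.wav" is a placeholder path; replace it with any short speech recording on disk.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="openai/whisper-small", chunk_length_s=30)
classifier = pipeline("text-classification", model="MilaNLProc/xlm-emo-t", return_all_scores=True)

text = asr("sample.wav", generate_kwargs={"task": "transcribe"})["text"]
scores = {item["label"]: item["score"] for item in classifier(text)[0]}
print(text)
print(max(scores, key=scores.get))  # emotion label with the highest score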