import warnings

import gradio as gr
import torch
from transformers import pipeline

warnings.filterwarnings("ignore")

MODEL_NAME = "openai/whisper-small"
BATCH_SIZE = 8

device = 0 if torch.cuda.is_available() else "cpu"  # GPU index 0 if available, otherwise CPU

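# Speech recognition: Whisper (via the transformers ASR pipeline) transcribes the audio in 30-second chunks.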
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device)

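# Emotion detection: XLM-EMO (XLM-RoBERTa fine-tuned for emotions); return_all_scores=True returns a score for every label.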
emotion_classifier = pipeline("text-classification", model="MilaNLProc/xlm-emo-t", return_all_scores=True)

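# Transcribe either the microphone recording or the uploaded file, preferring the microphone when both are provided.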
def transcribe(microphone, file_upload, task):
    output = ""
    if (microphone is not None) and (file_upload is not None):
        # Both inputs were given: keep the recording and prepend a warning to the transcript.
        output = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )

    elif (microphone is None) and (file_upload is None):
        raise gr.Error("You have to either use the microphone or upload an audio file")

    file = microphone if microphone is not None else file_upload

    text = pipe(file, batch_size=BATCH_SIZE, generate_kwargs={"task": task})["text"]

    return output + text

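# UI callback: transcribe the audio, then score each emotion label for the gr.Label output.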
def translate_and_classify(audio):
    text_result = transcribe(audio, None, "transcribe")
    emotion_scores = emotion_classifier(text_result)
    detected_emotion = {}
    for emotion in emotion_scores[0]:
        detected_emotion[emotion["label"]] = emotion["score"]
    return text_result, detected_emotion

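# Gradio UI: two input tabs (record or upload), with the transcript and detected emotion shown below.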
with gr.Blocks() as demo:

    gr.Markdown(
        """
        # Emotion Detection from Speech

        ##### Detection of anger, sadness, joy, fear in speech using OpenAI Whisper and XLM-RoBERTa
        """
    )
    
    with gr.Column():
        with gr.Tab("Record Audio"):
            # Gradio now takes 'sources' (the old 'source' argument was removed)
            audio_input_r = gr.Audio(label="Record Audio Input", sources=["microphone"], type="filepath")
            transcribe_audio_r = gr.Button("Transcribe")

        with gr.Tab("Upload Audio as File"):
            audio_input_u = gr.Audio(label="Upload Audio", sources=["upload"], type="filepath")
            transcribe_audio_u = gr.Button("Transcribe")

        with gr.Row():
            transcript_output = gr.Textbox(label="Transcription in the language of speech/audio", lines=3)
            emotion_output = gr.Label(label="Detected Emotion")
    
    transcribe_audio_r.click(translate_and_classify, inputs=audio_input_r, outputs=[transcript_output, emotion_output])
    transcribe_audio_u.click(translate_and_classify, inputs=audio_input_u, outputs=[transcript_output, emotion_output])
    
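# share=True also exposes a temporary public Gradio link in addition to the local server.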
demo.launch(share=True)