from pyChatGPT import ChatGPT
import os

# ChatGPT session token, read from the Space's environment secrets
session_token = os.environ.get('SessionToken')

import whisper

# load the Whisper "small" checkpoint once at startup
whisper_model = whisper.load_model("small")
 

def chat_hf(audio, custom_token):
    """Transcribe the audio with Whisper, then send the text to ChatGPT.

    Falls back to the user-supplied session token if the default one fails.
    """
    whisper_text = translate(audio)
    try:
        api = ChatGPT(session_token)
        resp = api.send_message(whisper_text)
        api.refresh_auth()  # refresh the authorization token
        api.reset_conversation()  # reset the conversation
        gpt_response = resp['message']
    except Exception:
        # default session token failed; retry with the token entered in the UI
        api = ChatGPT(custom_token)
        resp = api.send_message(whisper_text)
        api.refresh_auth()  # refresh the authorization token
        api.reset_conversation()  # reset the conversation
        gpt_response = resp['message']

    return whisper_text, gpt_response
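
# Minimal usage sketch (not part of the Gradio app): chat_hf can be called directly with a
# local audio file. "sample.wav" and CUSTOM_TOKEN are illustrative placeholders, and a valid
# ChatGPT session token is still required for the request to succeed.
#
#   transcript, answer = chat_hf("sample.wav", os.environ.get("CUSTOM_TOKEN", ""))
#   print(transcript)
#   print(answer)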


def translate(audio):
    """Transcribe the given audio file with Whisper and return the text."""
    print("""
    ---
    Sending audio to Whisper ...
    ---
    """)
    
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
    
    # detect the spoken language (the probabilities are not used further)
    _, probs = whisper_model.detect_language(mel)
    
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    #translate_options = whisper.DecodingOptions(task="translate", fp16=False)
    
    transcription = whisper.decode(whisper_model, mel, transcript_options)
    #translation = whisper.decode(whisper_model, mel, translate_options)
    
    print("language spoken: " + transcription.language)
    print("transcript: " + transcription.text)
    print("β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”")  
    #print("translated: " + translation.text)
      
    return transcription.text

title = """
    <div style="text-align: center; max-width: 500px; margin: 0 auto;">
        <div
        style="
            display: inline-flex;
            align-items: center;
            gap: 0.8rem;
            font-size: 1.75rem;
            margin-bottom: 10px;
        "
        >
        <h1 style="font-weight: 600; margin-bottom: 7px;">
            Whisper to chatGPT
        </h1>
        </div>
        <p style="margin-bottom: 10px;font-size: 94%;font-weight: 100;line-height: 1.5em;">
        Chat with GPT with your voice in your native language !
        <br />If it fails enter custom session key see video for reference refer 
        <a href="https://youtu.be/TdNSj_qgdFk" target="_blank">Bhavesh Baht video</a>
        </p>
        <p style="font-size: 94%">
            You can skip the queue by duplicating this space: 
            <span style="display: flex;align-items: center;justify-content: center;height: 30px;">
            <a href="https://huggingface.co/fffiloni/whisper-to-chatGPT?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>       
            </span>
        </p>
    </div>
"""

article = """
    <div class="footer">
        <p><a href="https://chat.openai.com/chat" target="_blank">chatGPT</a> 
        by <a href="https://openai.com/" style="text-decoration: underline;" target="_blank">OpenAI</a> - 
        Gradio Demo by 🤗 <a href="https://twitter.com/fffiloni" target="_blank">Sylvain Filoni</a>
        </p>
    </div>
"""

css = '''
    #col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
    a {text-decoration-line: underline; font-weight: 600;}
    .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
'''
 
import gradio as gr

with gr.Blocks(css=css) as demo:
    
    with gr.Column(elem_id="col-container"):
        
        gr.HTML(title)
        
        with gr.Row():
            record_input = gr.Audio(source="microphone", type="filepath", show_label=False)
            send_btn = gr.Button("Send my message!")
        custom_token = gr.Textbox(label='If it fails, use your own session token', placeholder="your own session token")
    with gr.Column():
        audio_translation = gr.Textbox(type="text", label="Whisper transcription")
        gpt_response = gr.Textbox(type="text", label="chatGPT response")

        gr.HTML(article)
    
    send_btn.click(chat_hf, inputs=[record_input, custom_token], outputs=[audio_translation, gpt_response])

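# Note: queue(concurrency_count=...) follows the Gradio 3.x API this Space appears to target;
# newer Gradio releases configure concurrency per event instead (assumption about the
# installed version).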
demo.queue(max_size=32, concurrency_count=20).launch(debug=True)