# Hugging Face Space: Whisper-via-API transcription demo
# (the Space page reported "Runtime error" at scrape time)
import os
import time

import gradio as gr
import requests

# Session-wide accumulator: every transcription produced while the app is
# running is appended here so "All Transcriptions" can show the full history.
list_stack = []

# Whisper API endpoint (ngrok tunnel).  The original value was wrapped in an
# extra pair of single quotes ("'http://…'"), which made the literal quotes
# part of the URL and broke every request — fixed to a plain string.
api_url = "http://90f6-34-16-143-189.ngrok-free.app/upload"
def get_transcription_whissper_api(audio, api_url=api_url):
    """Upload an audio file to the remote Whisper API and return its transcription.

    Parameters
    ----------
    audio : str
        Filesystem path of the audio file to transcribe.
    api_url : str
        Endpoint accepting a multipart POST with an ``audio`` file field
        (defaults to the module-level ``api_url``).

    Returns
    -------
    str
        The ``'message'`` field of the JSON response on success, or a
        fixed error string when the upload fails.
    """
    # Context manager closes the file even if requests.post raises
    # (the original leaked the handle on every call).
    with open(audio, 'rb') as audio_file:
        files = {'audio': ('audio.wav', audio_file)}
        response = requests.post(api_url, files=files)
    # Parse the body as JSON only after confirming success: an error
    # response may not be JSON at all, and .json() would raise there.
    if response.status_code == 200:
        print("Audio file uploaded successfully.")
        return response.json()['message']
    return "Error uploading the audio file."
def empty_list():
    """Reset the session transcript by discarding every stored transcription."""
    # Clear in place so existing references to list_stack remain valid.
    del list_stack[:]
def inference_upload(audio, state=""):
    """Transcribe one uploaded clip and append it to the running state.

    Returns the updated state twice: once for the "now" textbox and once
    for the Gradio state output.
    """
    transcription = get_transcription_whissper_api(audio)
    updated = state + transcription + " "
    return updated, updated
def inference(audio, state=""):
    """Transcribe a streamed audio chunk and record it in the session history.

    Returns a pair: the updated per-stream state, and all transcriptions
    collected so far joined with spaces.
    """
    state = state + get_transcription_whissper_api(audio) + " "
    list_stack.append(state)
    return state, " ".join(list_stack)
# CSS injected into the Gradio app: dark buttons, centered container,
# share-button styling, and a spinner animation for progress display.
css = """
        .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        }
        .gr-button {
            color: white;
            border-color: black;
            background: black;
        }
        input[type='range'] {
            accent-color: black;
        }
        .dark input[type='range'] {
            accent-color: #dfdfdf;
        }
        .container {
            max-width: 1030px;
            margin: auto;
            padding-top: 1.5rem;
        }
        .details:hover {
            text-decoration: underline;
        }
        .gr-button {
            white-space: nowrap;
        }
        .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity));
            outline: none;
            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
            --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
            --tw-ring-opacity: .5;
        }
        .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
        .prompt h4{
            margin: 1.25em 0 .25em 0;
            font-weight: bold;
            font-size: 115%;
        }
        .animate-spin {
            animation: spin 1s linear infinite;
        }
        @keyframes spin {
            from {
                transform: rotate(0deg);
            }
            to {
                transform: rotate(360deg);
            }
        }
        #share-btn-container {
            display: flex; margin-top: 1.5rem !important; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
        }
        #share-btn {
            all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
        }
        #share-btn * {
            all: unset;
        }
"""
# Build the Gradio UI: a header, microphone + upload inputs, two output
# textboxes (current chunk / full history), example clips, and a footer.
# NOTE(review): gr.Box, Row().style(...), and Audio(source=...) are
# Gradio-3-era APIs that were removed in Gradio 4 — if the installed
# Gradio is v4+, this block raises at import time (a likely cause of the
# Space's "Runtime error").  Confirm the pinned gradio version.
block = gr.Blocks(css=css)
with block:
    gr.HTML(
        """
            <div style="text-align: center; max-width: 1050px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
                <svg
                  width="0.65em"
                  height="0.65em"
                  viewBox="0 0 115 115"
                  fill="none"
                  xmlns="http://www.w3.org/2000/svg"
                >
                  <rect width="23" height="23" fill="white"></rect>
                  <rect y="69" width="23" height="23" fill="white"></rect>
                  <rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="46" width="23" height="23" fill="white"></rect>
                  <rect x="46" y="69" width="23" height="23" fill="white"></rect>
                  <rect x="69" width="23" height="23" fill="black"></rect>
                  <rect x="69" y="69" width="23" height="23" fill="black"></rect>
                  <rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="115" y="46" width="23" height="23" fill="white"></rect>
                  <rect x="115" y="115" width="23" height="23" fill="white"></rect>
                  <rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="92" y="69" width="23" height="23" fill="white"></rect>
                  <rect x="69" y="46" width="23" height="23" fill="white"></rect>
                  <rect x="69" y="115" width="23" height="23" fill="white"></rect>
                  <rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="46" y="46" width="23" height="23" fill="black"></rect>
                  <rect x="46" y="115" width="23" height="23" fill="black"></rect>
                  <rect x="46" y="69" width="23" height="23" fill="black"></rect>
                  <rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="23" y="69" width="23" height="23" fill="black"></rect>
                </svg>
                <h1 style="font-weight: 900; margin-bottom: 7px;">
                  Experiment Whisper via API
                </h1>
              </div>
              <p style="margin-bottom: 10px; font-size: 94%">
                This page can be used to simply try out the capabilities of tagaloc + english transcription. The model used is the smallest because this process only runs with small computations. Speed and accuracy can be improved with more powerful computing.</p>
            </div>
        """
    )
    with gr.Group():
        with gr.Box():
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                # Live microphone input; `every=4` presumably polls the mic
                # every 4 seconds for streaming chunks — TODO confirm against
                # the installed Gradio version's Audio docs.
                audio = gr.Audio(
                    label="Input voice",
                    source="microphone",
                    type="filepath",
                    # streaming=True
                    every=4
                )
                # File-upload input fed by the Examples gallery below.
                audio_file_upload = gr.Audio(
                    label="Input From Example",
                    source="upload",
                    type="filepath",
                )
            btn_clear = gr.Button("Clear")
            btn_trnscribe = gr.Button("Transcribe")
        text = gr.Textbox(label="Transcriptions Now", elem_id="result-textarea")
        text_all = gr.Textbox(label="All Transcriptions", elem_id="result-textarea")
        # Clear wipes the session history; Transcribe handles uploads;
        # audio.stream feeds microphone chunks through inference().
        btn_clear.click(empty_list)
        btn_trnscribe.click(inference_upload,inputs=audio_file_upload,outputs=[text,text_all],show_progress="minimal")
        audio.stream(inference,inputs=audio,outputs=[text,text_all],show_progress="minimal")
        gr.HTML("""
        <h2 style="font-weight: 900; margin: 7px;">
              Tagaloc audio
        </h2>
        """)
        # Example clips must exist alongside this script in the Space repo.
        example_gr_bark = gr.Examples(
            examples=[
                ["#1 How mature are you as a Christian Ptr Joey.mp3"],
                ["#2 Masakit Pero May Dahilan.mp3"]
            ],
            inputs = audio_file_upload
        )
        gr.HTML('''
        <div class="footer">
                    <p>Model by openAI</a> - Gradio Demo by 🤗 Hugging Face
                    </p>
        </div>
        ''')
if __name__ == "__main__":
    # debug=True keeps the process attached and surfaces tracebacks in logs.
    block.launch(debug=True)