import os

import gradio as gr
import librosa
import plotly.express as px
import requests
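
# transcript() below references `processor`, `modelw`, `tts`, and `FnAnswer`,
# none of which are defined in this file. A minimal sketch of the missing
# setup, assuming Whisper (via transformers) for speech-to-text and Coqui
# XTTS for text-to-speech; the checkpoint names are assumptions. FnAnswer,
# the question-answering step, is assumed to be defined or imported elsewhere.
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from TTS.api import TTS

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
modelw = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2")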


def calculate_route():
    """Query the TomTom Routing API for a fixed route and plot it on a map."""
    # Read the key from the environment instead of hard-coding it; the
    # variable name TOMTOM_API_KEY is an assumption, and the original
    # placeholder string is kept as the fallback.
    api_key = os.environ.get("TOMTOM_API_KEY", "api_key")
    origin = "49.631997,6.171029"
    destination = "49.586745,6.140002"

    url = (
        "https://api.tomtom.com/routing/1/calculateRoute/"
        f"{origin}:{destination}/json?key={api_key}"
    )
    response = requests.get(url)
    response.raise_for_status()
    data = response.json()

    # Collect the route geometry from the first leg of the first route.
    lats = []
    lons = []
    for point in data["routes"][0]["legs"][0]["points"]:
        lats.append(point["latitude"])
        lons.append(point["longitude"])

    fig = px.line_mapbox(lat=lats, lon=lons, zoom=12, height=600)
    fig.update_layout(
        mapbox_style="open-street-map",
        mapbox_zoom=12,
        mapbox_center_lat=lats[0],
        mapbox_center_lon=lons[0],
        margin={"r": 0, "t": 0, "l": 0, "b": 0},
    )
    return fig
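

# The UI below collects free-text addresses, but calculate_route() routes
# between fixed coordinates. A hypothetical helper (not in the original)
# showing how those addresses could be resolved with TomTom's Search API:
def geocode(address, api_key):
    """Resolve an address to a "lat,lon" string via TomTom geocoding."""
    url = (
        "https://api.tomtom.com/search/2/geocode/"
        f"{requests.utils.quote(address)}.json?key={api_key}"
    )
    position = requests.get(url).json()["results"][0]["position"]
    return f"{position['lat']},{position['lon']}"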


def transcript(
    general_context, link_to_audio, voice, emotion, place, time, delete_history, state
):
    """Run speech-to-text, pass the transcription to FnAnswer, and synthesize
    FnAnswer's output with text-to-speech."""
    # Load the recorded audio, resampled to the 16 kHz rate Whisper expects.
    audio_array, sampling_rate = librosa.load(link_to_audio, sr=16000)

    # Transcribe with Whisper. sampling_rate must be passed as a keyword:
    # positionally it would bind to a different parameter of the processor.
    input_features = processor(
        audio_array, sampling_rate=sampling_rate, return_tensors="pt"
    ).input_features
    predicted_ids = modelw.generate(input_features)
    # batch_decode returns a list; keep the single decoded string.
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]

    # Answer the question and carry the conversation state forward.
    quest_processing = FnAnswer(
        general_context, transcription, place, time, delete_history, state
    )
    state = quest_processing[2]
    print("language: " + quest_processing[3])

    # Speak the answer in the chosen character's voice, using the emotion
    # selected in the UI rather than a hard-coded one.
    tts.tts_to_file(
        text=str(quest_processing[0]),
        file_path="output.wav",
        speaker_wav=f"Audio_Files/{voice}.wav",
        language=quest_processing[3],
        emotion=emotion,
    )

    return "output.wav", state["context"], state


shortcut_js = """
<script>
function shortcuts(e) {
    switch (e.target.tagName.toLowerCase()) {
        // Ignore shortcuts while the user is typing in a form field.
        case "input":
        case "textarea":
            break;
        default:
            if (e.key.toLowerCase() == "r" && e.ctrlKey) {
                console.log("recording");
                document.getElementById("recorder").start_recording();
            }
            if (e.key.toLowerCase() == "s" && e.ctrlKey) {
                console.log("stopping");
                document.getElementById("recorder").stop_recording();
            }
    }
}
// keydown rather than the deprecated keypress event, which does not fire
// for Ctrl-modified keys in modern browsers.
document.addEventListener('keydown', shortcuts, false);
</script>
"""


hour_options = [f"{i:02d}:00:00" for i in range(24)]

model_answer = ""
general_context = ""

print(general_context)
initial_state = {"context": general_context}
initial_context = initial_state["context"]


# Attach the keyboard-shortcut script. `head=` is an assumption here: it
# requires a Gradio version whose gr.Blocks accepts a `head` argument (4.x);
# the original defined shortcut_js but never used it.
with gr.Blocks(theme=gr.themes.Default(), head=shortcut_js) as demo:
    with gr.Row():
        with gr.Column(scale=1, min_width=300):
            time_picker = gr.Dropdown(
                choices=hour_options, label="What time is it?", value="08:00:00"
            )
            history = gr.Radio(
                ["Yes", "No"], label="Maintain the conversation history?", value="No"
            )
            voice_character = gr.Radio(
                choices=[
                    "Rick Sanches",
                    "Eddie Murphy",
                    "David Attenborough",
                    "Morgan Freeman",
                ],
                label="Choose a voice",
                # The original default "Rick Sancher" did not match any choice.
                value="Rick Sanches",
                show_label=True,
            )
            emotion = gr.Radio(
                choices=["Cheerful", "Grumpy"],
                label="Choose an emotion",
                value="Cheerful",
                show_label=True,
            )
            origin = gr.Textbox(
                value="Luxembourg Gare, Luxembourg", label="Origin", interactive=True
            )
            destination = gr.Textbox(
                value="Kirchberg Campus, Kirchberg", label="Destination", interactive=True
            )
            recorder = gr.Audio(type="filepath", label="Input audio", elem_id="recorder")
        with gr.Column(scale=2, min_width=600):
            map_plot = gr.Plot()
            # Redraw the route when either address is submitted. Note that
            # calculate_route() ignores the typed values and uses its fixed
            # coordinates; the geocode() sketch above shows one way to use them.
            origin.submit(fn=calculate_route, outputs=map_plot)
            destination.submit(fn=calculate_route, outputs=map_plot)
            output_audio = gr.Audio(label="Output audio")
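
    # The recorder is never wired to transcript() in the original. A minimal
    # sketch of the missing hookup, assuming Gradio 4.x Audio events, that the
    # destination textbox doubles as transcript()'s `place` input, and a
    # context textbox plus gr.State added here (all assumptions):
    context_box = gr.Textbox(value=initial_context, label="Conversation context")
    state = gr.State(initial_state)
    recorder.stop_recording(
        fn=transcript,
        inputs=[context_box, recorder, voice_character, emotion,
                destination, time_picker, history, state],
        outputs=[output_audio, context_box, state],
    )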


gr.close_all()

demo.queue().launch(
    debug=True, server_name="0.0.0.0", server_port=7860, ssl_verify=False
)