# (removed scraped-page artifacts: file-size line, commit hashes, line-number gutter)
import gradio as gr
import torch
import numpy as np
from transformers import Wav2Vec2Processor, Wav2Vec2ForSequenceClassification
# Load the fine-tuned Wav2Vec2 accent classifier from the local "results" directory.
model_name = "results"
processor = Wav2Vec2Processor.from_pretrained(model_name)
# BUG FIX: `from_safetensors` is not a recognized `from_pretrained` kwarg —
# the supported flag is `use_safetensors`. The original call therefore never
# actually requested the safetensors weights. `from_tf`/`from_flax` are
# dropped because False is already their default.
model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name, use_safetensors=True)
def classify_accent(audio):
    """Classify an audio clip as Spanish-accented speech vs. other.

    Parameters:
        audio: Gradio numpy audio, i.e. a ``(sample_rate, samples)`` tuple,
            or ``None`` when no audio was provided. ``samples`` may be
            integer PCM and may be stereo with shape ``(n, channels)``.

    Returns:
        str: the predicted label ("Español" / "Otro"), or a Spanish error
        message when the input is missing or processing fails.
    """
    if audio is None:
        return "Error: No se recibió audio"
    # Debug traces of the raw input as delivered by Gradio.
    print(f"Tipo de entrada de audio: {type(audio)}")
    print(f"Entrada de audio recibida: {audio}")
    try:
        # gr.Audio(type="numpy") yields (sample_rate, data) in that order.
        sample_rate = audio[0]
        audio_array = audio[1]
        print(f"Forma del audio: {audio_array.shape}, Frecuencia de muestreo: {sample_rate}")
        # BUG FIX: downmix stereo (n, channels) to mono — librosa.resample and
        # Wav2Vec2 both expect a 1-D waveform.
        if audio_array.ndim > 1:
            audio_array = audio_array.mean(axis=1)
        # BUG FIX: integer PCM must be scaled into [-1, 1]; a bare
        # astype(np.float32) leaves values in the +/-32768 range, which is far
        # outside the float waveform range Wav2Vec2 pipelines expect.
        if np.issubdtype(audio_array.dtype, np.integer):
            audio_array = audio_array.astype(np.float32) / np.iinfo(audio_array.dtype).max
        else:
            audio_array = audio_array.astype(np.float32)
        # The model was trained on 16 kHz audio; resample anything else.
        if sample_rate != 16000:
            import librosa  # lazy import: only needed when resampling
            audio_array = librosa.resample(audio_array, orig_sr=sample_rate, target_sr=16000)
        input_values = processor(audio_array, return_tensors="pt", sampling_rate=16000).input_values
        # Inference only — no gradients needed.
        with torch.no_grad():
            logits = model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1).item()
        # Index 0 = Spanish accent, index 1 = anything else.
        labels = ["Español", "Otro"]
        return labels[predicted_ids]
    except Exception as e:
        # Surface the failure to the UI instead of crashing the app.
        return f"Error al procesar el audio: {str(e)}"
#
# HTML rendered under the interface title: a usage hint plus author links.
description_html = """
<p>Prueba con grabación o cargando un archivo de audio. Para probar, recomiendo una palabra.</p>
<p>Ramon Mayor Martins, Ph.D.: <a href="https://rmayormartins.github.io/" target="_blank">Website</a> | <a href="https://huggingface.co/rmayormartins" target="_blank">Spaces</a></p>
"""
# Wire the classifier into a Gradio app: microphone/file audio in,
# predicted accent label out.
audio_input = gr.Audio(type="numpy")
interface = gr.Interface(
    fn=classify_accent,
    inputs=audio_input,
    outputs="label",
    title="Clasificador de Sotaques (Español vs Otro)",
    description=description_html,
)
interface.launch()
# (removed trailing scraped-page artifact)