import gradio as gr
import torch
import numpy as np
from transformers import Wav2Vec2Processor, Wav2Vec2ForSequenceClassification
from safetensors.torch import load_file
import librosa  # used for resampling to 16 kHz; imported at module level instead of inside the function

# Load the saved processor from the local "results" directory
model_name = "results"
processor = Wav2Vec2Processor.from_pretrained(model_name)

# Load the fine-tuned weights from the safetensors file
state_dict = load_file("results/model.safetensors")
model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name, state_dict=state_dict)
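# Passing state_dict explicitly makes from_pretrained use the safetensors weights
# rather than any checkpoint it would otherwise discover in the directory; it also
# returns the model already in eval mode, so no separate model.eval() call is needed.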

def classify_accent(audio):
    if audio is None:
        return "Error: no audio was received"

    # Log the input type for debugging
    print(f"Audio input type: {type(audio)}")

    # Log the raw input for debugging
    print(f"Audio input received: {audio}")

    try:
        # gr.Audio(type="numpy") delivers a (sample_rate, data) tuple, in that order
        sample_rate, audio_array = audio

        print(f"Audio shape: {audio_array.shape}, sample rate: {sample_rate}")

        # Collapse stereo to mono and convert to float32, which the processor expects
        if audio_array.ndim > 1:
            audio_array = audio_array.mean(axis=1)
        audio_array = audio_array.astype(np.float32)

        # Resample to 16 kHz if necessary; Wav2Vec2 models expect 16 kHz input
        if sample_rate != 16000:
            audio_array = librosa.resample(audio_array, orig_sr=sample_rate, target_sr=16000)

        input_values = processor(audio_array, return_tensors="pt", sampling_rate=16000).input_values
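        # Note: the processor call above relies on the Wav2Vec2 feature extractor's
        # default zero-mean/unit-variance normalization (do_normalize=True), so the
        # int16-range samples Gradio delivers need no manual rescaling to [-1, 1];
        # if the processor saved in "results" disables normalization, rescale here.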

        # Inference without gradient tracking
        with torch.no_grad():
            logits = model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1).item()

        # Map the predicted class index to its label
        labels = ["Spanish", "Other"]
        return labels[predicted_ids]

    except Exception as e:
        return f"Error while processing the audio: {str(e)}"

# Description shown in the Gradio interface
description_html = """
<p>Try it by recording or by uploading an audio file. For testing, I recommend a single word.</p>
<p>Ramon Mayor Martins: <a href="https://rmayormartins.github.io/" target="_blank">Website</a> | <a href="https://huggingface.co/rmayormartins" target="_blank">Spaces</a></p>
"""

# Gradio interface
interface = gr.Interface(
    fn=classify_accent,
    inputs=gr.Audio(type="numpy"),
    outputs="label",
    title="Accent Classifier (Spanish vs Other)",
    description=description_html
)

interface.launch()
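
# When running outside a managed host such as Hugging Face Spaces, the usual Gradio
# launch flags apply, e.g. interface.launch(share=True) for a temporary public link
# or interface.launch(server_port=7860) to pin the port.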