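"""Single-utterance speech-to-text inference with a fairseq S2T checkpoint.

Loads a wav file, extracts log-mel filterbank features the way the
examples/speech_to_text recipe does, and decodes with a sequence generator.
Assumes a MUST-C-style S2T model trained on 80-dim fbank features with
utterance-level CMVN and a sentencepiece target vocabulary.
"""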
import torch
import torchaudio

from fairseq import checkpoint_utils
from fairseq.data.data_utils import post_process
from examples.speech_to_text.data_utils import extract_fbank_features

def main(audio_path, checkpoint_path):
    # Load the audio file as a (channels, num_samples) float tensor
    waveform, sample_rate = torchaudio.load(audio_path)

    # Extract 80-dim log-mel filterbank features; extract_fbank_features
    # already returns a numpy array, so no extra .numpy() call is needed
    fbank_features = extract_fbank_features(waveform, sample_rate)

    # Load the pre-trained model checkpoint
    models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = models[0]
    model.eval()

    # Convert the fbank features to a float tensor (num_frames x num_mel_bins)
    fbank_tensor = torch.from_numpy(fbank_features).float()

    # Utterance-level CMVN, as applied by the standard MUST-C S2T recipe;
    # drop this step if the checkpoint was trained without the
    # utterance_cmvn feature transform
    fbank_tensor = (fbank_tensor - fbank_tensor.mean(dim=0)) / fbank_tensor.std(dim=0)

    # Move the model and features to the same device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    fbank_tensor = fbank_tensor.to(device)

    # Wrap the features in fairseq's speech-to-text batch format:
    # src_tokens is (batch, num_frames, feat_dim), src_lengths is (batch,)
    sample = {
        "net_input": {
            "src_tokens": fbank_tensor.unsqueeze(0),
            "src_lengths": torch.tensor([fbank_tensor.size(0)], device=device),
        }
    }

    # Build a sequence generator from the generation config saved in the
    # checkpoint and decode; inference_step expects a generator object,
    # not the raw model
    generator = task.build_generator(models, cfg.generation)
    with torch.no_grad():
        hypos = task.inference_step(generator, models, sample)

    # Extract the predicted tokens from the top hypothesis
    hypo_tokens = hypos[0][0]["tokens"].int().cpu()

    # Convert tokens to a string with the target dictionary, then strip the
    # sentencepiece segmentation used by the MUST-C recipe
    hypo_str = task.target_dictionary.string(hypo_tokens)
    hypo_str = post_process(hypo_str, "sentencepiece")

    return hypo_str


if __name__ == "__main__":
    audio_file_path = "/content/drive/MyDrive/en2hi/fairseq_mustc_single_inference/test.wav"
    checkpoint_path = "/content/drive/MyDrive/en2hi/fairseq_mustc_single_inference/st_avg_last_10_checkpoints.pt"
    prediction = main(audio_file_path, checkpoint_path)
    print("Predicted text:", prediction)