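"""Extract HuBERT features from the 16 kHz wavs of an experiment directory.

The file list is sharded across workers (shard i_part of n_parts); each worker
writes one .npy feature file per input wav into 3_feature256 (v1) or
3_feature768 (v2).
"""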
import os
import sys
import tqdm
import torch
import torch.nn.functional as F
import fairseq
import soundfile as sf
import numpy as np


# Command-line arguments: <device> <n_parts> <i_part>, then either
# <exp_dir> <version> <is_half> or <i_gpu> <exp_dir> <version> <is_half>.
device = sys.argv[1]
n_parts = int(sys.argv[2])
i_part = int(sys.argv[3])

if len(sys.argv) == 7:
    exp_dir, version = sys.argv[4], sys.argv[5]
    # bool() on any non-empty string is True, so compare the string instead
    is_half = sys.argv[6].lower() == "true"
else:
    i_gpu, exp_dir = sys.argv[4], sys.argv[5]
    os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
    version = sys.argv[6]
    is_half = sys.argv[7].lower() == "true"


# Workaround for DirectML: fairseq's GradMultiply autograd op is replaced with
# a no-op copy. Gradients are irrelevant here since extraction runs under
# torch.no_grad().
def forward_dml(ctx, x, scale):
    ctx.scale = scale
    res = x.clone().detach()
    return res


fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml

# Pretrained HuBERT checkpoint, expected in the working directory
model_path = "hubert_base.pt"

# Input: 16 kHz wavs; output: 256-dim (v1) or 768-dim (v2) feature .npy files
wav_path = f"{exp_dir}/1_16k_wavs"
out_path = f"{exp_dir}/3_feature256" if version == "v1" else f"{exp_dir}/3_feature768"
os.makedirs(out_path, exist_ok=True)


def read_wave(wav_path, normalize=False):
    """Load a 16 kHz wav as a (1, samples) tensor, optionally layer-normalized."""
    wav, sr = sf.read(wav_path)
    assert sr == 16000, f"expected 16 kHz audio, got {sr} Hz"
    feats = torch.from_numpy(wav).float()
    feats = feats.mean(-1) if feats.dim() == 2 else feats  # downmix stereo to mono
    feats = feats.view(1, -1)
    if normalize:
        with torch.no_grad():
            feats = F.layer_norm(feats, feats.shape)
    # convert to fp16 only when the model itself runs in fp16
    if is_half and device not in ["mps", "cpu"]:
        feats = feats.half()
    return feats


print("Starting feature extraction...")
models, saved_cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
    [model_path],
    suffix="",
)
model = models[0]
model = model.to(device)
if device not in ["mps", "cpu"]:
    model = model.half()
model.eval()

# shard the sorted file list: this worker handles every n_parts-th file,
# starting at offset i_part
todo = sorted(os.listdir(wav_path))[i_part::n_parts]

if len(todo) == 0:
    print(
        "An error occurred during feature extraction: no audio files were found. "
        "Make sure the audio files were provided correctly."
    )
else:
    print(f"{len(todo)} files to process")
    with tqdm.tqdm(total=len(todo)) as pbar:
        for idx, file in enumerate(todo):
            try:
                if file.endswith(".wav"):
                    wav_file_path = os.path.join(wav_path, file)
                    out_file_path = os.path.join(out_path, file.replace("wav", "npy"))

                    if os.path.exists(out_file_path):
                        continue

                    # normalize the waveform only if the checkpoint was trained that way
                    feats = read_wave(wav_file_path, normalize=saved_cfg.task.normalize)
                    # single utterance, no padding, so the mask is all False
                    padding_mask = torch.BoolTensor(feats.shape).fill_(False)
                    inputs = {
                        "source": feats.to(device),
                        "padding_mask": padding_mask.to(device),
                        # v1 reads layer 9 (projected to 256 dims), v2 reads layer 12 (768 dims)
                        "output_layer": 9 if version == "v1" else 12,
                    }
                    with torch.no_grad():
                        # extract_features returns a tuple; element 0 holds the features
                        logits = model.extract_features(**inputs)
                        # v1 projects the features down to 256 dims via final_proj,
                        # v2 keeps the raw 768-dim output
                        feats = (
                            model.final_proj(logits[0])
                            if version == "v1"
                            else logits[0]
                        )

                    feats = feats.squeeze(0).float().cpu().numpy()
                    # save only clean features; NaNs usually indicate corrupt audio or fp16 overflow
                    if np.isnan(feats).sum() == 0:
                        np.save(out_file_path, feats, allow_pickle=False)
                    else:
                        print(f"{file} - contains nan")
                    pbar.set_description(f"Processing {file} {feats.shape}")
            except Exception as error:
                # report the failing file but keep processing the rest of the shard
                print(f"{file}: {error}")
            pbar.update(1)

    print("Feature extraction completed successfully!")