import os
import json
import librosa
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing
from tokenizers.trainers import WordPieceTrainer
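# Third-party dependencies: librosa (audio loading / MFCC extraction) and
# tokenizers (Hugging Face tokenizers library), e.g. `pip install librosa tokenizers`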


class MalayalamCharacterTokenizer:
    def __init__(self, transcription_dir, wav_dir):
        """
        Initialize character-level tokenizer with directories for transcriptions and audio files
        :param transcription_dir: Path to folder containing text transcriptions
        :param wav_dir: Path to folder containing WAV audio files
        """
        self.transcription_dir = transcription_dir
        self.wav_dir = wav_dir

        # Define special tokens
        self.special_tokens = [
            "[PAD]",
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[MASK]"
        ]

        # Initialize text tokenizer
        self.text_tokenizer, self.trainer = self._create_character_tokenizer()

        # Audio tokenization parameters
        self.audio_tokenizer = {
            "sample_rate": 16000,  # Standard for speech models
            "n_mfcc": 13,          # Number of MFCCs to extract
            "n_fft": 2048,         # FFT window size
            "hop_length": 512      # Hop length between frames
        }
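        # Note: at a 16 kHz sample rate each analysis window covers
        # n_fft / sample_rate = 2048 / 16000 = 128 ms of audio and advances by
        # hop_length / sample_rate = 512 / 16000 = 32 ms per frame.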

    def _create_character_tokenizer(self):
        """
        Create a character-level tokenizer for Malayalam text
        """
        # Initialize tokenizer with a WordPiece model; the trainer below always
        # includes every character seen in the data, so all Malayalam characters
        # end up in the vocabulary
        tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))

        # Use whitespace as pre-tokenizer
        tokenizer.pre_tokenizer = Whitespace()

        # Create trainer for character-level tokenization
        trainer = WordPieceTrainer(
            vocab_size=10000,  # Large enough to cover the full alphabet (multi-character subwords may also be learned up to this size)
            special_tokens=self.special_tokens,
            continuing_subword_prefix='##',  # WordPiece prefix marking non-initial characters within a word
            show_progress=True
        )

        # Prepare special tokens with IDs for post-processing.
        # The tokenizer is still untrained here, so token_to_id() returns None
        # and the fallback index is used; it matches the IDs the trainer will
        # assign later because special tokens are added first, in order.
        special_tokens_dict = {
            token: tokenizer.token_to_id(token) if tokenizer.token_to_id(token) is not None
            else len(tokenizer.get_vocab()) + list(self.special_tokens).index(token)
            for token in ["[CLS]", "[SEP]"]
        }

        # Add special token processing
        tokenizer.post_processor = TemplateProcessing(
            single="[CLS] $A [SEP]",
            pair="[CLS] $A [SEP] $B:1 [SEP]:1",
            special_tokens=[
                ("[CLS]", special_tokens_dict["[CLS]"]),
                ("[SEP]", special_tokens_dict["[SEP]"])
            ]
        )

        return tokenizer, trainer

    def _get_matched_files(self):
        """
        Find matching transcription and audio files
        :return: List of tuples (transcription_path, audio_path)
        """
        matched_files = []

        # Get all transcription files
        for trans_file in os.listdir(self.transcription_dir):
            # Remove extension to match with audio file
            base_name = os.path.splitext(trans_file)[0]

            # Check for corresponding WAV file
            wav_path = os.path.join(self.wav_dir, base_name + '.wav')
            trans_path = os.path.join(self.transcription_dir, trans_file)
            if os.path.exists(wav_path):
                matched_files.append((trans_path, wav_path))

        return matched_files

    def train_character_tokenizer(self):
        """
        Train character-level tokenizer on all transcription files
        :return: Trained tokenizer
        """
        # Collect all transcriptions
        transcriptions = []
        for trans_path, _ in self._get_matched_files():
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcriptions.append(f.read().strip())

        # Train the tokenizer on the transcriptions; the trainer's alphabet
        # guarantees that every character in the data enters the vocabulary
        self.text_tokenizer.train_from_iterator(transcriptions, self.trainer)

        return self.text_tokenizer
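
    # Illustrative use of the trained tokenizer (exact tokens depend on the
    # training data); the post-processor wraps every sequence in [CLS]/[SEP]:
    #   encoding = tokenizer.encode("മലയാളം")
    #   encoding.tokens  # e.g. ['[CLS]', ..., '[SEP]']
    #   encoding.ids     # matching integer ids, used as text_tokens below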

    def process_dataset(self, tokenizer):
        """
        Process entire dataset, tokenizing text and extracting audio features
        :param tokenizer: Trained tokenizer
        :return: Processed dataset with tokenized text and audio features
        """
        dataset = []
        matched_files = self._get_matched_files()

        for trans_path, wav_path in matched_files:
            # Read transcription
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()

            # Tokenize text (character-level)
            text_tokens = tokenizer.encode(transcription).ids

            # Extract audio features
            audio_features = self._extract_audio_features(wav_path)

            dataset.append({
                'transcription': transcription,
                'text_tokens': text_tokens,
                'audio_features': audio_features,
                'audio_path': wav_path,
                'transcription_path': trans_path
            })

        return dataset

    def _extract_audio_features(self, audio_path):
        """
        Extract MFCC features from audio file
        :param audio_path: Path to WAV file
        :return: Extracted audio features
        """
        # Load audio file
        audio, sr = librosa.load(
            audio_path,
            sr=self.audio_tokenizer['sample_rate']
        )

        # Extract MFCCs
        mfccs = librosa.feature.mfcc(
            y=audio,
            sr=sr,
            n_mfcc=self.audio_tokenizer['n_mfcc'],
            n_fft=self.audio_tokenizer['n_fft'],
            hop_length=self.audio_tokenizer['hop_length']
        )

        return mfccs.T.tolist()
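
    # Shape note: librosa.feature.mfcc returns an (n_mfcc, n_frames) array, so
    # the transpose above yields one row of 13 coefficients per frame; with the
    # default centre padding, n_frames is roughly 1 + num_samples // hop_length
    # (about 31 frames per second of audio at 16 kHz).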

    def save_dataset(self, output_path, tokenizer):
        """
        Save processed dataset to JSON
        :param output_path: Path to save processed dataset
        :param tokenizer: Trained tokenizer
        """
        dataset = self.process_dataset(tokenizer)
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(dataset, f, ensure_ascii=False, indent=2)
        print(f"Saved dataset to {output_path}")

    def save_tokenizer(self, output_dir, tokenizer):
        """
        Save tokenizer configurations
        :param output_dir: Directory to save tokenizer files
        :param tokenizer: Trained tokenizer
        """
        os.makedirs(output_dir, exist_ok=True)

        # Save text tokenizer vocabulary and configuration
        tokenizer.save(os.path.join(output_dir, 'malayalam_character_tokenizer.json'))

        # Save audio tokenizer configuration
        with open(os.path.join(output_dir, 'audio_tokenizer.json'), 'w') as f:
            json.dump(self.audio_tokenizer, f, indent=2)


# Example usage
if __name__ == "__main__":
    # Initialize character-level tokenizer
    tokenizer_manager = MalayalamCharacterTokenizer(
        transcription_dir='transcription',
        wav_dir='wav'
    )

    # Train character tokenizer
    trained_tokenizer = tokenizer_manager.train_character_tokenizer()

    # Save dataset
    # tokenizer_manager.save_dataset(
    #     'malayalam_character_dataset.json',
    #     trained_tokenizer
    # )

    # Save tokenizer configurations
    tokenizer_manager.save_tokenizer(
        'malayalam_character_tokenizer',
        trained_tokenizer
    )
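
    # Minimal sketch of reloading the saved tokenizer in a later session;
    # assumes the save_tokenizer() call above has written the JSON file below.
    reloaded_tokenizer = Tokenizer.from_file(
        os.path.join('malayalam_character_tokenizer',
                     'malayalam_character_tokenizer.json')
    )
    print(f"Reloaded tokenizer vocabulary size: {reloaded_tokenizer.get_vocab_size()}")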