# asr_malayalam/char_tokenizer.py
# Uploaded by aoxo via huggingface_hub (commit 39e96bc, verified).
import os
import json
import librosa
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing
from tokenizers.trainers import WordPieceTrainer
class MalayalamCharacterTokenizer:
    """Pair Malayalam text transcriptions with WAV audio for ASR training.

    Provides a character-level text tokenizer (built on a WordPiece model)
    and MFCC-based audio feature extraction, plus helpers to match, process,
    and persist the dataset.
    """

    def __init__(self, transcription_dir, wav_dir):
        """
        Initialize character-level tokenizer with dataset directories.

        :param transcription_dir: Path to folder containing text transcriptions
        :param wav_dir: Path to folder containing WAV audio files
        """
        self.transcription_dir = transcription_dir
        self.wav_dir = wav_dir
        # Special tokens; WordPieceTrainer assigns these the first vocab IDs
        # in list order ([PAD]=0 ... [MASK]=4).
        self.special_tokens = [
            "[PAD]",
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[MASK]",
        ]
        # Untrained text tokenizer plus the trainer used to fit it later.
        self.text_tokenizer, self.trainer = self._create_character_tokenizer()
        # Audio feature-extraction parameters (MFCC configuration).
        self.audio_tokenizer = {
            "sample_rate": 16000,  # standard rate for speech models
            "n_mfcc": 13,          # number of MFCC coefficients to extract
            "n_fft": 2048,         # FFT window size
            "hop_length": 512,     # hop length between frames
        }

    def _create_character_tokenizer(self):
        """
        Build an (untrained) tokenizer and its trainer for Malayalam text.

        :return: Tuple of (tokenizer, trainer)
        """
        # WordPiece model; with a large vocab budget it effectively learns a
        # character-level vocabulary for Malayalam script.
        tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
        tokenizer.pre_tokenizer = Whitespace()
        trainer = WordPieceTrainer(
            vocab_size=10000,  # large enough to capture all characters
            special_tokens=self.special_tokens,
            # Not meaningful at character level, but required by WordPiece.
            continuing_subword_prefix='##',
            show_progress=True,
        )
        # Attach the [CLS]/[SEP] template now so encode() works before
        # training; IDs are refreshed with the real vocabulary after training
        # (see train_character_tokenizer).
        self._attach_post_processor(tokenizer)
        return tokenizer, trainer

    def _attach_post_processor(self, tokenizer):
        """
        Set the "[CLS] $A [SEP]" template using the tokenizer's current IDs.

        Before training, token_to_id() returns None, so we fall back to the
        slot the trainer will assign (vocab size + position in the special
        token list). Calling this again after training replaces the guessed
        IDs with the actual ones.

        :param tokenizer: Tokenizer whose post_processor should be (re)set
        """
        special_ids = {}
        for token in ("[CLS]", "[SEP]"):
            token_id = tokenizer.token_to_id(token)
            if token_id is None:
                # Untrained vocabulary: predict the ID the trainer will use.
                token_id = (len(tokenizer.get_vocab())
                            + self.special_tokens.index(token))
            special_ids[token] = token_id
        tokenizer.post_processor = TemplateProcessing(
            single="[CLS] $A [SEP]",
            pair="[CLS] $A [SEP] $B:1 [SEP]:1",
            special_tokens=[
                ("[CLS]", special_ids["[CLS]"]),
                ("[SEP]", special_ids["[SEP]"]),
            ],
        )

    def _get_matched_files(self):
        """
        Find transcription files that have a corresponding WAV file.

        :return: Sorted list of tuples (transcription_path, audio_path)
        """
        matched_files = []
        # Sort for deterministic ordering (os.listdir order is arbitrary).
        for trans_file in sorted(os.listdir(self.transcription_dir)):
            # Strip the extension to derive the expected WAV file name.
            base_name = os.path.splitext(trans_file)[0]
            wav_path = os.path.join(self.wav_dir, base_name + '.wav')
            trans_path = os.path.join(self.transcription_dir, trans_file)
            if os.path.exists(wav_path):
                matched_files.append((trans_path, wav_path))
        return matched_files

    def train_character_tokenizer(self):
        """
        Train the character-level tokenizer on all matched transcriptions.

        :return: Trained tokenizer
        """
        transcriptions = []
        for trans_path, _ in self._get_matched_files():
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcriptions.append(f.read().strip())
        # Training effectively builds a character-level vocabulary.
        self.text_tokenizer.train_from_iterator(transcriptions, self.trainer)
        # Refresh the post-processor so [CLS]/[SEP] use their real trained
        # IDs rather than the pre-training guesses.
        self._attach_post_processor(self.text_tokenizer)
        return self.text_tokenizer

    def process_dataset(self, tokenizer):
        """
        Tokenize every transcription and extract features for its audio.

        :param tokenizer: Trained tokenizer
        :return: List of dicts with tokenized text and audio features
        """
        dataset = []
        for trans_path, wav_path in self._get_matched_files():
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()
            # Character-level token IDs for the transcription.
            text_tokens = tokenizer.encode(transcription).ids
            audio_features = self._extract_audio_features(wav_path)
            dataset.append({
                'transcription': transcription,
                'text_tokens': text_tokens,
                'audio_features': audio_features,
                'audio_path': wav_path,
                'transcription_path': trans_path,
            })
        return dataset

    def _extract_audio_features(self, audio_path):
        """
        Extract MFCC features from a WAV file.

        :param audio_path: Path to WAV file
        :return: MFCCs as a nested list, shape (frames, n_mfcc)
        """
        # Resample on load to the configured rate.
        audio, sr = librosa.load(
            audio_path,
            sr=self.audio_tokenizer['sample_rate'],
        )
        mfccs = librosa.feature.mfcc(
            y=audio,
            sr=sr,
            n_mfcc=self.audio_tokenizer['n_mfcc'],
            n_fft=self.audio_tokenizer['n_fft'],
            hop_length=self.audio_tokenizer['hop_length'],
        )
        # Transpose to time-major and convert for JSON serializability.
        return mfccs.T.tolist()

    def save_dataset(self, output_path, tokenizer):
        """
        Process the dataset and save it to a JSON file.

        :param output_path: Path to save the processed dataset
        :param tokenizer: Trained tokenizer
        """
        dataset = self.process_dataset(tokenizer)
        with open(output_path, 'w', encoding='utf-8') as f:
            # ensure_ascii=False keeps Malayalam text readable in the JSON.
            json.dump(dataset, f, ensure_ascii=False, indent=2)
        print(f"Saved dataset to {output_path}")

    def save_tokenizer(self, output_dir, tokenizer):
        """
        Save the text tokenizer and audio configuration to a directory.

        :param output_dir: Directory to save tokenizer files
        :param tokenizer: Trained tokenizer
        """
        os.makedirs(output_dir, exist_ok=True)
        # Text tokenizer vocabulary and configuration.
        tokenizer.save(os.path.join(output_dir, 'malayalam_character_tokenizer.json'))
        # Audio feature-extraction configuration.
        with open(os.path.join(output_dir, 'audio_tokenizer.json'), 'w') as f:
            json.dump(self.audio_tokenizer, f, indent=2)
# Example usage: train the character tokenizer and persist its configuration.
if __name__ == "__main__":
    # Build the tokenizer manager over the local dataset folders.
    manager = MalayalamCharacterTokenizer(
        transcription_dir='transcription',
        wav_dir='wav'
    )

    # Fit the character-level vocabulary on all matched transcriptions.
    trained = manager.train_character_tokenizer()

    # NOTE: full dataset export (save_dataset to
    # 'malayalam_character_dataset.json') is intentionally disabled here.

    # Persist the text tokenizer and audio configuration to disk.
    manager.save_tokenizer(
        'malayalam_character_tokenizer',
        trained
    )