from tokenizers import Tokenizer, models, trainers, pre_tokenizers, normalizers
from tokenizers.processors import TemplateProcessing
import pandas as pd
import os

# Load the cleaned dataset (a CSV-formatted file with a 'text' column)
data_path = "tokenizer.txt"
df = pd.read_csv(data_path)

# Extract the text column, dropping empty rows
texts = df['text'].dropna().tolist()

# Initialize a Unigram tokenizer
tokenizer = Tokenizer(models.Unigram())

# Set a normalizer (optional, but helpful for consistency)
tokenizer.normalizer = normalizers.Sequence([
    normalizers.NFKC(),       # Normalize text to NFKC form
    normalizers.Lowercase()   # Convert to lowercase
])

# Set a pre-tokenizer to split text into initial units
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

# Define a trainer for the Unigram tokenizer
trainer = trainers.UnigramTrainer(
    vocab_size=150000,  # Vocabulary size; can adjust based on project size
    special_tokens=["<s>", "</s>", "<unk>", "<pad>", "<mask>"],  # standard SentencePiece-style special tokens
    unk_token="<unk>",  # Specify unknown token
)

# Train the tokenizer
print("Training tokenizer with Unigram...")
tokenizer.train_from_iterator(texts, trainer=trainer)
print("Tokenizer training complete.")

# Define post-processing so every encoding is wrapped in <s> ... </s> markers
tokenizer.post_processor = TemplateProcessing(
    single="<s> $A </s>",
    pair="<s> $A </s> $B </s>",
    special_tokens=[("<s>", 0), ("</s>", 1)]
)

# Save the tokenizer to a directory
output_dir = "balochi_unigram_tokenizer"
os.makedirs(output_dir, exist_ok=True)  # Ensure the output directory exists before saving
tokenizer.save(f"{output_dir}/tokenizer.json")
print(f"Unigram Tokenizer saved to {output_dir}")
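
# A quick sanity check (a minimal sketch, assuming it runs right after the training
# script above, so `output_dir` is defined and tokenizer.json has been saved):
# reload the saved file and encode a short string to confirm the <s>/</s> template
# and the trained vocabulary behave as expected. The sample string is only a
# placeholder, not real corpus text.
reloaded = Tokenizer.from_file(f"{output_dir}/tokenizer.json")

sample = "balochi zuban"  # placeholder sample; replace with actual Balochi text
encoding = reloaded.encode(sample)

print("Tokens:", encoding.tokens)  # should start with <s> and end with </s>
print("IDs:", encoding.ids)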