import os

import pandas as pd
from tokenizers import Tokenizer, models, trainers, pre_tokenizers, normalizers
from tokenizers.processors import TemplateProcessing
# Load the cleaned dataset (a CSV-formatted file that must contain a 'text' column)
data_path = "tokenizer.txt"
df = pd.read_csv(data_path)
# Extract text column
texts = df['text'].dropna().tolist()
# Initialize a Unigram tokenizer
tokenizer = Tokenizer(models.Unigram())
# Set a normalizer (optional, but helpful for consistency)
tokenizer.normalizer = normalizers.Sequence([
    normalizers.NFKC(),       # Normalize text to NFKC form
    normalizers.Lowercase(),  # Convert to lowercase
])
# Set a pre-tokenizer to split text into initial units
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
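# Illustrative note (not part of the training pipeline): Whitespace splits on
# whitespace and separates punctuation, e.g.
#   pre_tokenizers.Whitespace().pre_tokenize_str("hello, world")
#   -> [("hello", (0, 5)), (",", (5, 6)), ("world", (7, 12))]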
# Define a trainer for the Unigram tokenizer
trainer = trainers.UnigramTrainer(
    vocab_size=150000,  # Vocabulary size; adjust based on corpus size
    special_tokens=["<pad>", "<s>", "</s>", "<unk>", "<mask>"],
    unk_token="<unk>",  # Token used for out-of-vocabulary pieces
)
# Train the tokenizer
print("Training tokenizer with Unigram...")
tokenizer.train_from_iterator(texts, trainer=trainer)
print("Tokenizer training complete.")
# Define post-processing for consistent format
tokenizer.post_processor = TemplateProcessing(
    single="<s> $A </s>",
    pair="<s> $A </s> </s> $B </s>",
    # Look up the IDs assigned during training rather than hard-coding them:
    # with the special-token list above, <s> is ID 1 and </s> is ID 2, not 0 and 1.
    special_tokens=[
        ("<s>", tokenizer.token_to_id("<s>")),
        ("</s>", tokenizer.token_to_id("</s>")),
    ],
)
# Save the tokenizer to a directory (create it first so save() does not fail)
output_dir = "balochi_unigram_tokenizer"
os.makedirs(output_dir, exist_ok=True)
tokenizer.save(f"{output_dir}/tokenizer.json")
print(f"Unigram Tokenizer saved to {output_dir}")