from tokenizers import Tokenizer, models, trainers, pre_tokenizers, normalizers
from tokenizers.processors import TemplateProcessing
import pandas as pd
import os

# Load the training corpus (a CSV-formatted file with a 'text' column).
data_path = "tokenizer.txt"
df = pd.read_csv(data_path)
texts = df['text'].dropna().tolist()

# Build a tokenizer backed by a Unigram language model.
tokenizer = Tokenizer(models.Unigram())

# Normalize input text: Unicode compatibility normalization (NFKC), then lowercasing.
tokenizer.normalizer = normalizers.Sequence([
    normalizers.NFKC(),
    normalizers.Lowercase()
])
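
# Optional sanity check of the normalization pipeline (illustrative only; the
# sample string is an arbitrary placeholder, not taken from the corpus).
# NFKC folds the full-width characters to ASCII before lowercasing is applied.
print(tokenizer.normalizer.normalize_str("ＴＥＳＴ Ｔｅｘｔ"))  # expected: "test text"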

# Split on whitespace and isolate punctuation before the Unigram model is applied.
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

# Configure the Unigram trainer with the target vocabulary size and special tokens.
trainer = trainers.UnigramTrainer(
    vocab_size=150000,
    special_tokens=["<pad>", "<s>", "</s>", "<unk>", "<mask>"],
    unk_token="<unk>",
)

print("Training tokenizer with Unigram...")
tokenizer.train_from_iterator(texts, trainer=trainer)
print("Tokenizer training complete.")

# Add RoBERTa-style special tokens: <s> A </s> for single sequences and
# <s> A </s> </s> B </s> for pairs. Look the IDs up from the trained vocabulary
# rather than hard-coding them, so they match the trainer's special-token order.
tokenizer.post_processor = TemplateProcessing(
    single="<s> $A </s>",
    pair="<s> $A </s> </s> $B </s>",
    special_tokens=[
        ("<s>", tokenizer.token_to_id("<s>")),
        ("</s>", tokenizer.token_to_id("</s>")),
    ],
)

# Save the tokenizer, creating the output directory if it does not already exist.
output_dir = "balochi_unigram_tokenizer"
os.makedirs(output_dir, exist_ok=True)
tokenizer.save(f"{output_dir}/tokenizer.json")
print(f"Unigram Tokenizer saved to {output_dir}")