import os

from tokenizers import Tokenizer, models, trainers, pre_tokenizers, normalizers
from tokenizers.processors import TemplateProcessing
import pandas as pd

# Load the cleaned dataset (expected to be CSV-formatted with a 'text' column)
data_path = "tokenizer.txt"
df = pd.read_csv(data_path)

# Extract text column
texts = df['text'].dropna().tolist()

# Initialize a Unigram tokenizer
tokenizer = Tokenizer(models.Unigram())

# Set a normalizer (optional, but helpful for consistency)
tokenizer.normalizer = normalizers.Sequence([
    normalizers.NFKC(),  # Normalize text to NFKC form
    normalizers.Lowercase()  # Convert to lowercase
])

# Set a pre-tokenizer to split text into initial units
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

# Define a trainer for the Unigram tokenizer
trainer = trainers.UnigramTrainer(
    vocab_size=150000,  # Vocabulary size; can adjust based on project size
    special_tokens=["<pad>", "<s>", "</s>", "<unk>", "<mask>"],
    unk_token="<unk>",  # Specify unknown token
)

# Train the tokenizer
print("Training tokenizer with Unigram...")
tokenizer.train_from_iterator(texts, trainer=trainer)
print("Tokenizer training complete.")

# Define post-processing for a consistent format. Look up the special-token IDs
# from the trained vocabulary instead of hard-coding 0 and 1, since the trainer
# assigns IDs following the order of its special_tokens list.
tokenizer.post_processor = TemplateProcessing(
    single="<s> $A </s>",
    pair="<s> $A </s> </s> $B </s>",
    special_tokens=[
        ("<s>", tokenizer.token_to_id("<s>")),
        ("</s>", tokenizer.token_to_id("</s>")),
    ],
)

# Save the tokenizer, creating the output directory if it does not exist
output_dir = "balochi_unigram_tokenizer"
os.makedirs(output_dir, exist_ok=True)
tokenizer.save(f"{output_dir}/tokenizer.json")
print(f"Unigram tokenizer saved to {output_dir}")
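
# --- Optional sanity check (illustrative sketch, not part of the training run) ---
# This assumes the file written above exists; it simply reloads the tokenizer
# with Tokenizer.from_file and encodes a placeholder string. Replace the sample
# text with a real Balochi sentence from the corpus.
reloaded = Tokenizer.from_file(f"{output_dir}/tokenizer.json")
sample = "example sentence for a quick tokenization check"  # hypothetical sample text
encoding = reloaded.encode(sample)
print("Tokens:", encoding.tokens)
print("IDs:", encoding.ids)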