import random

from tokenizers import Tokenizer

# Load the tokenizer
TOKENIZER_PATH = 'tokenizer.json'
tokenizer = Tokenizer.from_file(TOKENIZER_PATH)

# Dataset file path
DATASET_PATH = 'tokenizer.txt'  # Update with your dataset file path

# Number of random samples to test
NUM_SAMPLES = 100


def load_dataset(file_path):
    """Loads the dataset file and returns a list of non-empty, stripped lines."""
    with open(file_path, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    return [line.strip() for line in lines if line.strip()]


def test_tokenizer(dataset, num_samples=100):
    """Selects random samples, tokenizes them, and prints results."""
    print(f"Testing tokenizer with {num_samples} random samples...\n")
    random_samples = random.sample(dataset, min(num_samples, len(dataset)))
    for i, sample in enumerate(random_samples, 1):
        encoded = tokenizer.encode(sample)
        print(f"Sample {i}:")
        print(f"Original: {sample}")
        print(f"Encoded: {encoded.ids}")
        print(f"Decoded: {tokenizer.decode(encoded.ids)}")
        print("Language ID: 0")  # Balochi language ID is 0
        print("-" * 40)


if __name__ == "__main__":
    # Load dataset
    dataset = load_dataset(DATASET_PATH)
    # Test tokenizer
    test_tokenizer(dataset, NUM_SAMPLES)