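"""Sanity-check script for a trained tokenizer.

Loads `tokenizer.json`, draws random lines from a text dataset, and prints
the encode/decode round trip (plus the fixed Balochi language ID) for each
random sample.
"""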
import random
from tokenizers import Tokenizer
# Load the tokenizer
TOKENIZER_PATH = 'tokenizer.json'
tokenizer = Tokenizer.from_file(TOKENIZER_PATH)
# Dataset file path
DATASET_PATH = 'tokenizer.txt' # Update with your dataset file path
# Number of random samples to test
NUM_SAMPLES = 100

def load_dataset(file_path):
    """Loads the dataset file and returns a list of stripped, non-empty lines."""
    with open(file_path, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    return [line.strip() for line in lines if line.strip()]

def test_tokenizer(dataset, num_samples=100):
    """Selects random samples, tokenizes them, and prints the results."""
    print(f"Testing tokenizer with {num_samples} random samples...\n")
    random_samples = random.sample(dataset, min(num_samples, len(dataset)))
    for i, sample in enumerate(random_samples, 1):
        encoded = tokenizer.encode(sample)
        print(f"Sample {i}:")
        print(f"Original: {sample}")
        print(f"Encoded: {encoded.ids}")
        # Note: the decoded text may differ slightly from the original
        # (e.g. whitespace normalization), depending on the tokenizer's settings.
        print(f"Decoded: {tokenizer.decode(encoded.ids)}")
        print("Language ID: 0")  # Balochi language ID is 0
        print("-" * 40)

if __name__ == "__main__":
    # Load the dataset
    dataset = load_dataset(DATASET_PATH)
    # Test the tokenizer on random samples
    test_tokenizer(dataset, NUM_SAMPLES)