"""Sanity-check a trained tokenizer by round-tripping random samples from the training text."""

import random

from tokenizers import Tokenizer

# Serialized tokenizer produced by the `tokenizers` library.
TOKENIZER_PATH = 'tokenizer.json'

tokenizer = Tokenizer.from_file(TOKENIZER_PATH)

# Plain-text corpus with one sample per line.
DATASET_PATH = 'tokenizer.txt'

# Number of random lines to encode and decode per run.
NUM_SAMPLES = 100


def load_dataset(file_path):
    """Loads the dataset file and returns a list of non-empty, stripped lines."""
    with open(file_path, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    return [line.strip() for line in lines if line.strip()]


def test_tokenizer(dataset, num_samples=100):
    """Selects random samples, tokenizes them, and prints the encode/decode round trip."""
    print(f"Testing tokenizer with {num_samples} random samples...\n")
    random_samples = random.sample(dataset, min(num_samples, len(dataset)))

    for i, sample in enumerate(random_samples, 1):
        encoded = tokenizer.encode(sample)
        print(f"Sample {i}:")
        print(f"Original: {sample}")
        print(f"Encoded: {encoded.ids}")
        print(f"Decoded: {tokenizer.decode(encoded.ids)}")
        # The language ID is hard-coded in this script.
        print("Language ID: 0")
        print("-" * 40)


if __name__ == "__main__":
    dataset = load_dataset(DATASET_PATH)
    test_tokenizer(dataset, NUM_SAMPLES)