Datasets:
import itertools

from datasets import load_dataset
from datasets.utils.logging import set_verbosity_error
from tokenizers import Tokenizer
from tokenizers.decoders import WordPiece as WordPieceDecoder
from tokenizers.models import WordPiece as WordPieceModel
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.trainers import WordPieceTrainer

from utils import SampleBatch

# Silence the progress bars and info messages emitted by the datasets library.
set_verbosity_error()


def unpack_samples(batch: SampleBatch) -> list[str]:
    # Each entry in batch['translation'] maps language codes to sentences;
    # flatten every translation in the batch into one list of strings.
    iterator = (
        sample.values()
        for sample in batch['translation']
    )
    return list(itertools.chain.from_iterable(iterator))


def build_tokenizer(
    clean_text: bool = True,
    strip_accents: bool = True,
    lowercase: bool = True
) -> Tokenizer:
    # WordPiece model with BERT-style normalization, pre-tokenization and decoding.
    tokenizer = Tokenizer(
        model=WordPieceModel(unk_token='<UNK>')
    )
    tokenizer.normalizer = BertNormalizer(
        clean_text=clean_text,
        handle_chinese_chars=True,
        strip_accents=strip_accents,
        lowercase=lowercase
    )
    tokenizer.pre_tokenizer = BertPreTokenizer()
    tokenizer.decoder = WordPieceDecoder()
    return tokenizer


train_dset = load_dataset(
    path='nordmann2023',
    name='balanced',
    split='train'
)

# Keep accents and original casing when normalizing the training corpus.
tokenizer = build_tokenizer(
    clean_text=True,
    strip_accents=False,
    lowercase=False
)

# Stream the corpus in batches of 10,000 samples; each row contributes two
# sentences (one per language of the translation pair), hence num_rows * 2.
tokenizer.train_from_iterator(
    iterator=(
        unpack_samples(batch)
        for batch in train_dset.iter(batch_size=10000)
    ),
    trainer=WordPieceTrainer(
        vocab_size=40000,
        special_tokens=['<UNK>', '<CLS>', '<SEP>', '<PAD>', '<MASK>']
    ),
    length=train_dset.num_rows * 2
)

tokenizer.save(path='tokenizer.json')
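A quick way to sanity-check the result is to load the serialized tokenizer back from tokenizer.json and encode a sentence. The snippet below is a minimal sketch; the example sentence and the printed fields are illustrative and not part of the training script above.

from tokenizers import Tokenizer

# Load the tokenizer that was just written to disk.
tokenizer = Tokenizer.from_file('tokenizer.json')

# Encode an arbitrary example sentence and inspect the output.
encoding = tokenizer.encode('A short example sentence.')
print(encoding.tokens)  # WordPiece subword strings
print(encoding.ids)     # corresponding vocabulary ids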