electra-base-japanese-discriminator / tokenizer_config.json
r-terada's picture
Upload tokenizer_config.json
fb1ba61
raw
history blame contribute delete
421 Bytes
{
"tokenizer_class": "ElectraSudachipyTokenizer",
"do_lower_case": false,
"do_word_tokenize": true,
"do_subword_tokenize": true,
"word_tokenizer_type": "sudachipy",
"word_form_type": "dictionary_and_surface",
"subword_tokenizer_type": "wordpiece",
"model_max_length": 512,
"sudachipy_kwargs": {
  "split_mode": "A",
  "dict_type": "core"
},
"auto_map": {
  "AutoTokenizer": ["modeling.ElectraSudachipyTokenizer", null]
}
}