luke-japanese-wordpiece-base / tokenizer_config.json
{
  "auto_map": {
    "AutoTokenizer": [
      "tokenization_luke_bert_japanese.LukeBertJapaneseTokenizer",
      null
    ]
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_lower_case": false,
  "do_subword_tokenize": true,
  "do_word_tokenize": true,
  "entity_mask2_token": "[MASK2]",
  "entity_mask_token": "[MASK]",
  "entity_pad_token": "[PAD]",
  "entity_token_1": "<ent>",
  "entity_token_2": "<ent2>",
  "entity_unk_token": "[UNK]",
  "jumanpp_kwargs": null,
  "mask_token": "[MASK]",
  "max_entity_length": 32,
  "max_mention_length": 30,
  "mecab_kwargs": {
    "mecab_dic": "unidic_lite"
  },
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "spm_file": null,
  "subword_tokenizer_type": "wordpiece",
  "sudachi_kwargs": null,
  "task": null,
  "tokenizer_class": "LukeBertJapaneseTokenizer",
  "unk_token": "[UNK]",
  "word_tokenizer_type": "mecab"
}
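
A minimal loading sketch, assuming the repository id is singletongue/luke-japanese-wordpiece-base (as the page header suggests). Because "auto_map" routes AutoTokenizer to the custom LukeBertJapaneseTokenizer class bundled with the repo, trust_remote_code=True is required; the MeCab word tokenizer with the unidic_lite dictionary additionally needs the fugashi and unidic-lite packages installed. The entity-span call at the end is an assumption that this class follows the standard LUKE tokenizer API.

from transformers import AutoTokenizer

# auto_map points at tokenization_luke_bert_japanese.LukeBertJapaneseTokenizer,
# so the custom code shipped with the repo must be trusted to run.
tokenizer = AutoTokenizer.from_pretrained(
    "singletongue/luke-japanese-wordpiece-base",  # assumed repo id
    trust_remote_code=True,
)

# Word-level tokenization uses MeCab with unidic_lite ("word_tokenizer_type":
# "mecab"), followed by WordPiece subwords ("subword_tokenizer_type": "wordpiece").
print(tokenizer.tokenize("東京でLUKEを試す"))

# Assumption: entity-aware encoding mirrors the standard LUKE tokenizer API,
# where character-level mention spans are passed via entity_spans (bounded by
# max_entity_length=32 and max_mention_length=30 in this config).
encoding = tokenizer("東京でLUKEを試す", entity_spans=[(0, 2)])
print(encoding.keys())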