{
  "added_tokens_decoder": {
    "128000": {
      "content": "<|begin_of_text|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "128001": {
      "content": "<|end_of_text|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "128002": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "128003": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|begin_of_text|>",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|end_of_text|>",
  "extra_special_tokens": {},
  "model_input_names": [
    "input_ids",
    "attention_mask"
  ],
  "model_max_length": 65536,
  "pad_token": "<|end_of_text|>",
  "tokenizer_class": "PreTrainedTokenizerFast"
}
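
For reference, a minimal sketch of how this config is consumed by the Hugging Face `transformers` library. The directory path `./my_tokenizer` is a hypothetical placeholder; it is assumed to contain this file as `tokenizer_config.json` alongside the `tokenizer.json` vocabulary file that `PreTrainedTokenizerFast` requires.

```python
# Minimal sketch: loading a tokenizer governed by the config above.
# "./my_tokenizer" is a hypothetical local directory holding
# tokenizer_config.json (this file) plus tokenizer.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./my_tokenizer")

# Special tokens resolve to the entries declared in added_tokens_decoder.
print(tokenizer.bos_token)  # <|begin_of_text|> (id 128000)
print(tokenizer.eos_token)  # <|end_of_text|>   (id 128001)
print(tokenizer.pad_token)  # <|end_of_text|>   (shared with EOS)
print(tokenizer.convert_tokens_to_ids("<|im_start|>"))  # 128002
print(tokenizer.convert_tokens_to_ids("<|im_end|>"))    # 128003

# "model_input_names" determines which fields an encode call returns.
enc = tokenizer("Hello, world!")
print(sorted(enc.keys()))  # ['attention_mask', 'input_ids']

# With truncation enabled and no explicit max_length, sequences are
# capped at model_max_length (65536 tokens here).
enc = tokenizer("Hello, world!", truncation=True)
```

Note that `pad_token` reuses `<|end_of_text|>` rather than defining a dedicated padding token, a common choice when the base vocabulary ships without one; the attention mask keeps padded positions from influencing the model.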