Venkatesh4342 committed on
Commit 03fb4b8
1 Parent(s): 3a7b8a9

Upload tokenizer

special_tokens_map.json CHANGED
@@ -34,7 +34,6 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<|endoftext|>",
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 512,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
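
Note: setting "truncation" to null removes the truncation policy that was baked
into the fast tokenizer (right-truncate to 512, LongestFirst strategy). A short
sketch of how callers would now opt in per call, assuming the same tokenizer
object as above:

    # truncation is no longer applied by default; request the old behavior
    # explicitly (512 mirrors the max_length the diff removed):
    enc = tokenizer("some very long input ...", truncation=True, max_length=512)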
tokenizer_config.json CHANGED
@@ -179,7 +179,6 @@
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<|endoftext|>",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>",
   "vocab_size": 49152