ColleenMacklin committed on
Commit
d548878
1 Parent(s): 424d732

Upload . with huggingface_hub

Browse files
Files changed (3) hide show
  1. config.json +0 -11
  2. pytorch_model.bin +1 -1
  3. tokenizer_config.json +34 -1
config.json CHANGED
@@ -50,17 +50,6 @@
50
  "torch_dtype": "float32",
51
  "transformers_version": "4.20.1",
52
  "use_cache": true,
53
- "task_specific_params": {
54
- "text-generation": {
55
- "do_sample": true,
56
- "temperature": 0.7,
57
- "max_length": 100,
58
- "top_p": 0.95,
59
- "top_k": 50,
60
- "wait_for_model": true,
61
- "num_return_sequences": 2
62
- }
63
- },
64
  "vocab_size": 50257,
65
  "window_size": 256
66
  }
 
50
  "torch_dtype": "float32",
51
  "transformers_version": "4.20.1",
52
  "use_cache": true,
 
 
 
 
 
 
 
 
 
 
 
53
  "vocab_size": 50257,
54
  "window_size": 256
55
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:acb85f795dca2043117036d9a18bc67dc6bf07ef01f7e9e594cf35312c768fc5
3
  size 551182545
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c9277160b25c3958e8a20b9b98a98c67f4b98f4338f887d6ccb7dcaebdb8967
3
  size 551182545
tokenizer_config.json CHANGED
@@ -1 +1,34 @@
1
- {"bos_token": "<|startoftext|>", "eos_token": "<|endoftext|>", "pad_token": "<|pad|>", "model_max_length": 1024}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "eos_token": {
13
+ "__type": "AddedToken",
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ },
20
+ "errors": "replace",
21
+ "model_max_length": 2048,
22
+ "name_or_path": "EleutherAI/gpt-neo-125M",
23
+ "pad_token": null,
24
+ "special_tokens_map_file": null,
25
+ "tokenizer_class": "GPT2Tokenizer",
26
+ "unk_token": {
27
+ "__type": "AddedToken",
28
+ "content": "<|endoftext|>",
29
+ "lstrip": false,
30
+ "normalized": true,
31
+ "rstrip": false,
32
+ "single_word": false
33
+ }
34
+ }