DAVID TORRES committed
Commit 12e0d1c
1 Parent(s): b868d1f

End of training

README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-license: mit
-base_model: gpt2
+license: other
+base_model: facebook/opt-350m
 tags:
 - generated_from_trainer
 model-index:
@@ -13,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->

 # gpt2-test

-This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
+This model is a fine-tuned version of [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) on an unknown dataset.

 ## Model description

@@ -40,7 +40,7 @@ The following hyperparameters were used during training:
 - total_train_batch_size: 64
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
-- num_epochs: 1000
+- num_epochs: 300

 ### Training results

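For anyone reproducing the run, the hyperparameters listed in the updated card map roughly onto the following transformers TrainingArguments. This is a minimal sketch: the output directory and the per-device batch size / gradient-accumulation split are assumptions, since the card only states their product (total_train_batch_size = 64).

```python
from transformers import TrainingArguments

# Sketch of the training setup described in the card above.
# per_device_train_batch_size and gradient_accumulation_steps are
# assumed; only total_train_batch_size = 64 appears in the card.
args = TrainingArguments(
    output_dir="gpt2-test",           # assumed output directory
    per_device_train_batch_size=8,    # assumed
    gradient_accumulation_steps=8,    # assumed: 8 * 8 = 64 total
    num_train_epochs=300,             # per the updated card
    lr_scheduler_type="cosine",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```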
added_tokens.json CHANGED
@@ -1,3 +1,4 @@
 {
-  "<|endoftext|>": 50256
+  "</s>": 2,
+  "<pad>": 1
 }
config.json CHANGED
@@ -1,39 +1,31 @@
 {
-  "_name_or_path": "gpt2",
-  "activation_function": "gelu_new",
+  "_name_or_path": "facebook/opt-350m",
+  "_remove_final_layer_norm": false,
+  "activation_dropout": 0.0,
+  "activation_function": "relu",
   "architectures": [
-    "GPT2LMHeadModel"
+    "OPTForCausalLM"
   ],
-  "attn_pdrop": 0.1,
-  "bos_token_id": 50256,
-  "embd_pdrop": 0.1,
-  "eos_token_id": 50256,
-  "initializer_range": 0.02,
-  "layer_norm_epsilon": 1e-05,
-  "model_type": "gpt2",
-  "n_ctx": 512,
-  "n_embd": 768,
-  "n_head": 12,
-  "n_inner": null,
-  "n_layer": 12,
-  "n_positions": 1024,
-  "reorder_and_upcast_attn": false,
-  "resid_pdrop": 0.1,
-  "scale_attn_by_inverse_layer_idx": false,
-  "scale_attn_weights": true,
-  "summary_activation": null,
-  "summary_first_dropout": 0.1,
-  "summary_proj_to_labels": true,
-  "summary_type": "cls_index",
-  "summary_use_proj": true,
-  "task_specific_params": {
-    "text-generation": {
-      "do_sample": true,
-      "max_length": 50
-    }
-  },
+  "attention_dropout": 0.0,
+  "bos_token_id": 2,
+  "do_layer_norm_before": false,
+  "dropout": 0.1,
+  "enable_bias": true,
+  "eos_token_id": 2,
+  "ffn_dim": 4096,
+  "hidden_size": 1024,
+  "init_std": 0.02,
+  "layer_norm_elementwise_affine": true,
+  "layerdrop": 0.0,
+  "max_position_embeddings": 2048,
+  "model_type": "opt",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pad_token_id": 1,
+  "prefix": "</s>",
   "torch_dtype": "float32",
   "transformers_version": "4.34.0",
   "use_cache": true,
-  "vocab_size": 50257
+  "vocab_size": 50272,
+  "word_embed_proj_dim": 512
 }
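The replacement config matches stock OPT-350m, whose word embeddings live in a 512-dimensional space and are projected up to the 1024-dimensional hidden size (the `word_embed_proj_dim` field). A minimal sanity check, assuming Hub access:

```python
from transformers import AutoConfig

# Confirm the geometry recorded in the new config.json.
cfg = AutoConfig.from_pretrained("facebook/opt-350m")
assert cfg.model_type == "opt"
assert cfg.num_hidden_layers == 24 and cfg.num_attention_heads == 16
assert cfg.hidden_size == 1024
assert cfg.word_embed_proj_dim == 512  # embeddings are projected up to 1024
```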
generation_config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "_from_model_config": true,
-  "bos_token_id": 50256,
-  "eos_token_id": 50256,
+  "bos_token_id": 2,
+  "eos_token_id": 2,
+  "pad_token_id": 1,
   "transformers_version": "4.34.0"
 }
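These ids follow OPT's convention of using `</s>` (id 2) as both BOS and EOS, with `<pad>` at id 1. If this file needed regenerating by hand, a sketch like the following would produce it (the output directory is a placeholder):

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig(
    bos_token_id=2,   # </s>
    eos_token_id=2,   # </s>
    pad_token_id=1,   # <pad>
)
gen_cfg.save_pretrained("gpt2-test")  # writes generation_config.json
```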
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:125c6f72bf0c0db982df819e4dd8d21a9cedd93ce4f936cbe0ec57006e125cad
-size 497807197
+oid sha256:31aa75174f1171830bbbd66a21b8d5a782f80e1a78e5d0006d746d19af98d641
+size 1324917277
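The new LFS pointer size is consistent with the base-model swap: at 4 bytes per float32 parameter, 1,324,917,277 bytes is roughly 331M parameters (OPT-350m scale), while the old 497,807,197 bytes was roughly 124M (GPT-2 small). A quick back-of-the-envelope check:

```python
# 4 bytes per float32 parameter, ignoring a little pickle overhead.
new_params = 1_324_917_277 / 4   # ~331.2M -> OPT-350m scale
old_params = 497_807_197 / 4     # ~124.5M -> GPT-2 small scale
print(f"new ~{new_params / 1e6:.0f}M params, old ~{old_params / 1e6:.0f}M params")
```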
special_tokens_map.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "bos_token": "<|endoftext|>",
-  "eos_token": "<|endoftext|>",
-  "pad_token": "<|endoftext|>",
-  "unk_token": "<|endoftext|>"
+  "bos_token": "</s>",
+  "eos_token": "</s>",
+  "pad_token": "</s>",
+  "unk_token": "</s>"
 }
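After this change every special token, including padding, resolves to `</s>` at the tokenizer level, even though `added_tokens.json` registers a distinct `<pad>` at id 1. A quick sanity check; the repo id here is a placeholder for wherever this checkpoint actually lives:

```python
from transformers import AutoTokenizer

# "gpt2-test" is a placeholder repo id for this checkpoint.
tok = AutoTokenizer.from_pretrained("gpt2-test")
print(tok.bos_token, tok.eos_token, tok.pad_token)  # </s> </s> </s>
print(tok.convert_tokens_to_ids("</s>"))            # 2
print(tok.convert_tokens_to_ids("<pad>"))           # 1, per added_tokens.json
```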
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,20 +1,31 @@
 {
+  "add_bos_token": true,
   "add_prefix_space": false,
   "added_tokens_decoder": {
-    "50256": {
-      "content": "<|endoftext|>",
+    "1": {
+      "content": "<pad>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
   "additional_special_tokens": [],
-  "bos_token": "<|endoftext|>",
+  "bos_token": "</s>",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<|endoftext|>",
-  "model_max_length": 1024,
+  "eos_token": "</s>",
+  "errors": "replace",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
   "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": "<|endoftext|>"
+  "unk_token": "</s>"
 }
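The striking `model_max_length` value is not a real limit: it is the sentinel transformers writes when no explicit maximum is set, exactly `int(1e30)` with its float-rounding artifacts, so the effective context bound comes from `max_position_embeddings = 2048` in config.json. Note also that `tokenizer_class` staying `GPT2Tokenizer` is expected, since OPT reuses GPT-2's byte-level BPE.

```python
# The odd-looking constant is just int(1e30) after float rounding,
# the library's "no explicit limit" sentinel for model_max_length.
assert 1000000000000000019884624838656 == int(1e30)
```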
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e38f21fe8c53a606e1936dffd825ecbe8bf21079ba79fc747c573c45c792f40
+oid sha256:4a499fd307ebb77a86f6a84938c4bd727e699e35ea9c55d2a0139d41b74511ab
 size 4091
vocab.json CHANGED
The diff for this file is too large to render. See raw diff