Muennighoff committed on
Commit d4a34eb
1 Parent(s): f2ae0d5
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. mup-1b-100m-e3-full-gpt2lmmup.json +1 -0
  2. mup-200m-100m-e3-full-gpt2lmmup.json +1 -0
  3. mup-200m-100m-e3/config.json +34 -0
  4. mup-200m-100m-e3/pytorch_model.bin +3 -0
  5. mup-200m-100m-e3/training_args.bin +3 -0
  6. mup-2b-100m-e3-full-gpt2lmmup.json +1 -0
  7. mup-2b-100m-e3/checkpoint-100/config.json +34 -0
  8. mup-2b-100m-e3/checkpoint-100/optimizer.pt +3 -0
  9. mup-2b-100m-e3/checkpoint-100/pytorch_model.bin +3 -0
  10. mup-2b-100m-e3/checkpoint-100/rng_state.pth +3 -0
  11. mup-2b-100m-e3/checkpoint-100/scheduler.pt +3 -0
  12. mup-2b-100m-e3/checkpoint-100/trainer_state.json +15 -0
  13. mup-2b-100m-e3/checkpoint-100/training_args.bin +3 -0
  14. mup-2b-100m-e3/checkpoint-200/config.json +34 -0
  15. mup-2b-100m-e3/checkpoint-200/optimizer.pt +3 -0
  16. mup-2b-100m-e3/checkpoint-200/pytorch_model.bin +3 -0
  17. mup-2b-100m-e3/checkpoint-200/rng_state.pth +3 -0
  18. mup-2b-100m-e3/checkpoint-200/scheduler.pt +3 -0
  19. mup-2b-100m-e3/checkpoint-200/trainer_state.json +15 -0
  20. mup-2b-100m-e3/checkpoint-200/training_args.bin +3 -0
  21. mup-2b-100m-e3/checkpoint-300/config.json +34 -0
  22. mup-2b-100m-e3/checkpoint-300/optimizer.pt +3 -0
  23. mup-2b-100m-e3/checkpoint-300/pytorch_model.bin +3 -0
  24. mup-2b-100m-e3/checkpoint-300/rng_state.pth +3 -0
  25. mup-2b-100m-e3/checkpoint-300/scheduler.pt +3 -0
  26. mup-2b-100m-e3/checkpoint-300/trainer_state.json +15 -0
  27. mup-2b-100m-e3/checkpoint-300/training_args.bin +3 -0
  28. mup-2b-100m-e3/config.json +34 -0
  29. mup-2b-100m-e3/pytorch_model.bin +3 -0
  30. mup-2b-100m-e3/training_args.bin +3 -0
  31. mup-2b5-100m-e3-full-gpt2lmmup.json +1 -0
  32. mup-2b5-100m-e3/checkpoint-100/config.json +34 -0
  33. mup-2b5-100m-e3/checkpoint-100/optimizer.pt +3 -0
  34. mup-2b5-100m-e3/checkpoint-100/pytorch_model-00001-of-00002.bin +3 -0
  35. mup-2b5-100m-e3/checkpoint-100/pytorch_model-00002-of-00002.bin +3 -0
  36. mup-2b5-100m-e3/checkpoint-100/pytorch_model.bin.index.json +180 -0
  37. mup-2b5-100m-e3/checkpoint-100/rng_state.pth +3 -0
  38. mup-2b5-100m-e3/checkpoint-100/scaler.pt +3 -0
  39. mup-2b5-100m-e3/checkpoint-100/scheduler.pt +3 -0
  40. mup-2b5-100m-e3/checkpoint-100/trainer_state.json +15 -0
  41. mup-2b5-100m-e3/checkpoint-100/training_args.bin +3 -0
  42. mup-2b5-100m-e3/config.json +34 -0
  43. mup-2b5-100m-e3/pytorch_model-00001-of-00002.bin +3 -0
  44. mup-2b5-100m-e3/pytorch_model-00002-of-00002.bin +3 -0
  45. mup-2b5-100m-e3/pytorch_model.bin.index.json +180 -0
  46. mup-2b5-100m-e3/training_args.bin +3 -0
  47. mup-3b-100m-e3-full-gpt2lmmup.json +1 -0
  48. mup-3b-100m-e3/checkpoint-100/config.json +34 -0
  49. mup-3b-100m-e3/checkpoint-100/optimizer.pt +3 -0
  50. mup-3b-100m-e3/checkpoint-100/pytorch_model-00001-of-00002.bin +3 -0
mup-1b-100m-e3-full-gpt2lmmup.json ADDED
@@ -0,0 +1 @@
+ {"eval_loss": 10.503863334655762, "eval_runtime": 35656.762, "eval_samples_per_second": 4.751, "eval_steps_per_second": 0.148, "epoch": 0.01}
mup-200m-100m-e3-full-gpt2lmmup.json ADDED
@@ -0,0 +1 @@
+ {"eval_loss": 10.863893508911133, "eval_runtime": 35678.3953, "eval_samples_per_second": 4.748, "eval_steps_per_second": 0.148}
mup-200m-100m-e3/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.1,
+ "intermediate_size": 12288,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 3072,
+ "n_head": 64,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
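Note: this is a stock GPT-2 config plus a few non-standard keys used by the muP variant, most notably attn_mult (the intermediate_size and num_layers entries appear to mirror 4*n_embd and n_layer). Since transformers' PretrainedConfig keeps unknown keys as plain attributes, the file can be loaded as-is; a minimal sketch, assuming a local copy:

from transformers import GPT2Config

cfg = GPT2Config.from_json_file("mup-200m-100m-e3/config.json")  # hypothetical local path
print(cfg.n_embd, cfg.n_head, cfg.n_layer)   # 3072 64 12
print(getattr(cfg, "attn_mult", None))       # 8.0, the muP attention multiplier (non-standard key)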
mup-200m-100m-e3/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:600250335c93538740793cc7e8a4682defb6f7e202a3641016cf4ee1754bbe75
+ size 6080539805
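Note: the .bin/.pt/.pth entries in this diff are Git LFS pointer files rather than the binaries themselves: three lines giving the pointer spec version, the SHA-256 of the stored object, and its size in bytes (6,080,539,805 bytes ≈ 5.7 GiB for this model file). A small illustrative parser, not part of the repo:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value", e.g. "size 6080539805".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    fields["size"] = int(fields["size"])
    return fields

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:600250335c93538740793cc7e8a4682defb6f7e202a3641016cf4ee1754bbe75\n"
    "size 6080539805\n"
)
print(pointer["oid"], pointer["size"])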
mup-200m-100m-e3/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a9b9d007760086d2e55ee15759f906d50d344e948eb0bca87f0e434c9a4dd63
+ size 3387
mup-2b-100m-e3-full-gpt2lmmup.json ADDED
@@ -0,0 +1 @@
+ {"eval_loss": 9.270681381225586, "eval_runtime": 46206.7705, "eval_samples_per_second": 3.666, "eval_steps_per_second": 0.229}
mup-2b-100m-e3/checkpoint-100/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.01,
+ "intermediate_size": 13312,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 3328,
+ "n_head": 128,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
mup-2b-100m-e3/checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:859a1cea6ab663b2651f6b47d6e85d9acb2a67b1ddf7200f7e5c350ab22c1f3b
+ size 14128712873
mup-2b-100m-e3/checkpoint-100/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9aff050f6759cb9c9bd9fb82d5908dc746bd5fa9f0754b969286830370959df2
+ size 7076932005
mup-2b-100m-e3/checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c85e1a7fcee9f0a4e6f8ebf1dc4a63a8c96bd1dd0a67ff0ff15c506aae95304
+ size 14575
mup-2b-100m-e3/checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f41127ce2e7b57031bf845923aadf69d1d932623d3cc306538c8cffe3f95b12f
+ size 691
mup-2b-100m-e3/checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.2631010986526346,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [],
+ "max_steps": 380,
+ "num_train_epochs": 1,
+ "total_flos": 2.50936282841088e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
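Note: trainer_state.json records the Trainer's progress at the checkpoint. Here epoch is roughly global_step / max_steps (100 / 380 ≈ 0.263) for this single-epoch run, and log_history is empty because no logging step had been written yet. A tiny consistency check, assuming a local copy:

import json

with open("mup-2b-100m-e3/checkpoint-100/trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)

# 100 / 380 = 0.2632, close to the recorded epoch of 0.2631.
print(state["global_step"] / state["max_steps"], state["epoch"])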
mup-2b-100m-e3/checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a529bfebc81b3f76599a2706a7dedaa2e159f90434d17d7113ba289ad90ba128
+ size 3387
mup-2b-100m-e3/checkpoint-200/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.01,
+ "intermediate_size": 13312,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 3328,
+ "n_head": 128,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
mup-2b-100m-e3/checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc8643fa7f495fb7235042b7a64c424947f35a3834c2e7ecaf81a5faff11ddb3
+ size 14128712873
mup-2b-100m-e3/checkpoint-200/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64362990e2eaa64481e967769bacc656341aa1ab8490f6c9a62abaa928ac7f9d
+ size 7076932005
mup-2b-100m-e3/checkpoint-200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:850a75d03b68709ebdb3b0429ccf1c2bfbc264ba389a899cc590928049b771ed
+ size 14575
mup-2b-100m-e3/checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ff11a668fd015bf8fec7ef706376a383794600ae7f4f86f3b9aeffe27fd39be
+ size 691
mup-2b-100m-e3/checkpoint-200/trainer_state.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.5262021973052692,
+ "global_step": 200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [],
+ "max_steps": 380,
+ "num_train_epochs": 1,
+ "total_flos": 5.01872565682176e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
mup-2b-100m-e3/checkpoint-200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a529bfebc81b3f76599a2706a7dedaa2e159f90434d17d7113ba289ad90ba128
+ size 3387
mup-2b-100m-e3/checkpoint-300/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.01,
+ "intermediate_size": 13312,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 3328,
+ "n_head": 128,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
mup-2b-100m-e3/checkpoint-300/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83e214ea9a2859b68902931336a551a4a675fe79c227fa5b6bf330b7d270514b
+ size 14128712873
mup-2b-100m-e3/checkpoint-300/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0b6c22fcc540890eee68ac4649802608865e27c9584d1917a079d4034b8ab41
+ size 7076932005
mup-2b-100m-e3/checkpoint-300/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37467b99308898a6ecf9cd44334c15ea2bf2d409cc4222f629e0118cdb7e93e6
+ size 14575
mup-2b-100m-e3/checkpoint-300/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30fd152d86dafeffe2455270e22218f0b6cab42a3c9b31a01b2bc6d8ceadc7c4
+ size 691
mup-2b-100m-e3/checkpoint-300/trainer_state.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.7893032959579038,
+ "global_step": 300,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [],
+ "max_steps": 380,
+ "num_train_epochs": 1,
+ "total_flos": 7.52808848523264e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
mup-2b-100m-e3/checkpoint-300/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a529bfebc81b3f76599a2706a7dedaa2e159f90434d17d7113ba289ad90ba128
+ size 3387
mup-2b-100m-e3/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.01,
+ "intermediate_size": 13312,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 3328,
+ "n_head": 128,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
mup-2b-100m-e3/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c72ffdc020dcc3f482979a1b900d14d4832f0f3f07845f1feaba439e3d93b68
+ size 7076932005
mup-2b-100m-e3/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0360c4e8f458bf1a55bfbc7c49d3cd1b51ecf6c15f5985ea0900cb0d4d27bfb
+ size 3387
mup-2b5-100m-e3-full-gpt2lmmup.json ADDED
@@ -0,0 +1 @@
+ {"eval_loss": 6.969681739807129, "eval_runtime": 14651.8437, "eval_samples_per_second": 11.563, "eval_steps_per_second": 0.723, "epoch": 1.0}
mup-2b5-100m-e3/checkpoint-100/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.01,
+ "intermediate_size": 16384,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 4096,
+ "n_head": 128,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
mup-2b5-100m-e3/checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:049fa73dc06e31ac0fc557bbd1880e3c30b86604acea743361e3bbbe2ef4cbe2
+ size 21013034333
mup-2b5-100m-e3/checkpoint-100/pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa17c85fa110ada3a0858e4cb2de270ecd3ee40f30e9d6684d3f2c21a11cac20
+ size 9982107965
mup-2b5-100m-e3/checkpoint-100/pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:305257ca9d4a2ccbff94906606e7f27125926dc9ce016218df1c6f2e2b6033bb
+ size 1360399056
mup-2b5-100m-e3/checkpoint-100/pytorch_model.bin.index.json ADDED
@@ -0,0 +1,180 @@
+ {
+ "metadata": {
+ "total_size": 11342446640
+ },
+ "weight_map": {
+ "lm_head.weight": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.0.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.11.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.11.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.2.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.ln_f.bias": "pytorch_model-00002-of-00002.bin",
+ "transformer.ln_f.weight": "pytorch_model-00002-of-00002.bin",
+ "transformer.wpe.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.wte.weight": "pytorch_model-00001-of-00002.bin"
+ }
+ }
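Note: pytorch_model.bin.index.json is the standard index for a sharded checkpoint: metadata.total_size is the total parameter byte count, and weight_map assigns each parameter name to the shard file that holds it (here only the last block's MLP, the final layer norm, and lm_head live in shard 2). A minimal sketch of reassembling the full state dict from such an index, assuming the shards are available locally:

import json, os
import torch

ckpt_dir = "mup-2b5-100m-e3/checkpoint-100"  # hypothetical local path

with open(os.path.join(ckpt_dir, "pytorch_model.bin.index.json")) as f:
    index = json.load(f)

state_dict = {}
# Load each shard once and pick out the tensors the weight_map assigns to it.
for shard_file in sorted(set(index["weight_map"].values())):
    shard = torch.load(os.path.join(ckpt_dir, shard_file), map_location="cpu")
    for name, mapped_file in index["weight_map"].items():
        if mapped_file == shard_file:
            state_dict[name] = shard[name]

print(len(state_dict), "tensors;", index["metadata"]["total_size"], "bytes reported")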
mup-2b5-100m-e3/checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f851ccd36da51400a8dc63a45a85ee8ac4dcfac83932885c7b52532d228405d1
+ size 14575
mup-2b5-100m-e3/checkpoint-100/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfa44e8523f62833816d29aa6c576eaa7783e3bbdb3e132e248b1d8aaee6132b
+ size 557
mup-2b5-100m-e3/checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f7e566314834b141fb5740c7bd281b7d032132544cc57f4aa141cad9641c3f9
+ size 691
mup-2b5-100m-e3/checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.5262021973052692,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [],
+ "max_steps": 190,
+ "num_train_epochs": 1,
+ "total_flos": 7.601860185686016e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
mup-2b5-100m-e3/checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f53afd7a4c3b2020710831470410e571d9481019dd9692f0f5924a4abc0cf1e3
+ size 3387
mup-2b5-100m-e3/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.01,
+ "intermediate_size": 16384,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 4096,
+ "n_head": 128,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
mup-2b5-100m-e3/pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1df6267cca6b60a123b3b9611c5ccbd8f91b5a38203bf50a124e1fa1c0363dfc
+ size 9982107965
mup-2b5-100m-e3/pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cde8ef624b72ede114ccb1793cf4ffa3d59c32a0688f33b13f6636542331551
+ size 1360399056
mup-2b5-100m-e3/pytorch_model.bin.index.json ADDED
@@ -0,0 +1,180 @@
+ {
+ "metadata": {
+ "total_size": 11342446640
+ },
+ "weight_map": {
+ "lm_head.weight": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.0.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.11.mlp.c_fc.bias": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.11.mlp.c_fc.weight": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.11.mlp.c_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "transformer.h.2.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.attn.masked_bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.ln_1.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.ln_2.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.mlp.c_fc.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.mlp.c_fc.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.mlp.c_proj.bias": "pytorch_model-00001-of-00002.bin",
+ "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.ln_f.bias": "pytorch_model-00002-of-00002.bin",
+ "transformer.ln_f.weight": "pytorch_model-00002-of-00002.bin",
+ "transformer.wpe.weight": "pytorch_model-00001-of-00002.bin",
+ "transformer.wte.weight": "pytorch_model-00001-of-00002.bin"
+ }
+ }
mup-2b5-100m-e3/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f53afd7a4c3b2020710831470410e571d9481019dd9692f0f5924a4abc0cf1e3
+ size 3387
mup-3b-100m-e3-full-gpt2lmmup.json ADDED
@@ -0,0 +1 @@
+ {"eval_loss": 6.870556831359863, "eval_runtime": 16069.3588, "eval_samples_per_second": 10.543, "eval_steps_per_second": 0.659, "epoch": 1.0}
mup-3b-100m-e3/checkpoint-100/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.01,
+ "intermediate_size": 17408,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 4352,
+ "n_head": 128,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
mup-3b-100m-e3/checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:590f451475b76a5bb9001c773d83e4b88c75c838fed4534df9f033eadba5470b
+ size 23609798041
mup-3b-100m-e3/checkpoint-100/pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61159c7f524a355b65044bed24541028017265e83427b9c19a9a1a75c006948f
+ size 9997736349