yongzx committed
Commit f6f9446
1 Parent(s): e8c60b6

rm --cached *

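Note: git rm --cached untracks files while leaving them in the working tree. It is the usual fix when files matched by .gitignore rules (here, the checkpoint-*/ and pilot_*/ patterns below) were committed before those rules could take effect. A minimal sketch of the presumed commands, hedged because only the commit message itself is recorded:

    # Untrack everything but keep the files on disk;
    # -r recurses into the checkpoint-*/ directories.
    git rm -r --cached .
    git commit -m "rm --cached *"
    git push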
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
Files changed (50):
  1. .gitattributes +0 -41
  2. .gitignore +0 -11
  3. all_results.json +0 -14
  4. checkpoint-10000/config.json +0 -39
  5. checkpoint-10000/optimizer.pt +0 -3
  6. checkpoint-10000/pytorch_model.bin +0 -3
  7. checkpoint-10000/rng_state.pth +0 -3
  8. checkpoint-10000/scheduler.pt +0 -3
  9. checkpoint-10000/special_tokens_map.json +0 -1
  10. checkpoint-10000/tokenizer.json +0 -3
  11. checkpoint-10000/tokenizer_config.json +0 -1
  12. checkpoint-10000/trainer_state.json +0 -56
  13. checkpoint-10000/training_args.bin +0 -3
  14. checkpoint-10000/wikiann-az-results.txt +0 -8
  15. checkpoint-12500/config.json +0 -39
  16. checkpoint-12500/optimizer.pt +0 -3
  17. checkpoint-12500/pytorch_model.bin +0 -3
  18. checkpoint-12500/rng_state.pth +0 -3
  19. checkpoint-12500/scheduler.pt +0 -3
  20. checkpoint-12500/special_tokens_map.json +0 -1
  21. checkpoint-12500/tokenizer.json +0 -3
  22. checkpoint-12500/tokenizer_config.json +0 -1
  23. checkpoint-12500/trainer_state.json +0 -86
  24. checkpoint-12500/training_args.bin +0 -3
  25. checkpoint-12500/wikiann-az-results.txt +0 -8
  26. checkpoint-15000/config.json +0 -39
  27. checkpoint-15000/optimizer.pt +0 -3
  28. checkpoint-15000/pytorch_model.bin +0 -3
  29. checkpoint-15000/rng_state.pth +0 -3
  30. checkpoint-15000/scheduler.pt +0 -3
  31. checkpoint-15000/special_tokens_map.json +0 -1
  32. checkpoint-15000/tokenizer.json +0 -3
  33. checkpoint-15000/tokenizer_config.json +0 -1
  34. checkpoint-15000/trainer_state.json +0 -76
  35. checkpoint-15000/training_args.bin +0 -3
  36. checkpoint-20000/config.json +0 -39
  37. checkpoint-20000/optimizer.pt +0 -3
  38. checkpoint-20000/pytorch_model.bin +0 -3
  39. checkpoint-20000/rng_state.pth +0 -3
  40. checkpoint-20000/scheduler.pt +0 -3
  41. checkpoint-20000/special_tokens_map.json +0 -1
  42. checkpoint-20000/tokenizer.json +0 -3
  43. checkpoint-20000/tokenizer_config.json +0 -1
  44. checkpoint-20000/trainer_state.json +0 -96
  45. checkpoint-20000/training_args.bin +0 -3
  46. checkpoint-2500/config.json +0 -39
  47. checkpoint-2500/optimizer.pt +0 -3
  48. checkpoint-2500/pytorch_model.bin +0 -3
  49. checkpoint-2500/rng_state.pth +0 -3
  50. checkpoint-2500/scheduler.pt +0 -3
.gitattributes DELETED
@@ -1,41 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- checkpoint-10000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-12500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-15000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-20000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-2500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-25000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-5000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-7500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- tokenizer.json filter=lfs diff=lfs merge=lfs -text
 
.gitignore DELETED
@@ -1,11 +0,0 @@
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
 
all_results.json DELETED
@@ -1,14 +0,0 @@
- {
- "epoch": 181.16,
- "eval_loss": 3.5487985610961914,
- "eval_runtime": 512.7811,
- "eval_samples": 4906,
- "eval_samples_per_second": 9.567,
- "eval_steps_per_second": 4.784,
- "perplexity": 34.7715165292787,
- "train_loss": 3.6800425,
- "train_runtime": 52880.0792,
- "train_samples": 1000,
- "train_samples_per_second": 3.782,
- "train_steps_per_second": 0.473
- }
 
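Note: the reported perplexity matches the evaluation loss, since perplexity = exp(eval_loss) and exp(3.5488) ≈ 34.77.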
checkpoint-10000/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
 
checkpoint-10000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:89473fe4c6043f80ef69c738a10ca98562441638cc321179f695bacbddb29e11
- size 2254269
 
checkpoint-10000/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ec80f2e8e5cdaee9d52b1e98cb6a801d4d43b3172dc317a3a52187d615a9f2f7
- size 2236955191
 
checkpoint-10000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d74e1315fe06c1a1154b37c656f2f90c1656206e1027b9f3bd60f7fc9d8f41f5
- size 14503
 
checkpoint-10000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ebae5cf74f470a9dc57b090feb9de29d57aa2d381061d1a61fd32b3c3221556b
- size 623
 
checkpoint-10000/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
 
checkpoint-10000/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
- size 14500443
 
checkpoint-10000/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-350m", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
 
checkpoint-10000/trainer_state.json DELETED
@@ -1,56 +0,0 @@
- {
- "best_metric": 3.6707117557525635,
- "best_model_checkpoint": "/users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_1000samples_-1vocab_original-frozen/checkpoint-10000",
- "epoch": 72.46126126126126,
- "global_step": 10000,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 18.12,
- "learning_rate": 9e-05,
- "loss": 4.2544,
- "step": 2500
- },
- {
- "epoch": 36.23,
- "learning_rate": 8e-05,
- "loss": 3.9123,
- "step": 5000
- },
- {
- "epoch": 36.23,
- "eval_loss": 3.843057632446289,
- "eval_runtime": 513.4541,
- "eval_samples_per_second": 9.555,
- "eval_steps_per_second": 4.777,
- "step": 5000
- },
- {
- "epoch": 54.35,
- "learning_rate": 7e-05,
- "loss": 3.7584,
- "step": 7500
- },
- {
- "epoch": 72.46,
- "learning_rate": 6e-05,
- "loss": 3.6674,
- "step": 10000
- },
- {
- "epoch": 72.46,
- "eval_loss": 3.6707117557525635,
- "eval_runtime": 512.9035,
- "eval_samples_per_second": 9.565,
- "eval_steps_per_second": 4.783,
- "step": 10000
- }
- ],
- "max_steps": 25000,
- "num_train_epochs": 182,
- "total_flos": 1.4939552059333018e+17,
- "trial_name": null,
- "trial_params": null
- }
 
checkpoint-10000/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:511eb83bbd6787b86308ce4eb4e32d6cc27314d329c212c555d84efc6ad6cd39
- size 3375
 
checkpoint-10000/wikiann-az-results.txt DELETED
@@ -1,8 +0,0 @@
- ==================================================
- Results
- ==================================================
- Model: /users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_1000samples_-1vocab_original-frozen/checkpoint-10000
- [0.37780898876404495, 0.36464857341684065, 0.3656664340544313, 0.3886156008432889, 0.37621023513139695, 0.39790209790209796, 0.3835616438356164, 0.373989218328841, 0.37212495708891175, 0.3786238211666085]
- 37.79
- 0.96
- ==================================================
 
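Note: in the results file above, the two figures after the score list (37.79 and 0.96) appear to be the mean and standard deviation of the ten WikiANN F1 scores, scaled to percent; the scores do average to 0.3779.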
checkpoint-12500/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
 
checkpoint-12500/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5970c77e08dd285ccd94a1f5c55d69bd605a422b5d2efbcd7428767e3da92241
- size 2254269
 
checkpoint-12500/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1367d8f3ff2e6e52e1466116315f08c450ef61a17b91ecb83cdcb38a204d599c
- size 2236955191
 
checkpoint-12500/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e78992ee794b94f2fbf6fa6e01edca246c41508ad3de33acf1134d85e32c9873
- size 14503
 
checkpoint-12500/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d270c6e3000cbdb534f7db7e774ca17393c2523690c8058754d752dd5b11a93a
- size 623
 
checkpoint-12500/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
 
checkpoint-12500/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
- size 14500443
 
checkpoint-12500/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-350m", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
 
checkpoint-12500/trainer_state.json DELETED
@@ -1,86 +0,0 @@
- {
- "best_metric": 3.2581348419189453,
- "best_model_checkpoint": "/users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_1000samples_-1vocab_original-frozen/checkpoint-12500",
- "epoch": 90.57657657657657,
- "global_step": 12500,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 18.12,
- "learning_rate": 0.0008,
- "loss": 3.676,
- "step": 2500
- },
- {
- "epoch": 18.12,
- "eval_loss": 3.457474708557129,
- "eval_runtime": 529.7099,
- "eval_samples_per_second": 9.262,
- "eval_steps_per_second": 4.631,
- "step": 2500
- },
- {
- "epoch": 36.23,
- "learning_rate": 0.0006,
- "loss": 3.3098,
- "step": 5000
- },
- {
- "epoch": 36.23,
- "eval_loss": 3.3395490646362305,
- "eval_runtime": 529.196,
- "eval_samples_per_second": 9.271,
- "eval_steps_per_second": 4.635,
- "step": 5000
- },
- {
- "epoch": 54.35,
- "learning_rate": 0.0004,
- "loss": 3.2132,
- "step": 7500
- },
- {
- "epoch": 54.35,
- "eval_loss": 3.292879581451416,
- "eval_runtime": 528.2545,
- "eval_samples_per_second": 9.287,
- "eval_steps_per_second": 4.644,
- "step": 7500
- },
- {
- "epoch": 72.46,
- "learning_rate": 0.0002,
- "loss": 3.1654,
- "step": 10000
- },
- {
- "epoch": 72.46,
- "eval_loss": 3.267232894897461,
- "eval_runtime": 529.038,
- "eval_samples_per_second": 9.273,
- "eval_steps_per_second": 4.637,
- "step": 10000
- },
- {
- "epoch": 90.58,
- "learning_rate": 0.0,
- "loss": 3.1379,
- "step": 12500
- },
- {
- "epoch": 90.58,
- "eval_loss": 3.2581348419189453,
- "eval_runtime": 529.7541,
- "eval_samples_per_second": 9.261,
- "eval_steps_per_second": 4.63,
- "step": 12500
- }
- ],
- "max_steps": 12500,
- "num_train_epochs": 91,
- "total_flos": 1.8674440074166272e+17,
- "trial_name": null,
- "trial_params": null
- }
 
checkpoint-12500/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d03d7122884f8034582a568ae0aeb2a2c444d81b24122c204b152bc39ad0914e
- size 3375
 
checkpoint-12500/wikiann-az-results.txt DELETED
@@ -1,8 +0,0 @@
- ==================================================
- Results
- ==================================================
- Model: /users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_1000samples_-1vocab_original-frozen/checkpoint-12500
- [0.37828371278458844, 0.3636993418773814, 0.3639538945162417, 0.37745098039215685, 0.3763066202090592, 0.39721739130434774, 0.37895460797799174, 0.36977152899824256, 0.37243589743589745, 0.3800557880055788]
- 37.58
- 0.91
- ==================================================
 
checkpoint-15000/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
 
checkpoint-15000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f50dd22578e231ca17b8e970b4471e3363c63b3069f7f3518bad48e7985f7c96
- size 2254269
 
checkpoint-15000/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ef567c0e28274195ac5c23b0bb43b4c48cefb697c0186129f0a2d689c77939a9
- size 2236955191
 
checkpoint-15000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:79867c9c2c68c84cec8929323f22483c37396d62cbd76ddf28f56bcaae4d084e
- size 14503
 
checkpoint-15000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:301727affc0c0a4c1f25106f7fd12c059ede0526ba52733c25be949ad3bc04d7
- size 623
 
checkpoint-15000/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
 
checkpoint-15000/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
- size 14500443
 
checkpoint-15000/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-350m", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
 
checkpoint-15000/trainer_state.json DELETED
@@ -1,76 +0,0 @@
- {
- "best_metric": 3.59525203704834,
- "best_model_checkpoint": "/users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_1000samples_-1vocab_original-frozen/checkpoint-15000",
- "epoch": 108.69189189189188,
- "global_step": 15000,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 18.12,
- "learning_rate": 9e-05,
- "loss": 4.2544,
- "step": 2500
- },
- {
- "epoch": 36.23,
- "learning_rate": 8e-05,
- "loss": 3.9123,
- "step": 5000
- },
- {
- "epoch": 36.23,
- "eval_loss": 3.843057632446289,
- "eval_runtime": 513.4541,
- "eval_samples_per_second": 9.555,
- "eval_steps_per_second": 4.777,
- "step": 5000
- },
- {
- "epoch": 54.35,
- "learning_rate": 7e-05,
- "loss": 3.7584,
- "step": 7500
- },
- {
- "epoch": 72.46,
- "learning_rate": 6e-05,
- "loss": 3.6674,
- "step": 10000
- },
- {
- "epoch": 72.46,
- "eval_loss": 3.6707117557525635,
- "eval_runtime": 512.9035,
- "eval_samples_per_second": 9.565,
- "eval_steps_per_second": 4.783,
- "step": 10000
- },
- {
- "epoch": 90.58,
- "learning_rate": 5e-05,
- "loss": 3.6049,
- "step": 12500
- },
- {
- "epoch": 108.69,
- "learning_rate": 4e-05,
- "loss": 3.5633,
- "step": 15000
- },
- {
- "epoch": 108.69,
- "eval_loss": 3.59525203704834,
- "eval_runtime": 512.888,
- "eval_samples_per_second": 9.565,
- "eval_steps_per_second": 4.783,
- "step": 15000
- }
- ],
- "max_steps": 25000,
- "num_train_epochs": 182,
- "total_flos": 2.2409328088999526e+17,
- "trial_name": null,
- "trial_params": null
- }
 
checkpoint-15000/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:511eb83bbd6787b86308ce4eb4e32d6cc27314d329c212c555d84efc6ad6cd39
- size 3375
 
checkpoint-20000/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
 
checkpoint-20000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6508d99bdbf164e3ed4c65d681d66d653ae7725ccf2e75ff4eb2967cd0c53f6b
- size 2254269
 
checkpoint-20000/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:feba206df7956295281ddf29ea9d10e1f1a992f6d424767c5806167a67f6ebb3
- size 2236955191
 
checkpoint-20000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5e875fdc7824505ea9288db113abb6ec776202fcd98ab6260922a41311f22d85
- size 14503
 
checkpoint-20000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:69341a1831197b0345e31eaac56abf9ad4527cc56eba4b526818b4ffb6ef6dad
- size 623
 
checkpoint-20000/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
 
checkpoint-20000/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
- size 14500443
 
checkpoint-20000/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-350m", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
 
checkpoint-20000/trainer_state.json DELETED
@@ -1,96 +0,0 @@
- {
- "best_metric": 3.559532403945923,
- "best_model_checkpoint": "/users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_1000samples_-1vocab_original-frozen/checkpoint-20000",
- "epoch": 144.9225225225225,
- "global_step": 20000,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 18.12,
- "learning_rate": 9e-05,
- "loss": 4.2544,
- "step": 2500
- },
- {
- "epoch": 36.23,
- "learning_rate": 8e-05,
- "loss": 3.9123,
- "step": 5000
- },
- {
- "epoch": 36.23,
- "eval_loss": 3.843057632446289,
- "eval_runtime": 513.4541,
- "eval_samples_per_second": 9.555,
- "eval_steps_per_second": 4.777,
- "step": 5000
- },
- {
- "epoch": 54.35,
- "learning_rate": 7e-05,
- "loss": 3.7584,
- "step": 7500
- },
- {
- "epoch": 72.46,
- "learning_rate": 6e-05,
- "loss": 3.6674,
- "step": 10000
- },
- {
- "epoch": 72.46,
- "eval_loss": 3.6707117557525635,
- "eval_runtime": 512.9035,
- "eval_samples_per_second": 9.565,
- "eval_steps_per_second": 4.783,
- "step": 10000
- },
- {
- "epoch": 90.58,
- "learning_rate": 5e-05,
- "loss": 3.6049,
- "step": 12500
- },
- {
- "epoch": 108.69,
- "learning_rate": 4e-05,
- "loss": 3.5633,
- "step": 15000
- },
- {
- "epoch": 108.69,
- "eval_loss": 3.59525203704834,
- "eval_runtime": 512.888,
- "eval_samples_per_second": 9.565,
- "eval_steps_per_second": 4.783,
- "step": 15000
- },
- {
- "epoch": 126.81,
- "learning_rate": 3e-05,
- "loss": 3.5333,
- "step": 17500
- },
- {
- "epoch": 144.92,
- "learning_rate": 2e-05,
- "loss": 3.5125,
- "step": 20000
- },
- {
- "epoch": 144.92,
- "eval_loss": 3.559532403945923,
- "eval_runtime": 512.8641,
- "eval_samples_per_second": 9.566,
- "eval_steps_per_second": 4.783,
- "step": 20000
- }
- ],
- "max_steps": 25000,
- "num_train_epochs": 182,
- "total_flos": 2.9879104118666035e+17,
- "trial_name": null,
- "trial_params": null
- }
 
checkpoint-20000/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:511eb83bbd6787b86308ce4eb4e32d6cc27314d329c212c555d84efc6ad6cd39
- size 3375
 
checkpoint-2500/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
 
checkpoint-2500/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:34b63ec8fba9f45cc23f17cd477edb607fe687bbf6970408c8b1d40a6d7bd58e
- size 2254269
 
checkpoint-2500/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:993f48728e40f168a6fa706bc86e083f7ebc9f3dbca70913e924835ded1b311c
- size 2236955191
 
checkpoint-2500/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:528887aeaf571c1dd9d1789c0fad11e336830c7f10d9174d25b3f236cf9a2aa4
- size 14503
 
checkpoint-2500/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8848be77d5e16f8ad560a7262091b3d3fcd8d0f3fa50682054480c93bc684fe6
- size 623