Upload folder using huggingface_hub
- checkpoint-3/config.json +32 -0
- checkpoint-3/generation_config.json +6 -0
- checkpoint-3/model.safetensors +3 -0
- checkpoint-3/optimizer.pt +3 -0
- checkpoint-3/rng_state.pth +3 -0
- checkpoint-3/scheduler.pt +3 -0
- checkpoint-3/trainer_state.json +54 -0
- checkpoint-3/training_args.bin +3 -0
- config.json +32 -0
- generation_config.json +6 -0
- model.safetensors +3 -0
- training_args.bin +3 -0
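
The commit message matches the default produced by huggingface_hub's upload_folder API, which uploads a local training output directory as a single commit. A minimal sketch of how such a commit is typically created; the repo id and local folder below are hypothetical placeholders, not taken from this commit:

# Minimal sketch: upload a Trainer output folder with huggingface_hub.
# repo_id and folder_path are hypothetical placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    repo_id="your-username/your-model",   # hypothetical target repository
    folder_path="./output",               # local directory containing the files listed above
    commit_message="Upload folder using huggingface_hub",
)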
checkpoint-3/config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "EleutherAI/pythia-70m-deduped-v0",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 0,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 512,
+  "initializer_range": 0.02,
+  "intermediate_size": 2048,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 2048,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 8,
+  "num_hidden_layers": 6,
+  "partial_rotary_factor": 0.25,
+  "rope_scaling": null,
+  "rope_theta": 10000,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.45.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
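
This config describes a 6-layer GPT-NeoX model (hidden_size 512, 8 attention heads, rotary_pct 0.25), i.e. a Pythia-70m-sized architecture fine-tuned from EleutherAI/pythia-70m-deduped-v0. A minimal sketch of inspecting it with transformers, assuming the checkpoint directory has been downloaded locally (the path is a placeholder):

# Minimal sketch: inspect the checkpoint's config with transformers.
# "./checkpoint-3" is a placeholder for the locally downloaded directory.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./checkpoint-3")
print(config.model_type)         # "gpt_neox"
print(config.num_hidden_layers)  # 6
print(config.hidden_size)        # 512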
checkpoint-3/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 0,
+  "eos_token_id": 0,
+  "transformers_version": "4.45.2"
+}
checkpoint-3/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b1554b104dee315439e8833a6b03fc6c98f2e93781f20bf7d4ffd3198945b08
+size 281715176
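
The three lines above are a Git LFS pointer, not the weights themselves: the ~282 MB model.safetensors is stored in LFS and addressed by the SHA-256 oid and byte size recorded here. A minimal sketch of verifying a downloaded copy against the pointer (the local path is a placeholder):

# Minimal sketch: check a downloaded file against its Git LFS pointer.
# The path below is a placeholder for the locally downloaded weights.
import hashlib, os

path = "checkpoint-3/model.safetensors"
expected_oid = "8b1554b104dee315439e8833a6b03fc6c98f2e93781f20bf7d4ffd3198945b08"
expected_size = 281715176

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

assert os.path.getsize(path) == expected_size
assert sha256.hexdigest() == expected_oid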
checkpoint-3/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f26f8d94e16e959d3f1bbf11334e8714975361156a02addee30f227522db3a5f
+size 563474746
checkpoint-3/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bb8daa2218f661aab713b5c8d6a88cb76f435e9067862f52676baa950ad6801
+size 13990
checkpoint-3/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac15d8a19cf2b200854676368efdc4b5f41ea1bbdf9751c12b3b9868abf6cb3a
+size 1064
checkpoint-3/trainer_state.json
ADDED
@@ -0,0 +1,54 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 7.48839299086416e-05,
+  "eval_steps": 100,
+  "global_step": 3,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 2.4961309969547202e-05,
+      "grad_norm": 41.783329010009766,
+      "learning_rate": 0.0003,
+      "loss": 3.3915,
+      "step": 1
+    },
+    {
+      "epoch": 4.9922619939094404e-05,
+      "grad_norm": 45.086753845214844,
+      "learning_rate": 0.00015,
+      "loss": 2.6814,
+      "step": 2
+    },
+    {
+      "epoch": 7.48839299086416e-05,
+      "grad_norm": 71.21851348876953,
+      "learning_rate": 0.0,
+      "loss": 4.6356,
+      "step": 3
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 3,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 131600695296.0,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
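
trainer_state.json records one log_history entry per optimization step (loss, learning rate, gradient norm), so the 3-step run captured by this checkpoint can be summarized directly from it. A minimal sketch of reading that log, assuming the file has been downloaded locally (the path is a placeholder):

# Minimal sketch: extract per-step training metrics from a Trainer state file.
# "checkpoint-3/trainer_state.json" is a placeholder local path.
import json

with open("checkpoint-3/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(entry["step"], entry["loss"], entry["learning_rate"])
# For this checkpoint: steps 1-3 with losses 3.3915, 2.6814, 4.6356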
checkpoint-3/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83303a1b5feeac73c118102d0e04e5f20eb9f3fca3134ccdc07b951c4e498319
+size 5176
config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "EleutherAI/pythia-70m-deduped-v0",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 0,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 512,
+  "initializer_range": 0.02,
+  "intermediate_size": 2048,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 2048,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 8,
+  "num_hidden_layers": 6,
+  "partial_rotary_factor": 0.25,
+  "rope_scaling": null,
+  "rope_theta": 10000,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.45.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 0,
+  "eos_token_id": 0,
+  "transformers_version": "4.45.2"
+}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b1554b104dee315439e8833a6b03fc6c98f2e93781f20bf7d4ffd3198945b08
+size 281715176
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83303a1b5feeac73c118102d0e04e5f20eb9f3fca3134ccdc07b951c4e498319
+size 5176
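
With config.json, generation_config.json and model.safetensors at the repository root, the upload is directly loadable with transformers. A minimal sketch, assuming the repository id (not stated in this commit) is available as a placeholder REPO_ID; note the commit ships no tokenizer files, so the base model's tokenizer is used here as an assumption:

# Minimal sketch: load the uploaded model and generate text.
# REPO_ID is a hypothetical placeholder; the actual repository id is not part of this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

REPO_ID = "your-username/your-model"
model = AutoModelForCausalLM.from_pretrained(REPO_ID)
# No tokenizer is included in the commit, so fall back to the base model's tokenizer.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m-deduped-v0")

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0]))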