ngocquangt2k46 committed
Commit cfaf6aa · verified · 1 Parent(s): 42498dc

Training in progress, step 15, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "o_proj",
+    "down_proj",
     "k_proj",
     "v_proj",
     "q_proj",
-    "gate_proj",
-    "down_proj",
-    "up_proj"
+    "up_proj",
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0fc1c6da5518569e9a500a08a91781175557d822cff833744ccc5c35c0c5dfb3
+oid sha256:08859d76dcf05396f35f4db018a01a349d34a86be334bcd3a3156bb22e63f060
 size 50624
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4323947ba6180f1c9298f312c0ba3b542f703ff84fd68e767185d0ac11ed31c9
+oid sha256:f21a77af613804b615134ceca179b4ed182dd57080da839aeba81d2c15b6f870
 size 111142
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:359d0ddb346e1a051d153b9361e52227f5a93bfd11d7d59f8d34fe81bcaff9e5
+oid sha256:6279e2d6d30b2ce7f2189e648e383ab146d51fb0fecf44d1e16d05504562e6f8
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4849a6ac0a1d895740f1ab4eba9d346b8d898008d0cfe93dd108cd928d7c63e
+oid sha256:ff2736979009751c0c6b0ddcc5f6544d6f723aa752b4798eab0b70fb76cf0083
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,16 +1,16 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0032512395350727465,
+  "epoch": 0.00487685930260912,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 15,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.0003251239535072746,
-      "grad_norm": 0.024652473628520966,
+      "grad_norm": 0.021640103310346603,
       "learning_rate": 1e-05,
       "loss": 11.7645,
       "step": 1
@@ -18,89 +18,132 @@
     {
       "epoch": 0.0003251239535072746,
       "eval_loss": 11.76447582244873,
-      "eval_runtime": 276.626,
-      "eval_samples_per_second": 37.455,
-      "eval_steps_per_second": 18.729,
+      "eval_runtime": 249.3031,
+      "eval_samples_per_second": 41.56,
+      "eval_steps_per_second": 20.782,
       "step": 1
     },
     {
       "epoch": 0.0006502479070145493,
-      "grad_norm": 0.027672940865159035,
+      "grad_norm": 0.024706723168492317,
       "learning_rate": 2e-05,
       "loss": 11.7658,
       "step": 2
     },
     {
       "epoch": 0.000975371860521824,
-      "grad_norm": 0.025643622502684593,
+      "grad_norm": 0.022929754108190536,
       "learning_rate": 3e-05,
       "loss": 11.7649,
       "step": 3
     },
     {
       "epoch": 0.0013004958140290985,
-      "grad_norm": 0.025678519159555435,
+      "grad_norm": 0.022171195596456528,
       "learning_rate": 4e-05,
       "loss": 11.7648,
       "step": 4
     },
     {
       "epoch": 0.0016256197675363732,
-      "grad_norm": 0.02572811394929886,
+      "grad_norm": 0.02208411693572998,
       "learning_rate": 5e-05,
       "loss": 11.7649,
       "step": 5
     },
     {
       "epoch": 0.0016256197675363732,
-      "eval_loss": 11.764395713806152,
-      "eval_runtime": 276.5749,
-      "eval_samples_per_second": 37.462,
-      "eval_steps_per_second": 18.733,
+      "eval_loss": 11.764402389526367,
+      "eval_runtime": 249.1579,
+      "eval_samples_per_second": 41.584,
+      "eval_steps_per_second": 20.794,
       "step": 5
     },
     {
       "epoch": 0.001950743721043648,
-      "grad_norm": 0.026358768343925476,
+      "grad_norm": 0.02443164400756359,
       "learning_rate": 6e-05,
       "loss": 11.7634,
       "step": 6
     },
     {
       "epoch": 0.0022758676745509225,
-      "grad_norm": 0.026500564068555832,
+      "grad_norm": 0.023470092564821243,
       "learning_rate": 7e-05,
       "loss": 11.7654,
       "step": 7
     },
     {
       "epoch": 0.002600991628058197,
-      "grad_norm": 0.02642131596803665,
+      "grad_norm": 0.023716744035482407,
       "learning_rate": 8e-05,
       "loss": 11.7644,
       "step": 8
     },
     {
       "epoch": 0.002926115581565472,
-      "grad_norm": 0.02585265040397644,
+      "grad_norm": 0.022970277816057205,
       "learning_rate": 9e-05,
       "loss": 11.7658,
       "step": 9
     },
     {
       "epoch": 0.0032512395350727465,
-      "grad_norm": 0.024854907765984535,
+      "grad_norm": 0.022244542837142944,
       "learning_rate": 0.0001,
       "loss": 11.7641,
       "step": 10
     },
     {
       "epoch": 0.0032512395350727465,
-      "eval_loss": 11.76411247253418,
-      "eval_runtime": 276.5048,
-      "eval_samples_per_second": 37.471,
-      "eval_steps_per_second": 18.737,
+      "eval_loss": 11.764139175415039,
+      "eval_runtime": 248.9949,
+      "eval_samples_per_second": 41.611,
+      "eval_steps_per_second": 20.808,
       "step": 10
+    },
+    {
+      "epoch": 0.003576363488580021,
+      "grad_norm": 0.024796368554234505,
+      "learning_rate": 9.755282581475769e-05,
+      "loss": 11.7653,
+      "step": 11
+    },
+    {
+      "epoch": 0.003901487442087296,
+      "grad_norm": 0.024691808968782425,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 11.7642,
+      "step": 12
+    },
+    {
+      "epoch": 0.00422661139559457,
+      "grad_norm": 0.026635609567165375,
+      "learning_rate": 7.938926261462366e-05,
+      "loss": 11.7643,
+      "step": 13
+    },
+    {
+      "epoch": 0.004551735349101845,
+      "grad_norm": 0.024080852046608925,
+      "learning_rate": 6.545084971874738e-05,
+      "loss": 11.7639,
+      "step": 14
+    },
+    {
+      "epoch": 0.00487685930260912,
+      "grad_norm": 0.021976996213197708,
+      "learning_rate": 5e-05,
+      "loss": 11.763,
+      "step": 15
+    },
+    {
+      "epoch": 0.00487685930260912,
+      "eval_loss": 11.7637939453125,
+      "eval_runtime": 248.9817,
+      "eval_samples_per_second": 41.613,
+      "eval_steps_per_second": 20.809,
+      "step": 15
     }
   ],
   "logging_steps": 1,
@@ -120,7 +163,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 32592007004160.0,
+  "total_flos": 48888010506240.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e17a6c83dec956fb9efedd2c17093957afde82b766a4186ec8d108b6cbea110d
+oid sha256:9a413dd30c82bf1458b4c9aa8cd9fc6df506b56de5b15f10d2e2a254ff755ea0
 size 6776