lesso committed · Commit c4ccaa0 · verified · 1 parent: 7484463

Training in progress, step 9, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "v_proj",
-    "q_proj",
-    "down_proj",
-    "o_proj",
     "up_proj",
+    "o_proj",
+    "k_proj",
     "gate_proj",
-    "k_proj"
+    "q_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c8cfac24ff0d3e238a815b333f2ea43cf11b4d6fea1fd7ae85e5916aa356a26
+oid sha256:63a202a643f27c5a084a406e3d9fcdadeed3e307d936d87819bd3e33fa72afda
 size 83945296
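The binary checkpoint files in this commit are stored through Git LFS, so each diff only shows the pointer (spec version, sha256 oid, byte size) being replaced. A small sketch, assuming the checkpoint has been downloaded to a local last-checkpoint/ directory, for checking a file against its pointer:

```python
import hashlib
import os

# Assumed local path, matching the repository layout shown in this commit.
path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "63a202a643f27c5a084a406e3d9fcdadeed3e307d936d87819bd3e33fa72afda"
expected_size = 83945296  # bytes, from the LFS pointer above

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size differs from LFS pointer"
assert digest.hexdigest() == expected_oid, "sha256 differs from LFS pointer"
```

The same check applies to the optimizer, RNG state, scheduler, and training-args pointers below; only the expected oid and size change per file.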
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cbdaa7f8ee336181c7adfc9dfa44e8b569a2588db424e11cfebb72b6592e69b
+oid sha256:4a549a30c15a77b0e4d2167218887a9c36e3703902f43ffab17c9832b9e95e29
 size 168037178
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:574c700717b33f47ad8a70b47083271e5db120d9e87ae1c83258d7702c5eeb81
+oid sha256:b90a19943f8ef28f7f3c1abafd8de7b24ff68c3eda11dffdc748b1a1d989fcb2
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8bdda546a1390be3b85a90dd4ef31050dfb7b691765ffc12ed691b6786ed6e3d
+oid sha256:0099cb7287625b29b67c4fcf42ff20fae623b429bfb10f5ac695bc54f2be54fd
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0029942360955161314,
+  "epoch": 0.002694812485964518,
   "eval_steps": 3,
-  "global_step": 10,
+  "global_step": 9,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -18,9 +18,9 @@
     {
       "epoch": 0.0002994236095516131,
       "eval_loss": 11.5775728225708,
-      "eval_runtime": 95.8713,
-      "eval_samples_per_second": 7.343,
-      "eval_steps_per_second": 7.343,
+      "eval_runtime": 95.8227,
+      "eval_samples_per_second": 7.347,
+      "eval_steps_per_second": 7.347,
       "step": 1
     },
     {
@@ -40,75 +40,68 @@
     {
       "epoch": 0.0008982708286548395,
       "eval_loss": 11.5775728225708,
-      "eval_runtime": 94.6943,
-      "eval_samples_per_second": 7.434,
-      "eval_steps_per_second": 7.434,
+      "eval_runtime": 95.0865,
+      "eval_samples_per_second": 7.404,
+      "eval_steps_per_second": 7.404,
       "step": 3
     },
     {
       "epoch": 0.0011976944382064525,
-      "grad_norm": 11.335941314697266,
+      "grad_norm": 10.02202320098877,
       "learning_rate": 2e-05,
       "loss": 9.5838,
       "step": 4
     },
     {
       "epoch": 0.0014971180477580657,
-      "grad_norm": 12.509073257446289,
+      "grad_norm": 11.54944896697998,
       "learning_rate": 4e-05,
       "loss": 11.0567,
       "step": 5
     },
     {
       "epoch": 0.001796541657309679,
-      "grad_norm": 13.569826126098633,
+      "grad_norm": 12.557976722717285,
       "learning_rate": 6e-05,
-      "loss": 12.2136,
+      "loss": 12.2132,
       "step": 6
     },
     {
       "epoch": 0.001796541657309679,
-      "eval_loss": 11.521071434020996,
-      "eval_runtime": 94.7717,
-      "eval_samples_per_second": 7.428,
-      "eval_steps_per_second": 7.428,
+      "eval_loss": 11.523130416870117,
+      "eval_runtime": 94.7466,
+      "eval_samples_per_second": 7.43,
+      "eval_steps_per_second": 7.43,
       "step": 6
     },
     {
       "epoch": 0.002095965266861292,
-      "grad_norm": 12.041800498962402,
+      "grad_norm": 10.816157341003418,
       "learning_rate": 8e-05,
-      "loss": 11.451,
+      "loss": 11.453,
       "step": 7
     },
     {
       "epoch": 0.002395388876412905,
-      "grad_norm": 12.907513618469238,
+      "grad_norm": 11.840999603271484,
       "learning_rate": 0.0001,
-      "loss": 11.1969,
+      "loss": 11.202,
       "step": 8
     },
     {
       "epoch": 0.002694812485964518,
-      "grad_norm": 8.735396385192871,
+      "grad_norm": 8.202712059020996,
       "learning_rate": 0.00012,
-      "loss": 9.0545,
+      "loss": 9.0586,
       "step": 9
     },
     {
       "epoch": 0.002694812485964518,
-      "eval_loss": 11.052964210510254,
-      "eval_runtime": 95.5747,
-      "eval_samples_per_second": 7.366,
-      "eval_steps_per_second": 7.366,
+      "eval_loss": 11.066697120666504,
+      "eval_runtime": 95.5362,
+      "eval_samples_per_second": 7.369,
+      "eval_steps_per_second": 7.369,
       "step": 9
-    },
-    {
-      "epoch": 0.0029942360955161314,
-      "grad_norm": 7.92449951171875,
-      "learning_rate": 0.00014,
-      "loss": 10.0,
-      "step": 10
     }
   ],
   "logging_steps": 1,
@@ -123,12 +116,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": true
+        "should_training_stop": false
       },
       "attributes": {}
     }
   },
-  "total_flos": 1849564248145920.0,
+  "total_flos": 1664607823331328.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ed5cf1a7f24b2f4b6ffdd137a7076d460d56660645e71b5af0068e2d73030f31
+oid sha256:1ee223a43027b8c8afd21622e4578463f433159b96d4ac348cc6f399af5db597
 size 6776