leixa committed on
Commit b86f6f5
1 Parent(s): 82d782a

Training in progress, step 26, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:845df672de51e2f5fa50b9ff6665844667ddcc24c0a2ae19a315aa98030f2c9e
+ oid sha256:87197d74ca257c80e2929b57e1b56b510efda6553d9bfcd2dac9661ba4eb65e8
  size 639691872
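Note that the checkpoint files below are stored through Git LFS, so each diff only swaps the pointer file's oid (the SHA-256 digest of the new blob); the size stays the same because the adapter weights keep a fixed byte length. A minimal sketch, assuming the blob has already been pulled locally with `git lfs pull` (path and expected values taken from the pointer above), for checking that the downloaded file matches the new pointer:

import hashlib
import os

# Assumed local path after cloning the repo and pulling LFS objects.
path = "last-checkpoint/adapter_model.safetensors"

# From the pointer file: "oid sha256:..." is the digest of the blob,
# "size" is its length in bytes.
expected_oid = "87197d74ca257c80e2929b57e1b56b510efda6553d9bfcd2dac9661ba4eb65e8"
expected_size = 639691872

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "oid mismatch"
print("checkpoint file matches its LFS pointer")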
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0c6f052f9c11d4659a113bedaf733471f594b91455fd7a6781d61178caf58f9f
+ oid sha256:5a1897b95b0853ae3bca8766ff691c75a11ffbf677858864d37481c2dffd3c03
  size 325339796
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:321a2c9e4ec3ff5ef7b65a473523bb974a19b15f29b73590813a9ace23443c6c
+ oid sha256:037eb42ee2bf8daa9ffaf5f72a6ace0549662a52669f37379d45f07ff7e6ccc7
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cfa382b905f4dad34ebe7abd59235e43278cf8532f9270f8693e820e99473115
+ oid sha256:2248de37d2320ddbb576171cc6b8d719f7febfba0a31c5797625bd8cfe79c5ab
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.003600110772639158,
+ "epoch": 0.007200221545278316,
  "eval_steps": 13,
- "global_step": 13,
+ "global_step": 26,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -51,6 +51,42 @@
  "eval_samples_per_second": 13.199,
  "eval_steps_per_second": 1.651,
  "step": 13
+ },
+ {
+ "epoch": 0.004153973968429798,
+ "grad_norm": 18.268274307250977,
+ "learning_rate": 9.968561049466214e-05,
+ "loss": 6.9041,
+ "step": 15
+ },
+ {
+ "epoch": 0.004984768762115757,
+ "grad_norm": 9.425353050231934,
+ "learning_rate": 9.919647942993148e-05,
+ "loss": 3.1179,
+ "step": 18
+ },
+ {
+ "epoch": 0.005815563555801717,
+ "grad_norm": 13.245735168457031,
+ "learning_rate": 9.848447601883435e-05,
+ "loss": 2.4225,
+ "step": 21
+ },
+ {
+ "epoch": 0.006646358349487676,
+ "grad_norm": 7.058098793029785,
+ "learning_rate": 9.755282581475769e-05,
+ "loss": 1.7562,
+ "step": 24
+ },
+ {
+ "epoch": 0.007200221545278316,
+ "eval_loss": 0.40671274065971375,
+ "eval_runtime": 461.0952,
+ "eval_samples_per_second": 13.19,
+ "eval_steps_per_second": 1.65,
+ "step": 26
  }
  ],
  "logging_steps": 3,
@@ -70,7 +106,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2.0622684501049344e+16,
+ "total_flos": 4.290848871992525e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null