leixa committed
Commit fcc834f · verified · 1 Parent(s): e183938

Training in progress, step 6, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e4c1ddeee7fb8fcd043f8a112334a14d99fbe6739194dbc28ed9868c6721dca9
+oid sha256:83033c3dc69ddf3ae05b834d6ccd7662f05741cadf9fe06f522ab5f6791e052c
 size 645975704
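
A minimal sketch, not part of this commit: verifying a locally downloaded checkpoint file against the Git LFS pointer fields shown above (the new oid sha256 and size). The local path assumes the repo has been cloned with the last-checkpoint/ directory intact.

import hashlib
import os

path = "last-checkpoint/adapter_model.safetensors"  # assumed local clone path
expected_oid = "83033c3dc69ddf3ae05b834d6ccd7662f05741cadf9fe06f522ab5f6791e052c"
expected_size = 645975704

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print(os.path.getsize(path) == expected_size)  # True if the file size matches the pointer
print(sha.hexdigest() == expected_oid)          # True if the LFS object id matches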
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:103a96ef37cb357fa5c1052bbbc4b2a7dd59f64e361fb5446389e16a531c1ecf
+oid sha256:236a6deae7f0eae59c12a6dd66130f9fe1f39a080f76722305c89b3a193b3fc8
 size 328468404
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f963f3b18e1ce5bf60b7659ac5cb6aaa997b0bf1c1c52830e575d74128f1f231
+oid sha256:c893fc6997d849f1c551536052821911ccb72e85a0b7fc5adab0bfa68cf904af
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2d63217b923cb177f669d6bc2174b89abdc6a56d968d279b505491b37976d9bb
+oid sha256:92b4cf85d7ba7a497f88ff799bf4dec5af7dd95be6e00f78bf46ba5deb56bbf8
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 3.2857142857142856,
+  "epoch": 3.857142857142857,
   "eval_steps": 1,
-  "global_step": 5,
+  "global_step": 6,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -54,6 +54,21 @@
       "eval_samples_per_second": 13.442,
       "eval_steps_per_second": 4.481,
       "step": 5
+    },
+    {
+      "epoch": 3.857142857142857,
+      "grad_norm": 0.0,
+      "learning_rate": 6e-05,
+      "loss": 0.0,
+      "step": 6
+    },
+    {
+      "epoch": 3.857142857142857,
+      "eval_loss": NaN,
+      "eval_runtime": 0.2247,
+      "eval_samples_per_second": 13.351,
+      "eval_steps_per_second": 4.45,
+      "step": 6
     }
   ],
   "logging_steps": 3,
@@ -68,12 +83,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 7109443426713600.0,
+  "total_flos": 8531332112056320.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null