dzanbek committed · Commit 04a1a33 · verified · Parent: f34c75f

Training in progress, step 10, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0235e7ff8efed9c9306b23473829de670f7b5976122c45e007b8285545114a43
+oid sha256:b5f5cc161a91512901a16d0451aed799071f29b93aba05163a0f58ec74d8efec
 size 160284754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e300bddb0d7e7d6e026d32cddc3203212379cd7e7d4945e7775d35a028e0ce1f
+oid sha256:77820273899338f3cb44d8b61f14d2376760c84be6dcbd25e712a3423d224d4a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab098d97568c94b26c087e3ff4fd649c3aaa775049e6dbf18e927dde2c5feee8
+oid sha256:4abdd3b29509160214335978cbd2a63c87f2b7769a5608c5e772a6c51d699b65
 size 1064
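
The three binary checkpoint files above (optimizer.pt, rng_state.pth, scheduler.pt) are stored as Git LFS pointers: each pointer records only an oid (the SHA-256 of the tracked blob) and its byte size, and this commit swaps the oids while the sizes stay identical. As a rough illustration only (not part of this repo), such a pointer could be checked against a locally downloaded blob like this; the paths below are hypothetical:

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Return True if a local blob matches the oid/size recorded in a Git LFS pointer file."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with blob.open("rb") as f:                      # hash in chunks; optimizer.pt is ~160 MB
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Hypothetical local paths -- the commit itself only contains the pointer text shown above.
print(verify_lfs_pointer("last-checkpoint/optimizer.pt", "/tmp/optimizer.pt"))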
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.009578544061302681,
+  "epoch": 0.019157088122605363,
   "eval_steps": 3,
-  "global_step": 5,
+  "global_step": 10,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,57 @@
       "learning_rate": 0.0001,
       "loss": 0.0,
       "step": 5
+    },
+    {
+      "epoch": 0.011494252873563218,
+      "grad_norm": NaN,
+      "learning_rate": 9.938441702975689e-05,
+      "loss": 0.0,
+      "step": 6
+    },
+    {
+      "epoch": 0.011494252873563218,
+      "eval_loss": NaN,
+      "eval_runtime": 24.832,
+      "eval_samples_per_second": 8.86,
+      "eval_steps_per_second": 4.43,
+      "step": 6
+    },
+    {
+      "epoch": 0.013409961685823755,
+      "grad_norm": NaN,
+      "learning_rate": 9.755282581475769e-05,
+      "loss": 0.0,
+      "step": 7
+    },
+    {
+      "epoch": 0.01532567049808429,
+      "grad_norm": NaN,
+      "learning_rate": 9.45503262094184e-05,
+      "loss": 0.0,
+      "step": 8
+    },
+    {
+      "epoch": 0.017241379310344827,
+      "grad_norm": NaN,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 0.0,
+      "step": 9
+    },
+    {
+      "epoch": 0.017241379310344827,
+      "eval_loss": NaN,
+      "eval_runtime": 24.885,
+      "eval_samples_per_second": 8.841,
+      "eval_steps_per_second": 4.42,
+      "step": 9
+    },
+    {
+      "epoch": 0.019157088122605363,
+      "grad_norm": NaN,
+      "learning_rate": 8.535533905932738e-05,
+      "loss": 0.0,
+      "step": 10
     }
   ],
   "logging_steps": 1,
@@ -77,7 +128,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1643164226027520.0,
+  "total_flos": 3286328452055040.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null