Nexspear committed
Commit 4d1f267 · verified · 1 Parent(s): 384ee3e

Training in progress, step 35, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f6fea5ad404951ef6a1fae3508ebdd797dc7d0856e4e095dbade4d136af05bb
+oid sha256:9b4e9fe3a11c0765df2ef2a681de6cf73de293693def2c6626a8db4db4156545
 size 338298
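
The adapter_model.safetensors name and small size (~338 KB) follow the PEFT adapter checkpoint convention. A minimal, hedged sketch of loading this checkpoint directory as an adapter with the peft library, assuming a causal-LM base model whose identifier (BASE_MODEL_ID below is a placeholder) is not recorded in this commit:

    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import PeftModel

    # Hypothetical: the base model id is not part of this commit's diff.
    BASE_MODEL_ID = "your-org/your-base-model"

    base = AutoModelForCausalLM.from_pretrained(BASE_MODEL_ID)
    tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)

    # Attach the adapter weights stored in the checkpoint directory.
    model = PeftModel.from_pretrained(base, "last-checkpoint")
    model.eval()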
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e75175f411e54f2d45a172225b45542e61e14c9898d23196763485fc6e8cdc2
+oid sha256:5270fd8e4813d1c8e4a7815db833fb6624cf3f6f7acb5c7123e1c05b3ca089ff
 size 418030
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b8897608d343d62eeb1707c720b184d47499c9ca79a71524526ea1ab75dc3c5d
+oid sha256:dbc86b826bc6bd97d977ae368482d2b94ac269f5ff72bde8fe20b71551da06c3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4849a6ac0a1d895740f1ab4eba9d346b8d898008d0cfe93dd108cd928d7c63e
+oid sha256:88f387d8c434535a84694e469cebc18f2e722ba31b0dc0372632798b59011377
 size 1064
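
Each of the binary checkpoint files above is stored through Git LFS, so the diff only shows the pointer text (version, oid sha256, size), not the binary contents. As a hedged sketch, assuming the checkpoint has been downloaded locally and using hypothetical paths (the *.pointer path below is illustrative, not part of the repo layout), the following Python verifies that a resolved file matches its pointer:

    import hashlib
    import os

    def parse_lfs_pointer(pointer_path):
        """Read a Git LFS pointer file into a dict of its key/value fields."""
        fields = {}
        with open(pointer_path, "r") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    def verify_against_pointer(blob_path, pointer_fields):
        """Check that a downloaded blob matches the oid and size in the pointer."""
        expected_oid = pointer_fields["oid"].removeprefix("sha256:")
        expected_size = int(pointer_fields["size"])
        sha = hashlib.sha256()
        with open(blob_path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                sha.update(chunk)
        return (sha.hexdigest() == expected_oid
                and os.path.getsize(blob_path) == expected_size)

    # Hypothetical usage: raw pointer text vs. the resolved binary it describes.
    pointer = parse_lfs_pointer("last-checkpoint/adapter_model.safetensors.pointer")
    print(verify_against_pointer("last-checkpoint/adapter_model.safetensors", pointer))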
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.3669724770642202,
+  "epoch": 1.2844036697247707,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 35,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -52,6 +52,102 @@
       "eval_samples_per_second": 763.509,
       "eval_steps_per_second": 99.588,
       "step": 10
+    },
+    {
+      "epoch": 0.44036697247706424,
+      "grad_norm": 0.4800911843776703,
+      "learning_rate": 9.938441702975689e-05,
+      "loss": 6.9237,
+      "step": 12
+    },
+    {
+      "epoch": 0.5504587155963303,
+      "grad_norm": 0.5088721513748169,
+      "learning_rate": 9.619397662556435e-05,
+      "loss": 6.9119,
+      "step": 15
+    },
+    {
+      "epoch": 0.5504587155963303,
+      "eval_loss": 6.901125907897949,
+      "eval_runtime": 0.0697,
+      "eval_samples_per_second": 660.168,
+      "eval_steps_per_second": 86.109,
+      "step": 15
+    },
+    {
+      "epoch": 0.6605504587155964,
+      "grad_norm": 0.5459818840026855,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 6.8969,
+      "step": 18
+    },
+    {
+      "epoch": 0.7339449541284404,
+      "eval_loss": 6.87958288192749,
+      "eval_runtime": 0.058,
+      "eval_samples_per_second": 793.383,
+      "eval_steps_per_second": 103.485,
+      "step": 20
+    },
+    {
+      "epoch": 0.7706422018348624,
+      "grad_norm": 0.5402729511260986,
+      "learning_rate": 8.247240241650918e-05,
+      "loss": 6.8852,
+      "step": 21
+    },
+    {
+      "epoch": 0.8807339449541285,
+      "grad_norm": 0.5008237361907959,
+      "learning_rate": 7.269952498697734e-05,
+      "loss": 6.8723,
+      "step": 24
+    },
+    {
+      "epoch": 0.9174311926605505,
+      "eval_loss": 6.86152982711792,
+      "eval_runtime": 0.0589,
+      "eval_samples_per_second": 781.084,
+      "eval_steps_per_second": 101.881,
+      "step": 25
+    },
+    {
+      "epoch": 0.9908256880733946,
+      "grad_norm": 0.4618155360221863,
+      "learning_rate": 6.167226819279528e-05,
+      "loss": 6.8632,
+      "step": 27
+    },
+    {
+      "epoch": 1.1009174311926606,
+      "grad_norm": 0.4284931719303131,
+      "learning_rate": 5e-05,
+      "loss": 8.6369,
+      "step": 30
+    },
+    {
+      "epoch": 1.1009174311926606,
+      "eval_loss": 6.850541591644287,
+      "eval_runtime": 0.0613,
+      "eval_samples_per_second": 750.191,
+      "eval_steps_per_second": 97.851,
+      "step": 30
+    },
+    {
+      "epoch": 1.2110091743119267,
+      "grad_norm": 0.37953659892082214,
+      "learning_rate": 3.832773180720475e-05,
+      "loss": 6.7564,
+      "step": 33
+    },
+    {
+      "epoch": 1.2844036697247707,
+      "eval_loss": 6.844419956207275,
+      "eval_runtime": 0.0605,
+      "eval_samples_per_second": 759.745,
+      "eval_steps_per_second": 99.097,
+      "step": 35
     }
   ],
   "logging_steps": 3,
@@ -71,7 +167,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 225865089024.0,
+  "total_flos": 774308775936.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null