ZeroUniqueness committed on
Commit
f52ffc6
β€’
1 Parent(s): e7d1793

2023-08-03 04:56:50 Autosave for checkpoint additions

Browse files
checkpoint-5000/adapter_model/README.md DELETED
@@ -1,20 +0,0 @@
1
- ---
2
- library_name: peft
3
- ---
4
- ## Training procedure
5
-
6
-
7
- The following `bitsandbytes` quantization config was used during training:
8
- - load_in_8bit: False
9
- - load_in_4bit: True
10
- - llm_int8_threshold: 6.0
11
- - llm_int8_skip_modules: None
12
- - llm_int8_enable_fp32_cpu_offload: False
13
- - llm_int8_has_fp16_weight: False
14
- - bnb_4bit_quant_type: nf4
15
- - bnb_4bit_use_double_quant: True
16
- - bnb_4bit_compute_dtype: bfloat16
17
- ### Framework versions
18
-
19
-
20
- - PEFT 0.5.0.dev0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-5000/adapter_model/adapter_config.json DELETED
@@ -1,26 +0,0 @@
1
- {
2
- "auto_mapping": null,
3
- "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
4
- "bias": "none",
5
- "fan_in_fan_out": null,
6
- "inference_mode": true,
7
- "init_lora_weights": true,
8
- "layers_pattern": null,
9
- "layers_to_transform": null,
10
- "lora_alpha": 16,
11
- "lora_dropout": 0.05,
12
- "modules_to_save": null,
13
- "peft_type": "LORA",
14
- "r": 32,
15
- "revision": null,
16
- "target_modules": [
17
- "v_proj",
18
- "k_proj",
19
- "up_proj",
20
- "down_proj",
21
- "gate_proj",
22
- "o_proj",
23
- "q_proj"
24
- ],
25
- "task_type": "CAUSAL_LM"
26
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-5000/adapter_model/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a99a6fd84a005d613308e543278f3b8e5211714bb53b78a4198e5a97c2754079
3
- size 500897101
 
 
 
 
checkpoint-5000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:55064709081fd795557e61624a770c9f480f8521aab51cbaf8275cd1efc2dde4
3
- size 1001723453
 
 
 
 
checkpoint-5000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c1928cd4757531357f4e84609039518155a0b6f12a501b3a07755f8dafb17a42
3
- size 14575
 
 
 
 
checkpoint-5000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e5cc068063b758a2566219f86d566a205e312a01898a7125f5179bf071646606
3
- size 627
 
 
 
 
checkpoint-5000/trainer_state.json DELETED
@@ -1,356 +0,0 @@
1
- {
2
- "best_metric": 0.7171670794487,
3
- "best_model_checkpoint": "./qlora-out/checkpoint-5000",
4
- "epoch": 0.1864210879534693,
5
- "global_step": 5000,
6
- "is_hyper_param_search": false,
7
- "is_local_process_zero": true,
8
- "is_world_process_zero": true,
9
- "log_history": [
10
- {
11
- "epoch": 0.0,
12
- "learning_rate": 0.00019999938245325715,
13
- "loss": 0.9023,
14
- "step": 100
15
- },
16
- {
17
- "epoch": 0.01,
18
- "learning_rate": 0.00019999724773356797,
19
- "loss": 0.8027,
20
- "step": 200
21
- },
22
- {
23
- "epoch": 0.01,
24
- "learning_rate": 0.0001999935882494411,
25
- "loss": 0.8041,
26
- "step": 300
27
- },
28
- {
29
- "epoch": 0.01,
30
- "learning_rate": 0.00019998840405667672,
31
- "loss": 0.7944,
32
- "step": 400
33
- },
34
- {
35
- "epoch": 0.02,
36
- "learning_rate": 0.00019998169523432365,
37
- "loss": 0.81,
38
- "step": 500
39
- },
40
- {
41
- "epoch": 0.02,
42
- "learning_rate": 0.0001999734618846785,
43
- "loss": 0.7855,
44
- "step": 600
45
- },
46
- {
47
- "epoch": 0.03,
48
- "learning_rate": 0.00019996370413328385,
49
- "loss": 0.7849,
50
- "step": 700
51
- },
52
- {
53
- "epoch": 0.03,
54
- "learning_rate": 0.00019995242212892653,
55
- "loss": 0.7564,
56
- "step": 800
57
- },
58
- {
59
- "epoch": 0.03,
60
- "learning_rate": 0.00019993961604363532,
61
- "loss": 0.7724,
62
- "step": 900
63
- },
64
- {
65
- "epoch": 0.04,
66
- "learning_rate": 0.00019992528607267815,
67
- "loss": 0.7308,
68
- "step": 1000
69
- },
70
- {
71
- "epoch": 0.04,
72
- "eval_loss": 0.7677998542785645,
73
- "eval_runtime": 1774.3517,
74
- "eval_samples_per_second": 0.305,
75
- "eval_steps_per_second": 0.305,
76
- "step": 1000
77
- },
78
- {
79
- "epoch": 0.04,
80
- "learning_rate": 0.0001999094324345594,
81
- "loss": 0.7844,
82
- "step": 1100
83
- },
84
- {
85
- "epoch": 0.04,
86
- "learning_rate": 0.00019989205537101633,
87
- "loss": 0.7668,
88
- "step": 1200
89
- },
90
- {
91
- "epoch": 0.05,
92
- "learning_rate": 0.00019987315514701553,
93
- "loss": 0.7727,
94
- "step": 1300
95
- },
96
- {
97
- "epoch": 0.05,
98
- "learning_rate": 0.00019985273205074878,
99
- "loss": 0.7467,
100
- "step": 1400
101
- },
102
- {
103
- "epoch": 0.06,
104
- "learning_rate": 0.00019983078639362883,
105
- "loss": 0.7516,
106
- "step": 1500
107
- },
108
- {
109
- "epoch": 0.06,
110
- "learning_rate": 0.00019980731851028445,
111
- "loss": 0.7267,
112
- "step": 1600
113
- },
114
- {
115
- "epoch": 0.06,
116
- "learning_rate": 0.0001997823287585554,
117
- "loss": 0.7632,
118
- "step": 1700
119
- },
120
- {
121
- "epoch": 0.07,
122
- "learning_rate": 0.000199755817519487,
123
- "loss": 0.7392,
124
- "step": 1800
125
- },
126
- {
127
- "epoch": 0.07,
128
- "learning_rate": 0.00019972778519732436,
129
- "loss": 0.7528,
130
- "step": 1900
131
- },
132
- {
133
- "epoch": 0.07,
134
- "learning_rate": 0.0001996982322195061,
135
- "loss": 0.725,
136
- "step": 2000
137
- },
138
- {
139
- "epoch": 0.07,
140
- "eval_loss": 0.7452704310417175,
141
- "eval_runtime": 1787.7554,
142
- "eval_samples_per_second": 0.303,
143
- "eval_steps_per_second": 0.303,
144
- "step": 2000
145
- },
146
- {
147
- "epoch": 0.08,
148
- "learning_rate": 0.00019966715903665795,
149
- "loss": 0.7234,
150
- "step": 2100
151
- },
152
- {
153
- "epoch": 0.08,
154
- "learning_rate": 0.00019963456612258576,
155
- "loss": 0.754,
156
- "step": 2200
157
- },
158
- {
159
- "epoch": 0.09,
160
- "learning_rate": 0.00019960045397426841,
161
- "loss": 0.7856,
162
- "step": 2300
163
- },
164
- {
165
- "epoch": 0.09,
166
- "learning_rate": 0.00019956482311185006,
167
- "loss": 0.7387,
168
- "step": 2400
169
- },
170
- {
171
- "epoch": 0.09,
172
- "learning_rate": 0.00019952767407863245,
173
- "loss": 0.7309,
174
- "step": 2500
175
- },
176
- {
177
- "epoch": 0.1,
178
- "learning_rate": 0.00019948900744106633,
179
- "loss": 0.7232,
180
- "step": 2600
181
- },
182
- {
183
- "epoch": 0.1,
184
- "learning_rate": 0.00019944882378874316,
185
- "loss": 0.7406,
186
- "step": 2700
187
- },
188
- {
189
- "epoch": 0.1,
190
- "learning_rate": 0.0001994071237343858,
191
- "loss": 0.7166,
192
- "step": 2800
193
- },
194
- {
195
- "epoch": 0.11,
196
- "learning_rate": 0.00019936390791383936,
197
- "loss": 0.7308,
198
- "step": 2900
199
- },
200
- {
201
- "epoch": 0.11,
202
- "learning_rate": 0.00019931917698606143,
203
- "loss": 0.7288,
204
- "step": 3000
205
- },
206
- {
207
- "epoch": 0.11,
208
- "eval_loss": 0.7343490123748779,
209
- "eval_runtime": 1770.9966,
210
- "eval_samples_per_second": 0.306,
211
- "eval_steps_per_second": 0.306,
212
- "step": 3000
213
- },
214
- {
215
- "epoch": 0.12,
216
- "learning_rate": 0.00019927293163311206,
217
- "loss": 0.7236,
218
- "step": 3100
219
- },
220
- {
221
- "epoch": 0.12,
222
- "learning_rate": 0.00019922517256014337,
223
- "loss": 0.716,
224
- "step": 3200
225
- },
226
- {
227
- "epoch": 0.12,
228
- "learning_rate": 0.00019917590049538874,
229
- "loss": 0.7564,
230
- "step": 3300
231
- },
232
- {
233
- "epoch": 0.13,
234
- "learning_rate": 0.00019912511619015177,
235
- "loss": 0.7082,
236
- "step": 3400
237
- },
238
- {
239
- "epoch": 0.13,
240
- "learning_rate": 0.00019907282041879484,
241
- "loss": 0.7103,
242
- "step": 3500
243
- },
244
- {
245
- "epoch": 0.13,
246
- "learning_rate": 0.00019901901397872715,
247
- "loss": 0.7457,
248
- "step": 3600
249
- },
250
- {
251
- "epoch": 0.14,
252
- "learning_rate": 0.0001989636976903928,
253
- "loss": 0.7076,
254
- "step": 3700
255
- },
256
- {
257
- "epoch": 0.14,
258
- "learning_rate": 0.0001989068723972581,
259
- "loss": 0.7217,
260
- "step": 3800
261
- },
262
- {
263
- "epoch": 0.15,
264
- "learning_rate": 0.00019884853896579873,
265
- "loss": 0.7175,
266
- "step": 3900
267
- },
268
- {
269
- "epoch": 0.15,
270
- "learning_rate": 0.0001987886982854866,
271
- "loss": 0.7083,
272
- "step": 4000
273
- },
274
- {
275
- "epoch": 0.15,
276
- "eval_loss": 0.726176917552948,
277
- "eval_runtime": 1765.3933,
278
- "eval_samples_per_second": 0.307,
279
- "eval_steps_per_second": 0.307,
280
- "step": 4000
281
- },
282
- {
283
- "epoch": 0.15,
284
- "learning_rate": 0.00019872735126877622,
285
- "loss": 0.7228,
286
- "step": 4100
287
- },
288
- {
289
- "epoch": 0.16,
290
- "learning_rate": 0.0001986644988510909,
291
- "loss": 0.7133,
292
- "step": 4200
293
- },
294
- {
295
- "epoch": 0.16,
296
- "learning_rate": 0.00019860014199080822,
297
- "loss": 0.7243,
298
- "step": 4300
299
- },
300
- {
301
- "epoch": 0.16,
302
- "learning_rate": 0.00019853428166924576,
303
- "loss": 0.6929,
304
- "step": 4400
305
- },
306
- {
307
- "epoch": 0.17,
308
- "learning_rate": 0.00019846691889064593,
309
- "loss": 0.7392,
310
- "step": 4500
311
- },
312
- {
313
- "epoch": 0.17,
314
- "learning_rate": 0.0001983980546821607,
315
- "loss": 0.7247,
316
- "step": 4600
317
- },
318
- {
319
- "epoch": 0.18,
320
- "learning_rate": 0.0001983276900938359,
321
- "loss": 0.7258,
322
- "step": 4700
323
- },
324
- {
325
- "epoch": 0.18,
326
- "learning_rate": 0.00019825582619859532,
327
- "loss": 0.7197,
328
- "step": 4800
329
- },
330
- {
331
- "epoch": 0.18,
332
- "learning_rate": 0.0001981824640922242,
333
- "loss": 0.6906,
334
- "step": 4900
335
- },
336
- {
337
- "epoch": 0.19,
338
- "learning_rate": 0.00019810760489335266,
339
- "loss": 0.7274,
340
- "step": 5000
341
- },
342
- {
343
- "epoch": 0.19,
344
- "eval_loss": 0.7171670794487,
345
- "eval_runtime": 1812.7597,
346
- "eval_samples_per_second": 0.299,
347
- "eval_steps_per_second": 0.299,
348
- "step": 5000
349
- }
350
- ],
351
- "max_steps": 80463,
352
- "num_train_epochs": 3,
353
- "total_flos": 1.401555628522537e+18,
354
- "trial_name": null,
355
- "trial_params": null
356
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-5000/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9a281372a257be5e7ebbea8ac16ec51e707b224a5ed57c48e2f69549c5a031d8
3
- size 4027
 
 
 
 
{checkpoint-5000 β†’ checkpoint-8000/adapter_model}/README.md RENAMED
File without changes
{checkpoint-5000 β†’ checkpoint-8000/adapter_model}/adapter_config.json RENAMED
File without changes
{checkpoint-5000 β†’ checkpoint-8000/adapter_model}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a99a6fd84a005d613308e543278f3b8e5211714bb53b78a4198e5a97c2754079
3
  size 500897101
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4391c3f67be599ec6891113b7074b9b9ecd890ea222792b2e018423f592bd1f
3
  size 500897101