diff --git a/adapter_config.json b/adapter_config.json index 6984f5fd4bab99ec956ebe3bf12d7b823649c936..dca2f2f9b1cb6bbf83fbb8065749a4d18905e681 100644 --- a/adapter_config.json +++ b/adapter_config.json @@ -14,13 +14,13 @@ "r": 32, "revision": null, "target_modules": [ - "down_proj", - "k_proj", "gate_proj", "v_proj", - "o_proj", + "k_proj", "q_proj", - "up_proj" + "up_proj", + "o_proj", + "down_proj" ], "task_type": "CAUSAL_LM" } \ No newline at end of file diff --git a/adapter_model.bin b/adapter_model.bin index 9983417f84d25dbcb0ce2f5f4845d2e07f37baac..558fc37fdb360c0c71f8b5c5f07d76454d85038d 100644 --- a/adapter_model.bin +++ b/adapter_model.bin @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db5a92be0cd5f8b38b328e0f82e62452a3fa7b5052a0a1f93fd8c4b1dd18b7a7 +oid sha256:dd1d9047d90b00aaf0a6c21147e9789af5c4ef9e3c1df5179a1b86f66b610c52 size 500897101 diff --git a/checkpoint-2900/README.md b/checkpoint-2900/README.md deleted file mode 100644 index f2208b0ded6c10ed47b2ea9df5ab7c8dd721a53c..0000000000000000000000000000000000000000 --- a/checkpoint-2900/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -library_name: peft ---- -## Training procedure - - -The following `bitsandbytes` quantization config was used during training: -- load_in_8bit: False -- load_in_4bit: True -- llm_int8_threshold: 6.0 -- llm_int8_skip_modules: None -- llm_int8_enable_fp32_cpu_offload: False -- llm_int8_has_fp16_weight: False -- bnb_4bit_quant_type: nf4 -- bnb_4bit_use_double_quant: True -- bnb_4bit_compute_dtype: bfloat16 -### Framework versions - - -- PEFT 0.5.0.dev0 diff --git a/checkpoint-2900/adapter_config.json b/checkpoint-2900/adapter_config.json deleted file mode 100644 index 6984f5fd4bab99ec956ebe3bf12d7b823649c936..0000000000000000000000000000000000000000 --- a/checkpoint-2900/adapter_config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "auto_mapping": null, - "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16", - "bias": "none", - "fan_in_fan_out": null, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "lora_alpha": 16, - "lora_dropout": 0.05, - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "revision": null, - "target_modules": [ - "down_proj", - "k_proj", - "gate_proj", - "v_proj", - "o_proj", - "q_proj", - "up_proj" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-2900/adapter_model.bin b/checkpoint-2900/adapter_model.bin deleted file mode 100644 index 0e01f2caf060c4519b73a276a9a033a6d99e6d04..0000000000000000000000000000000000000000 --- a/checkpoint-2900/adapter_model.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:69e75ae7e1f69a41e91706ac778c4176886a39e991e0545bde52d6c1f744f678 -size 500897101 diff --git a/checkpoint-2900/adapter_model/README.md b/checkpoint-2900/adapter_model/README.md deleted file mode 100644 index f2208b0ded6c10ed47b2ea9df5ab7c8dd721a53c..0000000000000000000000000000000000000000 --- a/checkpoint-2900/adapter_model/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -library_name: peft ---- -## Training procedure - - -The following `bitsandbytes` quantization config was used during training: -- load_in_8bit: False -- load_in_4bit: True -- llm_int8_threshold: 6.0 -- llm_int8_skip_modules: None -- llm_int8_enable_fp32_cpu_offload: False -- llm_int8_has_fp16_weight: False -- bnb_4bit_quant_type: nf4 -- bnb_4bit_use_double_quant: True -- bnb_4bit_compute_dtype: bfloat16 -### Framework 
versions - - -- PEFT 0.5.0.dev0 diff --git a/checkpoint-2900/adapter_model/adapter_config.json b/checkpoint-2900/adapter_model/adapter_config.json deleted file mode 100644 index 6984f5fd4bab99ec956ebe3bf12d7b823649c936..0000000000000000000000000000000000000000 --- a/checkpoint-2900/adapter_model/adapter_config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "auto_mapping": null, - "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16", - "bias": "none", - "fan_in_fan_out": null, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "lora_alpha": 16, - "lora_dropout": 0.05, - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "revision": null, - "target_modules": [ - "down_proj", - "k_proj", - "gate_proj", - "v_proj", - "o_proj", - "q_proj", - "up_proj" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-2900/adapter_model/adapter_model.bin b/checkpoint-2900/adapter_model/adapter_model.bin deleted file mode 100644 index 0e01f2caf060c4519b73a276a9a033a6d99e6d04..0000000000000000000000000000000000000000 --- a/checkpoint-2900/adapter_model/adapter_model.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:69e75ae7e1f69a41e91706ac778c4176886a39e991e0545bde52d6c1f744f678 -size 500897101 diff --git a/checkpoint-2900/optimizer.pt b/checkpoint-2900/optimizer.pt deleted file mode 100644 index 8474a8e8d05575d6099412cb5ad050dd2f1f1b43..0000000000000000000000000000000000000000 --- a/checkpoint-2900/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c87b4dae7f70055caf6d97c6be4cde96969b4b232d33f2ec8df44468892e94ae -size 1001752701 diff --git a/checkpoint-2900/rng_state_0.pth b/checkpoint-2900/rng_state_0.pth deleted file mode 100644 index f79b83259792dffe57634581a2a672cf046f726b..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ef95949da63c44bf2803034d897cf02b2c1404fb37a4930aa3e5a0ec33f3e973 -size 27772 diff --git a/checkpoint-2900/rng_state_1.pth b/checkpoint-2900/rng_state_1.pth deleted file mode 100644 index c61db5b564881f61f4c37f6d13df0dd28791bffd..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d659d12bcecefb93ed656fc3f6ed770926a62be67a034197a8fc61d6626623aa -size 27772 diff --git a/checkpoint-2900/rng_state_10.pth b/checkpoint-2900/rng_state_10.pth deleted file mode 100644 index 91703dceb44bf355deb72239217b8fb0afb7e846..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_10.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3c8b3dd09b89ccf70ecfb60c04b41270829ea90cfa0f805505a977df5bed72a8 -size 27789 diff --git a/checkpoint-2900/rng_state_11.pth b/checkpoint-2900/rng_state_11.pth deleted file mode 100644 index 71dd3078ed5beefd3c50ab839bf51c78cfbbb998..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_11.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3a529130f265d55a197418034304b29c2f30e5460bdc34974ffa7dc75b218dee -size 27789 diff --git a/checkpoint-2900/rng_state_12.pth b/checkpoint-2900/rng_state_12.pth deleted file mode 100644 index 8286add7c4aa2ffef0fd5f0eaafab933eb7991d5..0000000000000000000000000000000000000000 --- 
a/checkpoint-2900/rng_state_12.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:364a7220780238b4e23965c23a3f97f81bcf3f5672fb2c6ba1f95a8ce81dcd6a -size 27789 diff --git a/checkpoint-2900/rng_state_13.pth b/checkpoint-2900/rng_state_13.pth deleted file mode 100644 index 855aa83a6d8f0b9fe70db0c4caa52374ea6dc1ee..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_13.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6ca8a5bf47b25db91e77352018364ed5cb8658c4ab84c74754d600a671025b52 -size 27789 diff --git a/checkpoint-2900/rng_state_2.pth b/checkpoint-2900/rng_state_2.pth deleted file mode 100644 index 64aae3442f1ab92a342dc2dab330563a68e132f8..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e38e3caba0fb2345524e8ba51c489e590ff9c29f6b6b1852244b5b876edef717 -size 27772 diff --git a/checkpoint-2900/rng_state_3.pth b/checkpoint-2900/rng_state_3.pth deleted file mode 100644 index 01b1781a3ce90af12bac00aef0d846d9019dd2d0..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e25391629539867dd50549dd648e30136ff0695597362098b5348ddfb9b13591 -size 27772 diff --git a/checkpoint-2900/rng_state_4.pth b/checkpoint-2900/rng_state_4.pth deleted file mode 100644 index faed7b9620bac7edf431140ea99c0d1e47be184e..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_4.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:05c76df7735faef919f1d2529cb22dc8b258235397aea29e5d522f742f0378d3 -size 27772 diff --git a/checkpoint-2900/rng_state_5.pth b/checkpoint-2900/rng_state_5.pth deleted file mode 100644 index 7b8dcaf84a4eb5ecca0d3a7401598b89a1d3837f..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_5.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:04f49ff59341bf10436ddf12dfda352e27600dda201e545e829be606ffa4a75b -size 27772 diff --git a/checkpoint-2900/rng_state_6.pth b/checkpoint-2900/rng_state_6.pth deleted file mode 100644 index 869b648f171f0dfa18c414bd69ad610bd3ef1081..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_6.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5235f6c75f2e3a3b228ad84d02c69e6e58f2c65de54d888dd789ea3de5eccaae -size 27772 diff --git a/checkpoint-2900/rng_state_7.pth b/checkpoint-2900/rng_state_7.pth deleted file mode 100644 index f50e129b72ec486b2e229cb1f16fc95c11f296fe..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_7.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6d1e22c7b9e7f7aea18263f185bde2560e57a023912a137341f61fe9d96545ab -size 27772 diff --git a/checkpoint-2900/rng_state_8.pth b/checkpoint-2900/rng_state_8.pth deleted file mode 100644 index 129db455492da242ee686fe22c9e30d467ca82c2..0000000000000000000000000000000000000000 --- a/checkpoint-2900/rng_state_8.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:466fa25c9331cd4ea53315fb2b9f257aa0623587238faa3ca9b184ccb63c1756 -size 27772 diff --git a/checkpoint-2900/rng_state_9.pth b/checkpoint-2900/rng_state_9.pth deleted file mode 100644 index 05df8534aafa7b4fd29c853c7d099d422cf8817e..0000000000000000000000000000000000000000 --- 
a/checkpoint-2900/rng_state_9.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:71dcdb5ca148232df1ccde976ae62a837427d137bd880eb213220fe9d130a051 -size 27772 diff --git a/checkpoint-2900/scheduler.pt b/checkpoint-2900/scheduler.pt deleted file mode 100644 index 426769270ce969e3d658cea22246fe6ca1e97104..0000000000000000000000000000000000000000 --- a/checkpoint-2900/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7bdab61020affcb7675661f6f3289658a70b84200495ea5e8c5d13b32c66edcc -size 627 diff --git a/checkpoint-2900/trainer_state.json b/checkpoint-2900/trainer_state.json deleted file mode 100644 index b6b16164b8f888d78a3677c708d66ed5443dfa49..0000000000000000000000000000000000000000 --- a/checkpoint-2900/trainer_state.json +++ /dev/null @@ -1,704 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 1.1244668476153548, - "global_step": 2900, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.02, - "learning_rate": 0.0001999867761371633, - "loss": 1.0435, - "step": 50 - }, - { - "epoch": 0.04, - "learning_rate": 0.00019993306018843102, - "loss": 0.8918, - "step": 100 - }, - { - "epoch": 0.06, - "learning_rate": 0.00019983804784290833, - "loss": 0.8874, - "step": 150 - }, - { - "epoch": 0.08, - "learning_rate": 0.00019970177836355307, - "loss": 0.8839, - "step": 200 - }, - { - "epoch": 0.09, - "learning_rate": 0.00019961818913082012, - "loss": 0.8801, - "step": 225 - }, - { - "epoch": 0.1, - "learning_rate": 0.00019952430806244534, - "loss": 0.8753, - "step": 250 - }, - { - "epoch": 0.11, - "learning_rate": 0.00019942014485754635, - "loss": 0.8754, - "step": 275 - }, - { - "epoch": 0.12, - "learning_rate": 0.00019930571027751713, - "loss": 0.8751, - "step": 300 - }, - { - "epoch": 0.13, - "learning_rate": 0.0001991810161449164, - "loss": 0.8819, - "step": 325 - }, - { - "epoch": 0.14, - "learning_rate": 0.00019904607534224612, - "loss": 0.8744, - "step": 350 - }, - { - "epoch": 0.15, - "learning_rate": 0.00019890090181062063, - "loss": 0.8735, - "step": 375 - }, - { - "epoch": 0.16, - "learning_rate": 0.00019874551054832625, - "loss": 0.8703, - "step": 400 - }, - { - "epoch": 0.16, - "learning_rate": 0.00019857991760927193, - "loss": 0.8715, - "step": 425 - }, - { - "epoch": 0.17, - "learning_rate": 0.00019840414010133045, - "loss": 0.8714, - "step": 450 - }, - { - "epoch": 0.18, - "learning_rate": 0.00019821819618457114, - "loss": 0.8653, - "step": 475 - }, - { - "epoch": 0.19, - "learning_rate": 0.0001980221050693837, - "loss": 0.8716, - "step": 500 - }, - { - "epoch": 0.2, - "learning_rate": 0.00019781588701449338, - "loss": 0.8695, - "step": 525 - }, - { - "epoch": 0.21, - "learning_rate": 0.0001975995633248682, - "loss": 0.8746, - "step": 550 - }, - { - "epoch": 0.22, - "learning_rate": 0.00019737315634951762, - "loss": 0.8731, - "step": 575 - }, - { - "epoch": 0.23, - "learning_rate": 0.00019713668947918386, - "loss": 0.867, - "step": 600 - }, - { - "epoch": 0.24, - "learning_rate": 0.0001968901871439252, - "loss": 0.8706, - "step": 625 - }, - { - "epoch": 0.25, - "learning_rate": 0.000196633674810592, - "loss": 0.8595, - "step": 650 - }, - { - "epoch": 0.26, - "learning_rate": 0.0001963671789801958, - "loss": 0.8627, - "step": 675 - }, - { - "epoch": 0.27, - "learning_rate": 0.0001960907271851712, - "loss": 0.8607, - "step": 700 - }, - { - "epoch": 0.28, - "learning_rate": 
0.00019580434798653173, - "loss": 0.858, - "step": 725 - }, - { - "epoch": 0.29, - "learning_rate": 0.00019550807097091876, - "loss": 0.8589, - "step": 750 - }, - { - "epoch": 0.3, - "learning_rate": 0.00019520192674754515, - "loss": 0.8561, - "step": 775 - }, - { - "epoch": 0.31, - "learning_rate": 0.00019488594694503264, - "loss": 0.8576, - "step": 800 - }, - { - "epoch": 0.32, - "learning_rate": 0.00019456016420814446, - "loss": 0.8597, - "step": 825 - }, - { - "epoch": 0.33, - "learning_rate": 0.00019422461219441254, - "loss": 0.862, - "step": 850 - }, - { - "epoch": 0.34, - "learning_rate": 0.00019387932557066035, - "loss": 0.8577, - "step": 875 - }, - { - "epoch": 0.35, - "learning_rate": 0.00019352434000942127, - "loss": 0.8632, - "step": 900 - }, - { - "epoch": 0.36, - "learning_rate": 0.00019315969218525333, - "loss": 0.8567, - "step": 925 - }, - { - "epoch": 0.37, - "learning_rate": 0.00019278541977095005, - "loss": 0.8501, - "step": 950 - }, - { - "epoch": 0.38, - "learning_rate": 0.00019240156143364844, - "loss": 0.8596, - "step": 975 - }, - { - "epoch": 0.39, - "learning_rate": 0.00019200815683083434, - "loss": 0.8556, - "step": 1000 - }, - { - "epoch": 0.39, - "eval_loss": 0.8521950244903564, - "eval_runtime": 59.8838, - "eval_samples_per_second": 12.19, - "eval_steps_per_second": 0.885, - "step": 1000 - }, - { - "epoch": 0.4, - "learning_rate": 0.00019160524660624505, - "loss": 0.8531, - "step": 1025 - }, - { - "epoch": 0.41, - "learning_rate": 0.00019119287238567045, - "loss": 0.8513, - "step": 1050 - }, - { - "epoch": 0.42, - "learning_rate": 0.00019077107677265253, - "loss": 0.8502, - "step": 1075 - }, - { - "epoch": 0.43, - "learning_rate": 0.00019033990334408384, - "loss": 0.8469, - "step": 1100 - }, - { - "epoch": 0.44, - "learning_rate": 0.00018989939664570545, - "loss": 0.8495, - "step": 1125 - }, - { - "epoch": 0.45, - "learning_rate": 0.00018944960218750484, - "loss": 0.8485, - "step": 1150 - }, - { - "epoch": 0.46, - "learning_rate": 0.00018899056643901404, - "loss": 0.8534, - "step": 1175 - }, - { - "epoch": 0.47, - "learning_rate": 0.00018852233682450893, - "loss": 0.8531, - "step": 1200 - }, - { - "epoch": 0.47, - "learning_rate": 0.00018804496171810948, - "loss": 0.8509, - "step": 1225 - }, - { - "epoch": 0.48, - "learning_rate": 0.00018755849043878222, - "loss": 0.8445, - "step": 1250 - }, - { - "epoch": 0.49, - "learning_rate": 0.0001870629732452449, - "loss": 0.8548, - "step": 1275 - }, - { - "epoch": 0.5, - "learning_rate": 0.00018655846133077417, - "loss": 0.8441, - "step": 1300 - }, - { - "epoch": 0.51, - "learning_rate": 0.00018604500681791656, - "loss": 0.8533, - "step": 1325 - }, - { - "epoch": 0.52, - "learning_rate": 0.00018552266275310373, - "loss": 0.8505, - "step": 1350 - }, - { - "epoch": 0.53, - "learning_rate": 0.0001849914831011719, - "loss": 0.8544, - "step": 1375 - }, - { - "epoch": 0.54, - "learning_rate": 0.00018445152273978668, - "loss": 0.845, - "step": 1400 - }, - { - "epoch": 0.55, - "learning_rate": 0.00018390283745377354, - "loss": 0.8376, - "step": 1425 - }, - { - "epoch": 0.56, - "learning_rate": 0.0001833454839293545, - "loss": 0.847, - "step": 1450 - }, - { - "epoch": 0.57, - "learning_rate": 0.00018277951974829163, - "loss": 0.8473, - "step": 1475 - }, - { - "epoch": 0.58, - "learning_rate": 0.0001822050033819382, - "loss": 0.8438, - "step": 1500 - }, - { - "epoch": 0.59, - "learning_rate": 0.00018162199418519785, - "loss": 0.8418, - "step": 1525 - }, - { - "epoch": 0.6, - "learning_rate": 0.00018103055239039243, - "loss": 
0.842, - "step": 1550 - }, - { - "epoch": 0.61, - "learning_rate": 0.0001804307391010393, - "loss": 0.8435, - "step": 1575 - }, - { - "epoch": 0.62, - "learning_rate": 0.00017982261628553842, - "loss": 0.8349, - "step": 1600 - }, - { - "epoch": 0.63, - "learning_rate": 0.0001792062467707703, - "loss": 0.8483, - "step": 1625 - }, - { - "epoch": 0.64, - "learning_rate": 0.0001785816942356052, - "loss": 0.8387, - "step": 1650 - }, - { - "epoch": 0.65, - "learning_rate": 0.00017794902320432429, - "loss": 0.843, - "step": 1675 - }, - { - "epoch": 0.66, - "learning_rate": 0.00017730829903995333, - "loss": 0.8424, - "step": 1700 - }, - { - "epoch": 0.67, - "learning_rate": 0.00017665958793751006, - "loss": 0.8418, - "step": 1725 - }, - { - "epoch": 0.68, - "learning_rate": 0.00017600295691716522, - "loss": 0.8384, - "step": 1750 - }, - { - "epoch": 0.69, - "learning_rate": 0.00017533847381731856, - "loss": 0.8445, - "step": 1775 - }, - { - "epoch": 0.7, - "learning_rate": 0.00017466620728759033, - "loss": 0.8446, - "step": 1800 - }, - { - "epoch": 0.71, - "learning_rate": 0.00017398622678172878, - "loss": 0.838, - "step": 1825 - }, - { - "epoch": 0.72, - "learning_rate": 0.0001732986025504348, - "loss": 0.8415, - "step": 1850 - }, - { - "epoch": 0.73, - "learning_rate": 0.000172603405634104, - "loss": 0.8357, - "step": 1875 - }, - { - "epoch": 0.74, - "learning_rate": 0.00017190070785548755, - "loss": 0.8311, - "step": 1900 - }, - { - "epoch": 0.75, - "learning_rate": 0.0001711905818122717, - "loss": 0.8333, - "step": 1925 - }, - { - "epoch": 0.76, - "learning_rate": 0.0001704731008695777, - "loss": 0.8387, - "step": 1950 - }, - { - "epoch": 0.77, - "learning_rate": 0.0001697483391523821, - "loss": 0.8442, - "step": 1975 - }, - { - "epoch": 0.78, - "learning_rate": 0.00016901637153785885, - "loss": 0.8399, - "step": 2000 - }, - { - "epoch": 0.78, - "eval_loss": 0.8339959383010864, - "eval_runtime": 58.5829, - "eval_samples_per_second": 12.461, - "eval_steps_per_second": 0.905, - "step": 2000 - }, - { - "epoch": 0.79, - "learning_rate": 0.0001682772736476434, - "loss": 0.8334, - "step": 2025 - }, - { - "epoch": 0.79, - "learning_rate": 0.0001675311218400201, - "loss": 0.835, - "step": 2050 - }, - { - "epoch": 0.8, - "learning_rate": 0.00016677799320203332, - "loss": 0.8368, - "step": 2075 - }, - { - "epoch": 0.81, - "learning_rate": 0.00016601796554152344, - "loss": 0.8278, - "step": 2100 - }, - { - "epoch": 0.82, - "learning_rate": 0.00016525111737908827, - "loss": 0.8334, - "step": 2125 - }, - { - "epoch": 0.83, - "learning_rate": 0.00016447752793997096, - "loss": 0.8416, - "step": 2150 - }, - { - "epoch": 0.84, - "learning_rate": 0.00016369727714587483, - "loss": 0.8297, - "step": 2175 - }, - { - "epoch": 0.85, - "learning_rate": 0.0001629104456067066, - "loss": 0.8327, - "step": 2200 - }, - { - "epoch": 0.86, - "learning_rate": 0.00016211711461224825, - "loss": 0.8324, - "step": 2225 - }, - { - "epoch": 0.87, - "learning_rate": 0.0001613173661237589, - "loss": 0.8313, - "step": 2250 - }, - { - "epoch": 0.88, - "learning_rate": 0.0001605112827655069, - "loss": 0.8292, - "step": 2275 - }, - { - "epoch": 0.89, - "learning_rate": 0.0001596989478162339, - "loss": 0.8334, - "step": 2300 - }, - { - "epoch": 0.9, - "learning_rate": 0.00015888044520055106, - "loss": 0.8352, - "step": 2325 - }, - { - "epoch": 0.91, - "learning_rate": 0.00015805585948026852, - "loss": 0.823, - "step": 2350 - }, - { - "epoch": 0.92, - "learning_rate": 0.000157225275845659, - "loss": 0.8293, - "step": 2375 - }, - { - 
"epoch": 0.93, - "learning_rate": 0.00015638878010665672, - "loss": 0.8289, - "step": 2400 - }, - { - "epoch": 0.94, - "learning_rate": 0.00015554645868399205, - "loss": 0.832, - "step": 2425 - }, - { - "epoch": 0.95, - "learning_rate": 0.00015469839860026308, - "loss": 0.8294, - "step": 2450 - }, - { - "epoch": 0.96, - "learning_rate": 0.0001538446874709452, - "loss": 0.8281, - "step": 2475 - }, - { - "epoch": 0.97, - "learning_rate": 0.00015298541349533925, - "loss": 0.8314, - "step": 2500 - }, - { - "epoch": 0.98, - "learning_rate": 0.00015212066544745926, - "loss": 0.831, - "step": 2525 - }, - { - "epoch": 0.99, - "learning_rate": 0.00015125053266686124, - "loss": 0.8319, - "step": 2550 - }, - { - "epoch": 1.0, - "learning_rate": 0.00015037510504941303, - "loss": 0.8259, - "step": 2575 - }, - { - "epoch": 1.01, - "learning_rate": 0.00014949447303800695, - "loss": 0.8133, - "step": 2600 - }, - { - "epoch": 1.02, - "learning_rate": 0.00014860872761321593, - "loss": 0.8139, - "step": 2625 - }, - { - "epoch": 1.03, - "learning_rate": 0.00014771796028389405, - "loss": 0.804, - "step": 2650 - }, - { - "epoch": 1.04, - "learning_rate": 0.0001468222630777225, - "loss": 0.8011, - "step": 2675 - }, - { - "epoch": 1.05, - "learning_rate": 0.00014592172853170193, - "loss": 0.8037, - "step": 2700 - }, - { - "epoch": 1.06, - "learning_rate": 0.00014501644968259212, - "loss": 0.8063, - "step": 2725 - }, - { - "epoch": 1.07, - "learning_rate": 0.00014410652005730025, - "loss": 0.8155, - "step": 2750 - }, - { - "epoch": 1.08, - "learning_rate": 0.00014319203366321826, - "loss": 0.8066, - "step": 2775 - }, - { - "epoch": 1.09, - "learning_rate": 0.0001422730849785107, - "loss": 0.8091, - "step": 2800 - }, - { - "epoch": 1.1, - "learning_rate": 0.0001413497689423539, - "loss": 0.8067, - "step": 2825 - }, - { - "epoch": 1.11, - "learning_rate": 0.00014042218094512755, - "loss": 0.8046, - "step": 2850 - }, - { - "epoch": 1.11, - "learning_rate": 0.00013949041681855985, - "loss": 0.8053, - "step": 2875 - }, - { - "epoch": 1.12, - "learning_rate": 0.0001385545728258264, - "loss": 0.8075, - "step": 2900 - } - ], - "max_steps": 7737, - "num_train_epochs": 3, - "total_flos": 1.248869156726086e+19, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-2900/training_args.bin b/checkpoint-2900/training_args.bin deleted file mode 100644 index 89b6f6487f3e8cc200589fafc4378937a3fadf66..0000000000000000000000000000000000000000 --- a/checkpoint-2900/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7292138fecd854f5f17371c439bbd450ee3c48e738b75818b778a55f4e26ef57 -size 4027 diff --git a/checkpoint-3000/README.md b/checkpoint-3000/README.md deleted file mode 100644 index f2208b0ded6c10ed47b2ea9df5ab7c8dd721a53c..0000000000000000000000000000000000000000 --- a/checkpoint-3000/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -library_name: peft ---- -## Training procedure - - -The following `bitsandbytes` quantization config was used during training: -- load_in_8bit: False -- load_in_4bit: True -- llm_int8_threshold: 6.0 -- llm_int8_skip_modules: None -- llm_int8_enable_fp32_cpu_offload: False -- llm_int8_has_fp16_weight: False -- bnb_4bit_quant_type: nf4 -- bnb_4bit_use_double_quant: True -- bnb_4bit_compute_dtype: bfloat16 -### Framework versions - - -- PEFT 0.5.0.dev0 diff --git a/checkpoint-3000/adapter_config.json b/checkpoint-3000/adapter_config.json deleted file mode 100644 index 
6984f5fd4bab99ec956ebe3bf12d7b823649c936..0000000000000000000000000000000000000000 --- a/checkpoint-3000/adapter_config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "auto_mapping": null, - "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16", - "bias": "none", - "fan_in_fan_out": null, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "lora_alpha": 16, - "lora_dropout": 0.05, - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "revision": null, - "target_modules": [ - "down_proj", - "k_proj", - "gate_proj", - "v_proj", - "o_proj", - "q_proj", - "up_proj" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-3000/adapter_model.bin b/checkpoint-3000/adapter_model.bin deleted file mode 100644 index f79a334e9a57ad38eb01a57c4bc21fe08f374f97..0000000000000000000000000000000000000000 --- a/checkpoint-3000/adapter_model.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3768dd1339753637e98fb5a78e49089bfc20cfbb2e5d5ab1d79b249f12bd91d6 -size 500897101 diff --git a/checkpoint-3000/adapter_model/README.md b/checkpoint-3000/adapter_model/README.md deleted file mode 100644 index f2208b0ded6c10ed47b2ea9df5ab7c8dd721a53c..0000000000000000000000000000000000000000 --- a/checkpoint-3000/adapter_model/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -library_name: peft ---- -## Training procedure - - -The following `bitsandbytes` quantization config was used during training: -- load_in_8bit: False -- load_in_4bit: True -- llm_int8_threshold: 6.0 -- llm_int8_skip_modules: None -- llm_int8_enable_fp32_cpu_offload: False -- llm_int8_has_fp16_weight: False -- bnb_4bit_quant_type: nf4 -- bnb_4bit_use_double_quant: True -- bnb_4bit_compute_dtype: bfloat16 -### Framework versions - - -- PEFT 0.5.0.dev0 diff --git a/checkpoint-3000/adapter_model/adapter_config.json b/checkpoint-3000/adapter_model/adapter_config.json deleted file mode 100644 index 6984f5fd4bab99ec956ebe3bf12d7b823649c936..0000000000000000000000000000000000000000 --- a/checkpoint-3000/adapter_model/adapter_config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "auto_mapping": null, - "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16", - "bias": "none", - "fan_in_fan_out": null, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "lora_alpha": 16, - "lora_dropout": 0.05, - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "revision": null, - "target_modules": [ - "down_proj", - "k_proj", - "gate_proj", - "v_proj", - "o_proj", - "q_proj", - "up_proj" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-3000/adapter_model/adapter_model.bin b/checkpoint-3000/adapter_model/adapter_model.bin deleted file mode 100644 index f79a334e9a57ad38eb01a57c4bc21fe08f374f97..0000000000000000000000000000000000000000 --- a/checkpoint-3000/adapter_model/adapter_model.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3768dd1339753637e98fb5a78e49089bfc20cfbb2e5d5ab1d79b249f12bd91d6 -size 500897101 diff --git a/checkpoint-3000/optimizer.pt b/checkpoint-3000/optimizer.pt deleted file mode 100644 index fa6996cae1b0cc628c935b8728cbabaaa4546684..0000000000000000000000000000000000000000 --- a/checkpoint-3000/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:43dd2594437f90014ceb24dae35f0caf1408f106ca0640abfdb6f930ec7d1917 -size 1001752701 diff --git a/checkpoint-3000/rng_state_0.pth b/checkpoint-3000/rng_state_0.pth deleted file mode 100644 index 510336d4202f191ba61185c520d4ae54884583ef..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:372977c5ad5707d01ab93c603a2084e21dcc5bbe3746e25cbd04e791984a40ad -size 27772 diff --git a/checkpoint-3000/rng_state_1.pth b/checkpoint-3000/rng_state_1.pth deleted file mode 100644 index d6060d886c1817a8864c1749c754d798213beeda..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0cf974f69d3a79d20401ab16c60981d40397dff4557e89f3fb4f166a1c2b6988 -size 27772 diff --git a/checkpoint-3000/rng_state_10.pth b/checkpoint-3000/rng_state_10.pth deleted file mode 100644 index 3b8cd54c3b3fc41c81b070409c53ee1d226aa39b..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_10.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f46cf42d5b149dfdabbc17334f090d493267bd9a2a1e982e5a7904ba8ad96c66 -size 27789 diff --git a/checkpoint-3000/rng_state_11.pth b/checkpoint-3000/rng_state_11.pth deleted file mode 100644 index 4fe7e9b680808b89e2beeb8d24b7d6a5c32d51c6..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_11.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0d782b19c86bb619094e572c4d5fa6b1b8aff8745fd27fd20d67ad46b26e3500 -size 27789 diff --git a/checkpoint-3000/rng_state_12.pth b/checkpoint-3000/rng_state_12.pth deleted file mode 100644 index 7510083919a37c116ce27099ad5c4487a14c2db6..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_12.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:46ed59f9d93f80f01f93314ed6347cd1950aad081d7b1afa92318f622eae07dc -size 27789 diff --git a/checkpoint-3000/rng_state_13.pth b/checkpoint-3000/rng_state_13.pth deleted file mode 100644 index 93625cc9a2dcfa01a5cff10c787c1fd15facf191..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_13.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:de734c628aa46f0b8f66418bc77802de4cf179c013f3fa6520a4c967c9c44ee9 -size 27789 diff --git a/checkpoint-3000/rng_state_2.pth b/checkpoint-3000/rng_state_2.pth deleted file mode 100644 index f344946020efc1b17146971b1fb54e53df596e00..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8b3ee02de5d832b0550492ad9197099cc49a1ef39883f9f21d43ee5eb7abbe91 -size 27772 diff --git a/checkpoint-3000/rng_state_3.pth b/checkpoint-3000/rng_state_3.pth deleted file mode 100644 index 1dcc4209ae1a2eda4450efbc9374dc11d689c520..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f47b89653cf7379ccd6b6642b1cb9305e7b0ff4639356066795aec75e109dcd0 -size 27772 diff --git a/checkpoint-3000/rng_state_4.pth b/checkpoint-3000/rng_state_4.pth deleted file mode 100644 index 6cf6ec04a76e01315f6444460afbb6d4d77522ae..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_4.pth +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:896ef18d13ff8a9f7f4850c38ec321a8050c0001f5f2abe7e1f217fabe5940c7 -size 27772 diff --git a/checkpoint-3000/rng_state_5.pth b/checkpoint-3000/rng_state_5.pth deleted file mode 100644 index 4d5b3d0f734b63a8efb6f6966a3e8159c5e71b96..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_5.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f03081aff382542ac4332b7380a218da9fc6f3cc8c0e182a596a1aa05e2c4a86 -size 27772 diff --git a/checkpoint-3000/rng_state_6.pth b/checkpoint-3000/rng_state_6.pth deleted file mode 100644 index 757389e834c2c8a7b9b4949aaf429905e9def0e3..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_6.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8ba56e43a346f575d11975e33c17fd24d061dc495ff64f652662b4fe2bd5ba2a -size 27772 diff --git a/checkpoint-3000/rng_state_7.pth b/checkpoint-3000/rng_state_7.pth deleted file mode 100644 index dec1ed920426e60cae6d9c216b7956bbe8cb82e9..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_7.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cd77f76c2f0d692464a663d61dae016bcf5dd8966ed8742d59dea407d2a7048f -size 27772 diff --git a/checkpoint-3000/rng_state_8.pth b/checkpoint-3000/rng_state_8.pth deleted file mode 100644 index d975d181308b679a3d20c6691c8b8fcb8401ce51..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_8.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:20c5b17ace5a48bc38935d68f2ceef000b52c248bec852ba042e42f55d1fdcec -size 27772 diff --git a/checkpoint-3000/rng_state_9.pth b/checkpoint-3000/rng_state_9.pth deleted file mode 100644 index 3f72afd6b4b543c9eec9741b036191689e53f96e..0000000000000000000000000000000000000000 --- a/checkpoint-3000/rng_state_9.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2362bce85dfe8a924fd3e27761bcd0046ea4d1f8b878596ba3abccb85ffaaa3d -size 27772 diff --git a/checkpoint-3000/scheduler.pt b/checkpoint-3000/scheduler.pt deleted file mode 100644 index 9495f8c0328e5475cf01b45c659cb42f5e076afc..0000000000000000000000000000000000000000 --- a/checkpoint-3000/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f5eb3f4a2e8a234b77c028b4206015afec84f155c6d5bcb4ec7cd2c9f89b304b -size 627 diff --git a/checkpoint-3000/trainer_state.json b/checkpoint-3000/trainer_state.json deleted file mode 100644 index 14fcb7acf5d6d39c026f127a8eccfcc6afab2437..0000000000000000000000000000000000000000 --- a/checkpoint-3000/trainer_state.json +++ /dev/null @@ -1,736 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 1.1632415664986429, - "global_step": 3000, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.02, - "learning_rate": 0.0001999867761371633, - "loss": 1.0435, - "step": 50 - }, - { - "epoch": 0.04, - "learning_rate": 0.00019993306018843102, - "loss": 0.8918, - "step": 100 - }, - { - "epoch": 0.06, - "learning_rate": 0.00019983804784290833, - "loss": 0.8874, - "step": 150 - }, - { - "epoch": 0.08, - "learning_rate": 0.00019970177836355307, - "loss": 0.8839, - "step": 200 - }, - { - "epoch": 0.09, - "learning_rate": 0.00019961818913082012, - "loss": 0.8801, - "step": 225 - }, - { - "epoch": 0.1, - "learning_rate": 0.00019952430806244534, - 
"loss": 0.8753, - "step": 250 - }, - { - "epoch": 0.11, - "learning_rate": 0.00019942014485754635, - "loss": 0.8754, - "step": 275 - }, - { - "epoch": 0.12, - "learning_rate": 0.00019930571027751713, - "loss": 0.8751, - "step": 300 - }, - { - "epoch": 0.13, - "learning_rate": 0.0001991810161449164, - "loss": 0.8819, - "step": 325 - }, - { - "epoch": 0.14, - "learning_rate": 0.00019904607534224612, - "loss": 0.8744, - "step": 350 - }, - { - "epoch": 0.15, - "learning_rate": 0.00019890090181062063, - "loss": 0.8735, - "step": 375 - }, - { - "epoch": 0.16, - "learning_rate": 0.00019874551054832625, - "loss": 0.8703, - "step": 400 - }, - { - "epoch": 0.16, - "learning_rate": 0.00019857991760927193, - "loss": 0.8715, - "step": 425 - }, - { - "epoch": 0.17, - "learning_rate": 0.00019840414010133045, - "loss": 0.8714, - "step": 450 - }, - { - "epoch": 0.18, - "learning_rate": 0.00019821819618457114, - "loss": 0.8653, - "step": 475 - }, - { - "epoch": 0.19, - "learning_rate": 0.0001980221050693837, - "loss": 0.8716, - "step": 500 - }, - { - "epoch": 0.2, - "learning_rate": 0.00019781588701449338, - "loss": 0.8695, - "step": 525 - }, - { - "epoch": 0.21, - "learning_rate": 0.0001975995633248682, - "loss": 0.8746, - "step": 550 - }, - { - "epoch": 0.22, - "learning_rate": 0.00019737315634951762, - "loss": 0.8731, - "step": 575 - }, - { - "epoch": 0.23, - "learning_rate": 0.00019713668947918386, - "loss": 0.867, - "step": 600 - }, - { - "epoch": 0.24, - "learning_rate": 0.0001968901871439252, - "loss": 0.8706, - "step": 625 - }, - { - "epoch": 0.25, - "learning_rate": 0.000196633674810592, - "loss": 0.8595, - "step": 650 - }, - { - "epoch": 0.26, - "learning_rate": 0.0001963671789801958, - "loss": 0.8627, - "step": 675 - }, - { - "epoch": 0.27, - "learning_rate": 0.0001960907271851712, - "loss": 0.8607, - "step": 700 - }, - { - "epoch": 0.28, - "learning_rate": 0.00019580434798653173, - "loss": 0.858, - "step": 725 - }, - { - "epoch": 0.29, - "learning_rate": 0.00019550807097091876, - "loss": 0.8589, - "step": 750 - }, - { - "epoch": 0.3, - "learning_rate": 0.00019520192674754515, - "loss": 0.8561, - "step": 775 - }, - { - "epoch": 0.31, - "learning_rate": 0.00019488594694503264, - "loss": 0.8576, - "step": 800 - }, - { - "epoch": 0.32, - "learning_rate": 0.00019456016420814446, - "loss": 0.8597, - "step": 825 - }, - { - "epoch": 0.33, - "learning_rate": 0.00019422461219441254, - "loss": 0.862, - "step": 850 - }, - { - "epoch": 0.34, - "learning_rate": 0.00019387932557066035, - "loss": 0.8577, - "step": 875 - }, - { - "epoch": 0.35, - "learning_rate": 0.00019352434000942127, - "loss": 0.8632, - "step": 900 - }, - { - "epoch": 0.36, - "learning_rate": 0.00019315969218525333, - "loss": 0.8567, - "step": 925 - }, - { - "epoch": 0.37, - "learning_rate": 0.00019278541977095005, - "loss": 0.8501, - "step": 950 - }, - { - "epoch": 0.38, - "learning_rate": 0.00019240156143364844, - "loss": 0.8596, - "step": 975 - }, - { - "epoch": 0.39, - "learning_rate": 0.00019200815683083434, - "loss": 0.8556, - "step": 1000 - }, - { - "epoch": 0.39, - "eval_loss": 0.8521950244903564, - "eval_runtime": 59.8838, - "eval_samples_per_second": 12.19, - "eval_steps_per_second": 0.885, - "step": 1000 - }, - { - "epoch": 0.4, - "learning_rate": 0.00019160524660624505, - "loss": 0.8531, - "step": 1025 - }, - { - "epoch": 0.41, - "learning_rate": 0.00019119287238567045, - "loss": 0.8513, - "step": 1050 - }, - { - "epoch": 0.42, - "learning_rate": 0.00019077107677265253, - "loss": 0.8502, - "step": 1075 - }, - { - "epoch": 0.43, - 
"learning_rate": 0.00019033990334408384, - "loss": 0.8469, - "step": 1100 - }, - { - "epoch": 0.44, - "learning_rate": 0.00018989939664570545, - "loss": 0.8495, - "step": 1125 - }, - { - "epoch": 0.45, - "learning_rate": 0.00018944960218750484, - "loss": 0.8485, - "step": 1150 - }, - { - "epoch": 0.46, - "learning_rate": 0.00018899056643901404, - "loss": 0.8534, - "step": 1175 - }, - { - "epoch": 0.47, - "learning_rate": 0.00018852233682450893, - "loss": 0.8531, - "step": 1200 - }, - { - "epoch": 0.47, - "learning_rate": 0.00018804496171810948, - "loss": 0.8509, - "step": 1225 - }, - { - "epoch": 0.48, - "learning_rate": 0.00018755849043878222, - "loss": 0.8445, - "step": 1250 - }, - { - "epoch": 0.49, - "learning_rate": 0.0001870629732452449, - "loss": 0.8548, - "step": 1275 - }, - { - "epoch": 0.5, - "learning_rate": 0.00018655846133077417, - "loss": 0.8441, - "step": 1300 - }, - { - "epoch": 0.51, - "learning_rate": 0.00018604500681791656, - "loss": 0.8533, - "step": 1325 - }, - { - "epoch": 0.52, - "learning_rate": 0.00018552266275310373, - "loss": 0.8505, - "step": 1350 - }, - { - "epoch": 0.53, - "learning_rate": 0.0001849914831011719, - "loss": 0.8544, - "step": 1375 - }, - { - "epoch": 0.54, - "learning_rate": 0.00018445152273978668, - "loss": 0.845, - "step": 1400 - }, - { - "epoch": 0.55, - "learning_rate": 0.00018390283745377354, - "loss": 0.8376, - "step": 1425 - }, - { - "epoch": 0.56, - "learning_rate": 0.0001833454839293545, - "loss": 0.847, - "step": 1450 - }, - { - "epoch": 0.57, - "learning_rate": 0.00018277951974829163, - "loss": 0.8473, - "step": 1475 - }, - { - "epoch": 0.58, - "learning_rate": 0.0001822050033819382, - "loss": 0.8438, - "step": 1500 - }, - { - "epoch": 0.59, - "learning_rate": 0.00018162199418519785, - "loss": 0.8418, - "step": 1525 - }, - { - "epoch": 0.6, - "learning_rate": 0.00018103055239039243, - "loss": 0.842, - "step": 1550 - }, - { - "epoch": 0.61, - "learning_rate": 0.0001804307391010393, - "loss": 0.8435, - "step": 1575 - }, - { - "epoch": 0.62, - "learning_rate": 0.00017982261628553842, - "loss": 0.8349, - "step": 1600 - }, - { - "epoch": 0.63, - "learning_rate": 0.0001792062467707703, - "loss": 0.8483, - "step": 1625 - }, - { - "epoch": 0.64, - "learning_rate": 0.0001785816942356052, - "loss": 0.8387, - "step": 1650 - }, - { - "epoch": 0.65, - "learning_rate": 0.00017794902320432429, - "loss": 0.843, - "step": 1675 - }, - { - "epoch": 0.66, - "learning_rate": 0.00017730829903995333, - "loss": 0.8424, - "step": 1700 - }, - { - "epoch": 0.67, - "learning_rate": 0.00017665958793751006, - "loss": 0.8418, - "step": 1725 - }, - { - "epoch": 0.68, - "learning_rate": 0.00017600295691716522, - "loss": 0.8384, - "step": 1750 - }, - { - "epoch": 0.69, - "learning_rate": 0.00017533847381731856, - "loss": 0.8445, - "step": 1775 - }, - { - "epoch": 0.7, - "learning_rate": 0.00017466620728759033, - "loss": 0.8446, - "step": 1800 - }, - { - "epoch": 0.71, - "learning_rate": 0.00017398622678172878, - "loss": 0.838, - "step": 1825 - }, - { - "epoch": 0.72, - "learning_rate": 0.0001732986025504348, - "loss": 0.8415, - "step": 1850 - }, - { - "epoch": 0.73, - "learning_rate": 0.000172603405634104, - "loss": 0.8357, - "step": 1875 - }, - { - "epoch": 0.74, - "learning_rate": 0.00017190070785548755, - "loss": 0.8311, - "step": 1900 - }, - { - "epoch": 0.75, - "learning_rate": 0.0001711905818122717, - "loss": 0.8333, - "step": 1925 - }, - { - "epoch": 0.76, - "learning_rate": 0.0001704731008695777, - "loss": 0.8387, - "step": 1950 - }, - { - "epoch": 0.77, - 
"learning_rate": 0.0001697483391523821, - "loss": 0.8442, - "step": 1975 - }, - { - "epoch": 0.78, - "learning_rate": 0.00016901637153785885, - "loss": 0.8399, - "step": 2000 - }, - { - "epoch": 0.78, - "eval_loss": 0.8339959383010864, - "eval_runtime": 58.5829, - "eval_samples_per_second": 12.461, - "eval_steps_per_second": 0.905, - "step": 2000 - }, - { - "epoch": 0.79, - "learning_rate": 0.0001682772736476434, - "loss": 0.8334, - "step": 2025 - }, - { - "epoch": 0.79, - "learning_rate": 0.0001675311218400201, - "loss": 0.835, - "step": 2050 - }, - { - "epoch": 0.8, - "learning_rate": 0.00016677799320203332, - "loss": 0.8368, - "step": 2075 - }, - { - "epoch": 0.81, - "learning_rate": 0.00016601796554152344, - "loss": 0.8278, - "step": 2100 - }, - { - "epoch": 0.82, - "learning_rate": 0.00016525111737908827, - "loss": 0.8334, - "step": 2125 - }, - { - "epoch": 0.83, - "learning_rate": 0.00016447752793997096, - "loss": 0.8416, - "step": 2150 - }, - { - "epoch": 0.84, - "learning_rate": 0.00016369727714587483, - "loss": 0.8297, - "step": 2175 - }, - { - "epoch": 0.85, - "learning_rate": 0.0001629104456067066, - "loss": 0.8327, - "step": 2200 - }, - { - "epoch": 0.86, - "learning_rate": 0.00016211711461224825, - "loss": 0.8324, - "step": 2225 - }, - { - "epoch": 0.87, - "learning_rate": 0.0001613173661237589, - "loss": 0.8313, - "step": 2250 - }, - { - "epoch": 0.88, - "learning_rate": 0.0001605112827655069, - "loss": 0.8292, - "step": 2275 - }, - { - "epoch": 0.89, - "learning_rate": 0.0001596989478162339, - "loss": 0.8334, - "step": 2300 - }, - { - "epoch": 0.9, - "learning_rate": 0.00015888044520055106, - "loss": 0.8352, - "step": 2325 - }, - { - "epoch": 0.91, - "learning_rate": 0.00015805585948026852, - "loss": 0.823, - "step": 2350 - }, - { - "epoch": 0.92, - "learning_rate": 0.000157225275845659, - "loss": 0.8293, - "step": 2375 - }, - { - "epoch": 0.93, - "learning_rate": 0.00015638878010665672, - "loss": 0.8289, - "step": 2400 - }, - { - "epoch": 0.94, - "learning_rate": 0.00015554645868399205, - "loss": 0.832, - "step": 2425 - }, - { - "epoch": 0.95, - "learning_rate": 0.00015469839860026308, - "loss": 0.8294, - "step": 2450 - }, - { - "epoch": 0.96, - "learning_rate": 0.0001538446874709452, - "loss": 0.8281, - "step": 2475 - }, - { - "epoch": 0.97, - "learning_rate": 0.00015298541349533925, - "loss": 0.8314, - "step": 2500 - }, - { - "epoch": 0.98, - "learning_rate": 0.00015212066544745926, - "loss": 0.831, - "step": 2525 - }, - { - "epoch": 0.99, - "learning_rate": 0.00015125053266686124, - "loss": 0.8319, - "step": 2550 - }, - { - "epoch": 1.0, - "learning_rate": 0.00015037510504941303, - "loss": 0.8259, - "step": 2575 - }, - { - "epoch": 1.01, - "learning_rate": 0.00014949447303800695, - "loss": 0.8133, - "step": 2600 - }, - { - "epoch": 1.02, - "learning_rate": 0.00014860872761321593, - "loss": 0.8139, - "step": 2625 - }, - { - "epoch": 1.03, - "learning_rate": 0.00014771796028389405, - "loss": 0.804, - "step": 2650 - }, - { - "epoch": 1.04, - "learning_rate": 0.0001468222630777225, - "loss": 0.8011, - "step": 2675 - }, - { - "epoch": 1.05, - "learning_rate": 0.00014592172853170193, - "loss": 0.8037, - "step": 2700 - }, - { - "epoch": 1.06, - "learning_rate": 0.00014501644968259212, - "loss": 0.8063, - "step": 2725 - }, - { - "epoch": 1.07, - "learning_rate": 0.00014410652005730025, - "loss": 0.8155, - "step": 2750 - }, - { - "epoch": 1.08, - "learning_rate": 0.00014319203366321826, - "loss": 0.8066, - "step": 2775 - }, - { - "epoch": 1.09, - "learning_rate": 
0.0001422730849785107, - "loss": 0.8091, - "step": 2800 - }, - { - "epoch": 1.1, - "learning_rate": 0.0001413497689423539, - "loss": 0.8067, - "step": 2825 - }, - { - "epoch": 1.11, - "learning_rate": 0.00014042218094512755, - "loss": 0.8046, - "step": 2850 - }, - { - "epoch": 1.11, - "learning_rate": 0.00013949041681855985, - "loss": 0.8053, - "step": 2875 - }, - { - "epoch": 1.12, - "learning_rate": 0.0001385545728258264, - "loss": 0.8075, - "step": 2900 - }, - { - "epoch": 1.13, - "learning_rate": 0.0001376147456516055, - "loss": 0.8015, - "step": 2925 - }, - { - "epoch": 1.14, - "learning_rate": 0.00013667103239208903, - "loss": 0.8016, - "step": 2950 - }, - { - "epoch": 1.15, - "learning_rate": 0.00013572353054495126, - "loss": 0.8029, - "step": 2975 - }, - { - "epoch": 1.16, - "learning_rate": 0.0001347723379992762, - "loss": 0.8017, - "step": 3000 - }, - { - "epoch": 1.16, - "eval_loss": 0.8229297995567322, - "eval_runtime": 59.3398, - "eval_samples_per_second": 12.302, - "eval_steps_per_second": 0.893, - "step": 3000 - } - ], - "max_steps": 7737, - "num_train_epochs": 3, - "total_flos": 1.2918900565647294e+19, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-3000/training_args.bin b/checkpoint-3000/training_args.bin deleted file mode 100644 index 89b6f6487f3e8cc200589fafc4378937a3fadf66..0000000000000000000000000000000000000000 --- a/checkpoint-3000/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7292138fecd854f5f17371c439bbd450ee3c48e738b75818b778a55f4e26ef57 -size 4027 diff --git a/checkpoint-3100/README.md b/checkpoint-3100/README.md deleted file mode 100644 index f2208b0ded6c10ed47b2ea9df5ab7c8dd721a53c..0000000000000000000000000000000000000000 --- a/checkpoint-3100/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -library_name: peft ---- -## Training procedure - - -The following `bitsandbytes` quantization config was used during training: -- load_in_8bit: False -- load_in_4bit: True -- llm_int8_threshold: 6.0 -- llm_int8_skip_modules: None -- llm_int8_enable_fp32_cpu_offload: False -- llm_int8_has_fp16_weight: False -- bnb_4bit_quant_type: nf4 -- bnb_4bit_use_double_quant: True -- bnb_4bit_compute_dtype: bfloat16 -### Framework versions - - -- PEFT 0.5.0.dev0 diff --git a/checkpoint-3100/adapter_config.json b/checkpoint-3100/adapter_config.json deleted file mode 100644 index 6984f5fd4bab99ec956ebe3bf12d7b823649c936..0000000000000000000000000000000000000000 --- a/checkpoint-3100/adapter_config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "auto_mapping": null, - "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16", - "bias": "none", - "fan_in_fan_out": null, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "lora_alpha": 16, - "lora_dropout": 0.05, - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "revision": null, - "target_modules": [ - "down_proj", - "k_proj", - "gate_proj", - "v_proj", - "o_proj", - "q_proj", - "up_proj" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-3100/adapter_model.bin b/checkpoint-3100/adapter_model.bin deleted file mode 100644 index c15fa3db3fa10e4e183953719d7e0696bdb42120..0000000000000000000000000000000000000000 --- a/checkpoint-3100/adapter_model.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:604d08ed43fee049ea3799c6450a4a1d5f8cc6e58ddf33f377215e77f03ec769 -size 500897101 diff --git 
a/checkpoint-3100/adapter_model/README.md b/checkpoint-3100/adapter_model/README.md deleted file mode 100644 index f2208b0ded6c10ed47b2ea9df5ab7c8dd721a53c..0000000000000000000000000000000000000000 --- a/checkpoint-3100/adapter_model/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -library_name: peft ---- -## Training procedure - - -The following `bitsandbytes` quantization config was used during training: -- load_in_8bit: False -- load_in_4bit: True -- llm_int8_threshold: 6.0 -- llm_int8_skip_modules: None -- llm_int8_enable_fp32_cpu_offload: False -- llm_int8_has_fp16_weight: False -- bnb_4bit_quant_type: nf4 -- bnb_4bit_use_double_quant: True -- bnb_4bit_compute_dtype: bfloat16 -### Framework versions - - -- PEFT 0.5.0.dev0 diff --git a/checkpoint-3100/adapter_model/adapter_config.json b/checkpoint-3100/adapter_model/adapter_config.json deleted file mode 100644 index 6984f5fd4bab99ec956ebe3bf12d7b823649c936..0000000000000000000000000000000000000000 --- a/checkpoint-3100/adapter_model/adapter_config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "auto_mapping": null, - "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16", - "bias": "none", - "fan_in_fan_out": null, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "lora_alpha": 16, - "lora_dropout": 0.05, - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "revision": null, - "target_modules": [ - "down_proj", - "k_proj", - "gate_proj", - "v_proj", - "o_proj", - "q_proj", - "up_proj" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-3100/adapter_model/adapter_model.bin b/checkpoint-3100/adapter_model/adapter_model.bin deleted file mode 100644 index c15fa3db3fa10e4e183953719d7e0696bdb42120..0000000000000000000000000000000000000000 --- a/checkpoint-3100/adapter_model/adapter_model.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:604d08ed43fee049ea3799c6450a4a1d5f8cc6e58ddf33f377215e77f03ec769 -size 500897101 diff --git a/checkpoint-3100/optimizer.pt b/checkpoint-3100/optimizer.pt deleted file mode 100644 index 2113c46d598579c16d2a4b5ecaa23eed143bd496..0000000000000000000000000000000000000000 --- a/checkpoint-3100/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:94469f937804d5ce0461fe1eb4aba770ee24539198898ed26597db99e8a9be8c -size 1001752701 diff --git a/checkpoint-3100/rng_state_0.pth b/checkpoint-3100/rng_state_0.pth deleted file mode 100644 index 8c701fecdc36714bdbd9809d3ecea407aab49e46..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cccffebda14d72607655c8cc414bc2d644ff4850957bc97842d7ad5b4e8a35c7 -size 27772 diff --git a/checkpoint-3100/rng_state_1.pth b/checkpoint-3100/rng_state_1.pth deleted file mode 100644 index 7c229c02dec63bdc2e781dbd03d8fdfbda5c0bf9..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bf748078b6d64f54a4c82c901a07fa9a0b12ed31fbdb377aed3baece602b7886 -size 27772 diff --git a/checkpoint-3100/rng_state_10.pth b/checkpoint-3100/rng_state_10.pth deleted file mode 100644 index 7010d794264a6f79b34c3d5f9960f5e60cd4dc62..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_10.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:98298c09628a5370836fdab430191117c3386c78337f9f2c62f101aa17d7e4f8 -size 27789 diff --git a/checkpoint-3100/rng_state_11.pth b/checkpoint-3100/rng_state_11.pth deleted file mode 100644 index 69bf391e000763718374f273648faea4493154e2..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_11.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:68ae9f8a2401b67aa57f50c34841e07a3dc51f9b81e0436c33e2b67fe7c4baee -size 27789 diff --git a/checkpoint-3100/rng_state_12.pth b/checkpoint-3100/rng_state_12.pth deleted file mode 100644 index 38ecccc69861fd0c6e9e0892ce1c94f08e46c7ee..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_12.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:22848d56d849bc600e823fc9cfb270dd9494429c7f77cf79dbfbdf4a72efaba0 -size 27789 diff --git a/checkpoint-3100/rng_state_13.pth b/checkpoint-3100/rng_state_13.pth deleted file mode 100644 index 60986aa8681df4bdf8579fd6208c5af5a417bdc7..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_13.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f3076b14a53ecb3520119317b5b2e50278e91bfa176e1f3b4d62a09a55b10c15 -size 27789 diff --git a/checkpoint-3100/rng_state_2.pth b/checkpoint-3100/rng_state_2.pth deleted file mode 100644 index 60462776d902cf64a63733935612b4801f264554..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8502ae4ad065d02b5651b21e8f626b0f0ba70cb25fc8fff417ceacffff2c0af9 -size 27772 diff --git a/checkpoint-3100/rng_state_3.pth b/checkpoint-3100/rng_state_3.pth deleted file mode 100644 index a819389f95a1df3ef7c9232918172c745277d8e2..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e560e73955af90e58da7fd5613f914bc880456d4e233dfb371f9de5f68da2f65 -size 27772 diff --git a/checkpoint-3100/rng_state_4.pth b/checkpoint-3100/rng_state_4.pth deleted file mode 100644 index d622001df7cc09dd4608f228212d19f9dbbd48ac..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_4.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c1325c064dbe9b4b47234209edc5ce58860efd7b44e31e1e367cc65157805fb5 -size 27772 diff --git a/checkpoint-3100/rng_state_5.pth b/checkpoint-3100/rng_state_5.pth deleted file mode 100644 index 96bff79b8a079f3b367eb6f8413531fb5bff0ecd..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_5.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dae1ede6bf5b2686c0a3e858c74f1ad7c53f73bff27db97fa9e477440f52c571 -size 27772 diff --git a/checkpoint-3100/rng_state_6.pth b/checkpoint-3100/rng_state_6.pth deleted file mode 100644 index 0f377ee3865769ca7390e478c192919ac370e5d3..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_6.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:181f86e82ddf8a60f98f38e3eb3e9daea0f03767e98f46ac40b43f04100a7cc7 -size 27772 diff --git a/checkpoint-3100/rng_state_7.pth b/checkpoint-3100/rng_state_7.pth deleted file mode 100644 index 2ecd97daa65fc10ea67f18705f827aa7c0168804..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_7.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:6dba2bbc40ef363764a30227e3decb70eb10e50a3e97241431a928d011805fbe -size 27772 diff --git a/checkpoint-3100/rng_state_8.pth b/checkpoint-3100/rng_state_8.pth deleted file mode 100644 index 53a0bd7dabed2ef1ddcb2a4ea7869e6a28c93cb5..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_8.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d1bae03f64a7771751c64781fb44b1c9744351929cb10e9281c9f741f3eea0d3 -size 27772 diff --git a/checkpoint-3100/rng_state_9.pth b/checkpoint-3100/rng_state_9.pth deleted file mode 100644 index 095f3cd91372639f120a8f39eaf40d3bc942a640..0000000000000000000000000000000000000000 --- a/checkpoint-3100/rng_state_9.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9135a97bdad98c9b172d205cb6e8826f379e19e767179271dbc08babc1c3dd01 -size 27772 diff --git a/checkpoint-3100/scheduler.pt b/checkpoint-3100/scheduler.pt deleted file mode 100644 index a5454ac32b2919846d4817be960479ffed2e9e89..0000000000000000000000000000000000000000 --- a/checkpoint-3100/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d5a1153b6ecad2aef39ef54d264602f1ef058c1cd82ac7533e0298d295685be1 -size 627 diff --git a/checkpoint-3100/trainer_state.json b/checkpoint-3100/trainer_state.json deleted file mode 100644 index ff116b5ea24bf2e784481775f4d5435463907505..0000000000000000000000000000000000000000 --- a/checkpoint-3100/trainer_state.json +++ /dev/null @@ -1,760 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 1.202016285381931, - "global_step": 3100, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.02, - "learning_rate": 0.0001999867761371633, - "loss": 1.0435, - "step": 50 - }, - { - "epoch": 0.04, - "learning_rate": 0.00019993306018843102, - "loss": 0.8918, - "step": 100 - }, - { - "epoch": 0.06, - "learning_rate": 0.00019983804784290833, - "loss": 0.8874, - "step": 150 - }, - { - "epoch": 0.08, - "learning_rate": 0.00019970177836355307, - "loss": 0.8839, - "step": 200 - }, - { - "epoch": 0.09, - "learning_rate": 0.00019961818913082012, - "loss": 0.8801, - "step": 225 - }, - { - "epoch": 0.1, - "learning_rate": 0.00019952430806244534, - "loss": 0.8753, - "step": 250 - }, - { - "epoch": 0.11, - "learning_rate": 0.00019942014485754635, - "loss": 0.8754, - "step": 275 - }, - { - "epoch": 0.12, - "learning_rate": 0.00019930571027751713, - "loss": 0.8751, - "step": 300 - }, - { - "epoch": 0.13, - "learning_rate": 0.0001991810161449164, - "loss": 0.8819, - "step": 325 - }, - { - "epoch": 0.14, - "learning_rate": 0.00019904607534224612, - "loss": 0.8744, - "step": 350 - }, - { - "epoch": 0.15, - "learning_rate": 0.00019890090181062063, - "loss": 0.8735, - "step": 375 - }, - { - "epoch": 0.16, - "learning_rate": 0.00019874551054832625, - "loss": 0.8703, - "step": 400 - }, - { - "epoch": 0.16, - "learning_rate": 0.00019857991760927193, - "loss": 0.8715, - "step": 425 - }, - { - "epoch": 0.17, - "learning_rate": 0.00019840414010133045, - "loss": 0.8714, - "step": 450 - }, - { - "epoch": 0.18, - "learning_rate": 0.00019821819618457114, - "loss": 0.8653, - "step": 475 - }, - { - "epoch": 0.19, - "learning_rate": 0.0001980221050693837, - "loss": 0.8716, - "step": 500 - }, - { - "epoch": 0.2, - "learning_rate": 0.00019781588701449338, - "loss": 0.8695, - "step": 525 - }, - { - "epoch": 0.21, - "learning_rate": 0.0001975995633248682, - "loss": 0.8746, - 
"step": 550 - }, - { - "epoch": 0.22, - "learning_rate": 0.00019737315634951762, - "loss": 0.8731, - "step": 575 - }, - { - "epoch": 0.23, - "learning_rate": 0.00019713668947918386, - "loss": 0.867, - "step": 600 - }, - { - "epoch": 0.24, - "learning_rate": 0.0001968901871439252, - "loss": 0.8706, - "step": 625 - }, - { - "epoch": 0.25, - "learning_rate": 0.000196633674810592, - "loss": 0.8595, - "step": 650 - }, - { - "epoch": 0.26, - "learning_rate": 0.0001963671789801958, - "loss": 0.8627, - "step": 675 - }, - { - "epoch": 0.27, - "learning_rate": 0.0001960907271851712, - "loss": 0.8607, - "step": 700 - }, - { - "epoch": 0.28, - "learning_rate": 0.00019580434798653173, - "loss": 0.858, - "step": 725 - }, - { - "epoch": 0.29, - "learning_rate": 0.00019550807097091876, - "loss": 0.8589, - "step": 750 - }, - { - "epoch": 0.3, - "learning_rate": 0.00019520192674754515, - "loss": 0.8561, - "step": 775 - }, - { - "epoch": 0.31, - "learning_rate": 0.00019488594694503264, - "loss": 0.8576, - "step": 800 - }, - { - "epoch": 0.32, - "learning_rate": 0.00019456016420814446, - "loss": 0.8597, - "step": 825 - }, - { - "epoch": 0.33, - "learning_rate": 0.00019422461219441254, - "loss": 0.862, - "step": 850 - }, - { - "epoch": 0.34, - "learning_rate": 0.00019387932557066035, - "loss": 0.8577, - "step": 875 - }, - { - "epoch": 0.35, - "learning_rate": 0.00019352434000942127, - "loss": 0.8632, - "step": 900 - }, - { - "epoch": 0.36, - "learning_rate": 0.00019315969218525333, - "loss": 0.8567, - "step": 925 - }, - { - "epoch": 0.37, - "learning_rate": 0.00019278541977095005, - "loss": 0.8501, - "step": 950 - }, - { - "epoch": 0.38, - "learning_rate": 0.00019240156143364844, - "loss": 0.8596, - "step": 975 - }, - { - "epoch": 0.39, - "learning_rate": 0.00019200815683083434, - "loss": 0.8556, - "step": 1000 - }, - { - "epoch": 0.39, - "eval_loss": 0.8521950244903564, - "eval_runtime": 59.8838, - "eval_samples_per_second": 12.19, - "eval_steps_per_second": 0.885, - "step": 1000 - }, - { - "epoch": 0.4, - "learning_rate": 0.00019160524660624505, - "loss": 0.8531, - "step": 1025 - }, - { - "epoch": 0.41, - "learning_rate": 0.00019119287238567045, - "loss": 0.8513, - "step": 1050 - }, - { - "epoch": 0.42, - "learning_rate": 0.00019077107677265253, - "loss": 0.8502, - "step": 1075 - }, - { - "epoch": 0.43, - "learning_rate": 0.00019033990334408384, - "loss": 0.8469, - "step": 1100 - }, - { - "epoch": 0.44, - "learning_rate": 0.00018989939664570545, - "loss": 0.8495, - "step": 1125 - }, - { - "epoch": 0.45, - "learning_rate": 0.00018944960218750484, - "loss": 0.8485, - "step": 1150 - }, - { - "epoch": 0.46, - "learning_rate": 0.00018899056643901404, - "loss": 0.8534, - "step": 1175 - }, - { - "epoch": 0.47, - "learning_rate": 0.00018852233682450893, - "loss": 0.8531, - "step": 1200 - }, - { - "epoch": 0.47, - "learning_rate": 0.00018804496171810948, - "loss": 0.8509, - "step": 1225 - }, - { - "epoch": 0.48, - "learning_rate": 0.00018755849043878222, - "loss": 0.8445, - "step": 1250 - }, - { - "epoch": 0.49, - "learning_rate": 0.0001870629732452449, - "loss": 0.8548, - "step": 1275 - }, - { - "epoch": 0.5, - "learning_rate": 0.00018655846133077417, - "loss": 0.8441, - "step": 1300 - }, - { - "epoch": 0.51, - "learning_rate": 0.00018604500681791656, - "loss": 0.8533, - "step": 1325 - }, - { - "epoch": 0.52, - "learning_rate": 0.00018552266275310373, - "loss": 0.8505, - "step": 1350 - }, - { - "epoch": 0.53, - "learning_rate": 0.0001849914831011719, - "loss": 0.8544, - "step": 1375 - }, - { - "epoch": 0.54, - 
"learning_rate": 0.00018445152273978668, - "loss": 0.845, - "step": 1400 - }, - { - "epoch": 0.55, - "learning_rate": 0.00018390283745377354, - "loss": 0.8376, - "step": 1425 - }, - { - "epoch": 0.56, - "learning_rate": 0.0001833454839293545, - "loss": 0.847, - "step": 1450 - }, - { - "epoch": 0.57, - "learning_rate": 0.00018277951974829163, - "loss": 0.8473, - "step": 1475 - }, - { - "epoch": 0.58, - "learning_rate": 0.0001822050033819382, - "loss": 0.8438, - "step": 1500 - }, - { - "epoch": 0.59, - "learning_rate": 0.00018162199418519785, - "loss": 0.8418, - "step": 1525 - }, - { - "epoch": 0.6, - "learning_rate": 0.00018103055239039243, - "loss": 0.842, - "step": 1550 - }, - { - "epoch": 0.61, - "learning_rate": 0.0001804307391010393, - "loss": 0.8435, - "step": 1575 - }, - { - "epoch": 0.62, - "learning_rate": 0.00017982261628553842, - "loss": 0.8349, - "step": 1600 - }, - { - "epoch": 0.63, - "learning_rate": 0.0001792062467707703, - "loss": 0.8483, - "step": 1625 - }, - { - "epoch": 0.64, - "learning_rate": 0.0001785816942356052, - "loss": 0.8387, - "step": 1650 - }, - { - "epoch": 0.65, - "learning_rate": 0.00017794902320432429, - "loss": 0.843, - "step": 1675 - }, - { - "epoch": 0.66, - "learning_rate": 0.00017730829903995333, - "loss": 0.8424, - "step": 1700 - }, - { - "epoch": 0.67, - "learning_rate": 0.00017665958793751006, - "loss": 0.8418, - "step": 1725 - }, - { - "epoch": 0.68, - "learning_rate": 0.00017600295691716522, - "loss": 0.8384, - "step": 1750 - }, - { - "epoch": 0.69, - "learning_rate": 0.00017533847381731856, - "loss": 0.8445, - "step": 1775 - }, - { - "epoch": 0.7, - "learning_rate": 0.00017466620728759033, - "loss": 0.8446, - "step": 1800 - }, - { - "epoch": 0.71, - "learning_rate": 0.00017398622678172878, - "loss": 0.838, - "step": 1825 - }, - { - "epoch": 0.72, - "learning_rate": 0.0001732986025504348, - "loss": 0.8415, - "step": 1850 - }, - { - "epoch": 0.73, - "learning_rate": 0.000172603405634104, - "loss": 0.8357, - "step": 1875 - }, - { - "epoch": 0.74, - "learning_rate": 0.00017190070785548755, - "loss": 0.8311, - "step": 1900 - }, - { - "epoch": 0.75, - "learning_rate": 0.0001711905818122717, - "loss": 0.8333, - "step": 1925 - }, - { - "epoch": 0.76, - "learning_rate": 0.0001704731008695777, - "loss": 0.8387, - "step": 1950 - }, - { - "epoch": 0.77, - "learning_rate": 0.0001697483391523821, - "loss": 0.8442, - "step": 1975 - }, - { - "epoch": 0.78, - "learning_rate": 0.00016901637153785885, - "loss": 0.8399, - "step": 2000 - }, - { - "epoch": 0.78, - "eval_loss": 0.8339959383010864, - "eval_runtime": 58.5829, - "eval_samples_per_second": 12.461, - "eval_steps_per_second": 0.905, - "step": 2000 - }, - { - "epoch": 0.79, - "learning_rate": 0.0001682772736476434, - "loss": 0.8334, - "step": 2025 - }, - { - "epoch": 0.79, - "learning_rate": 0.0001675311218400201, - "loss": 0.835, - "step": 2050 - }, - { - "epoch": 0.8, - "learning_rate": 0.00016677799320203332, - "loss": 0.8368, - "step": 2075 - }, - { - "epoch": 0.81, - "learning_rate": 0.00016601796554152344, - "loss": 0.8278, - "step": 2100 - }, - { - "epoch": 0.82, - "learning_rate": 0.00016525111737908827, - "loss": 0.8334, - "step": 2125 - }, - { - "epoch": 0.83, - "learning_rate": 0.00016447752793997096, - "loss": 0.8416, - "step": 2150 - }, - { - "epoch": 0.84, - "learning_rate": 0.00016369727714587483, - "loss": 0.8297, - "step": 2175 - }, - { - "epoch": 0.85, - "learning_rate": 0.0001629104456067066, - "loss": 0.8327, - "step": 2200 - }, - { - "epoch": 0.86, - "learning_rate": 
0.00016211711461224825, - "loss": 0.8324, - "step": 2225 - }, - { - "epoch": 0.87, - "learning_rate": 0.0001613173661237589, - "loss": 0.8313, - "step": 2250 - }, - { - "epoch": 0.88, - "learning_rate": 0.0001605112827655069, - "loss": 0.8292, - "step": 2275 - }, - { - "epoch": 0.89, - "learning_rate": 0.0001596989478162339, - "loss": 0.8334, - "step": 2300 - }, - { - "epoch": 0.9, - "learning_rate": 0.00015888044520055106, - "loss": 0.8352, - "step": 2325 - }, - { - "epoch": 0.91, - "learning_rate": 0.00015805585948026852, - "loss": 0.823, - "step": 2350 - }, - { - "epoch": 0.92, - "learning_rate": 0.000157225275845659, - "loss": 0.8293, - "step": 2375 - }, - { - "epoch": 0.93, - "learning_rate": 0.00015638878010665672, - "loss": 0.8289, - "step": 2400 - }, - { - "epoch": 0.94, - "learning_rate": 0.00015554645868399205, - "loss": 0.832, - "step": 2425 - }, - { - "epoch": 0.95, - "learning_rate": 0.00015469839860026308, - "loss": 0.8294, - "step": 2450 - }, - { - "epoch": 0.96, - "learning_rate": 0.0001538446874709452, - "loss": 0.8281, - "step": 2475 - }, - { - "epoch": 0.97, - "learning_rate": 0.00015298541349533925, - "loss": 0.8314, - "step": 2500 - }, - { - "epoch": 0.98, - "learning_rate": 0.00015212066544745926, - "loss": 0.831, - "step": 2525 - }, - { - "epoch": 0.99, - "learning_rate": 0.00015125053266686124, - "loss": 0.8319, - "step": 2550 - }, - { - "epoch": 1.0, - "learning_rate": 0.00015037510504941303, - "loss": 0.8259, - "step": 2575 - }, - { - "epoch": 1.01, - "learning_rate": 0.00014949447303800695, - "loss": 0.8133, - "step": 2600 - }, - { - "epoch": 1.02, - "learning_rate": 0.00014860872761321593, - "loss": 0.8139, - "step": 2625 - }, - { - "epoch": 1.03, - "learning_rate": 0.00014771796028389405, - "loss": 0.804, - "step": 2650 - }, - { - "epoch": 1.04, - "learning_rate": 0.0001468222630777225, - "loss": 0.8011, - "step": 2675 - }, - { - "epoch": 1.05, - "learning_rate": 0.00014592172853170193, - "loss": 0.8037, - "step": 2700 - }, - { - "epoch": 1.06, - "learning_rate": 0.00014501644968259212, - "loss": 0.8063, - "step": 2725 - }, - { - "epoch": 1.07, - "learning_rate": 0.00014410652005730025, - "loss": 0.8155, - "step": 2750 - }, - { - "epoch": 1.08, - "learning_rate": 0.00014319203366321826, - "loss": 0.8066, - "step": 2775 - }, - { - "epoch": 1.09, - "learning_rate": 0.0001422730849785107, - "loss": 0.8091, - "step": 2800 - }, - { - "epoch": 1.1, - "learning_rate": 0.0001413497689423539, - "loss": 0.8067, - "step": 2825 - }, - { - "epoch": 1.11, - "learning_rate": 0.00014042218094512755, - "loss": 0.8046, - "step": 2850 - }, - { - "epoch": 1.11, - "learning_rate": 0.00013949041681855985, - "loss": 0.8053, - "step": 2875 - }, - { - "epoch": 1.12, - "learning_rate": 0.0001385545728258264, - "loss": 0.8075, - "step": 2900 - }, - { - "epoch": 1.13, - "learning_rate": 0.0001376147456516055, - "loss": 0.8015, - "step": 2925 - }, - { - "epoch": 1.14, - "learning_rate": 0.00013667103239208903, - "loss": 0.8016, - "step": 2950 - }, - { - "epoch": 1.15, - "learning_rate": 0.00013572353054495126, - "loss": 0.8029, - "step": 2975 - }, - { - "epoch": 1.16, - "learning_rate": 0.0001347723379992762, - "loss": 0.8017, - "step": 3000 - }, - { - "epoch": 1.16, - "eval_loss": 0.8229297995567322, - "eval_runtime": 59.3398, - "eval_samples_per_second": 12.302, - "eval_steps_per_second": 0.893, - "step": 3000 - }, - { - "epoch": 1.17, - "learning_rate": 0.0001338175530254443, - "loss": 0.8049, - "step": 3025 - }, - { - "epoch": 1.18, - "learning_rate": 0.00013285927426497985, - "loss": 
0.8027, - "step": 3050 - }, - { - "epoch": 1.19, - "learning_rate": 0.00013189760072036008, - "loss": 0.8028, - "step": 3075 - }, - { - "epoch": 1.2, - "learning_rate": 0.0001309326317447869, - "loss": 0.8021, - "step": 3100 - } - ], - "max_steps": 7737, - "num_train_epochs": 3, - "total_flos": 1.3349988348702228e+19, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-3100/training_args.bin b/checkpoint-3100/training_args.bin deleted file mode 100644 index 89b6f6487f3e8cc200589fafc4378937a3fadf66..0000000000000000000000000000000000000000 --- a/checkpoint-3100/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7292138fecd854f5f17371c439bbd450ee3c48e738b75818b778a55f4e26ef57 -size 4027 diff --git a/checkpoint-3200/README.md b/checkpoint-3200/README.md deleted file mode 100644 index f2208b0ded6c10ed47b2ea9df5ab7c8dd721a53c..0000000000000000000000000000000000000000 --- a/checkpoint-3200/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -library_name: peft ---- -## Training procedure - - -The following `bitsandbytes` quantization config was used during training: -- load_in_8bit: False -- load_in_4bit: True -- llm_int8_threshold: 6.0 -- llm_int8_skip_modules: None -- llm_int8_enable_fp32_cpu_offload: False -- llm_int8_has_fp16_weight: False -- bnb_4bit_quant_type: nf4 -- bnb_4bit_use_double_quant: True -- bnb_4bit_compute_dtype: bfloat16 -### Framework versions - - -- PEFT 0.5.0.dev0 diff --git a/checkpoint-3200/adapter_config.json b/checkpoint-3200/adapter_config.json deleted file mode 100644 index 6984f5fd4bab99ec956ebe3bf12d7b823649c936..0000000000000000000000000000000000000000 --- a/checkpoint-3200/adapter_config.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "auto_mapping": null, - "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16", - "bias": "none", - "fan_in_fan_out": null, - "inference_mode": true, - "init_lora_weights": true, - "layers_pattern": null, - "layers_to_transform": null, - "lora_alpha": 16, - "lora_dropout": 0.05, - "modules_to_save": null, - "peft_type": "LORA", - "r": 32, - "revision": null, - "target_modules": [ - "down_proj", - "k_proj", - "gate_proj", - "v_proj", - "o_proj", - "q_proj", - "up_proj" - ], - "task_type": "CAUSAL_LM" -} \ No newline at end of file diff --git a/checkpoint-3200/adapter_model.bin b/checkpoint-3200/adapter_model.bin deleted file mode 100644 index 9983417f84d25dbcb0ce2f5f4845d2e07f37baac..0000000000000000000000000000000000000000 --- a/checkpoint-3200/adapter_model.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:db5a92be0cd5f8b38b328e0f82e62452a3fa7b5052a0a1f93fd8c4b1dd18b7a7 -size 500897101 diff --git a/checkpoint-3200/optimizer.pt b/checkpoint-3200/optimizer.pt deleted file mode 100644 index 226452e1d09e64d212b48ac55c6172578c2af053..0000000000000000000000000000000000000000 --- a/checkpoint-3200/optimizer.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:734792a711547c325f20f564fba3fc1dafb5a5e2eb00a8e88a628060b50ad4e0 -size 1001752701 diff --git a/checkpoint-3200/rng_state_0.pth b/checkpoint-3200/rng_state_0.pth deleted file mode 100644 index f1227112a055b9edb5eeb329533f5aed76e67d80..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:47493f691983492eb548e89c4bc00a908f9336311a3cdb9b6d4d0436862a7afe -size 27772 diff --git a/checkpoint-3200/rng_state_1.pth 
b/checkpoint-3200/rng_state_1.pth deleted file mode 100644 index 634b8df2811de99580c787bd77a406df7d6a5509..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7fd2d4258cdbe26a4276e6adf71ed40f5fb449eeb7efadbcb60ee6c3eb243b4a -size 27772 diff --git a/checkpoint-3200/rng_state_10.pth b/checkpoint-3200/rng_state_10.pth deleted file mode 100644 index 67bbbcdf71e1616cf3652e62ce0bf9646740465b..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_10.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a1a66a04d36cd4ced3163ee7de7c3f532444837ca748b2c7402468c7cf9ac0cc -size 27789 diff --git a/checkpoint-3200/rng_state_11.pth b/checkpoint-3200/rng_state_11.pth deleted file mode 100644 index b3a1d0f07d52d1e00fc37067477a0fded1f4137e..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_11.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6fd816f782b7158df45bbb3a0847b7e567cbc8246b73c484a51cea72f837a615 -size 27789 diff --git a/checkpoint-3200/rng_state_12.pth b/checkpoint-3200/rng_state_12.pth deleted file mode 100644 index fc0ca9f711bacbbbae129702028db9b98da142de..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_12.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a5c0e7d21944297ce2889bf41e83eaf69e29b09934c6e5396b81f21c8d554008 -size 27789 diff --git a/checkpoint-3200/rng_state_13.pth b/checkpoint-3200/rng_state_13.pth deleted file mode 100644 index c2edd1b9466db9c5e305a5e0456176a22be139ce..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_13.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a4a856ae9cc1a21a8770367e26ba40b6eb42e788a7f1046e05e030a2571733a3 -size 27789 diff --git a/checkpoint-3200/rng_state_2.pth b/checkpoint-3200/rng_state_2.pth deleted file mode 100644 index 3adb767352c06c044f2f114dd6c52eb83db232f3..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:21fe838aa7244c39844b0cc17fda41f0bf30a92e8c365cbf60e9909dc9539b7a -size 27772 diff --git a/checkpoint-3200/rng_state_3.pth b/checkpoint-3200/rng_state_3.pth deleted file mode 100644 index ad285df37a770b7a13cd51599d010197b3fb9772..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c73d64b42d450cb0cd9d780bb5d76b4532c040febbd3e57c1a7e507f307c6919 -size 27772 diff --git a/checkpoint-3200/rng_state_4.pth b/checkpoint-3200/rng_state_4.pth deleted file mode 100644 index 1ffcead5c4feafbc420a89b58372a0c00df964d5..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_4.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dd75d438f8c213a76df48ea475d4e8d725ef60a192611b3a3b5156b54d9bd3aa -size 27772 diff --git a/checkpoint-3200/rng_state_5.pth b/checkpoint-3200/rng_state_5.pth deleted file mode 100644 index 57c81481a57ac9811e8813fdfcf9f35248637e65..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_5.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a92b4282b70600e8661d984a46aae33c95db7e5a45a98aa75d1ab7fe809f0e87 -size 27772 diff --git 
a/checkpoint-3200/rng_state_6.pth b/checkpoint-3200/rng_state_6.pth deleted file mode 100644 index 3dbeee076e2fd6d7449c24e95f3c71a242fe4aa5..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_6.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:60a6277ffeade78c02a3c80e695be2609d9c03891fa74c239da9236a5d8a411f -size 27772 diff --git a/checkpoint-3200/rng_state_7.pth b/checkpoint-3200/rng_state_7.pth deleted file mode 100644 index 6902be9c4287b3c7d011a62201d86ca18cb84de8..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_7.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:04ceb85b25ddf1ff875502c31946bedfdea45ff9b753957a01149a9fc85fe4aa -size 27772 diff --git a/checkpoint-3200/rng_state_8.pth b/checkpoint-3200/rng_state_8.pth deleted file mode 100644 index a3f4fa192010aaaaba4e5c479c145ece5ec0d660..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_8.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7647a4e90848b95f8b12db8456e13797e34b5cb91f94f229dfe860ef88396c11 -size 27772 diff --git a/checkpoint-3200/rng_state_9.pth b/checkpoint-3200/rng_state_9.pth deleted file mode 100644 index 119154421bf9b73bef8d5b5ac45cc6432215fa7d..0000000000000000000000000000000000000000 --- a/checkpoint-3200/rng_state_9.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f4297218a6c8fddf475d7093a33dde8e1024bb348b2eacb353ad76d4cb02c652 -size 27772 diff --git a/checkpoint-3200/scheduler.pt b/checkpoint-3200/scheduler.pt deleted file mode 100644 index 7bff1a8e664b7b0d992cbf63b72d3c26a103d7f8..0000000000000000000000000000000000000000 --- a/checkpoint-3200/scheduler.pt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c684e0d6e24a062502c981434add298c33b176147f87f977608afcf00cc005c3 -size 627 diff --git a/checkpoint-3200/trainer_state.json b/checkpoint-3200/trainer_state.json deleted file mode 100644 index bd1f8246bdf2d7cc608063f44e0a61d07ba85f46..0000000000000000000000000000000000000000 --- a/checkpoint-3200/trainer_state.json +++ /dev/null @@ -1,784 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 1.240791004265219, - "global_step": 3200, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.02, - "learning_rate": 0.0001999867761371633, - "loss": 1.0435, - "step": 50 - }, - { - "epoch": 0.04, - "learning_rate": 0.00019993306018843102, - "loss": 0.8918, - "step": 100 - }, - { - "epoch": 0.06, - "learning_rate": 0.00019983804784290833, - "loss": 0.8874, - "step": 150 - }, - { - "epoch": 0.08, - "learning_rate": 0.00019970177836355307, - "loss": 0.8839, - "step": 200 - }, - { - "epoch": 0.09, - "learning_rate": 0.00019961818913082012, - "loss": 0.8801, - "step": 225 - }, - { - "epoch": 0.1, - "learning_rate": 0.00019952430806244534, - "loss": 0.8753, - "step": 250 - }, - { - "epoch": 0.11, - "learning_rate": 0.00019942014485754635, - "loss": 0.8754, - "step": 275 - }, - { - "epoch": 0.12, - "learning_rate": 0.00019930571027751713, - "loss": 0.8751, - "step": 300 - }, - { - "epoch": 0.13, - "learning_rate": 0.0001991810161449164, - "loss": 0.8819, - "step": 325 - }, - { - "epoch": 0.14, - "learning_rate": 0.00019904607534224612, - "loss": 0.8744, - "step": 350 - }, - { - "epoch": 0.15, - "learning_rate": 0.00019890090181062063, - "loss": 0.8735, - "step": 375 - 
}, - { - "epoch": 0.16, - "learning_rate": 0.00019874551054832625, - "loss": 0.8703, - "step": 400 - }, - { - "epoch": 0.16, - "learning_rate": 0.00019857991760927193, - "loss": 0.8715, - "step": 425 - }, - { - "epoch": 0.17, - "learning_rate": 0.00019840414010133045, - "loss": 0.8714, - "step": 450 - }, - { - "epoch": 0.18, - "learning_rate": 0.00019821819618457114, - "loss": 0.8653, - "step": 475 - }, - { - "epoch": 0.19, - "learning_rate": 0.0001980221050693837, - "loss": 0.8716, - "step": 500 - }, - { - "epoch": 0.2, - "learning_rate": 0.00019781588701449338, - "loss": 0.8695, - "step": 525 - }, - { - "epoch": 0.21, - "learning_rate": 0.0001975995633248682, - "loss": 0.8746, - "step": 550 - }, - { - "epoch": 0.22, - "learning_rate": 0.00019737315634951762, - "loss": 0.8731, - "step": 575 - }, - { - "epoch": 0.23, - "learning_rate": 0.00019713668947918386, - "loss": 0.867, - "step": 600 - }, - { - "epoch": 0.24, - "learning_rate": 0.0001968901871439252, - "loss": 0.8706, - "step": 625 - }, - { - "epoch": 0.25, - "learning_rate": 0.000196633674810592, - "loss": 0.8595, - "step": 650 - }, - { - "epoch": 0.26, - "learning_rate": 0.0001963671789801958, - "loss": 0.8627, - "step": 675 - }, - { - "epoch": 0.27, - "learning_rate": 0.0001960907271851712, - "loss": 0.8607, - "step": 700 - }, - { - "epoch": 0.28, - "learning_rate": 0.00019580434798653173, - "loss": 0.858, - "step": 725 - }, - { - "epoch": 0.29, - "learning_rate": 0.00019550807097091876, - "loss": 0.8589, - "step": 750 - }, - { - "epoch": 0.3, - "learning_rate": 0.00019520192674754515, - "loss": 0.8561, - "step": 775 - }, - { - "epoch": 0.31, - "learning_rate": 0.00019488594694503264, - "loss": 0.8576, - "step": 800 - }, - { - "epoch": 0.32, - "learning_rate": 0.00019456016420814446, - "loss": 0.8597, - "step": 825 - }, - { - "epoch": 0.33, - "learning_rate": 0.00019422461219441254, - "loss": 0.862, - "step": 850 - }, - { - "epoch": 0.34, - "learning_rate": 0.00019387932557066035, - "loss": 0.8577, - "step": 875 - }, - { - "epoch": 0.35, - "learning_rate": 0.00019352434000942127, - "loss": 0.8632, - "step": 900 - }, - { - "epoch": 0.36, - "learning_rate": 0.00019315969218525333, - "loss": 0.8567, - "step": 925 - }, - { - "epoch": 0.37, - "learning_rate": 0.00019278541977095005, - "loss": 0.8501, - "step": 950 - }, - { - "epoch": 0.38, - "learning_rate": 0.00019240156143364844, - "loss": 0.8596, - "step": 975 - }, - { - "epoch": 0.39, - "learning_rate": 0.00019200815683083434, - "loss": 0.8556, - "step": 1000 - }, - { - "epoch": 0.39, - "eval_loss": 0.8521950244903564, - "eval_runtime": 59.8838, - "eval_samples_per_second": 12.19, - "eval_steps_per_second": 0.885, - "step": 1000 - }, - { - "epoch": 0.4, - "learning_rate": 0.00019160524660624505, - "loss": 0.8531, - "step": 1025 - }, - { - "epoch": 0.41, - "learning_rate": 0.00019119287238567045, - "loss": 0.8513, - "step": 1050 - }, - { - "epoch": 0.42, - "learning_rate": 0.00019077107677265253, - "loss": 0.8502, - "step": 1075 - }, - { - "epoch": 0.43, - "learning_rate": 0.00019033990334408384, - "loss": 0.8469, - "step": 1100 - }, - { - "epoch": 0.44, - "learning_rate": 0.00018989939664570545, - "loss": 0.8495, - "step": 1125 - }, - { - "epoch": 0.45, - "learning_rate": 0.00018944960218750484, - "loss": 0.8485, - "step": 1150 - }, - { - "epoch": 0.46, - "learning_rate": 0.00018899056643901404, - "loss": 0.8534, - "step": 1175 - }, - { - "epoch": 0.47, - "learning_rate": 0.00018852233682450893, - "loss": 0.8531, - "step": 1200 - }, - { - "epoch": 0.47, - "learning_rate": 
0.00018804496171810948, - "loss": 0.8509, - "step": 1225 - }, - { - "epoch": 0.48, - "learning_rate": 0.00018755849043878222, - "loss": 0.8445, - "step": 1250 - }, - { - "epoch": 0.49, - "learning_rate": 0.0001870629732452449, - "loss": 0.8548, - "step": 1275 - }, - { - "epoch": 0.5, - "learning_rate": 0.00018655846133077417, - "loss": 0.8441, - "step": 1300 - }, - { - "epoch": 0.51, - "learning_rate": 0.00018604500681791656, - "loss": 0.8533, - "step": 1325 - }, - { - "epoch": 0.52, - "learning_rate": 0.00018552266275310373, - "loss": 0.8505, - "step": 1350 - }, - { - "epoch": 0.53, - "learning_rate": 0.0001849914831011719, - "loss": 0.8544, - "step": 1375 - }, - { - "epoch": 0.54, - "learning_rate": 0.00018445152273978668, - "loss": 0.845, - "step": 1400 - }, - { - "epoch": 0.55, - "learning_rate": 0.00018390283745377354, - "loss": 0.8376, - "step": 1425 - }, - { - "epoch": 0.56, - "learning_rate": 0.0001833454839293545, - "loss": 0.847, - "step": 1450 - }, - { - "epoch": 0.57, - "learning_rate": 0.00018277951974829163, - "loss": 0.8473, - "step": 1475 - }, - { - "epoch": 0.58, - "learning_rate": 0.0001822050033819382, - "loss": 0.8438, - "step": 1500 - }, - { - "epoch": 0.59, - "learning_rate": 0.00018162199418519785, - "loss": 0.8418, - "step": 1525 - }, - { - "epoch": 0.6, - "learning_rate": 0.00018103055239039243, - "loss": 0.842, - "step": 1550 - }, - { - "epoch": 0.61, - "learning_rate": 0.0001804307391010393, - "loss": 0.8435, - "step": 1575 - }, - { - "epoch": 0.62, - "learning_rate": 0.00017982261628553842, - "loss": 0.8349, - "step": 1600 - }, - { - "epoch": 0.63, - "learning_rate": 0.0001792062467707703, - "loss": 0.8483, - "step": 1625 - }, - { - "epoch": 0.64, - "learning_rate": 0.0001785816942356052, - "loss": 0.8387, - "step": 1650 - }, - { - "epoch": 0.65, - "learning_rate": 0.00017794902320432429, - "loss": 0.843, - "step": 1675 - }, - { - "epoch": 0.66, - "learning_rate": 0.00017730829903995333, - "loss": 0.8424, - "step": 1700 - }, - { - "epoch": 0.67, - "learning_rate": 0.00017665958793751006, - "loss": 0.8418, - "step": 1725 - }, - { - "epoch": 0.68, - "learning_rate": 0.00017600295691716522, - "loss": 0.8384, - "step": 1750 - }, - { - "epoch": 0.69, - "learning_rate": 0.00017533847381731856, - "loss": 0.8445, - "step": 1775 - }, - { - "epoch": 0.7, - "learning_rate": 0.00017466620728759033, - "loss": 0.8446, - "step": 1800 - }, - { - "epoch": 0.71, - "learning_rate": 0.00017398622678172878, - "loss": 0.838, - "step": 1825 - }, - { - "epoch": 0.72, - "learning_rate": 0.0001732986025504348, - "loss": 0.8415, - "step": 1850 - }, - { - "epoch": 0.73, - "learning_rate": 0.000172603405634104, - "loss": 0.8357, - "step": 1875 - }, - { - "epoch": 0.74, - "learning_rate": 0.00017190070785548755, - "loss": 0.8311, - "step": 1900 - }, - { - "epoch": 0.75, - "learning_rate": 0.0001711905818122717, - "loss": 0.8333, - "step": 1925 - }, - { - "epoch": 0.76, - "learning_rate": 0.0001704731008695777, - "loss": 0.8387, - "step": 1950 - }, - { - "epoch": 0.77, - "learning_rate": 0.0001697483391523821, - "loss": 0.8442, - "step": 1975 - }, - { - "epoch": 0.78, - "learning_rate": 0.00016901637153785885, - "loss": 0.8399, - "step": 2000 - }, - { - "epoch": 0.78, - "eval_loss": 0.8339959383010864, - "eval_runtime": 58.5829, - "eval_samples_per_second": 12.461, - "eval_steps_per_second": 0.905, - "step": 2000 - }, - { - "epoch": 0.79, - "learning_rate": 0.0001682772736476434, - "loss": 0.8334, - "step": 2025 - }, - { - "epoch": 0.79, - "learning_rate": 0.0001675311218400201, - "loss": 
0.835, - "step": 2050 - }, - { - "epoch": 0.8, - "learning_rate": 0.00016677799320203332, - "loss": 0.8368, - "step": 2075 - }, - { - "epoch": 0.81, - "learning_rate": 0.00016601796554152344, - "loss": 0.8278, - "step": 2100 - }, - { - "epoch": 0.82, - "learning_rate": 0.00016525111737908827, - "loss": 0.8334, - "step": 2125 - }, - { - "epoch": 0.83, - "learning_rate": 0.00016447752793997096, - "loss": 0.8416, - "step": 2150 - }, - { - "epoch": 0.84, - "learning_rate": 0.00016369727714587483, - "loss": 0.8297, - "step": 2175 - }, - { - "epoch": 0.85, - "learning_rate": 0.0001629104456067066, - "loss": 0.8327, - "step": 2200 - }, - { - "epoch": 0.86, - "learning_rate": 0.00016211711461224825, - "loss": 0.8324, - "step": 2225 - }, - { - "epoch": 0.87, - "learning_rate": 0.0001613173661237589, - "loss": 0.8313, - "step": 2250 - }, - { - "epoch": 0.88, - "learning_rate": 0.0001605112827655069, - "loss": 0.8292, - "step": 2275 - }, - { - "epoch": 0.89, - "learning_rate": 0.0001596989478162339, - "loss": 0.8334, - "step": 2300 - }, - { - "epoch": 0.9, - "learning_rate": 0.00015888044520055106, - "loss": 0.8352, - "step": 2325 - }, - { - "epoch": 0.91, - "learning_rate": 0.00015805585948026852, - "loss": 0.823, - "step": 2350 - }, - { - "epoch": 0.92, - "learning_rate": 0.000157225275845659, - "loss": 0.8293, - "step": 2375 - }, - { - "epoch": 0.93, - "learning_rate": 0.00015638878010665672, - "loss": 0.8289, - "step": 2400 - }, - { - "epoch": 0.94, - "learning_rate": 0.00015554645868399205, - "loss": 0.832, - "step": 2425 - }, - { - "epoch": 0.95, - "learning_rate": 0.00015469839860026308, - "loss": 0.8294, - "step": 2450 - }, - { - "epoch": 0.96, - "learning_rate": 0.0001538446874709452, - "loss": 0.8281, - "step": 2475 - }, - { - "epoch": 0.97, - "learning_rate": 0.00015298541349533925, - "loss": 0.8314, - "step": 2500 - }, - { - "epoch": 0.98, - "learning_rate": 0.00015212066544745926, - "loss": 0.831, - "step": 2525 - }, - { - "epoch": 0.99, - "learning_rate": 0.00015125053266686124, - "loss": 0.8319, - "step": 2550 - }, - { - "epoch": 1.0, - "learning_rate": 0.00015037510504941303, - "loss": 0.8259, - "step": 2575 - }, - { - "epoch": 1.01, - "learning_rate": 0.00014949447303800695, - "loss": 0.8133, - "step": 2600 - }, - { - "epoch": 1.02, - "learning_rate": 0.00014860872761321593, - "loss": 0.8139, - "step": 2625 - }, - { - "epoch": 1.03, - "learning_rate": 0.00014771796028389405, - "loss": 0.804, - "step": 2650 - }, - { - "epoch": 1.04, - "learning_rate": 0.0001468222630777225, - "loss": 0.8011, - "step": 2675 - }, - { - "epoch": 1.05, - "learning_rate": 0.00014592172853170193, - "loss": 0.8037, - "step": 2700 - }, - { - "epoch": 1.06, - "learning_rate": 0.00014501644968259212, - "loss": 0.8063, - "step": 2725 - }, - { - "epoch": 1.07, - "learning_rate": 0.00014410652005730025, - "loss": 0.8155, - "step": 2750 - }, - { - "epoch": 1.08, - "learning_rate": 0.00014319203366321826, - "loss": 0.8066, - "step": 2775 - }, - { - "epoch": 1.09, - "learning_rate": 0.0001422730849785107, - "loss": 0.8091, - "step": 2800 - }, - { - "epoch": 1.1, - "learning_rate": 0.0001413497689423539, - "loss": 0.8067, - "step": 2825 - }, - { - "epoch": 1.11, - "learning_rate": 0.00014042218094512755, - "loss": 0.8046, - "step": 2850 - }, - { - "epoch": 1.11, - "learning_rate": 0.00013949041681855985, - "loss": 0.8053, - "step": 2875 - }, - { - "epoch": 1.12, - "learning_rate": 0.0001385545728258264, - "loss": 0.8075, - "step": 2900 - }, - { - "epoch": 1.13, - "learning_rate": 0.0001376147456516055, - "loss": 
0.8015, - "step": 2925 - }, - { - "epoch": 1.14, - "learning_rate": 0.00013667103239208903, - "loss": 0.8016, - "step": 2950 - }, - { - "epoch": 1.15, - "learning_rate": 0.00013572353054495126, - "loss": 0.8029, - "step": 2975 - }, - { - "epoch": 1.16, - "learning_rate": 0.0001347723379992762, - "loss": 0.8017, - "step": 3000 - }, - { - "epoch": 1.16, - "eval_loss": 0.8229297995567322, - "eval_runtime": 59.3398, - "eval_samples_per_second": 12.302, - "eval_steps_per_second": 0.893, - "step": 3000 - }, - { - "epoch": 1.17, - "learning_rate": 0.0001338175530254443, - "loss": 0.8049, - "step": 3025 - }, - { - "epoch": 1.18, - "learning_rate": 0.00013285927426497985, - "loss": 0.8027, - "step": 3050 - }, - { - "epoch": 1.19, - "learning_rate": 0.00013189760072036008, - "loss": 0.8028, - "step": 3075 - }, - { - "epoch": 1.2, - "learning_rate": 0.0001309326317447869, - "loss": 0.8021, - "step": 3100 - }, - { - "epoch": 1.21, - "learning_rate": 0.00012996446703192257, - "loss": 0.8033, - "step": 3125 - }, - { - "epoch": 1.22, - "learning_rate": 0.00012899320660558986, - "loss": 0.8016, - "step": 3150 - }, - { - "epoch": 1.23, - "learning_rate": 0.00012801895080943846, - "loss": 0.7995, - "step": 3175 - }, - { - "epoch": 1.24, - "learning_rate": 0.0001270418002965782, - "loss": 0.799, - "step": 3200 - } - ], - "max_steps": 7737, - "num_train_epochs": 3, - "total_flos": 1.3780535807691457e+19, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-3200/training_args.bin b/checkpoint-3200/training_args.bin deleted file mode 100644 index 89b6f6487f3e8cc200589fafc4378937a3fadf66..0000000000000000000000000000000000000000 --- a/checkpoint-3200/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7292138fecd854f5f17371c439bbd450ee3c48e738b75818b778a55f4e26ef57 -size 4027 diff --git a/training_args.bin b/training_args.bin index 89b6f6487f3e8cc200589fafc4378937a3fadf66..fb0f561b69fb593b0412efd616e76bb2ca52ed7b 100644 --- a/training_args.bin +++ b/training_args.bin @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7292138fecd854f5f17371c439bbd450ee3c48e738b75818b778a55f4e26ef57 +oid sha256:4458bf553277b063d908ed9668b95abc04892c52d8d793f6b007433394d06f17 size 4027