End of training

- README.md +6 -9
- adapter_config.json +4 -4
- adapter_model.bin +1 -1
- adapter_model.safetensors +1 -1
- training_args.bin +1 -1
README.md
CHANGED
@@ -102,7 +102,7 @@ xformers_attention: null
 
 This model is a fine-tuned version of [unsloth/Llama-3.1-Storm-8B](https://huggingface.co/unsloth/Llama-3.1-Storm-8B) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss:
+- Loss: 11.0530
 
 ## Model description
 
@@ -125,11 +125,8 @@ The following hyperparameters were used during training:
 - train_batch_size: 1
 - eval_batch_size: 1
 - seed: 42
-- distributed_type: multi-GPU
-- num_devices: 2
 - gradient_accumulation_steps: 4
-- total_train_batch_size:
-- total_eval_batch_size: 2
+- total_train_batch_size: 4
 - optimizer: Use OptimizerNames.ADAMW_HF with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_steps: 10
@@ -140,10 +137,10 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 10.
-|
-|
-| 9.
+| 10.9802 | 0.0003 | 1 | 11.5776 |
+| 10.434 | 0.0009 | 3 | 11.5776 |
+| 12.2136 | 0.0018 | 6 | 11.5211 |
+| 9.0545 | 0.0027 | 9 | 11.0530 |
 
 
 ### Framework versions
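The updated hyperparameters describe a single-device run: total_train_batch_size = train_batch_size × gradient_accumulation_steps = 1 × 4 = 4, replacing the earlier multi-GPU entries (2 devices). Below is a minimal sketch of how these values map onto transformers.TrainingArguments; output_dir and learning_rate are illustrative placeholders, since neither appears in this diff.

```python
# Sketch of TrainingArguments matching the hyperparameters listed in the README diff.
# Values marked "placeholder" are assumptions; the rest come from the diff above.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="outputs/llama-3.1-storm-8b-lora",  # placeholder path
    per_device_train_batch_size=1,                 # train_batch_size: 1
    per_device_eval_batch_size=1,                  # eval_batch_size: 1
    gradient_accumulation_steps=4,                 # total_train_batch_size = 1 * 4 = 4
    seed=42,
    optim="adamw_hf",                              # OptimizerNames.ADAMW_HF
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="cosine",
    warmup_steps=10,
    learning_rate=2e-4,                            # placeholder; not shown in this diff
)
```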
adapter_config.json
CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "v_proj",
-    "
+    "q_proj",
+    "down_proj",
     "o_proj",
-    "
+    "up_proj",
     "gate_proj",
-    "
-    "down_proj"
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
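The updated target_modules list covers all seven Llama attention and MLP projections (q/k/v/o plus gate/up/down). A minimal sketch of the corresponding peft.LoraConfig follows; r, lora_alpha, and lora_dropout are assumed values, since they are not visible in this diff.

```python
# Sketch of a LoraConfig matching the updated target_modules list.
# Only target_modules, task_type and use_dora come from the diff above;
# r, lora_alpha and lora_dropout are illustrative assumptions.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,               # assumed rank, not shown in the diff
    lora_alpha=32,      # assumed scaling, not shown in the diff
    lora_dropout=0.05,  # assumed dropout, not shown in the diff
    target_modules=[
        "v_proj",
        "q_proj",
        "down_proj",
        "o_proj",
        "up_proj",
        "gate_proj",
        "k_proj",
    ],
    task_type="CAUSAL_LM",
    use_dora=False,
)
```

The adapter weights saved under this configuration are the adapter_model.bin and adapter_model.safetensors files below.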
adapter_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:443852624920258f4e1983ea89998a100fcdc9c6ccd235847e23c1dda8b8087d
 size 84047370
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4c8cfac24ff0d3e238a815b333f2ea43cf11b4d6fea1fd7ae85e5916aa356a26
 size 83945296
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ed5cf1a7f24b2f4b6ffdd137a7076d460d56660645e71b5af0068e2d73030f31
 size 6776
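adapter_model.bin, adapter_model.safetensors, and training_args.bin are stored as Git LFS pointer files: each pointer records the spec version, the SHA-256 (oid) of the actual object, and its size in bytes. A small sketch of checking a downloaded object against its pointer, assuming the file sits in the current directory:

```python
# Verify a downloaded LFS object against the oid and size from its pointer file.
# The path is illustrative; the expected values come from the pointers above.
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_sha256: str, expected_size: int) -> bool:
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_sha256

print(verify_lfs_object(
    "adapter_model.safetensors",
    "4c8cfac24ff0d3e238a815b333f2ea43cf11b4d6fea1fd7ae85e5916aa356a26",
    83945296,
))
```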