Update lora config
--- a/README.md
+++ b/README.md
@@ -134,7 +134,7 @@ See sample configs in [configs](configs) folder or [examples](examples) for quick start.
 
 - lora
 ```yaml
-adapter: lora # blank for full finetune
+adapter: lora # qlora or leave blank for full finetune
 lora_r: 8
 lora_alpha: 16
 lora_dropout: 0.05
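The updated `adapter` comment now names three modes. As a quick reference, a minimal sketch of what each mode looks like in a config; only `adapter`, `lora_r`, `lora_alpha`, and `lora_dropout` come from the hunk above, the rest is assumption:

```yaml
# LoRA: train small low-rank adapter matrices on top of a frozen base model
adapter: lora
lora_r: 8        # rank of the adapter matrices
lora_alpha: 16   # scaling applied to the adapter output
lora_dropout: 0.05

# QLoRA: the same adapter keys, with the base model loaded quantized
# adapter: qlora

# Full finetune: leave `adapter` blank so all base-model weights are trained
# adapter:
```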
@@ -185,6 +185,8 @@ datasets:
     # The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
     type: alpaca
     data_files: # path to source data files
+    shard: # true if using a subset of the data. make sure to also set the `shards` param
+    shards: # number of shards to split the dataset into
 
 # axolotl attempts to save the dataset as an arrow after packing the data together so
 # subsequent training attempts load faster, relative path
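The two new dataset keys work as a pair: the boolean opts in to training on a subset, and the count says how finely to split. A sketch of a full dataset entry, assuming the boolean is named `shard` as its comment implies; the dataset path is purely illustrative:

```yaml
datasets:
  - path: vicgalle/alpaca-gpt4  # illustrative path, not part of this change
    type: alpaca
    data_files:
    shard: true   # opt in to using a subset of the data
    shards: 10    # split the dataset into 10 shards
```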
@@ -201,7 +203,7 @@ sequence_len: 2048
 # inspired by StackLLaMA. see https://huggingface.co/blog/stackllama#supervised-fine-tuning
 max_packed_sequence_len: 1024
 
-# if you want to use lora
+# if you want to use 'lora' or 'qlora', or leave blank to train all parameters in the original model
 adapter: lora
 # if you already have a lora model trained that you want to load, put that here
 # lora hyperparameters
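The context above mentions loading an already-trained lora without showing the key itself; axolotl configs expose this as `lora_model_dir`, assumed here in a short sketch with an illustrative path:

```yaml
adapter: lora
lora_model_dir: ./lora-out  # assumed key: directory of a previously trained LoRA adapter
lora_r: 8
lora_alpha: 16
```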
@@ -293,6 +295,9 @@ torchdistx_path:
 
 # Debug mode
 debug:
+
+# Seed
+seed:
 ```
 
 </details>
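The new `seed` key lands next to `debug` and is left blank in the change; a sketch of setting it for a reproducible run (that it seeds the training RNGs is an assumption, not stated in the diff):

```yaml
# Debug mode
debug:

# Seed
seed: 42  # assumed to fix the run's random seed for reproducibility
```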