{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Debug axolotl prompt - sharegpt",
            "type": "python",
            "module": "accelerate.commands.launch",
            "request": "launch",
            "args": [
                "-m", "axolotl.cli.train", "dev_sharegpt.yml",
                // The flags below simplify debugging by overriding the axolotl config
                // with the debugging tips above. Modify as needed.
                "--dataset_processes=1",    // limits data preprocessing to one process
                "--max_steps=1",            // limits training to just one step
                "--batch_size=1",           // minimizes batch size
                "--micro_batch_size=1",     // minimizes batch size
                "--val_set_size=0",         // disables validation
                "--sample_packing=False",   // disables sample packing, which does not work with tiny datasets
                "--eval_sample_packing=False", // disables sample packing on the eval set
                "--dataset_prepared_path=temp_debug/axolotl_outputs/data", // send data outputs to a temp folder
                "--output_dir=temp_debug/axolotl_outputs/model" // send model outputs to a temp folder
            ],
            "console": "integratedTerminal",      // show output in the integrated terminal
            "cwd": "${workspaceFolder}/devtools", // set the working directory to devtools from the root of the project
            "justMyCode": true,                   // step through only axolotl code
            "env": {
                "CUDA_VISIBLE_DEVICES": "0",      // since we aren't doing distributed training, limit to one GPU
                "HF_HOME": "${workspaceFolder}/devtools/temp_debug/.hf-cache" // send the HF cache to a temp folder
            },
            "preLaunchTask": "cleanup-for-dataprep", // delete temp folders (see below)
        }
    ]
}
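The "preLaunchTask" above refers to a VS Code task named "cleanup-for-dataprep". If that task is not already defined in your workspace, a minimal .vscode/tasks.json sketch could look like the following; the shell command and the exact paths it removes are assumptions based on the temp folders used in the config above, so adjust them to your setup.

// .vscode/tasks.json (hypothetical sketch, not the project's official task definition)
{
    "version": "2.0.0",
    "tasks": [
        {
            "label": "cleanup-for-dataprep",
            "type": "shell",
            // remove the prepared-data output and the HF datasets cache created by previous debug runs
            "command": "rm -rf temp_debug/axolotl_outputs/data temp_debug/.hf-cache/datasets"
        }
    ]
}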