train model
- scripts/TRAIN.md +1 -1
- scripts/model.yaml +8 -3
scripts/TRAIN.md
CHANGED
@@ -36,5 +36,5 @@ pip install -U -r requirements-lit.in
 ```
 
 ```bash
-litgpt pretrain --
+litgpt pretrain --config ./model.yaml
 ```
scripts/model.yaml
CHANGED
@@ -42,7 +42,12 @@ initial_checkpoint_dir:
 resume: "auto"
 
 # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
-
+data:
+  class_path: LitData
+  init_args:
+    data_path: "../data/"
+    # num_workers: 16
+    num_workers: 3
 
 # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
 train:
@@ -56,8 +61,8 @@ train:
 global_batch_size: 512
 
 # Number of samples per data-parallel rank (type: int, default: 4)
-micro_batch_size: 16
-
+# micro_batch_size: 16
+micro_batch_size: 15
 
 # Number of iterations with learning rate warmup active (type: int, default: 2000)
 lr_warmup_steps: 2000