winglian committed
Commit 4df9da7
Parents: 2531ea2 e0ccacc

Merge pull request #105 from viktoriussuwandi/viktoriussuwandi-patch

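All sixteen configs below make the same one-line change: the Weights & Biases block keeps its project, watch, and run-id fields, but wandb_log_model is left blank instead of "checkpoint", which typically means the trainer no longer uploads model checkpoints to W&B as artifacts. A minimal sketch of the resulting block, using the values from configs/cerebras_1_3B_alpaca.yml:

  wandb_project: pythia-1.4b-lora  # W&B project the run is logged under
  wandb_watch:                     # left unset: no gradient/parameter watching
  wandb_run_id:                    # left unset: W&B generates a run id
  wandb_log_model:                 # left unset: checkpoints are not pushed to W&B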
configs/cerebras_1_3B_alpaca.yml CHANGED
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
  wandb_project: pythia-1.4b-lora
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./lora-alpaca
  batch_size: 32
  micro_batch_size: 4
configs/galactica_1_3B.yml CHANGED
@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
  wandb_project:
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./lora-llama-alpaca
  batch_size: 32
  micro_batch_size: 16
configs/gpt_neox_20b.yml CHANGED
@@ -23,7 +23,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
  wandb_project: gpt4all-neox-20b
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./gpt4all-neox-20b
  batch_size: 48
  micro_batch_size: 4
configs/llama_13B_alpaca.yml CHANGED
@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
  wandb_project:
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./llama-13b-sharegpt
  batch_size: 64
  micro_batch_size: 2
configs/llama_65B_alpaca.yml CHANGED
@@ -27,7 +27,7 @@ lora_fan_in_fan_out: false
  wandb_project: llama-65b-lora
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./lora-llama-alpaca
  batch_size: 128
  micro_batch_size: 16
configs/llama_7B_4bit.yml CHANGED
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
  wandb_project:
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./lora-test
  batch_size: 8
  micro_batch_size: 2
configs/llama_7B_alpaca.yml CHANGED
@@ -26,7 +26,7 @@ lora_fan_in_fan_out: false
  wandb_project: llama-7b-lora
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./lora-llama-alpaca
  batch_size: 128
  micro_batch_size: 16
configs/llama_7B_jeopardy.yml CHANGED
@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
  wandb_project: jeopardy-bot-7b
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./jeopardy-bot-7b
  batch_size: 4
  micro_batch_size: 1
configs/pythia_1_2B_alpaca.yml CHANGED
@@ -26,7 +26,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
  wandb_project: pythia-1.4b-lora
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./lora-alpaca
  batch_size: 48
  micro_batch_size: 4
configs/quickstart.yml CHANGED
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
  wandb_project:
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./lora-test
  batch_size: 4
  micro_batch_size: 1
configs/sample.yml CHANGED
@@ -49,7 +49,7 @@ lora_fan_in_fan_out: false
  wandb_project:
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  # where to save the finsihed model to
  output_dir: ./completed-model
  # training hyperparameters
configs/stability_3b.yml CHANGED
@@ -20,7 +20,7 @@ lora_fan_in_fan_out: false
  wandb_project: stable-alpaca-3b
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./stable-alpaca-3b
  batch_size: 2
  micro_batch_size: 1
configs/vicuna_13B_4bit_reflect.yml CHANGED
@@ -28,7 +28,7 @@ lora_fan_in_fan_out: false
  wandb_project:
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./lora-reflect
  batch_size: 8
  micro_batch_size: 2
examples/gptq-lora-7b/config.yml CHANGED
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
  wandb_project: llama-7b-lora-int4
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./llama-7b-lora-int4
  batch_size: 1
  micro_batch_size: 1
examples/mpt-7b/config.yml CHANGED
@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
  wandb_project: mpt-alpaca-7b
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./mpt-alpaca-7b
  batch_size: 1
  micro_batch_size: 1
examples/redpajama/config-3b.yml CHANGED
@@ -23,7 +23,7 @@ lora_fan_in_fan_out: false
  wandb_project: redpajama-alpaca-3b
  wandb_watch:
  wandb_run_id:
- wandb_log_model: checkpoint
+ wandb_log_model:
  output_dir: ./redpajama-alpaca-3b
  batch_size: 4
  micro_batch_size: 1