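# Axolotl training configuration for Minotaur 13B: a full fine-tune of
# huggyllama/llama-13b on a mix of instruction, reasoning, QA, and chat datasets.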
base_model: huggyllama/llama-13b
base_model_config: huggyllama/llama-13b
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: false
load_in_4bit: false
gptq: false
strict: false
push_dataset_to_hub: winglian
hf_use_auth_token: true
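# Each dataset entry pairs a Hugging Face repo (optionally narrowed to specific
# data_files) with the axolotl prompt strategy ("type") used to format it.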
datasets:
  - path: winglian/evals
    data_files:
      - hf/ARC-Challenge.jsonl
      - hf/ARC-Easy.jsonl
      - hf/riddle_sense.jsonl
    type: explainchoice:chat
  - path: winglian/evals
    data_files:
      - hf/gsm8k.jsonl
      - hf/winogrande.jsonl
    type: alpaca_chat.load_qa
  - path: winglian/evals
    data_files:
      - custom/n_task.jsonl
      - custom/misconceptions.jsonl
      - custom/context_insensitivity.jsonl
    type: alpaca_chat
  - path: camel-ai/math
    type: alpaca_chat.load_camel_ai
  - path: camel-ai/biology
    type: alpaca_chat.load_camel_ai
  - path: camel-ai/physics
    type: alpaca_chat.load_camel_ai
  - path: camel-ai/chemistry
    type: alpaca_chat.load_camel_ai
  - path: winglian/evals
    data_files:
      - custom/in_context_qa.jsonl
    type: context_qa
  - path: winglian/evals
    data_files:
      - custom/in_context_qa.jsonl
    type: context_qa.load_404
  - path: winglian/evals
    data_files:
      - custom/jokes_explained_500up.jsonl
    type: sharegpt_jokes
  - path: winglian/evals
    data_files:
      - custom/classify-self-chat.sharegpt.jsonl
      - custom/coding-self-chat.sharegpt.jsonl
      - custom/prose-gpt4.sharegpt.jsonl
      - custom/prose-rewrite-gpt4.sharegpt.jsonl
    type: sharegpt_simple
  - path: winglian/evals
    data_files:
      - openai/tldr.jsonl
    type: summarizetldr:chat
  - path: winglian/evals
    data_files:
      - hellaswag/hellaswag.jsonl
    type: explainchoice:chat
    shards: 60
  - path: metaeval/ScienceQA_text_only
    type: concisechoice:chat
    shards: 13
  - path: teknium/GPTeacher-General-Instruct
    data_files: gpt4-instruct-similarity-0.6-dataset.json
    type: gpteacher:chat
  - path: QingyiSi/Alpaca-CoT
    data_files:
      - Chain-of-Thought/formatted_cot_data/aqua_train.json
      - Chain-of-Thought/formatted_cot_data/creak_train.json
      - Chain-of-Thought/formatted_cot_data/ecqa_train.json
      - Chain-of-Thought/formatted_cot_data/esnli_train.json
      - Chain-of-Thought/formatted_cot_data/gsm8k_train.json
      - Chain-of-Thought/formatted_cot_data/qasc_train.json
      - Chain-of-Thought/formatted_cot_data/qed_train.json
      - Chain-of-Thought/formatted_cot_data/sensemaking_train.json
      - Chain-of-Thought/formatted_cot_data/strategyqa_train.json
      - GPTeacher/Roleplay/formatted_roleplay-similarity_0.6-instruct-dataset.json
    type: alpaca_chat
  - path: ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered
    type: alpaca:chat
  - path: ewof/code-alpaca-instruct-unfiltered
    type: alpaca_chat
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca_chat
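# Cache the tokenized dataset between runs and hold out 1% of it for eval.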
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
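# adapter is intentionally blank: this is a full-parameter fine-tune, so the
# lora_* fields below are ignored unless an adapter (lora/qlora) is configured.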
adapter:
lora_model_dir:
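# Pack multiple short examples into each 2048-token sequence to reduce padding.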
sequence_len: 2048
max_packed_sequence_len: 2048
lora_r:
lora_alpha:
lora_dropout:
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: minotaur-13b
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./minotaur-13b
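# Effective batch size per GPU is micro_batch_size x gradient_accumulation_steps;
# optimizer state is kept in 8-bit via the bitsandbytes AdamW implementation.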
gradient_accumulation_steps: 1
micro_batch_size: 12
num_epochs: 12
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.00005
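# Mask prompt tokens out of the loss and bucket samples of similar length.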
train_on_inputs: false
group_by_length: true
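# Train in bfloat16 with TF32 matmuls; gradient checkpointing trades recompute
# for memory.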
bf16: true
fp16: false
tf32: true
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
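# LR warmup for 100 steps, then evaluate and checkpoint every 29 steps.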
warmup_steps: 100
eval_steps: 29
save_steps: 29
load_best_model_at_end: false
debug:
deepspeed:
weight_decay: 0.0007
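# Fully Sharded Data Parallel: shard parameters, gradients, and optimizer state
# across GPUs, auto-wrapping each LlamaDecoderLayer and offloading params to CPU.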
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_offload_params: true
  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
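# Standard LLaMA special tokens.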
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"