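# Axolotl-style fine-tuning config: stabilityai/stablelm-base-alpha-3b on the
# vicgalle/alpaca-gpt4 dataset. Typically launched with something like
# `accelerate launch scripts/finetune.py <this-file>.yml` (the exact entrypoint
# depends on your axolotl version).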
base_model: stabilityai/stablelm-base-alpha-3b
base_model_config: stabilityai/stablelm-base-alpha-3b
load_in_8bit: false
datasets:
  - path: vicgalle/alpaca-gpt4
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.04
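# adapter is left empty, so this config runs a full fine-tune; the lora_* values
# below only take effect if adapter is set (e.g. to `lora`).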
adapter:
lora_model_dir:
sequence_len: 4096
max_packed_sequence_len: 4096
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
  - q_proj
  - v_proj
lora_fan_in_fan_out: false
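# Weights & Biases logging; leaving wandb_project unset typically disables wandb.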
wandb_project: stable-alpaca-3b
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./stable-alpaca-3b
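# batch_size is the effective (global) batch size; micro_batch_size is per device,
# with gradient accumulation making up the difference.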
batch_size: 2
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0000002  # 2e-7
train_on_inputs: false
group_by_length: false
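# bf16/tf32 assume an Ampere-or-newer GPU.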
bf16: true
tf32: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 100
eval_steps: 50
save_steps: 200
debug:
deepspeed:
weight_decay: 0.01
fsdp:
fsdp_config:
#special_tokens:
#  pad_token: "[PAD]"
#  bos_token: "<s>"
#  eos_token: "</s>"
#  unk_token: "<unk>"