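# H2O LLM Studio experiment configuration (cfg.yaml) for
# h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2: a LoRA fine-tune of
# the OpenLLaMA 7B 300B-token preview checkpoint on OpenAssistant (OASST) data.
# Section comments below follow H2O LLM Studio's usual meaning for each key.

# Backbone loading: weights are loaded in float16, and gradient checkpointing
# trades extra forward-pass compute for lower activation memory.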
architecture:
    backbone_dtype: float16
    force_embedding_gradients: false
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
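# Token/parent augmentation is effectively disabled: every probability is 0.0.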
augmentation:
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.0
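# Conversational data layout: prompts come from the "instruction" column and
# answers from "output", with parent_id chaining multi-turn threads.
# mask_prompt_labels: true excludes prompt tokens from the loss, so only
# answer tokens are trained on; <|prompt|> and <|answer|> mark where each
# role begins, and an EOS token is appended to both prompt and answer.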
dataset:
    add_eos_token_to_answer: true
    add_eos_token_to_prompt: true
    answer_column: output
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    mask_prompt_labels: true
    parent_id_column: parent_id
    prompt_column:
    - instruction
    text_answer_separator: <|answer|>
    text_prompt_start: <|prompt|>
    train_dataframe: data/user/oasst/train_full_allrank.pq
    validation_dataframe: data/user/oasst/val.csv
    validation_size: 0.01
    validation_strategy: custom
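# Runtime: 4 data-parallel GPUs with mixed precision (FSDP and torch.compile
# are both off); seed: -1 selects a random seed per run.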
environment:
    compile_model: false
    find_unused_parameters: false
    gpus:
    - '0'
    - '1'
    - '2'
    - '3'
    mixed_precision: true
    number_of_workers: 8
    seed: -1
    trust_remote_code: false
    use_fsdp: false
experiment_name: h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2
llm_backbone: h2o-llmstudio/open_llama_7b_preview_300bt_fix
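# Metrics and a sample of generated texts (number_of_texts) go to Neptune.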
logging:
    logger: Neptune
    neptune_project: Zoo/h2o-llm
    number_of_texts: 10
output_directory: output/user/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2/
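# Validation-time generation: deterministic beam search (do_sample: false,
# num_beams: 2) with a mild repetition penalty; metric: GPT3.5 scores the
# generated answers with an LLM judge. batch_size_inference: 0 presumably
# falls back to the training batch size.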
prediction:
    batch_size_inference: 0
    do_sample: false
    max_length_inference: 256
    metric: GPT3.5
    min_length_inference: 2
    num_beams: 2
    repetition_penalty: 1.2
    stop_tokens: ''
    temperature: 0.3
problem_type: text_causal_language_modeling
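# Sequence budget: 2048 tokens overall, with the answer capped at 1024;
# padding_quantile: 1.0 presumably pads each batch to its longest sequence.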
tokenizer:
    add_prefix_space: false
    add_prompt_answer_tokens: false
    max_length: 2048
    max_length_answer: 1024
    max_length_prompt: 2048
    padding_quantile: 1.0
    use_fast: false
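# LoRA fine-tuning (r=16, alpha=32, dropout 0.05): the frozen backbone gets
# low-rank adapters, which is why the comparatively high 1e-3 learning rate is
# workable. AdamW with a cosine schedule, one warmup epoch, 3 epochs total;
# evaluation_epochs: 0.5 runs validation twice per epoch.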
training:
    batch_size: 16
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers: []
    drop_last_batch: true
    epochs: 3
    evaluate_before_training: false
    evaluation_epochs: 0.5
    grad_accumulation: 1
    gradient_clip: 0.0
    learning_rate: 0.001
    lora: true
    lora_alpha: 32
    lora_dropout: 0.05
    lora_r: 16
    lora_target_modules: ''
    loss_function: CrossEntropy
    optimizer: AdamW
    save_best_checkpoint: false
    schedule: Cosine
    train_validation_data: false
    warmup_epochs: 1.0
    weight_decay: 0.0