Text Generation · PEFT · Safetensors
{
  "model_name": "unsloth/gemma-2-2b",
  "max_seq_length": 2048,
  "dtype": null,
  "load_in_4bit": false,
  "lora_params": {
    "r": 16,
    "target_modules": [
      "q_proj",
      "k_proj",
      "v_proj",
      "o_proj",
      "gate_proj",
      "up_proj",
      "down_proj"
    ],
    "lora_alpha": 16,
    "lora_dropout": 0,
    "bias": "none",
    "use_gradient_checkpointing": "unsloth",
    "random_state": 3407,
    "use_rslora": false,
    "loftq_config": null
  },
  "training_args": {
    "per_device_train_batch_size": 4,
    "gradient_accumulation_steps": 4,
    "warmup_steps": 5,
    "num_train_epochs": 1,
    "learning_rate": 0.0002,
    "fp16": false,
    "bf16": true,
    "logging_steps": 1,
    "optim": "adamw_8bit",
    "weight_decay": 0.01,
    "lr_scheduler_type": "linear",
    "seed": 3407,
    "output_dir": "outputs",
    "report_to": "none"
  },
  "data_config": {
    "dataset_name": "marmikpandya/mental-health",
    "dataset_split": "train",
    "dataset_rows": null,
    "conversation_format": {
      "instruction": "Provide an instruction to the model, e.g., 'Provide advice on coping with stress.'",
      "input": "Include user details or context, e.g., 'I've been feeling overwhelmed with work and personal responsibilities.'",
      "response": "The model's response based on the instruction and input."
    },
    "max_seq_length": 2048,
    "train_on_responses_only": false
  },
  "inference_config": {
    "temperature": 1.0,
    "min_p": 0.9,
    "max_new_tokens": 512
  },
  "alpaca_prompt": {
    "template": "If you are a licensed psychologist, please provide this patient with a helpful response to their concern.\n\n### Instruction:\n{}\n\n### Input:\n{}\n\n### Response:\n{}",
    "eos_token": true
  }
}
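
For reference, the following is a minimal sketch of how these settings map onto the standard Unsloth + TRL fine-tuning workflow. It is an illustration, not code shipped with this repository: it assumes recent unsloth, trl, transformers, and datasets releases, and it assumes the marmikpandya/mental-health dataset exposes instruction, input, and response columns matching conversation_format above (the actual column names may differ). Variable names are illustrative.

from datasets import load_dataset
from transformers import TrainingArguments
from trl import SFTTrainer
from unsloth import FastLanguageModel

# --- model_name / max_seq_length / dtype / load_in_4bit + lora_params ---
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/gemma-2-2b",
    max_seq_length=2048,
    dtype=None,          # null in the config: let Unsloth auto-detect (bf16 on Ampere+)
    load_in_4bit=False,
)
model = FastLanguageModel.get_peft_model(
    model,
    r=16,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    lora_alpha=16,
    lora_dropout=0,
    bias="none",
    use_gradient_checkpointing="unsloth",
    random_state=3407,
    use_rslora=False,
    loftq_config=None,
)

# --- data_config + alpaca_prompt ---
alpaca_prompt = (
    "If you are a licensed psychologist, please provide this patient with a "
    "helpful response to their concern.\n\n"
    "### Instruction:\n{}\n\n### Input:\n{}\n\n### Response:\n{}"
)
EOS_TOKEN = tokenizer.eos_token  # "eos_token": true -> append EOS so generation stops

def format_examples(batch):
    # Column names assumed from "conversation_format"; adjust to the dataset's real schema.
    texts = [
        alpaca_prompt.format(ins, inp, res) + EOS_TOKEN
        for ins, inp, res in zip(batch["instruction"], batch["input"], batch["response"])
    ]
    return {"text": texts}

dataset = load_dataset("marmikpandya/mental-health", split="train")
dataset = dataset.map(format_examples, batched=True)

# --- training_args ---
# train_on_responses_only is false, so the loss covers the full prompt + response.
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=2048,
    args=TrainingArguments(
        per_device_train_batch_size=4,
        gradient_accumulation_steps=4,   # effective batch size of 16
        warmup_steps=5,
        num_train_epochs=1,
        learning_rate=2e-4,
        fp16=False,
        bf16=True,
        logging_steps=1,
        optim="adamw_8bit",
        weight_decay=0.01,
        lr_scheduler_type="linear",
        seed=3407,
        output_dir="outputs",
        report_to="none",
    ),
)
trainer.train()

# --- inference_config ---
FastLanguageModel.for_inference(model)   # enable Unsloth's fast inference path
prompt = alpaca_prompt.format(
    "Provide advice on coping with stress.",
    "I've been feeling overwhelmed with work and personal responsibilities.",
    "",  # leave the response slot empty for generation
)
inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=1.0,
                         min_p=0.9, do_sample=True)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])

Note that temperature and min_p are copied verbatim from inference_config, and that newer TRL releases move dataset_text_field and max_seq_length into SFTConfig; the keyword layout above follows the older Unsloth notebook style that this config mirrors.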