nblinh63 committed
Commit: 6c03cb7 (parent: 7f11a65)

End of training

README.md CHANGED
@@ -47,7 +47,7 @@ flash_attention: false
  fp16: null
  fsdp: null
  fsdp_config: null
- gradient_accumulation_steps: 4
+ gradient_accumulation_steps: 1
  gradient_checkpointing: true
  group_by_length: false
  hub_model_id: nblinh63/c15eea94-310b-4df8-b4a4-ca8f6c1a7aab
@@ -67,7 +67,7 @@ lora_r: 16
  lora_target_linear: true
  lr_scheduler: cosine
  max_steps: 10
- micro_batch_size: 2
+ micro_batch_size: 1
  mlflow_experiment_name: /tmp/2437198224a470c0_train_data.json
  model_type: AutoModelForCausalLM
  num_epochs: 1
@@ -123,11 +123,9 @@ More information needed

  The following hyperparameters were used during training:
  - learning_rate: 0.0002
- - train_batch_size: 2
- - eval_batch_size: 2
+ - train_batch_size: 1
+ - eval_batch_size: 1
  - seed: 42
- - gradient_accumulation_steps: 4
- - total_train_batch_size: 8
  - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_steps: 10
@@ -137,7 +135,7 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:------:|:----:|:---------------:|
- | 0.0 | 0.0112 | 10 | nan |
+ | 0.0 | 0.0014 | 10 | nan |


  ### Framework versions
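The two config hunks above drive the README's hyperparameter changes: with micro_batch_size and gradient_accumulation_steps both dropping to 1, the effective batch per optimizer step falls from 8 to 1, the separate total_train_batch_size line disappears, and step 10 now covers 1/8 as much of the epoch (0.0112 → 0.0014). A minimal sketch of the standard Trainer-style arithmetic, assuming a single GPU (world_size = 1 is an assumption; the config shows no multi-GPU settings):

```python
# Effective number of samples contributing to one optimizer step,
# as reported in the README's total_train_batch_size line.
def total_train_batch_size(micro_batch_size: int,
                           gradient_accumulation_steps: int,
                           world_size: int = 1) -> int:
    return micro_batch_size * gradient_accumulation_steps * world_size

print(total_train_batch_size(2, 4))  # 8 -> the old README's total_train_batch_size
print(total_train_batch_size(1, 1))  # 1 -> after this commit
```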
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "up_proj",
- "o_proj",
- "down_proj",
+ "gate_proj",
+ "k_proj",
  "v_proj",
+ "up_proj",
  "q_proj",
- "k_proj",
- "gate_proj"
+ "o_proj",
+ "down_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9ce564cadad4549387ddbf7f4177a3652cd00cc904cc5197a0bad3871250559e
+ oid sha256:eb5a181c1642c83b6fa5c53eca6b3f22aae90f6d66f5ebe2d6681a96c3977cad
  size 275494090
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:494f0bc6d731581c53b687ca50d7736e706164cbceee2e0271e1be64407c3b27
+ oid sha256:5cc995a168185b66da21b5d111b5f6dd4c087187a6bb307703a19893040eb1fd
  size 275341720
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:625bdaeaa249f50b6bc662f91dbddd37ed10355d0f4a7be16a19a93738dd71d9
+ oid sha256:54fa189f91ee8a9b701d32c0e946fcb512221c24b19c7dc9e7af62744d52bc02
  size 6776
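The three binary files above are git-lfs pointer files, so only the sha256 oid changes while the byte sizes stay identical (expected, since the adapter's rank and target-module set did not change). A hedged sketch of verifying a downloaded artifact against its pointer, using the oid and size from this diff:

```python
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a file against the sha256 oid and size from its git-lfs pointer."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid and os.path.getsize(path) == expected_size

# Values copied from the adapter_model.safetensors pointer after this commit.
print(verify_lfs_object(
    "adapter_model.safetensors",
    "5cc995a168185b66da21b5d111b5f6dd4c087187a6bb307703a19893040eb1fd",
    275341720,
))
```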