lesso committed
Commit ece3b6b
1 Parent(s): 525d2ab

End of training

README.md CHANGED
@@ -42,7 +42,7 @@ early_stopping_patience: null
  eval_max_new_tokens: 128
  eval_table_size: null
  evals_per_epoch: 4
- flash_attention: true
+ flash_attention: false
  fp16: true
  fsdp: null
  fsdp_config: null
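
The only functional change in this hunk is `flash_attention: true` → `false`. For context (not part of the commit), here is a minimal Python sketch of how such a toggle commonly maps onto the `attn_implementation` argument when loading the base model with transformers; the `sdpa` fallback choice is an assumption, not necessarily what axolotl does internally:

```python
# Sketch only (not part of this commit): how a flash_attention toggle
# typically maps onto transformers' attn_implementation argument.
# The sdpa fallback here is an assumption, not axolotl's exact behavior.
import importlib.util

import torch
from transformers import AutoModelForCausalLM

def pick_attn_implementation(flash_attention: bool) -> str:
    """Use FlashAttention 2 only if requested and the package is installed."""
    if flash_attention and importlib.util.find_spec("flash_attn") is not None:
        return "flash_attention_2"
    return "sdpa"  # PyTorch scaled-dot-product attention

model = AutoModelForCausalLM.from_pretrained(
    "aisingapore/llama3-8b-cpt-sea-lionv2.1-instruct",
    torch_dtype=torch.float16,  # matches fp16: true in the config
    attn_implementation=pick_attn_implementation(False),  # flash_attention: false
)
```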
@@ -102,7 +102,7 @@ xformers_attention: null
 
  This model is a fine-tuned version of [aisingapore/llama3-8b-cpt-sea-lionv2.1-instruct](https://huggingface.co/aisingapore/llama3-8b-cpt-sea-lionv2.1-instruct) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.7840
+ - Loss: 0.0493
 
  ## Model description
 
@@ -125,25 +125,22 @@ The following hyperparameters were used during training:
  - train_batch_size: 1
  - eval_batch_size: 1
  - seed: 42
- - distributed_type: multi-GPU
- - num_devices: 2
  - gradient_accumulation_steps: 4
- - total_train_batch_size: 8
- - total_eval_batch_size: 2
+ - total_train_batch_size: 4
  - optimizer: Use OptimizerNames.ADAMW_HF with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_steps: 10
- - training_steps: 6
+ - training_steps: 10
  - mixed_precision_training: Native AMP
 
  ### Training results
 
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:------:|:----:|:---------------:|
- | 1.3997 | 0.1667 | 1 | 1.4973 |
- | 1.4535 | 0.3333 | 2 | 1.4973 |
- | 1.394 | 0.6667 | 4 | 1.2689 |
- | 1.0206 | 1.0 | 6 | 0.7840 |
+ | 1.4924 | 0.0833 | 1 | 1.4976 |
+ | 1.5739 | 0.25 | 3 | 1.4466 |
+ | 1.0885 | 0.5 | 6 | 0.8232 |
+ | 0.2414 | 0.75 | 9 | 0.0493 |
 
 
  ### Framework versions
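
The hyperparameter edits are consistent with moving from a two-GPU run to a single device: the reported `total_train_batch_size` is the product of the per-device batch size, the gradient-accumulation steps, and the device count. A quick sanity check of both values in the diff:

```python
# Reported total_train_batch_size = per-device batch size
# x gradient-accumulation steps x number of devices.
def total_train_batch_size(per_device: int, grad_accum: int, num_devices: int) -> int:
    return per_device * grad_accum * num_devices

# Old card: train_batch_size=1, gradient_accumulation_steps=4, num_devices=2 -> 8
assert total_train_batch_size(1, 4, 2) == 8
# New card: same per-device settings, total of 4 -> consistent with one device
assert total_train_batch_size(1, 4, 1) == 4
```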
 
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "up_proj",
- "gate_proj",
- "o_proj",
- "q_proj",
  "k_proj",
  "down_proj",
- "v_proj"
+ "up_proj",
+ "v_proj",
+ "q_proj",
+ "o_proj",
+ "gate_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
 
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:47e2c1095c3a83dff7d2cd1d3de5a5a1b332271683786561c9239cd077dc6111
+ oid sha256:5a22a702d1d494c84ee8584c305f950c4c106ff560a1eaddcaa732ce30b1639c
  size 84047370
 
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c52d9e387c8871db8e6320785b47284eb7bb61158eac9e6b7cbbe6c659894e8c
+ oid sha256:e9e75ea8842804f67731a1381ebcf772d861ce735e4c8169fdaf6a92730000af
  size 83945296
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1dfa5dd032e4a03f440be49b93d0489893f415e88c3769ffa1903e5787dfef9f
+ oid sha256:d5fa11f84d8d8e5e8936ae65e6012c835679c9ed6678232c43544c849861d322
  size 6776
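
These three files are Git LFS pointers: each diff only swaps the `oid sha256:` line while the byte size stays the same, as expected when a retrained checkpoint of identical shape overwrites the old one. A small sketch for checking a downloaded artifact against its pointer; the local filename is an assumption:

```python
# Sketch: verify a downloaded LFS object against the oid in its pointer.
# The local filename "adapter_model.safetensors" is an assumption.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# oid from the new adapter_model.safetensors pointer in this commit:
expected = "e9e75ea8842804f67731a1381ebcf772d861ce735e4c8169fdaf6a92730000af"
assert sha256_of("adapter_model.safetensors") == expected
```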
 