lesso committed
Commit
2545d05
1 Parent(s): 711bfbd

End of training

README.md CHANGED
@@ -42,7 +42,7 @@ early_stopping_patience: null
 eval_max_new_tokens: 128
 eval_table_size: null
 evals_per_epoch: 4
-flash_attention: false
+flash_attention: true
 fp16: true
 fsdp: null
 fsdp_config: null
@@ -102,7 +102,7 @@ xformers_attention: null
 
 This model is a fine-tuned version of [aisingapore/llama3-8b-cpt-sea-lionv2.1-instruct](https://huggingface.co/aisingapore/llama3-8b-cpt-sea-lionv2.1-instruct) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0493
+- Loss: 0.0435
 
 ## Model description
 
@@ -137,10 +137,10 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch  | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 1.4924        | 0.0833 | 1    | 1.4976          |
-| 1.5739        | 0.25   | 3    | 1.4466          |
-| 1.0885        | 0.5    | 6    | 0.8232          |
-| 0.2414        | 0.75   | 9    | 0.0493          |
+| 1.4927        | 0.0833 | 1    | 1.4978          |
+| 1.5735        | 0.25   | 3    | 1.4451          |
+| 1.0815        | 0.5    | 6    | 0.8162          |
+| 0.229         | 0.75   | 9    | 0.0435          |
 
 
 ### Framework versions
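
For context, the README change above flips `flash_attention` to `true` and records the base model `aisingapore/llama3-8b-cpt-sea-lionv2.1-instruct`. Below is a minimal sketch of loading that base model with the Transformers FlashAttention-2 backend and attaching the LoRA adapter from this run via PEFT; the adapter repo id is a placeholder assumption, not taken from this commit.

```python
# Sketch: load the base model with FlashAttention 2 and attach the LoRA adapter.
# The ADAPTER repo id is a placeholder; substitute the repository this commit belongs to.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "aisingapore/llama3-8b-cpt-sea-lionv2.1-instruct"
ADAPTER = "user/adapter-repo"  # placeholder, not from the commit

tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(
    BASE,
    torch_dtype=torch.float16,                 # fp16: true in the training config
    attn_implementation="flash_attention_2",   # flash_attention: true
    device_map="auto",
)
model = PeftModel.from_pretrained(model, ADAPTER)

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```
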
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "o_proj",
+    "up_proj",
     "gate_proj",
-    "down_proj",
+    "o_proj",
+    "q_proj",
     "k_proj",
-    "v_proj",
-    "up_proj"
+    "down_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5a22a702d1d494c84ee8584c305f950c4c106ff560a1eaddcaa732ce30b1639c
+oid sha256:e29767d254322f76e4a7a492448794d0aed620a49b295ca886053e6cb71419a3
 size 84047370
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e7b3085613f5f73bc880d9364f4eef64991c08178bbf4fb9d8b05a2bd9a1fd9
+oid sha256:c52d9e387c8871db8e6320785b47284eb7bb61158eac9e6b7cbbe6c659894e8c
 size 83945296
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:33ee77f9946e0ddfbac6f0c8ae58cf2871b3fdcb3ea1247868cf8374e044a7bd
+oid sha256:1dfa5dd032e4a03f440be49b93d0489893f415e88c3769ffa1903e5787dfef9f
 size 6776
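
The three binary diffs above only rewrite Git LFS pointer files: each tracked payload is identified by its SHA-256 oid and byte size, and the commit swaps in new oids for the retrained weights. A minimal sketch of checking a downloaded payload against its pointer follows; the file paths in the usage comment are placeholders.

```python
# Sketch: verify a downloaded Git LFS object against its pointer's oid and size.
import hashlib
import re
from pathlib import Path

def parse_pointer(pointer_path: str) -> tuple[str, int]:
    """Read the sha256 oid and size from a Git LFS pointer file."""
    text = Path(pointer_path).read_text()
    oid = re.search(r"oid sha256:([0-9a-f]{64})", text).group(1)
    size = int(re.search(r"size (\d+)", text).group(1))
    return oid, size

def verify(payload_path: str, pointer_path: str) -> bool:
    """Return True if the payload's sha256 digest and length match the pointer."""
    oid, size = parse_pointer(pointer_path)
    data = Path(payload_path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

# Example usage (placeholder paths):
# print(verify("adapter_model.safetensors", "adapter_model.safetensors.pointer"))
```
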