llm-wizard committed
Commit d923804
1 Parent(s): e06d11e

ai-maker-space/llama38binstruct-summary-100s

README.md CHANGED
@@ -1,13 +1,13 @@
 ---
-license: other
+base_model: NousResearch/Meta-Llama-3-8B-Instruct
+datasets:
+- generator
 library_name: peft
+license: other
 tags:
 - trl
 - sft
 - generated_from_trainer
-base_model: NousResearch/Meta-Llama-3-8B-Instruct
-datasets:
-- generator
 model-index:
 - name: llama38binstruct_summarize
   results: []
@@ -20,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->

 This model is a fine-tuned version of [NousResearch/Meta-Llama-3-8B-Instruct](https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.6753
+- Loss: 2.1068

 ## Model description

@@ -45,23 +45,23 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: constant
-- lr_scheduler_warmup_steps: 0.03
+- lr_scheduler_warmup_steps: 30
 - training_steps: 100

 ### Training results

 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 1.4436        | 1.1905 | 25   | 1.0958          |
-| 0.5989        | 2.3810 | 50   | 1.2958          |
-| 0.2448        | 3.5714 | 75   | 1.5235          |
-| 0.099         | 4.7619 | 100  | 1.6753          |
+| 1.55          | 1.3158 | 25   | 1.5000          |
+| 0.5276        | 2.6316 | 50   | 1.7814          |
+| 0.2099        | 3.9474 | 75   | 1.8811          |
+| 0.0761        | 5.2632 | 100  | 2.1068          |


 ### Framework versions

-- PEFT 0.11.1
-- Transformers 4.41.2
-- Pytorch 2.3.0+cu121
-- Datasets 2.19.2
+- PEFT 0.12.0
+- Transformers 4.44.2
+- Pytorch 2.4.0+cu121
+- Datasets 3.0.0
 - Tokenizers 0.19.1
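For reference, a minimal inference sketch for the updated adapter follows. It is not part of this commit: the repo ids come from the base model and repository named above, while the prompt text, dtype, and generation settings are illustrative assumptions.

```python
# Sketch only: load the base model, attach the PEFT adapter from this repo,
# and generate a summary using the Llama 3 chat template.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "NousResearch/Meta-Llama-3-8B-Instruct"
ADAPTER = "ai-maker-space/llama38binstruct-summary-100s"

tokenizer = AutoTokenizer.from_pretrained(ADAPTER)  # picks up the <|eot_id|> eos/pad change below
base = AutoModelForCausalLM.from_pretrained(BASE, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, ADAPTER)

# Illustrative prompt; the prompt format used during training may differ.
messages = [{"role": "user", "content": "Summarize the following document:\n..."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(
    input_ids,
    max_new_tokens=256,
    eos_token_id=tokenizer.eos_token_id,  # now <|eot_id|>, so generation stops at end of turn
)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```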
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "down_proj",
     "q_proj",
-    "v_proj",
+    "down_proj",
     "up_proj",
-    "gate_proj",
     "k_proj",
-    "o_proj"
+    "o_proj",
+    "gate_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e830cf60d62fd1beae745801b2eebd6ca2035f72dbcf013a55efb8625a87d86d
+oid sha256:3e9d4c0f52c8eccf0b29f14f9db91d438725d9da5b1a065f582fcbc1b41edbb8
 size 167832240
runs/Sep12_19-43-37_8e18eff6a629/events.out.tfevents.1726170227.8e18eff6a629.938.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58678f8b841bad145dc5fc3478c03272db94bb147f867c94a69a1ec475a0d111
+size 9354
special_tokens_map.json CHANGED
@@ -7,11 +7,11 @@
     "single_word": false
   },
   "eos_token": {
-    "content": "<|end_of_text|>",
+    "content": "<|eot_id|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<|end_of_text|>"
+  "pad_token": "<|eot_id|>"
 }
tokenizer_config.json CHANGED
@@ -2052,12 +2052,12 @@
   "bos_token": "<|begin_of_text|>",
   "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<|end_of_text|>",
+  "eos_token": "<|eot_id|>",
   "model_input_names": [
     "input_ids",
     "attention_mask"
   ],
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<|end_of_text|>",
+  "pad_token": "<|eot_id|>",
   "tokenizer_class": "PreTrainedTokenizerFast"
 }
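The token changes in special_tokens_map.json and tokenizer_config.json switch both eos_token and pad_token from <|end_of_text|> to <|eot_id|>, the end-of-turn token that the chat template above appends after every message. A small verification sketch, assuming the tokenizer is loaded from this repo:

```python
# Sketch: confirm the updated tokenizer ends turns (and pads) with <|eot_id|>.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ai-maker-space/llama38binstruct-summary-100s")

assert tok.eos_token == "<|eot_id|>"
assert tok.pad_token == "<|eot_id|>"

# The chat template appends <|eot_id|> after each message, so templated training
# text now terminates on the same token that generation stops on.
text = tok.apply_chat_template([{"role": "user", "content": "hello"}], tokenize=False)
assert text.endswith("<|eot_id|>")
print(text)
```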
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9ec406f70a4db0f7b6ca78d6f1e68a2e38a26462493b4efaf8ca0e9819f20c0f
-size 5368
+oid sha256:f99e1b41aec4320fd7664144dd9a1354cf2a88d14a37b24a316c3d45e900b0da
+size 5496