vwxyzjn committed
Commit efea01c (1 parent: bb2d0de)

End of training
README.md CHANGED
```diff
@@ -1,10 +1,12 @@
 ---
-license: mit
-base_model: HuggingFaceH4/mistral-7b-sft-beta
+license: apache-2.0
+base_model: alignment-handbook/zephyr-7b-sft-full
 tags:
 - trl
 - reward-trainer
 - generated_from_trainer
+metrics:
+- accuracy
 model-index:
 - name: rm_zephyr
   results: []
@@ -15,7 +17,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # rm_zephyr
 
-This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on an unknown dataset.
+This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.4657
+- Accuracy: 0.7599
 
 ## Model description
 
@@ -39,21 +44,25 @@ The following hyperparameters were used during training:
 - eval_batch_size: 1
 - seed: 42
 - distributed_type: multi-GPU
-- num_devices: 7
+- num_devices: 8
 - gradient_accumulation_steps: 32
-- total_train_batch_size: 224
-- total_eval_batch_size: 7
+- total_train_batch_size: 256
+- total_eval_batch_size: 8
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - num_epochs: 1.0
 
 ### Training results
 
+| Training Loss | Epoch  | Step | Validation Loss | Accuracy |
+|:-------------:|:------:|:----:|:---------------:|:--------:|
+| 0.5368        | 0.4290 | 100  | 0.4863          | 0.7537   |
+| 0.4497        | 0.8580 | 200  | 0.4657          | 0.7599   |
 
 
 ### Framework versions
 
-- Transformers 4.40.1
-- Pytorch 2.2.1+cu121
-- Datasets 2.18.0
+- Transformers 4.41.1
+- Pytorch 2.3.0+cu121
+- Datasets 2.19.1
 - Tokenizers 0.19.1
```
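The device count moving from 7 to 8 fully explains the new batch totals (1 per-device sample × 8 GPUs × 32 accumulation steps = 256), and the new table's loss and accuracy columns follow the semantics of TRL's `RewardTrainer`. A minimal sketch of both calculations, assuming the standard pairwise Bradley-Terry objective (the card itself does not state the loss):

```python
import torch
import torch.nn.functional as F

# Effective batch sizes implied by the card's hyperparameters
# (per-device batch size 1 is inferred from the totals, not stated):
per_device_train_batch_size = 1
num_devices = 8
gradient_accumulation_steps = 32
total = per_device_train_batch_size * num_devices * gradient_accumulation_steps
assert total == 256  # matches the updated total_train_batch_size

# Pairwise reward-model loss and accuracy as TRL's RewardTrainer defines them:
# minimize -log sigmoid(r_chosen - r_rejected); accuracy is the fraction of
# pairs where the chosen completion outscores the rejected one.
def reward_loss_and_accuracy(rewards_chosen: torch.Tensor,
                             rewards_rejected: torch.Tensor):
    loss = -F.logsigmoid(rewards_chosen - rewards_rejected).mean()
    accuracy = (rewards_chosen > rewards_rejected).float().mean()
    return loss, accuracy
```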
config.json CHANGED
```diff
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "HuggingFaceH4/mistral-7b-sft-beta",
+  "_name_or_path": "alignment-handbook/zephyr-7b-sft-full",
   "architectures": [
     "MistralForSequenceClassification"
   ],
@@ -27,7 +27,7 @@
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.40.1",
+  "transformers_version": "4.41.1",
   "use_cache": true,
   "vocab_size": 32000
 }
```
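The config's `MistralForSequenceClassification` architecture means the reward is read off a single-logit classification head. A minimal loading sketch; `"vwxyzjn/rm_zephyr"` is an assumption for this repo's id:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "vwxyzjn/rm_zephyr"  # assumption: the repo this commit belongs to
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(
    repo_id,
    num_labels=1,                # single scalar-reward head
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
)

# The reward for a candidate response is the lone classification logit.
inputs = tokenizer("<|user|>\nHi!</s>\n<|assistant|>\nHello!</s>",
                   return_tensors="pt")
with torch.no_grad():
    reward = model(**inputs).logits[0, 0].item()
```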
model-00001-of-00003.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e2d615765cf79772d162d57eb87e1b0b75ea427ae0d93ada943fb98710f23c4
+oid sha256:105d7a303bc53cb2da3f026768889215497153e2c68f91a3b75e91e8c6bf87aa
 size 4943162336
```
model-00002-of-00003.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ba0d8a0118b2607f26f912fed55d58f18b5b794fdd329c012c66a40f574e3d7c
+oid sha256:65455db6b99bf0d4bde1ac18b4418894628fbf1255967a42cea7496515d00d65
 size 4999819336
```
model-00003-of-00003.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2673659c4d1ad4af161ed59550c9629e80ea3fb52924082331f28975b71a0db8
+oid sha256:44c2123f83f42bfe919f26e2f96736696f4e61477e4df00d5156ca834d678848
 size 4278380512
```
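Each safetensors diff touches only the git-lfs pointer file: `oid` is the SHA-256 of the real shard and `size` its byte count. A small sketch for checking a downloaded shard against its pointer:

```python
import hashlib

# Verify a downloaded shard against its LFS pointer: the pointer's "oid"
# is the SHA-256 of the actual file and "size" is its byte count.
def verify_shard(path: str, expected_oid: str, expected_size: int) -> bool:
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Values from the new pointer for shard 1 of 3:
ok = verify_shard(
    "model-00001-of-00003.safetensors",
    "105d7a303bc53cb2da3f026768889215497153e2c68f91a3b75e91e8c6bf87aa",
    4943162336,
)
```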
special_tokens_map.json CHANGED
```diff
@@ -1,9 +1,4 @@
 {
-  "additional_special_tokens": [
-    "<unk>",
-    "<s>",
-    "</s>"
-  ],
   "bos_token": {
     "content": "<s>",
     "lstrip": false,
@@ -19,7 +14,7 @@
     "single_word": false
   },
   "pad_token": {
-    "content": "</s>",
+    "content": "[PAD]",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
```
tokenizer.json CHANGED
```diff
@@ -29,6 +29,15 @@
       "rstrip": false,
       "normalized": false,
       "special": true
+    },
+    {
+      "id": 32000,
+      "content": "[PAD]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
     }
   ],
   "normalizer": {
```
tokenizer_config.json CHANGED
```diff
@@ -25,24 +25,27 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "32000": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
-  "additional_special_tokens": [
-    "<unk>",
-    "<s>",
-    "</s>"
-  ],
+  "additional_special_tokens": [],
   "bos_token": "<s>",
   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": true,
-  "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "</s>",
+  "model_max_length": 2048,
+  "pad_token": "[PAD]",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
-  "truncation_side": "left",
   "unk_token": "<unk>",
-  "use_default_system_prompt": true
+  "use_default_system_prompt": false
 }
```
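tokenizer_config.json also carries the Zephyr-style chat template used to format conversations before scoring. A short usage sketch (the repo id is again an assumption):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vwxyzjn/rm_zephyr")  # assumed repo id

messages = [
    {"role": "user", "content": "What does a reward model do?"},
    {"role": "assistant", "content": "It scores candidate responses."},
]
# Renders <|user|>/<|assistant|> headers and closes each turn with the eos
# token (</s>), exactly as the Jinja template in the diff above specifies.
text = tokenizer.apply_chat_template(messages, tokenize=False)
print(text)
```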
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f7e09ea8807e52f329fbf728777352d40c4ec516261d6959d9a9b57307aca9a6
+oid sha256:095a22d234f78ca72ea0d6cc056b9bfcca05d7707da427187fbcf3718fd219f4
 size 6072
```
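training_args.bin is a pickled `transformers.TrainingArguments` object rather than model weights; a hedged sketch for inspecting it locally:

```python
import torch

# Unpickling runs arbitrary code (weights_only=False), so only load
# training_args.bin from repos you trust.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.seed, args.gradient_accumulation_steps)
```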