ymoslem committed commit df1c4a1 (verified) · 1 parent: 908e790

End of training
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,108 @@
+ ---
+ library_name: transformers
+ language:
+ - multilingual
+ - bn
+ - cs
+ - de
+ - en
+ - et
+ - fi
+ - fr
+ - gu
+ - ha
+ - hi
+ - is
+ - ja
+ - kk
+ - km
+ - lt
+ - lv
+ - pl
+ - ps
+ - ru
+ - ta
+ - tr
+ - uk
+ - xh
+ - zh
+ - zu
+ license: mit
+ base_model: FacebookAI/xlm-roberta-large
+ tags:
+ - quality-estimation
+ - regression
+ - generated_from_trainer
+ datasets:
+ - ymoslem/wmt-da-human-evaluation
+ model-index:
+ - name: Quality Estimation for Machine Translation
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Quality Estimation for Machine Translation
+
+ This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on the ymoslem/wmt-da-human-evaluation dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0752
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 8e-05
+ - train_batch_size: 64
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: AdamW (ADAMW_TORCH_FUSED) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - training_steps: 20000
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:-----:|:---------------:|
+ | 0.0743 | 0.0502 | 1000 | 0.0598 |
+ | 0.0853 | 0.1004 | 2000 | 0.0745 |
+ | 0.0829 | 0.1506 | 3000 | 0.0726 |
+ | 0.0814 | 0.2008 | 4000 | 0.0872 |
+ | 0.0805 | 0.2509 | 5000 | 0.0715 |
+ | 0.0782 | 0.3011 | 6000 | 0.0819 |
+ | 0.0789 | 0.3513 | 7000 | 0.0733 |
+ | 0.0791 | 0.4015 | 8000 | 0.0748 |
+ | 0.0787 | 0.4517 | 9000 | 0.0759 |
+ | 0.0761 | 0.5019 | 10000 | 0.0725 |
+ | 0.0746 | 0.5521 | 11000 | 0.0745 |
+ | 0.0762 | 0.6023 | 12000 | 0.0750 |
+ | 0.0770 | 0.6524 | 13000 | 0.0725 |
+ | 0.0777 | 0.7026 | 14000 | 0.0737 |
+ | 0.0764 | 0.7528 | 15000 | 0.0745 |
+ | 0.0781 | 0.8030 | 16000 | 0.0750 |
+ | 0.0748 | 0.8532 | 17000 | 0.0765 |
+ | 0.0768 | 0.9034 | 18000 | 0.0750 |
+ | 0.0737 | 0.9536 | 19000 | 0.0759 |
+ | 0.0769 | 1.0038 | 20000 | 0.0752 |
+
+ ### Framework versions
+
+ - Transformers 4.48.0
+ - Pytorch 2.4.1+cu124
+ - Datasets 3.2.0
+ - Tokenizers 0.21.0
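For readers reproducing this setup, the hyperparameters listed in the card map onto a `Trainer` configuration roughly as in the sketch below. This is a minimal sketch, not the training script from this commit: the `output_dir` is a placeholder, and the evaluation cadence (every 1000 steps) is inferred from the results table rather than recorded in the card.

```python
# Hedged sketch of TrainingArguments matching the model card's hyperparameters.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="qe-xlmr-large",     # hypothetical path, not from the commit
    learning_rate=8e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    seed=42,
    optim="adamw_torch_fused",      # OptimizerNames.ADAMW_TORCH_FUSED
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    max_steps=20_000,               # training_steps: 20000
    eval_strategy="steps",          # assumption: eval every 1000 steps,
    eval_steps=1_000,               # inferred from the results table
)
```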
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9da49ac3dcb9792b31b9db0a35236784033286db9f1c2b260c1cc438b2daf7e5
+ oid sha256:44a87b998ab829ef31229168b09c3e1e0c7bc45bfe6bf51b7020c3eef731bd3e
  size 1119831426
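Only the LFS pointer changes here: the oid is the SHA-256 of the file's contents, so the new weights replace the old ones at an identical size. A downloaded copy can be checked against the pointer with a few lines of Python; the local filename is an assumption.

```python
# Verify a downloaded LFS file against the sha256 oid in its pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "44a87b998ab829ef31229168b09c3e1e0c7bc45bfe6bf51b7020c3eef731bd3e"
assert sha256_of("model.safetensors") == expected  # path is a placeholder
```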
runs/Jan15_05-51-13_ea48e459ea32/events.out.tfevents.1736920273.ea48e459ea32.1232.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5ff16f0b2ddc255945ad9f237257e899559acc212001b9be5c8305c75cefa4a9
- size 53102
+ oid sha256:8c7eb780b41b3311fc7a2e4bad91115061d6fcabdae3787eacca8d3bb95bedda
+ size 53462
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ffb37461c391f096759f4a9bbbc329da0f36952f88bab061fcf84940c022e98
+ size 17082999
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "250001": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
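Putting the committed pieces together (weights, tokenizer files, and special-token maps), a hedged usage sketch follows. The repository id below is a placeholder, and the single-output regression head (num_labels=1) is an assumption based on the card's quality-estimation and regression tags.

```python
# Hedged end-to-end sketch: score a source/translation pair for quality.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "ymoslem/quality-estimation-xlm-roberta-large"  # hypothetical id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

source = "Das Wetter ist heute schön."
translation = "The weather is nice today."

# XLM-R style sentence pair: <s> source </s></s> translation </s>,
# truncated to the tokenizer's model_max_length of 512.
inputs = tokenizer(source, translation, truncation=True,
                   max_length=tokenizer.model_max_length, return_tensors="pt")
with torch.no_grad():
    score = model(**inputs).logits.squeeze().item()
print(f"predicted quality score: {score:.3f}")
```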