salbatarni committed on
Commit
c295c41
1 Parent(s): df3610e

Training in progress, step 150

Files changed (4)
  1. README.md +137 -0
  2. config.json +32 -0
  3. model.safetensors +3 -0
  4. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,137 @@
---
base_model: aubmindlab/bert-base-arabertv02
tags:
- generated_from_trainer
model-index:
- name: arabert_cross_organization_task7_fold6
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# arabert_cross_organization_task7_fold6

This model is a fine-tuned version of [aubmindlab/bert-base-arabertv02](https://huggingface.co/aubmindlab/bert-base-arabertv02) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6600
- Qwk: 0.5581
- Mse: 0.6587
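A minimal inference sketch follows. It assumes the checkpoint is published under the hypothetical repo id `salbatarni/arabert_cross_organization_task7_fold6`; per the config.json in this commit, the head is a single-output `BertForSequenceClassification` in regression mode, so the lone logit is read as a continuous score.

```python
# Minimal inference sketch. The repo id below is a hypothetical placeholder;
# adjust it to wherever this checkpoint is actually published.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo_id = "salbatarni/arabert_cross_organization_task7_fold6"  # hypothetical

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

text = "..."  # an Arabic input sentence
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1): problem_type is "regression"
print(logits.squeeze().item())       # single continuous score
```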
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (see the sketch below for how they map onto `TrainingArguments`):
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
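As a rough guide, these settings correspond to `transformers.TrainingArguments` along the following lines. This is a reconstruction for illustration only (the actual training script is not part of this commit), and `output_dir` is a placeholder.

```python
# Sketch of TrainingArguments matching the hyperparameter list above.
# Not the author's script; reconstructed for illustration.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="arabert_cross_organization_task7_fold6",  # placeholder
    learning_rate=2e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    seed=42,
    num_train_epochs=10,
    lr_scheduler_type="linear",
    # Adam settings below restate the defaults given in the list above.
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```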
### Training results

| Training Loss | Epoch | Step | Validation Loss | Qwk | Mse |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|
| No log | 0.125 | 2 | 2.0463 | 0.0838 | 2.0448 |
| No log | 0.25 | 4 | 1.0746 | 0.1939 | 1.0725 |
| No log | 0.375 | 6 | 1.0643 | 0.3449 | 1.0639 |
| No log | 0.5 | 8 | 0.8689 | 0.5214 | 0.8684 |
| No log | 0.625 | 10 | 0.7524 | 0.3263 | 0.7518 |
| No log | 0.75 | 12 | 0.6423 | 0.3754 | 0.6420 |
| No log | 0.875 | 14 | 0.5644 | 0.5844 | 0.5643 |
| No log | 1.0 | 16 | 0.5224 | 0.6557 | 0.5223 |
| No log | 1.125 | 18 | 0.4855 | 0.6292 | 0.4850 |
| No log | 1.25 | 20 | 0.5779 | 0.5418 | 0.5767 |
| No log | 1.375 | 22 | 0.5208 | 0.6043 | 0.5197 |
| No log | 1.5 | 24 | 0.5175 | 0.7174 | 0.5174 |
| No log | 1.625 | 26 | 0.4998 | 0.7107 | 0.4998 |
| No log | 1.75 | 28 | 0.4818 | 0.6457 | 0.4809 |
| No log | 1.875 | 30 | 0.4990 | 0.6364 | 0.4979 |
| No log | 2.0 | 32 | 0.5085 | 0.6403 | 0.5073 |
| No log | 2.125 | 34 | 0.4978 | 0.6611 | 0.4969 |
| No log | 2.25 | 36 | 0.4811 | 0.6848 | 0.4805 |
| No log | 2.375 | 38 | 0.4675 | 0.6672 | 0.4669 |
| No log | 2.5 | 40 | 0.4889 | 0.6232 | 0.4881 |
| No log | 2.625 | 42 | 0.5071 | 0.6102 | 0.5062 |
| No log | 2.75 | 44 | 0.5162 | 0.6263 | 0.5151 |
| No log | 2.875 | 46 | 0.5184 | 0.6317 | 0.5172 |
| No log | 3.0 | 48 | 0.5229 | 0.6543 | 0.5219 |
| No log | 3.125 | 50 | 0.5389 | 0.6233 | 0.5377 |
| No log | 3.25 | 52 | 0.5879 | 0.5675 | 0.5861 |
| No log | 3.375 | 54 | 0.6183 | 0.5488 | 0.6164 |
| No log | 3.5 | 56 | 0.5578 | 0.5898 | 0.5563 |
| No log | 3.625 | 58 | 0.5612 | 0.6909 | 0.5607 |
| No log | 3.75 | 60 | 0.5964 | 0.7100 | 0.5962 |
| No log | 3.875 | 62 | 0.5615 | 0.6815 | 0.5609 |
| No log | 4.0 | 64 | 0.5730 | 0.5963 | 0.5716 |
| No log | 4.125 | 66 | 0.6867 | 0.5243 | 0.6849 |
| No log | 4.25 | 68 | 0.6700 | 0.5276 | 0.6682 |
| No log | 4.375 | 70 | 0.5889 | 0.5659 | 0.5873 |
| No log | 4.5 | 72 | 0.5446 | 0.6149 | 0.5434 |
| No log | 4.625 | 74 | 0.5556 | 0.6355 | 0.5547 |
| No log | 4.75 | 76 | 0.5886 | 0.6034 | 0.5871 |
| No log | 4.875 | 78 | 0.6730 | 0.5568 | 0.6709 |
| No log | 5.0 | 80 | 0.6892 | 0.5344 | 0.6871 |
| No log | 5.125 | 82 | 0.6046 | 0.5665 | 0.6029 |
| No log | 5.25 | 84 | 0.5605 | 0.6134 | 0.5591 |
| No log | 5.375 | 86 | 0.5415 | 0.6417 | 0.5404 |
| No log | 5.5 | 88 | 0.5515 | 0.6247 | 0.5504 |
| No log | 5.625 | 90 | 0.5964 | 0.5762 | 0.5948 |
| No log | 5.75 | 92 | 0.6466 | 0.5489 | 0.6449 |
| No log | 5.875 | 94 | 0.6325 | 0.5648 | 0.6310 |
| No log | 6.0 | 96 | 0.6036 | 0.6097 | 0.6022 |
| No log | 6.125 | 98 | 0.5955 | 0.6483 | 0.5944 |
| No log | 6.25 | 100 | 0.6017 | 0.6168 | 0.6005 |
| No log | 6.375 | 102 | 0.6349 | 0.5846 | 0.6335 |
| No log | 6.5 | 104 | 0.6941 | 0.5277 | 0.6925 |
| No log | 6.625 | 106 | 0.6740 | 0.5262 | 0.6724 |
| No log | 6.75 | 108 | 0.6043 | 0.5829 | 0.6030 |
| No log | 6.875 | 110 | 0.5813 | 0.6039 | 0.5802 |
| No log | 7.0 | 112 | 0.5847 | 0.6056 | 0.5836 |
| No log | 7.125 | 114 | 0.6031 | 0.5987 | 0.6019 |
| No log | 7.25 | 116 | 0.6490 | 0.5645 | 0.6474 |
| No log | 7.375 | 118 | 0.6772 | 0.5326 | 0.6756 |
| No log | 7.5 | 120 | 0.6849 | 0.5311 | 0.6833 |
| No log | 7.625 | 122 | 0.6620 | 0.5393 | 0.6606 |
| No log | 7.75 | 124 | 0.6230 | 0.5696 | 0.6217 |
| No log | 7.875 | 126 | 0.5912 | 0.5983 | 0.5901 |
| No log | 8.0 | 128 | 0.5924 | 0.5983 | 0.5913 |
| No log | 8.125 | 130 | 0.6124 | 0.5864 | 0.6112 |
| No log | 8.25 | 132 | 0.6364 | 0.5615 | 0.6351 |
| No log | 8.375 | 134 | 0.6650 | 0.5476 | 0.6635 |
| No log | 8.5 | 136 | 0.6693 | 0.5397 | 0.6678 |
| No log | 8.625 | 138 | 0.6639 | 0.5516 | 0.6624 |
| No log | 8.75 | 140 | 0.6658 | 0.5467 | 0.6643 |
| No log | 8.875 | 142 | 0.6772 | 0.5437 | 0.6757 |
| No log | 9.0 | 144 | 0.6778 | 0.5489 | 0.6763 |
| No log | 9.125 | 146 | 0.6641 | 0.5504 | 0.6627 |
| No log | 9.25 | 148 | 0.6614 | 0.5557 | 0.6600 |
| No log | 9.375 | 150 | 0.6564 | 0.5609 | 0.6551 |
| No log | 9.5 | 152 | 0.6530 | 0.5618 | 0.6517 |
| No log | 9.625 | 154 | 0.6533 | 0.5618 | 0.6520 |
| No log | 9.75 | 156 | 0.6546 | 0.5581 | 0.6533 |
| No log | 9.875 | 158 | 0.6579 | 0.5581 | 0.6566 |
| No log | 10.0 | 160 | 0.6600 | 0.5581 | 0.6587 |

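Qwk in the table above presumably denotes quadratic weighted kappa (Cohen's kappa with quadratic weights), a standard agreement metric for ordinal scoring tasks. A sketch of how such metrics could be computed from regression outputs, assuming predictions are rounded to integer labels before scoring; the arrays are illustrative placeholders, not data from this run.

```python
# Sketch: computing QWK and MSE for regression-style score predictions.
# Assumes "Qwk" means Cohen's kappa with quadratic weights, evaluated on
# predictions rounded to the nearest integer label.
import numpy as np
from sklearn.metrics import cohen_kappa_score, mean_squared_error

y_true = np.array([2.0, 3.0, 1.0, 4.0])  # illustrative gold scores
y_pred = np.array([2.3, 2.8, 1.4, 3.6])  # illustrative model outputs

mse = mean_squared_error(y_true, y_pred)
qwk = cohen_kappa_score(
    y_true.round().astype(int),
    y_pred.round().astype(int),
    weights="quadratic",
)
print(f"MSE={mse:.4f}  QWK={qwk:.4f}")
```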
### Framework versions

- Transformers 4.44.0
- PyTorch 2.4.0
- Datasets 2.21.0
- Tokenizers 0.19.1
config.json ADDED
@@ -0,0 +1,32 @@
{
  "_name_or_path": "aubmindlab/bert-base-arabertv02",
  "architectures": [
    "BertForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "problem_type": "regression",
  "torch_dtype": "float32",
  "transformers_version": "4.44.0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 64000
}
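The config declares a single-label (`LABEL_0`) head with `problem_type: "regression"`, which is why the model emits one continuous score rather than class probabilities. A quick sanity check, again assuming the hypothetical repo id used earlier:

```python
# Verify the regression setup from the published config
# (repo id is a hypothetical placeholder).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("salbatarni/arabert_cross_organization_task7_fold6")
print(config.problem_type)  # "regression"
print(config.num_labels)    # 1 (single LABEL_0 output)
print(config.vocab_size)    # 64000 for arabertv02
```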
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7c0b75fa9f68d4ce19999a238ad3554bac721f2ce1973355612eb5a576b7260e
size 540799996
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4e95671d9f3dbe8ba1efcb270bfac0d6480d1e06d3b535b229c88405f57bef39
size 5240