smerchi committed
Commit df32508
1 Parent(s): 263b02a

Model save

README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 library_name: transformers
-base_model: asafaya/bert-large-arabic
+base_model: SI2M-Lab/DarijaBERT
 tags:
 - generated_from_trainer
 metrics:
@@ -15,10 +15,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # darija_test8
 
-This model is a fine-tuned version of [asafaya/bert-large-arabic](https://huggingface.co/asafaya/bert-large-arabic) on the None dataset.
+This model is a fine-tuned version of [SI2M-Lab/DarijaBERT](https://huggingface.co/SI2M-Lab/DarijaBERT) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0000
-- Accuracy: 1.0
+- Loss: 0.0106
+- Accuracy: 0.9982
 
 ## Model description
 
@@ -49,12 +49,12 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:--------:|
-| No log        | 1.0   | 281  | 0.0390          | 0.9929   |
-| 0.0967        | 2.0   | 562  | 0.0139          | 0.9982   |
-| 0.0967        | 3.0   | 843  | 0.0008          | 1.0      |
-| 0.0216        | 4.0   | 1124 | 0.0000          | 1.0      |
-| 0.0216        | 5.0   | 1405 | 0.0000          | 1.0      |
-| 0.0032        | 6.0   | 1686 | 0.0000          | 1.0      |
+| No log        | 1.0   | 281  | 0.0242          | 0.9947   |
+| 0.0937        | 2.0   | 562  | 0.0167          | 0.9982   |
+| 0.0937        | 3.0   | 843  | 0.0245          | 0.9947   |
+| 0.0022        | 4.0   | 1124 | 0.0239          | 0.9964   |
+| 0.0022        | 5.0   | 1405 | 0.0181          | 0.9982   |
+| 0.0003        | 6.0   | 1686 | 0.0106          | 0.9982   |
 
 
 ### Framework versions
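For anyone who wants to try the checkpoint this card describes, a minimal inference sketch follows. The repo id `smerchi/darija_test8` is an assumption inferred from the card title and is not confirmed by the diff; substitute the real Hub path or a local checkout of this commit.

```python
# Minimal sketch: load the fine-tuned classifier described by this model card.
# NOTE: "smerchi/darija_test8" is an assumed repo id, not confirmed by the diff.
from transformers import pipeline

classifier = pipeline("text-classification", model="smerchi/darija_test8")

# config.json maps exactly two labels: "*6" and "autre".
print(classifier("شحال هاد الشي؟"))  # e.g. [{'label': 'autre', 'score': 0.99}]
```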
config.json CHANGED
@@ -1,37 +1,43 @@
 {
-  "_name_or_path": "asafaya/bert-large-arabic",
-  "_num_labels": 2,
+  "_name_or_path": "SI2M-Lab/DarijaBERT",
   "architectures": [
     "BertForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "classifier_dropout": null,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
   "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 1024,
+  "hidden_size": 768,
   "id2label": {
     "0": "*6",
     "1": "autre"
   },
   "initializer_range": 0.02,
-  "intermediate_size": 4096,
+  "intermediate_size": 3072,
   "label2id": {
     "*6": 0,
     "autre": 1
   },
   "layer_norm_eps": 1e-12,
+  "mask_token": "[MASK]",
+  "max_len": 128,
   "max_position_embeddings": 512,
+  "model_max_length": 128,
   "model_type": "bert",
-  "num_attention_heads": 16,
-  "num_hidden_layers": 24,
-  "output_past": true,
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token": "[PAD]",
   "pad_token_id": 0,
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
+  "sep_token": "[SEP]",
   "torch_dtype": "float32",
   "transformers_version": "4.44.2",
   "type_vocab_size": 2,
+  "unk_token": "[UNK]",
   "use_cache": true,
-  "vocab_size": 32000
+  "vocab_size": 80000
 }
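The change above is more than a rename: the backbone moves from a BERT-large geometry (hidden size 1024, 24 layers, 16 heads, 32k-token vocabulary) to DarijaBERT's BERT-base geometry (768, 12, 12) with an 80k-token vocabulary. A quick sanity check, assuming the same placeholder repo id as above:

```python
# Sketch: confirm the architecture recorded in config.json.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("smerchi/darija_test8")  # assumed repo id
assert (cfg.hidden_size, cfg.num_hidden_layers, cfg.num_attention_heads) == (768, 12, 12)
assert cfg.vocab_size == 80000
print(cfg.id2label)  # {0: '*6', 1: 'autre'}
```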
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc44c255d329cb60a88bd40957805804d721d611e29d8f973dba65a7d146822c
-size 1346676656
+oid sha256:8cedc757f7ee16bc187a86d3d8a932c04a1aa572a1da43d5aa5fbc9a1e20e3e5
+size 589955072
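The size drop is consistent with the new architecture. With float32 weights at 4 bytes per parameter, 1,346,676,656 bytes is roughly 337M parameters (bert-large-arabic plus the classification head), while 589,955,072 bytes is roughly 147M (DarijaBERT base; the 80k-row embedding table is why it exceeds a stock 110M BERT-base). A back-of-the-envelope check:

```python
# Rough parameter counts from the safetensors file sizes above,
# assuming float32 weights (4 bytes each) and negligible header overhead.
for label, size in [("old (bert-large-arabic)", 1_346_676_656),
                    ("new (DarijaBERT)", 589_955_072)]:
    print(f"{label}: ~{size / 4 / 1e6:.1f}M params")  # ~336.7M vs ~147.5M

# The token-embedding table alone explains much of the new model's size:
print(f"embeddings: ~{80_000 * 768 / 1e6:.1f}M params")  # ~61.4M
```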
runs/Sep12_10-44-45_16cf65176a0f/events.out.tfevents.1726137886.16cf65176a0f.2037.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83e13182b241e5eaf7388a3e2ec307d83fab7305ad3595b9a08aced807a83b2c
+size 8109
runs/Sep12_10-44-45_16cf65176a0f/events.out.tfevents.1726138277.16cf65176a0f.2037.3 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:daea50217d727c5c5ec1df68a8750d8cafdbc64bd4bdd54927a15e4dac750b98
+size 411
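The two added tfevents files are TensorBoard logs written during this run. They can be read back offline with TensorBoard's event reader; the scalar tag below assumes the default Hugging Face Trainer logging names and may differ:

```python
# Sketch: read eval metrics out of the uploaded TensorBoard event files.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Sep12_10-44-45_16cf65176a0f")
ea.Reload()
print(ea.Tags()["scalars"])                # list the available scalar tags
for event in ea.Scalars("eval/accuracy"):  # tag name assumes default Trainer logging
    print(event.step, event.value)
```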
special_tokens_map.json CHANGED
@@ -1,7 +1,37 @@
 {
-  "cls_token": "[CLS]",
-  "mask_token": "[MASK]",
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "unk_token": "[UNK]"
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
 }
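The rewritten special_tokens_map.json is the newer transformers serialization: each special token is stored as a full AddedToken record (content plus lstrip/rstrip/normalized/single_word flags) rather than a bare string. Both forms deserialize identically:

```python
# Sketch: either serialization loads to the same special tokens.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("smerchi/darija_test8")  # assumed repo id
print(tok.cls_token, tok.sep_token, tok.pad_token, tok.mask_token, tok.unk_token)
# [CLS] [SEP] [PAD] [MASK] [UNK]
```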
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -45,7 +45,6 @@
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
   "do_lower_case": true,
-  "full_tokenizer_file": null,
   "mask_token": "[MASK]",
   "model_max_length": 1000000000000000019884624838656,
   "never_split": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:848e1b0697766f97ae43902e8bd1b7663261e83fc17fa9c6dced7665058abc08
+oid sha256:6df38197662ca5ddd415754ac8ebc6a93e899e6465f25524d1298ff0a04dd1f6
 size 5176
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff