crabz committed
Commit 97fee74
Parent: e2ab635

initial commit

README.md ADDED
---
license: mit
tags:
- generated_from_trainer
datasets:
- wikiann_sk
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: output_dir
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: wikiann_sk
      type: wikiann_sk
      args: sk
    metrics:
    - name: Precision
      type: precision
      value: 0.9327115256495669
    - name: Recall
      type: recall
      value: 0.9470124013528749
    - name: F1
      type: f1
      value: 0.9398075632132469
    - name: Accuracy
      type: accuracy
      value: 0.9785228256835333
---

# output_dir

This model is a fine-tuned version of [gerulata/slovakbert](https://huggingface.co/gerulata/slovakbert) on the wikiann_sk dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1600
- Precision: 0.9327
- Recall: 0.9470
- F1: 0.9398
- Accuracy: 0.9785

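A minimal inference sketch follows. This card does not state the published repo id, so a hypothetical local checkpoint path is used, and the label names assume the standard wikiann tag order (`config.json` stores only numeric ids):

```python
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

MODEL_PATH = "./output_dir"  # hypothetical path to this checkpoint

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForTokenClassification.from_pretrained(MODEL_PATH)

# config.json maps label ids to themselves (0..6); remap them to the usual
# wikiann tag set (an assumption -- verify against your training setup).
labels = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
model.config.id2label = dict(enumerate(labels))
model.config.label2id = {label: i for i, label in enumerate(labels)}

ner = pipeline("token-classification", model=model, tokenizer=tokenizer,
               aggregation_strategy="simple")
print(ner("Milan Rastislav Štefánik sa narodil v Košariskách."))
```
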
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (see the `TrainingArguments` sketch after this list):
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 15.0

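These settings correspond roughly to the `TrainingArguments` below. This is a reconstruction from the list above, not the original invocation; dataset loading and preprocessing are omitted:

```python
from transformers import TrainingArguments

# Reconstructed from the hyperparameter list above (a sketch, not the
# exact training script).
training_args = TrainingArguments(
    output_dir="output_dir",
    learning_rate=5e-05,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-08,
    lr_scheduler_type="linear",
    num_train_epochs=15.0,
)
```
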
### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.2342        | 1.0   | 625  | 0.1233          | 0.8891    | 0.9076 | 0.8982 | 0.9667   |
| 0.1114        | 2.0   | 1250 | 0.1079          | 0.9118    | 0.9269 | 0.9193 | 0.9725   |
| 0.0817        | 3.0   | 1875 | 0.1093          | 0.9173    | 0.9315 | 0.9243 | 0.9747   |
| 0.0438        | 4.0   | 2500 | 0.1076          | 0.9188    | 0.9353 | 0.9270 | 0.9743   |
| 0.0280        | 5.0   | 3125 | 0.1230          | 0.9143    | 0.9387 | 0.9264 | 0.9744   |
| 0.0256        | 6.0   | 3750 | 0.1204          | 0.9246    | 0.9423 | 0.9334 | 0.9765   |
| 0.0180        | 7.0   | 4375 | 0.1332          | 0.9292    | 0.9416 | 0.9353 | 0.9770   |
| 0.0107        | 8.0   | 5000 | 0.1339          | 0.9280    | 0.9427 | 0.9353 | 0.9769   |
| 0.0079        | 9.0   | 5625 | 0.1368          | 0.9326    | 0.9442 | 0.9383 | 0.9785   |
| 0.0065        | 10.0  | 6250 | 0.1490          | 0.9284    | 0.9445 | 0.9364 | 0.9772   |
| 0.0061        | 11.0  | 6875 | 0.1566          | 0.9328    | 0.9433 | 0.9380 | 0.9778   |
| 0.0031        | 12.0  | 7500 | 0.1555          | 0.9339    | 0.9473 | 0.9406 | 0.9787   |
| 0.0024        | 13.0  | 8125 | 0.1548          | 0.9349    | 0.9462 | 0.9405 | 0.9787   |
| 0.0015        | 14.0  | 8750 | 0.1562          | 0.9330    | 0.9469 | 0.9399 | 0.9788   |
| 0.0013        | 15.0  | 9375 | 0.1600          | 0.9327    | 0.9470 | 0.9398 | 0.9785   |

### Framework versions

- Transformers 4.13.0.dev0
- Pytorch 1.10.0+cu113
- Datasets 1.15.1
- Tokenizers 0.10.3

config.json ADDED
{
  "_name_or_path": "gerulata/slovakbert",
  "architectures": [
    "RobertaForTokenClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "finetuning_task": "ner",
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": 0,
    "1": 1,
    "2": 2,
    "3": 3,
    "4": 4,
    "5": 5,
    "6": 6
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "0": 0,
    "1": 1,
    "2": 2,
    "3": 3,
    "4": 4,
    "5": 5,
    "6": 6
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.13.0.dev0",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 50264
}
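
The config describes a standard roberta-base encoder with a 7-way token-classification head. Note that `id2label`/`label2id` map ids to themselves, so label names must be supplied downstream (see the remapping sketch in the README above). A quick inspection, assuming the same hypothetical local path:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./output_dir")  # hypothetical local path
print(config.model_type)   # "roberta"
print(config.num_labels)   # 7, a wikiann-style NER tag set
print(config.id2label)     # numeric placeholders, not tag names
```
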
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:9fbd8a2479c99e55f5906e5e46438562b27e5b5ec36829d0216824b91d41d690
size 496323313
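
The weights file is a Git LFS pointer; the actual ~496 MB binary is fetched on clone or download. The recorded oid can be used to verify the download; a small check, with the file location assumed:

```python
import hashlib

# sha256 recorded in the LFS pointer above
EXPECTED = "9fbd8a2479c99e55f5906e5e46438562b27e5b5ec36829d0216824b91d41d690"

sha = hashlib.sha256()
with open("./output_dir/pytorch_model.bin", "rb") as f:  # hypothetical path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED, "checksum mismatch"
print("weights match the LFS pointer")
```
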
special_tokens_map.json ADDED
{
  "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true},
  "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true},
  "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true},
  "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true},
  "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true},
  "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true},
  "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
{
  "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"},
  "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"},
  "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"},
  "add_prefix_space": true,
  "errors": "replace",
  "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"},
  "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"},
  "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"},
  "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"},
  "special_tokens_map_file": null,
  "model_max_length": 512,
  "name_or_path": "gerulata/slovakbert",
  "tokenizer_class": "RobertaTokenizer"
}
vocab.json ADDED
The diff for this file is too large to render. See raw diff