HiTZ / multi_rebuttal_neoplasm_mbert

Token Classification
Transformers
Safetensors
bert
Inference Endpoints
anaryegen committed commit af2c5aa (1 parent: 0b01137)
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. README.md +52 -0
  2. all_results.json +8 -0
  3. checkpoint-1000/config.json +46 -0
  4. checkpoint-1000/model.safetensors +3 -0
  5. checkpoint-1000/optimizer.pt +3 -0
  6. checkpoint-1000/rng_state.pth +3 -0
  7. checkpoint-1000/scheduler.pt +3 -0
  8. checkpoint-1000/special_tokens_map.json +7 -0
  9. checkpoint-1000/tokenizer.json +0 -0
  10. checkpoint-1000/tokenizer_config.json +56 -0
  11. checkpoint-1000/trainer_state.json +35 -0
  12. checkpoint-1000/training_args.bin +3 -0
  13. checkpoint-1000/vocab.txt +0 -0
  14. checkpoint-1500/config.json +46 -0
  15. checkpoint-1500/model.safetensors +3 -0
  16. checkpoint-1500/optimizer.pt +3 -0
  17. checkpoint-1500/rng_state.pth +3 -0
  18. checkpoint-1500/scheduler.pt +3 -0
  19. checkpoint-1500/special_tokens_map.json +7 -0
  20. checkpoint-1500/tokenizer.json +0 -0
  21. checkpoint-1500/tokenizer_config.json +56 -0
  22. checkpoint-1500/trainer_state.json +42 -0
  23. checkpoint-1500/training_args.bin +3 -0
  24. checkpoint-1500/vocab.txt +0 -0
  25. checkpoint-2000/config.json +46 -0
  26. checkpoint-2000/model.safetensors +3 -0
  27. checkpoint-2000/optimizer.pt +3 -0
  28. checkpoint-2000/rng_state.pth +3 -0
  29. checkpoint-2000/scheduler.pt +3 -0
  30. checkpoint-2000/special_tokens_map.json +7 -0
  31. checkpoint-2000/tokenizer.json +0 -0
  32. checkpoint-2000/tokenizer_config.json +56 -0
  33. checkpoint-2000/trainer_state.json +49 -0
  34. checkpoint-2000/training_args.bin +3 -0
  35. checkpoint-2000/vocab.txt +0 -0
  36. checkpoint-2500/config.json +46 -0
  37. checkpoint-2500/model.safetensors +3 -0
  38. checkpoint-2500/optimizer.pt +3 -0
  39. checkpoint-2500/rng_state.pth +3 -0
  40. checkpoint-2500/scheduler.pt +3 -0
  41. checkpoint-2500/special_tokens_map.json +7 -0
  42. checkpoint-2500/tokenizer.json +0 -0
  43. checkpoint-2500/tokenizer_config.json +56 -0
  44. checkpoint-2500/trainer_state.json +56 -0
  45. checkpoint-2500/training_args.bin +3 -0
  46. checkpoint-2500/vocab.txt +0 -0
  47. checkpoint-3000/config.json +46 -0
  48. checkpoint-3000/model.safetensors +3 -0
  49. checkpoint-3000/optimizer.pt +3 -0
  50. checkpoint-3000/rng_state.pth +3 -0
README.md ADDED
@@ -0,0 +1,52 @@
+ ---
+ license: apache-2.0
+ base_model: bert-base-multilingual-cased
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: multi_rebuttal_neoplasm_mbert
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # multi_rebuttal_neoplasm_mbert
+
+ This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.40.0.dev0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.16.1
+ - Tokenizers 0.15.2
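The auto-generated card stops short of a usage example. Below is a minimal inference sketch; the hub id `HiTZ/multi_rebuttal_neoplasm_mbert` is assumed from the repository path, and the Claim/Premise label set comes from the config files further down.

```python
# Minimal sketch: tag Claim/Premise spans with the fine-tuned model.
from transformers import pipeline

tagger = pipeline(
    "token-classification",
    model="HiTZ/multi_rebuttal_neoplasm_mbert",  # assumed hub id
    aggregation_strategy="simple",               # merge wordpieces into word-level spans
)

text = "Chemotherapy improves survival, so it should remain the first-line option."
for span in tagger(text):
    print(span["entity_group"], span["word"], round(span["score"], 3))
```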
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 3.0,
+   "train_loss": 0.21816863666881214,
+   "train_runtime": 391.9097,
+   "train_samples": 17598,
+   "train_samples_per_second": 134.71,
+   "train_steps_per_second": 8.42
+ }
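The throughput figures above are internally consistent, which is worth checking for an auto-generated run. With `max_steps = 3300` from the trainer states below (ceil(17598 / 16) = 1100 steps per epoch, times 3 epochs):

```python
# Cross-check the reported throughput in all_results.json.
train_samples = 17598
num_epochs = 3.0
train_runtime = 391.9097  # seconds
max_steps = 3300          # ceil(17598 / 16) = 1100 steps/epoch * 3 epochs

print(round(train_samples * num_epochs / train_runtime, 2))  # 134.71 samples/s
print(round(max_steps / train_runtime, 2))                   # 8.42 steps/s
```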
checkpoint-1000/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "bert-base-multilingual-cased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "finetuning_task": "ner",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-Claim",
+     "1": "B-Premise",
+     "2": "I-Claim",
+     "3": "I-Premise",
+     "4": "O"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-Claim": 0,
+     "B-Premise": 1,
+     "I-Claim": 2,
+     "I-Premise": 3,
+     "O": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 119547
+ }
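The config declares a five-label BIO scheme for argument mining: Claim and Premise spans plus the outside tag. A sketch of decoding raw logits through this `id2label` map, assuming a local copy of the checkpoint directory:

```python
# Sketch: decode token predictions via the id2label map in config.json.
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer

ckpt = "checkpoint-1000"  # any saved checkpoint directory
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForTokenClassification.from_pretrained(ckpt)

inputs = tokenizer("Smoking is a major risk factor for neoplasms.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, seq_len, 5)

labels = [model.config.id2label[i] for i in logits.argmax(-1)[0].tolist()]
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
print(list(zip(tokens, labels)))
```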
checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64f9b2ee70840baf0b6a59bf80dba21c3a9184b2c4ce06979aa23d6e9ebe3428
+ size 709090132
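Binary files in the commit are Git LFS pointers: three lines giving the spec version, a sha256 object id, and the byte size. The 709090132-byte size is consistent with float32 mBERT weights plus a 5-way token-classification head. A small sketch for reading a pointer without downloading the blob:

```python
# Sketch: parse a Git LFS pointer file (the tensors themselves live in LFS storage).
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("checkpoint-1000/model.safetensors")
print(ptr["oid"])                    # sha256:64f9b2ee...
print(int(ptr["size"]) / 1e6, "MB")  # ~709 MB
```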
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:531c75de3504beadf314fba92581ae0d2ab2270153e63730acb94bf4496573d1
+ size 1418299962
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:914c899ca93fae83ac35523abf2653a093a4ca7b2d2dd49bf07c50ebc9a81c49
+ size 14244
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fdb2b08b12ca96cd0d58b8fc000fabfb6fb858115ad2e3dc1d93dba30d4a2e0
+ size 1064
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "truncation": true,
+   "unk_token": "[UNK]"
+ }
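The tokenizer config pins mBERT's standard special tokens to their usual vocabulary ids and disables lowercasing, matching the cased base model. A quick check against a local checkpoint:

```python
# Sketch: verify the special-token ids declared in tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-1000")
for token in ("[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"):
    print(token, tokenizer.convert_tokens_to_ids(token))
# Expected ids: 0, 100, 101, 102, 103
```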
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9090909090909091,
+   "eval_steps": 500,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.45,
+       "grad_norm": 4.369685173034668,
+       "learning_rate": 4.242424242424243e-05,
+       "loss": 0.4661,
+       "step": 500
+     },
+     {
+       "epoch": 0.91,
+       "grad_norm": 3.9891207218170166,
+       "learning_rate": 3.484848484848485e-05,
+       "loss": 0.3327,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 3300,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 807608523858240.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
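The logged learning rates match a no-warmup linear decay from the README's initial 5e-05 over `max_steps = 3300`:

```python
# Cross-check the logged learning rates: lr(step) = initial_lr * (1 - step / max_steps)
initial_lr, max_steps = 5e-05, 3300

for step in (500, 1000):
    print(step, initial_lr * (1 - step / max_steps))
# 500  -> 4.2424...e-05 (logged: 4.242424242424243e-05)
# 1000 -> 3.4848...e-05 (logged: 3.484848484848485e-05)
```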
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ce0ec1d3c14f032ca50e426364a62c0adbd65c3b9fdc8fd4787ebbcb999f0a3
+ size 4984
checkpoint-1000/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "bert-base-multilingual-cased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "finetuning_task": "ner",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-Claim",
+     "1": "B-Premise",
+     "2": "I-Claim",
+     "3": "I-Premise",
+     "4": "O"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-Claim": 0,
+     "B-Premise": 1,
+     "I-Claim": 2,
+     "I-Premise": 3,
+     "O": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 119547
+ }
checkpoint-1500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbf70ead670f6736e231542aaaefacec1654c92bc271c4620110147cf6ad8bf6
+ size 709090132
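Once the LFS blob behind this pointer is fetched, the safetensors file can be inspected directly; a sketch using the `safetensors` package:

```python
# Sketch: inspect checkpoint weights (requires the fetched LFS blob, not the pointer).
from safetensors.torch import load_file

state_dict = load_file("checkpoint-1500/model.safetensors")
print(len(state_dict), "tensors")
print(state_dict["classifier.weight"].shape)  # torch.Size([5, 768]): one row per BIO label
```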
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efd121e937ab0167c3d5a51612000b3ba180361a4029047bb882e0b74ca34cca
+ size 1418299962
checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a94e10534d36e7068a7fcaaadf92a61d539f75f8f486d57a9d005710d2d04b18
+ size 14244
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c15757d448c691579d103186f84b10b75f8fae0a36f8654dbd83af4c9dc14c4
+ size 1064
checkpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-1500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "truncation": true,
+   "unk_token": "[UNK]"
+ }
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.3636363636363638,
+   "eval_steps": 500,
+   "global_step": 1500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.45,
+       "grad_norm": 4.369685173034668,
+       "learning_rate": 4.242424242424243e-05,
+       "loss": 0.4661,
+       "step": 500
+     },
+     {
+       "epoch": 0.91,
+       "grad_norm": 3.9891207218170166,
+       "learning_rate": 3.484848484848485e-05,
+       "loss": 0.3327,
+       "step": 1000
+     },
+     {
+       "epoch": 1.36,
+       "grad_norm": 9.819967269897461,
+       "learning_rate": 2.7272727272727273e-05,
+       "loss": 0.2258,
+       "step": 1500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 3300,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 1210469642226000.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
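Each checkpoint directory bundles the optimizer, scheduler, and RNG state alongside the weights, so the run can be resumed mid-training. A schematic sketch only: the training dataset is not part of this repo, so `train_dataset` below is a placeholder, and the `TrainingArguments` mirror the README hyperparameters.

```python
# Schematic sketch: resume training from global step 1500 of 3300.
from transformers import AutoModelForTokenClassification, Trainer, TrainingArguments

model = AutoModelForTokenClassification.from_pretrained("checkpoint-1500")
args = TrainingArguments(
    output_dir="out",
    learning_rate=5e-05,
    per_device_train_batch_size=16,
    num_train_epochs=3.0,
)
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)  # placeholder dataset
trainer.train(resume_from_checkpoint="checkpoint-1500")  # restores optimizer/scheduler/RNG state
```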
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ce0ec1d3c14f032ca50e426364a62c0adbd65c3b9fdc8fd4787ebbcb999f0a3
+ size 4984
checkpoint-1500/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "bert-base-multilingual-cased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "finetuning_task": "ner",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-Claim",
+     "1": "B-Premise",
+     "2": "I-Claim",
+     "3": "I-Premise",
+     "4": "O"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-Claim": 0,
+     "B-Premise": 1,
+     "I-Claim": 2,
+     "I-Premise": 3,
+     "O": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 119547
+ }
checkpoint-2000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb2c70558eebbb25d5c7ea9e9bbd5d867c0832274c8ac4273752cb4245656cc1
+ size 709090132
checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e946867f65e9f95137c8697d5299859ce5f01f55d30673f988c52c6d1b2b23a
+ size 1418299962
checkpoint-2000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:600e8ed7e6dfec45186e15a1d84ef9706770fa56a6f8fa368f0d0c1eb63f00dd
+ size 14244
checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd050490d981ce2a8ab6debb3279be68125b0d5992ec889e49cb48137ad083ac
+ size 1064
checkpoint-2000/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-2000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "truncation": true,
+   "unk_token": "[UNK]"
+ }
checkpoint-2000/trainer_state.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.8181818181818183,
+   "eval_steps": 500,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.45,
+       "grad_norm": 4.369685173034668,
+       "learning_rate": 4.242424242424243e-05,
+       "loss": 0.4661,
+       "step": 500
+     },
+     {
+       "epoch": 0.91,
+       "grad_norm": 3.9891207218170166,
+       "learning_rate": 3.484848484848485e-05,
+       "loss": 0.3327,
+       "step": 1000
+     },
+     {
+       "epoch": 1.36,
+       "grad_norm": 9.819967269897461,
+       "learning_rate": 2.7272727272727273e-05,
+       "loss": 0.2258,
+       "step": 1500
+     },
+     {
+       "epoch": 1.82,
+       "grad_norm": 1.7621924877166748,
+       "learning_rate": 1.9696969696969697e-05,
+       "loss": 0.1772,
+       "step": 2000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 3300,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 1612289628090960.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ce0ec1d3c14f032ca50e426364a62c0adbd65c3b9fdc8fd4787ebbcb999f0a3
+ size 4984
checkpoint-2000/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2500/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "bert-base-multilingual-cased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "finetuning_task": "ner",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-Claim",
+     "1": "B-Premise",
+     "2": "I-Claim",
+     "3": "I-Premise",
+     "4": "O"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-Claim": 0,
+     "B-Premise": 1,
+     "I-Claim": 2,
+     "I-Premise": 3,
+     "O": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 119547
+ }
checkpoint-2500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:384a84c19967531dbccaa7ed728b32936164175bf035eb4459087e5a445a7538
+ size 709090132
checkpoint-2500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3d7bf57922294546ef663cce05c64697804c146674f8d584d97b44006d6239a
+ size 1418299962
checkpoint-2500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5db57296a2f997dbf2ff82d3ea48da094e0ecb6bcf669798dd85e9f931f295c0
+ size 14244
checkpoint-2500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfd8ff06a95d837530f2029e9972b5702eb69fdc7ac4df0ac3900ff986a2641d
+ size 1064
checkpoint-2500/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-2500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2500/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "truncation": true,
+   "unk_token": "[UNK]"
+ }
checkpoint-2500/trainer_state.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.2727272727272725,
+   "eval_steps": 500,
+   "global_step": 2500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.45,
+       "grad_norm": 4.369685173034668,
+       "learning_rate": 4.242424242424243e-05,
+       "loss": 0.4661,
+       "step": 500
+     },
+     {
+       "epoch": 0.91,
+       "grad_norm": 3.9891207218170166,
+       "learning_rate": 3.484848484848485e-05,
+       "loss": 0.3327,
+       "step": 1000
+     },
+     {
+       "epoch": 1.36,
+       "grad_norm": 9.819967269897461,
+       "learning_rate": 2.7272727272727273e-05,
+       "loss": 0.2258,
+       "step": 1500
+     },
+     {
+       "epoch": 1.82,
+       "grad_norm": 1.7621924877166748,
+       "learning_rate": 1.9696969696969697e-05,
+       "loss": 0.1772,
+       "step": 2000
+     },
+     {
+       "epoch": 2.27,
+       "grad_norm": 21.205034255981445,
+       "learning_rate": 1.2121212121212122e-05,
+       "loss": 0.1106,
+       "step": 2500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 3300,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 2019349980886680.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
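By step 2500 the `log_history` traces the full loss curve so far (0.4661 down to 0.1106); a sketch for plotting it straight from the JSON:

```python
# Sketch: plot the logged training loss from trainer_state.json.
import json
import matplotlib.pyplot as plt

with open("checkpoint-2500/trainer_state.json") as f:
    state = json.load(f)

points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
steps, losses = zip(*points)

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("loss_curve.png")
```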
checkpoint-2500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ce0ec1d3c14f032ca50e426364a62c0adbd65c3b9fdc8fd4787ebbcb999f0a3
+ size 4984
checkpoint-2500/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-3000/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "bert-base-multilingual-cased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "finetuning_task": "ner",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-Claim",
+     "1": "B-Premise",
+     "2": "I-Claim",
+     "3": "I-Premise",
+     "4": "O"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-Claim": 0,
+     "B-Premise": 1,
+     "I-Claim": 2,
+     "I-Premise": 3,
+     "O": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 119547
+ }
checkpoint-3000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5619c877f67bfd2f913eccf09c3a7063cbbf19e828903252309e7b058b73277f
+ size 709090132
checkpoint-3000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c52117b9c3efec9aeb0fd02e2577e34dc9dd0419283ecaf529383ac7874ba980
+ size 1418299962
checkpoint-3000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:628c164d40ee82705ab87fb944474752767a01a1aa3a21ef7550283c163ce69f
+ size 14244