Training in progress, step 1500
- config.json +202 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- special_tokens_map.json +51 -0
- tokenizer.json +0 -0
- tokenizer_config.json +57 -0
- training_args.bin +3 -0
- vocab.json +0 -0
config.json
ADDED
@@ -0,0 +1,202 @@
+{
+  "_name_or_path": "ufal/robeczech-base",
+  "architectures": [
+    "RobertaForTokenClassification"
+  ],
+  "attention_probs_dropout_prob": 0.35,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.35,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "O",
+    "1": "B-ah",
+    "2": "I-ah",
+    "3": "B-at",
+    "4": "I-at",
+    "5": "B-az",
+    "6": "I-az",
+    "7": "B-g_",
+    "8": "I-g_",
+    "9": "B-gc",
+    "10": "I-gc",
+    "11": "B-gh",
+    "12": "I-gh",
+    "13": "B-gl",
+    "14": "I-gl",
+    "15": "B-gp",
+    "16": "I-gp",
+    "17": "B-gq",
+    "18": "I-gq",
+    "19": "B-gr",
+    "20": "I-gr",
+    "21": "B-gs",
+    "22": "I-gs",
+    "23": "B-gt",
+    "24": "I-gt",
+    "25": "B-gu",
+    "26": "I-gu",
+    "27": "B-i_",
+    "28": "I-i_",
+    "29": "B-ia",
+    "30": "I-ia",
+    "31": "B-ic",
+    "32": "I-ic",
+    "33": "B-if",
+    "34": "I-if",
+    "35": "B-io",
+    "36": "I-io",
+    "37": "B-mn",
+    "38": "I-mn",
+    "39": "B-mt",
+    "40": "I-mt",
+    "41": "B-mr",
+    "42": "I-mr",
+    "43": "B-o_",
+    "44": "I-o_",
+    "45": "B-oa",
+    "46": "I-oa",
+    "47": "B-oc",
+    "48": "I-oc",
+    "49": "B-oe",
+    "50": "I-oe",
+    "51": "B-om",
+    "52": "I-om",
+    "53": "B-op",
+    "54": "I-op",
+    "55": "B-or",
+    "56": "I-or",
+    "57": "B-p_",
+    "58": "I-p_",
+    "59": "B-pb",
+    "60": "I-pb",
+    "61": "B-pc",
+    "62": "I-pc",
+    "63": "B-pd",
+    "64": "I-pd",
+    "65": "B-pf",
+    "66": "I-pf",
+    "67": "B-pm",
+    "68": "I-pm",
+    "69": "B-pp",
+    "70": "I-pp",
+    "71": "B-ps",
+    "72": "I-ps",
+    "73": "B-td",
+    "74": "I-td",
+    "75": "B-tf",
+    "76": "I-tf",
+    "77": "B-th",
+    "78": "I-th",
+    "79": "B-ti",
+    "80": "I-ti",
+    "81": "B-tm",
+    "82": "I-tm",
+    "83": "B-ty",
+    "84": "I-ty"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "B-ah": 1,
+    "B-at": 3,
+    "B-az": 5,
+    "B-g_": 7,
+    "B-gc": 9,
+    "B-gh": 11,
+    "B-gl": 13,
+    "B-gp": 15,
+    "B-gq": 17,
+    "B-gr": 19,
+    "B-gs": 21,
+    "B-gt": 23,
+    "B-gu": 25,
+    "B-i_": 27,
+    "B-ia": 29,
+    "B-ic": 31,
+    "B-if": 33,
+    "B-io": 35,
+    "B-mn": 37,
+    "B-mr": 41,
+    "B-mt": 39,
+    "B-o_": 43,
+    "B-oa": 45,
+    "B-oc": 47,
+    "B-oe": 49,
+    "B-om": 51,
+    "B-op": 53,
+    "B-or": 55,
+    "B-p_": 57,
+    "B-pb": 59,
+    "B-pc": 61,
+    "B-pd": 63,
+    "B-pf": 65,
+    "B-pm": 67,
+    "B-pp": 69,
+    "B-ps": 71,
+    "B-td": 73,
+    "B-tf": 75,
+    "B-th": 77,
+    "B-ti": 79,
+    "B-tm": 81,
+    "B-ty": 83,
+    "I-ah": 2,
+    "I-at": 4,
+    "I-az": 6,
+    "I-g_": 8,
+    "I-gc": 10,
+    "I-gh": 12,
+    "I-gl": 14,
+    "I-gp": 16,
+    "I-gq": 18,
+    "I-gr": 20,
+    "I-gs": 22,
+    "I-gt": 24,
+    "I-gu": 26,
+    "I-i_": 28,
+    "I-ia": 30,
+    "I-ic": 32,
+    "I-if": 34,
+    "I-io": 36,
+    "I-mn": 38,
+    "I-mr": 42,
+    "I-mt": 40,
+    "I-o_": 44,
+    "I-oa": 46,
+    "I-oc": 48,
+    "I-oe": 50,
+    "I-om": 52,
+    "I-op": 54,
+    "I-or": 56,
+    "I-p_": 58,
+    "I-pb": 60,
+    "I-pc": 62,
+    "I-pd": 64,
+    "I-pf": 66,
+    "I-pm": 68,
+    "I-pp": 70,
+    "I-ps": 72,
+    "I-td": 74,
+    "I-tf": 76,
+    "I-th": 78,
+    "I-ti": 80,
+    "I-tm": 82,
+    "I-ty": 84,
+    "O": 0
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.2",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 51997
+}
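The config describes a RobertaForTokenClassification head fine-tuned from ufal/robeczech-base with 85 BIO labels: 42 two-letter entity types plus "O", which appear to follow the fine-grained CNEC typology for Czech named entities. Note the unusually high dropout of 0.35 on both hidden states and attention probabilities. As a minimal sketch of loading the checkpoint, assuming it has been downloaded locally ("path/to/checkpoint-1500" is a hypothetical placeholder; the commit does not name the repository):

# Hedged sketch: load this checkpoint and run token classification.
# "path/to/checkpoint-1500" is a hypothetical placeholder for the actual
# directory or Hub id, which this commit itself does not name.
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

model_dir = "path/to/checkpoint-1500"  # hypothetical path
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForTokenClassification.from_pretrained(model_dir)

# id2label/label2id in config.json supply the string tags the pipeline emits;
# aggregation_strategy="simple" merges B-/I- pieces into whole entity spans.
ner = pipeline(
    "token-classification",
    model=model,
    tokenizer=tokenizer,
    aggregation_strategy="simple",
)
print(ner("Václav Havel se narodil v Praze."))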
merges.txt
ADDED
The diff for this file is too large to render.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4146ed79b456c3e0b0a7f1c6f5f340db0421fcd4f0caf17386164d012c66056a
+size 501826260
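model.safetensors is committed as a Git LFS pointer file: the repository stores only the sha256 oid and byte size, while the ~502 MB weights blob lives in LFS storage. A minimal sketch for checking a downloaded blob against the pointer above (standard library only; assumes the real file has been pulled to disk):

# Verify a Git LFS blob against the oid/size recorded in its pointer file.
import hashlib

def verify_lfs_blob(path: str, expected_oid: str, expected_size: int) -> bool:
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size

ok = verify_lfs_blob(
    "model.safetensors",
    "4146ed79b456c3e0b0a7f1c6f5f340db0421fcd4f0caf17386164d012c66056a",
    501826260,
)
print("model.safetensors matches LFS pointer:", ok)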
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
+{
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "51960": {
+      "content": "[MASK]",
+      "lstrip": true,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "[CLS]",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "errors": "replace",
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "[UNK]"
+}
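This is a byte-level RobertaTokenizer, but with BERT-style special-token names ([CLS]/[SEP]/[MASK] rather than RoBERTa's usual <s>/</s>/<mask>), a RobeCzech quirk; [MASK] sits at id 51960 while the model's vocab_size is 51997. "add_prefix_space": true matters for token classification, since the fast RoBERTa tokenizer requires it when fed pre-split words. A minimal sketch (again using the hypothetical "path/to/checkpoint-1500"):

# Hedged sketch: tokenize pre-split words the way a token-classification
# training loop would. The checkpoint path is a hypothetical placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint-1500")

# add_prefix_space=true lets the byte-level BPE treat each pre-split word as
# if preceded by a space, which is what is_split_into_words relies on.
enc = tokenizer(["Dobrý", "den", "Praho"], is_split_into_words=True)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
print(enc.word_ids())  # maps each subword back to its source word index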
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:721b26ab0a66facc0578467c09b66d012108ba472058f1f6ed1f4b31248979c8
+size 4664
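training_args.bin is likewise an LFS pointer (the real file is only 4,664 bytes); underneath it is a pickled transformers.TrainingArguments object. A minimal sketch for inspecting the hyperparameters, assuming the actual file has been fetched:

# Hedged sketch: training_args.bin is a full pickle of TrainingArguments,
# so recent PyTorch needs weights_only=False to load it.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.output_dir)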
vocab.json
ADDED
The diff for this file is too large to render.