parquet-converter committed on
Commit
ff0d069
·
1 Parent(s): c849c69

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,41 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.model filter=lfs diff=lfs merge=lfs -text
11
- *.msgpack filter=lfs diff=lfs merge=lfs -text
12
- *.npy filter=lfs diff=lfs merge=lfs -text
13
- *.npz filter=lfs diff=lfs merge=lfs -text
14
- *.onnx filter=lfs diff=lfs merge=lfs -text
15
- *.ot filter=lfs diff=lfs merge=lfs -text
16
- *.parquet filter=lfs diff=lfs merge=lfs -text
17
- *.pb filter=lfs diff=lfs merge=lfs -text
18
- *.pickle filter=lfs diff=lfs merge=lfs -text
19
- *.pkl filter=lfs diff=lfs merge=lfs -text
20
- *.pt filter=lfs diff=lfs merge=lfs -text
21
- *.pth filter=lfs diff=lfs merge=lfs -text
22
- *.rar filter=lfs diff=lfs merge=lfs -text
23
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
24
- *.tar.* filter=lfs diff=lfs merge=lfs -text
25
- *.tflite filter=lfs diff=lfs merge=lfs -text
26
- *.tgz filter=lfs diff=lfs merge=lfs -text
27
- *.wasm filter=lfs diff=lfs merge=lfs -text
28
- *.xz filter=lfs diff=lfs merge=lfs -text
29
- *.zip filter=lfs diff=lfs merge=lfs -text
30
- *.zstandard filter=lfs diff=lfs merge=lfs -text
31
- *tfevents* filter=lfs diff=lfs merge=lfs -text
32
- # Audio files - uncompressed
33
- *.pcm filter=lfs diff=lfs merge=lfs -text
34
- *.sam filter=lfs diff=lfs merge=lfs -text
35
- *.raw filter=lfs diff=lfs merge=lfs -text
36
- # Audio files - compressed
37
- *.aac filter=lfs diff=lfs merge=lfs -text
38
- *.flac filter=lfs diff=lfs merge=lfs -text
39
- *.mp3 filter=lfs diff=lfs merge=lfs -text
40
- *.ogg filter=lfs diff=lfs merge=lfs -text
41
- *.wav filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
mydata.zip → MrSemyon12--data_frame/text-train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:afc0583182471085825f0341f51bdaf90bd2ccfd76303a53b31de75eef230a5d
3
- size 880279
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5203acc8d660544a482648b8e2d7ee55d3b2bffe75a1910192aebce18ad54ff
3
+ size 1848285
conll2003.py DELETED
@@ -1,213 +0,0 @@
1
- import os, datasets
2
-
3
- _CITATION = """\
4
- @inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
5
- title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
6
- author = "Tjong Kim Sang, Erik F. and
7
- De Meulder, Fien",
8
- booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
9
- year = "2003",
10
- url = "https://www.aclweb.org/anthology/W03-0419",
11
- pages = "142--147",
12
- }
13
- """
14
-
15
- _DESCRIPTION = """\
16
- The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
17
- four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
18
- not belong to the previous three groups.
19
-
20
- The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
21
- a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
22
- a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
23
- and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
24
- if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
25
- B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
26
- tagging scheme, whereas the original dataset uses IOB1.
27
-
28
- For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
29
- """
30
-
31
- _URL = "mydata.zip"
32
- _TRAINING_FILE = "train.txt"
33
- _DEV_FILE = "valid.txt"
34
- _TEST_FILE = "test.txt"
35
-
36
-
37
class Conll2003Config(datasets.BuilderConfig):
    """BuilderConfig for the Conll2003 dataset builder."""

    def __init__(self, **kwargs):
        """Create a Conll2003 builder configuration.

        Args:
            **kwargs: Keyword arguments forwarded unchanged to
                ``datasets.BuilderConfig`` (e.g. ``name``, ``version``,
                ``description``).
        """
        super().__init__(**kwargs)
47
-
48
-
49
class Conll2003(datasets.GeneratorBasedBuilder):
    """CoNLL-2003-style NER dataset builder.

    Reads two-column ``token tag`` text files (one token per line, blank
    line between sentences, optional ``-DOCSTART-`` document markers)
    from the archive referenced by the module-level ``_URL``.
    """

    BUILDER_CONFIGS = [
        Conll2003Config(name="conll2003", version=datasets.Version("1.0.0"), description="Conll2003 dataset"),
    ]

    def _info(self):
        """Return dataset metadata: description, feature schema, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # NOTE(review): pos_tags and chunk_tags are declared here
                    # but _generate_examples never fills them (the data files
                    # only carry a NER column) — they will always be empty
                    # sequences. Confirm downstream consumers expect that.
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ADJP",
                                "I-ADJP",
                                "B-ADVP",
                                "I-ADVP",
                                "B-CONJP",
                                "I-CONJP",
                                "B-INTJ",
                                "I-INTJ",
                                "B-LST",
                                "I-LST",
                                "B-NP",
                                "I-NP",
                                "B-PP",
                                "I-PP",
                                "B-PRT",
                                "I-PRT",
                                "B-SBAR",
                                "I-SBAR",
                                "B-UCP",
                                "I-UCP",
                                "B-VP",
                                "I-VP",
                            ]
                        )
                    ),
                    # NOTE(review): this label set has no "O" (outside) tag,
                    # yet _generate_examples passes raw tags straight through;
                    # confirm the data files never contain "O", otherwise
                    # ClassLabel encoding will raise.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "PER",
                                "ORG",
                                "LOC",
                                "MEDIA",
                                "GEOPOLIT",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://www.aclweb.org/anthology/W03-0419/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract ``_URL``, then map the three split files.

        Args:
            dl_manager: ``datasets`` download manager supplied by the library.

        Returns:
            A list of ``SplitGenerator`` for train / validation / test.
        """
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": os.path.join(downloaded_file, _TRAINING_FILE),
            "dev": os.path.join(downloaded_file, _DEV_FILE),
            "test": os.path.join(downloaded_file, _TEST_FILE),
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs parsed from a two-column file.

        Each non-blank line is ``token tag`` separated by a single space;
        blank lines and ``-DOCSTART-`` markers end the current sentence.

        Fix over the original: ``line == ""`` can never be true while
        iterating an open text file, and the literal newline comparison
        missed CRLF and whitespace-only separator lines, which silently
        merged adjacent sentences. ``not line.strip()`` covers all of
        these blank-line variants.
        """
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            chunk_tags = []
            ner_tags = []
            for line in f:
                # Sentence boundary: document marker or any blank /
                # whitespace-only line (handles CRLF endings too).
                if line.startswith("-DOCSTART-") or not line.strip():
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "chunk_tags": chunk_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        chunk_tags = []
                        ner_tags = []
                else:
                    # Tokens are space separated: column 0 is the word,
                    # column 1 the NER tag (newline stripped). pos_tags and
                    # chunk_tags are intentionally left empty — this custom
                    # data has no POS/chunk columns.
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # Flush the final sentence when the file has no trailing blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "chunk_tags": chunk_tags,
                    "ner_tags": ner_tags,
                }