holylovenia
committed on
Commit
•
066ce3e
1
Parent(s):
8911bdc
Upload wikiann.py with huggingface_hub
Browse files- wikiann.py +23 -23
wikiann.py
CHANGED
@@ -4,16 +4,13 @@ from typing import List
|
|
4 |
import datasets
|
5 |
from datasets import NamedSplit
|
6 |
|
7 |
-
from
|
8 |
-
from
|
9 |
-
from
|
10 |
-
DEFAULT_SOURCE_VIEW_NAME, Tasks)
|
11 |
|
12 |
_DATASETNAME = "wikiann"
|
13 |
-
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
|
14 |
-
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
|
15 |
|
16 |
-
_LANGUAGES = ["ind", "
|
17 |
_LOCAL = False
|
18 |
_CITATION = """\
|
19 |
@inproceedings{pan-etal-2017-cross,
|
@@ -71,34 +68,37 @@ Javanese jv jav
|
|
71 |
Minangkabau min min
|
72 |
Sundanese su sun
|
73 |
Acehnese ace ace
|
74 |
-
Malay ms
|
75 |
Banyumasan map-bms map-bms
|
|
|
|
|
|
|
|
|
|
|
76 |
|
77 |
|
78 |
"""
|
79 |
|
80 |
_HOMEPAGE = "https://github.com/afshinrahimi/mmner"
|
81 |
|
82 |
-
_LICENSE =
|
83 |
|
84 |
-
|
85 |
-
"wikiann": "https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip",
|
86 |
-
}
|
87 |
|
88 |
_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
|
89 |
|
90 |
_SOURCE_VERSION = "1.1.0"
|
91 |
-
|
92 |
|
93 |
|
94 |
-
def
|
95 |
if lang == "":
|
96 |
raise ValueError(f"Invalid lang {lang}")
|
97 |
|
98 |
-
if schema != "source" and schema != "
|
99 |
raise ValueError(f"Invalid schema: {schema}")
|
100 |
|
101 |
-
return
|
102 |
name="wikiann_{lang}_{schema}".format(lang=lang, schema=schema),
|
103 |
version=datasets.Version(version),
|
104 |
description="wikiann with {schema} schema for {lang} language".format(lang=lang, schema=schema),
|
@@ -107,8 +107,8 @@ def nusantara_config_constructor(lang, schema, version):
|
|
107 |
)
|
108 |
|
109 |
|
110 |
-
LANGUAGES_MAP = {"
|
111 |
-
LANG_CODES = {"
|
112 |
|
113 |
|
114 |
class WikiAnnDataset(datasets.GeneratorBasedBuilder):
|
@@ -117,14 +117,14 @@ class WikiAnnDataset(datasets.GeneratorBasedBuilder):
|
|
117 |
|
118 |
label_classes = ["B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "O"]
|
119 |
|
120 |
-
BUILDER_CONFIGS = [
|
121 |
|
122 |
DEFAULT_CONFIG_NAME = "wikiann_ind_source"
|
123 |
|
124 |
def _info(self):
|
125 |
if self.config.schema == "source":
|
126 |
features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "ner_tag": [datasets.Value("string")]})
|
127 |
-
elif self.config.schema == "
|
128 |
features = schemas.seq_label_features(self.label_classes)
|
129 |
|
130 |
return datasets.DatasetInfo(
|
@@ -136,10 +136,10 @@ class WikiAnnDataset(datasets.GeneratorBasedBuilder):
|
|
136 |
)
|
137 |
|
138 |
def get_lang(self, name):
|
139 |
-
return name.removesuffix("_source").removesuffix("
|
140 |
|
141 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
142 |
-
path = Path(dl_manager.download_and_extract(
|
143 |
lang = LANG_CODES[self.get_lang(self.config.name)]
|
144 |
wikiann_dl_dir = path / f"{lang}.tar.gz"
|
145 |
return [
|
@@ -176,7 +176,7 @@ class WikiAnnDataset(datasets.GeneratorBasedBuilder):
|
|
176 |
if tokens:
|
177 |
if self.config.schema == "source":
|
178 |
yield guid_index, {"index": str(guid_index), "tokens": tokens, "ner_tag": ner_tags}
|
179 |
-
elif self.config.schema == "
|
180 |
yield guid_index, {"id": str(guid_index), "tokens": tokens, "labels": ner_tags}
|
181 |
else:
|
182 |
raise ValueError(f"Invalid config: {self.config.name}")
|
|
|
4 |
import datasets
|
5 |
from datasets import NamedSplit
|
6 |
|
7 |
+
from seacrowd.utils import schemas
|
8 |
+
from seacrowd.utils.configs import SEACrowdConfig
|
9 |
+
from seacrowd.utils.constants import Tasks, Licenses
|
|
|
10 |
|
11 |
_DATASETNAME = "wikiann"
|
|
|
|
|
12 |
|
13 |
+
# Language subsets covered by this loader, as spelled in the language list
# (note "map-bms" with a hyphen here, while LANGUAGES_MAP/LANG_CODES key it
# as "map_bms" — the underscore form is what config names are built from).
_LANGUAGES = ["ind", "jav", "min", "sun", "ace", "zlm", "map-bms", "mya", "tgl", "tha", "vie", "khm"]
# Data is fetched from a remote archive (_URL) rather than shipped locally.
_LOCAL = False
|
15 |
_CITATION = """\
|
16 |
@inproceedings{pan-etal-2017-cross,
|
|
|
68 |
Minangkabau min min
|
69 |
Sundanese su sun
|
70 |
Acehnese ace ace
|
71 |
+
Malay ms zlm
|
72 |
Banyumasan map-bms map-bms
|
73 |
+
Myanmar my mya
|
74 |
+
Tagalog tl tgl
|
75 |
+
Thailand th tha
|
76 |
+
Vietnam vi vie
|
77 |
+
Khmer km khm
|
78 |
|
79 |
|
80 |
"""
|
81 |
|
82 |
# Upstream project page for the WikiAnn / PAN-X multilingual NER release.
_HOMEPAGE = "https://github.com/afshinrahimi/mmner"

# License constant from the shared Licenses enum (seacrowd.utils.constants).
_LICENSE = Licenses.APACHE_2_0.value

# Single zip containing one <lang>.tar.gz archive per language; the
# per-language tarball is selected from it in _split_generators.
_URL = "https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip"
|
|
|
|
|
87 |
|
88 |
# This loader exposes named-entity-recognition annotations only.
_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

# Version of the upstream WikiAnn data release.
_SOURCE_VERSION = "1.1.0"
# Version of the SEACrowd packaging of that data (parsed by datasets.Version).
_SEACROWD_VERSION = "2024.06.20"
|
92 |
|
93 |
|
94 |
+
def seacrowd_config_constructor(lang, schema, version):
|
95 |
if lang == "":
|
96 |
raise ValueError(f"Invalid lang {lang}")
|
97 |
|
98 |
+
if schema != "source" and schema != "seacrowd_seq_label":
|
99 |
raise ValueError(f"Invalid schema: {schema}")
|
100 |
|
101 |
+
return SEACrowdConfig(
|
102 |
name="wikiann_{lang}_{schema}".format(lang=lang, schema=schema),
|
103 |
version=datasets.Version(version),
|
104 |
description="wikiann with {schema} schema for {lang} language".format(lang=lang, schema=schema),
|
|
|
107 |
)
|
108 |
|
109 |
|
110 |
+
# Subset id (as embedded in config names) -> descriptive language name.
# NOTE(review): keyed as "map_bms" (underscore) although _LANGUAGES lists
# "map-bms"; config names are generated from these underscore keys, so
# get_lang()/LANG_CODES lookups stay consistent — confirm this is intended.
LANGUAGES_MAP = {"ind": "indonesian", "jav": "javanese", "min": "minangkabau", "sun": "sundanese", "ace": "acehnese", "zlm": "malay", "map_bms": "banyumasan", "mya": "myanmar", "tgl": "tagalog", "tha": "thailand", "vie": "vietnam", "khm": "khmer"}
# Subset id -> Wikipedia/WikiAnn language code; used in _split_generators to
# name the per-language tarball (f"{lang}.tar.gz") inside the downloaded zip.
LANG_CODES = {"ind": "id", "jav": "jv", "min": "min", "sun": "su", "ace": "ace", "zlm": "ms", "map_bms": "map-bms", "mya": "my", "tgl": "tl", "tha": "th","vie": "vi","khm": "km"}
|
112 |
|
113 |
|
114 |
class WikiAnnDataset(datasets.GeneratorBasedBuilder):
|
|
|
117 |
|
118 |
label_classes = ["B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "O"]
|
119 |
|
120 |
+
BUILDER_CONFIGS = [seacrowd_config_constructor(lang, "source", _SOURCE_VERSION) for lang in LANGUAGES_MAP] + [seacrowd_config_constructor(lang, "seacrowd_seq_label", _SEACROWD_VERSION) for lang in LANGUAGES_MAP]
|
121 |
|
122 |
DEFAULT_CONFIG_NAME = "wikiann_ind_source"
|
123 |
|
124 |
def _info(self):
|
125 |
if self.config.schema == "source":
|
126 |
features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "ner_tag": [datasets.Value("string")]})
|
127 |
+
elif self.config.schema == "seacrowd_seq_label":
|
128 |
features = schemas.seq_label_features(self.label_classes)
|
129 |
|
130 |
return datasets.DatasetInfo(
|
|
|
136 |
)
|
137 |
|
138 |
def get_lang(self, name):
    """Return the language subset id embedded in a config *name*.

    Peels off the schema suffix ("_source" or "_seacrowd_seq_label") and
    then the "wikiann_" prefix, e.g. "wikiann_ind_source" -> "ind".
    """
    trimmed = name
    # Strip suffixes first, then the prefix — same order as the original
    # chained expression, so behavior is identical for every input.
    for schema_suffix in ("_source", "_seacrowd_seq_label"):
        trimmed = trimmed.removesuffix(schema_suffix)
    return trimmed.removeprefix("wikiann_")
|
140 |
|
141 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
142 |
+
path = Path(dl_manager.download_and_extract(_URL))
|
143 |
lang = LANG_CODES[self.get_lang(self.config.name)]
|
144 |
wikiann_dl_dir = path / f"{lang}.tar.gz"
|
145 |
return [
|
|
|
176 |
if tokens:
|
177 |
if self.config.schema == "source":
|
178 |
yield guid_index, {"index": str(guid_index), "tokens": tokens, "ner_tag": ner_tags}
|
179 |
+
elif self.config.schema == "seacrowd_seq_label":
|
180 |
yield guid_index, {"id": str(guid_index), "tokens": tokens, "labels": ner_tags}
|
181 |
else:
|
182 |
raise ValueError(f"Invalid config: {self.config.name}")
|