holylovenia committed (commit 293ed7f, parent 6c46de9)
Upload korpus_nusantara.py with huggingface_hub
korpus_nusantara.py CHANGED (+17 −17)
@@ -5,13 +5,13 @@ import re
 import datasets
 import pandas as pd
 
-from …
-from …
-from …
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks, DEFAULT_SOURCE_VIEW_NAME, DEFAULT_SEACROWD_VIEW_NAME
 
 _DATASETNAME = "korpus_nusantara"
 _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
-_UNIFIED_VIEW_NAME = …
+_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
 
 _LANGUAGES = ["ind", "jav", "xdy", "bug", "sun", "mad", "bjn", "bbc", "khek", "msa", "min", "tiociu"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
 _LOCAL = False
@@ -50,7 +50,7 @@ _URLS = {
 _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
 
 _SOURCE_VERSION = "1.0.0"
-…
+_SEACROWD_VERSION = "2024.06.20"
 
 
 """
@@ -93,7 +93,7 @@ class KorpusNusantara(datasets.GeneratorBasedBuilder):
     """Bible En-Id is a machine translation dataset containing Indonesian-English parallel sentences collected from the bible.."""
 
     BUILDER_CONFIGS = [
-        …
+        SEACrowdConfig(
             name=f"korpus_nusantara_ind_{subset}_source",
             version=datasets.Version(_SOURCE_VERSION),
             description=f"Korpus_Nusantara ind2{subset} source schema",
@@ -103,17 +103,17 @@ class KorpusNusantara(datasets.GeneratorBasedBuilder):
         for subset in _LANGUAGES[1:]
     ] + \
     [
-        …
-        name=f"korpus_nusantara_ind_{subset}…
-        version=datasets.Version(…
+        SEACrowdConfig(
+            name=f"korpus_nusantara_ind_{subset}_seacrowd_t2t",
+            version=datasets.Version(_SEACROWD_VERSION),
             description=f"Korpus_Nusantara ind2{subset} Nusantara schema",
-        schema="…
+            schema="seacrowd_t2t",
             subset_id=f"korpus_nusantara",
         )
         for subset in _LANGUAGES[1:]
     ] + \
    [
-        …
+        SEACrowdConfig(
             name=f"korpus_nusantara_{subset}_ind_source",
             version=datasets.Version(_SOURCE_VERSION),
             description=f"Korpus_Nusantara {subset}2ind source schema",
@@ -123,11 +123,11 @@ class KorpusNusantara(datasets.GeneratorBasedBuilder):
         for subset in _LANGUAGES[1:]
     ] + \
     [
-        …
-        name=f"korpus_nusantara_{subset}…
-        version=datasets.Version(…
+        SEACrowdConfig(
+            name=f"korpus_nusantara_{subset}_ind_seacrowd_t2t",
+            version=datasets.Version(_SEACROWD_VERSION),
             description=f"Korpus_Nusantara {subset}2ind Nusantara schema",
-        schema="…
+            schema="seacrowd_t2t",
             subset_id=f"korpus_nusantara",
         )
         for subset in _LANGUAGES[1:]
@@ -138,7 +138,7 @@ class KorpusNusantara(datasets.GeneratorBasedBuilder):
     def _info(self):
         if self.config.schema == "source":
             features = datasets.Features({"id": datasets.Value("string"), "text": datasets.Value("string"), "label": datasets.Value("string")})
-        elif self.config.schema == "…
+        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features
 
        return datasets.DatasetInfo(
@@ -203,7 +203,7 @@ class KorpusNusantara(datasets.GeneratorBasedBuilder):
                 }
                 yield idx, ex
 
-            elif self.config.schema == "…
+            elif self.config.schema == "seacrowd_t2t":
                for idx, row in enumerate(df.itertuples()):
                     ex = {
                         "id": str(idx),
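For reference, a minimal usage sketch of the renamed configs after this change; the config names follow the korpus_nusantara_{src}_{tgt}_seacrowd_t2t pattern introduced above. The local script path, the existence of a "train" split, and any trust_remote_code requirement are assumptions for illustration and are not part of this commit.

# Minimal sketch, not part of the commit. Assumptions: korpus_nusantara.py is available
# locally, the loader defines a "train" split, and your `datasets` version still supports
# dataset scripts (newer releases may require trust_remote_code=True).
import datasets

# Source schema config (unchanged by this commit): id / text / label fields.
src = datasets.load_dataset(
    "korpus_nusantara.py",
    name="korpus_nusantara_ind_jav_source",
    split="train",
)

# SEACrowd text-to-text config renamed by this commit.
t2t = datasets.load_dataset(
    "korpus_nusantara.py",
    name="korpus_nusantara_ind_jav_seacrowd_t2t",
    split="train",
)

print(src[0])  # {"id": ..., "text": ..., "label": ...} per the source features in the diff
print(t2t[0])  # fields come from schemas.text2text_features (not shown in this diff)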