Datasets: nerutc
Tasks: Token Classification
Languages: Persian
File size: 2,596 bytes
import csv
from ast import literal_eval
import datasets
# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)
# NOTE(review): citation and description are empty placeholders — fill in
# before publishing the dataset card.
_CITATION = """"""
_DESCRIPTION = """"""
# Raw CSV files for each split, hosted on the Hugging Face Hub.
_DOWNLOAD_URLS = {
"train": "https://huggingface.co/datasets/mahdiyehebrahimi/nerutc/raw/main/nerutc_train.csv",
"test": "https://huggingface.co/datasets/mahdiyehebrahimi/nerutc/raw/main/nerutc_test.csv",
}
class ParsTwiNERConfig(datasets.BuilderConfig):
    """BuilderConfig for the nerutc NER dataset.

    Adds no options beyond the base ``datasets.BuilderConfig``; it exists so
    the builder can declare a named, versioned configuration.
    """

    def __init__(self, **kwargs):
        """Forward all keyword arguments (name, version, description, ...) to BuilderConfig."""
        # Zero-argument super() is the idiomatic Python 3 form of the original
        # super(ParsTwiNERConfig, self) call; behavior is identical.
        super().__init__(**kwargs)
class ParsTwiNER(datasets.GeneratorBasedBuilder):
    """Builder for the `nerutc` Persian token-classification (NER) dataset.

    Each example is a pair of parallel sequences: ``tokens`` (strings) and
    ``ner_tags`` (BIO labels over a single ``UNI`` entity type).

    NOTE(review): the class is named ParsTwiNER but every URL/config points at
    the `nerutc` dataset — presumably copied from another loader; confirm the
    intended name (renaming is avoided here to keep the public interface stable).
    """

    BUILDER_CONFIGS = [
        ParsTwiNERConfig(
            name="nerutc",
            version=datasets.Version("1.1.1"),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        """Return dataset metadata: token sequences paired with BIO NER class labels."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-UNI",
                                "I-UNI",
                            ]
                        )
                    ),
                }
            ),
            homepage="https://huggingface.co/datasets/mahdiyehebrahimi/nerutc",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the train/test CSVs and return one SplitGenerator per split."""
        train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
        test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs from one split's CSV file.

        Each data row is expected to hold two columns — ``tokens`` and
        ``ner_tags`` — serialized as Python list literals (e.g. ``"['a','b']"``).
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
            next(csv_reader, None)  # skip the header row (safe on empty files)
            for id_, row in enumerate(csv_reader):
                # Fix: a blank line (e.g. trailing newline) yields an empty row,
                # which would make the 2-tuple unpacking raise ValueError.
                if not row:
                    continue
                tokens, ner_tags = row
                # Columns are Python list literals; literal_eval parses them
                # safely (no arbitrary code execution, unlike eval).
                tokens = literal_eval(tokens)
                ner_tags = literal_eval(ner_tags)
                yield id_, {"tokens": tokens, "ner_tags": ner_tags}