Datasets: cfilt/HiNER-collapsed
Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: Hindi
Size: 100K<n<1M
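The loading script for the dataset follows. Assuming the data files stay hosted at the URLs referenced in the script, the dataset can typically be loaded straight from the Hub with the standard datasets API; this is a minimal sketch, with the repository id cfilt/HiNER-collapsed taken from the _URL defined below:

from datasets import load_dataset

# Minimal usage sketch (assumes network access to the Hugging Face Hub).
dataset = load_dataset("cfilt/HiNER-collapsed")
print(dataset["train"][0])  # {"id": ..., "tokens": [...], "ner_tags": [...]}
labels = dataset["train"].features["ner_tags"].feature.names  # ["O", "B-PERSON", ...]
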
import os
import datasets
from typing import List
import json
logger = datasets.logging.get_logger(__name__)
_CITATION = """
XX
"""
_DESCRIPTION = """
This is the repository for HiNER - a large Hindi Named Entity Recognition dataset.
"""
class HiNERCollapsedConfig(datasets.BuilderConfig):
    """BuilderConfig for HiNER Collapsed."""

    def __init__(self, **kwargs):
        """BuilderConfig for HiNER Collapsed.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(HiNERCollapsedConfig, self).__init__(**kwargs)


class HiNERCollapsed(datasets.GeneratorBasedBuilder):
    """HiNER Collapsed dataset."""

    BUILDER_CONFIGS = [
        HiNERCollapsedConfig(name="HiNER-Collapsed", version=datasets.Version("0.0.2"), description="Hindi Named Entity Recognition Dataset"),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PERSON",
                                "I-PERSON",
                                "B-LOCATION",
                                "I-LOCATION",
                                "B-ORGANIZATION",
                                "I-ORGANIZATION",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="YY",
            citation=_CITATION,
        )
    _URL = "https://huggingface.co/datasets/cfilt/HiNER-collapsed/resolve/main/data/"
    _URLS = {
        "train": _URL + "train.json",
        "validation": _URL + "validation.json",
        "test": _URL + "test.json",
    }
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for example in data:
                id_ = int(example["id"])
                yield id_, {
                    "id": str(id_),
                    "tokens": example["tokens"],
                    # "pos_tags": example["pos_tags"],
                    "ner_tags": example["ner_tags"],
                }
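    # NOTE: the commented-out generator below is an earlier CoNLL-style parser
    # (one token and tag per tab-separated line, blank lines between sentences).
    # It is kept for reference only; the released splits are distributed as JSON
    # and handled by _generate_examples above.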
    # def _generate_examples(self, filepath):
    #     logger.info("⏳ Generating examples from = %s", filepath)
    #     with open(filepath, encoding="utf-8") as f:
    #         guid = 0
    #         tokens = []
    #         # pos_tags = []
    #         # chunk_tags = []
    #         ner_tags = []
    #         for line in f:
    #             if line.startswith("-DOCSTART-") or line == "" or line == "\n":
    #                 if tokens:
    #                     yield guid, {
    #                         "id": str(guid),
    #                         "tokens": tokens,
    #                         # "pos_tags": pos_tags,
    #                         # "chunk_tags": chunk_tags,
    #                         "ner_tags": ner_tags,
    #                     }
    #                     guid += 1
    #                     tokens = []
    #                     # pos_tags = []
    #                     # chunk_tags = []
    #                     ner_tags = []
    #             else:
    #                 # conll2003 tokens are space separated
    #                 splits = line.split("\t")
    #                 tokens.append(splits[0].strip())
    #                 # pos_tags.append(splits[1])
    #                 # chunk_tags.append(splits[2])
    #                 ner_tags.append(splits[1].rstrip())
    #         # last example
    #         yield guid, {
    #             "id": str(guid),
    #             "tokens": tokens,
    #             # "pos_tags": pos_tags,
    #             # "chunk_tags": chunk_tags,
    #             "ner_tags": ner_tags,
    #         }
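For reference, _generate_examples expects each split file to be a JSON array of records with id, tokens, and ner_tags fields. The snippet below is a hypothetical illustration of that layout; the tokens and tag values are made up for illustration, not taken from the released data:

import json

# Hypothetical record layout consumed by _generate_examples above.
# "ner_tags" may hold ClassLabel indices (shown here) or the tag strings;
# datasets.ClassLabel accepts either form when encoding examples.
sample = [
    {
        "id": 0,
        "tokens": ["मुंबई", "में", "बारिश", "हुई"],
        "ner_tags": [3, 0, 0, 0],  # B-LOCATION, O, O, O under the label list above
    }
]

with open("train.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False)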