Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: Hindi
Size: 100K<n<1M
Commit 74b1501 by dipteshkanojia
Parent(s): 61c6045
Commit message: cahnges
Files changed:
- HiNER-collapsed.py +55 -16
- data/test.conll +3 -0
- data/test_clean.conll +3 -0
- data/train.conll +3 -0
- data/train_clean.conll +3 -0
- data/validation.conll +3 -0
- data/validation_clean.conll +3 -0
HiNER-collapsed.py
CHANGED

@@ -8,6 +8,7 @@ logger = datasets.logging.get_logger(__name__)
 
 
 _CITATION = """
+XX
 """
 
 _DESCRIPTION = """
@@ -55,15 +56,15 @@ class HiNERCollapsedConfig(datasets.GeneratorBasedBuilder):
                 }
             ),
             supervised_keys=None,
-            homepage="",
+            homepage="YY",
             citation=_CITATION,
         )
 
     _URL = "https://huggingface.co/datasets/cfilt/HiNER-collapsed/raw/main/data/"
     _URLS = {
-        "train": _URL + "train_clean.
-        "validation": _URL + "validation_clean.
-        "test": _URL + "test_clean.
+        "train": _URL + "train_clean.conll",
+        "validation": _URL + "validation_clean.conll",
+        "test": _URL + "test_clean.conll"
     }
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
@@ -76,16 +77,54 @@ class HiNERCollapsedConfig(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
         ]
 
+    # def _generate_examples(self, filepath):
+    #     """This function returns the examples in the raw (text) form."""
+    #     logger.info("generating examples from = %s", filepath)
+    #     with open(filepath) as f:
+    #         data = json.load(f)
+    #         for object in data:
+    #             id_ = int(object['id'])
+    #             yield id_, {
+    #                 "id": str(id_),
+    #                 "tokens": object['tokens'],
+    #                 #"pos_tags": object['pos_tags'],
+    #                 "ner_tags": object['ner_tags'],
+    #             }
     def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        logger.info("generating examples from = %s", filepath)
-        with open(filepath) as f:
-            data = json.load(f)
-            for object in data:
-                id_ = int(object['id'])
-                yield id_, {
-                    "id": str(id_),
-                    "tokens": object['tokens'],
-                    #"pos_tags": object['pos_tags'],
-                    "ner_tags": object['ner_tags'],
-                }
+        logger.info("⏳ Generating examples from = %s", filepath)
+        with open(filepath, encoding="utf-8") as f:
+            guid = 0
+            tokens = []
+            # pos_tags = []
+            # chunk_tags = []
+            ner_tags = []
+            for line in f:
+                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                    if tokens:
+                        yield guid, {
+                            "id": str(guid),
+                            "tokens": tokens,
+                            # "pos_tags": pos_tags,
+                            # "chunk_tags": chunk_tags,
+                            "ner_tags": ner_tags,
+                        }
+                        guid += 1
+                        tokens = []
+                        # pos_tags = []
+                        # chunk_tags = []
+                        ner_tags = []
+                else:
+                    # conll2003 tokens are space separated
+                    splits = line.split("\t")
+                    tokens.append(splits[0].strip())
+                    # pos_tags.append(splits[1])
+                    # chunk_tags.append(splits[2])
+                    ner_tags.append(splits[1].strip())
+            # last example
+            yield guid, {
+                "id": str(guid),
+                "tokens": tokens,
+                # "pos_tags": pos_tags,
+                # "chunk_tags": chunk_tags,
+                "ner_tags": ner_tags,
+            }
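The rewrite above switches _generate_examples from the earlier JSON loader (kept as the commented-out block) to a CoNLL-style reader: one token and its NER tag per line, tab-separated, with blank lines or -DOCSTART- markers delimiting sentences. Below is a minimal, self-contained sketch of that input contract and parsing loop; the Hindi tokens and tag names are illustrative placeholders, not taken from the dataset.

# Sketch of the input format the new _generate_examples expects.
# Tokens and tags below are made-up placeholders for illustration.
sample = (
    "मुंबई\tB-LOCATION\n"
    "में\tO\n"
    "\n"
    "राम\tB-PERSON\n"
    "खेलता\tO\n"
)

with open("sample.conll", "w", encoding="utf-8") as f:
    f.write(sample)

# The same parsing loop as the committed generator, collected into a list.
examples, tokens, ner_tags = [], [], []
with open("sample.conll", encoding="utf-8") as f:
    for line in f:
        if line.startswith("-DOCSTART-") or line == "\n":
            if tokens:
                examples.append({"tokens": tokens, "ner_tags": ner_tags})
                tokens, ner_tags = [], []
        else:
            splits = line.split("\t")
            tokens.append(splits[0].strip())
            ner_tags.append(splits[1].strip())
if tokens:  # flush the final sentence if the file lacks a trailing blank line
    examples.append({"tokens": tokens, "ner_tags": ner_tags})

print(examples)
# [{'tokens': ['मुंबई', 'में'], 'ner_tags': ['B-LOCATION', 'O']},
#  {'tokens': ['राम', 'खेलता'], 'ner_tags': ['B-PERSON', 'O']}]

Two details are visible in the diff: the inline comment "conll2003 tokens are space separated" is carried over from the upstream conll2003 script even though this loader splits on tabs, and the final yield after the loop is not guarded by "if tokens:", so a file ending in a blank line emits one empty trailing example (the sketch above guards that case). With this change in place, datasets.load_dataset("cfilt/HiNER-collapsed") feeds the three *_clean.conll files listed in _URLS through this generator.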
data/test.conll
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9032a2af825659f30e7c0fb8696b259a8a353c6ec535b5947e8c9c80332bb3e0
+size 8537851

data/test_clean.conll
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9032a2af825659f30e7c0fb8696b259a8a353c6ec535b5947e8c9c80332bb3e0
+size 8537851

data/train.conll
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23975e1e2433c293dba97d163b8995428e9eeb6bba52f9ab057a72c5ac81c5b5
+size 23631586

data/train_clean.conll
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23975e1e2433c293dba97d163b8995428e9eeb6bba52f9ab057a72c5ac81c5b5
+size 23631586

data/validation.conll
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25cbf8c6361473786545348bf1a2832747f1d0da1e159c860a5fc130dc6bf4ec
+size 2911669

data/validation_clean.conll
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25cbf8c6361473786545348bf1a2832747f1d0da1e159c860a5fc130dc6bf4ec
+size 2911669
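The six data/*.conll entries above are not the data itself but Git LFS pointer files: three lines recording the LFS spec version, the sha256 oid, and the byte size of the object that Git LFS fetches on checkout. A minimal sketch, assuming the huggingface_hub client is installed, of downloading the LFS-resolved train split and checking it against the pointer added in this commit (the file name and oid/size come from the diff; everything else is illustrative):

import hashlib

from huggingface_hub import hf_hub_download

# Fetch the real file behind the data/train_clean.conll pointer.
path = hf_hub_download(
    repo_id="cfilt/HiNER-collapsed",
    filename="data/train_clean.conll",
    repo_type="dataset",
)

# Its sha256 should match the pointer's oid, and its length the pointer's size.
with open(path, "rb") as f:
    blob = f.read()
print(hashlib.sha256(blob).hexdigest()
      == "23975e1e2433c293dba97d163b8995428e9eeb6bba52f9ab057a72c5ac81c5b5")
print(len(blob) == 23631586)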