Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: Hindi
Size: 100K<n<1M
Commit 0c6605d by dipteshkanojia: "changes"
Parent(s): a902ef3
HiNER-collapsed.py (CHANGED)
@@ -62,9 +62,9 @@ class HiNERCollapsedConfig(datasets.GeneratorBasedBuilder):
 
     _URL = "https://huggingface.co/datasets/cfilt/HiNER-collapsed/resolve/main/data/"
     _URLS = {
-        "train": _URL + "…
-        "validation": _URL + "…
-        "test": _URL + "…
+        "train": _URL + "train.json",
+        "validation": _URL + "validation.json",
+        "test": _URL + "test.json"
     }
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:

@@ -77,55 +77,54 @@ class HiNERCollapsedConfig(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
         ]
 
-    # def _generate_examples(self, filepath):
-    #     """This function returns the examples in the raw (text) form."""
-    #     logger.info("generating examples from = %s", filepath)
-    #     with open(filepath) as f:
-    #         data = json.load(f)
-    #         for object in data:
-    #             id_ = int(object['id'])
-    #             yield id_, {
-    #                 "id": str(id_),
-    #                 "tokens": object['tokens'],
-    #                 #"pos_tags": object['pos_tags'],
-    #                 "ner_tags": object['ner_tags'],
-    #             }
     def _generate_examples(self, filepath):
+        """This function returns the examples in the raw (text) form."""
         logger.info("⏳ Generating examples from = %s", filepath)
-        with open(filepath…   [removed lines 95–131 are truncated in the page render]
+        with open(filepath) as f:
+            data = json.load(f)
+            for object in data:
+                id_ = int(object['id'])
+                yield id_, {
+                    "id": str(id_),
+                    "tokens": object['tokens'],
+                    #"pos_tags": object['pos_tags'],
+                    "ner_tags": object['ner_tags'],
+                }
+    # def _generate_examples(self, filepath):
+    #     logger.info("⏳ Generating examples from = %s", filepath)
+    #     with open(filepath, encoding="utf-8") as f:
+    #         guid = 0
+    #         tokens = []
+    #         # pos_tags = []
+    #         # chunk_tags = []
+    #         ner_tags = []
+    #         for line in f:
+    #             if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+    #                 if tokens:
+    #                     yield guid, {
+    #                         "id": str(guid),
+    #                         "tokens": tokens,
+    #                         # "pos_tags": pos_tags,
+    #                         # "chunk_tags": chunk_tags,
+    #                         "ner_tags": ner_tags,
+    #                     }
+    #                     guid += 1
+    #                     tokens = []
+    #                     # pos_tags = []
+    #                     # chunk_tags = []
+    #                     ner_tags = []
+    #             else:
+    #                 # conll2003 tokens are space separated
+    #                 splits = line.split("\t")
+    #                 tokens.append(splits[0].strip())
+    #                 # pos_tags.append(splits[1])
+    #                 # chunk_tags.append(splits[2])
+    #                 ner_tags.append(splits[1].rstrip())
+    #         # last example
+    #         yield guid, {
+    #             "id": str(guid),
+    #             "tokens": tokens,
+    #             # "pos_tags": pos_tags,
+    #             # "chunk_tags": chunk_tags,
+    #             "ner_tags": ner_tags,
+    #         }
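After this commit, the loader reads each split from a single JSON array of records rather than the CoNLL-style tab-separated lines of the now-commented reader. A minimal standalone sketch of that record shape and the new parsing logic follows; the Hindi tokens and tag strings in the sample are hypothetical, not taken from the dataset, and the helper name generate_examples is illustrative.

import json

# Hypothetical record in the shape the new _generate_examples expects:
# a JSON array of objects carrying "id", "tokens", and "ner_tags".
sample = [
    {
        "id": 0,
        "tokens": ["भारत", "की", "राजधानी", "दिल्ली", "है"],
        "ner_tags": ["B-LOCATION", "O", "O", "B-LOCATION", "O"],
    }
]

with open("sample.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False)

def generate_examples(filepath):
    """Standalone mirror of the new loader body: one example per JSON object."""
    with open(filepath, encoding="utf-8") as f:
        data = json.load(f)
    for obj in data:
        id_ = int(obj["id"])
        yield id_, {
            "id": str(id_),
            "tokens": obj["tokens"],
            "ner_tags": obj["ner_tags"],
        }

for key, example in generate_examples("sample.json"):
    print(key, example["tokens"], example["ner_tags"])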
data/{train_clean.json → train copy.json} (RENAMED, file without changes)
data/{validation_clean.json → validation copy.json} (RENAMED, file without changes)
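For reference, a hedged usage sketch: with this script in place, the dataset should load by the repo id that appears in _URL above. This assumes a datasets release that still executes loading scripts; newer releases may require trust_remote_code=True, and the most recent ones drop script-based loaders entirely.

from datasets import load_dataset

# Repo id taken from _URL in the script; split names come from _URLS.
ds = load_dataset("cfilt/HiNER-collapsed", trust_remote_code=True)

print(ds)              # expected splits: train / validation / test
print(ds["train"][0])  # one example: {"id": ..., "tokens": [...], "ner_tags": [...]}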