Tasks: Token Classification
Modalities: Text
Sub-tasks: coreference-resolution
Languages: English
Size: < 1K
File size: 5,126 Bytes
"""SciCo"""
import os
from datasets.arrow_dataset import DatasetTransformationNotAllowedError
from datasets.utils import metadata
import jsonlines
import datasets
_CITATION = """\
@inproceedings{cattan2021scico,
title={SciCo: Hierarchical Cross-Document Coreference for Scientific Concepts},
author={Arie Cattan and Sophie Johnson and Daniel S. Weld and Ido Dagan and Iz Beltagy and Doug Downey and Tom Hope},
booktitle={3rd Conference on Automated Knowledge Base Construction},
year={2021},
url={https://openreview.net/forum?id=OFLbgUP04nC}
}
"""
_DESCRIPTION = """\
SciCo is a dataset for hierarchical cross-document coreference resolution
over scientific papers in the CS domain.
"""
_DATA_URL = "https://nlp.biu.ac.il/~ariecattan/scico/data.tar"
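
# The downloaded archive is expected to unpack into train.jsonl, dev.jsonl
# and test.jsonl; _split_generators below maps them onto the three splits.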
class Scico(datasets.GeneratorBasedBuilder):
# BUILDER_CONFIGS = [
# datasets.BuilderConfig(
# name="plain_text",
# version=datasets.Version("1.0.0", ""),
# description="SciCo",
# )
# ]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
homepage="https://scico.apps.allenai.org/",
features=datasets.Features(
{
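                    # Token-level view of a topic: "flatten_tokens" is the
                    # concatenated token stream, and each "flatten_mentions"
                    # triple presumably encodes (start, end, cluster_id)
                    # offsets into it.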
"flatten_tokens": datasets.features.Sequence(datasets.features.Value("string")),
"flatten_mentions": datasets.features.Sequence(datasets.features.Sequence(datasets.features.Value("int32"), length=3)),
"tokens": datasets.features.Sequence(datasets.features.Sequence(datasets.features.Value("string"))),
"doc_ids": datasets.features.Sequence(datasets.features.Value("int32")),
# "metadata": datasets.features.Sequence(
# {
# "title": datasets.features.Value("string"),
# "paper_sha": datasets.features.Value("string"),
# "fields_of_study": datasets.features.Value("string"),
# "Year": datasets.features.Value("string"),
# "BookTitle": datasets.features.Value("string"),
# "url": datasets.features.Value("string")
# }
# )
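                    # Per-document structure: each "mentions" 4-tuple
                    # presumably encodes (doc_id, start, end, cluster_id), and
                    # each "relations" pair a (parent, child) edge in the
                    # cluster hierarchy.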
"sentences": datasets.features.Sequence(datasets.features.Sequence(datasets.features.Sequence(datasets.features.Value("int32")))),
"mentions": datasets.features.Sequence(datasets.features.Sequence(datasets.features.Value("int32"), length=4)),
"relations": datasets.features.Sequence(datasets.features.Sequence(datasets.features.Value("int32"), length=2)),
"id": datasets.Value("int32"),
"source": datasets.Value("string"),
"hard_10": datasets.features.Value("bool"),
"hard_20": datasets.features.Value("bool"),
"curated": datasets.features.Value("bool")
}
),
supervised_keys=None,
            citation=_CITATION,
        )
def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(_DATA_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "test.jsonl")}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "dev.jsonl")}
),
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "train.jsonl")}
),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
with jsonlines.open(filepath, 'r') as f:
for i, topic in enumerate(f):
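                # Some records lack the difficulty/curation flags, so default
                # them to False before building the example.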
                topic["hard_10"] = topic.get("hard_10", False)
                topic["hard_20"] = topic.get("hard_20", False)
                topic["curated"] = topic.get("curated", False)
yield i, {
"flatten_tokens": topic['flatten_tokens'],
"flatten_mentions": topic["flatten_mentions"],
"tokens": topic["tokens"],
"doc_ids": topic["doc_ids"],
"doc_ids": topic["doc_ids"],
# "metadata": topic["metadata"]
"sentences": topic["sentences"],
"mentions": topic["mentions"],
"relations": topic["relations"],
"id": topic["id"],
"source": topic["source"],
"hard_10": topic["hard_10"],
"hard_20": topic["hard_20"],
"curated": topic["curated"]
}
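

if __name__ == "__main__":
    # Minimal local smoke test (an illustrative sketch, not part of the
    # published script): it assumes a datasets version that still supports
    # loading a dataset from a local script path.
    dataset = datasets.load_dataset(__file__)
    print(dataset)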