"""TODO: Add a description here.""" |
import json

import datasets

_CITATION = """\ |
|
@article{pratapa-etal-2022-multilingual, |
|
title = {Multilingual Event Linking to Wikidata}, |
|
author = {Pratapa, Adithya and Gupta, Rishubh and Mitamura, Teruko}, |
|
publisher = {arXiv}, |
|
year = {2022}, |
|
url = {https://arxiv.org/abs/2204.06535}, |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
XLEL-WD is a multilingual event linking dataset. \ |
|
This dataset contains mention references from multilingual Wikipedia/Wikinews articles to event items in Wikidata. \ |
|
The text descriptions for Wikidata events are compiled from Wikipedia articles. |
|
""" |
|
|
|
_HOMEPAGE = "https://github.com/adithya7/xlel-wd" |
|
|
|
_LICENSE = "CC-BY-4.0" |
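# The JSONL files below are referenced by relative path; the datasets download
# manager resolves them against the dataset repository, so they are expected to
# live alongside this script.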
_URLS = {
    "wikipedia-zero-shot": {
        "train": "wikipedia.train.jsonl",
        "dev": "wikipedia-zero-shot.dev.jsonl",
        "test": "wikipedia-zero-shot.test.jsonl",
    },
    "wikinews-zero-shot": {"test": "wikinews-zero-shot.test.jsonl"},
    "wikinews-cross-domain": {"test": "wikinews-cross-domain.test.jsonl"},
}

_WIKIPEDIA_ZERO_SHOT_LANGS = [
    "af",
    "ar",
    "be",
    "bg",
    "bn",
    "ca",
    "cs",
    "da",
    "de",
    "el",
    "en",
    "es",
    "fa",
    "fi",
    "fr",
    "he",
    "hi",
    "hu",
    "id",
    "it",
    "ja",
    "ko",
    "ml",
    "mr",
    "ms",
    "nl",
    "no",
    "pl",
    "pt",
    "ro",
    "ru",
    "si",
    "sk",
    "sl",
    "sr",
    "sv",
    "sw",
    "ta",
    "te",
    "th",
    "tr",
    "uk",
    "vi",
    "zh",
]

_WIKINEWS_CROSS_DOMAIN_LANGS = [
    "ar",
    "bg",
    "ca",
    "cs",
    "de",
    "el",
    "en",
    "es",
    "fi",
    "fr",
    "he",
    "hu",
    "it",
    "ja",
    "ko",
    "nl",
    "no",
    "pl",
    "pt",
    "ro",
    "ru",
    "sr",
    "sv",
    "ta",
    "tr",
    "uk",
    "zh",
]

_WIKINEWS_ZERO_SHOT_LANGS = [
    "ar",
    "cs",
    "de",
    "en",
    "es",
    "fi",
    "fr",
    "it",
    "ja",
    "ko",
    "nl",
    "no",
    "pl",
    "pt",
    "ru",
    "sr",
    "sv",
    "ta",
    "tr",
    "uk",
    "zh",
]
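# Every entry in _TASK_NAMES below becomes a builder config: the aggregate configs
# ("wikipedia-zero-shot", "wikinews-zero-shot", "wikinews-cross-domain") span all
# languages, while "<task>.<lang>" configs keep only mentions from that language.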
_TASK_NAMES = []
_TASK_DESCRIPTIONS = []

_TASK_NAMES += ["wikipedia-zero-shot"]
_TASK_DESCRIPTIONS += [
    "This task requires linking mentions from multilingual wiki to Wikidata events (zero-shot evaluation)."
]
for lang in _WIKIPEDIA_ZERO_SHOT_LANGS:
    _TASK_NAMES += [f"wikipedia-zero-shot.{lang}"]
    _TASK_DESCRIPTIONS += [
        f"This task requires linking mentions from {lang}wiki to Wikidata events (zero-shot evaluation)."
    ]

_TASK_NAMES += ["wikinews-zero-shot"]
_TASK_DESCRIPTIONS += [
    "This task requires linking mentions from multilingual wikinews to Wikidata events (zero-shot evaluation)."
]
for lang in _WIKINEWS_ZERO_SHOT_LANGS:
    _TASK_NAMES += [f"wikinews-zero-shot.{lang}"]
    _TASK_DESCRIPTIONS += [
        f"This task requires linking mentions from {lang}wikinews to Wikidata events (zero-shot evaluation)."
    ]

_TASK_NAMES += ["wikinews-cross-domain"]
_TASK_DESCRIPTIONS += [
    "This task requires linking mentions from multilingual wikinews to Wikidata events (cross-domain evaluation)."
]
for lang in _WIKINEWS_CROSS_DOMAIN_LANGS:
    _TASK_NAMES += [f"wikinews-cross-domain.{lang}"]
    _TASK_DESCRIPTIONS += [
        f"This task requires linking mentions from {lang}wikinews to Wikidata events (cross-domain evaluation)."
    ]

class XlelWdConfig(datasets.BuilderConfig):
    """BuilderConfig for XLEL-WD"""

    def __init__(self, features, citation, url, **kwargs) -> None:
        super(XlelWdConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.citation = citation
        self.url = url

class XlelWd(datasets.GeneratorBasedBuilder):
    """A dataset for multilingual linking of event mentions to Wikidata."""

    VERSION = datasets.Version("1.0.0")
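    # One config per task name; the Wikinews configs carry two extra features
    # (context_title, context_date) that the Wikipedia configs do not.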
    BUILDER_CONFIGS = [
        XlelWdConfig(
            name=name,
            description=desc,
            features=["mention", "context_left", "context_right", "context_lang"],
            citation=_CITATION,
            url=_URLS[name.split(".")[0]],
        )
        if name.startswith("wikipedia")
        else XlelWdConfig(
            name=name,
            description=desc,
            features=[
                "mention",
                "context_left",
                "context_right",
                "context_lang",
                "context_title",
                "context_date",
            ],
            citation=_CITATION,
            url=_URLS[name.split(".")[0]],
        )
        for name, desc in zip(_TASK_NAMES, _TASK_DESCRIPTIONS)
    ]
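    # Besides the config-specific features, every example also exposes the gold
    # Wikidata event identifier as `label_id` (added in `_info`).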
    def _info(self):
        features = {
            feature: datasets.Value("string") for feature in self.config.features
        }
        features["label_id"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name.split(".")[0]]
        downloaded_files = dl_manager.download_and_extract(urls)
        if self.config.name.startswith("wikipedia"):
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": downloaded_files["train"],
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": downloaded_files["dev"],
                        "split": "dev",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": downloaded_files["test"],
                        "split": "test",
                    },
                ),
            ]
        elif self.config.name.startswith("wikinews"):
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": downloaded_files["test"],
                        "split": "test",
                    },
                ),
            ]
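    # Each JSONL row provides at least: mention, context_left, context_right,
    # context_lang and label_id (Wikinews rows additionally carry context_title
    # and context_date). For language-specific configs ("<task>.<lang>"), only
    # rows whose `context_lang` matches the requested language are yielded.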
    def _generate_examples(self, filepath, split):
        task_domain, *task_langs = self.config.name.split(".")
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if len(task_langs) == 0 or task_langs[0] == data["context_lang"]:
                    if task_domain.startswith("wikipedia"):
                        yield key, {
                            "mention": data["mention"],
                            "context_left": data["context_left"],
                            "context_right": data["context_right"],
                            "context_lang": data["context_lang"],
                            "label_id": data["label_id"],
                        }
                    elif task_domain.startswith("wikinews"):
                        yield key, {
                            "mention": data["mention"],
                            "context_left": data["context_left"],
                            "context_right": data["context_right"],
                            "context_lang": data["context_lang"],
                            "context_title": data["context_title"],
                            "context_date": data["context_date"],
                            "label_id": data["label_id"],
                        }