"""Data Loader for Turku Hockey Data2Text corpus"""

import json
import re

import datasets
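
# A minimal usage sketch (the script path is hypothetical; assumes the split
# files listed in _URLs below sit next to this script):
#
#   from datasets import load_dataset
#   dataset = load_dataset("path/to/turku_hockey_data2text.py", name="main")
#   print(dataset["train"][0]["news_article"])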

_CITATION = """\
@inproceedings{kanerva2019newsgen,
    title = {Template-free Data-to-Text Generation of Finnish Sports News},
    author = {Jenna Kanerva and Samuel R{\"o}nnqvist and Riina Kekki and Tapio Salakoski and Filip Ginter},
    booktitle = {Proceedings of the 22nd Nordic Conference on Computational Linguistics (NoDaLiDa'19)},
    year = {2019}
}
"""

_DESCRIPTION = """\
The Turku Hockey Data2Text corpus was developed as a benchmark for evaluating template-free machine learning methods for Finnish news generation in the area of ice hockey reporting. The dataset is a collection of 3,454 ice hockey games, each consisting of game statistics and a news article describing the game. Each game includes a manual alignment of events (such as goals or penalties) with the natural language sentences from the news article that describe them. The corpus includes 12,827 annotated events. The natural language passages are manually curated so as not to include any information that is not derivable from the input data or world knowledge.
"""

_HOMEPAGE = "https://github.com/TurkuNLP/Turku-hockey-data2text"

_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"

# Split files are given relative to this script; the download manager resolves
# them against the script's location.
_URLs = {
    "train": "train.json",
    "validation": "validation.json",
    "test": "test.json",
}


class TurkuHockeyData2Text(datasets.GeneratorBasedBuilder):
    """The Turku Hockey Data2Text is a manually curated corpus for Finnish news generation in the area of ice hockey reporting."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="main", version=VERSION, description="Main config"),
    ]

    DEFAULT_CONFIG_NAME = "main"

    def _info(self):
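        # Schema note: every scalar event field is typed as a string. Events of
        # different types (e.g. goals vs. penalties) carry only the fields that
        # apply to them; _generate_features() backfills the remaining fields
        # with empty defaults so that each event matches this schema.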
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "id": datasets.Value("string"),
                "news_article": datasets.Value("string"),
                "events": datasets.features.Sequence(
                    {
                        "event_id": datasets.Value("string"),
                        "event_type": datasets.Value("string"),
                        "text": datasets.Value("string"),
                        "home_team": datasets.Value("string"),
                        "guest_team": datasets.Value("string"),
                        "score": datasets.Value("string"),
                        "periods": datasets.features.Sequence(datasets.Value("string")),
                        "features": datasets.features.Sequence(datasets.Value("string")),
                        "player": datasets.Value("string"),
                        "assist": datasets.features.Sequence(datasets.Value("string")),
                        "team": datasets.Value("string"),
                        "team_name": datasets.Value("string"),
                        "time": datasets.Value("string"),
                        "penalty_minutes": datasets.Value("string"),
                        "saves": datasets.Value("string"),
                        "multi_reference": datasets.Value("bool"),
                    }
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"], "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["validation"], "split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"], "split": "test"}),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        with open(filepath, "rt", encoding="utf-8") as f:
            data = json.load(f)
        for i, example in enumerate(data):
            # GEM-style identifier, e.g. "gem-turku_hockey_data2text-train-0".
            example["gem_id"] = f"gem-turku_hockey_data2text-{split}-{i}"
            example = self._generate_features(example)
            yield i, example

    def _generate_features(self, example):
        """Normalizes each event to the full feature schema, filling missing fields with empty defaults."""
        _events = []
        for i, e in enumerate(example["events"]):
            d = {
                "event_id": e["event_id"],
                "event_type": e["event_type"],
                "text": e["text"] if e["text"] is not None else "",
                "home_team": e.get("home_team", ""),
                "guest_team": e.get("guest_team", ""),
                "score": e.get("score", ""),
                "periods": e.get("periods", []),
                "features": e.get("features", []),
                "player": e.get("player", ""),
                "assist": e.get("assist", []),
                "team": e.get("team", ""),
                "team_name": e.get("team_name", ""),
                "time": e.get("time", ""),
                "penalty_minutes": e.get("penalty_minutes", ""),
                "saves": e.get("saves", ""),
                "multi_reference": self._is_multireference(i, example["events"]),
            }
            _events.append(d)
        example["events"] = _events
        return example
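
    # Illustrative input/output for _generate_features (hypothetical values):
    # a raw goal event such as
    #   {"event_id": "E1", "event_type": "goal", "text": None, "score": "1-0"}
    # comes out with text == "" and every unused field ("penalty_minutes",
    # "saves", ...) set to an empty default, plus the computed
    # "multi_reference" flag.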

    def _is_multireference(self, i, events):
        """Return True if this event takes part in a multi-reference annotation
        (several events refer to the same text passage); otherwise return False.

        Convention encoded here: one event carries the shared passage, and each
        of the other events carries that event's id (e.g. "E5") in its text field.
        """
        if events[i]["text"] is None:
            return False
        # Marker for an event-id reference, e.g. "E5".
        event_ref = re.compile("E[0-9]+")
        # Case 1: this event's text is itself a reference to another event.
        if event_ref.match(events[i]["text"]):
            return True

        # Case 2: another event's text refers to this event's id.
        multi_events = []
        for event in events:
            if event["text"] is None:
                continue
            if event_ref.match(event["text"]):
                multi_events.append(event["text"])
        if events[i]["event_id"] in multi_events:
            return True
        return False
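
# Illustration of the multi-reference convention with two hypothetical events:
# the first carries the shared sentence, the second points back at it by id.
#
#   events = [
#       {"event_id": "E1", "event_type": "goal", "text": "Shared sentence ..."},
#       {"event_id": "E2", "event_type": "goal", "text": "E1"},
#   ]
#
# _is_multireference(1, events) returns True because "E1" matches the marker
# pattern, and _is_multireference(0, events) returns True because another
# event's text equals this event's event_id.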
|