# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Loader for Turku Hockey Data2Text corpus"""
import json
import re

import datasets
# Citation for the corpus (BibTeX).
_CITATION = """\
@inproceedings{kanerva2019newsgen,
    title = {Template-free Data-to-Text Generation of Finnish Sports News},
    author = {Jenna Kanerva and Samuel R{\\"o}nnqvist and Riina Kekki and Tapio Salakoski and Filip Ginter},
    booktitle = {Proceedings of the 22nd Nordic Conference on Computational Linguistics (NoDaLiDa’19)},
    year = {2019}
}
"""
# Official description of the dataset.
_DESCRIPTION = """\
The Turku Hockey Data2Text corpus was developed as a benchmark for evaluating template-free machine learning methods on Finnish news generation in the area of ice hockey reporting. The dataset is a collection of 3,454 ice hockey games, each including game statistics and a news article describing the game. Each game includes manual alignments between events (such as goals or penalties) and the natural language sentences describing them, extracted from the news article. The corpus includes 12,827 annotated events. The natural language passages are manually curated so as not to include any information that is not derivable from the input data or world knowledge.
"""
_HOMEPAGE = "https://github.com/TurkuNLP/Turku-hockey-data2text"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
# The HuggingFace datasets library doesn't host the datasets; it only points to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLs = {
'train': 'train.json',
'validation': 'validation.json',
'test': 'test.json'
}
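
# For illustration, a minimal usage sketch with the `datasets` library. The
# dataset path below is an assumption: adjust it to wherever this loading
# script is actually published (or pass the local path to this .py file).
#
#     import datasets
#     dataset = datasets.load_dataset("turku_hockey_data2text", "main")
#     print(dataset["train"][0]["news_article"])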
class TurkuHockeyData2Text(datasets.GeneratorBasedBuilder):
"""The Turky Hockey Data2Text is a manually curated corpus for Finnish news generation in the area of ice hockey reporting."""
VERSION = datasets.Version("1.1.0")
    # This dataset defines a single configuration. If sub-sets with configurable
    # options are ever needed, additional entries can be added to BUILDER_CONFIGS
    # (optionally with a custom datasets.BuilderConfig subclass) and selected with
    # e.g. data = datasets.load_dataset('turku_hockey_data2text', 'main')
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="main", version=VERSION, description="Main config"),
]
    DEFAULT_CONFIG_NAME = "main"  # It's not mandatory to have a default configuration; set one only if it makes sense.
def _info(self):
        # This method specifies the datasets.DatasetInfo object, which contains information and type specifications for the dataset
features = datasets.Features(
{
"gem_id": datasets.Value("string"),
"id": datasets.Value("string"),
"news_article": datasets.Value("string"),
"events": datasets.features.Sequence(
{
"event_id": datasets.Value("string"),
"event_type": datasets.Value("string"),
"text": datasets.Value("string"),
"home_team": datasets.Value("string"),
"guest_team": datasets.Value("string"),
"score": datasets.Value("string"),
"periods": datasets.features.Sequence(datasets.Value("string")),
"features": datasets.features.Sequence(datasets.Value("string")),
"player": datasets.Value("string"),
"assist": datasets.features.Sequence(datasets.Value("string")),
"team": datasets.Value("string"),
"team_name": datasets.Value("string"),
"time": datasets.Value("string"),
"penalty_minutes": datasets.Value("string"),
"saves": datasets.Value("string"),
"multi_reference": datasets.Value("bool")
}
),
}
)
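        # For illustration, a generated example roughly takes this shape
        # (placeholder values, not actual corpus content):
        #   {
        #       "gem_id": "gem-turku_hockey_data2text-train-0",
        #       "id": "...",
        #       "news_article": "...",
        #       "events": [{"event_id": "E1", "event_type": "goal", "text": "...",
        #                   "score": "1-0", "assist": [], "multi_reference": False, ...}],
        #   }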
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
        # It accepts any type or nested list/dict and gives back the same structure with the URLs replaced by paths to local files.
        # By default, archives are extracted and the path to the cached extraction folder is returned instead of the archive path.
my_urls = _URLs
data_dir = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"], "split": "train"}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["validation"], "split": "validation"}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"], "split": "test"})
]
def _generate_examples(self, filepath, split):
""" Yields examples as (key, example) tuples. """
# This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
# The `key` is here for legacy reason (tfds) and is not important in itself.
with open(filepath, "rt", encoding="utf-8") as f:
data = json.load(f)
for i, example in enumerate(data):
example["gem_id"] = f"gem-turku_hockey_data2text-{split}-{i}" # fill in gem_id
example = self._generate_features(example)
yield i, example
def _generate_features(self, example):
_events = []
for i, e in enumerate(example["events"]):
d = {"event_id": e["event_id"],
"event_type": e["event_type"],
"text": e["text"] if e["text"] != None else "",
"home_team": e.get("home_team", ""),
"guest_team": e.get("guest_team", ""),
"score": e.get("score", ""),
"periods": e.get("periods", []),
"features": e.get("features", []),
"player": e.get("player", ""),
"assist": e.get("assist", []),
"team": e.get("team", ""),
"team_name": e.get("team_name", ""),
"time": e.get("time", ""),
"penalty_minutes": e.get("penalty_minutes", ""),
"saves": e.get("saves", ""),
"multi_reference": self._is_multireference(i, example["events"])
}
_events.append(d)
example["events"] = _events
return example
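
    # For illustration: an input event typically carries only the keys relevant
    # to its event_type (a hypothetical penalty event might look like
    # {"event_id": "E2", "event_type": "penalty", "text": None, "player": "...",
    # "penalty_minutes": "2"}), so _generate_features fills every missing field
    # with "" or [] to give all events the fixed schema declared in _info.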
    def _is_multireference(self, i, events):
        """Return True if this event is one of the multireference events
        (multiple events refer to the same text passage); otherwise, return False.
        """
        if events[i]["text"] is None:
            return False
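        # Reference texts look like "E12": the event_id of the event that holds
        # the shared text passage.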
multireference = re.compile("E[0-9]+")
if multireference.match(events[i]["text"]):
return True
        # this event can be the first of the multireference events (the one that includes the actual text passage)
multi_events = []
for event in events:
if event["text"] == None:
continue
if multireference.match(event["text"]):
multi_events.append(event["text"])
if events[i]["event_id"] in multi_events:
return True
return False
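
    # Illustration of the multireference convention with hypothetical events:
    #   {"event_id": "E1", "text": "...actual passage..."}  -> True (E2 refers to it)
    #   {"event_id": "E2", "text": "E1"}                     -> True (text is a reference)
    #   {"event_id": "E3", "text": "...own passage..."}      -> False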