|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
""" Multilexnorm dataset.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
|
|
|
|
# BibTeX citation for the MultiLexNorm shared-task overview paper
# (van der Goot et al., W-NUT 2021); surfaced via DatasetInfo.citation.
_CITATION = r"""\
@inproceedings{van-der-goot-etal-2021-multilexnorm,
    title = "{M}ulti{L}ex{N}orm: A Shared Task on Multilingual Lexical Normalization",
    author = {van der Goot, Rob and
      Ramponi, Alan and
      Zubiaga, Arkaitz and
      Plank, Barbara and
      Muller, Benjamin and
      San Vicente Roncal, I{\~n}aki and
      Ljube{\v{s}}i{\'c}, Nikola and
      {\c{C}}etino{\u{g}}lu, {\"O}zlem and
      Mahendra, Rahmad and
      {\c{C}}olako{\u{g}}lu, Talha and
      Baldwin, Timothy and
      Caselli, Tommaso and
      Sidorenko, Wladimir},
    booktitle = "Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)",
    month = nov,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.wnut-1.55",
    doi = "10.18653/v1/2021.wnut-1.55",
    pages = "493--509",
    abstract = "Lexical normalization is the task of transforming an utterance into its standardized form. This task is beneficial for downstream analysis, as it provides a way to harmonize (often spontaneous) linguistic variation. Such variation is typical for social media on which information is shared in a multitude of ways, including diverse languages and code-switching. Since the seminal work of Han and Baldwin (2011) a decade ago, lexical normalization has attracted attention in English and multiple other languages. However, there exists a lack of a common benchmark for comparison of systems across languages with a homogeneous data and evaluation setup. The MultiLexNorm shared task sets out to fill this gap. We provide the largest publicly available multilingual lexical normalization benchmark including 13 language variants. We propose a homogenized evaluation setup with both intrinsic and extrinsic evaluation. As extrinsic evaluation, we use dependency parsing and part-of-speech tagging with adapted evaluation metrics (a-LAS, a-UAS, and a-POS) to account for alignment discrepancies. The shared task hosted at W-NUT 2021 attracted 9 participants and 18 submissions. The results show that neural normalization systems outperform the previous state-of-the-art system by a large margin. Downstream parsing and part-of-speech tagging performance is positively affected but to varying degrees, with improvements of up to 1.72 a-LAS, 0.85 a-UAS, and 1.54 a-POS for the winning system.",
}
"""
|
|
|
|
|
|
|
# Short task description surfaced via DatasetInfo.description.
_DESCRIPTION = """\
For this task, participants are asked to develop a system that performs lexical normalization: the conversion of non-canonical texts to their canonical equivalent form. In particular, this task includes data from 12 languages.
"""

# Shared-task homepage, surfaced via DatasetInfo.homepage.
_HOMEPAGE = "http://noisy-text.github.io/2021/multi-lexnorm.html"

# Dataset license string, surfaced via DatasetInfo.license.
_LICENSE = "Creative Commons Attribution 4.0 International License."

# Root directory holding one sub-folder per language config, each with
# `train.norm` / `dev.norm` / `test.norm` split files (dev may be absent).
_DATA_DIR = "data"
|
|
|
|
|
class MultiLexNorm(datasets.GeneratorBasedBuilder):
    """MultiLexNorm lexical-normalization dataset (12 language variants).

    Each configuration selects one language (or code-switched pair); examples
    are ``{"inputs": ..., "targets": ...}`` string pairs read from
    tab-separated ``*.norm`` files under ``data/<config_name>/``.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="da", version=VERSION, description="Danish"),
        datasets.BuilderConfig(name="de", version=VERSION, description="German"),
        datasets.BuilderConfig(name="en", version=VERSION, description="English"),
        datasets.BuilderConfig(name="es", version=VERSION, description="Spanish"),
        datasets.BuilderConfig(name="hr", version=VERSION, description="Croatian"),
        datasets.BuilderConfig(name="id-en", version=VERSION, description="Indonesian-English"),
        datasets.BuilderConfig(name="it", version=VERSION, description="Italian"),
        datasets.BuilderConfig(name="nl", version=VERSION, description="Dutch"),
        datasets.BuilderConfig(name="sl", version=VERSION, description="Slovenian"),
        datasets.BuilderConfig(name="sr", version=VERSION, description="Serbian"),
        datasets.BuilderConfig(name="tr", version=VERSION, description="Turkish"),
        datasets.BuilderConfig(name="tr-de", version=VERSION, description="Turkish-German"),
    ]

    def _info(self):
        """Return dataset metadata: two plain-string features plus citation info."""
        features = datasets.Features(
            {
                "inputs": datasets.Value("string"),
                "targets": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _build_splits(self, dl_manager, split_names):
        """Download the requested split files and wrap each in a SplitGenerator.

        Raises FileNotFoundError (from the download manager) if any of the
        requested ``<split>.norm`` files is missing for this config.
        """
        # Map the on-disk split names to the canonical datasets.Split values.
        canonical = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        urls = {
            split: os.path.join(_DATA_DIR, self.config.name, split + ".norm")
            for split in split_names
        }
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=canonical[split],
                gen_kwargs={
                    "filepath": downloaded_files[split],
                    "split": split,
                },
            )
            for split in split_names
        ]

    def _split_generators(self, dl_manager):
        """Return split generators; fall back to train/test when dev is absent.

        Some language variants ship without a dev split; in that case the
        download of ``dev.norm`` raises FileNotFoundError and we retry with
        only train and test.
        """
        try:
            return self._build_splits(dl_manager, ["train", "dev", "test"])
        except FileNotFoundError:
            # This language variant has no dev.norm file.
            return self._build_splits(dl_manager, ["train", "test"])

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from one tab-separated ``*.norm`` file.

        Lines containing a tab are split into (inputs, targets); lines
        without a tab (e.g. blank sentence-separator lines) yield an empty
        example. Splitting on the first tab only means a stray extra tab in
        the target no longer raises ValueError, and a whitespace-only line
        no longer crashes the generator (the original `len(line) > 1` check
        attempted a 2-way unpack on such lines).
        """
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                if "\t" in line:
                    ip, tgt = line.split("\t", 1)
                else:
                    ip, tgt = "", ""
                yield key, {
                    "inputs": ip.strip(),
                    "targets": tgt.strip(),
                }
|
|