"""mMARCO dataset.""" |
|
|
|
from collections import defaultdict |
|
from gc import collect |
|
import datasets |
|
from tqdm import tqdm |
|
import random |
|
|
|
|
|
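
# Example usage (a sketch; assumes this script is saved locally as "mmarco.py" and
# loaded with a `datasets` version that still supports script-based dataset loaders):
#
#   import datasets
#   triples = datasets.load_dataset("mmarco.py", "english", split="train")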

_CITATION = """
@misc{bonifacio2021mmarco,
      title={mMARCO: A Multilingual Version of the MS MARCO Passage Ranking Dataset},
      author={Luiz Henrique Bonifacio and Israel Campiotti and Vitor Jeronymo and Hugo Queiroz Abonizio and Roberto Lotufo and Rodrigo Nogueira},
      year={2021},
      eprint={2108.13897},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_URL = "https://github.com/unicamp-dl/mMARCO"

_DESCRIPTION = """
mMARCO: a multilingual (machine-translated) version of the MS MARCO passage ranking dataset.
"""

_BASE_URLS = {
    "collections": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/collections/",
    "queries-train": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/queries/train/",
    "queries-dev": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/queries/dev/",
    "runs": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/runs/",
    "train": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/triples.train.ids.small.tsv",
}

LANGUAGES = [
    "arabic",
    "chinese",
    "dutch",
    "english",
    "french",
    "german",
    "hindi",
    "indonesian",
    "italian",
    "japanese",
    "portuguese",
    "russian",
    "spanish",
    "vietnamese",
]


class MMarco(datasets.GeneratorBasedBuilder):
    """mMARCO training triples: queries paired with positive and negative passages."""

    BUILDER_CONFIGS = (
        [
            datasets.BuilderConfig(
                name=language,
                description=f"{language.capitalize()} triples",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"collection-{language}",
                description=f"{language.capitalize()} collection version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"queries-{language}",
                description=f"{language.capitalize()} queries version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"runs-{language}",
                description=f"{language.capitalize()} runs version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name="all",
                description="All training data version v2",
                version=datasets.Version("2.0.0"),
            )
        ]
    )

    # Expected number of unique training queries per language, used as a sanity
    # check in _generate_examples.
    size_per_lang = {lang: 398792 for lang in LANGUAGES}

    DEFAULT_CONFIG_NAME = "english"

    def _info(self):
        name = self.config.name
        assert name in LANGUAGES + ["all"], f"Unsupported language {name!r}. Must be one of {LANGUAGES} or 'all'."

        features = {
            "query_id": datasets.Value("string"),
            "query": datasets.Value("string"),
            "positive_passages": [
                {"docid": datasets.Value("string"), "text": datasets.Value("string")}
            ],
            "negative_passages": [
                {"docid": datasets.Value("string"), "text": datasets.Value("string")}
            ],
        }
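        # A yielded example will look like (illustrative values, not real data):
        # {
        #     "query_id": "1185869",
        #     "query": "what is ...",
        #     "positive_passages": [{"docid": "0", "text": "..."}],
        #     "negative_passages": [{"docid": "8", "text": "..."}, ...],
        # }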

        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        languages = [self.config.name] if self.config.name in LANGUAGES else LANGUAGES
        urls = {
            "collection": {lang: _BASE_URLS["collections"] + lang + "_collection.tsv" for lang in languages},
            "queries": {lang: _BASE_URLS["queries-train"] + lang + "_queries.train.tsv" for lang in languages},
            "train": _BASE_URLS["train"],
        }
        dl_path = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_path["train"],
                    "args": {
                        "collection": dl_path["collection"],
                        "queries": dl_path["queries"],
                    },
                },
            )
        ]

    def _generate_examples(self, files, args=None):
        """Yields examples."""
        languages = [self.config.name] if self.config.name in LANGUAGES else LANGUAGES

        # Each line of the triples file is "query_id \t positive_id \t negative_id".
        # Collect, per query, the sets of positive and negative passage ids.
        runs = {}
        with open(files, encoding="utf-8") as f:
            for line in f:
                query_id, pos_id, neg_id = line.rstrip().split("\t")
                if query_id not in runs:
                    runs[query_id] = [{pos_id}, {neg_id}]  # [positives, negatives]
                else:
                    runs[query_id][0].add(pos_id)
                    runs[query_id][1].add(neg_id)

        for lang in tqdm(languages, desc=f"Preparing training examples for {len(languages)} languages."):
            n_missed_q = 0
            n_missed_d = 0

            collection_path, queries_path = args["collection"][lang], args["queries"][lang]

            # Both files are two-column TSVs mapping an id to its text; split on the
            # first tab only, in case the text itself contains tabs.
            with open(collection_path, encoding="utf-8") as f:
                collection = dict(line.rstrip("\n").split("\t", 1) for line in f)

            with open(queries_path, encoding="utf-8") as f:
                queries = dict(line.rstrip("\n").split("\t", 1) for line in f)

            assert len(runs) == self.size_per_lang[lang]
|

            for query_id, (pos_ids, neg_ids) in runs.items():
                if query_id not in queries:
                    n_missed_q += 1
                    continue

                # Drop passage ids that are missing from this language's collection.
                pos_ids = [d for d in pos_ids if d in collection]
                neg_ids = [d for d in neg_ids if d in collection]
                if len(pos_ids) == 0 or len(neg_ids) == 0:
                    n_missed_d += 1
                    continue

                # Keep at most 10 distinct negatives per query, sampled without
                # replacement (random.choices would allow repeats).
                n_neg = min(10, len(neg_ids))
                neg_ids = random.sample(neg_ids, k=n_neg)

                features = {
                    "query_id": query_id,
                    "query": queries[query_id],
                    "positive_passages": [
                        {"docid": pos_id, "text": collection[pos_id]} for pos_id in pos_ids
                    ],
                    "negative_passages": [
                        {"docid": neg_id, "text": collection[neg_id]} for neg_id in neg_ids
                    ],
                }
                # query_id is unique within a language, so it makes a stable example key.
                yield f"{lang}-{query_id}", features
            print(f"Number of missed queries: {n_missed_q}. Number of missed passages: {n_missed_d}")
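

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the loader itself). Building even the
    # single-language English config downloads the full collection, queries, and
    # triples files, so run it only with ample bandwidth and disk space.
    dataset = datasets.load_dataset(__file__, "english", split="train")
    print(dataset[0])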