"""PRES retrieval dataset"""


import json
import os
import tempfile

import datasets
import patoolib

_DESCRIPTION = (
    'Spanish Passage Retrieval (PRES) dataset. '
    'Reference: https://mklab.iti.gr/results/spanish-passage-retrieval-dataset/'
)

_HOMEPAGE_URL = 'https://mklab.iti.gr/results/spanish-passage-retrieval-dataset/'
_LANGUAGES = {'es': 'ES'}
_VERSION = '1.0.0'


# Direct download link for the RAR archive containing the documents, topics and relevance annotations.
URL = 'http://mklab.iti.gr/files/PRES_Dataset.rar'


class PRESConfig(datasets.BuilderConfig):
    """BuilderConfig for PRESConfig."""

    def __init__(self, **kwargs):
        super().__init__(
            version=datasets.Version(_VERSION, ''), **kwargs
        )


class PRES(datasets.GeneratorBasedBuilder):
    """The XMarketDE category to product retrieval dataset"""

    BUILDER_CONFIGS = [
        PRESConfig(
            name=name,
            description=f'{name.title()} subset of the Spanish Passage Retrieval dataset.',
        )
        for name in ['corpus.sentences', 'corpus.documents', 'queries', 'qrels.s2s', 'qrels.s2p']
    ]

    BUILDER_CONFIG_CLASS = PRESConfig

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
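        # Parsed archive contents, filled lazily on the first call to _generate_examples.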
        self._data = None

    def _info(self):
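        # All configurations share the same flat schema; the qrels configurations store the
        # space-separated ids of relevant passages/documents in the `text` field.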
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
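        # The archive is only downloaded here; extraction is handled with patoolib in _generate_examples.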
        downloaded_archive = dl_manager.download(URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    'archive': downloaded_archive,
                    'split': 'test',
                },
            ),
        ]

    def _generate_examples(
        self,
        archive: str,
        split: str = None,
    ):

        if not self._data:
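            # Extract the RAR archive into a temporary directory and parse the three JSON files
            # (documents, topics, relevance annotations); the directory is removed after parsing.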
            with tempfile.TemporaryDirectory() as tmpdir:
                patoolib.extract_archive(archive, outdir=tmpdir)
                extracted_path = os.path.join(tmpdir, 'PRES Dataset')

                with open(os.path.join(extracted_path, 'docs.json')) as f:
                    docs = json.load(f)

                with open(os.path.join(extracted_path, 'topics.json')) as f:
                    topics = json.load(f)

                with open(os.path.join(extracted_path, 'relevance_passages.json')) as f:
                    rel_passages = json.load(f)

            corpus_sentences = []
            corpus_documents = []
            queries = dict()
            qrels_s2s = dict()
            qrels_s2p = dict()
            topic_to_queries = dict()
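            # Each topic groups several query formulations; map topic number -> its query ids so
            # topic-level passage annotations can be propagated to every query of that topic.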
            for topic in topics['topics']:
                topic_to_queries[topic['number']] = []
                for query in topic['queries']:
                    qid = query['number']
                    queries[qid] = query['text']
                    topic_to_queries[topic['number']].append(qid)
                    qrels_s2s[qid] = []
                    qrels_s2p[qid] = []

            known_passage_ids = set()

            # Passage ids are derived from the document number plus start/end offsets; every
            # annotated passage becomes a sentence-level (s2s) and a document-level (s2p) qrel
            # for each query of the topic.
            for annotated_topic in rel_passages['topics']:
                topic = annotated_topic['number']
                for annotation in annotated_topic['annotations']:
                    passage_id = f'doc_{annotation["docNo"]}_{annotation["start"]}_{annotation["end"]}'
                    doc_id = f'doc_{annotation["docNo"]}'
                    if passage_id not in known_passage_ids:
                        corpus_sentences.append({'_id': passage_id, 'text': annotation['text']})
                        known_passage_ids.add(passage_id)
                    for qid in topic_to_queries[topic]:
                        qrels_s2s[qid].append(passage_id)
                        qrels_s2p[qid].append(doc_id)

            for doc in docs['documents']:
                doc_id = f'doc_{doc["docNo"]}'
                corpus_documents.append({'_id': doc_id, 'text': doc['text']})

            self._data = {
                'corpus.sentences': corpus_sentences,
                'corpus.documents': corpus_documents,
                'queries': queries,
                'qrels.s2s': qrels_s2s,
                'qrels.s2p': qrels_s2p
            }

        if self.config.name not in self._data:
            raise ValueError(f'Unknown config name: {self.config.name}')

        if self.config.name.startswith('corpus'):
            for line in self._data[self.config.name]:
                yield line['_id'], line
        elif self.config.name == 'queries':
            for qid, query in self._data['queries'].items():
                yield qid, {
                    "_id": qid,
                    "text": query,
                }
        elif self.config.name.startswith('qrels'):
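            # One record per query; the relevant ids are joined into a single space-separated string.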
            for qid, dids in self._data[self.config.name].items():
                yield qid, {
                    "_id": qid,
                    "text": ' '.join(dids),
                }