# flibusta/flibusta.py
# Author: rominf — initial commit (b681b53, verified)
import dataclasses
import re
import urllib.parse
import xml.etree.ElementTree
import zipfile
from dataclasses import dataclass
from textwrap import dedent
import datasets
# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)
# Base URL of the Flibusta mirror; all relative OPDS links are resolved against it.
FLIBUSTA_URL = "https://flibusta.is"
# BibTeX-style citation shown on the dataset card.
_CITATION = """\
@ONLINE{flibusta,
author = "Флибуста",
title = "Флибуста",
url = "https://flibusta.is"
}
"""
# Human-readable summary shown on the dataset card.
_DESCRIPTION = """\
Convenient access to books in Russian hosted on Flibusta (https://flibusta.is/).
Authors of the dataset do not endorse the usage of Flibusta for illegal
purposes: please read "Licensing Information" before use.
"""
# Licensing caveat: book content is fetched from flibusta.is at load time;
# Apache 2.0 covers only this script, not the books.
_LICENSE = """\
Books are stored on https://flibusta.is/ and may not be accessible from your
location because of legal reasons.
Please check with your local law if you can use this dataset.
The license Apache 2.0 applies only to the code.
"""
@dataclass
class BookInfo:
    """Metadata for one book discovered via the Flibusta OPDS search feed."""

    author: str  # author display name from the feed's <author><name> element
    id: str  # Flibusta book id: the last path component of the book page URL
    title: str  # book title from the feed's <title> element
    url: str  # absolute URL of the book's page on the site
    url_txt: str  # absolute URL of the zipped plain-text (txt+zip) download
@dataclass
class DownloadedBookInfo:
    """Pairs a BookInfo with the local path of its downloaded txt+zip archive."""

    book_info: BookInfo
    file_path: str  # local filesystem path returned by the download manager
class FlibustaConfig(datasets.BuilderConfig):
    """BuilderConfig carrying the search query that selects which books to load.

    Args:
        books_query: Search term sent to the Flibusta OPDS search endpoint;
            every matching book becomes one dataset example.
    """

    def __init__(self, *args, books_query, **kwargs):
        # Use the public ``datasets.Version`` (``datasets.utils.Version`` is a
        # private alias of the same class), matching its use elsewhere in this file.
        super().__init__(*args, version=datasets.Version("0.1.0"), **kwargs)
        self.books_query = books_query
class Flibusta(datasets.GeneratorBasedBuilder):
    """Dataset builder: searches Flibusta via OPDS and yields book texts."""

    BUILDER_CONFIG_CLASS = FlibustaConfig

    def _info(self):
        """Return dataset metadata: one string feature per BookInfo field plus "text"."""
        return datasets.DatasetInfo(
            citation=_CITATION,
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "author": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "url_txt": datasets.Value("string"),
                }
            ),
            homepage=FLIBUSTA_URL,
            version=datasets.Version("0.1.0"),
        )

    @staticmethod
    def _get_books(dl_manager, query):
        """Yield a BookInfo for every search hit, following OPDS pagination.

        Entries lacking an author, a title, a site link, or a txt+zip download
        link are skipped.

        Args:
            dl_manager: `datasets` download manager used to fetch each result page.
            query: search term for the OPDS "books" search.

        Yields:
            BookInfo: one per usable search-result entry.
        """
        flibusta_search_url = urllib.parse.urljoin(FLIBUSTA_URL, "opds/search")
        ns = "{http://www.w3.org/2005/Atom}"  # Atom namespace used by the OPDS feed
        page = 0
        while True:
            params_str = urllib.parse.urlencode(
                {
                    "searchType": "books",
                    "searchTerm": query,
                    "pageNumber": page,
                }
            )
            url = f"{flibusta_search_url}?{params_str}"
            logger.info("Extracting book infos from: %s", url)
            search_xml_file_path = dl_manager.download(url)
            # Close the downloaded feed file deterministically (the original
            # ``open(...).read()`` leaked the file handle).
            with open(search_xml_file_path, encoding="utf8") as search_xml_file:
                search_xml_str = search_xml_file.read()
            search_xml_tree = xml.etree.ElementTree.fromstring(search_xml_str)
            for entry in search_xml_tree.findall(f"./{ns}entry"):
                author = entry.findtext(f"./{ns}author/{ns}name")
                title = entry.findtext(f"./{ns}title")
                url_path_entry = entry.find(f'./{ns}link[@title="Книга на сайте"]')
                url_txt_path_entry = entry.find(
                    f'./{ns}link[@type="application/txt+zip"]'
                )
                if (
                    author
                    and title
                    and url_path_entry is not None
                    and url_txt_path_entry is not None
                ):
                    url_path = url_path_entry.get("href")
                    url = urllib.parse.urljoin(FLIBUSTA_URL, url_path)
                    url_txt_path = url_txt_path_entry.get("href")
                    url_txt = urllib.parse.urljoin(FLIBUSTA_URL, url_txt_path)
                    # The book id is the final path component of the book URL.
                    id_ = url_path.rpartition("/")[2]
                    yield BookInfo(
                        author=author,
                        id=id_,
                        title=title,
                        url=url,
                        url_txt=url_txt,
                    )
            # OPDS signals further result pages with a rel="next" link.
            if search_xml_tree.find(f'./{ns}link[@rel="next"]') is None:
                break
            page += 1

    def _split_generators(self, dl_manager):
        """Search for books, download their txt+zip archives, and build one TRAIN split."""
        book_infos = list(
            self._get_books(dl_manager=dl_manager, query=self.config.books_query)
        )
        # Download all archives in one batch, keyed by book id.
        book_urls_txt = {book_info.id: book_info.url_txt for book_info in book_infos}
        book_file_paths = dl_manager.download(book_urls_txt)
        downloaded_book_infos = [
            DownloadedBookInfo(
                book_info=book_info, file_path=book_file_paths[book_info.id]
            )
            for book_info in book_infos
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"downloaded_book_infos": downloaded_book_infos},
            )
        ]

    def _generate_examples(self, downloaded_book_infos):
        """Yield (book_id, example) pairs from the first .txt member of each archive."""
        for downloaded_book_info in downloaded_book_infos:
            # ``ZipFile`` accepts a path directly, and the context manager closes
            # both the archive and its underlying file (the original
            # ``ZipFile(open(path, "rb"))`` left the file handle unclosed).
            with zipfile.ZipFile(downloaded_book_info.file_path) as archive:
                for content_name in archive.namelist():
                    if content_name.endswith(".txt"):
                        txt_path = zipfile.Path(archive, at=content_name)
                        text = txt_path.read_text(encoding="utf8")
                        # Drop the Flibusta attribution line appended to the text.
                        text = re.sub(r"Взято из Флибусты,.*", "", text)
                        text = text.strip("\n")
                        text = dedent(text)
                        example = {
                            "text": text,
                            **dataclasses.asdict(downloaded_book_info.book_info),
                        }
                        yield downloaded_book_info.book_info.id, example
                        # Only the first .txt member is used per archive.
                        break