File size: 2,124 Bytes
7a093ca
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import requests
from datasets import Dataset
from selectolax.lexbor import LexborHTMLParser

# How many pages to seek for article recommendations?
# (https://www.storm.mg/articles/{page_id})
N_PAGES_OF_ARTICLES_RECOMMENDATIONS = 100

# Listing-page URL template; the %i placeholder is the 1-based page number.
base_url = "https://www.storm.mg/articles/%i"
# Sent on every request so the site serves the regular desktop markup
# instead of blocking the default python-requests agent string.
user_agent = (
    # use mine, or put your user agent here
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
    "Chrome/121.0.0.0 Safari/537.36 OPR/107.0.0.0"
)

def read_article(link: str) -> list[str]:
    """Fetch an article page from www.storm.mg and extract its paragraphs.

    Args:
        link: Absolute URL of the article page.

    Returns:
        The text of every ``p[aid]`` element on the page (article body
        paragraphs), whitespace-normalized, in document order.

    Raises:
        requests.HTTPError: If the server responds with an error status.
        requests.Timeout: If the request exceeds the timeout.
    """
    # Without an explicit timeout a single stalled connection would hang
    # the whole scrape forever; fail fast and let the caller decide.
    r = requests.get(link, headers={"User-Agent": user_agent}, timeout=30)
    r.raise_for_status()

    parser = LexborHTMLParser(r.text)
    # NOTE(review): body paragraphs appear to be marked with an "aid"
    # attribute on storm.mg — confirm if the site layout changes.
    return [p.text(separator=" ", strip=True) for p in parser.css("p[aid]")]


def generate_dataset():
    """Yield one record per article card across the recommendation pages.

    Iterates pages 1..N_PAGES_OF_ARTICLES_RECOMMENDATIONS of
    https://www.storm.mg/articles/{page}, parses each article card, and
    fetches the full article body via read_article().

    Yields:
        dict with keys: image, title, content, tag, author, timestamp, link.

    Raises:
        requests.HTTPError: If a listing or article request fails.
        requests.Timeout: If a request exceeds the timeout.
    """
    for page_number in range(1, N_PAGES_OF_ARTICLES_RECOMMENDATIONS + 1):
        # Explicit timeout so one stalled page cannot hang the generator.
        r = requests.get(
            base_url % page_number,
            headers={"User-Agent": user_agent},
            timeout=30,
        )
        r.raise_for_status()

        parser = LexborHTMLParser(r.text)
        articles = parser.css(".category_cards_wrapper .category_card.card_thumbs_left")

        for article in articles:
            # NOTE(review): css_first returns None on a selector miss; this
            # assumes every card contains all of these elements — confirm.
            image = article.css_first("img").attributes['src']
            title = article.css_first(".card_title").text()
            tag = article.css_first(".tags_wrapper a").text()

            info = article.css_first("p.card_info.right")
            author = info.css_first(".info_author").text()
            timestamp = info.css_first(".info_time").text()
            link = article.css_first(".link_title").attributes['href']

            yield {
                "image": image,
                "title": title,
                "content": "\n".join(read_article(link)),
                "tag": tag,
                "author": author,
                "timestamp": timestamp,
                "link": link
            }

# Materialize the scraped records into a Hugging Face dataset and persist
# it locally. Each recommendation page carries roughly 20 articles, which
# is where the size estimate in the directory name comes from.
dataset = Dataset.from_generator(generate_dataset)
output_dir = f"storm-org-articles-{20 * N_PAGES_OF_ARTICLES_RECOMMENDATIONS}"
dataset.save_to_disk(output_dir)