# pip install lxml cssselect requests xmltodict
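# Fetch recently published WikiNews articles from the Atom feed and write one
# JSON record per article (date, URL, id, source) to a dated .jsonl file.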

from datetime import datetime
import json

import lxml.html
from lxml.cssselect import CSSSelector
import requests
import xmltodict

# fetch the five most recently published articles from the English WikiNews Atom feed
r = requests.get("https://en.wikinews.org/w/index.php?title=Special:NewsFeed&feed=atom&categories=Published&count=5")
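# parse the Atom XML into nested dicts: attributes become "@..." keys, text nodes "#text"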
data = xmltodict.parse(r.content)

outputs = []
entries = data["feed"]["entry"]
# xmltodict yields a single dict (not a list) when the feed contains only one entry
if not isinstance(entries, list):
  entries = [entries]
for en in entries:
  # en["summary"]["#text"]
  # en["title"]
  dtme = datetime.strptime(en["updated"], "%Y-%m-%dT%H:%M:%SZ")
  dt = dtme.strftime("%Y/%m/%d")

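  # the summary is an HTML fragment; parse it so it can be queried and flattened to text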
  summ = lxml.html.fromstring(en["summary"]["#text"])

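  # print the external source links cited in the summary (marked rel="nofollow" in the feed HTML)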
  selAnchor = CSSSelector('a[rel="nofollow"]')
  foundElements = selAnchor(summ)
  for el in foundElements:
    print(el.get("href"))

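  # plain text of the summary, with the "Have an opinion on this story?" reader-feedback footer trimmed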
  plaintxt = summ.text_content()
  if 'Have an opinion on this story?' in plaintxt:
    plaintxt = plaintxt[:plaintxt.find('Have an opinion on this story?')]
  # print(plaintxt)

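  # note: assumes each entry has a single <link>; xmltodict would return a list here if there were several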
  outputs.append({"question_date": dt, "question_url": en["link"]["@href"]})

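# write the collected records as JSON Lines, one object per line, to a file named for today's date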
tstamp = datetime.now().strftime("%Y%m%d")
with open(f"./{tstamp}_qa_public.jsonl", "w") as fi:
  for idx, op in enumerate(outputs):
    op["question_id"] = f"{tstamp}_{idx}"
    op["question_source"] = "WikiNews"
    fi.write(json.dumps(op) + "\n")