monsoon-nlp
committed
Commit cb58ca6 • 1 Parent(s): 603f6e2
add script to scrape from wikinews
scrape.py
ADDED
@@ -0,0 +1,42 @@
+# pip install openai lxml cssselect requests xmltodict
+
+from datetime import datetime
+import json
+import lxml.html
+from lxml.cssselect import CSSSelector
+
+import requests
+import xmltodict
+
+# load the latest published articles from English Wikinews as an Atom feed
+r = requests.get("https://en.wikinews.org/w/index.php?title=Special:NewsFeed&feed=atom&categories=Published&count=5")
+data = xmltodict.parse(r.content)
+
+outputs = []
+entries = data["feed"]["entry"]
+for en in entries:
+    # en["summary"]["#text"] holds the article body as HTML
+    # en["title"] holds the headline
+    dtme = datetime.strptime(en["updated"], "%Y-%m-%dT%H:%M:%SZ")
+    dt = dtme.strftime("%Y/%m/%d")
+
+    summ = lxml.html.fromstring(en["summary"]["#text"])
+
+    selAnchor = CSSSelector('a[rel="nofollow"]')  # external source links in the summary
+    foundElements = selAnchor(summ)
+    for el in foundElements:
+        print(el.get("href"))
+
+    plaintxt = summ.text_content()
+    if 'Have an opinion on this story?' in plaintxt:  # trim the Wikinews feedback footer
+        plaintxt = plaintxt[:plaintxt.find('Have an opinion on this story?')]
+    # print(plaintxt)
+
+    outputs.append({"question_date": dt, "question_url": en["link"]["@href"]})
+
+tstamp = datetime.now().strftime("%Y%m%d")
+with open(f"./{tstamp}_qa_public.jsonl", "w") as fi:  # one JSON object per line
+    for idx, op in enumerate(outputs):
+        op["question_id"] = f"{tstamp}_{idx}"
+        op["question_source"] = "WikiNews"
+        fi.write(json.dumps(op) + "\n")