"""Generate multiple-choice quiz questions from recent English Wikinews articles.

Scrapes articles published 5-11 days ago, asks an OpenAI model to write
questions about each one, and writes the results to a dated JSONL file.
"""
from datetime import date, datetime, timedelta
import json
import time

import lxml.html
import requests
from lxml.cssselect import CSSSelector
from openai import OpenAI

# Skip articles that are flagged for deletion or that touch topics too
# sensitive for automated question generation.
tabooWords = ['deletion', 'rape', 'rapist', 'abuse', 'minor']
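
# Collect links to articles published 5-11 days ago, one daily category
# page per day; strftime("%B_%d,_%Y") yields page names like "June_01,_2024".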
recentArticles = []
dateForArticle = {}
today = date.today()
for days in range(5, 12):
    tdate = today - timedelta(days=days)
    tstamp = tdate.strftime("%B_%d,_%Y")
    r = requests.get(f"https://en.wikinews.org/wiki/Category:{tstamp}")
    contents = lxml.html.fromstring(r.content)
    selAnchor = CSSSelector('a')
    for linkEl in selAnchor(contents):
        link = str(linkEl.get('href'))
        if (link.startswith('/wiki/')
                and '/Special:' not in link
                and '/Category:' not in link
                and 'Main_Page' not in link
                and 'Help:' not in link
                and 'Wikinews:' not in link
                and 'File:' not in link):
            recentArticles.append(link)
            dateForArticle[link] = tdate.strftime("%Y/%m/%d")
    time.sleep(1)
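
# The OpenAI client reads its API key from the OPENAI_API_KEY
# environment variable.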
client = OpenAI()

outputs = []
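
# For each article: pull the body text, screen it against the taboo list,
# collect its source links, then ask the model for quiz questions.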
for article in recentArticles:
    print(article)
    r = requests.get(f"https://en.wikinews.org{article}")
    contents = lxml.html.fromstring(r.content)

    # Pull the article's paragraph text, dropping review/amendment notices
    # and everything from the "Have an opinion" comment prompt onward.
    selMain = CSSSelector('.mw-body-content p')
    plaintxt = ""
    for para in selMain(contents):
        c = para.text_content()
        if 'pre-publication review' in c or 'last amended' in c:
            continue
        plaintxt += c + "\n"
    if 'Have an opinion' in plaintxt:
        plaintxt = plaintxt[:plaintxt.index('Have an opinion')]

    plaintxt = plaintxt.strip()
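
    # Skip articles containing any taboo word; 'deletion' also catches
    # pages that have been marked for deletion.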
    if any(taboo in plaintxt.lower() for taboo in tabooWords):
        print("Article marked for deletion or about a subject too sensitive for AI summarization")
        continue

    dt = dateForArticle[article]
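
    # Collect the article's outbound links; anchors with rel="nofollow"
    # are typically the external source citations on Wikinews. Skip
    # share buttons, licence links, and internal wiki links.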
    selAnchor = CSSSelector('a[rel="nofollow"]')
    foundElements = selAnchor(contents)
    linkblocks = ['/wiki/', '.com/intent/tweet', 'creativecommons.org/licenses',
                  'facebook.com/sharer.php', 'mailto:', 'reddit.com/submit',
                  'linkedin.com/shareArticle']
    articleLinks = []
    for el in foundElements:
        link = el.get('href')
        if not link:  # anchors without an href would crash .lower()
            continue
        if any(blocker in link.lower() for blocker in linkblocks):
            continue
        articleLinks.append(link)
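
    # Ask the model for 3-5 multiple-choice questions; the system prompt
    # pins down the expected JSON shape of the reply.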
    qs = []
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "system",
                "content": "You will be provided with an article from today's news. Provide 3-5 multiple choice questions based on the content of the article, especially newly-introduced facts or knowledge. Don't make the correct answer any more specific, numeric, or realistic compared to the others.\n Respond in JSON format: [{ question: 'Who was elected president of Sesame Street?', choices: ['Big Bird', 'Donald Duck'], answer: 'Big Bird' }]",
            },
            {
                "role": "user",
                "content": f"Here's the article: \n{plaintxt}",
            },
        ],
    )
    reply = response.choices[0].message.content
    # The model may wrap the JSON in prose or a code fence; keep only the
    # bracketed array. If parsing fails, qs stays empty and the article
    # contributes no questions.
    try:
        reply = reply[reply.index('[') : reply.rindex(']') + 1]
        qs = json.loads(reply)
    except ValueError:  # .index() misses, or json.JSONDecodeError
        print("Could not parse model reply as JSON")
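
    # Keep only questions whose stated answer actually appears among the
    # choices, then record them in the output schema.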
    for q in qs:
        if q["answer"] not in q["choices"]:
            continue

        outputs.append({
            "question_date": dt,
            "question_url": f"https://en.wikinews.org{article}",
            "question_sentence": q["question"],
            "links": articleLinks,
            "choices": q["choices"],
            "answer_text": q["answer"],
            "answer": [q["choices"].index(q["answer"])],
        })
    time.sleep(1)
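
# Write one JSON object per question to a dated JSONL file,
# e.g. ./20240601_qa_public.jsonl.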
tstamp = datetime.now().strftime("%Y%m%d")
with open(f"./{tstamp}_qa_public.jsonl", "w") as fi:
    for idx, op in enumerate(outputs):
        op["question_id"] = f"{tstamp}_{idx}"
        op["question_source"] = "WikiNews"
        fi.write(json.dumps(op) + "\n")