# pip install openai lxml cssselect requests

from datetime import date, datetime, timedelta
import json
import lxml.html
from lxml.cssselect import CSSSelector
import time

from openai import OpenAI
import requests

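# articles whose text contains any of these words are skipped below;
# 'deletion' also catches pages tagged with an on-page deletion notice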
tabooWords = ['deletion', 'rape', 'rapist', 'abuse', 'minor']

# load article links from English WikiNews date categories, skipping the most
# recent days (articles there may still be awaiting pre-publication review)
recentArticles = []
dateForArticle = {}
today = date.today()
for days in range(5, 12):
  tdate = today - timedelta(days=days)
  # category names use a non-zero-padded day, e.g. "Category:March_5,_2024", so avoid %d
  tstamp = f"{tdate.strftime('%B')}_{tdate.day},_{tdate.year}"
  r = requests.get(f"https://en.wikinews.org/wiki/Category:{tstamp}")
  contents = lxml.html.fromstring(r.content)
  selAnchor = CSSSelector('a')
  for linkEl in selAnchor(contents):
    link = str(linkEl.get('href'))
    skips = ('/Special:', '/Category:', 'Main_Page', 'Help:', 'Wikinews:', 'File:')
    if link.startswith('/wiki/') and not any(s in link for s in skips) and link not in dateForArticle:
      recentArticles.append(link)
      dateForArticle[link] = tdate.strftime("%Y/%m/%d")
  time.sleep(1)

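# OpenAI() picks up the API key from the OPENAI_API_KEY environment variable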
client = OpenAI()

outputs = []
for article in recentArticles:
  print(article)
  r = requests.get(f"https://en.wikinews.org{article}")
  contents = lxml.html.fromstring(r.content)

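  # pull the article body paragraphs, dropping Wikinews review/amendment notices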
  selMain = CSSSelector('.mw-body-content p')
  plaintxt = ""
  for para in selMain(contents):
    c = para.text_content()
    if 'pre-publication review' in c or 'last amended' in c:
      continue
    plaintxt += c + "\n"
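  # everything from the "Have an opinion" reader-comment footer onward is boilerplate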
  if 'Have an opinion' in plaintxt:
    plaintxt = plaintxt[:plaintxt.index('Have an opinion')]

  plaintxt = plaintxt.strip()

  lowered = plaintxt.lower()
  if any(taboo in lowered for taboo in tabooWords):
    print("Article is marked for deletion or covers a subject too sensitive for AI summarization")
    continue

  dt = dateForArticle[article]

  # collect the article's cited external sources, skipping share/license links
  selAnchor = CSSSelector('a[rel="nofollow"]')
  linkblocks = ['/wiki/', '.com/intent/tweet', 'creativecommons.org/licenses',
                'facebook.com/sharer.php', 'mailto:', 'reddit.com/submit',
                'linkedin.com/shareArticle']
  articleLinks = []
  for el in selAnchor(contents):
    link = el.get('href') or ''  # some anchors may lack an href
    if link and not any(blocker in link.lower() for blocker in linkblocks):
      articleLinks.append(link)

  qs = []
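  # ask the model for multiple-choice questions grounded in the article text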
  response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
      {
        "role": "system",
        "content": "You will be provided with an article from today's news. Provide 3-5 multiple choice questions based on the content of the article, especially newly-introduced facts or knowledge. Don't make the correct answer any more specific, numeric, or realistic compared to the others.\n Respond in JSON format: [{ question: 'Who was elected president of Sesame Street?', choices: ['Big Bird', 'Donald Duck'], answer: 'Big Bird' }]",
      },
      {
        "role": "user",
        "content": f"Here's the article: \n{plaintxt}",
      },
    ],
  )
  reply = response.choices[0].message.content
  # the model may wrap the JSON array in prose or code fences; keep only the array
  try:
    reply = reply[reply.index('[') : reply.rindex(']') + 1]
    qs = json.loads(reply)
  except ValueError:  # covers a missing '[' / ']' and json.JSONDecodeError
    print("Could not parse model reply as JSON; skipping article")
    continue

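  # keep only questions whose stated answer actually appears among the choices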
  for q in qs:
    if q["answer"] not in q["choices"]:
      continue

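    # record the question; "answer" stores the index of the correct choice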
    outputs.append({
      "question_date": dt,
      "question_url": f"https://en.wikinews.org{article}",
      "question_sentence": q["question"],
      "links": articleLinks,
      "choices": q["choices"],
      "answer_text": q["answer"],
      "answer": [ q["choices"].index(q["answer"]) ],
    })
  time.sleep(1)

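# write one JSON object per line (JSONL), with sequential per-day question ids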
tstamp = datetime.now().strftime("%Y%m%d")
with open(f"./{tstamp}_qa_public.jsonl", "w") as fi:
  for idx, op in enumerate(outputs):
    op["question_id"] = f"{tstamp}_{idx}"
    op["question_source"] = "WikiNews"
    fi.write(json.dumps(op) + "\n")