shamikbose89
committed on
Commit
•
33ce7c2
1
Parent(s):
4a48492
Upload hansard_speech.py
Browse files - hansard_speech.py +26 -15
hansard_speech.py
CHANGED
@@ -15,9 +15,10 @@
|
|
15 |
A dataset containing every speech in the House of Commons from May 1979-July 2020.
|
16 |
"""
|
17 |
|
|
|
18 |
import os
|
19 |
import pandas as pd
|
20 |
-
|
21 |
import datasets
|
22 |
|
23 |
_CITATION = """@misc{odell, evan_2021,
|
@@ -113,12 +114,6 @@ class HansardSpeech(datasets.GeneratorBasedBuilder):
|
|
113 |
)
|
114 |
|
115 |
def _split_generators(self, dl_manager):
|
116 |
-
# TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
|
117 |
-
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
|
118 |
-
|
119 |
-
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
|
120 |
-
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
121 |
-
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
122 |
temp_dir = dl_manager.download_and_extract(_URLS["csv"])
|
123 |
csv_file = os.path.join(temp_dir, "hansard-speeches-v310.csv")
|
124 |
json_file = dl_manager.download(_URLS["json"])
|
@@ -132,20 +127,36 @@ class HansardSpeech(datasets.GeneratorBasedBuilder):
|
|
132 |
|
133 |
def _generate_examples(self, filepaths, split):
|
134 |
logger.warn("\nThis is a large dataset. Please be patient")
|
135 |
-
json_data = pd.read_json(filepaths[1])
|
136 |
-
csv_data_chunks = pd.read_csv(filepaths[0], chunksize=50000)
|
137 |
for data_chunk in csv_data_chunks:
|
|
|
138 |
for _, row in data_chunk.iterrows():
|
139 |
data_point = {}
|
140 |
for field in fields[:-3]:
|
141 |
-
data_point[field] = row[field]
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
146 |
opp_post = []
|
147 |
gov_post = []
|
148 |
data_point["government_posts"] = gov_post
|
149 |
data_point["opposition_posts"] = opp_post
|
150 |
-
data_point["parliamentary_posts"] =
|
151 |
yield data_point["id"], data_point
|
|
|
|
15 |
A dataset containing every speech in the House of Commons from May 1979-July 2020.
|
16 |
"""
|
17 |
|
18 |
+
import json
|
19 |
import os
|
20 |
import pandas as pd
|
21 |
+
from datetime import datetime
|
22 |
import datasets
|
23 |
|
24 |
_CITATION = """@misc{odell, evan_2021,
|
|
|
114 |
)
|
115 |
|
116 |
def _split_generators(self, dl_manager):
|
|
|
|
|
|
|
|
|
|
|
|
|
117 |
temp_dir = dl_manager.download_and_extract(_URLS["csv"])
|
118 |
csv_file = os.path.join(temp_dir, "hansard-speeches-v310.csv")
|
119 |
json_file = dl_manager.download(_URLS["json"])
|
|
|
127 |
|
128 |
def _generate_examples(self, filepaths, split):
|
129 |
logger.warn("\nThis is a large dataset. Please be patient")
|
130 |
+
json_data = pd.read_json(filepaths[1], dtype="object")
|
131 |
+
csv_data_chunks = pd.read_csv(filepaths[0], chunksize=50000, dtype="object")
|
132 |
for data_chunk in csv_data_chunks:
|
133 |
+
data_chunk.fillna("", inplace=True)
|
134 |
for _, row in data_chunk.iterrows():
|
135 |
data_point = {}
|
136 |
for field in fields[:-3]:
|
137 |
+
data_point[field] = row[field] if row[field] else ""
|
138 |
+
parl_post_list = []
|
139 |
+
if data_point["mnis_id"] and data_point["date"]:
|
140 |
+
if data_point["time"]:
|
141 |
+
speech_dt = (
|
142 |
+
data_point["date"] + " " + data_point["time"] + ":00"
|
143 |
+
)
|
144 |
+
else:
|
145 |
+
speech_dt = data_point["date"] + " 00:00:00"
|
146 |
+
speech_dt_obj = datetime.strptime(speech_dt, "%Y-%m-%d %H:%M:%S")
|
147 |
+
parl_posts = json_data[
|
148 |
+
(json_data["mnis_id"] == data_point["mnis_id"])
|
149 |
+
& (json_data["date"] == speech_dt_obj)
|
150 |
+
]["parliamentary_posts"]
|
151 |
+
if len(parl_posts) > 0:
|
152 |
+
parl_posts = parl_posts.iloc[0]
|
153 |
+
for item in parl_posts:
|
154 |
+
parl_post_list.append(item["parl_post_name"])
|
155 |
+
|
156 |
opp_post = []
|
157 |
gov_post = []
|
158 |
data_point["government_posts"] = gov_post
|
159 |
data_point["opposition_posts"] = opp_post
|
160 |
+
data_point["parliamentary_posts"] = parl_post_list
|
161 |
yield data_point["id"], data_point
|
162 |
+
break
|