Datasets:
Sub-tasks:
multi-class-classification
Languages:
English
Size:
1K<n<10K
Tags:
natural-language-understanding
ideology-classification
text-classification
natural-language-processing
License:
"""This script is used to get many posts from the desired subreddit(s)""" | |
import requests | |
import time | |
import random | |
import copy | |
import pandas as pd | |
"""This script is used to get many posts from the desired subreddit(s)""" | |
subreddit_list = [
    "theredpillrebooted",
    "RedPillWomen",
    "Feminism",
    "marriedredpill",
    "TheBluePill",
    "PurplePillDebate",
    "RedPillWives",
    "askMRP",
    "ForeverAloneWomen",
]
url_template = "https://www.reddit.com/r/{}/.json?t=all{}"
headers = {"User-Agent": "Testing Bot Gundam Wing"}
params = ""
str_log = []
# Upper bound on how many listing pages to request per subreddit
original_counter = 10000
counter = original_counter
post_list = []
for subreddit in subreddit_list:
    while counter > 0:
        print(f"Getting posts with params: {params}")
        print("\n\n\n\n")
        url = url_template.format(subreddit, params)
        response = requests.get(url, headers=headers)
        if response.ok:
            data = response.json()
            posts = data["data"]["children"]
            print(f"Got {len(posts)} posts")
            for post in posts:
                pdata = post["data"]
                post_id = pdata["id"]
                title = pdata["title"]
                text = pdata.get("selftext")
                score = pdata["score"]
                author = pdata["author"]
                date = pdata["created_utc"]
                # Use a separate name so the request URL above is not clobbered
                post_url = pdata.get("url_overridden_by_dest")
                print(f"{post_id}: {title} - {post_url}")
                # print("Keys are ", pdata.keys())  # debug print
                post_list.append(
                    [subreddit, post_id, title, text, post_url, score, author, date, pdata]
                )
            # Reddit paginates listings with an "after" cursor; None marks the end
            after = data["data"]["after"]
            if after is None:
                print("No more posts, broke on", subreddit, "with counter at", counter)
                # keep a log entry so the stopping point can be written to a txt file
                str_log.append(
                    "No more posts, broke on "
                    + subreddit
                    + " with counter at "
                    + str(counter)
                )
                break
            params = "&after=" + after
            counter -= 1
            time.sleep(random.randint(1, 45))  # polite, jittered delay between requests
        else:
            print(f"Error: {response.status_code}")
            break  # give up on this subreddit instead of retrying forever
    counter = original_counter
    params = ""
# Make a copy of the list
post_list_copy = copy.deepcopy(post_list)
# Save the list to a CSV file as a backup, to avoid running the script again
df = pd.DataFrame(
    post_list_copy,
    columns=[
        "subreddit",
        "id",
        "title",
        "text",
        "url",
        "score",
        "author",
        "date",
        "pdata",
    ],
)
df.to_csv("reddit_posts.csv", index=False)
# Add useful features to the dataframe
def pull_info_from_reddit_dict(
    post_data,
    fields=(
        "subreddit_subscribers",
        "num_comments",
        "ups",
        "downs",
        "upvote_ratio",
        "is_video",
    ),
):
    """Take a post dictionary from the Reddit API and return a list of the
    values for the specified fields."""
    return [post_data.get(field, "Not Found") for field in fields]
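# Quick illustration of the helper on a made-up, minimal post dict
# (illustrative values only, not real API output):
# pull_info_from_reddit_dict({"ups": 10, "num_comments": 3})
# -> ["Not Found", 3, 10, "Not Found", "Not Found", "Not Found"]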
# Create a new list of posts with the additional fields; drop the author
# (index 6, not useful for the analysis) and the raw pdata dict (index 8)
# so the rows line up with the 13 columns below
processed_posts = []
for post in post_list_copy:
    fields_to_add = pull_info_from_reddit_dict(post[8])
    temp_post = post[:6] + [post[7]] + fields_to_add
    processed_posts.append(temp_post)
# Save the final csv
df = pd.DataFrame(
    processed_posts,
    columns=[
        "subreddit",
        "id",
        "title",
        "text",
        "url",
        "score",
        # "author" was dropped above: not useful for the analysis
        "date",
        "subreddit_subscribers",
        "num_comments",
        "ups",
        "downs",
        "upvote_ratio",
        "is_video",
    ],
)
df.to_csv("reddit_posts_fm.csv", index=False)