'''
This script covers the data pre-processing: cleaning/tokenizing the text fields
and extracting text from image posts with pytesseract (OCR).

The dataset in the repo is the processed version.
'''

!pip install datasets -q

from datasets import load_dataset

dataset = load_dataset("mo-mittal/reddit_political_subs", trust_remote_code=True)
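
# Not part of the original notebook: a quick, optional sanity check of the split
# and the columns used below ('image', 'url', 'image_text') before OCR starts.
print(dataset)
print(dataset['train'].column_names)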

# Tesseract OCR engine plus its Python bindings, used to read text out of image posts.
!sudo apt install tesseract-ocr
!pip install pytesseract

import pytesseract
from concurrent.futures import ThreadPoolExecutor


def ocr_image(index):
    """OCR one row's image if its image_text is still empty and it is hosted on imgur; otherwise return None."""
    try:
        image_pil = dataset['train'][index]['image']
        url = dataset['train'][index]['url']

        if (dataset['train'][index]['image_text'] == '') and ('imgur' in url):
            text = pytesseract.image_to_string(image_pil)
            print(f'Sublime! Processed img at {index}')
            return text
    except Exception:
        # Any failed row (bad image, OCR error) just yields None.
        return None


num_workers = 8
imgur_text = []

# executor.map preserves input order, so results[i] corresponds to dataset['train'][i].
with ThreadPoolExecutor(max_workers=num_workers) as executor:
    results = list(executor.map(ocr_image, range(len(dataset['train']))))

# Keep only the entries where OCR actually produced text.
imgur_text.extend(filter(None, results))

import pandas as pd

# Move to a DataFrame, dropping the raw image column.
df = pd.DataFrame(dataset['train'].remove_columns(['image']))

# Write the OCR output back into the matching rows' image_text column.
results_list = list(results)
for i, text in enumerate(results_list):
    if text is not None:
        df.loc[i, 'image_text'] = text
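
# Not in the original code: the next step reloads 'reddit_political_subs.csv', so the
# merged DataFrame was presumably saved around here. A minimal sketch, assuming that
# filename and a plain dump without the index:
df.to_csv('reddit_political_subs.csv', index=False)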

# Text cleaning starts from the saved CSV (pandas is already imported above).
df = pd.read_csv('reddit_political_subs.csv')

import re
import string

import nltk
from nltk.corpus import stopwords

nltk.download('stopwords')


def clean_text(text):
    # r'\\n' and r'\\x..' match the literal two-character escape sequences
    # ('\n', '\xNN') left in the stored text, not actual newlines.
    text = re.sub(r'\\n', ' ', text)
    text = re.sub(r'\\x..', '', text)
    text = re.sub(r'[@|\\]', '', text)

    # Strip punctuation, lowercase, then drop English stopwords
    # (NLTK's stopword list is lowercase, so lowercasing must come first).
    text = text.translate(str.maketrans('', '', string.punctuation))
    text = text.lower()
    stop_words = set(stopwords.words('english'))
    text = ' '.join([word for word in text.split() if word not in stop_words])

    # Collapse any leftover runs of whitespace.
    text = ' '.join(text.split())

    return text

# read_csv turns empty image_text cells into NaN, so fill them before cleaning.
df['image_text'] = df['image_text'].fillna('').apply(clean_text)
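
# The module docstring also mentions tokenizing, but no tokenizer appears in the
# original code. A minimal sketch, assuming plain whitespace tokenization into a
# hypothetical 'image_tokens' column is what was intended:
df['image_tokens'] = df['image_text'].str.split()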
|