#!/usr/bin/env python3
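"""Collect GrabCraft build page URLs.

Walks the paginated listing at BASE_URL/minecraft/pg/<i>, extracts the
link behind each build's "More info" button, and writes the deduplicated
URLs to pages.txt; pages that still fail after MAX_RETRIES attempts are
logged to errors.txt.
"""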
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

import bs4
import requests
from tqdm import tqdm

NUM_PAGES = 422  # number of listing pages at the time of scraping
BASE_URL = 'https://www.grabcraft.com'
MAX_RETRIES = 5
RETRY_DELAY = 2  # seconds to wait between retry attempts

def fetch_page(i):
    """Return the build detail URLs linked from listing page i."""
    for attempt in range(MAX_RETRIES):
        try:
            # The timeout (value assumed here) keeps a stalled request
            # from hanging the worker thread indefinitely.
            r = requests.get(f'{BASE_URL}/minecraft/pg/{i}', timeout=10)
            if r.status_code == 200:
                soup = bs4.BeautifulSoup(r.text, 'html.parser')
                # Each build card links to its detail page via a "More info"
                # button; drop the trailing path segment to keep the
                # build's base URL.
                return [BASE_URL + '/'.join(a['href'].split('/')[:-1])
                        for a in soup.find_all('a', attrs={'class': 'button more-info details'})]
            else:
                print(f'Error: {r.status_code} on page {i}')
        except requests.RequestException as e:
            print(f'Exception: {e} on page {i}')
        time.sleep(RETRY_DELAY)
    # All retries exhausted: record the failure and return no URLs so
    # the main loop can keep going.
    with open('errors.txt', 'a') as error_file:
        error_file.write(f'Failed to fetch page {i} after {MAX_RETRIES} attempts.\n')
    return []

if __name__ == '__main__':
    pages = []
    # Fetch all listing pages concurrently; tqdm reports progress as
    # futures complete (completion order does not matter, results are merged).
    with ThreadPoolExecutor() as executor:
        futures = {executor.submit(fetch_page, i): i for i in range(NUM_PAGES + 1)}
        for future in tqdm(as_completed(futures), total=len(futures)):
            pages += future.result()
    # Deduplicate before writing: overlapping or retried pages can yield
    # the same URL more than once.
    with open('pages.txt', 'w') as f:
        f.write('\n'.join(set(pages)))
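
# A minimal usage sketch (assumes network access to grabcraft.com):
#   $ python get_pages.py
# This writes one build URL per line to pages.txt and appends any page
# numbers that failed after MAX_RETRIES attempts to errors.txt.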