import re
import time

import requests
from bs4 import BeautifulSoup


def get_course_links(page_url):
"""
Scrapes the links of individual courses from the given page URL.
Args:
page_url (str): The URL of the page to scrape.
Returns:
list: A list of course URLs.
"""
response = requests.get(page_url)
soup = BeautifulSoup(response.content, 'html.parser')
course_links = [a['href'] for a in soup.select('a.course-card.course-card__public.published') if 'href' in a.attrs]
course_links = [link if link.startswith('http') else f"https://courses.analyticsvidhya.com{link}" for link in course_links]
return course_links


def parse_course_page(url):
    """
    Extract course details from a given course page URL.

    Args:
        url (str): The URL of the course page.

    Returns:
        dict: A dictionary containing the course title, description, curriculum, and link.
    """
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, 'html.parser')

    # Course title appears in a heading whose class starts with "section__heading".
    title = soup.select_one('h1[class^="section__heading"]')
    title = title.text.strip() if title else 'N/A'

    # Course description lives in the rich-text container.
    description = soup.select_one('div.fr-view')
    description = description.text.strip() if description else 'No description available'

    # Collapse whitespace in each curriculum chapter and join the chapters with " | ".
    curriculum = [re.sub(r'\s+', ' ', item.text.strip()) for item in soup.select('div.course-curriculum__chapter-content-wrapper')]
    curriculum = ' | '.join(curriculum) if curriculum else 'No curriculum available'

    return {
        'Title': title,
        'Description': description,
        'Curriculum': curriculum,
        'Link': url
    }


def scrape_courses_json(base_url, num_pages=5):
    """
    Scrape courses from multiple listing pages and return the collected course data.

    Args:
        base_url (str): Base URL of the course listing pages; the page number is appended to it.
        num_pages (int): Number of pages to scrape.

    Returns:
        list: A list of dictionaries, each containing information about one course.
    """
    data = []
    for page_num in range(1, num_pages + 1):
        page_url = f"{base_url}{page_num}"
        print(f"Scraping page {page_num}: {page_url}")
        course_links = get_course_links(page_url)
        print(f"Found {len(course_links)} courses on page {page_num}")

        for link in course_links:
            try:
                print(f"Scraping course: {link}")
                course_data = parse_course_page(link)
                data.append(course_data)
                time.sleep(1)  # Be courteous to the server
            except Exception as e:
                print(f"Failed to scrape {link}: {e}")

    print("Scraping completed.")
    return data
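

if __name__ == "__main__":
    # Minimal usage sketch. The base URL below is an assumption inferred from the
    # absolute-link prefix used in get_course_links(); substitute the actual course
    # listing URL (including its page-number query parameter) before running.
    example_base_url = "https://courses.analyticsvidhya.com/collections?page="
    courses = scrape_courses_json(example_base_url, num_pages=1)
    print(f"Scraped {len(courses)} courses")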