raghuv-aditya committed
Commit 4b5594c · verified · 1 Parent(s): 3f9570a

Create scraper.py

Files changed (1)
  1. scraper.py +77 -0
scraper.py ADDED
@@ -0,0 +1,77 @@
+ import requests
+ from bs4 import BeautifulSoup
+ import re
+ import time
+
+ def get_course_links(page_url):
+     """
+     Scrapes the links of individual courses from the given page URL.
+
+     Args:
+         page_url (str): The URL of the page to scrape.
+
+     Returns:
+         list: A list of course URLs.
+     """
+     response = requests.get(page_url)
+     soup = BeautifulSoup(response.content, 'html.parser')
+     course_links = [a['href'] for a in soup.select('a.course-card.course-card__public.published') if 'href' in a.attrs]
+     course_links = [link if link.startswith('http') else f"https://courses.analyticsvidhya.com{link}" for link in course_links]
+     return course_links
+
+
+ def parse_course_page(url):
+     """
+     Extracts course details from a given course page URL.
+
+     Args:
+         url (str): The URL of the course page.
+
+     Returns:
+         dict: A dictionary containing course information.
+     """
+     response = requests.get(url)
+     soup = BeautifulSoup(response.content, 'html.parser')
+
+     title = soup.select_one('h1[class^="section__heading"]')
+     title = title.text.strip() if title else 'N/A'
+
+     description = soup.select_one('div.fr-view')
+     description = description.text.strip() if description else 'No description available'
+
+     curriculum = [re.sub(r'\s+', ' ', item.text.strip()) for item in soup.select('div.course-curriculum__chapter-content-wrapper')]
+     curriculum = ' | '.join(curriculum) if curriculum else 'No curriculum available'
+
+     return {
+         'Title': title,
+         'Description': description,
+         'Curriculum': curriculum,
+         'Link': url
+     }
+
+
+ def scrape_courses_json(base_url, num_pages=5):
+     """
+     Scrapes courses from multiple pages and returns course data.
+
+     Args:
+         base_url (str): Base URL of the course pages.
+         num_pages (int): Number of pages to scrape.
+
+     Returns:
+         list: A list of dictionaries, each containing course information.
+     """
+     data = []
+     for page_num in range(1, num_pages + 1):
+         page_url = f"{base_url}{page_num}"
+         course_links = get_course_links(page_url)
+
+         for link in course_links:
+             try:
+                 course_data = parse_course_page(link)
+                 data.append(course_data)
+                 time.sleep(1)  # Be courteous to the server
+             except Exception as e:
+                 print(f"Failed to scrape {link}: {e}")
+
+     return data
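
For context, a minimal usage sketch (not part of this commit): it assumes the catalog pages follow a base-URL-plus-page-number pattern, as implied by the f"{base_url}{page_num}" line in scrape_courses_json. The exact base_url value and the courses.json output path below are illustrative assumptions, not taken from the source.

import json

from scraper import scrape_courses_json

if __name__ == "__main__":
    # Illustrative base URL (assumption); scrape_courses_json appends the page number to it.
    base_url = "https://courses.analyticsvidhya.com/collections?page="
    courses = scrape_courses_json(base_url, num_pages=5)

    # Persist the scraped records as JSON for downstream use.
    with open("courses.json", "w", encoding="utf-8") as f:
        json.dump(courses, f, ensure_ascii=False, indent=2)

    print(f"Scraped {len(courses)} courses.")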
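One robustness caveat: requests.get is called without a timeout, so a stalled connection can hang the scraper indefinitely, and non-200 responses are parsed as if they had succeeded. A possible hardening, sketched with standard requests/urllib3 APIs (the fetch helper name is hypothetical, not part of the commit):

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def fetch(url, timeout=10):
    """Hypothetical helper: GET with a timeout, retries, and HTTP error checking."""
    session = requests.Session()
    retries = Retry(total=3, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retries))
    response = session.get(url, timeout=timeout)
    response.raise_for_status()  # surface 4xx/5xx instead of parsing error pages
    return response

get_course_links and parse_course_page could then swap requests.get(...) for fetch(...) with no other changes, since .content is still available on the returned response.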