0xhaz committed
Commit f9e169a · 1 Parent(s): 58340d4

Upload 25 files

.gitignore ADDED
@@ -0,0 +1 @@
+ venv
README.md ADDED
@@ -0,0 +1,42 @@
+ # article scraper
+
+ ### Setup
+ 1. Clone the repository.
+ ```sh
+ # https
+ git clone https://github.com/avila-bugayong-esguerra/article-scraper.git
+
+ # or
+
+ # ssh
+ git clone git@github.com:avila-bugayong-esguerra/article-scraper.git
+ ```
+
+ 2. Change directory into the project folder.
+ ```sh
+ cd article-scraper
+ ```
+
+ 3. Create a virtual environment.
+ ```sh
+ python -m venv venv
+ ```
+
+ 4. Activate the virtual environment.
+ ```sh
+ # windows
+ venv\Scripts\activate
+
+ # unix
+ source venv/bin/activate
+ ```
+
+ 5. Install the dependencies.
+ ```sh
+ pip install -r article_scraper/requirements.txt
+ ```
+
+ 6. Change directory into the Scrapy project.
+ ```sh
+ cd article_scraper
+ ```
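The README stops at step 6; the crawl itself is driven by `article_scraper/main.py` below, which shells out to `scrapy crawl kami`. For reference, a minimal programmatic sketch of the same run (not part of the commit; it assumes the setup steps above were followed, the working directory is the Scrapy project, and the chosen HTML snapshot is already present) would use Scrapy's `CrawlerProcess`:

```python
# minimal sketch, not part of the commit: run the "kami" spider without main.py
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())  # picks up settings via scrapy.cfg
process.crawl("kami")                             # spider name from spiders/kami_spider.py
process.start()                                   # blocks until the crawl finishes
```

In the committed workflow, the equivalent is `python main.py --dataset 3000` (or `5000`), which also downloads the snapshot if it is missing and writes the results to `../csv_files/kami.csv`.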
article_scraper/__init__.py ADDED
File without changes
article_scraper/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (158 Bytes)
article_scraper/__pycache__/settings.cpython-310.pyc ADDED
Binary file (413 Bytes)
article_scraper/items.py ADDED
@@ -0,0 +1,12 @@
+ # Define here the models for your scraped items
+ #
+ # See documentation in:
+ # https://docs.scrapy.org/en/latest/topics/items.html
+
+ import scrapy
+
+
+ class KamiScraperItem(scrapy.Item):
+     # define the fields for your item here like:
+     # name = scrapy.Field()
+     pass
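`items.py` is left as the `scrapy startproject` stub; the spider in this commit yields plain dicts rather than Items. Purely as an illustrative sketch (not part of the commit), the fields emitted by `spiders/kami_spider.py` could be declared on the item like this:

```python
# hypothetical sketch mirroring the dict yielded by spiders/kami_spider.py
import scrapy


class KamiScraperItem(scrapy.Item):
    title = scrapy.Field()
    article_text = scrapy.Field()
    summary = scrapy.Field()
    article_date = scrapy.Field()
    source = scrapy.Field()
```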
article_scraper/main.py ADDED
@@ -0,0 +1,30 @@
+ #!/usr/bin/env python3
+ import argparse
+ import os
+ from pathlib import Path
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="article scraper")
+     parser.add_argument("--dataset", "-d",
+                         help="the dataset to be made after articles are scraped (choices: 3000, 5000)",
+                         default="3000")
+     args = parser.parse_args()
+
+     path = os.getcwd()
+
+     if args.dataset == "3000":
+         path += "/3000.html"
+         if not Path(path).is_file():
+             os.system("wget https://raw.githubusercontent.com/jamesesguerra/misc/main/3000.html")
+         os.system("sed -i '' 's/5000/3000/' spiders/kami_spider.py")
+     elif args.dataset == "5000":
+         path += "/5000.html"
+         if not Path(path).is_file():
+             os.system("wget https://raw.githubusercontent.com/jamesesguerra/misc/main/5000.html")
+         os.system("sed -i '' 's/3000/5000/' spiders/kami_spider.py")
+     else:
+         print(f"Dataset {args.dataset} isn't a valid option.")
+         raise SystemExit(1)
+
+     os.system("scrapy crawl kami -O ../csv_files/kami.csv")
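Two notes on `main.py`: the empty string after `sed -i` is BSD/macOS syntax (GNU sed expects any backup suffix attached, e.g. `sed -i.bak`), and the dataset switch works by rewriting the snapshot filename baked into `spiders/kami_spider.py`. A portable sketch of that rewrite step (assuming the same relative layout, run from the `article_scraper/` directory) could use `pathlib` instead of `sed`:

```python
# sketch only: portable stand-in for the `sed -i ''` calls in main.py
from pathlib import Path

spider_path = Path("spiders/kami_spider.py")
source = spider_path.read_text(encoding="utf-8")
# point start_urls at the chosen snapshot, e.g. 5000.html -> 3000.html
spider_path.write_text(source.replace("5000", "3000"), encoding="utf-8")
```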
article_scraper/middlewares.py ADDED
@@ -0,0 +1,103 @@
+ # Define here the models for your spider middleware
+ #
+ # See documentation in:
+ # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+ from scrapy import signals
+
+ # useful for handling different item types with a single interface
+ from itemadapter import is_item, ItemAdapter
+
+
+ class KamiScraperSpiderMiddleware:
+     # Not all methods need to be defined. If a method is not defined,
+     # scrapy acts as if the spider middleware does not modify the
+     # passed objects.
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         # This method is used by Scrapy to create your spiders.
+         s = cls()
+         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+         return s
+
+     def process_spider_input(self, response, spider):
+         # Called for each response that goes through the spider
+         # middleware and into the spider.
+
+         # Should return None or raise an exception.
+         return None
+
+     def process_spider_output(self, response, result, spider):
+         # Called with the results returned from the Spider, after
+         # it has processed the response.
+
+         # Must return an iterable of Request, or item objects.
+         for i in result:
+             yield i
+
+     def process_spider_exception(self, response, exception, spider):
+         # Called when a spider or process_spider_input() method
+         # (from other spider middleware) raises an exception.
+
+         # Should return either None or an iterable of Request or item objects.
+         pass
+
+     def process_start_requests(self, start_requests, spider):
+         # Called with the start requests of the spider, and works
+         # similarly to the process_spider_output() method, except
+         # that it doesn’t have a response associated.
+
+         # Must return only requests (not items).
+         for r in start_requests:
+             yield r
+
+     def spider_opened(self, spider):
+         spider.logger.info('Spider opened: %s' % spider.name)
+
+
+ class KamiScraperDownloaderMiddleware:
+     # Not all methods need to be defined. If a method is not defined,
+     # scrapy acts as if the downloader middleware does not modify the
+     # passed objects.
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         # This method is used by Scrapy to create your spiders.
+         s = cls()
+         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+         return s
+
+     def process_request(self, request, spider):
+         # Called for each request that goes through the downloader
+         # middleware.
+
+         # Must either:
+         # - return None: continue processing this request
+         # - or return a Response object
+         # - or return a Request object
+         # - or raise IgnoreRequest: process_exception() methods of
+         #   installed downloader middleware will be called
+         return None
+
+     def process_response(self, request, response, spider):
+         # Called with the response returned from the downloader.
+
+         # Must either;
+         # - return a Response object
+         # - return a Request object
+         # - or raise IgnoreRequest
+         return response
+
+     def process_exception(self, request, exception, spider):
+         # Called when a download handler or a process_request()
+         # (from other downloader middleware) raises an exception.
+
+         # Must either:
+         # - return None: continue processing this exception
+         # - return a Response object: stops process_exception() chain
+         # - return a Request object: stops process_exception() chain
+         pass
+
+     def spider_opened(self, spider):
+         spider.logger.info('Spider opened: %s' % spider.name)
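`middlewares.py` is the unmodified project template, and neither middleware is enabled in `settings.py`. As a minimal illustration of the `process_request` hook described in the comments above (hypothetical, not something this commit uses), a downloader middleware that stamps a default User-Agent on every request would look like:

```python
# illustrative sketch only; this class is not part of the commit
class DefaultUserAgentMiddleware:
    def process_request(self, request, spider):
        # set the header only if the request doesn't already carry one,
        # then return None so Scrapy keeps processing the request
        request.headers.setdefault("User-Agent", "article_scraper (+https://example.org)")
        return None
```

It would then be registered under `DOWNLOADER_MIDDLEWARES` in `settings.py`, which this commit leaves commented out.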
article_scraper/pipelines.py ADDED
@@ -0,0 +1,13 @@
+ # Define your item pipelines here
+ #
+ # Don't forget to add your pipeline to the ITEM_PIPELINES setting
+ # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+ # useful for handling different item types with a single interface
+ from itemadapter import ItemAdapter
+
+
+ class KamiScraperPipeline:
+     def process_item(self, item, spider):
+         return item
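`pipelines.py` is likewise the empty template and is not turned on in `settings.py` (ITEM_PIPELINES stays commented out). If output validation were wanted at this stage, a hypothetical sketch of the same class could drop items that arrive without a summary:

```python
# hypothetical sketch; the committed pipeline is a no-op
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem


class KamiScraperPipeline:
    def process_item(self, item, spider):
        adapter = ItemAdapter(item)
        if not adapter.get("summary"):
            raise DropItem("missing summary")
        return item
```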
article_scraper/settings.py ADDED
@@ -0,0 +1,92 @@
+ # Scrapy settings for kami_scraper project
+ #
+ # For simplicity, this file contains only settings considered important or
+ # commonly used. You can find more settings consulting the documentation:
+ #
+ #     https://docs.scrapy.org/en/latest/topics/settings.html
+ #     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+ #     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+ BOT_NAME = 'article_scraper'
+
+ SPIDER_MODULES = ['article_scraper.spiders']
+ NEWSPIDER_MODULE = 'article_scraper.spiders'
+
+
+ # Crawl responsibly by identifying yourself (and your website) on the user-agent
+ #USER_AGENT = 'kami_scraper (+http://www.yourdomain.com)'
+
+ # Obey robots.txt rules
+ ROBOTSTXT_OBEY = False
+
+ # Configure maximum concurrent requests performed by Scrapy (default: 16)
+ #CONCURRENT_REQUESTS = 32
+
+ # Configure a delay for requests for the same website (default: 0)
+ # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+ # See also autothrottle settings and docs
+ #DOWNLOAD_DELAY = 3
+ # The download delay setting will honor only one of:
+ #CONCURRENT_REQUESTS_PER_DOMAIN = 16
+ #CONCURRENT_REQUESTS_PER_IP = 16
+
+ # Disable cookies (enabled by default)
+ #COOKIES_ENABLED = False
+
+ # Disable Telnet Console (enabled by default)
+ #TELNETCONSOLE_ENABLED = False
+
+ # Override the default request headers:
+ #DEFAULT_REQUEST_HEADERS = {
+ #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+ #   'Accept-Language': 'en',
+ #}
+
+ # Enable or disable spider middlewares
+ # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+ #SPIDER_MIDDLEWARES = {
+ #    'kami_scraper.middlewares.KamiScraperSpiderMiddleware': 543,
+ #}
+
+ # Enable or disable downloader middlewares
+ # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+ #DOWNLOADER_MIDDLEWARES = {
+ #    'kami_scraper.middlewares.KamiScraperDownloaderMiddleware': 543,
+ #}
+
+ # Enable or disable extensions
+ # See https://docs.scrapy.org/en/latest/topics/extensions.html
+ #EXTENSIONS = {
+ #    'scrapy.extensions.telnet.TelnetConsole': None,
+ #}
+
+ # Configure item pipelines
+ # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ #ITEM_PIPELINES = {
+ #    'kami_scraper.pipelines.KamiScraperPipeline': 300,
+ #}
+
+ # Enable and configure the AutoThrottle extension (disabled by default)
+ # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+ #AUTOTHROTTLE_ENABLED = True
+ # The initial download delay
+ #AUTOTHROTTLE_START_DELAY = 5
+ # The maximum download delay to be set in case of high latencies
+ #AUTOTHROTTLE_MAX_DELAY = 60
+ # The average number of requests Scrapy should be sending in parallel to
+ # each remote server
+ #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+ # Enable showing throttling stats for every response received:
+ #AUTOTHROTTLE_DEBUG = False
+
+ # Enable and configure HTTP caching (disabled by default)
+ # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+ #HTTPCACHE_ENABLED = True
+ #HTTPCACHE_EXPIRATION_SECS = 0
+ #HTTPCACHE_DIR = 'httpcache'
+ #HTTPCACHE_IGNORE_HTTP_CODES = []
+ #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
+
+ # Set settings whose default value is deprecated to a future-proof value
+ REQUEST_FINGERPRINTER_IMPLEMENTATION = '2.7'
+ TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
article_scraper/spiders/__init__.py ADDED
@@ -0,0 +1,4 @@
+ # This package will contain the spiders of your Scrapy project
+ #
+ # Please refer to the documentation for information on how to create and manage
+ # your spiders.
article_scraper/spiders/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (166 Bytes)
article_scraper/spiders/__pycache__/kami_spider.cpython-310.pyc ADDED
Binary file (1.96 kB)
article_scraper/spiders/kami_spider.py ADDED
@@ -0,0 +1,48 @@
+ import os
+ import scrapy
+ from langdetect import detect
+
+ from ..utils.dash_remover import clean
+ from ..utils.noise_word_remover import remove
+ from ..utils.highlights_remover import remove_highlights
+
+
+ class KamiSpider(scrapy.Spider):
+     name = 'kami'
+     print(os.getcwd())  # debug output: the directory the local HTML snapshot is loaded from
+     start_urls = [f'file://{os.getcwd()}/3000.html']
+
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+
+     def parse(self, response):
+         articles = response.css('div.c-article-card-horizontal__container a::attr(href)').getall()
+
+         for article in articles:
+             yield response.follow(article, callback=self.parse_articles)
+
+
+     def parse_articles(self, response):
+         # for article text
+         bold_text = remove(response.css("strong::text").getall())
+         article_text = remove(response.css("div.post__content p *::text").getall())
+
+         cleaned_article_text = remove_highlights(bold_text, article_text)
+         article_text = ' '.join(cleaned_article_text).strip()
+
+         # for summary
+         highlights = list(filter(lambda x: x.startswith("-"), response.css('strong::text').getall()))
+         cleaned_highlights = list(map(clean, highlights))
+         summary = '.'.join(cleaned_highlights).strip()
+
+         # skip articles with an empty summary; keep only those whose summary
+         # is not detected as English
+         if summary and detect(summary) != "en":
+             yield {
+                 'title': response.css("h1.c-main-headline::text").get(),
+                 'article_text': article_text,
+                 'summary': summary,
+                 'article_date': response.css("time::text").get(),
+                 'source': response.request.url
+             }
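Because the spider only ever loads a local HTML snapshot, its selectors are easy to check offline without running a crawl. A small sketch (the URL and markup below are invented for illustration) builds a fake response with `scrapy.http.HtmlResponse` and runs the same CSS queries that `parse_articles` uses:

```python
# sketch: exercise the parse_articles selectors against invented markup
from scrapy.http import HtmlResponse

html = b"""
<h1 class="c-main-headline">Sample title</h1>
<div class="post__content"><p>Body text. <strong>- sample highlight</strong></p></div>
<time>2022-01-01</time>
"""
response = HtmlResponse(url="https://example.org/sample", body=html, encoding="utf-8")

print(response.css("h1.c-main-headline::text").get())        # 'Sample title'
print(response.css("div.post__content p *::text").getall())  # ['- sample highlight']
print(response.css("time::text").get())                      # '2022-01-01'
```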
article_scraper/utils/__pycache__/dash_remover.cpython-310.pyc ADDED
Binary file (313 Bytes)
article_scraper/utils/__pycache__/highlights_remover.cpython-310.pyc ADDED
Binary file (459 Bytes)
article_scraper/utils/__pycache__/noise_word_remover.cpython-310.pyc ADDED
Binary file (1.38 kB)
article_scraper/utils/__pycache__/normalizer.cpython-310.pyc ADDED
Binary file (349 Bytes)
article_scraper/utils/__pycache__/placeRemover.cpython-310.pyc ADDED
Binary file (368 Bytes)
article_scraper/utils/dash_remover.py ADDED
@@ -0,0 +1,5 @@
+ def clean(sentence):
+     if sentence.startswith("-"):
+         return sentence[1:]
+
+     return sentence
article_scraper/utils/highlights_remover.py ADDED
@@ -0,0 +1,2 @@
+ def remove_highlights(highlights, article_text):
+     return list(filter(lambda x: x not in highlights, article_text))
article_scraper/utils/noise_word_remover.py ADDED
@@ -0,0 +1,21 @@
+ def remove(article_text):
+     noise_words = [
+         "KAMI",
+         "PAY ATTENTION:", "PAY ATTENTION",
+         ": Follow us on", "Instagram",
+         "- get the most important news directly in your favourite app!",
+         "Source: KAMI.com.gh",
+         "Like and share our Facebook posts to support the KAMI team! Share your thoughts in the comments. We love reading them!",
+         "Like and share our",
+         "Facebook posts",
+         "to support the KAMI team! Share your thoughts in the comments. We love reading them!",
+         "Click", '"See First" under the "Following" tab to see KAMI news on your News Feed',
+         'Click "See First" under the "Following" tab to see KAMI news on your News Feed',
+         'Click "See first" under the "Following" tab to see KAMI news on your Newsfeed',
+         'Click "See First" under the "Following" tab to see KAMI news on your News Feed!',
+         'Click "See First" under the "Following" tab to see',
+         "Tingnan ang mga balitang para sa'yo ➡️ hanapin ang \"Recommended for you\" block at mag-enjoy!",
+         "Update KAMI App now: the old version will be disabled on June, 15!"
+     ]
+
+     return list(filter(lambda x: x.strip() not in noise_words, article_text))
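`parse_articles` chains the three utilities above: `remove()` filters out boilerplate strings, `remove_highlights()` drops the bolded highlight sentences from the article body, and `clean()` strips the leading dash from each highlight before they are joined into the summary. A small worked example with invented strings (assuming the package is importable from the repository root):

```python
# invented sample data showing how kami_spider.parse_articles composes the utils
from article_scraper.utils.noise_word_remover import remove
from article_scraper.utils.highlights_remover import remove_highlights
from article_scraper.utils.dash_remover import clean

bold_text = remove(["- Sample highlight", "PAY ATTENTION:"])  # ['- Sample highlight']
paragraphs = remove(["- Sample highlight", "First sentence.", "PAY ATTENTION:", "Second sentence."])

article_text = " ".join(remove_highlights(bold_text, paragraphs)).strip()
summary = ".".join(clean(h) for h in bold_text if h.startswith("-")).strip()

print(article_text)  # First sentence. Second sentence.
print(summary)       # Sample highlight
```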
article_scraper/utils/normalizer.py ADDED
@@ -0,0 +1,5 @@
+ from unicodedata import normalize
+
+
+ def normalize_txt(txt):
+     return normalize('NFKD', txt)
article_scraper/utils/placeRemover.py ADDED
@@ -0,0 +1,7 @@
+ def clean(txt):
+     try:
+         idx = txt.index('—')
+         txt = txt[idx+1:].strip()  # keep only the text after the em dash (drops a leading dateline)
+     except ValueError:  # no em dash in the text
+         pass
+     return txt
scrapy.cfg ADDED
@@ -0,0 +1,11 @@
+ # Automatically created by: scrapy startproject
+ #
+ # For more information about the [deploy] section see:
+ # https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+ [settings]
+ default = article_scraper.settings
+
+ [deploy]
+ #url = http://localhost:6800/
+ project = article_scraper