Compare commits: 0726bcccb0...master (35 commits)

| SHA1 |
|---|
| 8698c21fda |
| 273cf56a3b |
| 1af26f50f2 |
| c40c5cea50 |
| a6224f9b6a |
| b64711973f |
| 9e125dfae0 |
| f16f82fdfb |
| a523154848 |
| 9e754a5584 |
| 1b005570ee |
| 17fa9f93f9 |
| 1e51e10db2 |
| 225fd8b3ea |
| d686ae0bc4 |
| 69f5788745 |
| b5d644a223 |
| 6508156aa4 |
| 738ab8e441 |
| fdd84a8786 |
| ab0ab0a010 |
| 6a1259aa7d |
| 164239b343 |
| ce1f2745c9 |
| e70bdc9ca1 |
| d1c1e17f4f |
| 816a727d79 |
| 84ab27a75e |
| 6d9103c154 |
| e57a86c60a |
| a3ec9451e3 |
| f2c294ebdb |
| 1b9b207a28 |
| 6abe7d68e0 |
| 7d919039b6 |
.gitignore (vendored): 1 change
@@ -2,3 +2,4 @@
 venv/
 .vscode/*
 __pycache__/
+sitemap.html
README.md: 20 changes
@@ -1 +1,21 @@
 # Concurrent web scraper
+
+## Requirements
+
+This crawler was written in Python 3.7.0 to take advantage of the latest `asyncio` features.
+
+Install required modules:
+
+```bash
+pip install -r requirements.txt
+```
+
+Run:
+
+```bash
+python crawler.py -u https://urltocrawl.com
+```
+
+## Results
+
+The resulting sitemap will be output in the root of this directory as `sitemap.html`.
crawler.py: 78 changes
@@ -4,15 +4,19 @@ Need a docstring.
 '''
 
 import argparse
-from utils.helpers import (UrlPool, WebPage, sanitise_url)
-from pprint import pprint
+import jinja2
+import os
+import asyncio
+from datetime import datetime
+from utils.helpers import (UrlPool, WebPage, RobotsTxt, sanitise_url)
 
-def init_crawler(base_url=None):
+def init_crawler(base_url=None, robots=None):
     '''
-    needs a docstring
+    Initialises the crawler by running the initial URL.
     '''
     uncrawled_urls, crawled_urls = UrlPool(), UrlPool()
-    initial_page = WebPage(url=base_url, base_url=base_url)
+    initial_page = WebPage(url=base_url, base_url=base_url, robots=robots)
 
     try:
         initial_page.run()
@@ -20,11 +24,19 @@ def init_crawler(base_url=None):
         print(e)
 
     initial_urls = initial_page.list_urls()
 
     # ensure the base URL isn't crawled again
     try:
         initial_urls.remove(base_url)
     except KeyError:
         pass
+    # also ensure base URL wasn't discovered with a trailing slash on the
+    # initial page scrape
+    try:
+        initial_urls.remove("".join([base_url, '/']))
+    except KeyError:
+        pass
+
     # Add the base URL to the crawled pool
     crawled_urls.add_to_pool(base_url)
 
@@ -36,39 +48,67 @@ def init_crawler(base_url=None):
     return(uncrawled_urls, crawled_urls)
 
 
-def process_pool(base_url=None, uncrawled_urls=None, crawled_urls=None):
+def process_pool(base_url=None, uncrawled_urls=None, crawled_urls=None, robots=None):
     '''
-    Needs a docstring
+    Iterates over the pool of URLs and adds any discovered URLs.
     '''
     while uncrawled_urls.pool:
         # pop url from pool
         new_url = uncrawled_urls.remove_from_pool()
         # create a WebPage object for the URL
-        current_page = WebPage(url=new_url, base_url=base_url)
+        current_page = WebPage(url=new_url, base_url=base_url, robots=robots)
         try:
-            current_page.run()
-            _urls = current_page.list_urls()
-            crawled_urls.add_to_pool(new_url)
+            succeeded = current_page.run()
         except Exception as e:
             print(e)
 
-        for url in _urls:
-            sanitised_url = sanitise_url(url=url)
-            if sanitised_url not in crawled_urls.pool:
-                uncrawled_urls.add_to_pool(url)
+        if succeeded:
+            _urls = current_page.list_urls()
+            crawled_urls.add_to_pool(new_url)
+
+            for url in _urls:
+                sanitised_url = sanitise_url(url=url)
+                if sanitised_url not in crawled_urls.pool:
+                    uncrawled_urls.add_to_pool(url)
+
+            print('{0} URLs crawled, {1} remaining'.format(len(crawled_urls.pool),
+                                                           len(uncrawled_urls.pool)))
+
+
+def render_sitemap(base_url=None, crawled_urls=None, runtime=None):
+    '''
+    Renders the sitemap as an HTML file.
+    '''
+    urlcount = len(crawled_urls)
+    sorted_urls = sorted(crawled_urls)
+
+    tmpl = jinja2.Environment(
+        loader=jinja2.FileSystemLoader('templates')
+    ).get_template('sitemap.html.j2')
+
+    rendered_html = tmpl.render(base_url=base_url, urlcount=urlcount, urls=sorted_urls, runtime=runtime)
+
+    with open('sitemap.html', 'w') as outfile:
+        outfile.write(rendered_html)
+
+    print('Sitemap available at {0}/sitemap.html'.format(os.getcwd()))
+
+
 def run(args=None):
     '''
     needs a docstring.
     '''
+    starttime = datetime.now()
+
     base_url = sanitise_url(args.url, base_url=True)
-    uncrawled_urls, crawled_urls = init_crawler(base_url)
-    process_pool(base_url, uncrawled_urls, crawled_urls)
+    robots = RobotsTxt(base_url=base_url)
+
+    uncrawled_urls, crawled_urls = init_crawler(base_url, robots)
+    process_pool(base_url, uncrawled_urls, crawled_urls, robots)
 
-    pprint(crawled_urls.pool)
-    print('{0} URLs crawled'.format(len(crawled_urls.pool)))
+    runtime = int((datetime.now() - starttime).total_seconds())
+    render_sitemap(base_url=base_url, crawled_urls=crawled_urls.pool, runtime=runtime)
 
 
 if __name__ == '__main__':
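The `render_sitemap()` flow added above is standard Jinja2: build an `Environment` with a `FileSystemLoader`, fetch the template, then render it with keyword arguments. A minimal standalone sketch of that step, assuming the repo's `templates/sitemap.html.j2`; the example.com values are made-up placeholders, not data from these commits:

```python
# Minimal sketch of the Jinja2 render step used by render_sitemap();
# the URLs, counts and runtime below are placeholder values.
import jinja2

tmpl = jinja2.Environment(
    loader=jinja2.FileSystemLoader('templates')
).get_template('sitemap.html.j2')

# the template expects base_url, urlcount, urls and runtime
html = tmpl.render(base_url='http://example.com',
                   urlcount=2,
                   urls=['http://example.com/a', 'http://example.com/b'],
                   runtime=3)

with open('sitemap.html', 'w') as outfile:
    outfile.write(html)
```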
notes.md: 61 changes
@@ -1,9 +1,56 @@
 ## Thoughts
 
-###### for each URL, do the following:
-* mark it as crawled
-* get page content
-* if that fails, mark the link as invalid
-* find all links in the content
-* check each link for dupes
-* add to pool or discard
+* ~~strip hashes and everything following (as they're in-page anchors)~~
+* ~~strip args~~
+* ~~use `pop()` on the set instead of `.remove()`~~
+* ~~return false once the set is empty~~
+* ~~`WebPage.parse_urls()` needs to compare startswith to base url~~
+* ~~ignore any links which aren't to pages~~
+* ~~better url checking to get bare domain~~ #wontfix
+* ~~remove trailing slash from any discovered url~~
+* ~~investigate lxml parser~~
+* ~~remove base url from initial urls with and without trailing slash~~
+* ~~investigate using [tldextract](https://github.com/john-kurkowski/tldextract) to match urls~~ #wontfix
+* ~~implement parsing of [robots.txt](http://docs.w3cub.com/python~3.6/library/urllib.robotparser/)~~
+* ~~investigate [gzip encoding](https://stackoverflow.com/questions/36383227/avoid-downloading-images-using-beautifulsoup-and-urllib-request)~~
+* ~~implement some kind of progress display~~
+* async
+* better exception handling
+* randomise output filename
+
+### Async bits
+
+in `__main__`:
+
+```python
+loop = asyncio.get_event_loop()
+try:
+    loop.run_until_complete(main())
+finally:
+    loop.close()
+```
+
+* initialises loop and runs it to completion
+* needs to handle errors (try/except/finally)
+
+```python
+async def run(args=None):
+    tasks = []
+
+    for url in pool:
+        tasks.append(url)
+    # for i in range(10):
+    #     tasks.append(asyncio.ensure_future(myCoroutine(i)))
+
+    # gather completed tasks
+    await asyncio.gather(*tasks)
+```
+
+Getting the contents of the page needs to be async too
+
+```python
+async def get_source():
+    blah
+    blah
+    await urlopen(url)
+```
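The `get_source()` sketch in those notes won't run as-is (`urlopen` is not awaitable), so here is a hedged example of what the async fetch could look like with aiohttp, which utils/helpers.py now imports; `fetch()`, `main()` and the example URL are placeholders, not code from these commits:

```python
# Sketch of an awaitable page fetch with aiohttp; fetch()/main() and
# the URL are placeholders, not part of these commits.
import asyncio
import aiohttp

async def fetch(session, url):
    # request the page and return its body as text
    async with session.get(url) as response:
        return await response.text()

async def main():
    async with aiohttp.ClientSession() as session:
        source = await fetch(session, 'http://example.com')
        print(len(source))

# same loop pattern as the __main__ snippet in the notes above
loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(main())
finally:
    loop.close()
```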
requirements.txt: 5 changes
@@ -3,5 +3,6 @@ bs4==0.0.1
 certifi==2018.8.13
 chardet==3.0.4
 idna==2.7
-requests==2.19.1
-urllib3==1.23
+Jinja2==2.10
+lxml==4.2.4
+MarkupSafe==1.0
templates/sitemap.html.j2: 14 changes (new file)
@@ -0,0 +1,14 @@
+<html>
+<head>
+    <title>Sitemap for {{ base_url }}</title>
+</head>
+<body>
+    <p>
+    Crawled {{ urlcount }} URLs on {{ base_url }} in ~{{ runtime }} seconds.
+    <ul>
+    {% for url in urls %}
+        <li><a href="{{ url }}">{{ url }}</a></li>
+    {% endfor %}
+    </ul>
+</body>
+</html>
@@ -1,37 +1,35 @@
 #!/usr/bin/env python
 
 import unittest
-from utils.helpers import (clean_base_url)
+from utils.helpers import (sanitise_url)
 
 class TestUrls(unittest.TestCase):
 
-    base_url = "github.com"
 
     base_url_list = (('eu.httpbin.org', 'http://eu.httpbin.org'),
                      ('www.simonweald.com', 'http://www.simonweald.com'),
                      ('http://www.github.com/', 'http://www.github.com'),
                      ('https://www.github.com', 'https://www.github.com'))
 
-    valid_urls = ["https://www.github.com", "http://www.github.com",
-                  "github.com", "/some/url/", "index.html"]
+    urls_to_clean = (('https://www.github.com/', 'https://www.github.com/'),
+                     ('https://github.com/?foo=bar', 'https://github.com/'),
+                     ('https://github.com/#anchor', 'https://github.com/'))
 
 
-    def test_clean_base_url(self):
+    def test_sanitise_base_url(self):
         '''
         Tests whether a URL's protocol can be discovered if not provided.
         '''
         for url, target in self.base_url_list:
-            result = clean_base_url(url)
+            result = sanitise_url(url, base_url=True)
             self.assertEqual(result, target)
 
-    # def test_url_validation(self):
-    # '''
-    # Passes when given a valid URL. A valid URL is qualified
-    # by being local to the domain to be crawled.
-    # '''
-    # for url in self.valid_urls:
-    #     result = url_validation(self.base_url, url)
-    #     self.assertTrue(result)
+    def test_sanitise_url(self):
+        '''
+        Tests that query strings and anchors are stripped from URLs.
+        '''
+        for url, target in self.urls_to_clean:
+            result = sanitise_url(url)
+            self.assertEqual(result, target)
 
 
 if __name__ == '__main__':
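The utils/helpers.py diff below cuts off before `sanitise_url()`'s body, but the test cases above pin down its contract: with `base_url=True` it defaults the scheme to `http://` and strips a trailing slash; otherwise it drops query strings and in-page anchors. A sketch of behaviour matching those tests, assuming nothing about the shipped implementation:

```python
# Illustrative reimplementation that satisfies the test cases above;
# the real utils/helpers.py version is not shown in this diff.
from urllib.parse import urlsplit, urlunsplit

def sanitise_url(url, base_url=False):
    if base_url:
        # assume http:// when no scheme was supplied; drop any trailing slash
        if not url.startswith(('http://', 'https://')):
            url = 'http://{0}'.format(url)
        return url.rstrip('/')
    # strip query strings and in-page anchors, keep the path intact
    scheme, netloc, path, _, _ = urlsplit(url)
    return urlunsplit((scheme, netloc, path, '', ''))
```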
utils/helpers.py
@@ -3,8 +3,12 @@
 Utilities to provide various misc functions.
 '''
 
-import urllib.request
 from bs4 import BeautifulSoup
+import aiohttp
+import urllib.request
+import urllib.robotparser
+import urllib.error
+import gzip
 from urllib.parse import (urljoin, urlsplit)
 
 
@@ -45,11 +49,16 @@ class WebPage(object):
     the data from each individual page.
     '''
 
-    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
+    # set a sane user-agent and request compression if available.
+    headers = {'Accept-Encoding': 'gzip, deflate',
+               'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
 
-    def __init__(self, url=None, base_url=None):
+    def __init__(self, url=None, base_url=None, robots=None):
         self.url = url
         self.base_url = base_url
+        self.robots = robots
+        self.source = None
+        self.urls_to_crawl = set()
 
 
     def get_source(self):
@@ -59,7 +68,16 @@ class WebPage(object):
 
         request = urllib.request.Request(self.url, headers=self.headers)
         page = urllib.request.urlopen(request, timeout=5)
-        self.source = page.read()
+
+        # handle the content encoding in case it needs decompressing.
+        if 'text/html' in page.info().get('Content-Type'):
+            if page.info().get('Content-Encoding'):
+                if page.info().get('Content-Encoding') == 'gzip':
+                    self.source = gzip.decompress(page.read())
+                elif page.info().get('Content-Encoding') == 'deflate':
+                    self.source = page.read()
+            else:
+                self.source = page.read()
 
 
     def find_links(self):
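For reference, `gzip.decompress()` in the hunk above is the inverse of what a server does when it honours the `Accept-Encoding: gzip` request header; a quick round-trip check of that path (note the deflate branch currently passes the body through unchanged):

```python
# Round-trip check for the gzip path: decompress() reverses compress().
import gzip

body = gzip.compress(b'<html>hello</html>')
assert gzip.decompress(body) == b'<html>hello</html>'
```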
@@ -69,7 +87,7 @@ class WebPage(object):
         '''
         hrefs = set()
 
-        soup = BeautifulSoup(self.source, 'html.parser')
+        soup = BeautifulSoup(self.source, 'lxml')
         links = soup.find_all('a', href=True)
 
         for link in links:
@@ -86,37 +104,74 @@ class WebPage(object):
         Iterate through the list of discovered URLs and add them to the
         pool if they start with the base URL.
         '''
-        self.urls_to_crawl = set()
 
         for url in self.discovered_hrefs:
-            if url.startswith(self.url):
+            if url.startswith(self.base_url) and self.robots.check(url):
                 sanitised_url = sanitise_url(url=url)
                 self.urls_to_crawl.add(sanitised_url)
 
 
     def list_urls(self):
         '''
-        Returns the contents of the
+        Returns all valid discovered URLs.
         '''
 
         return self.urls_to_crawl
 
 
     def run(self):
+        '''
+        Attempt to get the page's source and if successful, iterate through it
+        to find any links we can crawl.
+        '''
         try:
             self.get_source()
-        except Exception as e:
-            print(e)
+        except Exception:
+            # skip if we didn't retrieve the source.
+            pass
 
-        try:
+        if self.source:
             self.find_links()
-        except Exception as e:
-            print(e)
+            self.parse_urls()
+            return True
+        else:
+            return False
+
+
+class RobotsTxt(object):
+    '''
+    needs a docstring
+    '''
+
+    def __init__(self, base_url=None):
+        '''
+        Manually retrieve robots.txt to allow us to set the user-agent.
+        '''
+        self.base_url = base_url
+        self.headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
+
+        robots_url = urljoin(self.base_url, 'robots.txt')
+        request = urllib.request.Request(robots_url, headers=self.headers)
+
+        robots = urllib.robotparser.RobotFileParser()
+        robots.set_url(robots_url)
+
         try:
-            self.parse_urls()
-        except Exception as e:
-            print(e)
+            response = urllib.request.urlopen(request, timeout=5)
+        except urllib.error.HTTPError:
+            robots.allow_all = True
+        else:
+            data = response.read()
+            decoded_data = data.decode("utf-8").splitlines()
+            robots.parse(decoded_data)
+
+        self.robots = robots
+
+
+    def check(self, url):
+        '''
+        Test if robots allows us to crawl that URL.
+        '''
+        return self.robots.can_fetch("*", url)
+
+
 def sanitise_url(url, base_url=False):
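`RobotsTxt` is a thin wrapper around the stdlib parser: feed it the robots.txt lines, then ask `can_fetch()` per URL, exactly as `check()` does. A self-contained sketch of that behaviour; the rules and example.com URLs below are made up for illustration, not fetched from a real site:

```python
# Stand-alone demo of the parser RobotsTxt wraps; the rules and URLs
# here are illustrative placeholders.
import urllib.robotparser

robots = urllib.robotparser.RobotFileParser()
robots.parse(['User-agent: *', 'Disallow: /private/'])

assert robots.can_fetch('*', 'http://example.com/page.html')
assert not robots.can_fetch('*', 'http://example.com/private/page.html')
```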