Compare commits: c383fb7ee9...master (62 commits)
| SHA1 |
|---|
| 8698c21fda |
| 273cf56a3b |
| 1af26f50f2 |
| c40c5cea50 |
| a6224f9b6a |
| b64711973f |
| 9e125dfae0 |
| f16f82fdfb |
| a523154848 |
| 9e754a5584 |
| 1b005570ee |
| 17fa9f93f9 |
| 1e51e10db2 |
| 225fd8b3ea |
| d686ae0bc4 |
| 69f5788745 |
| b5d644a223 |
| 6508156aa4 |
| 738ab8e441 |
| fdd84a8786 |
| ab0ab0a010 |
| 6a1259aa7d |
| 164239b343 |
| ce1f2745c9 |
| e70bdc9ca1 |
| d1c1e17f4f |
| 816a727d79 |
| 84ab27a75e |
| 6d9103c154 |
| e57a86c60a |
| a3ec9451e3 |
| f2c294ebdb |
| 1b9b207a28 |
| 6abe7d68e0 |
| 7d919039b6 |
| 0726bcccb0 |
| 05e907ecec |
| abc628106d |
| c436016e0c |
| 03554fde80 |
| 759f965e95 |
| 0517e5bc56 |
| 1b18aa83eb |
| 5e0d9fd568 |
| 915def3a5d |
| 453331d69d |
| 2b812da26a |
| fb096b4468 |
| 5d94991167 |
| 482d23dd4f |
| 452de87f35 |
| 73cb883151 |
| 5c933fc5c9 |
| 25f8c4c686 |
| 0d0438670c |
| 8a1fd39dc4 |
| 79b10798a3 |
| fb6b976391 |
| a04de7f4de |
| 665ec1d7a7 |
| 65fc332925 |
| c6ce63838f |
.gitignore (vendored, 4 changes)
@@ -1 +1,5 @@
*~
venv/
.vscode/*
__pycache__/
sitemap.html
README.md (new file, 21 lines)
@@ -0,0 +1,21 @@
# Concurrent web scraper

## Requirements

This crawler was written with Python 3.7.0 to take advantage of the latest `asyncio` features.

Install the required modules:

```bash
pip install -r requirements.txt
```

Run:

```bash
python crawler.py -u https://urltocrawl.com
```

## Results

The resulting sitemap will be written to the root of this directory as `sitemap.html`.
crawler.py (new file, 120 lines)
@@ -0,0 +1,120 @@
#!/usr/bin/env python
'''
Recursive web crawler which maps a site and renders the results as an
HTML sitemap.
'''

import argparse
import asyncio  # not used yet; the async conversion is planned (see notes.md)
import os
from datetime import datetime

import jinja2

from utils.helpers import (UrlPool, WebPage, RobotsTxt, sanitise_url)


def init_crawler(base_url=None, robots=None):
    '''
    Initialises the crawler by crawling the initial URL.
    '''
    uncrawled_urls, crawled_urls = UrlPool(), UrlPool()
    initial_page = WebPage(url=base_url, base_url=base_url, robots=robots)

    try:
        initial_page.run()
    except Exception as e:
        print(e)

    initial_urls = initial_page.list_urls()

    # ensure the base URL isn't crawled again
    try:
        initial_urls.remove(base_url)
    except KeyError:
        pass
    # also ensure the base URL wasn't discovered with a trailing slash on
    # the initial page scrape
    try:
        initial_urls.remove("".join([base_url, '/']))
    except KeyError:
        pass

    # add the base URL to the crawled pool
    crawled_urls.add_to_pool(base_url)

    for url in initial_urls:
        sanitised_url = sanitise_url(url=url)
        if sanitised_url not in crawled_urls.pool:
            uncrawled_urls.add_to_pool(sanitised_url)

    return uncrawled_urls, crawled_urls


def process_pool(base_url=None, uncrawled_urls=None, crawled_urls=None, robots=None):
    '''
    Iterates over the pool of URLs and adds any discovered URLs.
    '''
    while uncrawled_urls.pool:
        # pop a URL from the pool
        new_url = uncrawled_urls.remove_from_pool()
        # create a WebPage object for the URL
        current_page = WebPage(url=new_url, base_url=base_url, robots=robots)
        # default to False so a raised exception doesn't leave
        # `succeeded` undefined below
        succeeded = False
        try:
            succeeded = current_page.run()
        except Exception as e:
            print(e)

        if succeeded:
            _urls = current_page.list_urls()
            crawled_urls.add_to_pool(new_url)

            for url in _urls:
                sanitised_url = sanitise_url(url=url)
                if sanitised_url not in crawled_urls.pool:
                    # queue the sanitised form so pool membership
                    # checks stay consistent
                    uncrawled_urls.add_to_pool(sanitised_url)

        print('{0} URLs crawled, {1} remaining'.format(len(crawled_urls.pool),
                                                       len(uncrawled_urls.pool)))


def render_sitemap(base_url=None, crawled_urls=None, runtime=None):
    '''
    Renders the sitemap as an HTML file.
    '''
    urlcount = len(crawled_urls)
    sorted_urls = sorted(crawled_urls)

    tmpl = jinja2.Environment(
        loader=jinja2.FileSystemLoader('templates')
    ).get_template('sitemap.html.j2')

    rendered_html = tmpl.render(base_url=base_url, urlcount=urlcount,
                                urls=sorted_urls, runtime=runtime)

    with open('sitemap.html', 'w') as outfile:
        outfile.write(rendered_html)

    print('Sitemap available at {0}/sitemap.html'.format(os.getcwd()))


def run(args=None):
    '''
    Crawls the site from the supplied base URL, timing the crawl, then
    renders the sitemap.
    '''
    starttime = datetime.now()

    base_url = sanitise_url(args.url, base_url=True)
    robots = RobotsTxt(base_url=base_url)

    uncrawled_urls, crawled_urls = init_crawler(base_url, robots)
    process_pool(base_url, uncrawled_urls, crawled_urls, robots)

    runtime = int((datetime.now() - starttime).total_seconds())

    render_sitemap(base_url=base_url, crawled_urls=crawled_urls.pool, runtime=runtime)


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Recursive web crawler')
    parser.add_argument("-u", "--url", required=True, help="Base URL to crawl")
    args = parser.parse_args()

    run(args)
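For anyone reading the code, the flow that `run()` wires together can also be driven step by step from another script or a REPL. A minimal sketch using the functions above (the URL is the README's placeholder, and `runtime=0` is just a stand-in value):

```python
import crawler
from utils.helpers import RobotsTxt, sanitise_url

# normalise the starting URL and fetch the site's robots.txt
base_url = sanitise_url('https://urltocrawl.com', base_url=True)
robots = RobotsTxt(base_url=base_url)

# seed the pools from the first page, then drain the uncrawled pool
uncrawled, crawled = crawler.init_crawler(base_url=base_url, robots=robots)
crawler.process_pool(base_url=base_url, uncrawled_urls=uncrawled,
                     crawled_urls=crawled, robots=robots)

# render sitemap.html; runtime is a stand-in value here
crawler.render_sitemap(base_url=base_url, crawled_urls=crawled.pool, runtime=0)
```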
notes.md (new file, 56 lines)
@@ -0,0 +1,56 @@
## Thoughts

* ~~strip hashes and everything following (as they're in-page anchors)~~
* ~~strip args~~
* ~~use `pop()` on the set instead of `.remove()`~~
* ~~return false once the set is empty~~
* ~~`WebPage.parse_urls()` needs to compare startswith to base url~~
* ~~ignore any links which aren't to pages~~
* ~~better url checking to get bare domain~~ #wontfix
* ~~remove trailing slash from any discovered url~~
* ~~investigate lxml parser~~
* ~~remove base url from initial urls with and without trailing slash~~
* ~~investigate using [tldextract](https://github.com/john-kurkowski/tldextract) to match urls~~ #wontfix
* ~~implement parsing of [robots.txt](http://docs.w3cub.com/python~3.6/library/urllib.robotparser/)~~
* ~~investigate [gzip encoding](https://stackoverflow.com/questions/36383227/avoid-downloading-images-using-beautifulsoup-and-urllib-request)~~
* ~~implement some kind of progress display~~
* async
* better exception handling
* randomise output filename

### Async bits

in `__main__`:

```python
loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(main())
finally:
    loop.close()
```

* initialises the loop and runs it to completion
* needs to handle errors (try/except/finally)

```python
async def run(args=None):
    tasks = []

    for url in pool:
        tasks.append(url)
    # for i in range(10):
    #     tasks.append(asyncio.ensure_future(myCoroutine(i)))

    # gather completed tasks
    await asyncio.gather(*tasks)
```

Getting the contents of the page needs to be async too:

```python
async def get_source():
    blah
    blah
    await urlopen(url)
```
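The notes above sketch the loop setup, the task gathering, and the async fetch separately. As a rough illustration of how they might fit together with `aiohttp` (which `utils/helpers.py` already imports, though it isn't pinned in `requirements.txt` yet); `fetch`, `main`, and the example URL here are illustrative names, not code from the repo:

```python
import asyncio

import aiohttp


async def fetch(session, url):
    '''Fetch one page, returning its HTML source or None for non-HTML.'''
    async with session.get(url) as response:
        if 'text/html' in response.headers.get('Content-Type', ''):
            return await response.text()
        return None


async def main(urls):
    '''Fetch all URLs concurrently, collecting exceptions instead of raising.'''
    timeout = aiohttp.ClientTimeout(total=5)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        tasks = [fetch(session, url) for url in urls]
        return await asyncio.gather(*tasks, return_exceptions=True)


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    try:
        pages = loop.run_until_complete(main(['https://urltocrawl.com']))
        print('{0} pages fetched'.format(len(pages)))
    finally:
        loop.close()
```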
requirements.txt
@@ -3,5 +3,6 @@ bs4==0.0.1
certifi==2018.8.13
chardet==3.0.4
idna==2.7
requests==2.19.1
urllib3==1.23
Jinja2==2.10
lxml==4.2.4
MarkupSafe==1.0
templates/sitemap.html.j2 (new file, 14 lines)
@@ -0,0 +1,14 @@
<html>
  <head>
    <title>Sitemap for {{ base_url }}</title>
  </head>
  <body>
    <p>
      Crawled {{ urlcount }} URLs on {{ base_url }} in ~{{ runtime }} seconds.
    </p>
    <ul>
      {% for url in urls %}
      <li><a href="{{ url }}">{{ url }}</a></li>
      {% endfor %}
    </ul>
  </body>
</html>
test_helpers.py (new file, 36 lines)
@@ -0,0 +1,36 @@
#!/usr/bin/env python

import unittest

from utils.helpers import (sanitise_url)


class TestUrls(unittest.TestCase):

    base_url_list = (('eu.httpbin.org', 'http://eu.httpbin.org'),
                     ('www.simonweald.com', 'http://www.simonweald.com'),
                     ('http://www.github.com/', 'http://www.github.com'),
                     ('https://www.github.com', 'https://www.github.com'))

    urls_to_clean = (('https://www.github.com/', 'https://www.github.com/'),
                     ('https://github.com/?foo=bar', 'https://github.com/'),
                     ('https://github.com/#anchor', 'https://github.com/'))

    def test_sanitise_base_url(self):
        '''
        Tests whether a URL's protocol can be discovered if not provided.
        '''
        for url, target in self.base_url_list:
            result = sanitise_url(url, base_url=True)
            self.assertEqual(result, target)

    def test_sanitise_url(self):
        '''
        Tests that queries and fragments are stripped from discovered URLs.
        '''
        for url, target in self.urls_to_clean:
            result = sanitise_url(url)
            self.assertEqual(result, target)


if __name__ == '__main__':
    unittest.main()
utils/__init__.py (new file, empty)
utils/helpers.py (new file, 204 lines)
@@ -0,0 +1,204 @@
#!/usr/bin/env python
'''
Utilities to provide various misc functions.
'''

import gzip
import urllib.error
import urllib.request
import urllib.robotparser
from urllib.parse import (urljoin, urlsplit)

import aiohttp  # not used yet; the async conversion is planned (see notes.md)
from bs4 import BeautifulSoup


class UrlPool(object):
    '''
    Object to manage a pool of URLs.
    '''

    def __init__(self):
        self.pool = set()

    def check_duplicate(self, new_url):
        '''
        Checks if a URL exists in the current pool.
        '''
        return new_url in self.pool

    def remove_from_pool(self):
        '''
        Remove a URL from the pool and return it to be crawled.
        '''
        return self.pool.pop()

    def add_to_pool(self, url):
        '''
        Add a URL to the pool.
        '''
        self.pool.add(url)

    def list_pool(self):
        '''
        Return the current pool.
        '''
        return self.pool


class WebPage(object):
    '''
    Object to manage common operations required to return
    the data from each individual page.
    '''

    # set a sane user-agent and request compression if available.
    headers = {'Accept-Encoding': 'gzip, deflate',
               'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}

    def __init__(self, url=None, base_url=None, robots=None):
        self.url = url
        self.base_url = base_url
        self.robots = robots
        self.source = None
        self.discovered_hrefs = set()
        self.urls_to_crawl = set()

    def get_source(self):
        '''
        Retrieve a page's source.
        '''
        request = urllib.request.Request(self.url, headers=self.headers)
        page = urllib.request.urlopen(request, timeout=5)

        # handle the content encoding in case it needs decompressing.
        if 'text/html' in page.info().get('Content-Type', ''):
            if page.info().get('Content-Encoding') == 'gzip':
                self.source = gzip.decompress(page.read())
            else:
                # deflate or no encoding: use the body as-is
                self.source = page.read()

    def find_links(self):
        '''
        Find all URLs on a page and ensure they are absolute. If they are
        relative then they will be appended to the base URL.
        '''
        hrefs = set()

        soup = BeautifulSoup(self.source, 'lxml')
        links = soup.find_all('a', href=True)

        for link in links:
            if link['href'].startswith('/'):
                hrefs.add(urljoin(self.url, link['href']))
            else:
                hrefs.add(link['href'])

        self.discovered_hrefs = hrefs

    def parse_urls(self):
        '''
        Iterate through the set of discovered URLs and add them to the
        pool if they start with the base URL.
        '''
        for url in self.discovered_hrefs:
            if url.startswith(self.base_url) and self.robots.check(url):
                sanitised_url = sanitise_url(url=url)
                self.urls_to_crawl.add(sanitised_url)

    def list_urls(self):
        '''
        Returns all valid discovered URLs.
        '''
        return self.urls_to_crawl

    def run(self):
        '''
        Attempt to get the page's source and if successful, iterate through it
        to find any links we can crawl.
        '''
        try:
            self.get_source()
        except Exception:
            # skip if we didn't retrieve the source.
            pass

        if self.source:
            self.find_links()
            self.parse_urls()
            return True
        else:
            return False


class RobotsTxt(object):
    '''
    Fetches and parses a site's robots.txt so discovered URLs can be
    checked before being crawled.
    '''

    def __init__(self, base_url=None):
        '''
        Manually retrieve robots.txt to allow us to set the user-agent.
        '''
        self.base_url = base_url
        self.headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}

        robots_url = urljoin(self.base_url, 'robots.txt')
        request = urllib.request.Request(robots_url, headers=self.headers)

        robots = urllib.robotparser.RobotFileParser()
        robots.set_url(robots_url)

        try:
            response = urllib.request.urlopen(request, timeout=5)
        except urllib.error.URLError:
            # no usable robots.txt (404, unreachable, etc.): allow everything
            robots.allow_all = True
        else:
            data = response.read()
            decoded_data = data.decode("utf-8").splitlines()
            robots.parse(decoded_data)

        self.robots = robots

    def check(self, url):
        '''
        Test if robots.txt allows us to crawl that URL.
        '''
        return self.robots.can_fetch("*", url)


def sanitise_url(url, base_url=False):
    '''
    If `base_url` is True, we attempt to standardise `url` to ensure it can be
    prepended to relative URLs. If no scheme has been provided then we default
    to http, as any sane https-only site should 301 redirect http > https.

    If `base_url` is False, we sanitise URLs to strip queries and fragments (we
    don't want to scrape in-page anchors etc).

    Returns a sanitised URL as a string.
    '''
    default_proto = 'http'
    delim = '://'

    split_url = urlsplit(url)

    if base_url:
        # this sanitises the initial URL for the initial page crawl.
        if split_url.scheme and split_url.scheme.startswith('http'):
            sanitised_url = "".join([split_url.scheme, delim, split_url.netloc])
        elif (split_url.path and not split_url.scheme and not split_url.netloc):
            sanitised_url = "".join([default_proto, delim, split_url.path])
        else:
            # anything else (e.g. an ftp:// URL) would otherwise leave
            # sanitised_url undefined, so fail loudly instead
            raise ValueError('unsupported base URL: {0}'.format(url))
    else:
        # sanitise discovered URLs. We already expect them in the format
        # protocol://base_url/path
        sanitised_url = "".join([split_url.scheme, delim, split_url.netloc, split_url.path])

    return sanitised_url
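For quick reference, `sanitise_url`'s expected behaviour, with input/output pairs taken from the cases in `test_helpers.py`:

```python
from utils.helpers import sanitise_url

# base URLs gain a default scheme when none is supplied
print(sanitise_url('www.simonweald.com', base_url=True))  # http://www.simonweald.com

# discovered URLs lose their queries and fragments
print(sanitise_url('https://github.com/?foo=bar'))        # https://github.com/
print(sanitise_url('https://github.com/#anchor'))         # https://github.com/
```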