Compare commits

3 Commits

3 changed files with 33 additions and 26 deletions

View File

@@ -6,6 +6,7 @@ Need a docstring.
 import argparse
 import jinja2
 import os
+import asyncio
 from datetime import datetime
 from utils.helpers import (UrlPool, WebPage, RobotsTxt, sanitise_url)
@@ -57,16 +58,18 @@ def process_pool(base_url=None, uncrawled_urls=None, crawled_urls=None, robots=N
         # create a WebPage object for the URL
         current_page = WebPage(url=new_url, base_url=base_url, robots=robots)
         try:
-            current_page.run()
-            _urls = current_page.list_urls()
-            crawled_urls.add_to_pool(new_url)
+            succeeded = current_page.run()
         except Exception as e:
             print(e)
-        for url in _urls:
-            sanitised_url = sanitise_url(url=url)
-            if sanitised_url not in crawled_urls.pool:
-                uncrawled_urls.add_to_pool(url)
+        if succeeded:
+            _urls = current_page.list_urls()
+            crawled_urls.add_to_pool(new_url)
+            for url in _urls:
+                sanitised_url = sanitise_url(url=url)
+                if sanitised_url not in crawled_urls.pool:
+                    uncrawled_urls.add_to_pool(url)
         print('{0} URLs crawled, {1} remaining'.format(len(crawled_urls.pool),
                                                        len(uncrawled_urls.pool)))
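
In the new version of the loop body, succeeded is only bound inside the try block, so an exception raised before current_page.run() returns would leave it undefined on the first pass. Below is a minimal sketch of the same flow, assuming the WebPage and UrlPool interfaces used above and adding a defensive default that is not part of these commits:

succeeded = False  # assumed default, not in the commit itself
current_page = WebPage(url=new_url, base_url=base_url, robots=robots)
try:
    # run() now reports whether the page source was fetched and parsed
    succeeded = current_page.run()
except Exception as e:
    print(e)
if succeeded:
    crawled_urls.add_to_pool(new_url)
    for url in current_page.list_urls():
        sanitised_url = sanitise_url(url=url)
        if sanitised_url not in crawled_urls.pool:
            uncrawled_urls.add_to_pool(url)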

View File

@@ -1,37 +1,35 @@
 #!/usr/bin/env python

 import unittest
-from utils.helpers import (clean_base_url)
+from utils.helpers import (sanitise_url)


 class TestUrls(unittest.TestCase):
-    base_url = "github.com"
     base_url_list = (('eu.httpbin.org', 'http://eu.httpbin.org'),
                      ('www.simonweald.com', 'http://www.simonweald.com'),
                      ('http://www.github.com/', 'http://www.github.com'),
                      ('https://www.github.com', 'https://www.github.com'))
-    valid_urls = ["https://www.github.com", "http://www.github.com",
-                  "github.com", "/some/url/", "index.html"]
+    urls_to_clean = (('https://www.github.com/', 'https://www.github.com/'),
+                     ('https://github.com/?foo=bar', 'https://github.com/'),
+                     ('https://github.com/#anchor', 'https://github.com/'))

-    def test_clean_base_url(self):
+    def test_sanitise_base_url(self):
         '''
         Tests whether a URL's protocol can be discovered if not provided.
         '''
         for url, target in self.base_url_list:
-            result = clean_base_url(url)
+            result = sanitise_url(url, base_url=True)
             self.assertEqual(result, target)

-    # def test_url_validation(self):
-    #     '''
-    #     Passes when given a valid URL. A valid URL is qualified
-    #     by being local to the domain to be crawled.
-    #     '''
-    #     for url in self.valid_urls:
-    #         result = url_validation(self.base_url, url)
-    #         self.assertTrue(result)
+    def test_sanitise_url(self):
+        '''
+        Tests that query strings and anchors are stripped from a URL.
+        '''
+        for url, target in self.urls_to_clean:
+            result = sanitise_url(url)
+            self.assertEqual(result, target)

 if __name__ == '__main__':
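
Taken together, the new test cases pin down two behaviours for sanitise_url: with base_url=True it fills in a missing scheme and drops a trailing slash, and without it it strips query strings and fragments. The implementation lives in utils/helpers.py and is not shown in these hunks, so the following is only a hedged sketch of one way to satisfy the tests, with the function name reused purely for illustration:

from urllib.parse import urlsplit, urlunsplit

def sanitise_url(url, base_url=False):
    # Illustrative stand-in inferred from the test expectations above;
    # not the code shipped in utils/helpers.py.
    if base_url:
        # Assume http:// when no scheme is given, then drop a trailing slash.
        if not url.startswith(('http://', 'https://')):
            url = 'http://' + url
        return url.rstrip('/')
    # Keep scheme, host and path; discard query string and fragment.
    scheme, netloc, path, _query, _fragment = urlsplit(url)
    return urlunsplit((scheme, netloc, path, '', ''))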

View File

@@ -4,6 +4,7 @@ Utilities to provide various misc functions.
 '''
 from bs4 import BeautifulSoup
+import aiohttp
 import urllib.request
 import urllib.robotparser
 import urllib.error
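
These commits only add the asyncio and aiohttp imports; none of the hunks shown here use them yet, so the intended fetch path is an assumption. A minimal sketch of the kind of coroutine the imports would typically support, with fetch_source as a hypothetical name:

import asyncio
import aiohttp

async def fetch_source(url, headers=None):
    # Hypothetical helper, not part of these commits: fetch a page body
    # asynchronously and signal failure with None, mirroring the new
    # True/False contract of WebPage.run().
    try:
        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(url) as response:
                return await response.text()
    except aiohttp.ClientError as e:
        print(e)
        return None

# e.g. source = asyncio.run(fetch_source('http://eu.httpbin.org'))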
@@ -104,10 +105,9 @@ class WebPage(object):
         pool if they start with the base URL.
         '''
         for url in self.discovered_hrefs:
-            if url.startswith(self.url):
-                if self.robots.check(url):
-                    sanitised_url = sanitise_url(url=url)
-                    self.urls_to_crawl.add(sanitised_url)
+            if url.startswith(self.base_url) and self.robots.check(url):
+                sanitised_url = sanitise_url(url=url)
+                self.urls_to_crawl.add(sanitised_url)

     def list_urls(self):
@@ -132,6 +132,9 @@ class WebPage(object):
         if self.source:
             self.find_links()
             self.parse_urls()
+            return True
+        else:
+            return False


 class RobotsTxt(object):
@@ -140,6 +143,9 @@ class RobotsTxt(object):
     '''
     def __init__(self, base_url=None):
+        '''
+        Manually retrieve robots.txt to allow us to set the user-agent.
+        '''
         self.base_url = base_url
         self.headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
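
The new docstring explains that robots.txt is fetched manually so a custom user-agent can be sent, but the rest of __init__ falls outside this hunk. A hedged sketch of how that retrieval could look with the urllib modules the file already imports (load_robots is a hypothetical standalone name, not the class body from these commits):

import urllib.error
import urllib.request
import urllib.robotparser

def load_robots(base_url, headers):
    # Hypothetical sketch: request robots.txt with the custom User-Agent
    # header and hand the body to RobotFileParser for can_fetch() checks.
    parser = urllib.robotparser.RobotFileParser()
    request = urllib.request.Request('{0}/robots.txt'.format(base_url),
                                     headers=headers)
    try:
        with urllib.request.urlopen(request) as response:
            parser.parse(response.read().decode('utf-8').splitlines())
    except urllib.error.URLError as e:
        print(e)
        return None
    return parser

# parser.can_fetch('*', url) can then back a RobotsTxt.check()-style method.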