implement parsing of robots.txt

2018-09-05 18:56:20 +01:00
parent f2c294ebdb
commit a3ec9451e3
3 changed files with 41 additions and 12 deletions


@@ -3,8 +3,9 @@
 Utilities to provide various misc functions.
 '''
-import urllib.request
 from bs4 import BeautifulSoup
+import urllib.request
+import urllib.robotparser
 from urllib.parse import (urljoin, urlsplit)
@@ -47,9 +48,10 @@ class WebPage(object):
     headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
 
-    def __init__(self, url=None, base_url=None):
+    def __init__(self, url=None, base_url=None, robots=None):
         self.url = url
         self.base_url = base_url
+        self.robots = robots
 
     def get_source(self):
@@ -90,8 +92,9 @@ class WebPage(object):
         for url in self.discovered_hrefs:
             if url.startswith(self.url):
-                sanitised_url = sanitise_url(url=url)
-                self.urls_to_crawl.add(sanitised_url)
+                if self.robots.check(url):
+                    sanitised_url = sanitise_url(url=url)
+                    self.urls_to_crawl.add(sanitised_url)
 
     def list_urls(self):
@@ -119,6 +122,31 @@ class WebPage(object):
             print(e)
 
+
+class RobotsTxt(object):
+    '''
+    Fetch a site's robots.txt and answer whether URLs may be crawled.
+    '''
+    def __init__(self, base_url=None):
+        self.base_url = base_url
+        robots = urllib.robotparser.RobotFileParser()
+        robots.set_url(urljoin(self.base_url, 'robots.txt'))
+        try:
+            robots.read()
+        except Exception as e:
+            print(e)
+        self.robots = robots
+
+    def check(self, url):
+        '''
+        Return True if robots.txt allows any user agent ("*") to fetch `url`.
+        '''
+        return self.robots.can_fetch("*", url)
+
+
 def sanitise_url(url, base_url=False):
     '''
     If `base_url` is True, we attempt to standardise `url` to ensure it can be
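
A minimal sketch of how the new pieces fit together, assuming the crawler is driven roughly as below; the example.com URLs are illustrative and not part of the commit. A RobotsTxt is built once per site and passed to each WebPage, whose link collection now drops any URL that check() rejects.

    # Hypothetical wiring of the classes touched by this commit;
    # the base URL is invented for illustration.
    robots = RobotsTxt(base_url='https://example.com/')
    page = WebPage(url='https://example.com/',
                   base_url='https://example.com/',
                   robots=robots)

    # check() wraps RobotFileParser.can_fetch() for the wildcard user
    # agent, so rules under 'User-agent: *' decide the outcome.
    if robots.check('https://example.com/secret/'):
        print('allowed to crawl')
    else:
        print('disallowed by robots.txt')

One caveat: urljoin(self.base_url, 'robots.txt') resolves relative to base_url, so a base URL such as https://example.com/blog/ points the parser at https://example.com/blog/robots.txt rather than the site-root file; passing '/robots.txt' would always resolve to the root.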