initial foray into asynchronous crawling
@@ -3,13 +3,90 @@
Utilities to provide various misc functions.
'''


import asyncio
import gzip
import urllib.error
import urllib.request
import urllib.robotparser

import aiohttp
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlsplit


class AsyncCrawler(object):
    '''
    Asynchronous crawler which fetches pages concurrently via aiohttp,
    restricted to URLs under a single base URL.
    '''

    def __init__(self, baseurl=None, robots=None, concurrency=None):
        self.baseurl = baseurl
        self.robots = robots
        self.uncrawled = set()
        self.crawled = set()
        # shared HTTP session and a semaphore bounding concurrent requests
        self.session = aiohttp.ClientSession()
        self.semaphore = asyncio.BoundedSemaphore(concurrency)
        # add the base URL to be crawled
        self.uncrawled.add(baseurl)
        self.headers = {'Accept-Encoding': 'gzip, deflate',
                        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}

    def validate_url(self, url):
        '''
        Checks if the discovered URL is local to the base URL and permitted
        by robots.txt.
        '''
        # ensure the URL is in a sane format
        url = sanitise_url(url=url)

        if url.startswith(self.baseurl) and self.robots.check(url=url):
            return url
        else:
            return False

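    # Note: sanitise_url() is not defined in this hunk; it presumably lives
    # elsewhere in this utilities module. As an assumed, illustrative sketch
    # (not the module's actual helper), such a function might normalise a raw
    # href using the urlsplit import above, e.g. by stripping the fragment:
    #
    #     def sanitise_url(url=None):
    #         return urlsplit(url)._replace(fragment='').geturl()
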
    async def get_source(self, url):
        '''
        Obtains the page's source.
        '''
        # fetch the page with the shared session, bounded by the semaphore
        async with self.semaphore:
            async with self.session.get(url, headers=self.headers) as response:
                source = await response.text()

        return source

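    # A fuller fetch would probably need some error handling; one possible
    # shape (an assumption, not part of the original commit) would be:
    #
    #     try:
    #         async with self.session.get(url, headers=self.headers) as response:
    #             response.raise_for_status()
    #             return await response.text()
    #     except aiohttp.ClientError:
    #         return None
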
    def find_links(self, source):
        '''
        Find all links in a page's source.
        '''
        links = set()

        html = BeautifulSoup(source, 'lxml')
        hrefs = html.find_all('a', href=True)

        for href in hrefs:
            # find_all() returns Tag objects, so pull out the href attribute
            url = self.validate_url(url=href['href'])
            if url:
                links.add(url)

        return links

    async def run(self):
        '''
        Function which runs the crawler.
        '''
        for url in self.uncrawled:
            validated = self.validate_url(url=url)

            if validated:
                source = await self.get_source(url=url)
                links = self.find_links(source=source)


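# Illustrative driver (an assumption, not part of the original commit): one
# way to run the crawler once from synchronous code. Newer aiohttp versions
# prefer the ClientSession to be created and closed inside a running event
# loop, so treat this only as a rough sketch.
def crawl_once(baseurl, concurrency=5):
    crawler = AsyncCrawler(baseurl=baseurl, concurrency=concurrency)
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(crawler.run())
    finally:
        loop.run_until_complete(crawler.session.close())

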
class UrlPool(object):