initial foray into asynchronous crawling

2018-09-12 22:54:12 +01:00
parent 8698c21fda
commit 36e1f7693f
2 changed files with 155 additions and 4 deletions

async_crawler.py (new file, 74 lines)

@@ -0,0 +1,74 @@
#!/usr/bin/env python
'''
Asynchronous recursive web crawler: crawls a base URL (respecting robots.txt)
and can render the discovered URLs as an HTML sitemap.
'''
import argparse
import asyncio
import os
from datetime import datetime

import jinja2

# from utils.helpers import (UrlPool, WebPage, RobotsTxt, sanitise_url)
from utils.helpers import RobotsTxt, AsyncCrawler, sanitise_url


def init_crawler(url=None):
    '''
    Sanitise the starting URL and fetch robots.txt for the target site.
    '''
    # ensure we have a sensible URL to work with
    baseurl = sanitise_url(url=url, base_url=True)
    # get robots.txt
    robots = RobotsTxt(base_url=baseurl)
    return baseurl, robots


def render_sitemap(base_url=None, crawled_urls=None, runtime=None):
    '''
    Renders the sitemap as an HTML file.
    '''
    urlcount = len(crawled_urls)
    sorted_urls = sorted(crawled_urls)
    tmpl = jinja2.Environment(
        loader=jinja2.FileSystemLoader('templates')
    ).get_template('sitemap.html.j2')
    rendered_html = tmpl.render(
        base_url=base_url, urlcount=urlcount, urls=sorted_urls, runtime=runtime)
    with open('sitemap.html', 'w') as outfile:
        outfile.write(rendered_html)
    print('Sitemap available at {0}/sitemap.html'.format(os.getcwd()))


def main(args=None):
    '''
    Crawl the given base URL asynchronously and report how many pages were found.
    '''
    # runtime is measured from here; the sitemap renderer above takes it as an
    # argument but is not yet called in this first async pass
    starttime = datetime.now()
    baseurl, robots = init_crawler(url=args.url)
    # create a crawler and drive it to completion on the event loop
    async_crawler = AsyncCrawler(
        baseurl=baseurl, robots=robots, concurrency=args.concurrency)
    loop = asyncio.get_event_loop()
    crawler = loop.create_task(async_crawler.run())
    loop.run_until_complete(crawler)
    loop.close()
    result = crawler.result()
    print(len(result))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Recursive web crawler')
    parser.add_argument("-u", "--url", required=True,
                        help="Base url to crawl")
    parser.add_argument("-s", "--concurrency", required=False, type=int, default=50,
                        help="Max number of pages to crawl concurrently")
    args = parser.parse_args()
    main(args)
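
The AsyncCrawler class lives in utils.helpers, the other file touched by this commit, which is not shown in this view. As a rough sketch only, assuming aiohttp for the HTTP requests and an asyncio.Semaphore for the concurrency cap (neither confirmed by this diff), a run() coroutine compatible with the main() above might look something like this:

# Hypothetical sketch of utils.helpers.AsyncCrawler -- the real class is in the
# second changed file, not shown here. The constructor arguments mirror the call
# in main(); the aiohttp/semaphore approach is an assumption, not the commit's code.
import asyncio

import aiohttp


class AsyncCrawler:
    def __init__(self, baseurl=None, robots=None, concurrency=50):
        self.baseurl = baseurl
        self.robots = robots          # assumed to offer an "is this URL allowed?" check
        self.semaphore = asyncio.Semaphore(concurrency)
        self.seen = set()

    async def fetch(self, session, url):
        # the semaphore caps the number of requests in flight at once
        async with self.semaphore:
            async with session.get(url) as response:
                return await response.text()

    async def run(self):
        # crawl outward from the base URL in batches and return the set of
        # visited URLs, so main() can print len(result)
        queue = [self.baseurl]
        async with aiohttp.ClientSession() as session:
            while queue:
                batch = [u for u in queue if u not in self.seen]
                self.seen.update(batch)
                queue = []
                await asyncio.gather(
                    *(self.fetch(session, u) for u in batch),
                    return_exceptions=True)
                # link extraction is elided in this sketch; a real run() would
                # parse each fetched page and push in-scope links back onto queue
        return self.seen

With something like that in place, python async_crawler.py -u https://example.com -s 20 would crawl example.com with at most 20 concurrent requests and print the number of URLs found.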