web-scraper/async_crawler.py

#!/usr/bin/env python
'''
Asynchronous recursive web crawler.

Crawls pages under a base URL (subject to robots.txt) and renders the
results as an HTML sitemap.
'''
import argparse
import asyncio
import os
import sys
from datetime import datetime

import jinja2

from utils.helpers import RobotsTxt, AsyncCrawler, standardise_url


def init_crawler(url=None):
    '''
    Normalise the base URL and confirm that robots.txt allows crawling it.

    Returns a (baseurl, robots) tuple; exits early if crawling is denied.
    '''
    # ensure we have a sensible URL to work with
    baseurl = standardise_url(url=url, base_url=url)
    # get robots.txt
    robots = RobotsTxt(base_url=baseurl)
    # fail early if robots denies all crawling
    if not robots.check(url=baseurl):
        sys.exit("{baseurl} cannot be crawled (denied by robots.txt)".format(baseurl=baseurl))
    return baseurl, robots
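
# NOTE: utils/helpers.py is not shown here. The calls above assume roughly:
#   standardise_url(url, base_url) -> a normalised absolute URL string
#   RobotsTxt(base_url)            -> parsed robots.txt for the site
#   RobotsTxt.check(url)           -> True if the URL may be crawled
# These signatures are inferred from usage, not from the helpers module itself.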


def render_sitemap(base_url=None, crawled_urls=None, runtime=None):
    '''
    Renders the sitemap as an HTML file.
    '''
    urlcount = len(crawled_urls)
    sorted_urls = sorted(crawled_urls)
    tmpl = jinja2.Environment(
        loader=jinja2.FileSystemLoader('templates')
    ).get_template('sitemap.html.j2')
    rendered_html = tmpl.render(
        base_url=base_url, urlcount=urlcount, urls=sorted_urls, runtime=runtime)
    with open('sitemap.html', 'w') as outfile:
        outfile.write(rendered_html)
    print('Sitemap available at {0}/sitemap.html'.format(os.getcwd()))
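
# The sitemap.html.j2 template lives under templates/ and is not part of this
# file. A minimal sketch of what it might contain, given the variables passed
# to tmpl.render() above (hypothetical, for illustration only):
#
#   <h1>Sitemap for {{ base_url }}</h1>
#   <p>{{ urlcount }} URLs crawled in {{ runtime }} seconds</p>
#   <ul>
#   {% for url in urls %}
#     <li><a href="{{ url }}">{{ url }}</a></li>
#   {% endfor %}
#   </ul>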


def main():
    '''
    Crawl the site given on the command line and write out a sitemap.
    '''
    starttime = datetime.now()
    baseurl, robots = init_crawler(url=args.url)
    # create a crawler
    async_crawler = AsyncCrawler(baseurl=baseurl, robots=robots, concurrency=args.concurrency)
    # run the crawler to completion and collect the crawled URLs
    loop = asyncio.get_event_loop()
    results = loop.run_until_complete(async_crawler.run_loop())
    loop.close()
    # report and render the sitemap
    runtime = int((datetime.now() - starttime).total_seconds())
    print('Crawled {0} URLs in {1} seconds'.format(len(results), runtime))
    render_sitemap(base_url=baseurl, crawled_urls=results, runtime=runtime)
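
# AsyncCrawler.run_loop() is assumed to be a coroutine that returns the
# collection of crawled URLs; a typical implementation would fetch pages
# concurrently (e.g. with aiohttp) and cap in-flight requests at the
# `concurrency` value passed in above.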


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Recursive web crawler')
    parser.add_argument("-u", "--url", required=True, help="Base url to crawl")
    parser.add_argument("-c", "--concurrency", required=False, type=int,
                        default=50, help="Max number of pages to crawl concurrently")
    args = parser.parse_args()
    main()
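
# Example invocation (assumes utils/helpers.py and templates/sitemap.html.j2
# exist alongside this script):
#   python async_crawler.py --url https://example.com --concurrency 20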