From 679b1b7b5375c718bf2abf2cf9a2a3842e147699 Mon Sep 17 00:00:00 2001
From: Simon Weald
Date: Tue, 18 Sep 2018 18:24:15 +0100
Subject: [PATCH] rename all instances of base_url to rooturl, add more
 documentation

---
 async_crawler.py          | 35 +++++++++----------
 templates/sitemap.html.j2 |  2 +-
 utils/helpers.py          | 73 +++++++++++++++++++++++++--------------
 3 files changed, 65 insertions(+), 45 deletions(-)

diff --git a/async_crawler.py b/async_crawler.py
index 2dae874..484fee4 100644
--- a/async_crawler.py
+++ b/async_crawler.py
@@ -8,7 +8,7 @@ URLs discovered will be crawled.
 
 The crawler takes a total of two arguments (concurrency is optional):
 
-    url: the base URL to begin the crawl from.
+    url: the root URL to begin the crawl from.
     concurrency: the maximum number of pages which may be crawled
                  concurrently.
 '''
@@ -29,32 +29,31 @@ def sanity_checks(url=None):
         url: the root URL to be crawled.
 
     Returns:
-        baseurl: a validated and cleaned version of the initial URL.
-                 (type=string)
+        rooturl: a string containing a validated and cleaned version of the
+                 initial URL.
         robots: an object which allows us to query whether a site may be
                 crawled.
-                (type=RobotsTxt)
     '''
     # ensure we have a sensible URL to work with
-    baseurl = standardise_url(url=url)
+    rooturl = standardise_url(url=url)
 
     # get robots.txt
-    robots = RobotsTxt(base_url=baseurl)
+    robots = RobotsTxt(rooturl=rooturl)
 
     # fail early if robots denies all crawling
-    if not robots.check(url=baseurl):
-        sys.exit("{baseurl} cannot be crawled (denied by robots.txt)".format(
-            baseurl=baseurl))
+    if not robots.check(url=rooturl):
+        sys.exit("{0} cannot be crawled (denied by robots.txt)".format(
+            rooturl))
 
-    return(baseurl, robots)
+    return(rooturl, robots)
 
-def render_sitemap(base_url=None, crawled_urls=None, runtime=None):
+def render_sitemap(rooturl=None, crawled_urls=None, runtime=None):
     '''
     Renders the sitemap to an HTML file.
     Accepts:
-        base_url:
-        crawled_urls:
-        runtime:
+        rooturl: string containing the root URL
+        crawled_urls: set containing discovered URLs
+        runtime: int representing run time of AsyncCrawler
     '''
     urlcount = len(crawled_urls)
     sorted_urls = sorted(crawled_urls)
@@ -63,7 +62,7 @@ def render_sitemap(base_url=None, crawled_urls=None, runtime=None):
         loader=jinja2.FileSystemLoader('templates')
     ).get_template('sitemap.html.j2')
 
-    rendered_html = template.render(base_url=base_url, urlcount=urlcount,
+    rendered_html = template.render(rooturl=rooturl, urlcount=urlcount,
                                     urls=sorted_urls, runtime=runtime)
 
     with open('sitemap.html', 'w') as outfile:
@@ -79,10 +78,10 @@ def main():
     '''
     starttime = datetime.now()
 
-    baseurl, robots = sanity_checks(url=args.url)
+    rooturl, robots = sanity_checks(url=args.url)
 
     # create a crawler
-    async_crawler = AsyncCrawler(baseurl=baseurl, robots=robots,
+    async_crawler = AsyncCrawler(rooturl=rooturl, robots=robots,
                                  concurrency=args.concurrency)
 
     # create a task to run the crawler, run the loop and then gather the
@@ -95,7 +94,7 @@
 
     runtime = int((datetime.now() - starttime).total_seconds())
 
-    render_sitemap(base_url=baseurl, crawled_urls=results, runtime=runtime)
+    render_sitemap(rooturl=rooturl, crawled_urls=results, runtime=runtime)
 
 
 if __name__ == '__main__':
diff --git a/templates/sitemap.html.j2 b/templates/sitemap.html.j2
index c9822ae..b6a79a1 100644
--- a/templates/sitemap.html.j2
+++ b/templates/sitemap.html.j2
@@ -4,7 +4,7 @@
 
-Crawled {{ urlcount }} URLs on {{ base_url }} in ~{{ runtime }} seconds.
+Crawled {{ urlcount }} URLs on {{ rooturl }} in ~{{ runtime }} seconds.
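
For context only (not part of the patch): a minimal sketch of how the renamed rooturl keyword reaches the template above, assuming the templates/sitemap.html.j2 file from this repository is present on disk; the URL, count and runtime below are made-up sample values.

    # illustrative sample values only; assumes templates/sitemap.html.j2 exists
    import jinja2

    template = jinja2.Environment(
        loader=jinja2.FileSystemLoader('templates')
    ).get_template('sitemap.html.j2')

    # after this change the template expects 'rooturl' instead of 'base_url'
    rendered_html = template.render(rooturl='https://example.com', urlcount=2,
                                    urls=['https://example.com/',
                                          'https://example.com/about'],
                                    runtime=3)

    with open('sitemap.html', 'w') as outfile:
        outfile.write(rendered_html)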