rename all instances of base_url to rooturl, add more documentation
@@ -8,7 +8,7 @@ URLs discovered will be crawled.
 
 The crawler takes a total of two arguments (concurrency is optional):
 
-    url: the base URL to begin the crawl from.
+    url: the root URL to begin the crawl from.
     concurrency: the maximum number of pages which may be crawled concurrently.
 '''
 
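As context for the rename, the two documented arguments surface later in main() as args.url and args.concurrency. A minimal sketch of argument parsing that would produce them is below; the flag name, default value and help strings are assumptions, since the parser itself is not part of this diff.

    # Hypothetical parser matching the documented arguments; the real one
    # lives elsewhere in the module and may differ.
    import argparse

    def parse_args():
        parser = argparse.ArgumentParser(
            description='Crawl a site from a root URL and render a sitemap.')
        parser.add_argument('url',
                            help='the root URL to begin the crawl from')
        parser.add_argument('--concurrency', type=int, default=10,
                            help='maximum number of pages crawled concurrently')
        return parser.parse_args()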
@@ -29,32 +29,31 @@ def sanity_checks(url=None):
         url: the root URL to be crawled.
 
     Returns:
-        baseurl: a validated and cleaned version of the initial URL.
-            (type=string)
+        rooturl: a string containing a validated and cleaned version of the
+            initial URL.
         robots: an object which allows us to query whether a site may be crawled.
             (type=RobotsTxt)
     '''
     # ensure we have a sensible URL to work with
-    baseurl = standardise_url(url=url)
+    rooturl = standardise_url(url=url)
     # get robots.txt
-    robots = RobotsTxt(base_url=baseurl)
+    robots = RobotsTxt(rooturl=rooturl)
 
     # fail early if robots denies all crawling
-    if not robots.check(url=baseurl):
-        sys.exit("{baseurl} cannot be crawled (denied by robots.txt)".format(
-            baseurl=baseurl))
+    if not robots.check(url=rooturl):
+        sys.exit("{0} cannot be crawled (denied by robots.txt)".format(
+            rooturl))
 
-    return(baseurl, robots)
+    return(rooturl, robots)
 
 
-def render_sitemap(base_url=None, crawled_urls=None, runtime=None):
+def render_sitemap(rooturl=None, crawled_urls=None, runtime=None):
     '''
     Renders the sitemap to an HTML file.
 
     Accepts:
-        base_url:
-        crawled_urls:
-        runtime:
+        rooturl: string containing the root URL
+        crawled_urls: set containing discovered URLs
+        runtime: int representing run time of AsyncCrawler
     '''
     urlcount = len(crawled_urls)
     sorted_urls = sorted(crawled_urls)
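RobotsTxt is only touched here at its call sites, so the diff renames the constructor keyword to rooturl and leaves check(url=...) unchanged. A minimal sketch of an interface compatible with those calls, built on the standard library's urllib.robotparser, is below; everything beyond the two signatures is an assumption.

    # Sketch only: mirrors the RobotsTxt(rooturl=...) / check(url=...) usage above.
    from urllib import robotparser
    from urllib.parse import urljoin

    class RobotsTxt:
        def __init__(self, rooturl=None):
            # fetch and parse <rooturl>/robots.txt
            self.parser = robotparser.RobotFileParser()
            self.parser.set_url(urljoin(rooturl, '/robots.txt'))
            self.parser.read()

        def check(self, url=None):
            # True if the URL may be fetched according to robots.txt
            return self.parser.can_fetch('*', url)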
@@ -63,7 +62,7 @@ def render_sitemap(base_url=None, crawled_urls=None, runtime=None):
         loader=jinja2.FileSystemLoader('templates')
     ).get_template('sitemap.html.j2')
 
-    rendered_html = template.render(base_url=base_url, urlcount=urlcount,
+    rendered_html = template.render(rooturl=rooturl, urlcount=urlcount,
                                     urls=sorted_urls, runtime=runtime)
 
     with open('sitemap.html', 'w') as outfile:
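Assembled from the fragments visible in this hunk, the renamed render_sitemap() looks roughly like the sketch below; the template assignment and the final write are reconstructed from the surrounding continuation lines and are not shown verbatim in the diff.

    # Sketch of render_sitemap() after the rename, assembled from the hunk above.
    import jinja2

    def render_sitemap(rooturl=None, crawled_urls=None, runtime=None):
        urlcount = len(crawled_urls)
        sorted_urls = sorted(crawled_urls)

        template = jinja2.Environment(
            loader=jinja2.FileSystemLoader('templates')
        ).get_template('sitemap.html.j2')

        rendered_html = template.render(rooturl=rooturl, urlcount=urlcount,
                                        urls=sorted_urls, runtime=runtime)

        with open('sitemap.html', 'w') as outfile:
            outfile.write(rendered_html)  # assumed: not visible in this hunk

One knock-on effect of the rename: if sitemap.html.j2 displays the root URL, it now receives it as rooturl rather than base_url, alongside urlcount, urls and runtime.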
@@ -79,10 +78,10 @@ def main():
     '''
     starttime = datetime.now()
 
-    baseurl, robots = sanity_checks(url=args.url)
+    rooturl, robots = sanity_checks(url=args.url)
 
     # create a crawler
-    async_crawler = AsyncCrawler(baseurl=baseurl, robots=robots,
+    async_crawler = AsyncCrawler(rooturl=rooturl, robots=robots,
                                  concurrency=args.concurrency)
 
     # create a task to run the crawler, run the loop and then gather the
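Only AsyncCrawler's constructor keywords change in this commit; the class itself is outside the diff. A rough sketch of the interface the call above implies is given below; the attribute names, the semaphore and the crawl() method are assumptions.

    # Hypothetical shape of AsyncCrawler, inferred from the call site above.
    import asyncio

    class AsyncCrawler:
        def __init__(self, rooturl=None, robots=None, concurrency=None):
            self.rooturl = rooturl
            self.robots = robots
            # cap the number of pages fetched at once
            self.semaphore = asyncio.Semaphore(concurrency)
            self.crawled_urls = set()

        async def crawl(self):
            # breadth-first crawl from self.rooturl, consulting
            # self.robots.check() for each candidate URL (details omitted)
            return self.crawled_urls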
@@ -95,7 +94,7 @@ def main():
 
     runtime = int((datetime.now() - starttime).total_seconds())
 
-    render_sitemap(base_url=baseurl, crawled_urls=results, runtime=runtime)
+    render_sitemap(rooturl=rooturl, crawled_urls=results, runtime=runtime)
 
 
 if __name__ == '__main__':
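The event-loop code between the last two hunks is not visible here. Under the task/loop/gather pattern the earlier comment describes, the results set handed to render_sitemap() could be produced roughly as follows; the exact calls are assumptions.

    # Sketch of the unseen middle of main(), under the assumptions above.
    import asyncio  # assumed to be imported at module level in the real script

    loop = asyncio.get_event_loop()
    crawler_task = loop.create_task(async_crawler.crawl())
    loop.run_until_complete(crawler_task)
    results = crawler_task.result()  # set of crawled URLs passed to render_sitemap()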