commit of working async crawler
@@ -13,12 +13,12 @@ from datetime import datetime
 from utils.helpers import RobotsTxt, AsyncCrawler, standardise_url
 
 
-def init_crawler(url=None):
+def sanity_checks(url=None):
     '''
-    docstring
+    Runs some basic sanity checks before the crawler is initialised.
     '''
     # ensure we have a sensible URL to work with
-    baseurl = standardise_url(url=url, base_url=url)
+    baseurl = standardise_url(url=url)
     # get robots.txt
     robots = RobotsTxt(base_url=baseurl)
 
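Note: standardise_url and RobotsTxt come from utils.helpers, which is outside this diff, so their actual behaviour isn't visible here. As a rough sketch only of the kind of work such helpers typically do, using just the standard library (names suffixed _sketch to make clear they are stand-ins, not the project's code):

    from urllib.parse import urlparse, urlunparse
    from urllib.robotparser import RobotFileParser

    def standardise_url_sketch(url):
        # normalise scheme/host casing and drop any fragment so pages are keyed consistently
        parts = urlparse(url if '://' in url else f'https://{url}')
        return urlunparse((parts.scheme.lower(), parts.netloc.lower(),
                           parts.path or '/', '', parts.query, ''))

    class RobotsTxtSketch:
        # thin wrapper around urllib.robotparser that fetches <base_url>/robots.txt once
        def __init__(self, base_url):
            self._parser = RobotFileParser(url=f"{base_url.rstrip('/')}/robots.txt")
            self._parser.read()

        def can_fetch(self, url, user_agent='*'):
            return self._parser.can_fetch(user_agent, url)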
@@ -31,16 +31,16 @@ def init_crawler(url=None):
 
 def render_sitemap(base_url=None, crawled_urls=None, runtime=None):
     '''
-    Renders the sitemap as an HTML file.
+    Renders the sitemap to an HTML file.
     '''
-    # urlcount = len(crawled_urls)
-    # sorted_urls = sorted(crawled_urls)
+    urlcount = len(crawled_urls)
+    sorted_urls = sorted(crawled_urls)
 
-    tmpl = jinja2.Environment(
+    template = jinja2.Environment(
         loader=jinja2.FileSystemLoader('templates')
     ).get_template('sitemap.html.j2')
 
-    rendered_html = tmpl.render(
+    rendered_html = template.render(
         base_url=base_url, urlcount=urlcount, urls=sorted_urls, runtime=runtime)
 
     with open('sitemap.html', 'w') as outfile:
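The now-uncommented urlcount and sorted_urls feed straight into the Jinja2 template. templates/sitemap.html.j2 itself isn't part of this diff, so the following is a self-contained illustration of the same render call with an inline stand-in template that uses the same variables the code passes in:

    import jinja2

    template = jinja2.Environment(loader=jinja2.DictLoader({
        'sitemap.html.j2': (
            '<h1>Sitemap for {{ base_url }}</h1>'
            '<p>{{ urlcount }} URLs crawled in {{ runtime }}s</p>'
            '<ul>{% for url in urls %}<li>{{ url }}</li>{% endfor %}</ul>'
        ),
    })).get_template('sitemap.html.j2')

    # stand-in values; the real call uses the crawler's results
    print(template.render(base_url='https://example.com', urlcount=2, runtime=3,
                          urls=['https://example.com/', 'https://example.com/about']))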
@@ -55,21 +55,19 @@ def main():
     '''
     starttime = datetime.now()
 
-    baseurl, robots = init_crawler(url=args.url)
+    baseurl, robots = sanity_checks(url=args.url)
 
     # create a crawler
     async_crawler = AsyncCrawler(baseurl=baseurl, robots=robots, concurrency=args.concurrency)
     # run the crawler
 
-    task = asyncio.Task(async_crawler.run_loop())
+    task = asyncio.Task(async_crawler.main())
     loop = asyncio.get_event_loop()
     loop.run_until_complete(task)
     loop.close()
     results = task.result()
     print(results)
     print(len(results))
     runtime = int((datetime.now() - starttime).total_seconds())
     print(runtime)
 
     render_sitemap(base_url=baseurl, crawled_urls=results, runtime=runtime)
 
 
 if __name__ == '__main__':
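The Task / get_event_loop / run_until_complete / close sequence is the older way of driving a coroutine to completion. On Python 3.7+ the same run can be expressed with asyncio.run(), which creates and tears down the loop itself. A minimal sketch, assuming AsyncCrawler.main() is a coroutine that returns the crawled URLs (as the surrounding code suggests):

    import asyncio

    async def crawl(baseurl, robots, concurrency):
        # build the crawler and await its entry coroutine
        crawler = AsyncCrawler(baseurl=baseurl, robots=robots, concurrency=concurrency)
        return await crawler.main()

    results = asyncio.run(crawl(baseurl, robots, args.concurrency))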
@@ -77,7 +75,7 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Recursive web crawler')
     parser.add_argument("-u", "--url", required=True, help="Base url to crawl")
     parser.add_argument("-c", "--concurrency", required=False, type=int,
-                        default=50, help="Max number of pages to crawl concurrently")
+                        default=100, help="Max number of pages to crawl concurrently")
     args = parser.parse_args()
 
     main()
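With these arguments a run looks something like the following (the script's filename isn't visible in this extract, so crawler.py is a placeholder):

    python crawler.py --url https://example.com --concurrency 100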