return a flag from WebPage indicating whether a link was actually crawlable, and only crawl it if it was
crawler.py (17 lines changed)
@@ -6,6 +6,7 @@ Need a docstring.
 import argparse
 import jinja2
 import os
+import asyncio
 from datetime import datetime
 from utils.helpers import (UrlPool, WebPage, RobotsTxt, sanitise_url)
 
@@ -57,16 +58,18 @@ def process_pool(base_url=None, uncrawled_urls=None, crawled_urls=None, robots=None):
         # create a WebPage object for the URL
         current_page = WebPage(url=new_url, base_url=base_url, robots=robots)
         try:
-            current_page.run()
-            _urls = current_page.list_urls()
-            crawled_urls.add_to_pool(new_url)
+            succeeded = current_page.run()
         except Exception as e:
             print(e)
 
-        for url in _urls:
-            sanitised_url = sanitise_url(url=url)
-            if sanitised_url not in crawled_urls.pool:
-                uncrawled_urls.add_to_pool(url)
+        if succeeded:
+            _urls = current_page.list_urls()
+            crawled_urls.add_to_pool(new_url)
+
+            for url in _urls:
+                sanitised_url = sanitise_url(url=url)
+                if sanitised_url not in crawled_urls.pool:
+                    uncrawled_urls.add_to_pool(url)
 
         print('{0} URLs crawled, {1} remaining'.format(len(crawled_urls.pool),
                                                        len(uncrawled_urls.pool)))
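
The counterpart change implied by the commit message lives in utils/helpers.py, which this diff does not touch: WebPage.run() now needs to report whether the page could actually be fetched. Below is a minimal, hypothetical sketch of what that return contract might look like; the use of requests, the can_fetch() call on the robots object, and the attribute names are illustrative assumptions, not code from the repository.

# Hypothetical sketch -- the real WebPage lives in utils/helpers.py and is not
# shown in this commit; the requests dependency and robots.can_fetch() are assumed.
import requests


class WebPage:
    def __init__(self, url=None, base_url=None, robots=None):
        self.url = url
        self.base_url = base_url
        self.robots = robots
        self.html = None

    def run(self):
        """Fetch the page; return True only if it was actually crawlable."""
        # Links disallowed by robots.txt are not crawlable (interface assumed).
        if self.robots is not None and not self.robots.can_fetch(self.url):
            return False
        try:
            response = requests.get(self.url, timeout=10)
        except requests.RequestException:
            return False
        # Anything other than a 200 response is treated as not crawlable.
        if response.status_code != 200:
            return False
        self.html = response.text
        return True

Because process_pool only assigns succeeded inside the try block, having run() return False for unreachable pages (rather than raising) keeps the later `if succeeded:` check well defined, with exceptions reserved for genuinely unexpected failures.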