Return a flag from WebPage to indicate whether a link was actually crawlable, and only crawl it if it was
crawler.py | 17
@@ -6,6 +6,7 @@ Need a docstring.
 import argparse
 import jinja2
 import os
+import asyncio
 from datetime import datetime
 from utils.helpers import (UrlPool, WebPage, RobotsTxt, sanitise_url)
 
@@ -57,16 +58,18 @@ def process_pool(base_url=None, uncrawled_urls=None, crawled_urls=None, robots=N
     # create a WebPage object for the URL
     current_page = WebPage(url=new_url, base_url=base_url, robots=robots)
     try:
-        current_page.run()
-        _urls = current_page.list_urls()
-        crawled_urls.add_to_pool(new_url)
+        succeeded = current_page.run()
     except Exception as e:
         print(e)
 
-    for url in _urls:
-        sanitised_url = sanitise_url(url=url)
-        if sanitised_url not in crawled_urls.pool:
-            uncrawled_urls.add_to_pool(url)
+    if succeeded:
+        _urls = current_page.list_urls()
+        crawled_urls.add_to_pool(new_url)
+
+        for url in _urls:
+            sanitised_url = sanitise_url(url=url)
+            if sanitised_url not in crawled_urls.pool:
+                uncrawled_urls.add_to_pool(url)
 
     print('{0} URLs crawled, {1} remaining'.format(len(crawled_urls.pool),
                                                    len(uncrawled_urls.pool)))
utils/helpers.py

@@ -4,6 +4,7 @@ Utilities to provide various misc functions.
 '''
 
 from bs4 import BeautifulSoup
+import aiohttp
 import urllib.request
 import urllib.robotparser
 import urllib.error
@@ -104,10 +105,9 @@ class WebPage(object):
         pool if they start with the base URL.
         '''
         for url in self.discovered_hrefs:
-            if url.startswith(self.url):
-                if self.robots.check(url):
-                    sanitised_url = sanitise_url(url=url)
-                    self.urls_to_crawl.add(sanitised_url)
+            if url.startswith(self.base_url) and self.robots.check(url):
+                sanitised_url = sanitise_url(url=url)
+                self.urls_to_crawl.add(sanitised_url)
 
 
     def list_urls(self):
@@ -132,6 +132,9 @@ class WebPage(object):
         if self.source:
             self.find_links()
             self.parse_urls()
+            return True
+        else:
+            return False
 
 
 class RobotsTxt(object):
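
For reference, a minimal caller-side sketch of the run() contract this commit introduces. It is not part of the repository: the example URLs and the AllowAllRobots stand-in (which only mimics the robots.check() call used in parse_urls above) are assumptions; WebPage, sanitise_url and the run()/list_urls() behaviour come from the diff itself.

# Illustrative sketch only; assumes the utils.helpers layout implied by the imports above.
from utils.helpers import WebPage, sanitise_url

class AllowAllRobots:
    '''Stand-in for RobotsTxt in this sketch; it permits every URL.'''
    def check(self, url):
        return True

page = WebPage(url='http://example.com/', base_url='http://example.com/',
               robots=AllowAllRobots())

succeeded = page.run()   # after this commit: True if the source was fetched, False otherwise
if succeeded:            # only pages that were actually crawlable yield links
    for link in page.list_urls():
        print(sanitise_url(url=link))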