Remove testing the URL with requests and assume that the user-supplied URL is correct

This commit is contained in:
2018-08-28 17:22:52 +01:00
parent 0d0438670c
commit 25f8c4c686
2 changed files with 36 additions and 39 deletions

View File

@@ -1,34 +1,37 @@
#!/usr/bin/env python #!/usr/bin/env python
import unittest import unittest
from utils.helpers import (url_validation, standardise_base_url) from utils.helpers import (clean_base_url)
class TestUrls(unittest.TestCase): class TestUrls(unittest.TestCase):
base_url = "github.com" base_url = "github.com"
base_url_list = (('eu.httpbin.org', 'http://eu.httpbin.org'), base_url_list = (('eu.httpbin.org', 'http://eu.httpbin.org'),
('www.simonweald.com', 'https://www.simonweald.com'), ('www.simonweald.com', 'http://www.simonweald.com'),
('http://www.github.com', 'http://www.github.com')) ('http://www.github.com/', 'http://www.github.com'),
('https://www.github.com', 'https://www.github.com'))
valid_urls = ["https://www.github.com", "http://www.github.com", valid_urls = ["https://www.github.com", "http://www.github.com",
"github.com", "/some/url/", "index.html"] "github.com", "/some/url/", "index.html"]
def test_url_standardisation(self): def test_clean_base_url(self):
''' '''
Tests whether a URL's protocol can be discovered if not provided. Tests whether a URL's protocol can be discovered if not provided.
''' '''
for url, target in self.base_url_list: for url, target in self.base_url_list:
result = standardise_base_url(url) result = clean_base_url(url)
self.assertEqual(result, target) self.assertEqual(result, target)
def test_url_validation(self): # def test_url_validation(self):
''' # '''
Passes when given a valid URL. A valid URL is qualified # Passes when given a valid URL. A valid URL is qualified
by being local to the domain to be crawled. # by being local to the domain to be crawled.
''' # '''
for url in self.valid_urls: # for url in self.valid_urls:
result = url_validation(self.base_url, url) # result = url_validation(self.base_url, url)
self.assertTrue(result) # self.assertTrue(result)
if __name__ == '__main__': if __name__ == '__main__':

View File

@@ -1,44 +1,38 @@
#!/usr/bin/env python #!/usr/bin/env python
'''
import re Utilities to provide various misc functions.
import requests '''
def standardise_base_url(url): def clean_base_url(url):
''' '''
Standardise the URL to be scraped to ensure it Standardise the URL to be scraped to ensure it
is added to relative URLs in a consistent manner. is added to relative URLs in a consistent manner.
''' '''
match_protocol = r'http(s?)\:\/\/' protocol = 'http://'
if re.match(match_protocol, url): if url.startswith('http'):
base_url = url base_url = url
else: else:
http_url = 'http://{0}'.format(url) # otherwise assume HTTP as any sane site should upgrade
https_url = 'https://{0}'.format(url) # to HTTPS via a 301 redirect.
# attempt to discover which protocol is being used. base_url = "".join([protocol, url])
try:
result = requests.get(http_url)
if result.url.startswith('http'):
base_url = http_url
if result.url.startswith('https'):
base_url = https_url
except requests.exceptions.RequestException as e:
base_url = https_url
# strip the trailing slash to allow us to append
# relative URLs.
if base_url.endswith('/'): if base_url.endswith('/'):
base_url = base_url[:-1] base_url = base_url[:-1]
return base_url return base_url
def get_url_validation(base_url=None, url=None): # def get_url_validation(base_url=None, url=None):
''' # '''
Checks if a URL is valid. Can be absolute or relative. # Checks if a URL is valid. Can be absolute or relative.
''' # '''
if url.startswith('/'): # if url.startswith('/'):
full_url = '{0}{1}'.format(base_url, url) # full_url = '{0}{1}'.format(base_url, url)
if url.startswith(ffbase_url): # if url.startswith(ffbase_url):
full_url = url # full_url = url
elif url.startswith('/'): # elif url.startswith('/'):