Former-commit-id: 994d782d8883843a55bf2558f8e6a6c9ffbcebde
--- a/scrape.py
+++ b/scrape.py
@@ -7,6 +7,61 @@
from urlparse import urljoin
import time
import os
+import mimetypes
+import re
+import urllib
+import urlparse
+
+def canonurl(url):
+    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
+    if the URL looks invalid.
+    >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws
+    'http://xn--hgi.ws/'
+    """
+    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
+    url = url.strip()
+    if not url:
+        return ''
+    if not urlparse.urlsplit(url).scheme:
+        url = 'http://' + url
+
+    # turn it into Unicode
+    #try:
+    #    url = unicode(url, 'utf-8')
+    #except UnicodeDecodeError:
+    #    return ''  # bad UTF-8 chars in URL
+
+    # parse the URL into its components
+    parsed = urlparse.urlsplit(url)
+    scheme, netloc, path, query, fragment = parsed
+
+    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
+    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
+        return ''
+    scheme = str(scheme)
+
+    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
+    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
+    if not match:
+        return ''
+    domain, port = match.groups()
+    netloc = domain + (port if port else '')
+    netloc = netloc.encode('idna')
+
+    # ensure path is valid and convert Unicode chars to %-encoded
+    if not path:
+        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
+    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
+
+    # ensure query is valid
+    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
+
+    # ensure fragment is valid
+    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
+
+    # piece it all back together, truncating it to a maximum of 4KB
+    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
+    return url[:4096]
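+# Illustrative usage of canonurl() (editor's sketch, not part of the original patch):
+#   canonurl('example.com/a b')  -> 'http://example.com/a%20b'
+#   canonurl('mailto:someone')   -> ''   (no host part, so the URL is rejected)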
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
@@ -16,14 +71,18 @@
        return addinfourl
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
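+    # (Editor's summary, not in the original patch) fetchURL canonicalises the URL, keys the
+    # CouchDB document on md5(url), issues a conditional GET using any stored validators, and
+    # saves the response body as a timestamped attachment, returning (mime_type, content).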
-    hash = hashlib.md5(url).hexdigest()
+    if url is None or url == "" or url.startswith("mailto") or url.startswith("javascript") or url.startswith("#"):
+        print "Not a valid HTTP url"
+        return (None,None)
+    url = canonurl(url)
+    hash = hashlib.md5(url).hexdigest().encode("utf-8")
    req = urllib2.Request(url)
    print "Fetching %s" % url
    doc = docsdb.get(hash)
    if doc == None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
    else:
-        if (time.time() - doc['page_scraped']) < 3600:
+        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 999999):
            print "Uh oh, trying to scrape URL again too soon!"
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
@@ -41,17 +100,23 @@
        req.add_header("If-Modified-Since", doc['last_modified'])
    opener = urllib2.build_opener(NotModifiedHandler())
-    url_handle = opener.open(req)
-    headers = url_handle.info() # the addinfourls have the .info() too
-    doc['etag'] = headers.getheader("ETag")
-    doc['last_modified'] = headers.getheader("Last-Modified")
-    doc['date'] = headers.getheader("Date")
-    doc['page_scraped'] = time.time()
-    doc['web_server'] = headers.getheader("Server")
-    doc['powered_by'] = headers.getheader("X-Powered-By")
-    doc['file_size'] = headers.getheader("Content-Length")
-    doc['mime_type'] = headers.getheader("Content-Type").split(";")[0]
-    if hasattr(url_handle, 'code'):
+    try:
+        url_handle = opener.open(req)
+        headers = url_handle.info() # the addinfourls have the .info() too
+        doc['etag'] = headers.getheader("ETag")
+        doc['last_modified'] = headers.getheader("Last-Modified")
+        doc['date'] = headers.getheader("Date")
+        doc['page_scraped'] = time.time()
+        doc['web_server'] = headers.getheader("Server")
+        doc['powered_by'] = headers.getheader("X-Powered-By")
+        doc['file_size'] = headers.getheader("Content-Length")
+        content_type = headers.getheader("Content-Type")
+        if content_type is not None:
+            doc['mime_type'] = content_type.split(";")[0]
+        else:
+            # no Content-Type header; fall back to guessing from the URL's extension
+            (mime_type, encoding) = mimetypes.guess_type(url)
+            doc['mime_type'] = mime_type
+        if hasattr(url_handle, 'code'):
            if url_handle.code == 304:
                print "the web page has not been modified"
                return (None,None)
@@ -61,10 +126,15 @@
                doc = docsdb.get(hash) # need to get a _rev
                docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
                return (doc['mime_type'], content)
-            #store as attachment epoch-filename
-    else:
-        print "error %s in downloading %s" % url_handle.code, URL
-        doc['error'] = "error %s in downloading %s" % url_handle.code, URL
+                #store as attachment epoch-filename
+    except urllib2.URLError as e:
+        # HTTPError is a subclass of URLError, so failed HTTP responses are caught here as well
+        error = ""
+        if hasattr(e, 'reason'):
+            error = "error %s in downloading %s" % (str(e.reason), url)
+        elif hasattr(e, 'code'):
+            error = "error %s in downloading %s" % (e.code, url)
+        print error
+        doc['error'] = error
        docsdb.save(doc)
        return (None,None)
@@ -92,6 +162,12 @@
                # lets not do external links for now
                # linkurls.add(link['href'])
                None
+            elif link['href'].startswith("mailto"):
+                # not http
+                None
+            elif link['href'].startswith("javascript"):
+                # not http
+                None
            else:
                linkurls.add(urljoin(url,link['href'].replace(" ","%20")))
    for linkurl in linkurls:
@@ -108,9 +184,15 @@
        agency = agencydb.get(row.id)
        print agency['name']
        for key in agency.keys():
-            if key == 'website' or key.endswith('URL'):
-                print key
-                scrapeAndStore(docsdb, agency[key],agency['scrapeDepth'],key,agency['_id'])
-            agency['metadata']['lastscraped'] = time.time()
+            if key == 'website':
+                scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
+            if key.endswith('URL'):
+                print key
+                depth = 1
+                if 'scrapeDepth' in agency.keys():
+                    depth = agency['scrapeDepth']
+                scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
+
+        agency['metadata']['lastScraped'] = time.time()
        agencydb.save(agency)
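+# Editor's sketch (hypothetical field names, not part of the original patch): with the loop
+# above, an agency record such as {'website': ..., 'annualReportURL': ..., 'scrapeDepth': 2}
+# has its website scraped at depth 0 and each *URL field scraped at its 'scrapeDepth'
+# (defaulting to 1 when that field is missing).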