Hash URLs for insertion during scraping
Former-commit-id: 68bfb5914592737b5409075f197de39d8ab19319
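
URLs are now hashed (MD5 hex digest) to serve as stable CouchDB document
ids, so repeat scrapes update the same document and can reuse its stored
ETag/Last-Modified headers for conditional GETs. A minimal sketch of the
intended lookup pattern, using docsdb and url as in scrape.py below:

    import hashlib
    hash = hashlib.md5(url).hexdigest()  # stable document id for this url
    doc = docsdb.get(hash)               # None on the first visit
    if doc is None:
        doc = {'_id': hash, 'url': url}  # stub to be filled in by the scraper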
--- a/scrape.py
+++ b/scrape.py
@@ -3,6 +3,7 @@
import urllib2
from BeautifulSoup import BeautifulSoup
import re
+import hashlib
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
@@ -11,25 +12,25 @@
        addinfourl.code = code
        return addinfourl
-def scrapeAndStore(URL, depth, agency):
-    URL = "http://www.google.com"
-    req = urllib2.Request(URL)
-    etag = 'y'
-    last_modified = 'y'
-    #if there is a previous version sotred in couchdb, load caching helper tags
-    if etag:
-        req.add_header("If-None-Match", etag)
-    if last_modified:
-        req.add_header("If-Modified-Since", last_modified)
+def scrapeAndStore(docsdb, url, depth, agencyID):
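+    # the md5 hex digest of the url doubles as the couchdb document id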
+    hash = hashlib.md5(url).hexdigest()
+    req = urllib2.Request(url)
+    print "Fetching %s" % url
+    doc = docsdb.get(hash)
+    if doc is None:
+        # first visit to this url: start a fresh document keyed by the hash
+        doc = {'_id': hash, 'url': url, 'agencyID': agencyID}
+    #if there is a previous version stored in couchdb, load caching helper tags
+    if doc.get('etag'):
+        req.add_header("If-None-Match", doc['etag'])
+    if doc.get('last_modified'):
+        req.add_header("If-Modified-Since", doc['last_modified'])
    opener = urllib2.build_opener(NotModifiedHandler())
    url_handle = opener.open(req)
    headers = url_handle.info() # the addinfourls have the .info() too
-    etag = headers.getheader("ETag")
-    last_modified = headers.getheader("Last-Modified")
-    web_server = headers.getheader("Server")
-    file_size = headers.getheader("Content-Length")
-    mime_type = headers.getheader("Content-Type")
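+    # cache the response metadata on the document for the next conditional fetch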
+    doc['etag'] = headers.getheader("ETag")
+    doc['last_modified'] = headers.getheader("Last-Modified")
+    doc['web_server'] = headers.getheader("Server")
+    doc['file_size'] = headers.getheader("Content-Length")
+    doc['mime_type'] = headers.getheader("Content-Type")
    if hasattr(url_handle, 'code'):
        if url_handle.code == 304:
@@ -41,13 +42,14 @@
        soup = BeautifulSoup(html)
        links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
        for link in links:
-            print link['href']
-            #for each unique link
-            #if html mimetype
-            # go down X levels,
-            # diff with last stored attachment, store in document
-            #if not
-            # remember to save parentURL and title (link text that lead to document)
+            if link.has_key("href"):
+                print link['href']
+                #for each unique link
+                #if html mimetype
+                # go down X levels,
+                # diff with last stored attachment, store in document
+                #if not
+                # remember to save parentURL and title (link text that led to document)
            #store as attachment epoch-filename
        else:
@@ -68,9 +70,10 @@
# select databases
agencydb = couch['disclosr-agencies']
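+# one document per scraped url, keyed by md5(url)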
+docsdb = couch['disclosr-documents']
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
    agency = agencydb.get(row.id)
    print agency['name']
-scrapeAndStore("A",1,1)
+    scrapeAndStore(docsdb, agency['website'], 1, agency['_id'])