more scraper work: factor mkhash() and fullurl() helpers out of scrape.py and save parsed APVMA disclosure-log rows into the FOI documents database


Former-commit-id: f58b0639b55df64a91f425e957bd65ca579545ce

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -12,6 +12,9 @@
 import urllib
 import urlparse
 
+def mkhash(input):
+    return hashlib.md5(input).hexdigest().encode("utf-8")
+
 def canonurl(url):
     r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
     if the URL looks invalid.
@@ -63,6 +66,11 @@
     url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
     return url[:4096]
 
+def fullurl(url,href):
+    href = href.replace(" ","%20")
+    href = re.sub('#.*$','',href)
+    return urljoin(url,href)
+
 #http://diveintopython.org/http_web_services/etags.html
 class NotModifiedHandler(urllib2.BaseHandler):  
     def http_error_304(self, req, fp, code, message, headers):
@@ -72,7 +80,7 @@
 
 def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
     url = canonurl(url)
-    hash = hashlib.md5(url).hexdigest().encode("utf-8")
+    hash = mkhash(url)
     req = urllib2.Request(url)
     print "Fetching %s" % url
     if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
@@ -174,9 +182,7 @@
 				None
 			else:
 				# remove anchors and spaces in urls
-				link['href'] = link['href'].replace(" ","%20")
-				link['href'] = re.sub('#.*$','',link['href'])
-                		linkurls.add(urljoin(url,link['href']))
+                		linkurls.add(fullurl(url,link['href']))
             for linkurl in linkurls:
 		#print linkurl
 		scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)    

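For reference, a minimal standalone sketch of how the two helpers factored out above are intended to be used (Python 2; the URLs below are placeholders, and urljoin is assumed to come from urlparse, since the code being replaced already calls it unqualified):

    import hashlib
    import re
    from urlparse import urljoin

    def mkhash(input):
        # md5 hex digest of a string (typically a canonical URL), used as a document id;
        # hexdigest() is already ASCII, the encode() just mirrors the old inline expression
        return hashlib.md5(input).hexdigest().encode("utf-8")

    def fullurl(url, href):
        # escape spaces and drop fragment anchors, then resolve the href against the page URL
        href = href.replace(" ", "%20")
        href = re.sub('#.*$', '', href)
        return urljoin(url, href)

    print mkhash("http://www.example.gov.au/foi/index.php")
    # -> a 32-character hex string
    print fullurl("http://www.example.gov.au/foi/index.php", "docs/foi log 2012.pdf#top")
    # -> http://www.example.gov.au/foi/docs/foi%20log%202012.pdf
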
--- a/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
+++ b/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
@@ -6,7 +6,8 @@
 #RSS feed not detailed
 from bs4 import BeautifulSoup
 #http://www.apvma.gov.au/about/foi/disclosure/index.php
-(url,mime_type,content) = scrape.fetchURL(scrape.docsdb, "http://www.apvma.gov.au/about/foi/disclosure/index.php", "foidocuments", "3cd40b1240e987cbcd3f0e67054ce259")
+agencyID = "3cd40b1240e987cbcd3f0e67054ce259"
+(url,mime_type,content) = scrape.fetchURL(scrape.docsdb, "http://www.apvma.gov.au/about/foi/disclosure/index.php", "foidocuments", agencyID)
 if content != None:
 	if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
             # http://www.crummy.com/software/BeautifulSoup/documentation.html
@@ -15,7 +16,22 @@
 			columns = row.find_all('td')
 			if len(columns) == 5:
 				(id, date, description, title, notes) = columns
-				print id
+				print id.string
+				hash = scrape.mkhash(url+id.string)
+				links = []
+				for atag in row.find_all("a"):
+					if atag.has_key('href'):
+						links.append(scrape.fullurl(url,atag['href']))
+				doc = foidocsdb.get(hash)
+				descriptiontxt = ""
+				for string in description.stripped_strings:
+					descriptiontxt = descriptiontxt + string
+				if doc == None:
+					print "saving"
+					doc = {'_id': hash, 'agencyID': agencyID, 'url': url, "links": links, 'docID': id.string, "date": date.string, "description": descriptiontxt, "title": title.string, "notes": notes.string}
+					foidocsdb.save(doc)
+				else:
+					print "already saved"
 			elif len(row.find_all('th')) == 5:
 				print "header row"
 			else:
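
A self-contained sketch of the row-parsing pattern the APVMA scraper now relies on (Python 2 with BeautifulSoup 4; the HTML below is made up, and the CouchDB lookup/save against foidocsdb is left out, so this only shows how the five td columns and the link hrefs are pulled out of each row):

    from bs4 import BeautifulSoup

    # Made-up disclosure-log markup with the same five-column layout as the APVMA page
    html = """<table>
    <tr><th>ID</th><th>Date</th><th>Description</th><th>Title</th><th>Notes</th></tr>
    <tr><td>2012-01</td><td>1 June 2012</td>
    <td>Request for <b>briefing</b> documents</td>
    <td><a href="docs/foi 2012-01.pdf">FOI 2012-01</a></td>
    <td>Released in part</td></tr>
    </table>"""

    soup = BeautifulSoup(html)
    for row in soup.table.find_all('tr'):
        columns = row.find_all('td')
        if len(columns) == 5:
            (id, date, description, title, notes) = columns
            # cells with nested tags have .string == None, so the description is rebuilt
            # from stripped_strings (note the pieces are concatenated without separators)
            descriptiontxt = ""
            for string in description.stripped_strings:
                descriptiontxt = descriptiontxt + string
            links = []
            for atag in row.find_all("a"):
                if atag.has_attr('href'):  # the scraper uses the older has_key() spelling
                    links.append(atag['href'])
            print id.string, date.string, title.string, notes.string
            print descriptiontxt, links
        elif len(row.find_all('th')) == 5:
            print "header row"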