scraper fixes: handle unparseable dates, missing attachments and changed page markup
Former-commit-id: e590ab23de1740c86b174b7ae55da837875155de
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -200,11 +200,16 @@
return table.find_all('tr')
def getDate(self, content, entry, doc):
- date = ''.join(content.stripped_strings).strip()
- (a, b, c) = date.partition("(")
- date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
- print date
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ strdate = ''.join(content.stripped_strings).strip()
+ (a, b, c) = strdate.partition("(")
+ strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012","2012"))
+ print strdate
+ try:
+ edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ except ValueError:
+ print >> sys.stderr, "ERROR date invalid %s " % strdate
+ print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
+ edate = date.today().strftime("%Y-%m-%d")
print edate
doc.update({'date': edate})
return
@@ -267,8 +272,7 @@
'Summary of FOIrequest received by agency/minister',
'Summary of FOI request received', 'Description of FOI Request',
"FOI request", 'Results 1 to 67 of 67']
- if doc['title'] not in badtitles\
- and doc['description'] != '':
+ if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
print "saving"
foidocsdb.save(doc)
else:
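
Note on the date handling above: the getDate() change boils down to the pattern below (a minimal sketch; parse_disclog_date is an illustrative name, not a helper in the codebase, and it assumes dateutil is available as it already is in genericScrapers.py).

    from datetime import date
    from dateutil.parser import parse

    def parse_disclog_date(strdate):
        # Correct typos observed in agency disclosure logs before parsing.
        strdate = strdate.replace("Octber", "October") \
                         .replace("Janrurary", "January") \
                         .replace("1012", "2012")
        try:
            # dayfirst=True: Australian DD/MM/YYYY dates; fuzzy=True skips
            # surrounding words in the cell text.
            return parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        except ValueError:
            # Unparseable date: fall back to today so the record is still saved.
            return date.today().strftime("%Y-%m-%d")
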
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -90,7 +90,7 @@
def getLastAttachment(docsdb, url):
hash = mkhash(url)
doc = docsdb.get(hash)
- if doc != None:
+ if doc != None and "_attachments" in doc.keys():
last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
return last_attachment
@@ -112,10 +112,15 @@
else:
if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
print "Uh oh, trying to scrape URL again too soon!" + hash
- last_attachment_fname = doc["_attachments"].keys()[-1]
- last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
- content = last_attachment
- return (doc['url'], doc['mime_type'], content.read())
+ if "_attachments" in doc.keys():
+ last_attachment_fname = doc["_attachments"].keys()[-1]
+ last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+ content = last_attachment.read()
+ mime_type = doc['mime_type']
+ else:
+ content = None
+ mime_type = None
+ return (doc['url'], mime_type, content)
req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
#if there is a previous version stored in couchdb, load caching helper tags
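
Note on the scrape.py changes: both hunks guard against CouchDB documents that exist but carry no "_attachments" member (a page that was recorded but never successfully fetched). A minimal sketch of the guard, assuming docsdb is a couchdb-python Database; the helper name is illustrative, not part of the codebase.

    def read_last_attachment(docsdb, doc):
        # New documents may not have "_attachments" yet; guard the lookup
        # so the caller gets None instead of a KeyError.
        if doc is not None and "_attachments" in doc:
            fname = doc["_attachments"].keys()[-1]  # Python 2: keys() is a list, matching the existing code
            return docsdb.get_attachment(doc, fname)
        return None
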
--- a/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
+++ b/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
@@ -7,7 +7,7 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):
- return soup.find(id = "inner_content")
+ return soup.find(class_="tborder")
def getColumnCount(self):
return 2
def getColumns(self,columns):
--- a/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
+++ b/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
@@ -41,6 +41,8 @@
return
if __name__ == '__main__':
+#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2012-13.aspx
+#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2011-12.aspx
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape()
--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -25,7 +25,11 @@
(idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
links = table('a').map(lambda i, e: pq(e).attr('href'))
description = descA+" "+descB
- edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ try:
+ edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ except ValueError:
+ edate = date.today().strftime("%Y-%m-%d")
+ pass
print edate
dochash = scrape.mkhash(self.remove_control_chars(title))
doc = foidocsdb.get(dochash)
--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -18,10 +18,10 @@
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent)
- for text in soup.find(id="divFullWidthColumn").stripped_strings:
+ for text in soup.find(class_ = "mainContent").stripped_strings:
description = description + text.encode('ascii', 'ignore')
- for atag in soup.find(id="divFullWidthColumn").find_all("a"):
+ for atag in soup.find(id="SortingTable").find_all("a"):
if atag.has_key('href'):
links.append(scrape.fullurl(link,atag['href']))
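
Note on the selector changes above: the target page markup changed, so the scraper now anchors on a class for the body text and an id for the links table. A minimal BeautifulSoup sketch of the two lookup styles (the sample HTML is illustrative only):

    from bs4 import BeautifulSoup

    html = "<div class='mainContent'>text<table id='SortingTable'><tr><td><a href='/doc.pdf'>doc</a></td></tr></table></div>"
    soup = BeautifulSoup(html)
    body = soup.find(class_="mainContent")   # class lookup: trailing underscore, since "class" is a Python keyword
    table = soup.find(id="SortingTable")     # id lookup is unchanged
    links = [a['href'] for a in table.find_all("a") if a.has_attr('href')]
    print(links)                             # ['/doc.pdf']
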
--- a/documents/scrapers/a687a9eaab9e10e9e118d3fd7cf0e13a.py
+++ b/documents/scrapers/a687a9eaab9e10e9e118d3fd7cf0e13a.py
@@ -7,11 +7,11 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):
- return soup.find(id="ctl00_ContentPlaceHolderMainNoAjax_EdtrTD1494_2").table
+ return soup.find(id="int-content").table
def getColumnCount(self):
- return 4
+ return 3
def getColumns(self,columns):
- (blank,id, title,date) = columns
+ (id, title,date) = columns
return (id, date, title, title, None)
if __name__ == '__main__':
--- a/documents/scrapers/dfd7414bb0c21a0076ab559901ae0588.py
+++ b/documents/scrapers/dfd7414bb0c21a0076ab559901ae0588.py
@@ -10,7 +10,7 @@
(id, date, title, description, notes) = columns
return (id, date, title, description, notes)
def getTable(self,soup):
- return soup.find(class_ = "content")
+ return soup.find(class_ = "simpletable")
if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)