From: Maxious
Date: Sat, 08 Dec 2012 12:56:09 +0000
Subject: more scrapers
X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=e0f986ea938bab86a6a737457aa4cd5ec19af725
---
more scrapers

Former-commit-id: 4e165fe3801a38df9f3943299b117f28334cc042
---

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -13,6 +13,8 @@
 from datetime import *
 import codecs
 
+import difflib
+
 from StringIO import StringIO
 
 from pdfminer.pdfparser import PDFDocument, PDFParser
@@ -49,6 +51,31 @@
         """ do the scraping """
         return
 
+class GenericHTMLDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        content = rcontent.read()
+        dochash = scrape.mkhash(content)
+        doc = foidocsdb.get(dochash)
+        if doc is None:
+            print "saving " + dochash
+            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
+            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
+            if last_attach != None:
+                html_diff = difflib.HtmlDiff()
+                description = description + "\nChanges: "
+                description = description + html_diff.make_table(last_attach.read().split('\n'),
+                    content.split('\n'))
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+                , 'url': self.getURL(), 'docID': dochash,
+                "date": edate, "title": "Disclosure Log Updated", "description": description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
 
 class GenericPDFDisclogScraper(GenericDisclogScraper):

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -76,6 +76,16 @@
         addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
         addinfourl.code = code
         return addinfourl
+
+def getLastAttachment(docsdb,url):
+    hash = mkhash(url)
+    doc = docsdb.get(hash)
+    if doc != None:
+        last_attachment_fname = doc["_attachments"].keys()[-1]
+        last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+        return last_attachment
+    else:
+        return None
 
 def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
     url = canonurl(url)

--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -12,8 +12,8 @@
 
 if __name__ == '__main__':
     print 'Subclass:', issubclass(ScraperImplementation,
-        genericScrapers.GenericOAICDisclogScraper)
+        genericScrapers.GenericPDFDisclogScraper)
     print 'Instance:', isinstance(ScraperImplementation(),
-        genericScrapers.GenericOAICDisclogScraper)
+        genericScrapers.GenericPDFDisclogScraper)
     ScraperImplementation().doScrape()

--- /dev/null
+++ b/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
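
The GenericHTMLDisclogScraper added above keys each scrape by a hash of the fetched page and, when the hash is new, appends an HTML diff against the previously saved attachment to the document description. A minimal standalone sketch of that difflib step follows; the two inline strings stand in for the last CouchDB attachment and the newly fetched page, which in the scraper come from getLastAttachment() and fetchURL().

# Sketch only (not part of the commit): the difflib.HtmlDiff step used by
# GenericHTMLDisclogScraper, with inline strings standing in for the previous
# attachment and the newly fetched page content.
import difflib

previous_page = "FOI 2012/1 released\nFOI 2012/2 released\n"
current_page = "FOI 2012/1 released\nFOI 2012/2 released\nFOI 2012/3 released\n"

description = "Disclosure Log Updated"
html_diff = difflib.HtmlDiff()
# make_table() returns an HTML <table> highlighting added, removed and changed lines.
description = description + "\nChanges: " + html_diff.make_table(
    previous_page.split('\n'), current_page.split('\n'))
print(len(description))
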
--- /dev/null
+++ b/documents/scrapers/31685505438d393f45a90f442b8fa27f.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericPDFDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericPDFDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/31685505438d393f45a90f442b8fa27f.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-pdf

--- /dev/null
+++ b/documents/scrapers/3e2f110af49d62833a835bd257771ffb.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/3e2f110af49d62833a835bd257771ffb.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- a/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt
+++ b/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt
@@ -1,1 +1,1 @@
-apsc has ACMA style disclog
+ACMA style

--- /dev/null
+++ b/documents/scrapers/525c3953187da08cd702359b2fc2997f.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/525c3953187da08cd702359b2fc2997f.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- /dev/null
+++ b/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -16,7 +16,7 @@
         foidocsdb = scrape.couch['disclosr-foidocuments']
         (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
             self.getURL(), "foidocuments", self.getAgencyID())
-        
+
         d = pq(content.read())
         d.make_links_absolute(base_url = self.getURL())
         for table in d('table').items():
@@ -35,7 +35,7 @@
                 doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                     , 'url': self.getURL(), 'docID': dochash,
                     "links": links,
-                    "date": edate, "notes": notes, "title": "Disclosure Log Updated", "description": description}
+                    "date": edate, "notes": notes, "title": title, "description": description}
                 #print doc
                 foidocsdb.save(doc)
             else:

--- /dev/null
+++ b/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+
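
Each scraper, including the table scraper fixed above, stores records with the same save-if-new idiom: hash the extracted content, look that hash up in CouchDB, and save only when nothing is found. Below is a standalone sketch of that idiom with couchdb-python; the server URL, the sample record and the SHA-1 stand-in for scrape.mkhash() are illustrative assumptions, not taken from the commit.

# Sketch only (not part of the commit): the save-if-new idiom used by the
# scrapers against CouchDB. Server URL, sample record and the SHA-1 stand-in
# for scrape.mkhash() are assumptions for illustration.
import hashlib
import couchdb

couch = couchdb.Server('http://127.0.0.1:5984/')
foidocsdb = couch['disclosr-foidocuments']

title = "Disclosure Log Updated"
dochash = hashlib.sha1(title.encode('utf-8')).hexdigest()
doc = foidocsdb.get(dochash)
if doc is None:
    # First time this content hash has been seen: store a new document under it.
    foidocsdb.save({'_id': dochash, 'title': title})
else:
    # A document with the same hash was stored on a previous run, so skip it.
    print("already saved")
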
--- /dev/null
+++ b/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- /dev/null
+++ b/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- a/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt
+++ b/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt
@@ -1,1 +1,1 @@
-uses RET disclog
+parent

--- /dev/null
+++ b/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- /dev/null
+++ b/documents/scrapers/d72744fb1e5d6e87af9a5ea16cc27fa5.py
@@ -1,1 +1,50 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+import dateutil
+from dateutil.parser import *
+
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+
+        d = pq(content.read())
+        d.make_links_absolute(base_url = self.getURL())
+        for item in d('.item-list').items():
+            title = item('h3').text()
+            print title
+            links = item('a').map(lambda i, e: pq(e).attr('href'))
+            description = title = item('ul').text()
+            edate = date.today().strftime("%Y-%m-%d")
+            print edate
+            dochash = scrape.mkhash(self.remove_control_chars(title))
+            doc = foidocsdb.get(dochash)
+            if doc is None:
+                print "saving " + dochash
+                edate = date.today().strftime("%Y-%m-%d")
+                doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+                    , 'url': self.getURL(), 'docID': dochash,
+                    "links": links,
+                    "date": edate, "title": title, "description": description}
+                #print doc
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ACMADisclogScraper,
+        genericScrapers.GenericDisclogScraper)
+    print 'Instance:', isinstance(ACMADisclogScraper(),
+        genericScrapers.GenericDisclogScraper)
+    ACMADisclogScraper().doScrape()
+
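
ACMADisclogScraper above walks each '.item-list' block with pyquery, taking the h3 heading, every link href, and the bullet text (note that description = title = item('ul').text() rebinds title to the list text after the heading was read). A self-contained sketch of that traversal against an invented HTML fragment follows; the markup and base URL are assumptions, not the agency's real page.

# Sketch only (not part of the commit): the pyquery traversal used by
# ACMADisclogScraper, run against an invented HTML fragment.
from pyquery import PyQuery as pq

html = """
<div class="item-list">
  <h3>November 2012</h3>
  <ul>
    <li><a href="/foi/request-1.pdf">Request 1 documents</a></li>
    <li><a href="/foi/request-2.pdf">Request 2 documents</a></li>
  </ul>
</div>
"""

d = pq(html)
# Resolve relative hrefs against the (assumed) disclosure log URL.
d.make_links_absolute(base_url='http://www.example.gov.au/disclosure-log')
for item in d('.item-list').items():
    title = item('h3').text()                               # heading text
    links = item('a').map(lambda i, e: pq(e).attr('href'))  # list of absolute hrefs
    description = item('ul').text()                         # concatenated bullet text
    print(title, links, description)
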
--- /dev/null
+++ b/documents/scrapers/e770921522a49dc77de208cc724ce134.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/e770921522a49dc77de208cc724ce134.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- /dev/null
+++ b/documents/scrapers/f189459fc43f941e0d4ecfba52c666f3.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/f189459fc43f941e0d4ecfba52c666f3.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -83,6 +83,7 @@
 }
 
 function include_footer_documents() {
+    global $ENV;
     ?>
@@ -168,7 +169,7 @@
     if (isset($row->value->links)) {
         $result .= ' ... Links/Documents ... ";