beginning of docx/pdf scrapers


Former-commit-id: 72b18d2f2bae7cfce33fb8639ad1523c7bbcc0a3

import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata, re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
   
from StringIO import StringIO

from docx import *
from lxml import etree
import zipfile

from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
   
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id, derived from the scraper script's filename """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID
   
    def getURL(self):
        """ disclog URL, looked up from the agency database """
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL
   
    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
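
# A minimal sketch (illustrative only) of the smallest concrete scraper this
# base class supports: pin disclogURL directly instead of looking it up in
# scrape.agencydb, and implement doScrape(). The URL is a placeholder, not a
# real disclosure log, and no per-agency script actually works this way.
class ExampleDisclogScraper(GenericDisclogScraper):
    disclogURL = "http://www.example.gov.au/foi/disclosure-log"

    def doScrape(self):
        """ fetch the page and report what would be processed """
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        print "fetched " + url + " (" + mime_type + ")"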
   
class GenericPDFDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())

        # run the fetched PDF through pdfminer's text extraction pipeline
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8', laparams=laparams)
        fp = StringIO()
        fp.write(content)
        fp.seek(0)
        process_pdf(rsrcmgr, device, fp, set(), caching=True, check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()

        # hash the extracted text so a changed PDF is saved as a new entry
        hash = scrape.mkhash(description)
        doc = foidocsdb.get(hash)
        if doc is None:
            print "saving " + hash
            # there is no feed entry to take a date from; stamp with today
            edate = datetime.now().strftime("%Y-%m-%d")
            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                   'url': self.getURL(), 'docID': hash,
                   "date": edate, "title": "Disclosure Log Updated",
                   "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())

        # a .docx is a zip archive; the body text lives in word/document.xml
        mydoc = zipfile.ZipFile(StringIO(content))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)

        # fetch all the text out of the document we just parsed
        paratextlist = getdocumenttext(document)

        # make an explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))

        # join the document's paragraphs with two newlines under each
        description = '\n\n'.join(newparatextlist)

        # hash the extracted text so a changed document is saved as a new entry
        hash = scrape.mkhash(description)
        doc = foidocsdb.get(hash)
        if doc is None:
            print "saving " + hash
            # there is no feed entry to take a date from; stamp with today
            edate = datetime.now().strftime("%Y-%m-%d")
            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                   'url': self.getURL(), 'docID': hash,
                   "date": edate, "title": "Disclosure Log Updated",
                   "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
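
# For reference, a sketch of the same extraction done directly with lxml,
# without the docx helper used above; equivalent in spirit to
# getdocumenttext(). The namespace URI is the standard WordprocessingML one.
# Hypothetical helper, not called by any scraper.
def docx_paragraph_text(content):
    """ pull paragraph text straight out of word/document.xml """
    W_NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
    document = etree.fromstring(
        zipfile.ZipFile(StringIO(content)).read('word/document.xml'))
    paragraphs = []
    for para in document.iter('{%s}p' % W_NS):
        # a paragraph's visible text is the concatenation of its w:t runs
        text = ''.join(t.text or '' for t in para.iter('{%s}t' % W_NS))
        if text:
            paragraphs.append(text)
    return '\n\n'.join(paragraphs)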
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            print entry.id
            hash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(hash)
            if doc is None:
                print "saving " + hash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                       'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry """
        doc.update({'description': content.summary})
        return
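
# Illustrative variant (hypothetical, used by no agency): prefer an entry's
# full content over its summary when the feed provides it. feedparser exposes
# optional full text as entry.content, a list whose items carry a .value.
class ExampleFullTextRSSDisclogScraper(GenericRSSDisclogScraper):

    def getDescription(self, content, entry, doc):
        """ get description from rss entry, preferring full content """
        if hasattr(content, 'content') and content.content:
            doc.update({'description': content.content[0].value})
        else:
            doc.update({'description': content.summary})
        return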
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from a table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})
        return

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})
        return

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        (a, b, c) = date.partition("(")
        # fix a known agency typo before parsing
        date = self.remove_control_chars(a.replace("Octber", "October"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_attr('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title, description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(''.join(id.stripped_strings))
                        # rows without an id string are keyed by date instead
                        if id.string is None:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(date.stripped_strings))))
                        else:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(hash)
                        if doc is None:
                            print "saving " + hash
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                                   'url': self.getURL(),
                                   'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (''.join(notes.stripped_strings))})
                            foidocsdb.save(doc)
                        else:
                            print "already saved " + hash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
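
# Illustrative only: a concrete OAIC-style scraper showing the getColumns()
# contract that subclasses must fill in. The column order below (id, date,
# title, description, notes) is an assumed layout for the sketch, not any
# particular agency's real table.
class ExampleOAICDisclogScraper(GenericOAICDisclogScraper):

    def getColumns(self, columns):
        """ map the five td cells of a row to the expected tuple """
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

# if __name__ == '__main__':
#     ExampleOAICDisclogScraper().doScrape()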
   
<?php

// Agency X updated Y, new files, diff of plain text/link text,
// feed for just one agency or all agencies
// This is a minimal example of using the Universal Feed Generator class
include("../lib/FeedWriter/FeedTypes.php");
include_once('../include/common.inc.php');
// Create an instance of the RSS2FeedWriter class
$TestFeed = new RSS2FeedWriter();
// Set the channel elements, using wrapper functions for the common ones
$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');
$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');
$TestFeed->setChannelElement('language', 'en-us');
$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
// Retrieve information from the database
$idtoname = Array();
$agenciesdb = $server->get_db('disclosr-agencies');
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
//print_r($rows);
foreach ($rows as $row) {
    // Create an empty FeedItem
    $newItem = $TestFeed->createNewItem();
    // Add elements to the feed item
    $newItem->setTitle($row->value->title);
    $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id);
    $newItem->setDate(strtotime($row->value->date));
    $newItem->setDescription(displayLogEntry($row, $idtoname));
    $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true'));
    // Now add the feed item
    $TestFeed->addItem($newItem);
}
// OK, everything is done - now generate the feed
$TestFeed->generateFeed();
?>
   