import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
import parsedatetime as pdt
from time import mktime
from datetime import datetime
import feedparser
import abc


class GenericDisclogScraper(object):
        __metaclass__ = abc.ABCMeta
        agencyID = None
        disclogURL = None

        def getAgencyID(self):
                """ disclosr agency id """
                if self.agencyID is None:
                        self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
                return self.agencyID

        def getURL(self):
                """ disclog URL """
                if self.disclogURL is None:
                        agency = scrape.agencydb.get(self.getAgencyID())
                        self.disclogURL = agency['FOIDocumentsURL']
                return self.disclogURL

        @abc.abstractmethod
        def doScrape(self):
                """ do the scraping """
                return

        @abc.abstractmethod
        def getDescription(self, content, entry, doc):
                """ get description """
                return


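# How a concrete scraper picks up its configuration: the agency id defaults to
# the running script's filename (via sys.argv[0]), and the disclosure log URL
# is read from that agency record's 'FOIDocumentsURL' field in the CouchDB
# agency database. So a per-agency script saved as, say, "exampleagency.py"
# (a hypothetical name, for illustration only) needs no extra setup here.

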
class GenericRSSDisclogScraper(GenericDisclogScraper):

        def doScrape(self):
                foidocsdb = scrape.couch['disclosr-foidocuments']
                (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
                feed = feedparser.parse(content)
                for entry in feed.entries:
                        #print entry
                        print entry.id
                        hash = scrape.mkhash(entry.id)
                        #print hash
                        doc = foidocsdb.get(hash)
                        #print doc
                        if doc is None:
                                print "saving"
                                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                                        "date": edate, "title": entry.title}
                                # getDescription() takes (content, entry, doc); for RSS the feed
                                # entry serves as both the content and the entry
                                self.getDescription(entry, entry, doc)
                                foidocsdb.save(doc)
                        else:
                                print "already saved"

        def getDescription(self, content, entry, doc):
                """ get description from rss entry """
                doc.update({'description': content.summary})
                return


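# A minimal sketch of a per-agency RSS scraper script built on the class above
# (the module name "genericScrapers" and the script layout are assumptions for
# illustration, not a prescribed interface). Because doScrape() and
# getDescription() are already implemented, a concrete scraper only needs to
# subclass and run:
#
#     import genericScrapers
#
#     class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
#             pass
#
#     if __name__ == '__main__':
#             ScraperImplementation().doScrape()

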
class GenericOAICDisclogScraper(GenericDisclogScraper):
        __metaclass__ = abc.ABCMeta

        @abc.abstractmethod
        def getColumns(self, columns):
                """ rearranges columns if required """
                return

        def getColumnCount(self):
                return 5

        def getDescription(self, content, entry, doc):
                """ get description from a table cell """
                descriptiontxt = ""
                for string in content.stripped_strings:
                        descriptiontxt = descriptiontxt + " \n" + string
                doc.update({'description': descriptiontxt})
                return

        def doScrape(self):
                cal = pdt.Calendar()
                foidocsdb = scrape.couch['disclosr-foidocuments']
                (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
                if content is not None:
                        if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                                soup = BeautifulSoup(content)
                                for row in soup.table.find_all('tr'):
                                        columns = row.find_all('td')
                                        if len(columns) == self.getColumnCount():
                                                (id, date, description, title, notes) = self.getColumns(columns)
                                                print id.string
                                                # dedupe on the disclog URL plus the row's document id
                                                hash = scrape.mkhash(url + id.string)
                                                # gather any document links in this row
                                                links = []
                                                for atag in row.find_all("a"):
                                                        if atag.has_attr('href'):
                                                                links.append(scrape.fullurl(url, atag['href']))
                                                doc = foidocsdb.get(hash)
                                                if doc is None:
                                                        print "saving"
                                                        # try parsedatetime's fuzzy parse first,
                                                        # then fall back to the "%d %B %Y" format
                                                        dtresult = cal.parseDateText(date.string)
                                                        if len(dtresult) == 2:
                                                                (dtdate, dtr) = dtresult
                                                                print dtdate
                                                                edate = str(dtdate[0]) + '-' + str(dtdate[1]) + '-' + str(dtdate[2])
                                                        else:
                                                                edate = datetime.strptime(date.string, "%d %B %Y").strftime("%Y-%m-%d")
                                                        doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string,
                                                                "date": edate, "title": title.string}
                                                        self.getDescription(description, row, doc)
                                                        if links != []:
                                                                doc.update({'links': links})
                                                        if notes is not None:
                                                                doc.update({'notes': notes.string})
                                                        foidocsdb.save(doc)
                                                else:
                                                        print "already saved " + hash
                                        elif len(row.find_all('th')) == self.getColumnCount():
                                                print "header row"
                                        else:
                                                print "ERROR number of columns incorrect"
                                                print row
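
# A hypothetical sketch of a table-based (OAIC-style) scraper built on the
# class above, shown for illustration only; the module name "genericScrapers"
# and the column order in getColumns() are assumptions, not any particular
# agency's layout. getColumns() must map a row's cells onto the
# (id, date, description, title, notes) tuple that doScrape() expects:
#
#     import genericScrapers
#
#     class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
#             def getColumns(self, columns):
#                     (id, date, title, description, notes) = columns
#                     return (id, date, description, title, notes)
#
#     if __name__ == '__main__':
#             ScraperImplementation().doScrape()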