beginnings rss scraper


Former-commit-id: 78b7f5ee0c86281368da5eb0ed92ce1ad9cc575d

[submodule "couchdb/couchdb-lucene"] [submodule "couchdb/couchdb-lucene"]
path = couchdb/couchdb-lucene path = couchdb/couchdb-lucene
url = https://github.com/rnewson/couchdb-lucene.git url = https://github.com/rnewson/couchdb-lucene.git
[submodule "couchdb/settee"] [submodule "couchdb/settee"]
path = couchdb/settee path = couchdb/settee
url = https://github.com/inadarei/settee.git url = https://github.com/inadarei/settee.git
[submodule "lib/php-diff"] [submodule "lib/php-diff"]
path = lib/php-diff path = lib/php-diff
url = https://github.com/chrisboulton/php-diff.git url = https://github.com/chrisboulton/php-diff.git
[submodule "lib/Requests"] [submodule "lib/Requests"]
path = lib/Requests path = lib/Requests
url = https://github.com/rmccue/Requests.git url = https://github.com/rmccue/Requests.git
[submodule "js/flotr2"] [submodule "js/flotr2"]
path = js/flotr2 path = js/flotr2
url = https://github.com/HumbleSoftware/Flotr2.git url = https://github.com/HumbleSoftware/Flotr2.git
[submodule "lib/phpquery"] [submodule "lib/phpquery"]
path = lib/phpquery path = lib/phpquery
url = https://github.com/TobiaszCudnik/phpquery.git url = https://github.com/TobiaszCudnik/phpquery.git
[submodule "js/sigma"] [submodule "js/sigma"]
path = js/sigma path = js/sigma
url = https://github.com/jacomyal/sigma.js.git url = https://github.com/jacomyal/sigma.js.git
[submodule "js/bubbletree"] [submodule "js/bubbletree"]
path = js/bubbletree path = js/bubbletree
url = https://github.com/okfn/bubbletree.git url = https://github.com/okfn/bubbletree.git
[submodule "lib/querypath"] [submodule "lib/querypath"]
path = lib/querypath path = lib/querypath
url = https://github.com/technosophos/querypath.git url = https://github.com/technosophos/querypath.git
[submodule "lib/amon-php"] [submodule "lib/amon-php"]
path = lib/amon-php path = lib/amon-php
url = https://github.com/martinrusev/amon-php.git url = https://github.com/martinrusev/amon-php.git
  [submodule "documents/lib/parsedatetime"]
  path = documents/lib/parsedatetime
  url = git://github.com/bear/parsedatetime.git
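# Note: these vendored dependencies are ordinary git submodules; after cloning,
# they can be fetched with the standard command:
#   git submodule update --init --recursive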
   
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
import parsedatetime as pdt
from time import mktime
from datetime import datetime
import feedparser
import abc


class GenericRSSDisclogScraper(object):
    """ Scrapes an agency FOI disclosure log published as an RSS/Atom feed. """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getAgencyID(self):
        """ disclosr agency id """
        return

    @abc.abstractmethod
    def getURL(self):
        """ disclog URL """
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(),
            "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            hash = scrape.mkhash(entry.link)
            doc = foidocsdb.get(hash)
            if doc is None:
                print "saving"
                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                # use the hash as the CouchDB _id so the get() above finds it on the next run
                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                       "date": edate, "description": entry.summary, "title": entry.title}
                foidocsdb.save(doc)
            else:
                print "already saved"
   
class GenericOAICDisclogScraper(object):
    """ Scrapes an agency FOI disclosure log published as a five-column HTML table. """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getAgencyID(self):
        """ disclosr agency id """
        return

    @abc.abstractmethod
    def getURL(self):
        """ disclog URL """
        return

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def doScrape(self):
        cal = pdt.Calendar()
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(),
            "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                for row in soup.table.find_all('tr'):
                    columns = row.find_all('td')
                    if len(columns) == 5:
                        (id, date, description, title, notes) = self.getColumns(columns)
                        print id.string
                        hash = scrape.mkhash(url + id.string)
                        links = []
                        for atag in row.find_all("a"):
                            if atag.has_attr('href'):
                                links.append(scrape.fullurl(url, atag['href']))
                        doc = foidocsdb.get(hash)
                        descriptiontxt = ""
                        for string in description.stripped_strings:
                            descriptiontxt = descriptiontxt + " \n" + string
                        if doc is None:
                            print "saving"
                            dtresult = cal.parseDateText(date.string)
                            if len(dtresult) == 2:
                                (dtdate, dtr) = dtresult
                                print dtdate
                                # zero-pad so the dates sort correctly in the byDate view
                                edate = "%04d-%02d-%02d" % (dtdate[0], dtdate[1], dtdate[2])
                            else:
                                edate = ""
                            # use the hash as the CouchDB _id so the get() above finds it on the next run
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(),
                                   "links": links, 'docID': id.string, "date": edate,
                                   "description": descriptiontxt, "title": title.string,
                                   "notes": notes.string}
                            foidocsdb.save(doc)
                        else:
                            print "already saved"
                    elif len(row.find_all('th')) == 5:
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
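
# Illustrative sketch only: a concrete table scraper subclasses GenericOAICDisclogScraper
# and maps the five <td> cells of each row into (id, date, description, title, notes).
# The agency id, URL and source column order below are hypothetical placeholders.
class ExampleAgencyOAICScraper(GenericOAICDisclogScraper):
    def getAgencyID(self):
        return "exampleagencyid"  # hypothetical disclosr agency id

    def getURL(self):
        return "http://www.example.gov.au/about-us/foi/disclosure-log"  # hypothetical disclog page

    def getColumns(self, columns):
        # hypothetical source table ordered: id, date, title, description, notes
        (id, date, title, description, notes) = columns
        return (id, date, description, title, notes)

# Typical usage would be something like:
#   ExampleAgencyOAICScraper().doScrape()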
   
<?php
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
?>
<?php

$agenciesdb = $server->get_db('disclosr-agencies');

$idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
    $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00'), true)->rows;
    if ($rows) {
        foreach ($rows as $row) {
            //print_r($row);
            displayLogEntry($row, $idtoname);
            /* 1/1/11 title (Dept dfggdfgdf)
               description:
               source link:
               documents:
               #1 title link */
        }
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
include_footer_documents();
?>
   
 
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import mimetypes
import urllib
import urlparse
   
def mkhash(input):
    return hashlib.md5(input).hexdigest().encode("utf-8")
   
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.
    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url

    # turn it into Unicode
    #try:
    #    url = unicode(url, 'utf-8')
    #except UnicodeDecodeError:
    #    return ''  # bad UTF-8 chars in URL

    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed

    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)

    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    netloc = netloc.encode('idna')

    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')

    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')

    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))

    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]
   
def fullurl(url, href):
    href = href.replace(" ", "%20")
    href = re.sub('#.*$', '', href)
    return urljoin(url, href)
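
# Illustrative usage of the two URL helpers above; the URLs are hypothetical examples,
# not values taken from this codebase:
#   >>> canonurl('www.example.gov.au/foi log')
#   'http://www.example.gov.au/foi%20log'
#   >>> canonurl('not a url')
#   ''
#   >>> fullurl('http://www.example.gov.au/foi/', 'docs/report 1.pdf#top')
#   'http://www.example.gov.au/foi/docs/report%201.pdf'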
   
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
   
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    url = canonurl(url)
    hash = mkhash(url)
    print "Fetching %s" % url
    # reject empty or non-http URLs before building a request for them
    if url is None or url == "" or url.startswith("mailto") or url.startswith("javascript") or url.startswith("#"):
        print "Not a valid HTTP url"
        return (None, None, None)
    req = urllib2.Request(url)
    doc = docsdb.get(hash)
    if doc is None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName}
    else:
        if ('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 60 * 24 * 14:  # scraped within the last 14 days
print "Uh oh, trying to scrape URL again too soon!" print "Uh oh, trying to scrape URL again too soon!"
last_attachment_fname = doc["_attachments"].keys()[-1] last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc,last_attachment_fname) last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
return (doc['url'],doc['mime_type'],last_attachment.read()) return (doc['url'],doc['mime_type'],last_attachment.read())
if scrape_again == False: if scrape_again == False:
print "Not scraping this URL again as requested" print "Not scraping this URL again as requested"
return (None,None,None) return (None,None,None)
   
time.sleep(3) # wait 3 seconds to give webserver time to recover time.sleep(3) # wait 3 seconds to give webserver time to recover
req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
#if there is a previous version stored in couchdb, load caching helper tags #if there is a previous version stored in couchdb, load caching helper tags
if doc.has_key('etag'): if doc.has_key('etag'):
req.add_header("If-None-Match", doc['etag']) req.add_header("If-None-Match", doc['etag'])
if doc.has_key('last_modified'): if doc.has_key('last_modified'):
req.add_header("If-Modified-Since", doc['last_modified']) req.add_header("If-Modified-Since", doc['last_modified'])
opener = urllib2.build_opener(NotModifiedHandler()) opener = urllib2.build_opener(NotModifiedHandler())
try: try:
url_handle = opener.open(req) url_handle = opener.open(req)
doc['url'] = url_handle.geturl() # may have followed a redirect to a new url doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
headers = url_handle.info() # the addinfourls have the .info() too headers = url_handle.info() # the addinfourls have the .info() too
doc['etag'] = headers.getheader("ETag") doc['etag'] = headers.getheader("ETag")
doc['last_modified'] = headers.getheader("Last-Modified") doc['last_modified'] = headers.getheader("Last-Modified")
doc['date'] = headers.getheader("Date") doc['date'] = headers.getheader("Date")
doc['page_scraped'] = time.time() doc['page_scraped'] = time.time()
doc['web_server'] = headers.getheader("Server") doc['web_server'] = headers.getheader("Server")
doc['via'] = headers.getheader("Via") doc['via'] = headers.getheader("Via")
doc['powered_by'] = headers.getheader("X-Powered-By") doc['powered_by'] = headers.getheader("X-Powered-By")
doc['file_size'] = headers.getheader("Content-Length") doc['file_size'] = headers.getheader("Content-Length")
content_type = headers.getheader("Content-Type") content_type = headers.getheader("Content-Type")
if content_type != None: if content_type != None:
doc['mime_type'] = content_type.split(";")[0] doc['mime_type'] = content_type.split(";")[0]
else: else:
(type,encoding) = mimetypes.guess_type(url) (type,encoding) = mimetypes.guess_type(url)
doc['mime_type'] = type doc['mime_type'] = type
if hasattr(url_handle, 'code'): if hasattr(url_handle, 'code'):
if url_handle.code == 304: if url_handle.code == 304:
print "the web page has not been modified" print "the web page has not been modified"
return (None,None,None) return (None,None,None)
else: else:
content = url_handle.read() content = url_handle.read()
docsdb.save(doc) docsdb.save(doc)
doc = docsdb.get(hash) # need to get a _rev doc = docsdb.get(hash) # need to get a _rev
docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
return (doc['url'], doc['mime_type'], content) return (doc['url'], doc['mime_type'], content)
#store as attachment epoch-filename #store as attachment epoch-filename
except urllib2.URLError as e: except urllib2.URLError as e:
error = "" error = ""
if hasattr(e, 'reason'): if hasattr(e, 'reason'):