# -*- coding: utf-8 -*-
# YAML-tagger:
# Type: university
# Status: unfinished
# Name: Norges teknisk-naturvitenskapelige universitet
# Format: HTML
# Datatype:
# Vendor:
# Run: daily
#
# Previously, PDFs were published
import scraperwiki
import lxml.html
import urlparse
import urllib2
# Make sure ScraperWiki believes this URL is the source of this dataset
scraperwiki.scrape("http://www.ntnu.no/aktuelt/offentlig-journal")
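# Shared helper code pulled in from other ScraperWiki scrapers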
lazycache=scraperwiki.swimport('lazycache')
postlistelib=scraperwiki.swimport('postliste-python-lib')
agency = 'Norges teknisk-naturvitenskapelige universitet'
def report_errors(errors):
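    """Print any collected errors and raise ValueError if there were any."""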
    if errors:
        print "Errors:"
        for e in errors:
            print e
        raise ValueError(str(len(errors)) + " errors detected")

def out_of_cpu(arg, spent, hard, soft):
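    """Callback handed to postlistelib.exit_if_no_cpu_left(); reports the
    errors collected so far when the CPU quota is about to run out."""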
    report_errors(arg)

def process_pdf(parser, pdfurl, errors):
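    """Fetch one journal PDF and queue its pages for parsing, collecting
    any download or parse problems in the errors list."""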
    postlistelib.exit_if_no_cpu_left(0, out_of_cpu, errors)
    try:
        pdfcontent = scraperwiki.scrape(pdfurl)
        parser.preprocess(pdfurl, pdfcontent)
        pdfcontent = None
    except ValueError as e:  # Some PDFs can not be parsed; this should be investigated
        print "PDF format problem"
        errors.append(e)
    except IndexError as e:
        errors.append(e)
    except urllib2.HTTPError as e:
        errors.append(str(e) + " " + pdfurl)

def process_page_queue(parser, errors):
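    """Parse all queued PDF pages, noting if the CPU quota cut the run short."""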
    try:
        parser.process_pages()
        postlistelib.exit_if_no_cpu_left(0, out_of_cpu, errors)
    except scraperwiki.CPUTimeExceededError:
        errors.append("Processing pages interrupted")

def process_journal_pdfs(parser, listurl, errors):
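    """Find every PDF linked from the journal listing page and process
    those that have not been scraped already."""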
# print "Finding PDFs on " + listurl
# u = urllib.parse.urlparse(listurl)
html = scraperwiki.scrape(listurl)
root = lxml.html.fromstring(html)
html = None
for ahref in root.cssselect("ul a"):
href = ahref.attrib['href']
url = urlparse.urljoin(listurl, href)
if -1 != href.find("file://") or -1 == url.find(".pdf"):
# print "Skipping non-http URL " + url
continue
if parser.is_already_scraped(url):
True
# print "Skipping already scraped " + url
else:
# print "Will process " + url
process_pdf(parser, url, errors)
def test_small_pdfs(parser):
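    """Manual test helper: parse a couple of known, small PDFs, then exit."""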
    # Test with some smaller PDFs
    errors = []
    process_pdf(parser, "http://www.ntnu.no/offjour/2012-06.25.pdf", errors)
    process_pdf(parser, "http://www.ntnu.no/offjour/2012-06.13.pdf", errors)  # Strange format?
    process_page_queue(parser, errors)
    report_errors(errors)
    exit(0)

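# Main entry point: scrape the current public journal listing and parse
# every PDF found there.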
errors = []
parser = postlistelib.PDFJournalParser(agency=agency)
#test_small_pdfs(parser)
process_journal_pdfs(parser, "http://www.ntnu.no/aktuelt/offentlig-journal", errors)
process_page_queue(parser, errors)
report_errors(errors)