2 Commits

Author SHA1 Message Date
deb775d243 Port it to python3 2023-11-23 09:43:42 -05:00
8ebc4de1a7 Re-attempt migration with modernize 2023-11-22 14:48:24 -05:00
31 changed files with 245 additions and 184 deletions

View File

@@ -1,7 +1,7 @@
 export venv_dir="env"
 export spec=""
-export venv="`which virtualenv-2.7`"
-if [ x"$venv" = x ]; then
-    export venv="`which virtualenv`"
-    export spec="-p 2.7"
-fi
+export venv="/usr/bin/virtualenv"
+#if [ x"$venv" = x ]; then
+#    export venv="`which virtualenv`"
+#    export spec="-p 2.7"
+#fi
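Note: the new line trades the PATH lookup for a hard-coded /usr/bin/virtualenv. If the dynamic lookup is ever wanted back, the same probe is available from Python's standard library; a minimal sketch (illustrative only, not part of this commit):

import shutil

# behaves like `which virtualenv`, falling back to the path pinned above
venv = shutil.which("virtualenv") or "/usr/bin/virtualenv"
print(venv)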

View File

@@ -1,11 +1,13 @@
 pytz>=2012
-Flask==1.0.2
-Babel==1.3
-Flask-Babel==0.9
-Flask-Caching==1.4.0
-Jinja2==2.10
-Pygments==1.6
-python-ctags
-docutils==0.11
+Flask==3.0.0
+Babel==2.13.1
+Flask-Babel==4.0.0
+Flask-Caching==2.1.0
+Jinja2==3.1.2
+Pygments>=1.6
+python-ctags3
+docutils==0.20.1
 gunicorn==0.17.2
-werkzeug==0.16.1
+werkzeug>=0.16.1
+markupsafe>=2.0.1
+six
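Note: these pins jump several major versions at once, which is why later hunks rewrite imports too: Flask no longer re-exports safe_join (it lives in werkzeug.utils), and Flask-Babel 3+ dropped the localeselector-style decorators. A quick sanity check under the new pins (a sketch, not part of the repo):

# run inside the new virtualenv to confirm the moved APIs resolve
from werkzeug.utils import safe_join   # flask.safe_join is gone in modern Flask
from flask_babel import Babel          # decorator API removed in Flask-Babel 3+

print(safe_join("/srv/app", "static/logo.png"))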

View File

@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
-from flask import Flask, request, g, redirect, url_for, abort, render_template, send_from_directory, safe_join
+from __future__ import absolute_import
+from flask import Flask, request, g, redirect, url_for, abort, render_template, send_from_directory
+from werkzeug.utils import safe_join
 try:
     from flaskext.babel import Babel
 except ImportError:
@@ -140,7 +142,7 @@ cache = Cache(app, config=CACHE_CONFIG)
 #################
 # Babel selectors
 
-@babel.localeselector
+#@babel.localeselector
 def get_locale():
     # If viewing specs, require English
     if request.path.startswith('/spec'):
@@ -152,7 +154,7 @@ def get_locale():
     # header the browser transmits. The best match wins.
     return request.accept_languages.best_match(SUPPORTED_LANGS)
 
-@babel.domainselector
+#@babel.domainselector
 def get_domains():
     domains = []
     frags = request.path.split('/', 2)
@@ -167,6 +169,7 @@ def get_domains():
         domains.append(DEFAULT_GETTEXT_DOMAIN)
     return domains
 
+babel.init_app(app, locale_selector=get_locale, default_domain=get_domains)
 
 ##########################
 # Hooks - helper functions
@@ -213,7 +216,7 @@ def detect_theme():
     theme = 'duck'
     if 'style' in request.cookies:
         theme = request.cookies['style']
-    if 'theme' in request.args.keys():
+    if 'theme' in list(request.args.keys()):
         theme = request.args['theme']
     # TEMPORARY: enable external themes
     # TODO: Remove this (and the corresponding lines in global/layout.html
@@ -263,5 +266,5 @@ def server_error(error):
     return render_template('global/error_500.html'), 500
 
 # Import these to ensure they get loaded
-import templatevars
-import urls
+from . import templatevars
+from . import urls
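Note: the commented-out decorators plus the new babel.init_app(...) call follow the Flask-Babel 3+/4 API, where selector functions become keyword arguments. A minimal sketch of the documented pattern (SUPPORTED_LANGS here is an illustrative stand-in):

from flask import Flask, request
from flask_babel import Babel

SUPPORTED_LANGS = ['en', 'de', 'fr']   # illustrative
app = Flask(__name__)

def get_locale():
    return request.accept_languages.best_match(SUPPORTED_LANGS)

babel = Babel()
babel.init_app(app, locale_selector=get_locale)

One caveat: default_domain in Flask-Babel 4 appears to expect a domain (a name or Domain object) rather than a callable, so passing get_domains as in the hunk above may deserve a second look.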

View File

@@ -6,14 +6,19 @@
    Based on perl code by Eddie Kohler; heavily modified.
 """
 
+from __future__ import absolute_import
+from __future__ import print_function
 import cStringIO
 import re
 import sys
 import os
 
-import config
-import rank
+from . import config
+from . import rank
+from six.moves import map
+from six.moves import range
+from six.moves import zip
 
 __all__ = [ 'ParseError', 'BibTeX', 'BibTeXEntry', 'htmlize',
             'ParsedAuthor', 'FileIter', 'Parser', 'parseFile',
@@ -66,7 +71,7 @@ class BibTeX:
         """Add a BibTeX entry to this file."""
         k = ent.key
         if self.byKey.get(ent.key.lower()):
-            print >> sys.stderr, "Already have an entry named %s"%k
+            print("Already have an entry named %s"%k, file=sys.stderr)
             return
         self.entries.append(ent)
         self.byKey[ent.key.lower()] = ent
@@ -79,7 +84,7 @@ class BibTeX:
             try:
                 cr = self.byKey[ent['crossref'].lower()]
             except KeyError:
-                print "No such crossref: %s"% ent['crossref']
+                print("No such crossref: %s"% ent['crossref'])
                 break
             if seen.get(cr.key):
                 raise ParseError("Circular crossref at %s" % ent.key)
@@ -87,12 +92,12 @@ class BibTeX:
             del ent.entries['crossref']
             if cr.entryLine < ent.entryLine:
-                print "Warning: crossref %s used after declaration"%cr.key
+                print("Warning: crossref %s used after declaration"%cr.key)
             for k in cr.entries.keys():
-                if ent.entries.has_key(k):
-                    print "ERROR: %s defined both in %s and in %s"%(
-                        k,ent.key,cr.key)
+                if k in ent.entries:
+                    print("ERROR: %s defined both in %s and in %s"%(
+                        k,ent.key,cr.key))
                 else:
                     ent.entries[k] = cr.entries[k]
@@ -105,7 +110,7 @@ class BibTeX:
             rk = "title"
         for ent in self.entries:
-            if ent.type in config.OMIT_ENTRIES or not ent.has_key(rk):
+            if ent.type in config.OMIT_ENTRIES or rk not in ent:
                 ent.check()
                 del self.byKey[ent.key.lower()]
             else:
@@ -130,7 +135,7 @@ def buildAuthorTable(entries):
     for e in entries:
         for author in e.parsedAuthor:
-            if result.has_key(author):
+            if author in result:
                 continue
 
             c = author
@@ -143,14 +148,14 @@ def buildAuthorTable(entries):
     if 0:
         for a,c in result.items():
             if a != c:
-                print "Collapsing authors: %s => %s" % (a,c)
+                print("Collapsing authors: %s => %s" % (a,c))
     if 0:
-        print parseAuthor("Franz Kaashoek")[0].collapsesTo(
-            parseAuthor("M. Franz Kaashoek")[0])
-        print parseAuthor("Paul F. Syverson")[0].collapsesTo(
-            parseAuthor("Paul Syverson")[0])
-        print parseAuthor("Paul Syverson")[0].collapsesTo(
-            parseAuthor("Paul F. Syverson")[0])
+        print(parseAuthor("Franz Kaashoek")[0].collapsesTo(
+            parseAuthor("M. Franz Kaashoek")[0]))
+        print(parseAuthor("Paul F. Syverson")[0].collapsesTo(
+            parseAuthor("Paul Syverson")[0]))
+        print(parseAuthor("Paul Syverson")[0].collapsesTo(
+            parseAuthor("Paul F. Syverson")[0]))
 
     return result
@@ -221,7 +226,7 @@ def splitEntriesByAuthor(entries):
                 htmlResult[sortkey] = secname
             result.setdefault(sortkey, []).append(ent)
 
-    sortnames = result.keys()
+    sortnames = list(result.keys())
     sortnames.sort()
     sections = [ (htmlResult[n], result[n]) for n in sortnames ]
     return sections, url_map
@@ -255,13 +260,13 @@ def sortEntriesByDate(entries):
             monthname = match.group(1)
             mon = MONTHS.index(monthname)
         except ValueError:
-            print "Unknown month %r in %s"%(ent.get("month"), ent.key)
+            print("Unknown month %r in %s"%(ent.get("month"), ent.key))
             mon = 0
 
         try:
             date = int(ent['year'])*13 + mon
         except KeyError:
-            print "ERROR: No year field in %s"%ent.key
+            print("ERROR: No year field in %s"%ent.key)
             date = 10000*13
         except ValueError:
             date = 10000*13
@@ -286,7 +291,7 @@ class BibTeXEntry:
     def get(self, k, v=None):
         return self.entries.get(k,v)
     def has_key(self, k):
-        return self.entries.has_key(k)
+        return k in self.entries
     def __getitem__(self, k):
         return self.entries[k]
     def __setitem__(self, k, v):
@@ -318,7 +323,7 @@ class BibTeXEntry:
         else:
             df = DISPLAYED_FIELDS
         for f in df:
-            if not self.entries.has_key(f):
+            if f not in self.entries:
                 continue
             v = self.entries[f]
             if v.startswith("<span class='bad'>"):
@@ -330,7 +335,7 @@ class BibTeXEntry:
                 d.append("%%%%% "+("ERROR: Non-ASCII characters: '%r'\n"%np))
             d.append(" ")
             v = v.replace("&", "&amp;")
-            if invStrings.has_key(v):
+            if v in invStrings:
                 s = "%s = %s,\n" %(f, invStrings[v])
             else:
                 s = "%s = {%s},\n" % (f, v)
@@ -359,7 +364,7 @@ class BibTeXEntry:
            none."""
         errs = self._check()
         for e in errs:
-            print e
+            print(e)
         return not errs
 
     def _check(self):
@@ -396,7 +401,7 @@ class BibTeXEntry:
                not self['booktitle'].startswith("{Proceedings of"):
                 errs.append("ERROR: %s's booktitle (%r) doesn't start with 'Proceedings of'" % (self.key, self['booktitle']))
 
-        if self.has_key("pages") and not re.search(r'\d+--\d+', self['pages']):
+        if "pages" in self and not re.search(r'\d+--\d+', self['pages']):
             errs.append("ERROR: Misformed pages in %s"%self.key)
 
         if self.type == 'proceedings':
@@ -551,8 +556,8 @@ class BibTeXEntry:
         cache_section = self.get('www_cache_section', ".")
         if cache_section not in config.CACHE_SECTIONS:
             if cache_section != ".":
-                print >>sys.stderr, "Unrecognized cache section %s"%(
-                    cache_section)
+                print("Unrecognized cache section %s"%(
+                    cache_section), file=sys.stderr)
             cache_section="."
 
         for key, name, ext in (('www_abstract_url', 'abstract','abstract'),
@@ -766,13 +771,13 @@ class ParsedAuthor:
             short = o.first; long = self.first
 
         initials_s = "".join([n[0] for n in short])
-        initials_l = "".join([n[0] for n in long])
+        initials_l = "".join([n[0] for n in int])
         idx = initials_l.find(initials_s)
         if idx < 0:
             return self
 
-        n = long[:idx]
+        n = int[:idx]
         for i in range(idx, idx+len(short)):
-            a = long[i]; b = short[i-idx]
+            a = int[i]; b = short[i-idx]
             if a == b:
                 n.append(a)
             elif len(a) == 2 and a[1] == '.' and a[0] == b[0]:
@@ -781,7 +786,7 @@ class ParsedAuthor:
                 n.append(a)
             else:
                 return self
-        n += long[idx+len(short):]
+        n += int[idx+len(short):]
 
         if n == self.first:
             return self
@@ -842,7 +847,7 @@ def _split(s,w=79,indent=8):
     first = 1
     indentation = ""
     while len(s) > w:
-        for i in xrange(w-1, 20, -1):
+        for i in range(w-1, 20, -1):
             if s[i] == ' ':
                 r.append(indentation+s[:i])
                 s = s[i+1:]
@@ -866,7 +871,7 @@ class FileIter:
         if string:
             file = cStringIO.StringIO(string)
         if file:
-            it = iter(file.xreadlines())
+            it = iter(file)
         self.iter = it
         assert self.iter
         self.lineno = 0
@@ -880,7 +885,7 @@ def parseAuthor(s):
     try:
         return _parseAuthor(s)
     except:
-        print >>sys.stderr, "Internal error while parsing author %r"%s
+        print("Internal error while parsing author %r"%s, file=sys.stderr)
         raise
 
 def _parseAuthor(s):
@@ -891,7 +896,7 @@ def _parseAuthor(s):
     while s:
         s = s.strip()
         bracelevel = 0
-        for i in xrange(len(s)):
+        for i in range(len(s)):
             if s[i] == '{':
                 bracelevel += 1
             elif s[i] == '}':
@@ -947,8 +952,8 @@ def _parseAuthor(s):
     return parsedAuthors
 
-ALLCHARS = "".join(map(chr,range(256)))
-PRINTINGCHARS = "\t\n\r"+"".join(map(chr,range(32, 127)))
+ALLCHARS = "".join(map(chr,list(range(256))))
+PRINTINGCHARS = "\t\n\r"+"".join(map(chr,list(range(32, 127))))
 LC_CHARS = "abcdefghijklmnopqrstuvwxyz"
 SV_DELCHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                "abcdefghijklmnopqrstuvwxyz"
@@ -1049,7 +1054,7 @@ class Parser:
                     continue
                 data.append(line)
                 data.append(" ")
-                line = it.next()
+                line = next(it)
             self.litStringLine = 0
         elif line[0] == '{':
             bracelevel += 1
@@ -1076,13 +1081,13 @@ class Parser:
                     #print bracelevel, "C", repr(line)
                     data.append(line)
                     data.append(" ")
-                    line = it.next()
+                    line = next(it)
             elif line[0] == '#':
-                print >>sys.stderr, "Weird concat on line %s"%it.lineno
+                print("Weird concat on line %s"%it.lineno, file=sys.stderr)
             elif line[0] in "},":
                 if not data:
-                    print >>sys.stderr, "No data after field on line %s"%(
-                        it.lineno)
+                    print("No data after field on line %s"%(
+                        it.lineno), file=sys.stderr)
             else:
                 m = RAW_DATA_RE.match(line)
                 if m:
@@ -1170,7 +1175,7 @@ class Parser:
         else:
             key = v[0]
             d = {}
-            for i in xrange(1,len(v),2):
+            for i in range(1,len(v),2):
                 d[v[i].lower()] = v[i+1]
             ent = BibTeXEntry(self.curEntType, key, d)
             ent.entryLine = self.entryLine
@@ -1197,11 +1202,11 @@ class Parser:
     def _parse(self):
         it = self.fileiter
-        line = it.next()
+        line = next(it)
         while 1:
             # Skip blank lines.
             while not line or line.isspace() or OUTER_COMMENT_RE.match(line):
-                line = it.next()
+                line = next(it)
             # Get the first line of an entry.
             m = ENTRY_BEGIN_RE.match(line)
             if m:
@@ -1215,7 +1220,7 @@ class Parser:
 def _advance(it,line):
     while not line or line.isspace() or COMMENT_RE.match(line):
-        line = it.next()
+        line = next(it)
     return line
 
 # Matches a comment line outside of an entry.
@@ -1265,5 +1270,5 @@ if __name__ == '__main__':
     for e in r.entries:
         if e.type in ("proceedings", "journal"): continue
-        print e.to_html()
+        print(e.to_html())
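Note: two Python 2 leftovers survive this file. The FileIter hunks still read import cStringIO on both sides, and in ParsedAuthor.collapsesTo the fixer appears to have renamed the local variable long to the builtin int (so int[:idx] would raise TypeError at runtime). For the first, the portable spelling is the usual try/except import; a sketch:

try:
    from cStringIO import StringIO   # Python 2
except ImportError:
    from io import StringIO          # Python 3

buf = StringIO("line one\nline two\n")
for line in buf:                     # file-like: iterating yields lines
    print(line, end="")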

View File

@@ -1,5 +1,6 @@
 # Copyright 2003-2006, Nick Mathewson. See LICENSE for licensing info.
+from __future__ import absolute_import
 import re
 
 _KEYS = [ "ALL_TAGS",
@@ -19,7 +20,7 @@ del _k
 
 def load(cfgFile):
     mod = {}
-    execfile(cfgFile, mod)
+    exec(compile(open(cfgFile, "rb").read(), cfgFile, 'exec'), mod)
     for _k in _KEYS:
         try:
             globals()[_k]=mod[_k]
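Note: exec(compile(open(cfgFile, "rb").read(), cfgFile, 'exec'), mod) is 2to3's mechanical replacement for execfile() and never closes the file handle. A tidier equivalent, sketched:

def load(cfg_file):
    namespace = {}
    with open(cfg_file, "rb") as f:
        # passing cfg_file to compile() keeps tracebacks pointing at the config
        exec(compile(f.read(), cfg_file, "exec"), namespace)
    return namespace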

View File

@@ -8,6 +8,9 @@
    Based on the original C++ metaphone implementation.)
 """
 
+from __future__ import print_function
+from six.moves import map
+from six.moves import range
 
 TRIPLES = {
     'dge': 'j',
@@ -45,7 +48,7 @@ SINGLETONS = {
     'z': 's',
 }
 
-ALLCHARS = "".join(map(chr, range(256)))
+ALLCHARS = "".join(map(chr, list(range(256))))
 NONLCCHARS = "".join([c for c in ALLCHARS if not c.islower()])
 def metaphone(s):
     """Return the metaphone equivalent of a provided string"""
@@ -182,7 +185,7 @@ def metaphone(s):
     return "".join(result)
 
 def demo(a):
-    print a, "=>", metaphone(a)
+    print(a, "=>", metaphone(a))
 
 if __name__ == '__main__':
     demo("Nick. Mathewson")

View File

@@ -4,10 +4,12 @@
 # http://scholar.google.com/scholar?as_epq=
 
 # Take care of the caching setup
+from __future__ import absolute_import
+from __future__ import print_function
 cache_expire = 60*60*24*30 # 30 days
 
 # Checks
-import config
+from . import config
 import os
 import sys
 from os.path import exists, isdir, join, getmtime
@@ -32,8 +34,8 @@ def cache_folder():
     return r
 
 import re
-from urllib2 import urlopen, build_opener
-from urllib import quote
+from six.moves.urllib.request import urlopen, build_opener
+from six.moves.urllib.parse import quote
 from datetime import date
 import hashlib
@@ -64,21 +66,21 @@ def getPageForTitle(title, cache=True, update=True, save=True):
     # Access cache or network
     if exists(join(cache_folder(), md5h(url))) and cache:
-        return url, file(join(cache_folder(), md5h(url)),'r').read()
+        return url, open(join(cache_folder(), md5h(url)),'r').read()
     elif update:
-        print "Downloading rank for %r."%title
+        print("Downloading rank for %r."%title)
 
         # Make a custom user agent (so that we are not filtered by Google)!
         opener = build_opener()
         opener.addheaders = [('User-agent', 'Anon.Bib.0.1')]
-        print "connecting..."
+        print("connecting...")
         connection = opener.open(url)
-        print "reading"
+        print("reading")
         page = connection.read()
-        print "done"
+        print("done")
         if save:
-            file(join(cache_folder(), md5h(url)),'w').write(page)
+            open(join(cache_folder(), md5h(url)),'w').write(page)
         return url, page
     else:
         return url, None
@@ -140,20 +142,20 @@ def get_rank_html(title, years=None, base_url=".", update=True,
 def TestScholarFormat():
     # We need to ensure that Google Scholar does not change its page format under our feet
     # Use some cases to check if all is good
-    print "Checking google scholar formats..."
+    print("Checking google scholar formats...")
     stopAndGoCites = getCite("Stop-and-Go MIXes: Providing Probabilistic Anonymity in an Open System", False)[0]
     dragonCites = getCite("Mixes protected by Dragons and Pixies: an empirical study", False, save=False)[0]
 
     if stopAndGoCites in (0, None):
-        print """OOPS.\n
-It looks like Google Scholar changed their URL format or their output format.
-I went to count the cites for the Stop-and-Go MIXes paper, and got nothing."""
+        print("""OOPS.\n
+It looks like Google Scholar changed their URL format or their output format.
+I went to count the cites for the Stop-and-Go MIXes paper, and got nothing.""")
         sys.exit(1)
     if dragonCites != None:
-        print """OOPS.\n
-It looks like Google Scholar changed their URL format or their output format.
-I went to count the cites for a fictitious paper, and found some."""
+        print("""OOPS.\n
+It looks like Google Scholar changed their URL format or their output format.
+I went to count the cites for a fictitious paper, and found some.""")
         sys.exit(1)
 
 def urlIsUseless(u):
@@ -170,7 +172,7 @@ URLTYPES=[ "pdf", "ps", "txt", "ps_gz", "html" ]
 
 if __name__ == '__main__':
     # First download the bibliography file.
-    import BibTeX
+    from . import BibTeX
     suggest = False
     if sys.argv[1] == 'suggest':
         suggest = True
@@ -182,7 +184,7 @@ if __name__ == '__main__':
     bib = BibTeX.parseFile(config.MASTER_BIB)
     remove_old()
 
-    print "Downloading missing ranks."
+    print("Downloading missing ranks.")
     for ent in bib.entries:
         getCite(ent['title'], cache=True, update=True)
 
@@ -190,13 +192,13 @@ if __name__ == '__main__':
     for ent in bib.entries:
         haveOne = False
         for utype in URLTYPES:
-            if ent.has_key("www_%s_url"%utype):
+            if "www_%s_url"%utype in ent:
                 haveOne = True
                 break
         if haveOne:
             continue
-        print ent.key, "has no URLs given."
+        print(ent.key, "has no URLs given.")
         urls = [ u for u in getPaperURLs(ent['title']) if not urlIsUseless(u) ]
         for u in urls:
-            print "\t", u
+            print("\t", u)

View File

@@ -8,14 +8,17 @@
    cleaned up a little, and all the duplicate entries commented out.
 """
 
+from __future__ import absolute_import
+from __future__ import print_function
 import sys
 import re
+from six.moves import zip
 
 assert sys.version_info[:3] >= (2,2,0)
 
-import BibTeX
-import config
-import metaphone
+from . import BibTeX
+from . import config
+from . import metaphone
 
 _MPCACHE = {}
 
 def soundsLike(s1, s2):
@@ -168,16 +171,16 @@ class MasterBibTeX(BibTeX.BibTeX):
             matches = m2
 
         if not matches:
-            print "No match for %s"%e.key
+            print("No match for %s"%e.key)
         if matches[-1][1] is e:
-            print "%s matches for %s: OK."%(len(matches), e.key)
+            print("%s matches for %s: OK."%(len(matches), e.key))
         else:
-            print "%s matches for %s: %s is best!" %(len(matches), e.key,
-                                                     matches[-1][1].key)
+            print("%s matches for %s: %s is best!" %(len(matches), e.key,
+                                                     matches[-1][1].key))
         if len(matches) > 1:
             for g, m in matches:
-                print "%%%% goodness", g
-                print m
+                print("%%%% goodness", g)
+                print(m)
 
 def noteToURL(note):
@@ -202,7 +205,7 @@ def emit(f,ent):
     global all_ok
 
     errs = ent._check()
-    if master.byKey.has_key(ent.key.strip().lower()):
+    if ent.key.strip().lower() in master.byKey:
         errs.append("ERROR: Key collision with master file")
 
     if errs:
@@ -232,61 +235,61 @@ def emit(f,ent):
     if errs:
         all_ok = 0
     for e in errs:
-        print >>f, "%%%%", e
+        print("%%%%", e, file=f)
 
-    print >>f, ent.format(77, 4, v=1, invStrings=invStrings)
+    print(ent.format(77, 4, v=1, invStrings=invStrings), file=f)
 
 def emitKnown(f, ent, matches):
-    print >>f, "%% Candidates are:", ", ".join([e.key for g,e in matches])
-    print >>f, "%%"
-    print >>f, "%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%"))
+    print("%% Candidates are:", ", ".join([e.key for g,e in matches]), file=f)
+    print("%%", file=f)
+    print("%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%")), file=f)
 
 if __name__ == '__main__':
     if len(sys.argv) != 3:
-        print "reconcile.py expects 2 arguments"
+        print("reconcile.py expects 2 arguments")
         sys.exit(1)
 
     config.load(sys.argv[1])
 
-    print "========= Scanning master =========="
+    print("========= Scanning master ==========")
     master = MasterBibTeX()
     master = BibTeX.parseFile(config.MASTER_BIB, result=master)
     master.buildIndex()
 
-    print "========= Scanning new file ========"
+    print("========= Scanning new file ========")
     try:
         fn = sys.argv[2]
         input = BibTeX.parseFile(fn)
-    except BibTeX.ParseError, e:
-        print "Error parsing %s: %s"%(fn,e)
+    except BibTeX.ParseError as e:
+        print("Error parsing %s: %s"%(fn,e))
         sys.exit(1)
 
     f = open('tmp.bib', 'w')
-    keys = input.newStrings.keys()
+    keys = list(input.newStrings.keys())
     keys.sort()
     for k in keys:
        v = input.newStrings[k]
-        print >>f, "@string{%s = {%s}}"%(k,v)
+        print("@string{%s = {%s}}"%(k,v), file=f)
 
     invStrings = input.invStrings
 
     for e in input.entries:
         if not (e.get('title') and e.get('author')):
-            print >>f, "%%\n%%%% Not enough information to search for a match: need title and author.\n%%"
+            print("%%\n%%%% Not enough information to search for a match: need title and author.\n%%", file=f)
             emit(f, e)
             continue
 
         matches = master.includes(e, all=1)
         if not matches:
-            print >>f, "%%\n%%%% This entry is probably new: No match found.\n%%"
+            print("%%\n%%%% This entry is probably new: No match found.\n%%", file=f)
             emit(f, e)
         else:
-            print >>f, "%%"
-            print >>f, "%%%% Possible match found for this entry; max goodness",\
-                matches[-1][0], "\n%%"
+            print("%%", file=f)
+            print("%%%% Possible match found for this entry; max goodness",\
+                matches[-1][0], "\n%%", file=f)
            emitKnown(f, e, matches)
 
     if not all_ok:
-        print >>f, "\n\n\nErrors remain; not finished.\n"
+        print("\n\n\nErrors remain; not finished.\n", file=f)
 
     f.close()
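Note: keys = list(input.newStrings.keys()) followed by keys.sort() is the literal 2to3 translation; sorted() says the same thing in one step. A self-contained sketch:

new_strings = {"usenix04": "Proc. USENIX 2004", "ccs03": "Proc. CCS 2003"}  # illustrative
with open("tmp.bib", "w") as f:
    for k in sorted(new_strings):            # replaces keys() + sort()
        print("@string{%s = {%s}}" % (k, new_strings[k]), file=f)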

View File

@@ -3,8 +3,9 @@
 
 """Unit tests for anonbib."""
 
-import BibTeX
-import metaphone
+from __future__ import absolute_import
+from . import BibTeX
+from . import metaphone
 
 #import reconcile
 #import writeHTML
 #import updateCache

View File

@@ -4,19 +4,21 @@
 """Download files in bibliography into a local cache.
 """
 
+from __future__ import absolute_import
+from __future__ import print_function
 import os
 import sys
 import signal
 import time
 import gzip
 
-import BibTeX
-import config
-import urllib2
+from . import BibTeX
+from . import config
+import six.moves.urllib.request, six.moves.urllib.error, six.moves.urllib.parse
 import getopt
 import socket
 import errno
-import httplib
+import six.moves.http_client
 
 FILE_TYPES = [ "txt", "html", "pdf", "ps", "ps.gz", "abstract" ]
 BIN_FILE_TYPES = [ 'pdf', 'ps.gz' ]
@@ -53,12 +55,12 @@ def downloadFile(key, ftype, section, url,timeout=None):
     signal.alarm(timeout)
     try:
         try:
-            infile = urllib2.urlopen(url)
-        except httplib.InvalidURL, e:
+            infile = six.moves.urllib.request.urlopen(url)
+        except six.moves.http_client.InvalidURL as e:
             raise UIError("Invalid URL %s: %s"%(url,e))
-        except IOError, e:
+        except IOError as e:
             raise UIError("Cannot connect to url %s: %s"%(url,e))
-        except socket.error, e:
+        except socket.error as e:
             if getattr(e,"errno",-1) == errno.EINTR:
                 raise UIError("Connection timed out to url %s"%url)
             else:
@@ -80,9 +82,9 @@ def downloadFile(key, ftype, section, url,timeout=None):
     outfile.close()
 
     urlfile = open(fnameURL, 'w')
-    print >>urlfile, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=urlfile)
     if "\n" in url: url = url.replace("\n", " ")
-    print >>urlfile, url
+    print(url, file=urlfile)
     urlfile.close()
 
     os.rename(fnameTmp, fname)
@@ -105,7 +107,7 @@ def getCachedURL(key, ftype, section):
     lines = f.readlines()
     f.close()
     if len(lines) != 2:
-        print >>sys.stderr, "ERROR: unexpected number of lines in", urlFname
+        print("ERROR: unexpected number of lines in", urlFname, file=sys.stderr)
     return lines[1].strip()
 
 def downloadAll(bibtex, missingOnly=0):
@@ -119,29 +121,29 @@ def downloadAll(bibtex, missingOnly=0):
             if missingOnly:
                 cachedURL = getCachedURL(key, ftype, section)
                 if cachedURL == url:
-                    print >>sys.stderr,"Skipping",url
+                    print("Skipping",url, file=sys.stderr)
                     continue
                 elif cachedURL is not None:
-                    print >>sys.stderr,"URL for %s.%s has changed"%(key,ftype)
+                    print("URL for %s.%s has changed"%(key,ftype), file=sys.stderr)
                 else:
-                    print >>sys.stderr,"I have no copy of %s.%s"%(key,ftype)
+                    print("I have no copy of %s.%s"%(key,ftype), file=sys.stderr)
             try:
                 downloadFile(key, ftype, section, url)
-                print "Downloaded",url
-            except UIError, e:
-                print >>sys.stderr, str(e)
+                print("Downloaded",url)
+            except UIError as e:
+                print(str(e), file=sys.stderr)
                 errors.append((key,ftype,url,str(e)))
-            except (IOError, socket.error), e:
+            except (IOError, socket.error) as e:
                 msg = "Error downloading %s: %s"%(url,str(e))
-                print >>sys.stderr, msg
+                print(msg, file=sys.stderr)
                 errors.append((key,ftype,url,msg))
-        if urls.has_key("ps") and not urls.has_key("ps.gz"):
+        if "ps" in urls and "ps.gz" not in urls:
             # Say, this is something we'd like to have gzipped locally.
             psFname = getCacheFname(key, "ps", section)
             psGzFname = getCacheFname(key, "ps.gz", section)
             if os.path.exists(psFname) and not os.path.exists(psGzFname):
                 # This is something we haven't gzipped yet.
-                print "Compressing a copy of",psFname
+                print("Compressing a copy of",psFname)
                 outf = gzip.GzipFile(psGzFname, "wb")
                 inf = open(psFname, "rb")
                 while 1:
@@ -156,9 +158,9 @@ def downloadAll(bibtex, missingOnly=0):
 
 if __name__ == '__main__':
     if len(sys.argv) == 2:
-        print "Loading from %s"%sys.argv[1]
+        print("Loading from %s"%sys.argv[1])
     else:
-        print >>sys.stderr, "Expected a single configuration file as an argument"
+        print("Expected a single configuration file as an argument", file=sys.stderr)
         sys.exit(1)
 
     config.load(sys.argv[1])

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import render_template
 
 from i2p2www import ANONBIB_CFG, ANONBIB_FILE

View File

@@ -3,16 +3,20 @@
 """Generate indices by author, topic, date, and BibTeX key."""
 
+from __future__ import absolute_import
+from __future__ import print_function
 import sys
 import re
 import os
 import json
+from six.moves import map
+from six.moves import range
 
 assert sys.version_info[:3] >= (2,2,0)
 
-os.umask(022)
+os.umask(0o22)
 
-import BibTeX
-import config
+from . import BibTeX
+from . import config
 
 def getTemplate(name):
     f = open(name)
@@ -39,15 +43,15 @@ def writeBody(f, sections, section_urls, cache_path, base_url):
         sDisp = re.sub(r'\s+', ' ', s.strip())
         sDisp = sDisp.replace(" ", "&nbsp;")
         if u:
-            print >>f, ('<li><h3><a name="%s"></a><a href="%s">%s</a></h3>'%(
-                (BibTeX.url_untranslate(s), u, sDisp)))
+            print(('<li><h3><a name="%s"></a><a href="%s">%s</a></h3>'%(
+                (BibTeX.url_untranslate(s), u, sDisp))), file=f)
         else:
-            print >>f, ('<li><h3><a name="%s">%s</a></h3>'%(
-                BibTeX.url_untranslate(s),sDisp))
-        print >>f, "<ul class='expand'>"
+            print(('<li><h3><a name="%s">%s</a></h3>'%(
+                BibTeX.url_untranslate(s),sDisp)), file=f)
+        print("<ul class='expand'>", file=f)
         for e in entries:
-            print >>f, e.to_html(cache_path=cache_path, base_url=base_url)
-        print >>f, "</ul></li>"
+            print(e.to_html(cache_path=cache_path, base_url=base_url), file=f)
+        print("</ul></li>", file=f)
 
 def writeHTML(f, sections, sectionType, fieldName, choices,
               tag, config, cache_url_path, section_urls={}):
@@ -69,7 +73,7 @@ def writeHTML(f, sections, sectionType, fieldName, choices,
     #
 
     tagListStr = []
-    st = config.TAG_SHORT_TITLES.keys()
+    st = list(config.TAG_SHORT_TITLES.keys())
     st.sort()
     root = "../"*pathLength(config.TAG_DIRECTORIES[tag])
     if root == "": root = "."
@@ -104,10 +108,10 @@ def writeHTML(f, sections, sectionType, fieldName, choices,
     }
 
     header, footer = getTemplate(config.TEMPLATE_FILE)
-    print >>f, header%fields
+    print(header%fields, file=f)
     writeBody(f, sections, section_urls, cache_path=cache_url_path,
               base_url=root)
-    print >>f, footer%fields
+    print(footer%fields, file=f)
 
 def jsonDumper(obj):
     if isinstance(obj, BibTeX.BibTeXEntry):
@@ -125,7 +129,7 @@ def writePageSet(config, bib, tag):
     bib_entries = bib.entries[:]
 
     if not bib_entries:
-        print >>sys.stderr, "No entries with tag %r; skipping"%tag
+        print("No entries with tag %r; skipping"%tag, file=sys.stderr)
         return
 
     tagdir = config.TAG_DIRECTORIES[tag]
@@ -133,7 +137,7 @@ def writePageSet(config, bib, tag):
     cache_url_path = BibTeX.smartJoin("../"*pathLength(tagdir),
                                       config.CACHE_DIR)
     if not os.path.exists(outdir):
-        os.makedirs(outdir, 0755)
+        os.makedirs(outdir, 0o755)
 
     ##### Sorted views:
 
     ## By topic.
@@ -174,7 +178,7 @@ def writePageSet(config, bib, tag):
         except ValueError:
             last_year = int(entries[-2][1][0].get('year'))
 
-        years = map(str, range(first_year, last_year+1))
+        years = list(map(str, list(range(first_year, last_year+1))))
         if entries[-1][0] == 'Unknown':
             years.append("Unknown")
@@ -216,15 +220,15 @@ def writePageSet(config, bib, tag):
     header,footer = getTemplate(config.BIBTEX_TEMPLATE_FILE)
     f = open(os.path.join(outdir,"bibtex.html"), 'w')
-    print >>f, header % { 'command_line' : "",
-                          'title': config.TAG_TITLES[tag],
-                          'root': root }
+    print(header % { 'command_line' : "",
+                     'title': config.TAG_TITLES[tag],
+                     'root': root }, file=f)
     for ent in entries:
-        print >>f, (
-            ("<tr><td class='bibtex'><a name='%s'>%s</a>"
-             "<pre class='bibtex'>%s</pre></td></tr>")
-            %(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1)))
-    print >>f, footer
+        print((
+            ("<tr><td class='bibtex'><a name='%s'>%s</a>"
+             "<pre class='bibtex'>%s</pre></td></tr>")
+            %(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1))), file=f)
+    print(footer, file=f)
     f.close()
 
     f = open(os.path.join(outdir,"bibtex.json"), 'w')
@@ -234,9 +238,9 @@ def writePageSet(config, bib, tag):
 
 if __name__ == '__main__':
     if len(sys.argv) == 2:
-        print "Loading from %s"%sys.argv[1]
+        print("Loading from %s"%sys.argv[1])
     else:
-        print >>sys.stderr, "Expected a single configuration file as an argument"
+        print("Expected a single configuration file as an argument", file=sys.stderr)
         sys.exit(1)
 
     config.load(sys.argv[1])

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 import codecs
 import datetime
 from docutils.core import publish_parts

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import abort, g, redirect, render_template, request, url_for
 from werkzeug.contrib.atom import AtomFeed

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import redirect, render_template, request
 from i2p2www import CURRENT_I2P_VERSION, MIRRORS_FILE

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import abort, redirect, render_template, request
 try:
     import json
@@ -166,13 +167,13 @@ def downloads_redirect(version, net, protocol, domain, file):
     }
 
     if not protocol:
-        protocol = mirrors.keys()[randint(0, len(mirrors) - 1)]
+        protocol = list(mirrors.keys())[randint(0, len(mirrors) - 1)]
     if not protocol in mirrors:
         abort(404)
     mirrors=mirrors[protocol]
 
     if not domain:
-        domain = mirrors.keys()[randint(0, len(mirrors) - 1)]
+        domain = list(mirrors.keys())[randint(0, len(mirrors) - 1)]
     if not domain in mirrors:
         abort(404)
 
     return render_template('downloads/redirect.html',
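Note: dict.keys() is a view in Python 3 and can no longer be indexed, hence the list() wrapper; random.choice expresses the same pick directly. A sketch:

import random

mirrors = {"http": ["mirror-a"], "https": ["mirror-b"]}   # illustrative shape
protocol = random.choice(list(mirrors))   # replaces list(mirrors.keys())[randint(...)]
print(protocol)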

View File

@@ -1,5 +1,7 @@
 # -*- coding: utf8 -*-
+from __future__ import absolute_import
+from __future__ import print_function
 import os
 import sys
 from jinja2 import nodes
@@ -9,6 +11,7 @@ from pygments import highlight
 from pygments.lexers import get_lexer_by_name, guess_lexer
 from pygments.formatters import HtmlFormatter
 from pygments.util import ClassNotFound
+import six
 
 try:
     import ctags
@@ -29,8 +32,8 @@ def we_are_frozen():
 def module_path():
     encoding = sys.getfilesystemencoding()
     if we_are_frozen():
-        return os.path.dirname(unicode(sys.executable, encoding))
-    return os.path.dirname(unicode(__file__, encoding))
+        return os.path.dirname(six.text_type(sys.executable, encoding))
+    return os.path.dirname(six.text_type(__file__, encoding))
 
 class HighlightExtension(Extension):
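Note: six.text_type(sys.executable, encoding) keeps the Python 2 shape, but on Python 3 sys.executable and __file__ are already str, and str(s, encoding) raises TypeError for str input. os.fsdecode() accepts bytes or str, so a version-proof sketch looks like:

import os
import sys

def module_path(frozen=False):
    # fsdecode() decodes bytes with the filesystem encoding
    # and passes str through unchanged
    path = sys.executable if frozen else __file__
    return os.path.dirname(os.fsdecode(path))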

View File

@@ -9,14 +9,21 @@
     :license: BSD, see LICENSE for details.
 """
 
+from __future__ import absolute_import
+from __future__ import print_function
 import os
 import sys
 import os.path
-import StringIO
+try:
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
 
 from pygments.formatter import Formatter
 from pygments.token import Token, Text, STANDARD_TYPES
-from pygments.util import get_bool_opt, get_int_opt, get_list_opt, bytes
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt#, bytes
+import six
+from six.moves import range
 
 try:
     import ctags
@@ -459,7 +466,7 @@ class I2PHtmlFormatter(Formatter):
         """
         if arg is None:
             arg = ('cssclass' in self.options and '.'+self.cssclass or '')
-        if isinstance(arg, basestring):
+        if isinstance(arg, six.string_types):
             args = [arg]
         else:
             args = list(arg)
@@ -473,7 +480,7 @@ class I2PHtmlFormatter(Formatter):
             return ', '.join(tmp)
 
         styles = [(level, ttype, cls, style)
-                  for cls, (style, ttype, level) in self.class2style.iteritems()
+                  for cls, (style, ttype, level) in six.iteritems(self.class2style)
                   if cls and style]
         styles.sort()
         lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
@@ -511,8 +518,8 @@ class I2PHtmlFormatter(Formatter):
                     cssfilename = os.path.join(os.path.dirname(filename),
                                                self.cssfile)
                 except AttributeError:
-                    print >>sys.stderr, 'Note: Cannot determine output file name, ' \
-                          'using current directory as base for the CSS file name'
+                    print('Note: Cannot determine output file name, ' \
+                          'using current directory as base for the CSS file name', file=sys.stderr)
                     cssfilename = self.cssfile
             # write CSS file only if noclobber_cssfile isn't given as an option.
             try:
@@ -521,7 +528,7 @@ class I2PHtmlFormatter(Formatter):
                     cf.write(CSSFILE_TEMPLATE %
                             {'styledefs': self.get_style_defs('body')})
                     cf.close()
-                except IOError, err:
+                except IOError as err:
                     err.strerror = 'Error writing CSS file: ' + err.strerror
                     raise

View File

@@ -1,5 +1,7 @@
+from __future__ import absolute_import
 from math import ceil
-from werkzeug import import_string, cached_property
+from werkzeug.utils import cached_property, import_string
+from six.moves import range
 
 ########################
 # General helper methods
@@ -56,7 +58,7 @@ class Pagination(object):
     def iter_pages(self, left_edge=2, left_current=2,
                    right_current=5, right_edge=2):
         last = 0
-        for num in xrange(1, self.pages + 1):
+        for num in range(1, self.pages + 1):
             if num <= left_edge or \
                (num > self.page - left_current - 1 and \
                 num < self.page + right_current) or \

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import g, redirect, url_for
 
@@ -197,7 +198,7 @@ LEGACY_RELEASES_MAP={
     '0.9.8': (2013, 9, 30),
     '0.9.8.1': (2013, 10, 2),
     '0.9.9': (2013, 12, 7),
-    '0.9.10': (2014, 01, 22),
+    '0.9.10': (2014, 0o1, 22),
 }
 
 def legacy_show(f):
@@ -232,5 +233,6 @@ def legacy_release(version):
     else:
         return legacy_show('release-%s' % version)
 
-def legacy_blog(lang, (year, month, day), title):
+def legacy_blog(lang, xxx_todo_changeme, title):
+    (year, month, day) = xxx_todo_changeme
     return redirect(url_for('blog_post', lang=lang, slug=('%d/%02d/%02d/%s' % (year, month, day, title))), 301)
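Note: xxx_todo_changeme is 2to3's placeholder for a tuple parameter, a form removed by PEP 3113; the same file also gains 0o1 because bare leading-zero octal literals are a syntax error in Python 3 (0o1 is just 1, so (2014, 1, 22) would read more naturally). A hand-written version of the rewritten function, sketched:

from flask import redirect, url_for

def legacy_blog(lang, date, title):
    year, month, day = date    # unpack explicitly rather than in the signature
    return redirect(url_for('blog_post', lang=lang,
                            slug='%d/%02d/%02d/%s' % (year, month, day, title)), 301)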

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from pygments.lexer import RegexLexer, bygroups
 from pygments.token import *

View File

@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
 import codecs
 import datetime
 from docutils.core import publish_parts
@@ -54,7 +56,7 @@ def get_meetings_ids(num=0):
         # iterate over all files
         for f in v[2]:
             # ignore all non-.rst files
-            print("Meeting file found", f)
+            print(("Meeting file found", f))
             if not f.endswith('.rst'):
                 continue
             try:

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 import codecs
 from flask import abort, render_template, request, safe_join, send_from_directory
 import os.path

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import g, make_response, render_template, request, safe_join
 import os.path

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 import codecs
 from collections import defaultdict
 from docutils import io
@@ -23,6 +24,7 @@ import os.path
 
 from i2p2www import PROPOSAL_DIR, SPEC_DIR
 from i2p2www import helpers
+from six.moves import range
 
 SPEC_METATAGS = {

View File

@@ -1,7 +1,9 @@
+from __future__ import absolute_import
 import ctags
-from flask import g, request, safe_join, url_for
+from flask import g, request, url_for
+from werkzeug.utils import safe_join
 import os.path
-from urlparse import urlsplit, urlunsplit
+from six.moves.urllib.parse import urlsplit, urlunsplit
 
 from i2p2www import (
     CANONICAL_DOMAIN,

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from werkzeug.routing import BaseConverter
 
 from i2p2www import app

View File

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import abort, redirect, render_template, safe_join, send_from_directory, url_for
 import os.path

View File

@@ -1,9 +1,11 @@
+from __future__ import absolute_import
 from werkzeug import BaseRequest, BaseResponse, ETagResponseMixin, escape, run_simple, SharedDataMiddleware
 from werkzeug.exceptions import HTTPException
 import os
 import sha
 from time import time
 from random import choice
+from six.moves import range
 
 class Request(BaseRequest):
     """Useful subclass of the default request that knows how to build urls."""

View File

@@ -14,6 +14,8 @@
 # Modify as needed, or use a symlink.
 
+from __future__ import absolute_import
+from six.moves import range
 
 netdbdir = 'netdb'
 database = 'Driver=SQLite;DATABASE=I2PnetDb'
@@ -136,7 +138,7 @@ def application(environ, start_response):
     new = []
     if len(entries) > 150:
         # select some randomly
-        for i in xrange(100):
+        for i in range(100):
             while True:
                 sel = choice(entries)
                 if not sel.startswith('routerInfo-'):

View File

@@ -1,4 +1,5 @@
 #!env/bin/python
+from __future__ import absolute_import
 from i2p2www import app
 
 import os