Compare commits
2 Commits: deb775d243, 8ebc4de1a7
@@ -1,7 +1,7 @@
 export venv_dir="env"
 export spec=""
-export venv="`which virtualenv-2.7`"
-if [ x"$venv" = x ]; then
-export venv="`which virtualenv`"
-export spec="-p 2.7"
-fi
+export venv="/usr/bin/virtualenv"
+#if [ x"$venv" = x ]; then
+# export venv="`which virtualenv`"
+# export spec="-p 2.7"
+#fi
etc/reqs.txt (20 lines changed)
@@ -1,11 +1,13 @@
 pytz>=2012
-Flask==1.0.2
-Babel==1.3
-Flask-Babel==0.9
-Flask-Caching==1.4.0
-Jinja2==2.10
-Pygments==1.6
-python-ctags
-docutils==0.11
+Flask==3.0.0
+Babel==2.13.1
+Flask-Babel==4.0.0
+Flask-Caching==2.1.0
+Jinja2==3.1.2
+Pygments>=1.6
+python-ctags3
+docutils==0.20.1
 gunicorn==0.17.2
-werkzeug==0.16.1
+werkzeug>=0.16.1
+markupsafe>=2.0.1
+six
@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
-from flask import Flask, request, g, redirect, url_for, abort, render_template, send_from_directory, safe_join
+from __future__ import absolute_import
+from flask import Flask, request, g, redirect, url_for, abort, render_template, send_from_directory
+from werkzeug.utils import safe_join
 try:
 from flaskext.babel import Babel
 except ImportError:
@@ -140,7 +142,7 @@ cache = Cache(app, config=CACHE_CONFIG)
 #################
 # Babel selectors

-@babel.localeselector
+#@babel.localeselector
 def get_locale():
 # If viewing specs, require English
 if request.path.startswith('/spec'):
@@ -152,7 +154,7 @@ def get_locale():
 # header the browser transmits. The best match wins.
 return request.accept_languages.best_match(SUPPORTED_LANGS)

-@babel.domainselector
+#@babel.domainselector
 def get_domains():
 domains = []
 frags = request.path.split('/', 2)
@@ -167,6 +169,7 @@ def get_domains():
 domains.append(DEFAULT_GETTEXT_DOMAIN)
 return domains

+babel.init_app(app, locale_selector=get_locale, default_domain=get_domains)

 ##########################
 # Hooks - helper functions
@@ -213,7 +216,7 @@ def detect_theme():
 theme = 'duck'
 if 'style' in request.cookies:
 theme = request.cookies['style']
-if 'theme' in request.args.keys():
+if 'theme' in list(request.args.keys()):
 theme = request.args['theme']
 # TEMPORARY: enable external themes
 # TODO: Remove this (and the corresponding lines in global/layout.html
@@ -263,5 +266,5 @@ def server_error(error):
 return render_template('global/error_500.html'), 500

 # Import these to ensure they get loaded
-import templatevars
-import urls
+from . import templatevars
+from . import urls
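Note on the Babel hunks above: Flask-Babel 3.x and later dropped the `@babel.localeselector` and `@babel.domainselector` decorators, which is why they are commented out and the selector functions are instead passed to `babel.init_app(...)`. A minimal sketch of that registration pattern for a standalone app (the supported-language list here is illustrative, not taken from this codebase):

    from flask import Flask, request
    from flask_babel import Babel

    app = Flask(__name__)

    def get_locale():
        # pick the best match from the browser's Accept-Language header
        return request.accept_languages.best_match(['en', 'de', 'fr'])

    babel = Babel()
    # the selector is registered at init time instead of via a decorator
    babel.init_app(app, locale_selector=get_locale)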
@@ -6,14 +6,19 @@
 Based on perl code by Eddie Kohler; heavily modified.
 """

+from __future__ import absolute_import
+from __future__ import print_function
 import cStringIO
 import re
 import sys
 import os

-import config
+from . import config

-import rank
+from . import rank
+from six.moves import map
+from six.moves import range
+from six.moves import zip

 __all__ = [ 'ParseError', 'BibTeX', 'BibTeXEntry', 'htmlize',
 'ParsedAuthor', 'FileIter', 'Parser', 'parseFile',
@@ -66,7 +71,7 @@ class BibTeX:
 """Add a BibTeX entry to this file."""
 k = ent.key
 if self.byKey.get(ent.key.lower()):
-print >> sys.stderr, "Already have an entry named %s"%k
+print("Already have an entry named %s"%k, file=sys.stderr)
 return
 self.entries.append(ent)
 self.byKey[ent.key.lower()] = ent
@@ -79,7 +84,7 @@ class BibTeX:
 try:
 cr = self.byKey[ent['crossref'].lower()]
 except KeyError:
-print "No such crossref: %s"% ent['crossref']
+print("No such crossref: %s"% ent['crossref'])
 break
 if seen.get(cr.key):
 raise ParseError("Circular crossref at %s" % ent.key)
@@ -87,12 +92,12 @@ class BibTeX:
 del ent.entries['crossref']

 if cr.entryLine < ent.entryLine:
-print "Warning: crossref %s used after declaration"%cr.key
+print("Warning: crossref %s used after declaration"%cr.key)

 for k in cr.entries.keys():
-if ent.entries.has_key(k):
-print "ERROR: %s defined both in %s and in %s"%(
-k,ent.key,cr.key)
+if k in ent.entries:
+print("ERROR: %s defined both in %s and in %s"%(
+k,ent.key,cr.key))
 else:
 ent.entries[k] = cr.entries[k]

@@ -105,7 +110,7 @@ class BibTeX:
 rk = "title"

 for ent in self.entries:
-if ent.type in config.OMIT_ENTRIES or not ent.has_key(rk):
+if ent.type in config.OMIT_ENTRIES or rk not in ent:
 ent.check()
 del self.byKey[ent.key.lower()]
 else:
@@ -130,7 +135,7 @@ def buildAuthorTable(entries):

 for e in entries:
 for author in e.parsedAuthor:
-if result.has_key(author):
+if author in result:
 continue

 c = author
@@ -143,14 +148,14 @@ def buildAuthorTable(entries):
 if 0:
 for a,c in result.items():
 if a != c:
-print "Collapsing authors: %s => %s" % (a,c)
+print("Collapsing authors: %s => %s" % (a,c))
 if 0:
-print parseAuthor("Franz Kaashoek")[0].collapsesTo(
-parseAuthor("M. Franz Kaashoek")[0])
-print parseAuthor("Paul F. Syverson")[0].collapsesTo(
-parseAuthor("Paul Syverson")[0])
-print parseAuthor("Paul Syverson")[0].collapsesTo(
-parseAuthor("Paul F. Syverson")[0])
+print(parseAuthor("Franz Kaashoek")[0].collapsesTo(
+parseAuthor("M. Franz Kaashoek")[0]))
+print(parseAuthor("Paul F. Syverson")[0].collapsesTo(
+parseAuthor("Paul Syverson")[0]))
+print(parseAuthor("Paul Syverson")[0].collapsesTo(
+parseAuthor("Paul F. Syverson")[0]))

 return result

@@ -221,7 +226,7 @@ def splitEntriesByAuthor(entries):

 htmlResult[sortkey] = secname
 result.setdefault(sortkey, []).append(ent)
-sortnames = result.keys()
+sortnames = list(result.keys())
 sortnames.sort()
 sections = [ (htmlResult[n], result[n]) for n in sortnames ]
 return sections, url_map
@@ -255,13 +260,13 @@ def sortEntriesByDate(entries):
 monthname = match.group(1)
 mon = MONTHS.index(monthname)
 except ValueError:
-print "Unknown month %r in %s"%(ent.get("month"), ent.key)
+print("Unknown month %r in %s"%(ent.get("month"), ent.key))
 mon = 0

 try:
 date = int(ent['year'])*13 + mon
 except KeyError:
-print "ERROR: No year field in %s"%ent.key
+print("ERROR: No year field in %s"%ent.key)
 date = 10000*13
 except ValueError:
 date = 10000*13
@@ -286,7 +291,7 @@ class BibTeXEntry:
 def get(self, k, v=None):
 return self.entries.get(k,v)
 def has_key(self, k):
-return self.entries.has_key(k)
+return k in self.entries
 def __getitem__(self, k):
 return self.entries[k]
 def __setitem__(self, k, v):
@@ -318,7 +323,7 @@ class BibTeXEntry:
 else:
 df = DISPLAYED_FIELDS
 for f in df:
-if not self.entries.has_key(f):
+if f not in self.entries:
 continue
 v = self.entries[f]
 if v.startswith("<span class='bad'>"):
@@ -330,7 +335,7 @@ class BibTeXEntry:
 d.append("%%%%% "+("ERROR: Non-ASCII characters: '%r'\n"%np))
 d.append(" ")
 v = v.replace("&amp;", "&")
-if invStrings.has_key(v):
+if v in invStrings:
 s = "%s = %s,\n" %(f, invStrings[v])
 else:
 s = "%s = {%s},\n" % (f, v)
@@ -359,7 +364,7 @@ class BibTeXEntry:
 none."""
 errs = self._check()
 for e in errs:
-print e
+print(e)
 return not errs

 def _check(self):
@@ -396,7 +401,7 @@ class BibTeXEntry:
 not self['booktitle'].startswith("{Proceedings of"):
 errs.append("ERROR: %s's booktitle (%r) doesn't start with 'Proceedings of'" % (self.key, self['booktitle']))

-if self.has_key("pages") and not re.search(r'\d+--\d+', self['pages']):
+if "pages" in self and not re.search(r'\d+--\d+', self['pages']):
 errs.append("ERROR: Misformed pages in %s"%self.key)

 if self.type == 'proceedings':
@@ -551,8 +556,8 @@ class BibTeXEntry:
 cache_section = self.get('www_cache_section', ".")
 if cache_section not in config.CACHE_SECTIONS:
 if cache_section != ".":
-print >>sys.stderr, "Unrecognized cache section %s"%(
-cache_section)
+print("Unrecognized cache section %s"%(
+cache_section), file=sys.stderr)
 cache_section="."

 for key, name, ext in (('www_abstract_url', 'abstract','abstract'),
@@ -766,13 +771,13 @@ class ParsedAuthor:
 short = o.first; long = self.first

 initials_s = "".join([n[0] for n in short])
-initials_l = "".join([n[0] for n in long])
+initials_l = "".join([n[0] for n in int])
 idx = initials_l.find(initials_s)
 if idx < 0:
 return self
-n = long[:idx]
+n = int[:idx]
 for i in range(idx, idx+len(short)):
-a = long[i]; b = short[i-idx]
+a = int[i]; b = short[i-idx]
 if a == b:
 n.append(a)
 elif len(a) == 2 and a[1] == '.' and a[0] == b[0]:
@@ -781,7 +786,7 @@ class ParsedAuthor:
 n.append(a)
 else:
 return self
-n += long[idx+len(short):]
+n += int[idx+len(short):]

 if n == self.first:
 return self
@@ -842,7 +847,7 @@ def _split(s,w=79,indent=8):
 first = 1
 indentation = ""
 while len(s) > w:
-for i in xrange(w-1, 20, -1):
+for i in range(w-1, 20, -1):
 if s[i] == ' ':
 r.append(indentation+s[:i])
 s = s[i+1:]
@@ -866,7 +871,7 @@ class FileIter:
 if string:
 file = cStringIO.StringIO(string)
 if file:
-it = iter(file.xreadlines())
+it = iter(file)
 self.iter = it
 assert self.iter
 self.lineno = 0
@@ -880,7 +885,7 @@ def parseAuthor(s):
 try:
 return _parseAuthor(s)
 except:
-print >>sys.stderr, "Internal error while parsing author %r"%s
+print("Internal error while parsing author %r"%s, file=sys.stderr)
 raise

 def _parseAuthor(s):
@@ -891,7 +896,7 @@ def _parseAuthor(s):
 while s:
 s = s.strip()
 bracelevel = 0
-for i in xrange(len(s)):
+for i in range(len(s)):
 if s[i] == '{':
 bracelevel += 1
 elif s[i] == '}':
@@ -947,8 +952,8 @@ def _parseAuthor(s):

 return parsedAuthors

-ALLCHARS = "".join(map(chr,range(256)))
-PRINTINGCHARS = "\t\n\r"+"".join(map(chr,range(32, 127)))
+ALLCHARS = "".join(map(chr,list(range(256))))
+PRINTINGCHARS = "\t\n\r"+"".join(map(chr,list(range(32, 127))))
 LC_CHARS = "abcdefghijklmnopqrstuvwxyz"
 SV_DELCHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
 "abcdefghijklmnopqrstuvwxyz"
@@ -1049,7 +1054,7 @@ class Parser:
 continue
 data.append(line)
 data.append(" ")
-line = it.next()
+line = next(it)
 self.litStringLine = 0
 elif line[0] == '{':
 bracelevel += 1
@@ -1076,13 +1081,13 @@ class Parser:
 #print bracelevel, "C", repr(line)
 data.append(line)
 data.append(" ")
-line = it.next()
+line = next(it)
 elif line[0] == '#':
-print >>sys.stderr, "Weird concat on line %s"%it.lineno
+print("Weird concat on line %s"%it.lineno, file=sys.stderr)
 elif line[0] in "},":
 if not data:
-print >>sys.stderr, "No data after field on line %s"%(
-it.lineno)
+print("No data after field on line %s"%(
+it.lineno), file=sys.stderr)
 else:
 m = RAW_DATA_RE.match(line)
 if m:
@@ -1170,7 +1175,7 @@ class Parser:
 else:
 key = v[0]
 d = {}
-for i in xrange(1,len(v),2):
+for i in range(1,len(v),2):
 d[v[i].lower()] = v[i+1]
 ent = BibTeXEntry(self.curEntType, key, d)
 ent.entryLine = self.entryLine
@@ -1197,11 +1202,11 @@ class Parser:

 def _parse(self):
 it = self.fileiter
-line = it.next()
+line = next(it)
 while 1:
 # Skip blank lines.
 while not line or line.isspace() or OUTER_COMMENT_RE.match(line):
-line = it.next()
+line = next(it)
 # Get the first line of an entry.
 m = ENTRY_BEGIN_RE.match(line)
 if m:
@@ -1215,7 +1220,7 @@ class Parser:

 def _advance(it,line):
 while not line or line.isspace() or COMMENT_RE.match(line):
-line = it.next()
+line = next(it)
 return line

 # Matches a comment line outside of an entry.
@@ -1265,5 +1270,5 @@ if __name__ == '__main__':

 for e in r.entries:
 if e.type in ("proceedings", "journal"): continue
-print e.to_html()
+print(e.to_html())
@@ -1,5 +1,6 @@
 # Copyright 2003-2006, Nick Mathewson. See LICENSE for licensing info.

+from __future__ import absolute_import
 import re

 _KEYS = [ "ALL_TAGS",
@@ -19,7 +20,7 @@ del _k

 def load(cfgFile):
 mod = {}
-execfile(cfgFile, mod)
+exec(compile(open(cfgFile, "rb").read(), cfgFile, 'exec'), mod)
 for _k in _KEYS:
 try:
 globals()[_k]=mod[_k]
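The `execfile()` builtin no longer exists on Python 3, so the hunk above compiles the file's contents and `exec`s them into the module dict instead. A small self-contained sketch of the same pattern (the `settings.cfg` path is hypothetical):

    def load_config(path):
        namespace = {}
        # Python 2/3 replacement for: execfile(path, namespace)
        with open(path, "rb") as f:
            code = compile(f.read(), path, "exec")
        exec(code, namespace)
        return namespace

    cfg = load_config("settings.cfg")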
@@ -8,6 +8,9 @@

 Based on the original C++ metaphone implementation.)
 """
+from __future__ import print_function
+from six.moves import map
+from six.moves import range

 TRIPLES = {
 'dge': 'j',
@@ -45,7 +48,7 @@ SINGLETONS = {
 'z': 's',
 }

-ALLCHARS = "".join(map(chr, range(256)))
+ALLCHARS = "".join(map(chr, list(range(256))))
 NONLCCHARS = "".join([c for c in ALLCHARS if not c.islower()])
 def metaphone(s):
 """Return the metaphone equivalent of a provided string"""
@@ -182,7 +185,7 @@ def metaphone(s):
 return "".join(result)

 def demo(a):
-print a, "=>", metaphone(a)
+print(a, "=>", metaphone(a))

 if __name__ == '__main__':
 demo("Nick. Mathewson")
@@ -4,10 +4,12 @@
 # http://scholar.google.com/scholar?as_epq=

 # Take care of the caching setup
+from __future__ import absolute_import
+from __future__ import print_function
 cache_expire = 60*60*24*30 # 30 days

 # Checks
-import config
+from . import config
 import os
 import sys
 from os.path import exists, isdir, join, getmtime
@@ -32,8 +34,8 @@ def cache_folder():
 return r

 import re
-from urllib2 import urlopen, build_opener
-from urllib import quote
+from six.moves.urllib.request import urlopen, build_opener
+from six.moves.urllib.parse import quote
 from datetime import date
 import hashlib

@@ -64,21 +66,21 @@ def getPageForTitle(title, cache=True, update=True, save=True):

 # Access cache or network
 if exists(join(cache_folder(), md5h(url))) and cache:
-return url, file(join(cache_folder(), md5h(url)),'r').read()
+return url, open(join(cache_folder(), md5h(url)),'r').read()
 elif update:
-print "Downloading rank for %r."%title
+print("Downloading rank for %r."%title)

 # Make a custom user agent (so that we are not filtered by Google)!
 opener = build_opener()
 opener.addheaders = [('User-agent', 'Anon.Bib.0.1')]

-print "connecting..."
+print("connecting...")
 connection = opener.open(url)
-print "reading"
+print("reading")
 page = connection.read()
-print "done"
+print("done")
 if save:
-file(join(cache_folder(), md5h(url)),'w').write(page)
+open(join(cache_folder(), md5h(url)),'w').write(page)
 return url, page
 else:
 return url, None
@@ -140,20 +142,20 @@ def get_rank_html(title, years=None, base_url=".", update=True,
 def TestScholarFormat():
 # We need to ensure that Google Scholar does not change its page format under our feet
 # Use some cases to check if all is good
-print "Checking google scholar formats..."
+print("Checking google scholar formats...")
 stopAndGoCites = getCite("Stop-and-Go MIXes: Providing Probabilistic Anonymity in an Open System", False)[0]
 dragonCites = getCite("Mixes protected by Dragons and Pixies: an empirical study", False, save=False)[0]

 if stopAndGoCites in (0, None):
-print """OOPS.\n
+print("""OOPS.\n
 It looks like Google Scholar changed their URL format or their output format.
-I went to count the cites for the Stop-and-Go MIXes paper, and got nothing."""
+I went to count the cites for the Stop-and-Go MIXes paper, and got nothing.""")
 sys.exit(1)

 if dragonCites != None:
-print """OOPS.\n
+print("""OOPS.\n
 It looks like Google Scholar changed their URL format or their output format.
-I went to count the cites for a fictitious paper, and found some."""
+I went to count the cites for a fictitious paper, and found some.""")
 sys.exit(1)

 def urlIsUseless(u):
@@ -170,7 +172,7 @@ URLTYPES=[ "pdf", "ps", "txt", "ps_gz", "html" ]

 if __name__ == '__main__':
 # First download the bibliography file.
-import BibTeX
+from . import BibTeX
 suggest = False
 if sys.argv[1] == 'suggest':
 suggest = True
@@ -182,7 +184,7 @@ if __name__ == '__main__':
 bib = BibTeX.parseFile(config.MASTER_BIB)
 remove_old()

-print "Downloading missing ranks."
+print("Downloading missing ranks.")
 for ent in bib.entries:
 getCite(ent['title'], cache=True, update=True)

@@ -190,13 +192,13 @@ if __name__ == '__main__':
 for ent in bib.entries:
 haveOne = False
 for utype in URLTYPES:
-if ent.has_key("www_%s_url"%utype):
+if "www_%s_url"%utype in ent:
 haveOne = True
 break
 if haveOne:
 continue
-print ent.key, "has no URLs given."
+print(ent.key, "has no URLs given.")
 urls = [ u for u in getPaperURLs(ent['title']) if not urlIsUseless(u) ]
 for u in urls:
-print "\t", u
+print("\t", u)
@@ -8,14 +8,17 @@
 cleaned up a little, and all the duplicate entries commented out.
 """

+from __future__ import absolute_import
+from __future__ import print_function
 import sys
 import re
+from six.moves import zip

 assert sys.version_info[:3] >= (2,2,0)

-import BibTeX
-import config
-import metaphone
+from . import BibTeX
+from . import config
+from . import metaphone

 _MPCACHE = {}
 def soundsLike(s1, s2):
@@ -168,16 +171,16 @@ class MasterBibTeX(BibTeX.BibTeX):
 matches = m2

 if not matches:
-print "No match for %s"%e.key
+print("No match for %s"%e.key)
 if matches[-1][1] is e:
-print "%s matches for %s: OK."%(len(matches), e.key)
+print("%s matches for %s: OK."%(len(matches), e.key))
 else:
-print "%s matches for %s: %s is best!" %(len(matches), e.key,
-matches[-1][1].key)
+print("%s matches for %s: %s is best!" %(len(matches), e.key,
+matches[-1][1].key))
 if len(matches) > 1:
 for g, m in matches:
-print "%%%% goodness", g
-print m
+print("%%%% goodness", g)
+print(m)


 def noteToURL(note):
@@ -202,7 +205,7 @@ def emit(f,ent):
 global all_ok

 errs = ent._check()
-if master.byKey.has_key(ent.key.strip().lower()):
+if ent.key.strip().lower() in master.byKey:
 errs.append("ERROR: Key collision with master file")

 if errs:
@@ -232,61 +235,61 @@ def emit(f,ent):
 if errs:
 all_ok = 0
 for e in errs:
-print >>f, "%%%%", e
+print("%%%%", e, file=f)

-print >>f, ent.format(77, 4, v=1, invStrings=invStrings)
+print(ent.format(77, 4, v=1, invStrings=invStrings), file=f)

 def emitKnown(f, ent, matches):
-print >>f, "%% Candidates are:", ", ".join([e.key for g,e in matches])
-print >>f, "%%"
-print >>f, "%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%"))
+print("%% Candidates are:", ", ".join([e.key for g,e in matches]), file=f)
+print("%%", file=f)
+print("%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%")), file=f)

 if __name__ == '__main__':
 if len(sys.argv) != 3:
-print "reconcile.py expects 2 arguments"
+print("reconcile.py expects 2 arguments")
 sys.exit(1)

 config.load(sys.argv[1])

-print "========= Scanning master =========="
+print("========= Scanning master ==========")
 master = MasterBibTeX()
 master = BibTeX.parseFile(config.MASTER_BIB, result=master)
 master.buildIndex()

-print "========= Scanning new file ========"
+print("========= Scanning new file ========")
 try:
 fn = sys.argv[2]
 input = BibTeX.parseFile(fn)
-except BibTeX.ParseError, e:
-print "Error parsing %s: %s"%(fn,e)
+except BibTeX.ParseError as e:
+print("Error parsing %s: %s"%(fn,e))
 sys.exit(1)

 f = open('tmp.bib', 'w')
-keys = input.newStrings.keys()
+keys = list(input.newStrings.keys())
 keys.sort()
 for k in keys:
 v = input.newStrings[k]
-print >>f, "@string{%s = {%s}}"%(k,v)
+print("@string{%s = {%s}}"%(k,v), file=f)

 invStrings = input.invStrings

 for e in input.entries:
 if not (e.get('title') and e.get('author')):
-print >>f, "%%\n%%%% Not enough information to search for a match: need title and author.\n%%"
+print("%%\n%%%% Not enough information to search for a match: need title and author.\n%%", file=f)
 emit(f, e)
 continue

 matches = master.includes(e, all=1)
 if not matches:
-print >>f, "%%\n%%%% This entry is probably new: No match found.\n%%"
+print("%%\n%%%% This entry is probably new: No match found.\n%%", file=f)
 emit(f, e)
 else:
-print >>f, "%%"
-print >>f, "%%%% Possible match found for this entry; max goodness",\
-matches[-1][0], "\n%%"
+print("%%", file=f)
+print("%%%% Possible match found for this entry; max goodness",\
+matches[-1][0], "\n%%", file=f)
 emitKnown(f, e, matches)

 if not all_ok:
-print >>f, "\n\n\nErrors remain; not finished.\n"
+print("\n\n\nErrors remain; not finished.\n", file=f)

 f.close()
@@ -3,8 +3,9 @@

 """Unit tests for anonbib."""

-import BibTeX
-import metaphone
+from __future__ import absolute_import
+from . import BibTeX
+from . import metaphone
 #import reconcile
 #import writeHTML
 #import updateCache
@@ -4,19 +4,21 @@
 """Download files in bibliography into a local cache.
 """

+from __future__ import absolute_import
+from __future__ import print_function
 import os
 import sys
 import signal
 import time
 import gzip

-import BibTeX
-import config
-import urllib2
+from . import BibTeX
+from . import config
+import six.moves.urllib.request, six.moves.urllib.error, six.moves.urllib.parse
 import getopt
 import socket
 import errno
-import httplib
+import six.moves.http_client

 FILE_TYPES = [ "txt", "html", "pdf", "ps", "ps.gz", "abstract" ]
 BIN_FILE_TYPES = [ 'pdf', 'ps.gz' ]
@@ -53,12 +55,12 @@ def downloadFile(key, ftype, section, url,timeout=None):
 signal.alarm(timeout)
 try:
 try:
-infile = urllib2.urlopen(url)
-except httplib.InvalidURL, e:
+infile = six.moves.urllib.request.urlopen(url)
+except six.moves.http_client.InvalidURL as e:
 raise UIError("Invalid URL %s: %s"%(url,e))
-except IOError, e:
+except IOError as e:
 raise UIError("Cannot connect to url %s: %s"%(url,e))
-except socket.error, e:
+except socket.error as e:
 if getattr(e,"errno",-1) == errno.EINTR:
 raise UIError("Connection timed out to url %s"%url)
 else:
@@ -80,9 +82,9 @@ def downloadFile(key, ftype, section, url,timeout=None):
 outfile.close()

 urlfile = open(fnameURL, 'w')
-print >>urlfile, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=urlfile)
 if "\n" in url: url = url.replace("\n", " ")
-print >>urlfile, url
+print(url, file=urlfile)
 urlfile.close()

 os.rename(fnameTmp, fname)
@@ -105,7 +107,7 @@ def getCachedURL(key, ftype, section):
 lines = f.readlines()
 f.close()
 if len(lines) != 2:
-print >>sys.stderr, "ERROR: unexpected number of lines in", urlFname
+print("ERROR: unexpected number of lines in", urlFname, file=sys.stderr)
 return lines[1].strip()

 def downloadAll(bibtex, missingOnly=0):
@@ -119,29 +121,29 @@ def downloadAll(bibtex, missingOnly=0):
 if missingOnly:
 cachedURL = getCachedURL(key, ftype, section)
 if cachedURL == url:
-print >>sys.stderr,"Skipping",url
+print("Skipping",url, file=sys.stderr)
 continue
 elif cachedURL is not None:
-print >>sys.stderr,"URL for %s.%s has changed"%(key,ftype)
+print("URL for %s.%s has changed"%(key,ftype), file=sys.stderr)
 else:
-print >>sys.stderr,"I have no copy of %s.%s"%(key,ftype)
+print("I have no copy of %s.%s"%(key,ftype), file=sys.stderr)
 try:
 downloadFile(key, ftype, section, url)
-print "Downloaded",url
-except UIError, e:
-print >>sys.stderr, str(e)
+print("Downloaded",url)
+except UIError as e:
+print(str(e), file=sys.stderr)
 errors.append((key,ftype,url,str(e)))
-except (IOError, socket.error), e:
+except (IOError, socket.error) as e:
 msg = "Error downloading %s: %s"%(url,str(e))
-print >>sys.stderr, msg
+print(msg, file=sys.stderr)
 errors.append((key,ftype,url,msg))
-if urls.has_key("ps") and not urls.has_key("ps.gz"):
+if "ps" in urls and "ps.gz" not in urls:
 # Say, this is something we'd like to have gzipped locally.
 psFname = getCacheFname(key, "ps", section)
 psGzFname = getCacheFname(key, "ps.gz", section)
 if os.path.exists(psFname) and not os.path.exists(psGzFname):
 # This is something we haven't gzipped yet.
-print "Compressing a copy of",psFname
+print("Compressing a copy of",psFname)
 outf = gzip.GzipFile(psGzFname, "wb")
 inf = open(psFname, "rb")
 while 1:
@@ -156,9 +158,9 @@ def downloadAll(bibtex, missingOnly=0):

 if __name__ == '__main__':
 if len(sys.argv) == 2:
-print "Loading from %s"%sys.argv[1]
+print("Loading from %s"%sys.argv[1])
 else:
-print >>sys.stderr, "Expected a single configuration file as an argument"
+print("Expected a single configuration file as an argument", file=sys.stderr)
 sys.exit(1)
 config.load(sys.argv[1])
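Two patterns recur in the file above: the `urllib2`/`httplib` imports move to their `six.moves` equivalents, and the Python-2-only `except SomeError, e` syntax becomes `except SomeError as e`, which both interpreters accept. A minimal illustration under those assumptions (the URL is a placeholder):

    from six.moves.urllib.request import urlopen
    from six.moves.urllib.error import URLError

    try:
        page = urlopen("http://example.invalid/").read()
    except (URLError, IOError) as e:  # py2- and py3-compatible syntax
        print("download failed: %s" % e)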
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import render_template

 from i2p2www import ANONBIB_CFG, ANONBIB_FILE
@@ -3,16 +3,20 @@

 """Generate indices by author, topic, date, and BibTeX key."""

+from __future__ import absolute_import
+from __future__ import print_function
 import sys
 import re
 import os
 import json
+from six.moves import map
+from six.moves import range

 assert sys.version_info[:3] >= (2,2,0)
-os.umask(022)
+os.umask(0o22)

-import BibTeX
-import config
+from . import BibTeX
+from . import config

 def getTemplate(name):
 f = open(name)
@@ -39,15 +43,15 @@ def writeBody(f, sections, section_urls, cache_path, base_url):
 sDisp = re.sub(r'\s+', ' ', s.strip())
 sDisp = sDisp.replace(" ", "&nbsp;")
 if u:
-print >>f, ('<li><h3><a name="%s"></a><a href="%s">%s</a></h3>'%(
-(BibTeX.url_untranslate(s), u, sDisp)))
+print(('<li><h3><a name="%s"></a><a href="%s">%s</a></h3>'%(
+(BibTeX.url_untranslate(s), u, sDisp))), file=f)
 else:
-print >>f, ('<li><h3><a name="%s">%s</a></h3>'%(
-BibTeX.url_untranslate(s),sDisp))
-print >>f, "<ul class='expand'>"
+print(('<li><h3><a name="%s">%s</a></h3>'%(
+BibTeX.url_untranslate(s),sDisp)), file=f)
+print("<ul class='expand'>", file=f)
 for e in entries:
-print >>f, e.to_html(cache_path=cache_path, base_url=base_url)
-print >>f, "</ul></li>"
+print(e.to_html(cache_path=cache_path, base_url=base_url), file=f)
+print("</ul></li>", file=f)

 def writeHTML(f, sections, sectionType, fieldName, choices,
 tag, config, cache_url_path, section_urls={}):
@@ -69,7 +73,7 @@ def writeHTML(f, sections, sectionType, fieldName, choices,

 #
 tagListStr = []
-st = config.TAG_SHORT_TITLES.keys()
+st = list(config.TAG_SHORT_TITLES.keys())
 st.sort()
 root = "../"*pathLength(config.TAG_DIRECTORIES[tag])
 if root == "": root = "."
@@ -104,10 +108,10 @@ def writeHTML(f, sections, sectionType, fieldName, choices,
 }

 header, footer = getTemplate(config.TEMPLATE_FILE)
-print >>f, header%fields
+print(header%fields, file=f)
 writeBody(f, sections, section_urls, cache_path=cache_url_path,
 base_url=root)
-print >>f, footer%fields
+print(footer%fields, file=f)

 def jsonDumper(obj):
 if isinstance(obj, BibTeX.BibTeXEntry):
@@ -125,7 +129,7 @@ def writePageSet(config, bib, tag):
 bib_entries = bib.entries[:]

 if not bib_entries:
-print >>sys.stderr, "No entries with tag %r; skipping"%tag
+print("No entries with tag %r; skipping"%tag, file=sys.stderr)
 return

 tagdir = config.TAG_DIRECTORIES[tag]
@@ -133,7 +137,7 @@ def writePageSet(config, bib, tag):
 cache_url_path = BibTeX.smartJoin("../"*pathLength(tagdir),
 config.CACHE_DIR)
 if not os.path.exists(outdir):
-os.makedirs(outdir, 0755)
+os.makedirs(outdir, 0o755)
 ##### Sorted views:

 ## By topic.
@@ -174,7 +178,7 @@ def writePageSet(config, bib, tag):
 except ValueError:
 last_year = int(entries[-2][1][0].get('year'))

-years = map(str, range(first_year, last_year+1))
+years = list(map(str, list(range(first_year, last_year+1))))
 if entries[-1][0] == 'Unknown':
 years.append("Unknown")

@@ -216,15 +220,15 @@ def writePageSet(config, bib, tag):

 header,footer = getTemplate(config.BIBTEX_TEMPLATE_FILE)
 f = open(os.path.join(outdir,"bibtex.html"), 'w')
-print >>f, header % { 'command_line' : "",
+print(header % { 'command_line' : "",
 'title': config.TAG_TITLES[tag],
-'root': root }
+'root': root }, file=f)
 for ent in entries:
-print >>f, (
+print((
 ("<tr><td class='bibtex'><a name='%s'>%s</a>"
 "<pre class='bibtex'>%s</pre></td></tr>")
-%(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1)))
-print >>f, footer
+%(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1))), file=f)
+print(footer, file=f)
 f.close()

 f = open(os.path.join(outdir,"bibtex.json"), 'w')
@@ -234,9 +238,9 @@ def writePageSet(config, bib, tag):

 if __name__ == '__main__':
 if len(sys.argv) == 2:
-print "Loading from %s"%sys.argv[1]
+print("Loading from %s"%sys.argv[1])
 else:
-print >>sys.stderr, "Expected a single configuration file as an argument"
+print("Expected a single configuration file as an argument", file=sys.stderr)
 sys.exit(1)
 config.load(sys.argv[1])
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 import codecs
 import datetime
 from docutils.core import publish_parts
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import abort, g, redirect, render_template, request, url_for
 from werkzeug.contrib.atom import AtomFeed

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import redirect, render_template, request
 from i2p2www import CURRENT_I2P_VERSION, MIRRORS_FILE

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import abort, redirect, render_template, request
 try:
 import json
@@ -166,13 +167,13 @@ def downloads_redirect(version, net, protocol, domain, file):
 }

 if not protocol:
-protocol = mirrors.keys()[randint(0, len(mirrors) - 1)]
+protocol = list(mirrors.keys())[randint(0, len(mirrors) - 1)]
 if not protocol in mirrors:
 abort(404)
 mirrors=mirrors[protocol]

 if not domain:
-domain = mirrors.keys()[randint(0, len(mirrors) - 1)]
+domain = list(mirrors.keys())[randint(0, len(mirrors) - 1)]
 if not domain in mirrors:
 abort(404)
 return render_template('downloads/redirect.html',
@@ -1,5 +1,7 @@
 # -*- coding: utf8 -*-

+from __future__ import absolute_import
+from __future__ import print_function
 import os
 import sys
 from jinja2 import nodes
@@ -9,6 +11,7 @@ from pygments import highlight
 from pygments.lexers import get_lexer_by_name, guess_lexer
 from pygments.formatters import HtmlFormatter
 from pygments.util import ClassNotFound
+import six

 try:
 import ctags
@@ -29,8 +32,8 @@ def we_are_frozen():
 def module_path():
 encoding = sys.getfilesystemencoding()
 if we_are_frozen():
-return os.path.dirname(unicode(sys.executable, encoding))
-return os.path.dirname(unicode(__file__, encoding))
+return os.path.dirname(six.text_type(sys.executable, encoding))
+return os.path.dirname(six.text_type(__file__, encoding))


 class HighlightExtension(Extension):
@@ -9,14 +9,21 @@
 :license: BSD, see LICENSE for details.
 """

+from __future__ import absolute_import
+from __future__ import print_function
 import os
 import sys
 import os.path
-import StringIO
+try:
+from StringIO import StringIO
+except ImportError:
+from io import StringIO

 from pygments.formatter import Formatter
 from pygments.token import Token, Text, STANDARD_TYPES
-from pygments.util import get_bool_opt, get_int_opt, get_list_opt, bytes
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt#, bytes
+import six
+from six.moves import range

 try:
 import ctags
@@ -459,7 +466,7 @@ class I2PHtmlFormatter(Formatter):
 """
 if arg is None:
 arg = ('cssclass' in self.options and '.'+self.cssclass or '')
-if isinstance(arg, basestring):
+if isinstance(arg, six.string_types):
 args = [arg]
 else:
 args = list(arg)
@@ -473,7 +480,7 @@ class I2PHtmlFormatter(Formatter):
 return ', '.join(tmp)

 styles = [(level, ttype, cls, style)
-for cls, (style, ttype, level) in self.class2style.iteritems()
+for cls, (style, ttype, level) in six.iteritems(self.class2style)
 if cls and style]
 styles.sort()
 lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
@@ -511,8 +518,8 @@ class I2PHtmlFormatter(Formatter):
 cssfilename = os.path.join(os.path.dirname(filename),
 self.cssfile)
 except AttributeError:
-print >>sys.stderr, 'Note: Cannot determine output file name, ' \
-'using current directory as base for the CSS file name'
+print('Note: Cannot determine output file name, ' \
+'using current directory as base for the CSS file name', file=sys.stderr)
 cssfilename = self.cssfile
 # write CSS file only if noclobber_cssfile isn't given as an option.
 try:
@@ -521,7 +528,7 @@ class I2PHtmlFormatter(Formatter):
 cf.write(CSSFILE_TEMPLATE %
 {'styledefs': self.get_style_defs('body')})
 cf.close()
-except IOError, err:
+except IOError as err:
 err.strerror = 'Error writing CSS file: ' + err.strerror
 raise

@@ -1,5 +1,7 @@
+from __future__ import absolute_import
 from math import ceil
-from werkzeug import import_string, cached_property
+from werkzeug.utils import cached_property, import_string
+from six.moves import range

 ########################
 # General helper methods
@@ -56,7 +58,7 @@ class Pagination(object):
 def iter_pages(self, left_edge=2, left_current=2,
 right_current=5, right_edge=2):
 last = 0
-for num in xrange(1, self.pages + 1):
+for num in range(1, self.pages + 1):
 if num <= left_edge or \
 (num > self.page - left_current - 1 and \
 num < self.page + right_current) or \
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import g, redirect, url_for


@@ -197,7 +198,7 @@ LEGACY_RELEASES_MAP={
 '0.9.8': (2013, 9, 30),
 '0.9.8.1': (2013, 10, 2),
 '0.9.9': (2013, 12, 7),
-'0.9.10': (2014, 01, 22),
+'0.9.10': (2014, 0o1, 22),
 }

 def legacy_show(f):
@@ -232,5 +233,6 @@ def legacy_release(version):
 else:
 return legacy_show('release-%s' % version)

-def legacy_blog(lang, (year, month, day), title):
+def legacy_blog(lang, xxx_todo_changeme, title):
+(year, month, day) = xxx_todo_changeme
 return redirect(url_for('blog_post', lang=lang, slug=('%d/%02d/%02d/%s' % (year, month, day, title))), 301)
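The odd-looking `xxx_todo_changeme` rewrite above exists because Python 3 removed tuple parameters in function signatures (PEP 3113); 2to3 keeps the call signature working by taking a single placeholder argument and unpacking it in the body. A hand-written equivalent would look like this sketch (same `redirect`/`url_for` calls as in the hunk):

    def legacy_blog(lang, date, title):
        year, month, day = date
        slug = '%d/%02d/%02d/%s' % (year, month, day, title)
        return redirect(url_for('blog_post', lang=lang, slug=slug), 301)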
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from pygments.lexer import RegexLexer, bygroups
 from pygments.token import *

@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
 import codecs
 import datetime
 from docutils.core import publish_parts
@@ -54,7 +56,7 @@ def get_meetings_ids(num=0):
 # iterate over all files
 for f in v[2]:
 # ignore all non-.rst files
-print("Meeting file found", f)
+print(("Meeting file found", f))
 if not f.endswith('.rst'):
 continue
 try:
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 import codecs
 from flask import abort, render_template, request, safe_join, send_from_directory
 import os.path
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import g, make_response, render_template, request, safe_join
 import os.path

@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 import codecs
 from collections import defaultdict
 from docutils import io
@@ -23,6 +24,7 @@ import os.path

 from i2p2www import PROPOSAL_DIR, SPEC_DIR
 from i2p2www import helpers
+from six.moves import range


 SPEC_METATAGS = {
@@ -1,7 +1,9 @@
+from __future__ import absolute_import
 import ctags
-from flask import g, request, safe_join, url_for
+from flask import g, request, url_for
+from werkzeug.utils import safe_join
 import os.path
-from urlparse import urlsplit, urlunsplit
+from six.moves.urllib.parse import urlsplit, urlunsplit

 from i2p2www import (
 CANONICAL_DOMAIN,
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from werkzeug.routing import BaseConverter

 from i2p2www import app
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from flask import abort, redirect, render_template, safe_join, send_from_directory, url_for
 import os.path

@@ -1,9 +1,11 @@
+from __future__ import absolute_import
 from werkzeug import BaseRequest, BaseResponse, ETagResponseMixin, escape, run_simple, SharedDataMiddleware
 from werkzeug.exceptions import HTTPException
 import os
 import sha
 from time import time
 from random import choice
+from six.moves import range

 class Request(BaseRequest):
 """Useful subclass of the default request that knows how to build urls."""
@@ -14,6 +14,8 @@


 # Modify as needed, or use a symlink.
+from __future__ import absolute_import
+from six.moves import range
 netdbdir = 'netdb'
 database = 'Driver=SQLite;DATABASE=I2PnetDb'

@@ -136,7 +138,7 @@ def application(environ, start_response):
 new = []
 if len(entries) > 150:
 # select some randomly
-for i in xrange(100):
+for i in range(100):
 while True:
 sel = choice(entries)
 if not sel.startswith('routerInfo-'):
@@ -1,4 +1,5 @@
 #!env/bin/python
+from __future__ import absolute_import
 from i2p2www import app
 import os
