1 Commit
master ... 2to3

Author SHA1 Message Date
c77994a585 re-attempt migration using 2to3 2023-11-22 14:46:53 -05:00
20 changed files with 251 additions and 250 deletions
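The commit message does not record the exact invocation, but a diff of this shape matches 2to3's stock fixer set run in place, e.g.:

    2to3 -w -n .    # -w: write changes back to the files; -n: don't keep .bak backups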

View File

@ -67,32 +67,32 @@ SUPPORTED_LANGS = [
] ]
SUPPORTED_LANG_NAMES = { SUPPORTED_LANG_NAMES = {
'ar': u'Arabic العربية', 'ar': 'Arabic العربية',
'id': u'Bahasa Indonesia', 'id': 'Bahasa Indonesia',
'zh': u'Chinese 中文', 'zh': 'Chinese 中文',
'zh_TW': u'Chinese 中文 (繁體中文, 台灣)', 'zh_TW': 'Chinese 中文 (繁體中文, 台灣)',
'de': u'Deutsch', 'de': 'Deutsch',
'en': u'English', 'en': 'English',
'es': u'Castellano', 'es': 'Castellano',
'fr': u'Français', 'fr': 'Français',
'el': u'Greek Ελληνικά', 'el': 'Greek Ελληνικά',
'he': u'Hebrew עברית', 'he': 'Hebrew עברית',
'hu': u'Hungarian', 'hu': 'Hungarian',
'it': u'Italiano', 'it': 'Italiano',
'ja': u'Japanese 日本語', 'ja': 'Japanese 日本語',
'ko': u'Korean 한국말', 'ko': 'Korean 한국말',
'mg': u'Fiteny Malagasy', 'mg': 'Fiteny Malagasy',
'nl': u'Nederlands', 'nl': 'Nederlands',
'fa': u'Persian فارسی', 'fa': 'Persian فارسی',
'pl': u'Polski', 'pl': 'Polski',
'pt': u'Português', 'pt': 'Português',
'pt_BR': u'Português do Brasil', 'pt_BR': 'Português do Brasil',
'ro': u'Română', 'ro': 'Română',
'ru': u'Russian Русский язык', 'ru': 'Russian Русский язык',
'fi': u'Suomi', 'fi': 'Suomi',
'sv': u'Svenska', 'sv': 'Svenska',
'tr': u'Türkçe', 'tr': 'Türkçe',
'uk': u'Ukrainian Українська', 'uk': 'Ukrainian Українська',
} }
RTL_LANGS = [ RTL_LANGS = [
@ -213,7 +213,7 @@ def detect_theme():
theme = 'duck' theme = 'duck'
if 'style' in request.cookies: if 'style' in request.cookies:
theme = request.cookies['style'] theme = request.cookies['style']
if 'theme' in request.args.keys(): if 'theme' in list(request.args.keys()):
theme = request.args['theme'] theme = request.args['theme']
# TEMPORARY: enable external themes # TEMPORARY: enable external themes
# TODO: Remove this (and the corresponding lines in global/layout.html # TODO: Remove this (and the corresponding lines in global/layout.html
@ -263,5 +263,5 @@ def server_error(error):
return render_template('global/error_500.html'), 500 return render_template('global/error_500.html'), 500
# Import these to ensure they get loaded # Import these to ensure they get loaded
import templatevars from . import templatevars
import urls from . import urls
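Both import changes above come from 2to3's fix_import: Python 2 resolved a bare "import templatevars" relative to the containing package, while Python 3 (PEP 328) only accepts the explicit spelling. A sketch of the equivalent forms (the absolute one uses the i2p2www package name seen later in this diff):

    # Python 2, implicit relative import -- removed in Python 3:
    #     import templatevars
    # Python 3, explicit relative import -- valid only inside a package:
    #     from . import templatevars
    # Equivalent absolute import:
    #     from i2p2www import templatevars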

View File

@ -6,14 +6,14 @@
Based on perl code by Eddie Kohler; heavily modified. Based on perl code by Eddie Kohler; heavily modified.
""" """
import cStringIO import io
import re import re
import sys import sys
import os import os
import config from . import config
import rank from . import rank
__all__ = [ 'ParseError', 'BibTeX', 'BibTeXEntry', 'htmlize', __all__ = [ 'ParseError', 'BibTeX', 'BibTeXEntry', 'htmlize',
'ParsedAuthor', 'FileIter', 'Parser', 'parseFile', 'ParsedAuthor', 'FileIter', 'Parser', 'parseFile',
@ -66,7 +66,7 @@ class BibTeX:
"""Add a BibTeX entry to this file.""" """Add a BibTeX entry to this file."""
k = ent.key k = ent.key
if self.byKey.get(ent.key.lower()): if self.byKey.get(ent.key.lower()):
print >> sys.stderr, "Already have an entry named %s"%k print("Already have an entry named %s"%k, file=sys.stderr)
return return
self.entries.append(ent) self.entries.append(ent)
self.byKey[ent.key.lower()] = ent self.byKey[ent.key.lower()] = ent
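The print fixer rewrites the Python 2 "print >>stream, value" statement as a call with a file= keyword, as above. A minimal illustration:

    import sys

    k = "minion-design"   # stand-in entry key, for illustration only
    # Python 2:  print >>sys.stderr, "Already have an entry named %s" % k
    print("Already have an entry named %s" % k, file=sys.stderr)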
@ -79,7 +79,7 @@ class BibTeX:
try: try:
cr = self.byKey[ent['crossref'].lower()] cr = self.byKey[ent['crossref'].lower()]
except KeyError: except KeyError:
print "No such crossref: %s"% ent['crossref'] print("No such crossref: %s"% ent['crossref'])
break break
if seen.get(cr.key): if seen.get(cr.key):
raise ParseError("Circular crossref at %s" % ent.key) raise ParseError("Circular crossref at %s" % ent.key)
@ -87,12 +87,12 @@ class BibTeX:
del ent.entries['crossref'] del ent.entries['crossref']
if cr.entryLine < ent.entryLine: if cr.entryLine < ent.entryLine:
print "Warning: crossref %s used after declaration"%cr.key print("Warning: crossref %s used after declaration"%cr.key)
for k in cr.entries.keys(): for k in list(cr.entries.keys()):
if ent.entries.has_key(k): if k in ent.entries:
print "ERROR: %s defined both in %s and in %s"%( print("ERROR: %s defined both in %s and in %s"%(
k,ent.key,cr.key) k,ent.key,cr.key))
else: else:
ent.entries[k] = cr.entries[k] ent.entries[k] = cr.entries[k]
@ -105,7 +105,7 @@ class BibTeX:
rk = "title" rk = "title"
for ent in self.entries: for ent in self.entries:
if ent.type in config.OMIT_ENTRIES or not ent.has_key(rk): if ent.type in config.OMIT_ENTRIES or rk not in ent.entries:
ent.check() ent.check()
del self.byKey[ent.key.lower()] del self.byKey[ent.key.lower()]
else: else:
@ -122,7 +122,7 @@ def buildAuthorTable(entries):
authorsByLast.setdefault(tuple(a.last), []).append(a) authorsByLast.setdefault(tuple(a.last), []).append(a)
# map from author to collapsed author. # map from author to collapsed author.
result = {} result = {}
for k,v in config.COLLAPSE_AUTHORS.items(): for k,v in list(config.COLLAPSE_AUTHORS.items()):
a = parseAuthor(k)[0] a = parseAuthor(k)[0]
c = parseAuthor(v)[0] c = parseAuthor(v)[0]
result[c] = c result[c] = c
@ -130,7 +130,7 @@ def buildAuthorTable(entries):
for e in entries: for e in entries:
for author in e.parsedAuthor: for author in e.parsedAuthor:
if result.has_key(author): if author in result:
continue continue
c = author c = author
@ -141,16 +141,16 @@ def buildAuthorTable(entries):
result[author] = c result[author] = c
if 0: if 0:
for a,c in result.items(): for a,c in list(result.items()):
if a != c: if a != c:
print "Collapsing authors: %s => %s" % (a,c) print("Collapsing authors: %s => %s" % (a,c))
if 0: if 0:
print parseAuthor("Franz Kaashoek")[0].collapsesTo( print(parseAuthor("Franz Kaashoek")[0].collapsesTo(
parseAuthor("M. Franz Kaashoek")[0]) parseAuthor("M. Franz Kaashoek")[0]))
print parseAuthor("Paul F. Syverson")[0].collapsesTo( print(parseAuthor("Paul F. Syverson")[0].collapsesTo(
parseAuthor("Paul Syverson")[0]) parseAuthor("Paul Syverson")[0]))
print parseAuthor("Paul Syverson")[0].collapsesTo( print(parseAuthor("Paul Syverson")[0].collapsesTo(
parseAuthor("Paul F. Syverson")[0]) parseAuthor("Paul F. Syverson")[0]))
return result return result
@ -221,7 +221,7 @@ def splitEntriesByAuthor(entries):
htmlResult[sortkey] = secname htmlResult[sortkey] = secname
result.setdefault(sortkey, []).append(ent) result.setdefault(sortkey, []).append(ent)
sortnames = result.keys() sortnames = list(result.keys())
sortnames.sort() sortnames.sort()
sections = [ (htmlResult[n], result[n]) for n in sortnames ] sections = [ (htmlResult[n], result[n]) for n in sortnames ]
return sections, url_map return sections, url_map
@ -255,13 +255,13 @@ def sortEntriesByDate(entries):
monthname = match.group(1) monthname = match.group(1)
mon = MONTHS.index(monthname) mon = MONTHS.index(monthname)
except ValueError: except ValueError:
print "Unknown month %r in %s"%(ent.get("month"), ent.key) print("Unknown month %r in %s"%(ent.get("month"), ent.key))
mon = 0 mon = 0
try: try:
date = int(ent['year'])*13 + mon date = int(ent['year'])*13 + mon
except KeyError: except KeyError:
print "ERROR: No year field in %s"%ent.key print("ERROR: No year field in %s"%ent.key)
date = 10000*13 date = 10000*13
except ValueError: except ValueError:
date = 10000*13 date = 10000*13
@ -286,7 +286,7 @@ class BibTeXEntry:
def get(self, k, v=None): def get(self, k, v=None):
return self.entries.get(k,v) return self.entries.get(k,v)
def has_key(self, k): def has_key(self, k):
return self.entries.has_key(k) return k in self.entries
def __getitem__(self, k): def __getitem__(self, k):
return self.entries[k] return self.entries[k]
def __setitem__(self, k, v): def __setitem__(self, k, v):
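A caveat with the has_key fixer: rewriting obj.has_key(k) as "k in obj" is only equivalent when obj defines __contains__ (or really is a mapping). BibTeXEntry only defines __getitem__ over a backing dict, so "k in ent" falls back to the legacy index-based iteration protocol, calls ent[0], and raises KeyError; that is why membership tests against entry objects in this commit go through ent.entries. A reduced sketch of the safe shape (hypothetical class, not the project's code):

    class Entry:
        def __init__(self, entries):
            self.entries = entries       # backing dict, as in BibTeXEntry
        def __getitem__(self, k):
            return self.entries[k]
        def __contains__(self, k):       # makes `"year" in entry` behave
            return k in self.entries

    assert "year" in Entry({"year": "2004"})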
@ -312,13 +312,13 @@ class BibTeXEntry:
d = ["@%s{%s,\n" % (self.type, self.key)] d = ["@%s{%s,\n" % (self.type, self.key)]
if v: if v:
df = DISPLAYED_FIELDS[:] df = DISPLAYED_FIELDS[:]
for k in self.entries.keys(): for k in list(self.entries.keys()):
if k not in df: if k not in df:
df.append(k) df.append(k)
else: else:
df = DISPLAYED_FIELDS df = DISPLAYED_FIELDS
for f in df: for f in df:
if not self.entries.has_key(f): if f not in self.entries:
continue continue
v = self.entries[f] v = self.entries[f]
if v.startswith("<span class='bad'>"): if v.startswith("<span class='bad'>"):
@ -330,7 +330,7 @@ class BibTeXEntry:
d.append("%%%%% "+("ERROR: Non-ASCII characters: '%r'\n"%np)) d.append("%%%%% "+("ERROR: Non-ASCII characters: '%r'\n"%np))
d.append(" ") d.append(" ")
v = v.replace("&", "&amp;") v = v.replace("&", "&amp;")
if invStrings.has_key(v): if v in invStrings:
s = "%s = %s,\n" %(f, invStrings[v]) s = "%s = %s,\n" %(f, invStrings[v])
else: else:
s = "%s = {%s},\n" % (f, v) s = "%s = {%s},\n" % (f, v)
@ -359,7 +359,7 @@ class BibTeXEntry:
none.""" none."""
errs = self._check() errs = self._check()
for e in errs: for e in errs:
print e print(e)
return not errs return not errs
def _check(self): def _check(self):
@ -396,14 +396,14 @@ class BibTeXEntry:
not self['booktitle'].startswith("{Proceedings of"): not self['booktitle'].startswith("{Proceedings of"):
errs.append("ERROR: %s's booktitle (%r) doesn't start with 'Proceedings of'" % (self.key, self['booktitle'])) errs.append("ERROR: %s's booktitle (%r) doesn't start with 'Proceedings of'" % (self.key, self['booktitle']))
if self.has_key("pages") and not re.search(r'\d+--\d+', self['pages']): if "pages" in self.entries and not re.search(r'\d+--\d+', self['pages']):
errs.append("ERROR: Misformed pages in %s"%self.key) errs.append("ERROR: Misformed pages in %s"%self.key)
if self.type == 'proceedings': if self.type == 'proceedings':
if self.get('title'): if self.get('title'):
errs.append("ERROR: %s is a proceedings: it should have a booktitle, not a title." % self.key) errs.append("ERROR: %s is a proceedings: it should have a booktitle, not a title." % self.key)
for field, value in self.entries.items(): for field, value in list(self.entries.items()):
if value.translate(ALLCHARS, PRINTINGCHARS): if value.translate(str.maketrans('', '', PRINTINGCHARS)):
errs.append("ERROR: %s.%s has non-ASCII characters"%( errs.append("ERROR: %s.%s has non-ASCII characters"%(
self.key, field)) self.key, field))
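2to3 leaves value.translate(ALLCHARS, PRINTINGCHARS) untouched even though str.translate() lost its two-argument delete form in Python 3, which is why the check above goes through str.maketrans. The Python 3 spelling of "delete these characters":

    PRINTINGCHARS = "\t\n\r" + "".join(map(chr, range(32, 127)))
    # Mapping a character to None in the table deletes it; whatever
    # survives the translation is a non-printing character.
    leftover = "exa\x01mple".translate(str.maketrans("", "", PRINTINGCHARS))
    assert leftover == "\x01"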
@ -551,8 +551,8 @@ class BibTeXEntry:
cache_section = self.get('www_cache_section', ".") cache_section = self.get('www_cache_section', ".")
if cache_section not in config.CACHE_SECTIONS: if cache_section not in config.CACHE_SECTIONS:
if cache_section != ".": if cache_section != ".":
print >>sys.stderr, "Unrecognized cache section %s"%( print("Unrecognized cache section %s"%(
cache_section) cache_section), file=sys.stderr)
cache_section="." cache_section="."
for key, name, ext in (('www_abstract_url', 'abstract','abstract'), for key, name, ext in (('www_abstract_url', 'abstract','abstract'),
@ -766,13 +766,13 @@ class ParsedAuthor:
short = o.first; long = self.first short = o.first; long = self.first
initials_s = "".join([n[0] for n in short]) initials_s = "".join([n[0] for n in short])
initials_l = "".join([n[0] for n in long]) initials_l = "".join([n[0] for n in long])
idx = initials_l.find(initials_s) idx = initials_l.find(initials_s)
if idx < 0: if idx < 0:
return self return self
n = long[:idx] n = long[:idx]
for i in range(idx, idx+len(short)): for i in range(idx, idx+len(short)):
a = long[i]; b = short[i-idx] a = long[i]; b = short[i-idx]
if a == b: if a == b:
n.append(a) n.append(a)
elif len(a) == 2 and a[1] == '.' and a[0] == b[0]: elif len(a) == 2 and a[1] == '.' and a[0] == b[0]:
@ -781,7 +781,7 @@ class ParsedAuthor:
n.append(a) n.append(a)
else: else:
return self return self
n += long[idx+len(short):] n += long[idx+len(short):]
if n == self.first: if n == self.first:
return self return self
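2to3's fix for the long type renames the bare name long to int and does not reliably distinguish a local variable from the builtin; its mechanical output here iterated over the int type itself, which raises TypeError at runtime. The name long is an ordinary identifier in Python 3, so the local bound from self.first keeps its name above. A reduced sketch of the hazard (hypothetical values):

    long = ["Paul", "F.", "Syverson"]        # fine in Python 3: long is just a name
    initials = "".join(n[0] for n in long)   # 2to3 may blindly rewrite long -> int here
    assert initials == "PFS"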
@ -842,7 +842,7 @@ def _split(s,w=79,indent=8):
first = 1 first = 1
indentation = "" indentation = ""
while len(s) > w: while len(s) > w:
for i in xrange(w-1, 20, -1): for i in range(w-1, 20, -1):
if s[i] == ' ': if s[i] == ' ':
r.append(indentation+s[:i]) r.append(indentation+s[:i])
s = s[i+1:] s = s[i+1:]
@ -864,14 +864,14 @@ class FileIter:
if fname: if fname:
file = open(fname, 'r') file = open(fname, 'r')
if string: if string:
file = cStringIO.StringIO(string) file = io.StringIO(string)
if file: if file:
it = iter(file.xreadlines()) it = iter(file)
self.iter = it self.iter = it
assert self.iter assert self.iter
self.lineno = 0 self.lineno = 0
self._next = it.next self._next = it.__next__
def next(self): def __next__(self):
self.lineno += 1 self.lineno += 1
return self._next() return self._next()
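FileIter above shows the whole iterator-protocol rename: the method next() becomes __next__(), the bound reference it.next becomes it.__next__, and call sites go through the next() builtin. The Python 3 shape in miniature:

    class Counter:
        def __init__(self, limit):
            self.n, self.limit = 0, limit
        def __iter__(self):
            return self
        def __next__(self):              # was next() in Python 2
            if self.n >= self.limit:
                raise StopIteration
            self.n += 1
            return self.n

    assert list(Counter(3)) == [1, 2, 3]   # next(obj) calls obj.__next__()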
@ -880,7 +880,7 @@ def parseAuthor(s):
try: try:
return _parseAuthor(s) return _parseAuthor(s)
except: except:
print >>sys.stderr, "Internal error while parsing author %r"%s print("Internal error while parsing author %r"%s, file=sys.stderr)
raise raise
def _parseAuthor(s): def _parseAuthor(s):
@ -891,7 +891,7 @@ def _parseAuthor(s):
while s: while s:
s = s.strip() s = s.strip()
bracelevel = 0 bracelevel = 0
for i in xrange(len(s)): for i in range(len(s)):
if s[i] == '{': if s[i] == '{':
bracelevel += 1 bracelevel += 1
elif s[i] == '}': elif s[i] == '}':
@ -947,8 +947,8 @@ def _parseAuthor(s):
return parsedAuthors return parsedAuthors
ALLCHARS = "".join(map(chr,range(256))) ALLCHARS = "".join(map(chr,list(range(256))))
PRINTINGCHARS = "\t\n\r"+"".join(map(chr,range(32, 127))) PRINTINGCHARS = "\t\n\r"+"".join(map(chr,list(range(32, 127))))
LC_CHARS = "abcdefghijklmnopqrstuvwxyz" LC_CHARS = "abcdefghijklmnopqrstuvwxyz"
SV_DELCHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ" SV_DELCHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz" "abcdefghijklmnopqrstuvwxyz"
@ -995,7 +995,7 @@ class Parser:
self.strings.update(initial_strings) self.strings.update(initial_strings)
self.newStrings = {} self.newStrings = {}
self.invStrings = {} self.invStrings = {}
for k,v in config.INITIAL_STRINGS.items(): for k,v in list(config.INITIAL_STRINGS.items()):
self.invStrings[v]=k self.invStrings[v]=k
self.fileiter = fileiter self.fileiter = fileiter
if result is None: if result is None:
@ -1049,7 +1049,7 @@ class Parser:
continue continue
data.append(line) data.append(line)
data.append(" ") data.append(" ")
line = it.next() line = next(it)
self.litStringLine = 0 self.litStringLine = 0
elif line[0] == '{': elif line[0] == '{':
bracelevel += 1 bracelevel += 1
@ -1076,13 +1076,13 @@ class Parser:
#print bracelevel, "C", repr(line) #print bracelevel, "C", repr(line)
data.append(line) data.append(line)
data.append(" ") data.append(" ")
line = it.next() line = next(it)
elif line[0] == '#': elif line[0] == '#':
print >>sys.stderr, "Weird concat on line %s"%it.lineno print("Weird concat on line %s"%it.lineno, file=sys.stderr)
elif line[0] in "},": elif line[0] in "},":
if not data: if not data:
print >>sys.stderr, "No data after field on line %s"%( print("No data after field on line %s"%(
it.lineno) it.lineno), file=sys.stderr)
else: else:
m = RAW_DATA_RE.match(line) m = RAW_DATA_RE.match(line)
if m: if m:
@ -1170,7 +1170,7 @@ class Parser:
else: else:
key = v[0] key = v[0]
d = {} d = {}
for i in xrange(1,len(v),2): for i in range(1,len(v),2):
d[v[i].lower()] = v[i+1] d[v[i].lower()] = v[i+1]
ent = BibTeXEntry(self.curEntType, key, d) ent = BibTeXEntry(self.curEntType, key, d)
ent.entryLine = self.entryLine ent.entryLine = self.entryLine
@ -1197,11 +1197,11 @@ class Parser:
def _parse(self): def _parse(self):
it = self.fileiter it = self.fileiter
line = it.next() line = next(it)
while 1: while 1:
# Skip blank lines. # Skip blank lines.
while not line or line.isspace() or OUTER_COMMENT_RE.match(line): while not line or line.isspace() or OUTER_COMMENT_RE.match(line):
line = it.next() line = next(it)
# Get the first line of an entry. # Get the first line of an entry.
m = ENTRY_BEGIN_RE.match(line) m = ENTRY_BEGIN_RE.match(line)
if m: if m:
@ -1215,7 +1215,7 @@ class Parser:
def _advance(it,line): def _advance(it,line):
while not line or line.isspace() or COMMENT_RE.match(line): while not line or line.isspace() or COMMENT_RE.match(line):
line = it.next() line = next(it)
return line return line
# Matches a comment line outside of an entry. # Matches a comment line outside of an entry.
@ -1265,5 +1265,5 @@ if __name__ == '__main__':
for e in r.entries: for e in r.entries:
if e.type in ("proceedings", "journal"): continue if e.type in ("proceedings", "journal"): continue
print e.to_html() print(e.to_html())

View File

@ -19,7 +19,7 @@ del _k
def load(cfgFile): def load(cfgFile):
mod = {} mod = {}
execfile(cfgFile, mod) exec(compile(open(cfgFile, "rb").read(), cfgFile, 'exec'), mod)
for _k in _KEYS: for _k in _KEYS:
try: try:
globals()[_k]=mod[_k] globals()[_k]=mod[_k]
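execfile() is gone in Python 3, and the exec(compile(open(...).read(), ...)) expansion above is 2to3's literal equivalent -- note it never closes the file handle. A sketch of the same replacement with deterministic cleanup (hypothetical helper name; assumes the config file is trusted, since exec runs it):

    def load_config(cfg_file):
        mod = {}
        with open(cfg_file, "rb") as f:                      # handle closed on exit
            exec(compile(f.read(), cfg_file, "exec"), mod)   # compile() keeps tracebacks pointing at cfg_file
        return mod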
@ -28,7 +28,7 @@ def load(cfgFile):
INITIAL_STRINGS.update(_EXTRA_INITIAL_STRINGS) INITIAL_STRINGS.update(_EXTRA_INITIAL_STRINGS)
AUTHOR_RE_LIST[:] = [ AUTHOR_RE_LIST[:] = [
(re.compile(k, re.I), v,) for k, v in AUTHOR_URLS.items() (re.compile(k, re.I), v,) for k, v in list(AUTHOR_URLS.items())
] ]
NO_COLLAPSE_AUTHORS_RE_LIST[:] = [ NO_COLLAPSE_AUTHORS_RE_LIST[:] = [
@ -36,7 +36,7 @@ def load(cfgFile):
] ]
ALPHABETIZE_AUTHOR_AS_RE_LIST[:] = [ ALPHABETIZE_AUTHOR_AS_RE_LIST[:] = [
(re.compile(k, re.I), v,) for k,v in ALPHABETIZE_AUTHOR_AS.items() (re.compile(k, re.I), v,) for k,v in list(ALPHABETIZE_AUTHOR_AS.items())
] ]
_EXTRA_INITIAL_STRINGS = { _EXTRA_INITIAL_STRINGS = {

View File

@ -45,7 +45,7 @@ SINGLETONS = {
'z': 's', 'z': 's',
} }
ALLCHARS = "".join(map(chr, range(256))) ALLCHARS = "".join(map(chr, list(range(256))))
NONLCCHARS = "".join([c for c in ALLCHARS if not c.islower()]) NONLCCHARS = "".join([c for c in ALLCHARS if not c.islower()])
def metaphone(s): def metaphone(s):
"""Return the metaphone equivalent of a provided string""" """Return the metaphone equivalent of a provided string"""
@ -182,7 +182,7 @@ def metaphone(s):
return "".join(result) return "".join(result)
def demo(a): def demo(a):
print a, "=>", metaphone(a) print(a, "=>", metaphone(a))
if __name__ == '__main__': if __name__ == '__main__':
demo("Nick. Mathewson") demo("Nick. Mathewson")

View File

@ -7,7 +7,7 @@
cache_expire = 60*60*24*30 # 30 days cache_expire = 60*60*24*30 # 30 days
# Checks # Checks
import config from . import config
import os import os
import sys import sys
from os.path import exists, isdir, join, getmtime from os.path import exists, isdir, join, getmtime
@ -32,8 +32,8 @@ def cache_folder():
return r return r
import re import re
from urllib2 import urlopen, build_opener from urllib.request import urlopen, build_opener
from urllib import quote from urllib.parse import quote
from datetime import date from datetime import date
import hashlib import hashlib
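Python 3 folds urllib, urllib2, and urlparse into the urllib package, separating the fetch machinery (urllib.request) from the encoding helpers (urllib.parse); hence the split import above. In the shape this file uses it:

    from urllib.request import build_opener   # was: from urllib2 import build_opener
    from urllib.parse import quote            # was: from urllib import quote

    opener = build_opener()
    opener.addheaders = [('User-agent', 'Anon.Bib.0.1')]
    print(quote("Stop-and-Go MIXes"))          # percent-escapes the query string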
@ -66,17 +66,17 @@ def getPageForTitle(title, cache=True, update=True, save=True):
if exists(join(cache_folder(), md5h(url))) and cache: if exists(join(cache_folder(), md5h(url))) and cache:
return url, file(join(cache_folder(), md5h(url)),'r').read() return url, open(join(cache_folder(), md5h(url)),'r').read()
elif update: elif update:
print "Downloading rank for %r."%title print("Downloading rank for %r."%title)
# Make a custom user agent (so that we are not filtered by Google)! # Make a custom user agent (so that we are not filtered by Google)!
opener = build_opener() opener = build_opener()
opener.addheaders = [('User-agent', 'Anon.Bib.0.1')] opener.addheaders = [('User-agent', 'Anon.Bib.0.1')]
print "connecting..." print("connecting...")
connection = opener.open(url) connection = opener.open(url)
print "reading" print("reading")
page = connection.read() page = connection.read()
print "done" print("done")
if save: if save:
file(join(cache_folder(), md5h(url)),'w').write(page) open(join(cache_folder(), md5h(url)),'w').write(page)
return url, page return url, page
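2to3 has no fixer for the file() builtin, which simply does not exist in Python 3; the two cache lines above therefore use open(). A sketch of the read path with the handle closed promptly (hypothetical helper, illustration only):

    def read_cached(path):
        with open(path, 'r') as f:
            return f.read()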
@ -140,20 +140,20 @@ def get_rank_html(title, years=None, base_url=".", update=True,
def TestScholarFormat(): def TestScholarFormat():
# We need to ensure that Google Scholar does not change its page format under our feet # We need to ensure that Google Scholar does not change its page format under our feet
# Use some cases to check if all is good # Use some cases to check if all is good
print "Checking google scholar formats..." print("Checking google scholar formats...")
stopAndGoCites = getCite("Stop-and-Go MIXes: Providing Probabilistic Anonymity in an Open System", False)[0] stopAndGoCites = getCite("Stop-and-Go MIXes: Providing Probabilistic Anonymity in an Open System", False)[0]
dragonCites = getCite("Mixes protected by Dragons and Pixies: an empirical study", False, save=False)[0] dragonCites = getCite("Mixes protected by Dragons and Pixies: an empirical study", False, save=False)[0]
if stopAndGoCites in (0, None): if stopAndGoCites in (0, None):
print """OOPS.\n print("""OOPS.\n
It looks like Google Scholar changed their URL format or their output format. It looks like Google Scholar changed their URL format or their output format.
I went to count the cites for the Stop-and-Go MIXes paper, and got nothing.""" I went to count the cites for the Stop-and-Go MIXes paper, and got nothing.""")
sys.exit(1) sys.exit(1)
if dragonCites != None: if dragonCites != None:
print """OOPS.\n print("""OOPS.\n
It looks like Google Scholar changed their URL format or their output format. It looks like Google Scholar changed their URL format or their output format.
I went to count the cites for a fictitious paper, and found some.""" I went to count the cites for a fictitious paper, and found some.""")
sys.exit(1) sys.exit(1)
def urlIsUseless(u): def urlIsUseless(u):
@ -170,7 +170,7 @@ URLTYPES=[ "pdf", "ps", "txt", "ps_gz", "html" ]
if __name__ == '__main__': if __name__ == '__main__':
# First download the bibliography file. # First download the bibliography file.
import BibTeX from . import BibTeX
suggest = False suggest = False
if sys.argv[1] == 'suggest': if sys.argv[1] == 'suggest':
suggest = True suggest = True
@ -182,7 +182,7 @@ if __name__ == '__main__':
bib = BibTeX.parseFile(config.MASTER_BIB) bib = BibTeX.parseFile(config.MASTER_BIB)
remove_old() remove_old()
print "Downloading missing ranks." print("Downloading missing ranks.")
for ent in bib.entries: for ent in bib.entries:
getCite(ent['title'], cache=True, update=True) getCite(ent['title'], cache=True, update=True)
@ -190,13 +190,13 @@ if __name__ == '__main__':
for ent in bib.entries: for ent in bib.entries:
haveOne = False haveOne = False
for utype in URLTYPES: for utype in URLTYPES:
if ent.has_key("www_%s_url"%utype): if "www_%s_url"%utype in ent.entries:
haveOne = True haveOne = True
break break
if haveOne: if haveOne:
continue continue
print ent.key, "has no URLs given." print(ent.key, "has no URLs given.")
urls = [ u for u in getPaperURLs(ent['title']) if not urlIsUseless(u) ] urls = [ u for u in getPaperURLs(ent['title']) if not urlIsUseless(u) ]
for u in urls: for u in urls:
print "\t", u print("\t", u)

View File

@ -13,9 +13,9 @@ import re
assert sys.version_info[:3] >= (2,2,0) assert sys.version_info[:3] >= (2,2,0)
import BibTeX from . import BibTeX
import config from . import config
import metaphone from . import metaphone
_MPCACHE = {} _MPCACHE = {}
def soundsLike(s1, s2): def soundsLike(s1, s2):
@ -168,16 +168,16 @@ class MasterBibTeX(BibTeX.BibTeX):
matches = m2 matches = m2
if not matches: if not matches:
print "No match for %s"%e.key print("No match for %s"%e.key)
if matches[-1][1] is e: if matches[-1][1] is e:
print "%s matches for %s: OK."%(len(matches), e.key) print("%s matches for %s: OK."%(len(matches), e.key))
else: else:
print "%s matches for %s: %s is best!" %(len(matches), e.key, print("%s matches for %s: %s is best!" %(len(matches), e.key,
matches[-1][1].key) matches[-1][1].key))
if len(matches) > 1: if len(matches) > 1:
for g, m in matches: for g, m in matches:
print "%%%% goodness", g print("%%%% goodness", g)
print m print(m)
def noteToURL(note): def noteToURL(note):
@ -202,7 +202,7 @@ def emit(f,ent):
global all_ok global all_ok
errs = ent._check() errs = ent._check()
if master.byKey.has_key(ent.key.strip().lower()): if ent.key.strip().lower() in master.byKey:
errs.append("ERROR: Key collision with master file") errs.append("ERROR: Key collision with master file")
if errs: if errs:
@ -210,7 +210,7 @@ def emit(f,ent):
note = ent.get("note") note = ent.get("note")
if ent.getURL() and not note: if ent.getURL() and not note:
ent['note'] = "\url{%s}"%ent.getURL() ent['note'] = "\\url{%s}"%ent.getURL()
elif note: elif note:
m = re.match(r'\\url{(.*)}', note) m = re.match(r'\\url{(.*)}', note)
if m: if m:
@ -232,61 +232,61 @@ def emit(f,ent):
if errs: if errs:
all_ok = 0 all_ok = 0
for e in errs: for e in errs:
print >>f, "%%%%", e print("%%%%", e, file=f)
print >>f, ent.format(77, 4, v=1, invStrings=invStrings) print(ent.format(77, 4, v=1, invStrings=invStrings), file=f)
def emitKnown(f, ent, matches): def emitKnown(f, ent, matches):
print >>f, "%% Candidates are:", ", ".join([e.key for g,e in matches]) print("%% Candidates are:", ", ".join([e.key for g,e in matches]), file=f)
print >>f, "%%" print("%%", file=f)
print >>f, "%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%")) print("%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%")), file=f)
if __name__ == '__main__': if __name__ == '__main__':
if len(sys.argv) != 3: if len(sys.argv) != 3:
print "reconcile.py expects 2 arguments" print("reconcile.py expects 2 arguments")
sys.exit(1) sys.exit(1)
config.load(sys.argv[1]) config.load(sys.argv[1])
print "========= Scanning master ==========" print("========= Scanning master ==========")
master = MasterBibTeX() master = MasterBibTeX()
master = BibTeX.parseFile(config.MASTER_BIB, result=master) master = BibTeX.parseFile(config.MASTER_BIB, result=master)
master.buildIndex() master.buildIndex()
print "========= Scanning new file ========" print("========= Scanning new file ========")
try: try:
fn = sys.argv[2] fn = sys.argv[2]
input = BibTeX.parseFile(fn) input = BibTeX.parseFile(fn)
except BibTeX.ParseError, e: except BibTeX.ParseError as e:
print "Error parsing %s: %s"%(fn,e) print("Error parsing %s: %s"%(fn,e))
sys.exit(1) sys.exit(1)
f = open('tmp.bib', 'w') f = open('tmp.bib', 'w')
keys = input.newStrings.keys() keys = list(input.newStrings.keys())
keys.sort() keys.sort()
for k in keys: for k in keys:
v = input.newStrings[k] v = input.newStrings[k]
print >>f, "@string{%s = {%s}}"%(k,v) print("@string{%s = {%s}}"%(k,v), file=f)
invStrings = input.invStrings invStrings = input.invStrings
for e in input.entries: for e in input.entries:
if not (e.get('title') and e.get('author')): if not (e.get('title') and e.get('author')):
print >>f, "%%\n%%%% Not enough information to search for a match: need title and author.\n%%" print("%%\n%%%% Not enough information to search for a match: need title and author.\n%%", file=f)
emit(f, e) emit(f, e)
continue continue
matches = master.includes(e, all=1) matches = master.includes(e, all=1)
if not matches: if not matches:
print >>f, "%%\n%%%% This entry is probably new: No match found.\n%%" print("%%\n%%%% This entry is probably new: No match found.\n%%", file=f)
emit(f, e) emit(f, e)
else: else:
print >>f, "%%" print("%%", file=f)
print >>f, "%%%% Possible match found for this entry; max goodness",\ print("%%%% Possible match found for this entry; max goodness",\
matches[-1][0], "\n%%" matches[-1][0], "\n%%", file=f)
emitKnown(f, e, matches) emitKnown(f, e, matches)
if not all_ok: if not all_ok:
print >>f, "\n\n\nErrors remain; not finished.\n" print("\n\n\nErrors remain; not finished.\n", file=f)
f.close() f.close()
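The keys = list(d.keys()); keys.sort() shape that 2to3 emits here (and in splitEntriesByAuthor earlier) preserves Python 2 behavior literally, since dict.keys() is now a view rather than a list. It usually collapses to sorted(), which accepts any iterable:

    d = {"mix": 2, "anon": 1}
    keys = list(d.keys())
    keys.sort()                 # mechanical 2to3 shape
    assert keys == sorted(d)    # equivalent, and shorter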

View File

@ -3,8 +3,8 @@
"""Unit tests for anonbib.""" """Unit tests for anonbib."""
import BibTeX from . import BibTeX
import metaphone from . import metaphone
#import reconcile #import reconcile
#import writeHTML #import writeHTML
#import updateCache #import updateCache
@ -18,40 +18,40 @@ class MetaphoneTests(unittest.TestCase):
class BibTeXTests(unittest.TestCase): class BibTeXTests(unittest.TestCase):
def testTranslation(self): def testTranslation(self):
ut = BibTeX.url_untranslate ut = BibTeX.url_untranslate
self.assertEquals(ut("Fred"),"Fred") self.assertEqual(ut("Fred"),"Fred")
self.assertEquals(ut("Hello, World."), "Hello_2c_20World.") self.assertEqual(ut("Hello, World."), "Hello_2c_20World.")
te = BibTeX.TeXescapeURL te = BibTeX.TeXescapeURL
ute = BibTeX.unTeXescapeURL ute = BibTeX.unTeXescapeURL
self.assertEquals(te("http://example/~me/my_file"), self.assertEqual(te("http://example/~me/my_file"),
r"http://example/\{}~me/my\_file") r"http://example/\{}~me/my\_file")
self.assertEquals(ute(r"http:{}//example/\{}~me/my\_file"), self.assertEqual(ute(r"http:{}//example/\{}~me/my\_file"),
"http://example/~me/my_file") "http://example/~me/my_file")
h = BibTeX.htmlize h = BibTeX.htmlize
self.assertEquals(h("Hello, world"), "Hello, world") self.assertEqual(h("Hello, world"), "Hello, world")
self.assertEquals(h(r"\'a\`e\'{i}(\'\i)\"o&\^u"), self.assertEqual(h(r"\'a\`e\'{i}(\'\i)\"o&\^u"),
"&aacute;&egrave;&iacute;(&iacute;)&ouml;&amp;" "&aacute;&egrave;&iacute;(&iacute;)&ouml;&amp;"
"&ucirc;") "&ucirc;")
self.assertEquals(h(r"\~n and \c{c}"), "&ntilde; and &ccedil;") self.assertEqual(h(r"\~n and \c{c}"), "&ntilde; and &ccedil;")
self.assertEquals(h(r"\AE---a ligature"), "&AElig;&mdash;a ligature") self.assertEqual(h(r"\AE---a ligature"), "&AElig;&mdash;a ligature")
self.assertEquals(h(r"{\it 33}"), " 33") self.assertEqual(h(r"{\it 33}"), " 33")
self.assertEquals(h(r"Pages 33--99 or vice--versa?"), self.assertEqual(h(r"Pages 33--99 or vice--versa?"),
"Pages 33-99 or vice&ndash;versa?") "Pages 33-99 or vice&ndash;versa?")
t = BibTeX.txtize t = BibTeX.txtize
self.assertEquals(t("Hello, world"), "Hello, world") self.assertEqual(t("Hello, world"), "Hello, world")
self.assertEquals(t(r"\'a\`e\'{i}(\'\i)\"o&\^u"), self.assertEqual(t(r"\'a\`e\'{i}(\'\i)\"o&\^u"),
"aei(i)o&u") "aei(i)o&u")
self.assertEquals(t(r"\~n and \c{c}"), "n and c") self.assertEqual(t(r"\~n and \c{c}"), "n and c")
self.assertEquals(t(r"\AE---a ligature"), "AE---a ligature") self.assertEqual(t(r"\AE---a ligature"), "AE---a ligature")
self.assertEquals(t(r"{\it 33}"), " 33") self.assertEqual(t(r"{\it 33}"), " 33")
self.assertEquals(t(r"Pages 33--99 or vice--versa?"), self.assertEqual(t(r"Pages 33--99 or vice--versa?"),
"Pages 33--99 or vice--versa?") "Pages 33--99 or vice--versa?")
def authorsParseTo(self,authors,result): def authorsParseTo(self,authors,result):
pa = BibTeX.parseAuthor(authors) pa = BibTeX.parseAuthor(authors)
self.assertEquals(["|".join(["+".join(item) for item in self.assertEqual(["|".join(["+".join(item) for item in
[a.first,a.von,a.last,a.jr]]) [a.first,a.von,a.last,a.jr]])
for a in pa], for a in pa],
result) result)
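assertEquals survives in Python 3's unittest only as a deprecated alias, so the renames above are behavior-neutral but silence DeprecationWarning. For instance:

    import unittest

    class AliasTest(unittest.TestCase):             # illustrative test only
        def test_equal(self):
            self.assertEqual("Fred", "Fr" + "ed")   # assertEquals would warn here

    if __name__ == '__main__':
        unittest.main()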

View File

@ -10,13 +10,13 @@ import signal
import time import time
import gzip import gzip
import BibTeX from . import BibTeX
import config from . import config
import urllib2 import urllib.request, urllib.error, urllib.parse
import getopt import getopt
import socket import socket
import errno import errno
import httplib import http.client
FILE_TYPES = [ "txt", "html", "pdf", "ps", "ps.gz", "abstract" ] FILE_TYPES = [ "txt", "html", "pdf", "ps", "ps.gz", "abstract" ]
BIN_FILE_TYPES = [ 'pdf', 'ps.gz' ] BIN_FILE_TYPES = [ 'pdf', 'ps.gz' ]
@ -53,12 +53,12 @@ def downloadFile(key, ftype, section, url,timeout=None):
signal.alarm(timeout) signal.alarm(timeout)
try: try:
try: try:
infile = urllib2.urlopen(url) infile = urllib.request.urlopen(url)
except httplib.InvalidURL, e: except http.client.InvalidURL as e:
raise UIError("Invalid URL %s: %s"%(url,e)) raise UIError("Invalid URL %s: %s"%(url,e))
except IOError, e: except IOError as e:
raise UIError("Cannot connect to url %s: %s"%(url,e)) raise UIError("Cannot connect to url %s: %s"%(url,e))
except socket.error, e: except socket.error as e:
if getattr(e,"errno",-1) == errno.EINTR: if getattr(e,"errno",-1) == errno.EINTR:
raise UIError("Connection timed out to url %s"%url) raise UIError("Connection timed out to url %s"%url)
else: else:
@ -80,9 +80,9 @@ def downloadFile(key, ftype, section, url,timeout=None):
outfile.close() outfile.close()
urlfile = open(fnameURL, 'w') urlfile = open(fnameURL, 'w')
print >>urlfile, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=urlfile)
if "\n" in url: url = url.replace("\n", " ") if "\n" in url: url = url.replace("\n", " ")
print >>urlfile, url print(url, file=urlfile)
urlfile.close() urlfile.close()
os.rename(fnameTmp, fname) os.rename(fnameTmp, fname)
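The except SomeError, e: comma syntax seen throughout downloadFile above is a hard syntax error in Python 3; "as" is the only accepted spelling, and the bound name is now deleted when the handler exits. A minimal illustration:

    try:
        raise IOError("connection refused")    # stand-in error, illustration only
    # Python 2 also allowed:  except IOError, e:
    except IOError as e:                       # required form in Python 3
        print("Cannot connect: %s" % e)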
@ -105,7 +105,7 @@ def getCachedURL(key, ftype, section):
lines = f.readlines() lines = f.readlines()
f.close() f.close()
if len(lines) != 2: if len(lines) != 2:
print >>sys.stderr, "ERROR: unexpected number of lines in", urlFname print("ERROR: unexpected number of lines in", urlFname, file=sys.stderr)
return lines[1].strip() return lines[1].strip()
def downloadAll(bibtex, missingOnly=0): def downloadAll(bibtex, missingOnly=0):
@ -115,33 +115,33 @@ def downloadAll(bibtex, missingOnly=0):
urls = getURLs(e) urls = getURLs(e)
key = e.key key = e.key
section = e.get("www_cache_section", ".") section = e.get("www_cache_section", ".")
for ftype, url in urls.items(): for ftype, url in list(urls.items()):
if missingOnly: if missingOnly:
cachedURL = getCachedURL(key, ftype, section) cachedURL = getCachedURL(key, ftype, section)
if cachedURL == url: if cachedURL == url:
print >>sys.stderr,"Skipping",url print("Skipping",url, file=sys.stderr)
continue continue
elif cachedURL is not None: elif cachedURL is not None:
print >>sys.stderr,"URL for %s.%s has changed"%(key,ftype) print("URL for %s.%s has changed"%(key,ftype), file=sys.stderr)
else: else:
print >>sys.stderr,"I have no copy of %s.%s"%(key,ftype) print("I have no copy of %s.%s"%(key,ftype), file=sys.stderr)
try: try:
downloadFile(key, ftype, section, url) downloadFile(key, ftype, section, url)
print "Downloaded",url print("Downloaded",url)
except UIError, e: except UIError as e:
print >>sys.stderr, str(e) print(str(e), file=sys.stderr)
errors.append((key,ftype,url,str(e))) errors.append((key,ftype,url,str(e)))
except (IOError, socket.error), e: except (IOError, socket.error) as e:
msg = "Error downloading %s: %s"%(url,str(e)) msg = "Error downloading %s: %s"%(url,str(e))
print >>sys.stderr, msg print(msg, file=sys.stderr)
errors.append((key,ftype,url,msg)) errors.append((key,ftype,url,msg))
if urls.has_key("ps") and not urls.has_key("ps.gz"): if "ps" in urls and "ps.gz" not in urls:
# Say, this is something we'd like to have gzipped locally. # Say, this is something we'd like to have gzipped locally.
psFname = getCacheFname(key, "ps", section) psFname = getCacheFname(key, "ps", section)
psGzFname = getCacheFname(key, "ps.gz", section) psGzFname = getCacheFname(key, "ps.gz", section)
if os.path.exists(psFname) and not os.path.exists(psGzFname): if os.path.exists(psFname) and not os.path.exists(psGzFname):
# This is something we haven't gzipped yet. # This is something we haven't gzipped yet.
print "Compressing a copy of",psFname print("Compressing a copy of",psFname)
outf = gzip.GzipFile(psGzFname, "wb") outf = gzip.GzipFile(psGzFname, "wb")
inf = open(psFname, "rb") inf = open(psFname, "rb")
while 1: while 1:
@ -156,9 +156,9 @@ def downloadAll(bibtex, missingOnly=0):
if __name__ == '__main__': if __name__ == '__main__':
if len(sys.argv) == 2: if len(sys.argv) == 2:
print "Loading from %s"%sys.argv[1] print("Loading from %s"%sys.argv[1])
else: else:
print >>sys.stderr, "Expected a single configuration file as an argument" print("Expected a single configuration file as an argument", file=sys.stderr)
sys.exit(1) sys.exit(1)
config.load(sys.argv[1]) config.load(sys.argv[1])

View File

@ -9,10 +9,10 @@ import os
import json import json
assert sys.version_info[:3] >= (2,2,0) assert sys.version_info[:3] >= (2,2,0)
os.umask(022) os.umask(0o22)
import BibTeX from . import BibTeX
import config from . import config
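Leading-zero octal literals such as 022 became a syntax error in Python 3 (PEP 3127); the 0o prefix is mandatory. The umask call above is behaviorally identical before and after:

    import os

    os.umask(0o22)    # 0o22 == 18 == old-style 022; clears group/other write bits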
def getTemplate(name): def getTemplate(name):
f = open(name) f = open(name)
@ -39,15 +39,15 @@ def writeBody(f, sections, section_urls, cache_path, base_url):
sDisp = re.sub(r'\s+', ' ', s.strip()) sDisp = re.sub(r'\s+', ' ', s.strip())
sDisp = sDisp.replace(" ", "&nbsp;") sDisp = sDisp.replace(" ", "&nbsp;")
if u: if u:
print >>f, ('<li><h3><a name="%s"></a><a href="%s">%s</a></h3>'%( print(('<li><h3><a name="%s"></a><a href="%s">%s</a></h3>'%(
(BibTeX.url_untranslate(s), u, sDisp))) (BibTeX.url_untranslate(s), u, sDisp))), file=f)
else: else:
print >>f, ('<li><h3><a name="%s">%s</a></h3>'%( print(('<li><h3><a name="%s">%s</a></h3>'%(
BibTeX.url_untranslate(s),sDisp)) BibTeX.url_untranslate(s),sDisp)), file=f)
print >>f, "<ul class='expand'>" print("<ul class='expand'>", file=f)
for e in entries: for e in entries:
print >>f, e.to_html(cache_path=cache_path, base_url=base_url) print(e.to_html(cache_path=cache_path, base_url=base_url), file=f)
print >>f, "</ul></li>" print("</ul></li>", file=f)
def writeHTML(f, sections, sectionType, fieldName, choices, def writeHTML(f, sections, sectionType, fieldName, choices,
tag, config, cache_url_path, section_urls={}): tag, config, cache_url_path, section_urls={}):
@ -69,7 +69,7 @@ def writeHTML(f, sections, sectionType, fieldName, choices,
# #
tagListStr = [] tagListStr = []
st = config.TAG_SHORT_TITLES.keys() st = list(config.TAG_SHORT_TITLES.keys())
st.sort() st.sort()
root = "../"*pathLength(config.TAG_DIRECTORIES[tag]) root = "../"*pathLength(config.TAG_DIRECTORIES[tag])
if root == "": root = "." if root == "": root = "."
@ -104,10 +104,10 @@ def writeHTML(f, sections, sectionType, fieldName, choices,
} }
header, footer = getTemplate(config.TEMPLATE_FILE) header, footer = getTemplate(config.TEMPLATE_FILE)
print >>f, header%fields print(header%fields, file=f)
writeBody(f, sections, section_urls, cache_path=cache_url_path, writeBody(f, sections, section_urls, cache_path=cache_url_path,
base_url=root) base_url=root)
print >>f, footer%fields print(footer%fields, file=f)
def jsonDumper(obj): def jsonDumper(obj):
if isinstance(obj, BibTeX.BibTeXEntry): if isinstance(obj, BibTeX.BibTeXEntry):
@ -125,7 +125,7 @@ def writePageSet(config, bib, tag):
bib_entries = bib.entries[:] bib_entries = bib.entries[:]
if not bib_entries: if not bib_entries:
print >>sys.stderr, "No entries with tag %r; skipping"%tag print("No entries with tag %r; skipping"%tag, file=sys.stderr)
return return
tagdir = config.TAG_DIRECTORIES[tag] tagdir = config.TAG_DIRECTORIES[tag]
@ -133,7 +133,7 @@ def writePageSet(config, bib, tag):
cache_url_path = BibTeX.smartJoin("../"*pathLength(tagdir), cache_url_path = BibTeX.smartJoin("../"*pathLength(tagdir),
config.CACHE_DIR) config.CACHE_DIR)
if not os.path.exists(outdir): if not os.path.exists(outdir):
os.makedirs(outdir, 0755) os.makedirs(outdir, 0o755)
##### Sorted views: ##### Sorted views:
## By topic. ## By topic.
@ -174,7 +174,7 @@ def writePageSet(config, bib, tag):
except ValueError: except ValueError:
last_year = int(entries[-2][1][0].get('year')) last_year = int(entries[-2][1][0].get('year'))
years = map(str, range(first_year, last_year+1)) years = list(map(str, list(range(first_year, last_year+1))))
if entries[-1][0] == 'Unknown': if entries[-1][0] == 'Unknown':
years.append("Unknown") years.append("Unknown")
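In Python 3, map() returns a one-shot iterator instead of a list, so 2to3 wraps it in list(...) wherever the result is indexed or reused, as with years above. The difference in one sketch:

    years = map(str, range(2010, 2014))
    assert list(years) == ['2010', '2011', '2012', '2013']
    assert list(years) == []    # already exhausted: map() is an iterator

    years = list(map(str, range(2010, 2014)))   # materialize once, reuse freely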
@ -216,15 +216,15 @@ def writePageSet(config, bib, tag):
header,footer = getTemplate(config.BIBTEX_TEMPLATE_FILE) header,footer = getTemplate(config.BIBTEX_TEMPLATE_FILE)
f = open(os.path.join(outdir,"bibtex.html"), 'w') f = open(os.path.join(outdir,"bibtex.html"), 'w')
print >>f, header % { 'command_line' : "", print(header % { 'command_line' : "",
'title': config.TAG_TITLES[tag], 'title': config.TAG_TITLES[tag],
'root': root } 'root': root }, file=f)
for ent in entries: for ent in entries:
print >>f, ( print((
("<tr><td class='bibtex'><a name='%s'>%s</a>" ("<tr><td class='bibtex'><a name='%s'>%s</a>"
"<pre class='bibtex'>%s</pre></td></tr>") "<pre class='bibtex'>%s</pre></td></tr>")
%(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1))) %(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1))), file=f)
print >>f, footer print(footer, file=f)
f.close() f.close()
f = open(os.path.join(outdir,"bibtex.json"), 'w') f = open(os.path.join(outdir,"bibtex.json"), 'w')
@ -234,13 +234,13 @@ def writePageSet(config, bib, tag):
if __name__ == '__main__': if __name__ == '__main__':
if len(sys.argv) == 2: if len(sys.argv) == 2:
print "Loading from %s"%sys.argv[1] print("Loading from %s"%sys.argv[1])
else: else:
print >>sys.stderr, "Expected a single configuration file as an argument" print("Expected a single configuration file as an argument", file=sys.stderr)
sys.exit(1) sys.exit(1)
config.load(sys.argv[1]) config.load(sys.argv[1])
bib = BibTeX.parseFile(config.MASTER_BIB) bib = BibTeX.parseFile(config.MASTER_BIB)
for tag in config.TAG_DIRECTORIES.keys(): for tag in list(config.TAG_DIRECTORIES.keys()):
writePageSet(config, bib, tag) writePageSet(config, bib, tag)

View File

@ -10,10 +10,10 @@ from i2p2www import helpers
BLOG_METATAGS = { BLOG_METATAGS = {
'author': u'I2P devs', 'author': 'I2P devs',
'category': None, 'category': None,
'date': None, 'date': None,
'excerpt': u'', 'excerpt': '',
} }
BLOG_LIST_METATAGS = [ BLOG_LIST_METATAGS = [

View File

@ -141,12 +141,12 @@ def downloads_config():
def downloads_select(version, file): def downloads_select(version, file):
mirrors=read_mirrors() mirrors=read_mirrors()
obj=[] obj=[]
for net in mirrors.keys(): for net in list(mirrors.keys()):
a={} a={}
a['key']=net a['key']=net
a['name']=net a['name']=net
a['protocols']=[] a['protocols']=[]
for protocol in mirrors[net].keys(): for protocol in list(mirrors[net].keys()):
b={} b={}
b['key']=protocol b['key']=protocol
b['name']=protocol b['name']=protocol
@ -166,13 +166,13 @@ def downloads_redirect(version, net, protocol, domain, file):
} }
if not protocol: if not protocol:
protocol = mirrors.keys()[randint(0, len(mirrors) - 1)] protocol = list(mirrors.keys())[randint(0, len(mirrors) - 1)]
if not protocol in mirrors: if not protocol in mirrors:
abort(404) abort(404)
mirrors=mirrors[protocol] mirrors=mirrors[protocol]
if not domain: if not domain:
domain = mirrors.keys()[randint(0, len(mirrors) - 1)] domain = list(mirrors.keys())[randint(0, len(mirrors) - 1)]
if not domain in mirrors: if not domain in mirrors:
abort(404) abort(404)
return render_template('downloads/redirect.html', return render_template('downloads/redirect.html',

View File

@ -29,8 +29,8 @@ def we_are_frozen():
def module_path(): def module_path():
encoding = sys.getfilesystemencoding() encoding = sys.getfilesystemencoding()
if we_are_frozen(): if we_are_frozen():
return os.path.dirname(unicode(sys.executable, encoding)) return os.path.dirname(sys.executable)
return os.path.dirname(unicode(__file__, encoding)) return os.path.dirname(__file__)
class HighlightExtension(Extension): class HighlightExtension(Extension):
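The Python 2 original decoded a byte path with unicode(path, encoding); 2to3's mechanical str(path, encoding) would raise TypeError, because decoding an already-decoded str is not supported. In Python 3, sys.executable and __file__ arrive as str, so the migrated lines above simply drop the decode:

    import os
    import sys

    # Both values are already str in Python 3; no decode step is needed.
    print(os.path.dirname(sys.executable))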

View File

@ -12,7 +12,7 @@
import os import os
import sys import sys
import os.path import os.path
import StringIO import io
from pygments.formatter import Formatter from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES from pygments.token import Token, Text, STANDARD_TYPES
@ -27,11 +27,11 @@ __all__ = ['I2PHtmlFormatter', 'TextSpecFormatter']
_escape_html_table = { _escape_html_table = {
ord('&'): u'&amp;', ord('&'): '&amp;',
ord('<'): u'&lt;', ord('<'): '&lt;',
ord('>'): u'&gt;', ord('>'): '&gt;',
ord('"'): u'&quot;', ord('"'): '&quot;',
ord("'"): u'&#39;', ord("'"): '&#39;',
} }
kinds = { kinds = {
@ -459,7 +459,7 @@ class I2PHtmlFormatter(Formatter):
""" """
if arg is None: if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '') arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, basestring): if isinstance(arg, str):
args = [arg] args = [arg]
else: else:
args = list(arg) args = list(arg)
@ -473,7 +473,7 @@ class I2PHtmlFormatter(Formatter):
return ', '.join(tmp) return ', '.join(tmp)
styles = [(level, ttype, cls, style) styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in self.class2style.iteritems() for cls, (style, ttype, level) in self.class2style.items()
if cls and style] if cls and style]
styles.sort() styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:]) lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
@ -511,8 +511,8 @@ class I2PHtmlFormatter(Formatter):
cssfilename = os.path.join(os.path.dirname(filename), cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile) self.cssfile)
except AttributeError: except AttributeError:
print >>sys.stderr, 'Note: Cannot determine output file name, ' \ print('Note: Cannot determine output file name, ' \
'using current directory as base for the CSS file name' 'using current directory as base for the CSS file name', file=sys.stderr)
cssfilename = self.cssfile cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option. # write CSS file only if noclobber_cssfile isn't given as an option.
try: try:
@ -521,7 +521,7 @@ class I2PHtmlFormatter(Formatter):
cf.write(CSSFILE_TEMPLATE % cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')}) {'styledefs': self.get_style_defs('body')})
cf.close() cf.close()
except IOError, err: except IOError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror err.strerror = 'Error writing CSS file: ' + err.strerror
raise raise
@ -540,7 +540,7 @@ class I2PHtmlFormatter(Formatter):
yield 0, DOC_FOOTER yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner): def _wrap_tablelinenos(self, inner):
dummyoutfile = StringIO.StringIO() dummyoutfile = io.StringIO()
lncount = 0 lncount = 0
for t, line in inner: for t, line in inner:
if t: if t:
@ -884,7 +884,7 @@ class TextSpecFormatter(Formatter):
else: else:
outfile.write(value) outfile.write(value)
for ref in refs.values(): for ref in list(refs.values()):
if enc: if enc:
outfile.write(ref.encode(enc)) outfile.write(ref.encode(enc))
else: else:

View File

@ -56,7 +56,7 @@ class Pagination(object):
def iter_pages(self, left_edge=2, left_current=2, def iter_pages(self, left_edge=2, left_current=2,
right_current=5, right_edge=2): right_current=5, right_edge=2):
last = 0 last = 0
for num in xrange(1, self.pages + 1): for num in range(1, self.pages + 1):
if num <= left_edge or \ if num <= left_edge or \
(num > self.page - left_current - 1 and \ (num > self.page - left_current - 1 and \
num < self.page + right_current) or \ num < self.page + right_current) or \

View File

@ -197,7 +197,7 @@ LEGACY_RELEASES_MAP={
'0.9.8': (2013, 9, 30), '0.9.8': (2013, 9, 30),
'0.9.8.1': (2013, 10, 2), '0.9.8.1': (2013, 10, 2),
'0.9.9': (2013, 12, 7), '0.9.9': (2013, 12, 7),
'0.9.10': (2014, 01, 22), '0.9.10': (2014, 1, 22),
} }
def legacy_show(f): def legacy_show(f):
@ -232,5 +232,6 @@ def legacy_release(version):
else: else:
return legacy_show('release-%s' % version) return legacy_show('release-%s' % version)
def legacy_blog(lang, (year, month, day), title): def legacy_blog(lang, ymd, title):
(year, month, day) = ymd
return redirect(url_for('blog_post', lang=lang, slug=('%d/%02d/%02d/%s' % (year, month, day, title))), 301) return redirect(url_for('blog_post', lang=lang, slug=('%d/%02d/%02d/%s' % (year, month, day, title))), 301)

View File

@ -54,7 +54,7 @@ def get_meetings_ids(num=0):
# iterate over all files # iterate over all files
for f in v[2]: for f in v[2]:
# ignore all non-.rst files # ignore all non-.rst files
print("Meeting file found", f) print("Meeting file found", f)
if not f.endswith('.rst'): if not f.endswith('.rst'):
continue continue
try: try:

View File

@ -40,12 +40,12 @@ SPEC_CATEGORY_SORT = defaultdict(lambda: 999, {
}) })
PROPOSAL_METATAGS = { PROPOSAL_METATAGS = {
'author': u'I2P devs', 'author': 'I2P devs',
'created': None, 'created': None,
'editor': None, 'editor': None,
'implementedin': None, 'implementedin': None,
'lastupdated': None, 'lastupdated': None,
'status': u'Draft', 'status': 'Draft',
'supercededby': None, 'supercededby': None,
'supercedes': None, 'supercedes': None,
'target': None, 'target': None,
@ -70,18 +70,18 @@ PROPOSAL_STATUS_SORT = defaultdict(lambda: 999, {
}) })
METATAG_LABELS = { METATAG_LABELS = {
'accuratefor': u'Accurate for', 'accuratefor': 'Accurate for',
'author': u'Author', 'author': 'Author',
'category': u'Category', 'category': 'Category',
'created': u'Created', 'created': 'Created',
'editor': u'Editor', 'editor': 'Editor',
'implementedin': u'Implemented in', 'implementedin': 'Implemented in',
'lastupdated': u'Last updated', 'lastupdated': 'Last updated',
'status': u'Status', 'status': 'Status',
'supercededby': u'Superceded by', 'supercededby': 'Superceded by',
'supercedes': u'Supercedes', 'supercedes': 'Supercedes',
'target': u'Target', 'target': 'Target',
'thread': u'Thread', 'thread': 'Thread',
} }
@ -150,7 +150,7 @@ def render_rst(directory, name, meta_parser, template):
# Change highlight formatter # Change highlight formatter
content = content.replace('{% highlight', "{% highlight formatter='textspec'") content = content.replace('{% highlight', "{% highlight formatter='textspec'")
# Metatags # Metatags
for (metatag, label) in METATAG_LABELS.items(): for (metatag, label) in list(METATAG_LABELS.items()):
content = content.replace(' :%s' % metatag, label) content = content.replace(' :%s' % metatag, label)
# render the post with Jinja2 to handle URLs etc. # render the post with Jinja2 to handle URLs etc.

View File

@ -1,7 +1,7 @@
import ctags import ctags
from flask import g, request, safe_join, url_for from flask import g, request, safe_join, url_for
import os.path import os.path
from urlparse import urlsplit, urlunsplit from urllib.parse import urlsplit, urlunsplit
from i2p2www import ( from i2p2www import (
CANONICAL_DOMAIN, CANONICAL_DOMAIN,

View File

@ -23,7 +23,7 @@ def app(environ, start_response):
path = req.path[1:] path = req.path[1:]
if path == '': if path == '':
# page # page
page = u'<html><head><title>NetDB</title></head><body><ul>%s</ul></body></html>' page = '<html><head><title>NetDB</title></head><body><ul>%s</ul></body></html>'
# generate links # generate links
entries = os.listdir('netdb') entries = os.listdir('netdb')
@ -46,7 +46,7 @@ def app(environ, start_response):
res += '<li><a href="%s">%s</a></li>' % (entry, entry) res += '<li><a href="%s">%s</a></li>' % (entry, entry)
resp = Response(page % res, mimetype='text/html') resp = Response(page % res, mimetype='text/html')
elif path == 'robots.txt': elif path == 'robots.txt':
dat = u"User-agent: *\nDisallow: /routerInfo-*.dat$\n" dat = "User-agent: *\nDisallow: /routerInfo-*.dat$\n"
resp = Response(dat, mimetype='text/plain') resp = Response(dat, mimetype='text/plain')
else: else:
# load file # load file

View File

@ -122,7 +122,7 @@ def application(environ, start_response):
if path == '': if path == '':
page = u'<html><head><title>NetDB</title></head><body><ul>%s</ul></body></html>' page = '<html><head><title>NetDB</title></head><body><ul>%s</ul></body></html>'
if len(info) == 0: if len(info) == 0:
# tag the ip as new # tag the ip as new
@ -136,7 +136,7 @@ def application(environ, start_response):
new = [] new = []
if len(entries) > 150: if len(entries) > 150:
# select some randomly # select some randomly
for i in xrange(100): for i in range(100):
while True: while True:
sel = choice(entries) sel = choice(entries)
if not sel.startswith('routerInfo-'): if not sel.startswith('routerInfo-'):
@ -179,7 +179,7 @@ def application(environ, start_response):
resp.add_etag() resp.add_etag()
elif path == 'robots.txt': elif path == 'robots.txt':
dat = u"User-agent: *\nDisallow: /routerInfo-*.dat$\n" dat = "User-agent: *\nDisallow: /routerInfo-*.dat$\n"
resp = Response(dat, mimetype='text/plain') resp = Response(dat, mimetype='text/plain')
resp.add_etag() resp.add_etag()