2012-12-19 06:53:59 +00:00
|
|
|
import codecs
|
|
|
|
import datetime
|
|
|
|
from docutils.core import publish_parts
|
2013-01-16 01:52:31 +00:00
|
|
|
from flask import abort, g, render_template_string, safe_join, url_for
|
2012-12-19 06:53:59 +00:00
|
|
|
import os
|
|
|
|
import os.path
|
|
|
|
|
|
|
|
from i2p2www import BLOG_DIR
|
|
|
|
|
|
|
|
|
2013-01-08 01:33:22 +00:00
|
|
|
# Meta tags recognized in a blog post's reST metadata, mapped to the
# default value used when the post omits that tag.
SUPPORTED_METATAGS = {
    'author': u'I2P devs',
    'category': None,
    'date': None,
    'excerpt': u'',
}

# Meta tags whose raw value is a comma-separated list and is split into
# a Python list by get_metadata_from_meta().
LIST_METATAGS = [
    'category',
]
|
|
|
|
|
2013-01-08 01:33:22 +00:00
|
|
|
|
2012-12-19 06:53:59 +00:00
|
|
|
#####################
|
|
|
|
# Blog helper methods
|
|
|
|
|
|
|
|
def get_blog_feed_items(num=0):
    """
    Build feed-ready entries for the latest blog posts.

    Returns a list of dicts with 'title', 'content', 'url' and 'updated'
    keys, one per post, newest first. num=0 means all posts.
    """
    items = []
    for slug, meta, parts in get_blog_posts(num, True):
        excerpt = meta['excerpt']
        entry = {
            'title': parts['title'],
            # Prefer the hand-written excerpt; fall back to the full body.
            'content': excerpt if len(excerpt) > 0 else parts['fragment'],
            'url': url_for('blog_post', lang=g.lang, slug=slug),
            # Post dates are stored as 'YYYY-MM-DD' strings.
            'updated': datetime.datetime.strptime(meta['date'], '%Y-%m-%d'),
        }
        items.append(entry)
    return items
|
|
|
|
|
2013-01-16 01:41:24 +00:00
|
|
|
def get_blog_posts(num=0, return_parts=False):
    """
    Returns the latest #num valid posts sorted by date, or all slugs if num=0.

    Each post is a (slug, meta) tuple, or (slug, meta, parts) when
    return_parts is True. Posts that fail to render are skipped.
    """
    posts = []
    for slug in get_blog_slugs(num):
        parts = render_blog_post(slug)
        if not parts:
            continue
        meta = get_metadata_from_meta(parts['meta'])
        # Fall back to the date encoded in the slug path when the post
        # carries no explicit date meta tag.
        if not meta['date']:
            meta['date'] = get_date_from_slug(slug)
        # The last path component, with underscores as spaces, is the title.
        titlepart = slug.rsplit('/', 1)[1]
        meta['title'] = ' '.join(titlepart.split('_'))
        posts.append((slug, meta, parts) if return_parts else (slug, meta))
    return posts
|
2012-12-19 06:53:59 +00:00
|
|
|
|
|
|
|
def get_blog_slugs(num=0):
    """
    Returns the latest #num valid slugs sorted by date, or all slugs if num=0.

    A slug is the .rst file's path relative to BLOG_DIR without the
    extension, e.g. '2013/01/09/some_post'.
    """
    slugs = []
    # Walk the whole blog tree, collecting one slug per .rst file.
    for dirpath, _dirnames, filenames in os.walk(BLOG_DIR):
        slugbase = os.path.relpath(dirpath, BLOG_DIR)
        slugs.extend(
            safe_join(slugbase, name[:-4])
            for name in filenames
            if name.endswith('.rst')
        )
    # Slugs start with YYYY/MM/DD, so descending lexicographic order is
    # newest-first.
    slugs.sort(reverse=True)
    return slugs[:num] if num > 0 else slugs
|
|
|
|
|
|
|
|
def get_date_from_slug(slug):
    """
    Derive a 'YYYY-MM-DD' date string from a slug's leading path parts.

    Slugs look like 'YYYY/MM/DD/title'; the first three components are
    joined with dashes, e.g. '2013/01/09/post' -> '2013-01-09'.
    """
    year, month, day = slug.split('/')[:3]
    return '-'.join((year, month, day))
|
|
|
|
|
2013-01-09 00:32:32 +00:00
|
|
|
def render_blog_post(slug):
    """
    Render the blog post identified by slug, aborting with 404 when the
    underlying .rst file does not exist.

    TODO:
    - caching
    - move to own file
    """
    # safe_join prevents the slug from escaping BLOG_DIR.
    path = safe_join(BLOG_DIR, slug + ".rst")
    if not os.path.exists(path):
        abort(404)

    # Load the raw reST source as UTF-8 text.
    with codecs.open(path, encoding='utf-8') as fobj:
        raw = fobj.read()

    # First pass the source through Jinja2 so templated URLs etc. resolve,
    # then hand the result to docutils for HTML generation.
    rendered = render_template_string(raw)
    return publish_parts(source=rendered, source_path=BLOG_DIR, writer_name="html")
|
2013-01-08 01:33:22 +00:00
|
|
|
|
|
|
|
def get_metadata_from_meta(meta):
    """
    Parse the docutils 'meta' HTML fragment into a metadata dict.

    For every tag in SUPPORTED_METATAGS, find the first line of *meta*
    containing name="<tag>" and extract the value between content="...";
    when the tag is absent, fall back to the default from
    SUPPORTED_METATAGS. Tags listed in LIST_METATAGS hold comma-separated
    values and are split into a list of stripped strings.
    """
    metaLines = meta.split('\n')
    ret = {}
    for metaTag in SUPPORTED_METATAGS:
        metaLine = [s for s in metaLines if 'name="%s"' % metaTag in s]
        # First matching line wins; otherwise use the configured default.
        ret[metaTag] = metaLine[0].split('content="')[1].split('"')[0] if len(metaLine) > 0 else SUPPORTED_METATAGS[metaTag]
        if metaTag in LIST_METATAGS and ret[metaTag] is not None:
            # Bug fix: the default for a missing list tag can be None
            # (e.g. 'category'), and calling .split() on it raised
            # AttributeError. Only split actual string values.
            ret[metaTag] = [s.strip() for s in ret[metaTag].split(',')]
    return ret
|