changeset 130:72e5f588f989
Proper support for meta-listing pages:
- Added support for URL endpoints.
- Moved all category-related stuff to generic meta handling.
- Generic meta-listing pages in the front end.
Miscellaneous fixes:
- Remove previous "ready" items before caching new ones.
- Only un-"ready" pages that have includes or queries.
- Better error handling in `DatabasePage` auto-update.
- Make sure pages have extended data cached before running queries.
|          |                                       |
|----------|---------------------------------------|
| author   | Ludovic Chabant <ludovic@chabant.com> |
| date     | Wed, 27 Nov 2013 23:32:45 -0800       |
| parents  | ace48040b01d                          |
| children | 9d22cf4d2412                          |
| files    | static/js/wikked/app.js static/js/wikked/handlebars.js static/js/wikked/models.js static/js/wikked/views.js static/tpl/meta-page.html wikked/db.py wikked/formatter.py wikked/fs.py wikked/page.py wikked/templates/meta_page.html wikked/utils.py wikked/views.py wikked/wiki.py |
| diffstat | 13 files changed, 199 insertions(+), 47 deletions(-) |
```diff
--- a/static/js/wikked/app.js	Wed Nov 27 23:28:46 2013 -0800
+++ b/static/js/wikked/app.js	Wed Nov 27 23:32:45 2013 -0800
@@ -64,7 +64,7 @@
         routes: {
             'read/*path': "readPage",
             '': "readMainPage",
-            'category/*path': "readCategoryPage",
+            'meta/:name/*path': "readMetaPage",
             'edit/*path': "editPage",
             'changes/*path': "showPageHistory",
             'inlinks/*path': "showIncomingLinks",
@@ -93,12 +93,12 @@
         readMainPage: function() {
             this.readPage('main-page');
         },
-        readCategoryPage: function(path) {
-            var view = new Views.CategoryView({
-                model: new Models.CategoryModel({ path: path })
+        readMetaPage: function(name, path) {
+            var view = new Views.MetaPageView({
+                model: new Models.MetaPageModel({ name: name, path: path })
             });
             this.viewManager.switchView(view);
-            this.navigate('/category/' + path);
+            this.navigate('/meta/' + name + '/' + path);
         },
         editPage: function(path) {
             var view = new Views.PageEditView({
```
```diff
--- a/static/js/wikked/handlebars.js	Wed Nov 27 23:28:46 2013 -0800
+++ b/static/js/wikked/handlebars.js	Wed Nov 27 23:32:45 2013 -0800
@@ -107,7 +107,7 @@
     });

     Handlebars.registerHelper('get_cat_url', function(url, options) {
         url = url.toString();
-        return '/#/category/' + url.replace(/^\//, '');
+        return '/#/meta/category/' + url.replace(/^\//, '');
     });
 });
```
```diff
--- a/static/js/wikked/models.js	Wed Nov 27 23:28:46 2013 -0800
+++ b/static/js/wikked/models.js	Wed Nov 27 23:32:45 2013 -0800
@@ -225,6 +225,9 @@
             this.footer.addExtraUrl('Pages Linking Here', function() { return '/#/inlinks/' + model.id; }, 1);
             this.footer.addExtraUrl('JSON', function() { return '/api/read/' + model.id; });
         },
+        checkStatePath: function() {
+            return this.get('path');
+        },
         _onChange: function() {
             if (this.getMeta('redirect')) {
                 // Handle redirects.
@@ -246,10 +249,13 @@
         }
     });

-    var CategoryModel = exports.CategoryModel = MasterPageModel.extend({
+    var MetaPageModel = exports.MetaPageModel = MasterPageModel.extend({
         action: 'read',
         url: function() {
-            return '/api/query?category=' + this.get('path');
+            return '/api/read_meta/' + this.get('name') + '/' + this.get('path');
+        },
+        checkStatePath: function() {
+            return this.getMeta('url');
         }
     });
```
```diff
--- a/static/js/wikked/views.js	Wed Nov 27 23:28:46 2013 -0800
+++ b/static/js/wikked/views.js	Wed Nov 27 23:32:45 2013 -0800
@@ -11,7 +11,7 @@
     'js/wikked/models',
     'js/wikked/util',
     'text!tpl/read-page.html',
-    'text!tpl/category.html',
+    'text!tpl/meta-page.html',
     'text!tpl/edit-page.html',
     'text!tpl/history-page.html',
    'text!tpl/revision-page.html',
@@ -31,7 +31,7 @@
     'text!tpl/special-orphans.html'
     ],
     function($, _, Backbone, Handlebars, BootstrapTooltip, Client, Models, Util,
-        tplReadPage, tplCategory, tplEditPage, tplHistoryPage, tplRevisionPage, tplDiffPage, tplInLinksPage,
+        tplReadPage, tplMetaPage, tplEditPage, tplHistoryPage, tplRevisionPage, tplDiffPage, tplInLinksPage,
         tplNav, tplFooter, tplSearchResults, tplLogin,
         tplErrorNotAuthorized, tplErrorNotFound, tplErrorUnauthorizedEdit, tplStateWarning,
         tplSpecialNav, tplSpecialPages, tplSpecialChanges, tplSpecialOrphans) {
@@ -251,7 +251,7 @@
                 else
                     jel.attr('href', '/#/read' + jel.attr('data-wiki-url'));
             });
-            // If we've already rendered the content, see if need to display a
+            // If we've already rendered the content, see if we need to display a
             // warning about the page's state.
             if (this.model.get('content')) {
                 if (this._pageState === undefined) {
@@ -285,7 +285,10 @@
         _checkPageState: function() {
             this._isCheckingPageState = true;
             var $view = this;
-            var stateModel = new Models.PageStateModel({ path: this.model.get('path') });
+            var statePath = this.model.checkStatePath();
+            if (!statePath)
+                return;
+            var stateModel = new Models.PageStateModel({ path: statePath });
             stateModel.fetch({
                 success: function(model, response, options) {
                     $view._pageState = model;
@@ -298,8 +301,8 @@
         }
     });

-    var CategoryView = exports.CategoryView = MasterPageView.extend({
-        defaultTemplateSource: tplCategory
+    var MetaPageView = exports.MetaPageView = PageReadView.extend({
+        defaultTemplateSource: tplMetaPage
     });

     var PageEditView = exports.PageEditView = MasterPageView.extend({
```
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/static/tpl/meta-page.html	Wed Nov 27 23:32:45 2013 -0800
@@ -0,0 +1,10 @@
+<article>
+    <header>
+        <h1>{{meta_value}} <span class="decorator">{{meta_query}}</span></h1>
+    </header>
+    <section>
+        {{content}}
+    </section>
+    <footer>
+    </footer>
+</article>
```
```diff
--- a/wikked/db.py	Wed Nov 27 23:28:46 2013 -0800
+++ b/wikked/db.py	Wed Nov 27 23:32:45 2013 -0800
@@ -5,7 +5,7 @@
 import logging
 import datetime
 from sqlalchemy import (
-    and_,
+    and_, or_,
     Column, Boolean, Integer, String, Text, DateTime, ForeignKey)
 from sqlalchemy.orm import relationship, backref, defer
 from wikked.web import db
@@ -220,12 +220,16 @@
         db.session.commit()

         if to_remove or added_db_objs:
+            # If pages have been added/removed/updated, invalidate everything
+            # in the wiki that has includes or queries.
             db_pages = db.session.query(SQLPage).\
                 options(
                     defer(SQLPage.title),
                     defer(SQLPage.raw_text),
                     defer(SQLPage.formatted_text),
                     defer(SQLPage.ready_text)).\
+                join(SQLReadyMeta).\
+                filter(or_(SQLReadyMeta.name == 'include', SQLReadyMeta.name == 'query')).\
                 all()
             for p in db_pages:
                 p.is_ready = False
@@ -279,12 +283,19 @@
     def getLinksTo(self, url):
         q = db.session.query(SQLReadyLink).\
-            filter(SQLReadyLink.target_url == url).\
-            join(SQLReadyLink.source).\
-            all()
+                filter(SQLReadyLink.target_url == url).\
+                join(SQLReadyLink.source).\
+                all()
         for l in q:
             yield l.source.url

+    def getUncachedPages(self):
+        q = db.session.query(SQLPage).\
+                filter(SQLPage.is_ready == False).\
+                all()
+        for p in q:
+            yield p
+
     def _createSchema(self):
         db.drop_all()
         db.create_all()
@@ -343,8 +354,8 @@
         db_obj = db.session.query(SQLPage).filter(SQLPage.id == page._id).one()

         db_obj.ready_text = page._data.text
-        db_obj.is_ready = True
-
+
+        del db_obj.ready_meta[:]
         for name, value in page._data.ext_meta.iteritems():
             if isinstance(value, bool):
                 value = ""
@@ -354,9 +365,12 @@
             for v in value:
                 db_obj.ready_meta.append(SQLReadyMeta(name, v))

+        del db_obj.ready_links[:]
         for link_url in page._data.ext_links:
             db_obj.ready_links.append(SQLReadyLink(link_url))

+        db_obj.is_ready = True
+
         db.session.commit()
```
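The invalidation query above is the heart of the "only un-'ready' pages that have includes or queries" fix: rather than flagging every page after a sync, it joins against the cached meta table and keeps only pages carrying an `include` or `query` entry. Below is a minimal, self-contained sketch of that pattern with simplified stand-in models (this is not wikked's actual schema):

```python
# Stand-in models demonstrating the join + or_() invalidation pattern.
from sqlalchemy import (
    create_engine, or_, Column, Boolean, Integer, String, ForeignKey)
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()

class Page(Base):                     # stand-in for SQLPage
    __tablename__ = 'pages'
    id = Column(Integer, primary_key=True)
    url = Column(String)
    is_ready = Column(Boolean, default=True)
    ready_meta = relationship('ReadyMeta', backref='page')

class ReadyMeta(Base):                # stand-in for SQLReadyMeta
    __tablename__ = 'ready_meta'
    id = Column(Integer, primary_key=True)
    page_id = Column(Integer, ForeignKey('pages.id'))
    name = Column(String)
    value = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

plain = Page(url='/sandbox')
lister = Page(url='/movies',
              ready_meta=[ReadyMeta(name='query', value='category=movies')])
session.add_all([plain, lister])
session.commit()

# Same shape as the query in the diff: join on the cached meta, keep only
# pages that have an `include` or `query` entry, and un-ready them.
stale = session.query(Page).\
    join(ReadyMeta).\
    filter(or_(ReadyMeta.name == 'include', ReadyMeta.name == 'query')).\
    all()
for p in stale:
    p.is_ready = False
session.commit()

print([p.url for p in stale])   # ['/movies'] -- '/sandbox' stays ready
```

A side effect of the inner join is that pages with no cached meta rows at all are naturally excluded from the invalidation pass.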
```diff
--- a/wikked/formatter.py	Wed Nov 27 23:28:46 2013 -0800
+++ b/wikked/formatter.py	Wed Nov 27 23:32:45 2013 -0800
@@ -43,6 +43,9 @@
             'include': self._processInclude,
             'query': self._processQuery
             }
+        self.endpoints = {
+            'url': self._formatUrlLink
+            }

     def formatText(self, ctx, text):
         text = FILE_FORMAT_REGEX.sub("\n", text)
@@ -105,27 +108,36 @@
     def _processWikiLinks(self, ctx, text):
         s = self

-        # [[url:Something/Blah.ext]]
+        # [[endpoint:Something/Blah.ext]]
         def repl1(m):
-            url = m.group(1).strip()
-            if url.startswith('/'):
-                return '/files' + url
-            abs_url = os.path.join('/files', ctx.urldir, url)
-            abs_url = os.path.normpath(abs_url).replace('\\', '/')
-            return abs_url
-        text = re.sub(r'\[\[url\:([^\]]+)\]\]', repl1, text)
+            endpoint = m.group(1)
+            value = m.group(2).strip()
+            if endpoint in self.endpoints:
+                return self.endpoints[endpoint](ctx, endpoint, value, value)
+            return self._formatMetaLink(ctx, endpoint, value, value)
+        text = re.sub(r'\[\[(\w[\w\d]+)\:([^\]]+)\]\]', repl1, text)
+
+        # [[display name|endpoint:Something/Whatever]]
+        def repl2(m):
+            display = m.group(1).strip()
+            endpoint = m.group(2)
+            value = m.group(3).strip()
+            if endpoint in self.endpoints:
+                return self.endpoints[endpoint](ctx, endpoint, value, display)
+            return self._formatMetaLink(ctx, endpoint, value, display)
+        text = re.sub(r'\[\[([^\|\]]+)\|\s*(\w[\w\d]+)\:([^\]]+)\]\]', repl2, text)

         # [[display name|Whatever/PageName]]
-        def repl2(m):
+        def repl3(m):
             return s._formatWikiLink(ctx, m.group(1).strip(), m.group(2).strip())
-        text = re.sub(r'\[\[([^\|\]]+)\|([^\]]+)\]\]', repl2, text)
+        text = re.sub(r'\[\[([^\|\]]+)\|([^\]]+)\]\]', repl3, text)

         # [[Namespace/PageName]]
-        def repl3(m):
+        def repl4(m):
             a, b = m.group(1, 2)
             url = b if a is None else (a + b)
             return s._formatWikiLink(ctx, b, url)
-        text = re.sub(r'\[\[([^\]]+/)?([^\]]+)\]\]', repl3, text)
+        text = re.sub(r'\[\[([^\]]+/)?([^\]]+)\]\]', repl4, text)

         return text

@@ -176,6 +188,18 @@
             mod_attr = ' data-wiki-mod="%s"' % modifier
         return '<div class="wiki-query"%s>%s</div>\n' % (mod_attr, processed_args)

+    def _formatUrlLink(self, ctx, endpoint, value, display):
+        if value.startswith('/'):
+            abs_url = '/files' + value
+        else:
+            abs_url = os.path.join('/files', ctx.urldir, value)
+            abs_url = os.path.normpath(abs_url).replace('\\', '/')
+        return '<a class="wiki-asset" href="%s">%s</a>' % (abs_url, display)
+
+    def _formatMetaLink(self, ctx, endpoint, value, display):
+        ctx.out_links.append("%s:%s" % (endpoint, value))
+        return '<a class="wiki-meta-link" data-wiki-meta="%s" data-wiki-value="%s">%s</a>' % (endpoint, value, display)
+
     def _formatWikiLink(self, ctx, display, url):
         ctx.out_links.append(url)
         return '<a class="wiki-link" data-wiki-url="%s">%s</a>' % (url, display)
```
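The new `endpoints` table turns `[[url:...]]` into just one registered handler, and any other `[[name:value]]` prefix falls through to a generic meta link that also records an out-link. Here is a runnable sketch of that dispatch, reusing the `repl1` regex and the HTML shapes from `_formatUrlLink`/`_formatMetaLink` (the standalone function names are illustrative):

```python
import re
import os.path

def format_url_link(urldir, value, display):
    # Anchor relative asset paths under /files, like _formatUrlLink does.
    if value.startswith('/'):
        abs_url = '/files' + value
    else:
        abs_url = os.path.normpath(
            os.path.join('/files', urldir, value)).replace('\\', '/')
    return '<a class="wiki-asset" href="%s">%s</a>' % (abs_url, display)

def format_meta_link(out_links, endpoint, value, display):
    # Record the meta reference as an out-link, like _formatMetaLink does.
    out_links.append('%s:%s' % (endpoint, value))
    return ('<a class="wiki-meta-link" data-wiki-meta="%s" '
            'data-wiki-value="%s">%s</a>' % (endpoint, value, display))

def process_endpoint_links(text, urldir='', out_links=None):
    out_links = out_links if out_links is not None else []

    def repl(m):
        endpoint, value = m.group(1), m.group(2).strip()
        if endpoint == 'url':            # the one registered endpoint
            return format_url_link(urldir, value, value)
        return format_meta_link(out_links, endpoint, value, value)

    # Same pattern as repl1 in the diff: [[endpoint:Something/Blah.ext]]
    return re.sub(r'\[\[(\w[\w\d]+)\:([^\]]+)\]\]', repl, text)

print(process_endpoint_links('See [[url:pics/cover.jpg]]', urldir='movies'))
# See <a class="wiki-asset" href="/files/movies/pics/cover.jpg">pics/cover.jpg</a>
print(process_endpoint_links('Tagged [[category:Witty Movies]]'))
# Tagged <a class="wiki-meta-link" data-wiki-meta="category"
#           data-wiki-value="Witty Movies">Witty Movies</a>
```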
```diff
--- a/wikked/fs.py	Wed Nov 27 23:28:46 2013 -0800
+++ b/wikked/fs.py	Wed Nov 27 23:32:45 2013 -0800
@@ -3,10 +3,14 @@
 import re
 import string
 import codecs
+import fnmatch
 import logging
 from utils import PageNotFoundError, title_to_url, path_to_url

+META_ENDPOINT = '_meta'
+
+
 class PageInfo(object):
     def __init__(self, url, path):
         self.url = url
@@ -45,8 +49,6 @@
             dirnames[:] = [d for d in dirnames if os.path.join(dirpath, d) not in self.excluded]
             for filename in filenames:
                 path = os.path.join(dirpath, filename)
-                if path in self.excluded:
-                    continue
                 page_info = self.getPageInfo(path)
                 if page_info is not None:
                     yield page_info
@@ -55,7 +57,7 @@
         if not isinstance(path, unicode):
             path = unicode(path)
         for e in self.excluded:
-            if path.startswith(e):
+            if fnmatch.fnmatch(path, e):
                 return None
         return self._getPageInfo(path)
@@ -83,7 +85,11 @@
         return self._getPhysicalPath(url, False)

     def _getPageInfo(self, path):
+        meta = None
         rel_path = os.path.relpath(path, self.root)
+        if rel_path.startswith(META_ENDPOINT + os.sep):
+            rel_path = rel_path[len(META_ENDPOINT) + 1:]
+            meta, rel_path = rel_path.split(os.sep, 1)
         rel_path_split = os.path.splitext(rel_path)
         ext = rel_path_split[1].lstrip('.')
         name = rel_path_split[0]
@@ -93,22 +99,50 @@
             return None

         url = path_to_url(unicode(name), strip_ext=True)
+        if meta:
+            url = u"%s:%s" % (meta.lower(), url)
         return PageInfo(url, path)

     def _getPhysicalPath(self, url, is_file):
+        endpoint = None
+        m = re.match(r'(\w[\w\d]+)\:(.*)', url)
+        if m:
+            endpoint = str(m.group(1))
+            url = str(m.group(2)).strip()
+
         if url[0] != '/':
             raise ValueError("Page URLs need to be absolute: " + url)
         if string.find(url, '..') >= 0:
             raise ValueError("Page URLs can't contain '..': " + url)

+        # Find the root directory in which we'll be searching for the
+        # page file.
+        skip_endpoint = True
+        if endpoint:
+            # Find the endpoint that gets transformed to the value
+            # we see in the URL.
+            endpoint_root = os.path.join(self.root, META_ENDPOINT)
+            names = os.listdir(endpoint_root)
+            for name in names:
+                name_formatted = title_to_url(name)
+                if name_formatted == endpoint:
+                    current = os.path.join(endpoint_root, name)
+                    break
+            else:
+                raise PageNotFoundError("No such meta endpoint: %s" % endpoint)
+            skip_endpoint = False
+        else:
+            current = self.root
+
         # For each "part" in the given URL, find the first
         # file-system entry that would get slugified to an
         # equal string.
-        current = self.root
         parts = unicode(url[1:]).lower().split('/')
         for i, part in enumerate(parts):
             names = os.listdir(current)
             for name in names:
+                if skip_endpoint and i == 0 and name == META_ENDPOINT:
+                    continue
                 name_formatted = title_to_url(name)
                 if is_file and i == len(parts) - 1:
                     # If we're looking for a file and this is the last part,
@@ -122,6 +156,6 @@
                 break
             else:
                 # Failed to find a part of the URL.
-                raise PageNotFoundError("No such page: " + url)
+                raise PageNotFoundError("No such page: %s" % url)

         return current
```
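On the file-system side, meta pages live under a reserved `_meta` directory, and their URLs pick up a lower-cased endpoint prefix. A simplified sketch of the `_getPageInfo` mapping follows; the paths are hypothetical and the slugifier is a stand-in for wikked's `title_to_url`/`path_to_url` (POSIX path separators assumed):

```python
import os

META_ENDPOINT = '_meta'

def title_to_url(title):
    # Stand-in for wikked's slugifier: lower-case, spaces to dashes.
    return title.lower().replace(' ', '-')

def page_url_for(root, path):
    meta = None
    rel_path = os.path.relpath(path, root)
    if rel_path.startswith(META_ENDPOINT + os.sep):
        # Split off the endpoint directory, as _getPageInfo does.
        rel_path = rel_path[len(META_ENDPOINT) + 1:]
        meta, rel_path = rel_path.split(os.sep, 1)
    name = os.path.splitext(rel_path)[0]
    url = title_to_url(name)
    if meta:
        # Prefix the URL with the lower-cased endpoint name.
        url = '%s:%s' % (meta.lower(), url)
    return url

print(page_url_for('/wiki', '/wiki/Movies/The Mission.txt'))
# -> movies/the-mission
print(page_url_for('/wiki', '/wiki/_meta/Category/Witty Movies.txt'))
# -> category:witty-movies
```

The switch from `startswith` to `fnmatch.fnmatch` for exclusions also means entries like `*.orig` now work as glob patterns rather than literal path prefixes.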
```diff
--- a/wikked/page.py	Wed Nov 27 23:28:46 2013 -0800
+++ b/wikked/page.py	Wed Nov 27 23:32:45 2013 -0800
@@ -256,11 +256,15 @@
             if path_time >= db_obj.time:
                 self.wiki.logger.debug(
                     "Updating database cache for page '%s'." % self.url)
-                fs_page = FileSystemPage(self.wiki, self.url)
-                fs_page._ensureData()
-                added_ids = self.wiki.db.update([fs_page])
-                fs_page._data._db_id = added_ids[0]
-                return fs_page._data
+                try:
+                    fs_page = FileSystemPage(self.wiki, self.url)
+                    fs_page._ensureData()
+                    added_ids = self.wiki.db.update([fs_page])
+                    fs_page._data._db_id = added_ids[0]
+                    return fs_page._data
+                except Exception as e:
+                    msg = "Error updating database cache from the file-system: %s" % e
+                    raise PageLoadingError(msg, e)

         data = PageData()
         data._db_id = db_obj.id
```
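The auto-update path now wraps any file-system failure in `PageLoadingError`, so callers only have to catch one exception type while still keeping the original cause. A tiny sketch of the pattern (the `PageLoadingError` below is a stand-in with the same `(message, cause)` constructor shape seen in the diff):

```python
class PageLoadingError(Exception):
    # Keep the original exception around for debugging.
    def __init__(self, message, cause=None):
        super(PageLoadingError, self).__init__(message)
        self.cause = cause

def refresh_from_disk():
    raise IOError("simulated disk error")

try:
    try:
        refresh_from_disk()
    except Exception as e:
        # Re-raise as the single exception type callers expect.
        raise PageLoadingError(
            "Error updating database cache from the file-system: %s" % e, e)
except PageLoadingError as err:
    print(err, '| caused by:', repr(err.cause))
```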
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/wikked/templates/meta_page.html	Wed Nov 27 23:32:45 2013 -0800
@@ -0,0 +1,13 @@
+{% if info_text %}
+{{info_text|safe}}
+{% else %}
+<p>No additional information is available for this page. You can write some right now.</p>
+{% endif %}
+
+<h2>Pages in {{name}} "{{value}}"</h2>
+
+<ul>
+    {% for p in pages %}
+    <li><a class="wiki-link" data-wiki-url="{{p.url}}">{{p.title}}</a></li>
+    {% endfor %}
+</ul>
```
```diff
--- a/wikked/utils.py	Wed Nov 27 23:28:46 2013 -0800
+++ b/wikked/utils.py	Wed Nov 27 23:32:45 2013 -0800
@@ -28,6 +28,7 @@

 def get_absolute_url(base_url, url, do_slugify=True):
+    base_url = re.sub(r'^(\w[\w\d]+)\:', '', base_url)
     if base_url[0] != '/':
         raise ValueError("The base URL must be absolute. Got: %s" % base_url)
```
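Stripping the endpoint prefix lets a meta page such as `category:/witty-movies` serve as a base URL for resolving relative links, since the prefix would otherwise trip the absolute-path check. A quick illustration (hypothetical values):

```python
import re

def strip_endpoint(base_url):
    # Same regex as the diff: drop a leading `endpoint:` prefix, if any.
    return re.sub(r'^(\w[\w\d]+)\:', '', base_url)

print(strip_endpoint('category:/witty-movies'))  # -> /witty-movies
print(strip_endpoint('/movies/the-mission'))     # -> unchanged
```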
```diff
--- a/wikked/views.py	Wed Nov 27 23:28:46 2013 -0800
+++ b/wikked/views.py	Wed Nov 27 23:32:45 2013 -0800
@@ -1,3 +1,4 @@
+import re
 import time
 import urllib
 import string
@@ -11,7 +12,7 @@
 from page import Page, DatabasePage, PageData, PageLoadingError
 from fs import PageNotFoundError
 from formatter import PageFormatter, FormattingContext
-from utils import title_to_url, path_to_url
+from utils import title_to_url, path_to_url, namespace_title_to_url
 import scm

@@ -82,6 +83,11 @@

 def make_absolute(url):
+    m = re.match(r'(\w[\w\d]+)\:(.*)', url)
+    if m:
+        endpoint = str(m.group(1))
+        path = string.lstrip(str(m.group(2)), '/')
+        return '%s:/%s' % (endpoint, path)
     return '/' + string.lstrip(url, '/')

@@ -212,6 +218,37 @@
     return make_auth_response(result)

+@app.route('/api/read_meta/<name>/<value>')
+def api_read_meta_page(name, value):
+    query = {name: [value]}
+    pages = g.wiki.getPages(meta_query=query)
+    tpl_data = {
+            'name': name,
+            'value': value,
+            'pages': [get_page_meta(p) for p in pages]
+            }
+
+    url_value = namespace_title_to_url(value)
+    info_page = get_page_or_none(
+            "%s:/%s" % (name, url_value),
+            force_resolve=('force_resolve' in request.args))
+    if info_page:
+        tpl_data['info_text'] = info_page.text
+
+    text = render_template('meta_page.html', **tpl_data)
+    result = {
+            'meta_query': name,
+            'meta_value': value,
+            'query': query,
+            'meta': {},
+            'text': text
+            }
+    if info_page:
+        result['meta'] = get_page_meta(info_page)
+
+    return make_auth_response(result)
+
+
 @app.route('/api/revision/<path:url>')
 def api_read_page_rev(url):
     rev = request.args.get('rev')
```
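Putting it together: the new endpoint returns both the rendered listing and the raw query data. Assuming a wiki where some pages carry a `category: Witty Movies` meta, a request like `GET /api/read_meta/category/Witty%20Movies` should produce JSON shaped like the `result` dict above. This is an illustrative sketch of the response shape, not captured server output:

```python
import json

# Expected shape of the /api/read_meta response, per the view code above.
expected_shape = {
    'meta_query': 'category',              # the meta name from the URL
    'meta_value': 'Witty Movies',          # the meta value from the URL
    'query': {'category': ['Witty Movies']},
    'meta': {},                            # filled in when a `_meta` info page exists
    'text': '<h2>Pages in category "Witty Movies"</h2>...',
}
print(json.dumps(expected_shape, indent=2))
```

The `meta` and `info_text` fields come from an optional info page stored under the `_meta` endpoint (e.g. a page at `category:/witty-movies`), which the front end renders above the listing.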
```diff
--- a/wikked/wiki.py	Wed Nov 27 23:28:46 2013 -0800
+++ b/wikked/wiki.py	Wed Nov 27 23:32:45 2013 -0800
@@ -180,6 +180,8 @@
     def getPages(self, subdir=None, meta_query=None):
         """ Gets all the pages in the wiki, or in the given sub-directory.
         """
+        if meta_query:
+            self._cachePages()
         for page in self.db.getPages(subdir, meta_query):
             yield DatabasePage(self, db_obj=page)

@@ -258,10 +260,14 @@
     def _cachePages(self, only_urls=None):
         self.logger.debug("Caching extended page data...")
-        urls = only_urls or self.getPageUrls()
-        for url in urls:
-            page = self.getPage(url)
-            page._ensureExtendedData()
+        if only_urls:
+            for url in only_urls:
+                page = self.getPage(url)
+                page._ensureExtendedData()
+        else:
+            for db_obj in self.db.getUncachedPages():
+                page = DatabasePage(self, db_obj=db_obj)
+                page._ensureExtendedData()

     def _loadConfig(self, parameters):
         # Merge the default settings with any settings provided by
```