changeset 35:2b35d719f342
Handle wiki and page permissions for read/write access.
Refactor the code so that page title "slugification" happens in only one place.
Improve that "slugification" by replacing diacritics with their ASCII
equivalents (on both server and client).
| author   | Ludovic Chabant <ludovic@chabant.com> |
| ---      | --- |
| date     | Sun, 06 Jan 2013 20:22:36 -0800 |
| parents  | bbc7f7cca5cf |
| children | 87b9a8eb8a24 |
| files    | wikked/auth.py wikked/fs.py wikked/static/js/wikked/client.js wikked/views.py wikked/wiki.py |
| diffstat | 5 files changed, 211 insertions(+), 43 deletions(-) |
```diff
--- a/wikked/auth.py	Sun Jan 06 20:20:41 2013 -0800
+++ b/wikked/auth.py	Sun Jan 06 20:22:36 2013 -0800
@@ -1,3 +1,4 @@
+import re
 import logging
 
 
@@ -28,20 +29,49 @@
         if logger is None:
             logger = logging.getLogger('wikked.auth')
         self.logger = logger
+        self._updatePermissions(config)
         self._updateUserInfos(config)
 
     def getUsers(self):
-        for user in self.users:
+        for user in self._users:
             yield self._createUser(user)
 
     def getUser(self, username):
-        for user in self.users:
+        for user in self._users:
             if user['username'] == username:
                 return self._createUser(user)
         return None
 
+    def isPageReadable(self, page, username):
+        return self._isAllowedForMeta(page, 'readers', username)
+
+    def isPageWritable(self, page, username):
+        return self._isAllowedForMeta(page, 'writers', username)
+
+    def _isAllowedForMeta(self, page, meta_name, username):
+        if (self._permissions[meta_name] is not None and
+                username not in self._permissions[meta_name]):
+            return False
+        if meta_name in page.all_meta['user']:
+            allowed = [r.strip() for r in re.split(r'[ ,;]', page.all_meta['user'][meta_name])]
+            if username is None:
+                return 'anonymous' in allowed
+            else:
+                return '*' in allowed or username in allowed
+        return True
+
+    def _updatePermissions(self, config):
+        self._permissions = {
+            'readers': None,
+            'writers': None
+            }
+        if config.has_option('permissions', 'readers'):
+            self._permissions['readers'] = [p.strip() for p in re.split(r'[ ,;]', config.get('permissions', 'readers'))]
+        if config.has_option('permissions', 'writers'):
+            self._permissions['writers'] = [p.strip() for p in re.split(r'[ ,;]', config.get('permissions', 'writers'))]
+
     def _updateUserInfos(self, config):
-        self.users = []
+        self._users = []
         if config.has_section('users'):
             groups = []
             if config.has_section('groups'):
@@ -50,10 +80,10 @@
             for user in config.items('users'):
                 user_info = { 'username': user[0], 'password': user[1], 'groups': [] }
                 for group in groups:
-                    users_in_group = [u.strip() for u in group[1].split(',')]
+                    users_in_group = [u.strip() for u in re.split(r'[ ,;]', group[1])]
                     if user[0] in users_in_group:
                         user_info['groups'].append(group[0])
-                self.users.append(user_info)
+                self._users.append(user_info)
 
     def _createUser(self, user_info):
         user = User(user_info['username'], user_info['password'])
```
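The new permission model has two layers: a wiki-wide `[permissions]` section in the config (when present, an allow-list checked first) and optional per-page `readers`/`writers` metadata, where `anonymous` matches logged-out visitors and `*` matches any named user. A minimal standalone sketch of that logic, with a hypothetical `page_meta` dict standing in for `page.all_meta['user']`:

```python
import re

def is_allowed(wiki_allowed, page_meta, meta_name, username):
    # Wiki-wide allow-list from the [permissions] config section;
    # None means "no wiki-level restriction".
    if wiki_allowed is not None and username not in wiki_allowed:
        return False
    # Per-page [[readers: ...]] / [[writers: ...]] metadata, split on
    # spaces, commas or semi-colons, same as the changeset's re.split().
    if meta_name in page_meta:
        allowed = [r.strip() for r in re.split(r'[ ,;]', page_meta[meta_name])]
        if username is None:
            return 'anonymous' in allowed
        return '*' in allowed or username in allowed
    # No page-level metadata: anyone who passed the wiki-level check.
    return True

# A page declaring [[readers: alice, bob]]:
meta = {'readers': 'alice, bob'}
assert is_allowed(None, meta, 'readers', 'alice')
assert not is_allowed(None, meta, 'readers', 'carol')
assert not is_allowed(None, meta, 'readers', None)  # anonymous visitor
```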
```diff
--- a/wikked/fs.py	Sun Jan 06 20:20:41 2013 -0800
+++ b/wikked/fs.py	Sun Jan 06 20:22:36 2013 -0800
@@ -17,11 +17,15 @@
     file-system paths, and for scanning the file-system to list
     existing pages.
     """
-    def __init__(self, root):
-        self.root = root
+    def __init__(self, root, slugify=None):
+        self.root = unicode(root)
+        self.slugify = slugify
         self.excluded = []
         self.page_extensions = None
 
+        if slugify is None:
+            self.slugify = lambda x: x
+
     def getPageInfos(self, subdir=None):
         basepath = self.root
         if subdir is not None:
@@ -38,6 +42,8 @@
             yield page_info
 
     def getPageInfo(self, path):
+        if not isinstance(path, unicode):
+            path = unicode(path)
         for e in self.excluded:
             if path.startswith(e):
                 return None
@@ -53,7 +59,7 @@
             'url': url,
             'path': path,
             'name': name_split[0],
-            'ext': name_split[1],
+            'ext': name_split[1].lstrip('.'),
             'content': content
             }
 
@@ -68,17 +74,21 @@
         return self._getPhysicalPath(url, False)
 
     def _getPageInfo(self, path):
-        rel_path = os.path.relpath(path, self.root)
-        rel_path_split = os.path.splitext(rel_path)
-        if self.page_extensions is not None and rel_path_split[1] not in self.page_extensions:
-            return None
-        url = re.sub(r'[^A-Za-z0-9_\.\-\(\)/]+', '-', rel_path_split[0].lower())
-        return {
-            'url': url,
-            'path': path,
-            'name': rel_path_split[0],
-            'ext': rel_path_split[1]
-            }
+        rel_path = os.path.relpath(path, self.root)
+        rel_path_split = os.path.splitext(rel_path)
+        ext = rel_path_split[1].lstrip('.')
+        name = rel_path_split[0]
+        if len(ext) == 0:
+            return None
+        if self.page_extensions is not None and ext not in self.page_extensions:
+            return None
+        url = self.slugify(name)
+        return {
+            'url': url,
+            'path': path,
+            'name': name,
+            'ext': ext
+            }
 
     def getPhysicalPagePath(self, url):
         return self._getPhysicalPath(url, True)
@@ -91,11 +101,11 @@
         # file-system entry that would get slugified to an
         # equal string.
         current = self.root
-        parts = url.lower().split('/')
+        parts = unicode(url).lower().split('/')
         for i, part in enumerate(parts):
             names = os.listdir(current)
             for name in names:
-                name_formatted = re.sub(r'[^A-Za-z0-9_\.\-\(\)]+', '-', name.lower())
+                name_formatted = self.slugify(name)
                 if is_file and i == len(parts) - 1:
                     # If we're looking for a file and this is the last part,
                     # look for something similar but with an extension.
```
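With this refactoring, `FileSystem` no longer hard-codes a slugification regex; it takes a `slugify` callable (identity by default) and uses it both when turning file names into URLs in `_getPageInfo()` and when matching a URL back to a directory entry in `_getPhysicalPath()`, so the two directions cannot drift apart. A toy illustration of that injection (a hypothetical class, not wikked's own):

```python
import os

class ToyFileSystem(object):
    def __init__(self, root, slugify=None):
        self.root = root
        # Same default as the changeset: identity when no slugifier is given.
        self.slugify = slugify if slugify is not None else (lambda x: x)

    def url_for(self, rel_path):
        # Mirror of _getPageInfo(): strip the extension, slugify the rest.
        name, _ = os.path.splitext(rel_path)
        return self.slugify(name)

fs = ToyFileSystem('/wiki', slugify=lambda s: s.lower().replace(' ', '-'))
print(fs.url_for('Main Page.md'))  # -> main-page
```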
```diff
--- a/wikked/static/js/wikked/client.js	Sun Jan 06 20:20:41 2013 -0800
+++ b/wikked/static/js/wikked/client.js	Sun Jan 06 20:22:36 2013 -0800
@@ -3,9 +3,106 @@
  */
 define(function() {
 
+    var defaultDiacriticsRemovalMap = [
+        {'base':'A', 'letters':/[\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F]/g},
+        {'base':'AA','letters':/[\uA732]/g},
+        {'base':'AE','letters':/[\u00C6\u01FC\u01E2]/g},
+        {'base':'AO','letters':/[\uA734]/g},
+        {'base':'AU','letters':/[\uA736]/g},
+        {'base':'AV','letters':/[\uA738\uA73A]/g},
+        {'base':'AY','letters':/[\uA73C]/g},
+        {'base':'B', 'letters':/[\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181]/g},
+        {'base':'C', 'letters':/[\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E]/g},
+        {'base':'D', 'letters':/[\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779]/g},
+        {'base':'DZ','letters':/[\u01F1\u01C4]/g},
+        {'base':'Dz','letters':/[\u01F2\u01C5]/g},
+        {'base':'E', 'letters':/[\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E]/g},
+        {'base':'F', 'letters':/[\u0046\u24BB\uFF26\u1E1E\u0191\uA77B]/g},
+        {'base':'G', 'letters':/[\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E]/g},
+        {'base':'H', 'letters':/[\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D]/g},
+        {'base':'I', 'letters':/[\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197]/g},
+        {'base':'J', 'letters':/[\u004A\u24BF\uFF2A\u0134\u0248]/g},
+        {'base':'K', 'letters':/[\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2]/g},
+        {'base':'L', 'letters':/[\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780]/g},
+        {'base':'LJ','letters':/[\u01C7]/g},
+        {'base':'Lj','letters':/[\u01C8]/g},
+        {'base':'M', 'letters':/[\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C]/g},
+        {'base':'N', 'letters':/[\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4]/g},
+        {'base':'NJ','letters':/[\u01CA]/g},
+        {'base':'Nj','letters':/[\u01CB]/g},
+        {'base':'O', 'letters':/[\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C]/g},
+        {'base':'OI','letters':/[\u01A2]/g},
+        {'base':'OO','letters':/[\uA74E]/g},
+        {'base':'OU','letters':/[\u0222]/g},
+        {'base':'P', 'letters':/[\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754]/g},
+        {'base':'Q', 'letters':/[\u0051\u24C6\uFF31\uA756\uA758\u024A]/g},
+        {'base':'R', 'letters':/[\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782]/g},
+        {'base':'S', 'letters':/[\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784]/g},
+        {'base':'T', 'letters':/[\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786]/g},
+        {'base':'TZ','letters':/[\uA728]/g},
+        {'base':'U', 'letters':/[\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244]/g},
+        {'base':'V', 'letters':/[\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245]/g},
+        {'base':'VY','letters':/[\uA760]/g},
+        {'base':'W', 'letters':/[\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72]/g},
+        {'base':'X', 'letters':/[\u0058\u24CD\uFF38\u1E8A\u1E8C]/g},
+        {'base':'Y', 'letters':/[\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE]/g},
+        {'base':'Z', 'letters':/[\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762]/g},
+        {'base':'a', 'letters':/[\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250]/g},
+        {'base':'aa','letters':/[\uA733]/g},
+        {'base':'ae','letters':/[\u00E6\u01FD\u01E3]/g},
+        {'base':'ao','letters':/[\uA735]/g},
+        {'base':'au','letters':/[\uA737]/g},
+        {'base':'av','letters':/[\uA739\uA73B]/g},
+        {'base':'ay','letters':/[\uA73D]/g},
+        {'base':'b', 'letters':/[\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253]/g},
+        {'base':'c', 'letters':/[\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184]/g},
+        {'base':'d', 'letters':/[\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A]/g},
+        {'base':'dz','letters':/[\u01F3\u01C6]/g},
+        {'base':'e', 'letters':/[\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD]/g},
+        {'base':'f', 'letters':/[\u0066\u24D5\uFF46\u1E1F\u0192\uA77C]/g},
+        {'base':'g', 'letters':/[\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F]/g},
+        {'base':'h', 'letters':/[\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265]/g},
+        {'base':'hv','letters':/[\u0195]/g},
+        {'base':'i', 'letters':/[\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131]/g},
+        {'base':'j', 'letters':/[\u006A\u24D9\uFF4A\u0135\u01F0\u0249]/g},
+        {'base':'k', 'letters':/[\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3]/g},
+        {'base':'l', 'letters':/[\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747]/g},
+        {'base':'lj','letters':/[\u01C9]/g},
+        {'base':'m', 'letters':/[\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F]/g},
+        {'base':'n', 'letters':/[\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5]/g},
+        {'base':'nj','letters':/[\u01CC]/g},
+        {'base':'o', 'letters':/[\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275]/g},
+        {'base':'oi','letters':/[\u01A3]/g},
+        {'base':'ou','letters':/[\u0223]/g},
+        {'base':'oo','letters':/[\uA74F]/g},
+        {'base':'p','letters':/[\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755]/g},
+        {'base':'q','letters':/[\u0071\u24E0\uFF51\u024B\uA757\uA759]/g},
+        {'base':'r','letters':/[\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783]/g},
+        {'base':'s','letters':/[\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B]/g},
+        {'base':'t','letters':/[\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787]/g},
+        {'base':'tz','letters':/[\uA729]/g},
+        {'base':'u','letters':/[\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289]/g},
+        {'base':'v','letters':/[\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C]/g},
+        {'base':'vy','letters':/[\uA761]/g},
+        {'base':'w','letters':/[\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73]/g},
+        {'base':'x','letters':/[\u0078\u24E7\uFF58\u1E8B\u1E8D]/g},
+        {'base':'y','letters':/[\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF]/g},
+        {'base':'z','letters':/[\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763]/g}
+    ];
+
+    function removeDiacritics(str, removalMap) {
+        if (!removalMap) {
+            removalMap = defaultDiacriticsRemovalMap;
+        }
+        for (var i=0; i<removalMap.length; i++) {
+            str = str.replace(removalMap[i].letters, removalMap[i].base);
+        }
+        return str;
+    }
+
     var PageFormatter = {
         formatLink: function(link) {
-            return link.toLowerCase().replace(/[^a-z0-9_\.\-\(\)\/]+/g, '-');
+            var ansi_link = removeDiacritics(link);
+            return ansi_link.toLowerCase().replace(/[^a-z0-9_\.\-\(\)\/]+/g, '-');
         },
         formatText: function(text) {
             var $f = this;
```
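Browsers at the time had no portable Unicode normalization, hence the explicit lookup table on the client, while the server (see wiki.py below) relies on `unicodedata`. For plain combining accents the two approaches should agree, which the sketch below spot-checks; note that the table also folds letters such as `Ø` and `ß` that NFD decomposition alone does not touch, so unusual titles can still slugify differently on each side.

```python
# -*- coding: utf-8 -*-
import unicodedata

def strip_diacritics(text):
    # NFD splits a character like 'e'+acute apart; dropping category 'Mn'
    # (non-spacing combining marks) keeps only the base letters.
    return u''.join(c for c in unicodedata.normalize('NFD', text)
                    if unicodedata.category(c) != 'Mn')

for word, expected in [(u'café', u'cafe'),
                       (u'señor', u'senor'),
                       (u'Čeština', u'Cestina')]:
    assert strip_diacritics(word) == expected
```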
```diff
--- a/wikked/views.py	Sun Jan 06 20:20:41 2013 -0800
+++ b/wikked/views.py	Sun Jan 06 20:22:36 2013 -0800
@@ -15,6 +15,11 @@
 import scm
 
 
+DONT_CHECK = 0
+CHECK_FOR_READ = 1
+CHECK_FOR_WRITE = 2
+
+
 def get_page_or_none(url):
     try:
         page = wiki.getPage(url)
@@ -23,13 +28,25 @@
     except PageNotFoundError:
         return None
 
-def get_page_or_404(url):
+def get_page_or_404(url, check_perms=DONT_CHECK):
     page = get_page_or_none(url)
     if page is not None:
+        if check_perms == CHECK_FOR_READ and not is_page_readable(page):
+            abort(401)
+        elif check_perms == CHECK_FOR_WRITE and not is_page_writable(page):
+            abort(401)
         return page
     abort(404)
 
 
+def is_page_readable(page, user=current_user):
+    return page.wiki.auth.isPageReadable(page, user.get_id())
+
+
+def is_page_writable(page, user=current_user):
+    return page.wiki.auth.isPageWritable(page, user.get_id())
+
+
 def get_history_data(history):
     hist_data = []
     for i, rev in enumerate(reversed(history)):
@@ -46,11 +63,15 @@
             f_info = wiki.fs.getPageInfo(f['path'])
             if f_info is None:
                 continue
+            page = wiki.getPage(f_info['url'])
+            if not is_page_readable(page):
+                continue
             rev_data['pages'].append({
                 'url': f_info['url'],
                 'action': scm.ACTION_NAMES[f['action']]
                 })
-        hist_data.append(rev_data)
+        if len(rev_data['pages']) > 0:
+            hist_data.append(rev_data)
     return hist_data
 
 
@@ -85,21 +106,22 @@
 @app.route('/api/list/<path:url>')
 def api_list_pages(url):
-    page_metas = [page.all_meta for page in wiki.getPages(url)]
+    pages = filter(is_page_readable, wiki.getPages(url))
+    page_metas = [page.all_meta for page in pages]
     result = { 'path': url, 'pages': list(page_metas) }
     return make_auth_response(result)
 
 @app.route('/api/read/<path:url>')
 def api_read_page(url):
-    page = get_page_or_404(url)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     result = { 'path': url, 'meta': page.all_meta, 'text': page.formatted_text }
     return make_auth_response(result)
 
 @app.route('/api/raw/<path:url>')
 def api_read_page_raw(url):
-    page = get_page_or_404(url)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     result = { 'path': url, 'meta': page.all_meta, 'text': page.raw_text }
     return make_auth_response(result)
 
@@ -109,7 +131,7 @@
     rev = request.args.get('rev')
     if rev is None:
         abort(400)
-    page = get_page_or_404(url)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     page_rev = page.getRevision(rev)
     meta = dict(page.all_meta, rev=rev)
     result = { 'path': url, 'meta': meta, 'text': page_rev }
@@ -122,7 +144,7 @@
     rev2 = request.args.get('rev2')
     if rev1 is None:
         abort(400)
-    page = get_page_or_404(url)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     diff = page.getDiff(rev1, rev2)
     if 'raw' not in request.args:
         lexer = get_lexer_by_name('diff')
@@ -138,7 +160,7 @@
 @app.route('/api/state/<path:url>')
 def api_get_state(url):
-    page = get_page_or_404(url)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     state = page.getState()
     return make_auth_response({
         'path': url,
@@ -149,7 +171,7 @@
 @app.route('/api/outlinks/<path:url>')
 def api_get_outgoing_links(url):
-    page = get_page_or_404(url)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     links = []
     for link in page.out_links:
         other = get_page_or_none(link)
@@ -167,11 +189,11 @@
 @app.route('/api/inlinks/<path:url>')
 def api_get_incoming_links(url):
-    page = get_page_or_404(url)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     links = []
     for link in page.in_links:
         other = get_page_or_none(link)
-        if other is not None:
+        if other is not None and is_page_readable(other):
             links.append({
                 'url': link,
                 'meta': other.all_meta
@@ -186,7 +208,7 @@
 @app.route('/api/edit/<path:url>', methods=['GET', 'PUT', 'POST'])
 def api_edit_page(url):
     if request.method == 'GET':
-        page = get_page_or_404(url)
+        page = get_page_or_404(url, CHECK_FOR_READ)
         result = {
             'path': url,
             'meta': page.all_meta,
@@ -198,6 +220,8 @@
             }
         return make_auth_response(result)
 
+    get_page_or_404(url, CHECK_FOR_WRITE)
+
     if not 'text' in request.form:
         abort(400)
     text = request.form['text']
@@ -231,7 +255,7 @@
 @app.route('/api/orphans')
 def api_special_orphans():
     orphans = []
-    for page in wiki.getPages():
+    for page in filter(is_page_readable, wiki.getPages()):
         if len(page.in_links) == 0:
             orphans.append({ 'path': page.url, 'meta': page.all_meta })
     result = { 'orphans': orphans }
@@ -248,7 +272,7 @@
 @app.route('/api/history/<path:url>')
 def api_page_history(url):
-    page = get_page_or_404(url)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     history = page.getHistory()
     hist_data = get_history_data(history)
     result = { 'url': url, 'meta': page.all_meta, 'history': hist_data }
@@ -258,7 +282,10 @@
 @app.route('/api/search')
 def api_search():
     query = request.args.get('q')
-    hits = wiki.index.search(query)
+    def is_hit_readable(hit):
+        page = get_page_or_none(hit['url'])
+        return page is None or is_page_readable(page)
+    hits = filter(is_hit_readable, wiki.index.search(query))
     result = { 'query': query, 'hits': hits }
     return make_auth_response(result)
```
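At the API boundary the new check levels translate into status codes: an unknown page is still a 404, a known page the caller may not access is a 401, and the edit endpoint requires only read access for GET but write access for PUT/POST. A hypothetical spot-check of that behavior with Flask's test client (the page names and their `[[readers: ...]]`/`[[writers: ...]]` metadata are made up for the example, and any other auth middleware is assumed absent):

```python
client = app.test_client()  # `app` is the Flask app from this module

# Unknown page: 404 regardless of permissions.
assert client.get('/api/read/no-such-page').status_code == 404
# Existing page carrying [[readers: alice]] meta, fetched anonymously: 401.
assert client.get('/api/read/private-page').status_code == 401
# A page carrying only [[writers: alice]] allows anonymous reads, not writes.
assert client.get('/api/read/team-page').status_code == 200
assert client.put('/api/edit/team-page', data={'text': 'x'}).status_code == 401
```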
```diff
--- a/wikked/wiki.py	Sun Jan 06 20:20:41 2013 -0800
+++ b/wikked/wiki.py	Sun Jan 06 20:22:36 2013 -0800
@@ -4,6 +4,7 @@
 import time
 import logging
 import itertools
+import unicodedata
 from ConfigParser import SafeConfigParser
 import markdown
 from fs import FileSystem
@@ -57,7 +58,10 @@
     def _processWikiMeta(self, ctx, text):
         def repl1(m):
-            ctx.meta[str(m.group(1))] = str(m.group(3)) if m.group(3) is not None else True
+            if m.group(3) is not None and len(str(m.group(3))) > 0:
+                ctx.meta[str(m.group(1))] = str(m.group(3))
+            else:
+                ctx.meta[str(m.group(1))] = True
             return ''
         text = re.sub(r'^\[\[((__|\+)?[a-zA-Z][a-zA-Z0-9_\-]+):\s*(.*)\]\]\s*$', repl1, text, flags=re.MULTILINE)
         return text
@@ -215,7 +219,10 @@
     @staticmethod
     def title_to_url(title):
-        return re.sub(r'[^A-Za-z0-9_\.\-\(\)/]+', '-', title.lower())
+        # Remove diacritics (accents, etc.) and replace them with their ASCII equivalent.
+        ansi_title = ''.join((c for c in unicodedata.normalize('NFD', title) if unicodedata.category(c) != 'Mn'))
+        # Now replace spaces and punctuation with a hyphen.
+        return re.sub(r'[^A-Za-z0-9_\.\-\(\)/]+', '-', ansi_title.lower())
 
 
 class Wiki(object):
@@ -233,7 +240,7 @@
         if os.path.isfile(config_path):
             self.config.read(config_path)
 
-        self.fs = FileSystem(root)
+        self.fs = FileSystem(root, slugify=Page.title_to_url)
         self.scm = MercurialSourceControl(root, self.logger)
         self.cache = None #Cache(os.path.join(root, '.cache'))
         self.index = WhooshWikiIndex(os.path.join(root, '.index'), logger=self.logger)
@@ -253,9 +260,6 @@
         }
         self.fs.page_extensions = list(set(itertools.chain(*self.formatters.itervalues())))
 
-        if self.index is not None:
-            self.index.update(self.getPages())
-
     @property
     def root(self):
         return self.fs.root
```
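For reference, the rewritten `title_to_url` works in two passes: NFD-decompose and drop combining marks (Unicode category `Mn`), then lower-case and collapse anything outside `A-Za-z0-9_.-()/` into hyphens. A standalone sketch of the same steps, with sample output:

```python
# -*- coding: utf-8 -*-
import re
import unicodedata

def title_to_url(title):
    # Strip diacritics by dropping combining marks after NFD decomposition.
    ascii_title = u''.join(c for c in unicodedata.normalize('NFD', title)
                           if unicodedata.category(c) != 'Mn')
    # Collapse runs of spaces and punctuation into a single hyphen.
    return re.sub(r'[^A-Za-z0-9_\.\-\(\)/]+', '-', ascii_title.lower())

print(title_to_url(u'Crème Brûlée'))        # -> creme-brulee
print(title_to_url(u'API/Référence (v2)'))  # -> api/reference-(v2)
```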