changeset 131:9d22cf4d2412

Massive change that should have been several smaller ones, but whatever: - URLs are not slugified anymore, just quoted. - `getPage` now raises an error if the page doesn't exist. - Fixed the Mercurial SCM provider to be able to commit new files. - Fixed various issues with meta-pages and new files. - Better exception classes. - Configurable default file extension, main page name, and templates folder. - New CLI command to cache the wiki. - A few other small fixes.
author Ludovic Chabant <ludovic@chabant.com>
date Sun, 01 Dec 2013 21:50:29 -0800
parents 72e5f588f989
children e5d4b61e7a4c
files manage.py static/css/wikked.less static/js/wikked/app.js static/js/wikked/models.js static/js/wikked/views.js wikked/auth.py wikked/cache.py wikked/db.py wikked/db/__init__.py wikked/db/base.py wikked/db/sql.py wikked/formatter.py wikked/fs.py wikked/indexer.py wikked/indexer/__init__.py wikked/indexer/base.py wikked/indexer/native.py wikked/page.py wikked/resolver.py wikked/resources/defaults.cfg wikked/scm.py wikked/scm/__init__.py wikked/scm/base.py wikked/scm/git.py wikked/scm/mercurial.py wikked/templates/meta_page.html wikked/utils.py wikked/views.py wikked/web.py wikked/wiki.py
diffstat 27 files changed, 1473 insertions(+), 1278 deletions(-) [+]
line wrap: on
line diff
--- a/manage.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/manage.py	Sun Dec 01 21:50:29 2013 -0800
@@ -1,3 +1,7 @@
+
+# Configure logging.
+import logging
+logging.basicConfig(level=logging.DEBUG)
 
 # Configure a simpler log format.
 from wikked import settings
@@ -49,6 +53,13 @@
 
 
 @manager.command
+def cache():
+    """ Makes sure the extended cache is valid for the whole wiki.
+    """
+    wiki._cachePages()
+
+
+@manager.command
 def list(fs=False):
     """ Lists page names in the wiki.
     """
--- a/static/css/wikked.less	Wed Nov 27 23:32:45 2013 -0800
+++ b/static/css/wikked.less	Sun Dec 01 21:50:29 2013 -0800
@@ -36,7 +36,8 @@
     &:hover { color: @colorBlue; text-decoration: underline; }
     &:active { color: @colorBlue; }
 }
-a.wiki-link.missing {
+a.wiki-link.missing,
+a.wiki-meta-link.missing {
     color: @colorOrange;
     &:hover { color: @colorOrange; text-decoration: underline; }
 }
--- a/static/js/wikked/app.js	Wed Nov 27 23:32:45 2013 -0800
+++ b/static/js/wikked/app.js	Sun Dec 01 21:50:29 2013 -0800
@@ -66,6 +66,7 @@
             '':                     "readMainPage",
             'meta/:name/*path':     "readMetaPage",
             'edit/*path':           "editPage",
+            'edit_meta/:name/*path':"editMetaPage",
             'changes/*path':        "showPageHistory",
             'inlinks/*path':        "showIncomingLinks",
             'revision/*path/:rev':  "readPageRevision",
@@ -91,7 +92,7 @@
             this.navigate('/read/' + path);
         },
         readMainPage: function() {
-            this.readPage('main-page');
+            this.readPage('');
         },
         readMetaPage: function(name, path) {
             var view = new Views.MetaPageView({
@@ -107,6 +108,13 @@
             this.viewManager.switchView(view);
             this.navigate('/edit/' + path);
         },
+        editMetaPage: function(name, path) {
+            var view = new Views.MetaPageEditView({
+                model: new Models.MetaPageEditModel({ name: name, path: path })
+            });
+            this.viewManager.switchView(view);
+            this.navigate('/edit_meta/' + name + '/' + path);
+        },
         showPageHistory: function(path) {
             var view = new Views.PageHistoryView({
                 model: new Models.PageHistoryModel({ path: path })
--- a/static/js/wikked/models.js	Wed Nov 27 23:32:45 2013 -0800
+++ b/static/js/wikked/models.js	Sun Dec 01 21:50:29 2013 -0800
@@ -265,15 +265,13 @@
     });
 
     var PageEditModel = exports.PageEditModel = MasterPageModel.extend({
+        action: 'edit',
         urlRoot: '/api/edit/',
-        action: 'edit',
         doEdit: function(form) {
             var $model = this;
-            var path = this.get('path');
-            this.navigate('/read/' + path, { trigger: true });
-            $.post('/api/edit/' + path, $(form).serialize())
+            $.post(this.url(), $(form).serialize())
                 .success(function(data) {
-                    $model.navigate('/read/' + path, { trigger: true });
+                    $model._onEditSuccess();
                 })
                 .error(function() {
                     alert('Error saving page...');
@@ -281,7 +279,38 @@
         },
         _onChangePath: function(path) {
             PageEditModel.__super__._onChangePath.apply(this, arguments);
-            this.set('url_read', '/#/read/' + path);
+            this.set('url_read', this._getReadPath(path));
+        },
+        _onEditSuccess: function() {
+            this.navigate('/read/' + this.get('path'), { trigger: true });
+        },
+        _getReadPath: function(path) {
+            return '/#/read/' + path;
+        }
+    });
+
+    var MetaPageEditModel = exports.MetaPageEditModel = PageEditModel.extend({
+        action: 'edit',
+        url: function() {
+            return '/api/edit_meta/' + this.get('name') + '/' + this.get('path');
+        },
+        initialize: function() {
+            MetaPageEditModel.__super__.initialize.apply(this, arguments);
+            this.on('change:name', function(model, name) {
+                model._onChangeName(name);
+            });
+        },
+        _onChangeName: function(name) {
+            this.set('url_read', name + '/' + this.get('path'));
+        },
+        _onEditSuccess: function() {
+            this.navigate(
+                '/meta/' + this.get('name') + '/' + this.get('path'),
+                { trigger: true }
+            );
+        },
+        _getReadPath: function(path) {
+            return '/#/meta/' + this.get('name') + '/' + path;
         }
     });
 
--- a/static/js/wikked/views.js	Wed Nov 27 23:32:45 2013 -0800
+++ b/static/js/wikked/views.js	Sun Dec 01 21:50:29 2013 -0800
@@ -244,12 +244,22 @@
 
             // Replace all wiki links with proper hyperlinks using the JS app's
             // URL scheme.
-            this.$('a.wiki-link[data-wiki-url]').each(function(i) {
+            this.$('a.wiki-link').each(function(i) {
                 var jel = $(this);
+                var wiki_url = jel.attr('data-wiki-url').replace(/^\//, '');
                 if (jel.hasClass('missing') || jel.attr('data-action') == 'edit')
-                    jel.attr('href', '/#/edit' + jel.attr('data-wiki-url'));
+                    jel.attr('href', '/#/edit/' + wiki_url);
                 else
-                    jel.attr('href', '/#/read' + jel.attr('data-wiki-url'));
+                    jel.attr('href', '/#/read/' + wiki_url);
+            });
+            this.$('a.wiki-meta-link').each(function(i) {
+                var jel = $(this);
+                var meta_name = jel.attr('data-wiki-meta');
+                var meta_value = jel.attr('data-wiki-value');
+                if (jel.hasClass('missing') || jel.attr('data-action') == 'edit')
+                    jel.attr('href', '/#/edit_meta/' + meta_name + '/' + meta_value);
+                else
+                    jel.attr('href', '/#/meta/' + meta_name + '/' + meta_value);
             });
             // If we've already rendered the content, see if we need to display a
             // warning about the page's state.
@@ -443,6 +453,9 @@
         }
     });
 
+    var MetaPageEditView = exports.MetaPageEditView = PageEditView.extend({
+    });
+
     var PageHistoryView = exports.PageHistoryView = MasterPageView.extend({
         defaultTemplateSource: tplHistoryPage,
         events: {
--- a/wikked/auth.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/auth.py	Sun Dec 01 21:50:29 2013 -0800
@@ -2,6 +2,9 @@
 import logging
 
 
+logger = logging.getLogger(__name__)
+
+
 class User(object):
     """ A user with an account on the wiki.
     """
@@ -29,10 +32,7 @@
 class UserManager(object):
     """ A class that keeps track of users and their permissions.
     """
-    def __init__(self, config, logger=None):
-        if logger is None:
-            logger = logging.getLogger('wikked.auth')
-        self.logger = logger
+    def __init__(self, config):
         self._updatePermissions(config)
         self._updateUserInfos(config)
 
--- a/wikked/cache.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/cache.py	Sun Dec 01 21:50:29 2013 -0800
@@ -1,5 +1,6 @@
 import os
 import os.path
+import logging
 
 try:
     import simplejson as json
@@ -7,6 +8,9 @@
     import json
 
 
+logger = logging.getLogger(__name__)
+
+
 class Cache(object):
     def __init__(self, root):
         self.cache_dir = root
--- a/wikked/db.py	Wed Nov 27 23:32:45 2013 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,381 +0,0 @@
-import os
-import os.path
-import types
-import string
-import logging
-import datetime
-from sqlalchemy import (
-        and_, or_,
-        Column, Boolean, Integer, String, Text, DateTime, ForeignKey)
-from sqlalchemy.orm import relationship, backref, defer
-from wikked.web import db
-
-
-class Database(object):
-    """ The base class for a database cache.
-    """
-    def __init__(self, logger=None):
-        if logger is None:
-            logger = logging.getLogger('wikked.db')
-        self.logger = logger
-
-    def initDb(self):
-        raise NotImplementedError()
-
-    def open(self):
-        raise NotImplementedError()
-
-    def close(self):
-        raise NotImplementedError()
-
-    def reset(self, pages):
-        raise NotImplementedError()
-
-    def update(self, pages, force=False):
-        raise NotImplementedError()
-
-    def getPageUrls(self, subdir=None):
-        raise NotImplementedError()
-
-    def getPages(self, subdir=None, meta_query=None):
-        raise NotImplementedError()
-
-    def getPage(self, url=None, path=None):
-        raise NotImplementedError()
-
-    def pageExists(self, url=None, path=None):
-        raise NotImplementedError()
-
-    def getLinksTo(self, url):
-        raise NotImplementedError()
-
-
-Base = db.Model
-
-class SQLPage(Base):
-    __tablename__ = 'pages'
-
-    id = Column(Integer, primary_key=True)
-    time = Column(DateTime)
-    url = Column(Text)
-    path = Column(Text)
-    title = Column(Text)
-    raw_text = Column(Text)
-    formatted_text = Column(Text)
-    
-    meta = relationship('SQLMeta', order_by='SQLMeta.id', 
-            backref=backref('page'),
-            cascade='all, delete, delete-orphan')
-    links = relationship('SQLLink', order_by='SQLLink.id', 
-            backref=backref('source'),
-            cascade='all, delete, delete-orphan')
-
-    ready_text = Column(Text)
-    is_ready = Column(Boolean)
-
-    ready_meta = relationship('SQLReadyMeta', order_by='SQLReadyMeta.id',
-            backref=backref('page'),
-            cascade='all, delete, delete-orphan')
-    ready_links = relationship('SQLReadyLink', order_by='SQLReadyLink.id', 
-            backref=backref('source'),
-            cascade='all, delete, delete-orphan')
-
-
-class SQLMeta(Base):
-    __tablename__ = 'meta'
-
-    id = Column(Integer, primary_key=True)
-    page_id = Column(Integer, ForeignKey('pages.id'))
-    name = Column(String(128))
-    value = Column(Text)
-
-    def __init__(self, name=None, value=None):
-        self.name = name
-        self.value = value
-
-
-class SQLReadyMeta(Base):
-    __tablename__ = 'ready_meta'
-
-    id = Column(Integer, primary_key=True)
-    page_id = Column(Integer, ForeignKey('pages.id'))
-    name = Column(String(128))
-    value = Column(Text)
-
-    def __init__(self, name=None, value=None):
-        self.name = name
-        self.value = value
-
-
-class SQLLink(Base):
-    __tablename__ = 'links'
-
-    id = Column(Integer, primary_key=True)
-    source_id = Column(Integer, ForeignKey('pages.id'))
-    target_url = Column(Text)
-
-    def __init__(self, target_url=None):
-        self.target_url = target_url
-
-
-class SQLReadyLink(Base):
-    __tablename__ = 'ready_links'
- 
-    id = Column(Integer, primary_key=True)
-    source_id = Column(Integer, ForeignKey('pages.id'))
-    target_url = Column(Text)
-
-    def __init__(self, target_url=None):
-        self.target_url = target_url
-
-
-class SQLInfo(Base):
-    __tablename__ = 'info'
-
-    id = Column(Integer, primary_key=True)
-    name = Column(String(64))
-    str_value = Column(String(256))
-    int_value = Column(Integer)
-    time_value = Column(DateTime)
-
-
-class SQLDatabase(Database):
-    """ A database cache based on SQL.
-    """
-    schema_version = 3
-
-    def __init__(self, db_path, logger=None):
-        Database.__init__(self, logger)
-        self.db_path = db_path
-
-    def initDb(self):
-        create_schema = False
-        if self.db_path != 'sqlite:///:memory:':
-            if not os.path.exists(os.path.dirname(self.db_path)):
-                # No database on disk... create one.
-                self.logger.debug("Creating SQL database at: %s" % self.db_path)
-                create_schema = True
-            else:
-                # The existing schema is outdated, re-create it.
-                schema_version = self._getSchemaVersion()
-                if schema_version < self.schema_version:
-                    self.logger.debug(
-                            "SQL database is outdated (got version %s), will re-create.",
-                            schema_version)
-                    create_schema = True
-                else:
-                    self.logger.debug(
-                            "SQL database has up-to-date schema.")
-        else:
-            create_schema = True
-        if create_schema:
-            self._createSchema()
-
-    def open(self):
-        self.logger.debug("Opening connection")
-
-    def close(self):
-        self.logger.debug("Closing connection")
-
-    def reset(self, pages):
-        self.logger.debug("Re-creating SQL database.")
-        self._createSchema()
-        for page in pages:
-            self._addPage(page)
-        db.session.commit()
-
-    def update(self, pages, force=False):
-        to_update = set()
-        already_added = set()
-        to_remove = []
-        pages = list(pages)
-
-        self.logger.debug("Updating SQL database...")
-        page_urls = [p.url for p in pages]
-        db_pages = db.session.query(SQLPage).\
-                all()
-        for p in db_pages:
-            if not os.path.isfile(p.path):
-                # File was deleted.
-                to_remove.append(p)
-            else:
-                already_added.add(p.path)
-                path_time = datetime.datetime.fromtimestamp(
-                    os.path.getmtime(p.path))
-                if path_time > p.time or (force and p.url in page_urls):
-                    # File has changed since last index.
-                    to_remove.append(p)
-                    to_update.add(p.path)
-        for p in to_remove:
-            self._removePage(p)
-
-        db.session.commit()
-
-        added_db_objs = []
-        for p in pages:
-            if (p.path in to_update or
-                p.path not in already_added):
-                added_db_objs.append(self._addPage(p))
-
-        db.session.commit()
-
-        if to_remove or added_db_objs:
-            # If pages have been added/removed/updated, invalidate everything
-            # in the wiki that has includes or queries.
-            db_pages = db.session.query(SQLPage).\
-                    options(
-                            defer(SQLPage.title),
-                            defer(SQLPage.raw_text),
-                            defer(SQLPage.formatted_text),
-                            defer(SQLPage.ready_text)).\
-                    join(SQLReadyMeta).\
-                    filter(or_(SQLReadyMeta.name == 'include', SQLReadyMeta.name == 'query')).\
-                    all()
-            for p in db_pages:
-                p.is_ready = False
-            
-            db.session.commit()
-
-        self.logger.debug("...done updating SQL database.")
-        return [o.id for o in added_db_objs]
-
-    def getPageUrls(self, subdir=None):
-        q = db.session.query(SQLPage.url)
-        if subdir:
-            subdir = string.rstrip(subdir, '/') + '/%'
-            q = q.filter(SQLPage.url.like(subdir))
-        urls = []
-        for p in q.all():
-            urls.append(p.url)
-        return urls
-
-    def getPages(self, subdir=None, meta_query=None):
-        q = db.session.query(SQLPage)
-        if meta_query:
-            q = q.join(SQLReadyMeta)
-            for name, values in meta_query.iteritems():
-                for v in values:
-                    q = q.filter(and_(SQLReadyMeta.name == name, SQLReadyMeta.value == v))
-        if subdir:
-            subdir = string.rstrip(subdir, '/') + '/%'
-            q = q.filter(SQLPage.url.like(subdir))
-        pages = []
-        for p in q.all():
-            pages.append(p)
-        return pages
-
-    def getPage(self, url=None, path=None):
-        if not url and not path:
-            raise ValueError("Either URL or path need to be specified.")
-        if url and path:
-            raise ValueError("Can't specify both URL and path.")
-        if url:
-            q = db.session.query(SQLPage).filter_by(url=url)
-            page = q.first()
-            return page
-        if path:
-            q = db.session.query(SQLPage).filter_by(path=path)
-            page = q.first()
-            return page
-
-    def pageExists(self, url=None, path=None):
-        return self.getPage(url, path) is not None
-
-    def getLinksTo(self, url):
-        q = db.session.query(SQLReadyLink).\
-                filter(SQLReadyLink.target_url == url).\
-                join(SQLReadyLink.source).\
-                all()
-        for l in q:
-            yield l.source.url
-
-    def getUncachedPages(self):
-        q = db.session.query(SQLPage).\
-                filter(SQLPage.is_ready == False).\
-                all()
-        for p in q:
-            yield p
-
-    def _createSchema(self):
-        db.drop_all()
-        db.create_all()
-
-        ver = SQLInfo()
-        ver.name = 'schema_version'
-        ver.int_value = self.schema_version
-        db.session.add(ver)
-        db.session.commit()
-
-    def _getSchemaVersion(self):
-        try:
-            q = db.session.query(SQLInfo).\
-                    filter(SQLInfo.name == 'schema_version').\
-                    first()
-            if q is None:
-                return 0
-        except:
-            return -1
-        return q.int_value
-
-    def _addPage(self, page):
-        self.logger.debug("Adding page '%s' to SQL database." % page.url)
-
-        po = SQLPage()
-        po.time = datetime.datetime.now()
-        po.url = page.url
-        po.path = page.path
-        po.title = page.title
-        po.raw_text = page.raw_text
-        po.formatted_text = page.getFormattedText()
-        po.ready_text = None
-        po.is_ready = False
-
-        for name, value in page.getLocalMeta().iteritems():
-            if isinstance(value, bool):
-                value = ""
-            if isinstance(value, types.StringTypes):
-                po.meta.append(SQLMeta(name, value))
-            else:
-                for v in value:
-                    po.meta.append(SQLMeta(name, v))
-
-        for link_url in page.getLocalLinks():
-            po.links.append(SQLLink(link_url))
-
-        db.session.add(po)
-
-        return po
-
-    def _cacheExtendedData(self, page):
-        self.logger.debug("Caching extended data for page '%s' [%d]." % (page.url, page._id))
-
-        if not hasattr(page, '_id') or not page._id:
-            raise Exception("Given page '%s' has no `_id` attribute set." % page.url)
-        db_obj = db.session.query(SQLPage).filter(SQLPage.id == page._id).one()
-
-        db_obj.ready_text = page._data.text
-
-        del db_obj.ready_meta[:]
-        for name, value in page._data.ext_meta.iteritems():
-            if isinstance(value, bool):
-                value = ""
-            if isinstance(value, types.StringTypes):
-                db_obj.ready_meta.append(SQLReadyMeta(name, value))
-            else:
-                for v in value:
-                    db_obj.ready_meta.append(SQLReadyMeta(name, v))
-
-        del db_obj.ready_links[:]
-        for link_url in page._data.ext_links:
-            db_obj.ready_links.append(SQLReadyLink(link_url))
-
-        db_obj.is_ready = True
-
-        db.session.commit()
-
-
-    def _removePage(self, page):
-        self.logger.debug("Removing page '%s' [%d] from SQL database." %
-            (page.url, page.id))
-        db.session.delete(page)
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/wikked/db/base.py	Sun Dec 01 21:50:29 2013 -0800
@@ -0,0 +1,38 @@
+
+
+class Database(object):
+    """ The base class for a database cache.
+    """
+    def __init__(self):
+        pass
+
+    def initDb(self, wiki):
+        raise NotImplementedError()
+
+    def open(self):
+        raise NotImplementedError()
+
+    def close(self):
+        raise NotImplementedError()
+
+    def reset(self, pages):
+        raise NotImplementedError()
+
+    def update(self, pages, force=False):
+        raise NotImplementedError()
+
+    def getPageUrls(self, subdir=None):
+        raise NotImplementedError()
+
+    def getPages(self, subdir=None, meta_query=None):
+        raise NotImplementedError()
+
+    def getPage(self, url=None, path=None):
+        raise NotImplementedError()
+
+    def pageExists(self, url=None, path=None):
+        raise NotImplementedError()
+
+    def getLinksTo(self, url):
+        raise NotImplementedError()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/wikked/db/sql.py	Sun Dec 01 21:50:29 2013 -0800
@@ -0,0 +1,468 @@
+import os
+import os.path
+import types
+import string
+import logging
+import datetime
+from sqlalchemy import (
+        create_engine,
+        and_, or_,
+        Column, Boolean, Integer, String, Text, DateTime, ForeignKey)
+from sqlalchemy.orm import (
+        scoped_session, sessionmaker,
+        relationship, backref, defer)
+from sqlalchemy.ext.declarative import declarative_base
+from base import Database
+from wikked.page import Page, FileSystemPage, PageData, PageLoadingError
+from wikked.formatter import SINGLE_METAS
+from wikked.utils import PageNotFoundError
+
+
+logger = logging.getLogger(__name__)
+
+
+Base = declarative_base()
+
+
+class SQLPage(Base):
+    __tablename__ = 'pages'
+
+    id = Column(Integer, primary_key=True)
+    time = Column(DateTime)
+    url = Column(Text)
+    path = Column(Text)
+    title = Column(Text)
+    raw_text = Column(Text)
+    formatted_text = Column(Text)
+    
+    meta = relationship('SQLMeta', order_by='SQLMeta.id', 
+            backref=backref('page'),
+            cascade='all, delete, delete-orphan')
+    links = relationship('SQLLink', order_by='SQLLink.id', 
+            backref=backref('source'),
+            cascade='all, delete, delete-orphan')
+
+    ready_text = Column(Text)
+    is_ready = Column(Boolean)
+
+    ready_meta = relationship('SQLReadyMeta', order_by='SQLReadyMeta.id',
+            backref=backref('page'),
+            cascade='all, delete, delete-orphan')
+    ready_links = relationship('SQLReadyLink', order_by='SQLReadyLink.id', 
+            backref=backref('source'),
+            cascade='all, delete, delete-orphan')
+
+
+class SQLMeta(Base):
+    __tablename__ = 'meta'
+
+    id = Column(Integer, primary_key=True)
+    page_id = Column(Integer, ForeignKey('pages.id'))
+    name = Column(String(128))
+    value = Column(Text)
+
+    def __init__(self, name=None, value=None):
+        self.name = name
+        self.value = value
+
+
+class SQLReadyMeta(Base):
+    __tablename__ = 'ready_meta'
+
+    id = Column(Integer, primary_key=True)
+    page_id = Column(Integer, ForeignKey('pages.id'))
+    name = Column(String(128))
+    value = Column(Text)
+
+    def __init__(self, name=None, value=None):
+        self.name = name
+        self.value = value
+
+
+class SQLLink(Base):
+    __tablename__ = 'links'
+
+    id = Column(Integer, primary_key=True)
+    source_id = Column(Integer, ForeignKey('pages.id'))
+    target_url = Column(Text)
+
+    def __init__(self, target_url=None):
+        self.target_url = target_url
+
+
+class SQLReadyLink(Base):
+    __tablename__ = 'ready_links'
+ 
+    id = Column(Integer, primary_key=True)
+    source_id = Column(Integer, ForeignKey('pages.id'))
+    target_url = Column(Text)
+
+    def __init__(self, target_url=None):
+        self.target_url = target_url
+
+
+class SQLInfo(Base):
+    __tablename__ = 'info'
+
+    id = Column(Integer, primary_key=True)
+    name = Column(String(64))
+    str_value = Column(String(256))
+    int_value = Column(Integer)
+    time_value = Column(DateTime)
+
+
+class SQLDatabase(Database):
+    """ A database cache based on SQL.
+    """
+    schema_version = 3
+
+    def __init__(self, db_path):
+        Database.__init__(self)
+        self.db_path = db_path
+        self.engine = None
+
+    def initDb(self, wiki):
+        self.wiki = wiki
+
+        engine_url = 'sqlite:///' + self.db_path
+        self.engine = create_engine(engine_url, convert_unicode=True)
+        self.session = scoped_session(sessionmaker(
+                autocommit=False,
+                autoflush=False,
+                bind=self.engine))
+
+        Base.query = self.session.query_property()
+
+        create_schema = False
+        if self.db_path != 'sqlite:///:memory:':
+            if not os.path.exists(os.path.dirname(self.db_path)):
+                # No database on disk... create one.
+                logger.debug("Creating SQL database at: %s" % self.db_path)
+                create_schema = True
+            else:
+                # The existing schema is outdated, re-create it.
+                schema_version = self._getSchemaVersion()
+                if schema_version < self.schema_version:
+                    logger.debug(
+                            "SQL database is outdated (got version %s), will re-create.",
+                            schema_version)
+                    create_schema = True
+                else:
+                    logger.debug(
+                            "SQL database has up-to-date schema.")
+        else:
+            create_schema = True
+        if create_schema:
+            self._createSchema()
+
+    def open(self):
+        logger.debug("Opening connection")
+
+    def close(self):
+        logger.debug("Closing connection")
+
+    def reset(self, pages):
+        logger.debug("Re-creating SQL database.")
+        self._createSchema()
+        for page in pages:
+            self._addPage(page)
+        self.session.commit()
+
+    def update(self, pages, force=False):
+        to_update = set()
+        already_added = set()
+        to_remove = []
+        pages = list(pages)
+
+        logger.debug("Updating SQL database...")
+        page_urls = [p.url for p in pages]
+        db_pages = self.session.query(SQLPage).\
+                all()
+        for p in db_pages:
+            if not os.path.isfile(p.path):
+                # File was deleted.
+                to_remove.append(p)
+            else:
+                already_added.add(p.path)
+                path_time = datetime.datetime.fromtimestamp(
+                    os.path.getmtime(p.path))
+                if path_time > p.time or (force and p.url in page_urls):
+                    # File has changed since last index.
+                    to_remove.append(p)
+                    to_update.add(p.path)
+        for p in to_remove:
+            self._removePage(p)
+
+        self.session.commit()
+
+        added_db_objs = []
+        for p in pages:
+            if (p.path in to_update or
+                p.path not in already_added):
+                added_db_objs.append(self._addPage(p))
+
+        self.session.commit()
+
+        if to_remove or added_db_objs:
+            # If pages have been added/removed/updated, invalidate everything
+            # in the wiki that has includes or queries.
+            db_pages = self.session.query(SQLPage).\
+                    options(
+                            defer(SQLPage.title),
+                            defer(SQLPage.raw_text),
+                            defer(SQLPage.formatted_text),
+                            defer(SQLPage.ready_text)).\
+                    join(SQLReadyMeta).\
+                    filter(or_(SQLReadyMeta.name == 'include', SQLReadyMeta.name == 'query')).\
+                    all()
+            for p in db_pages:
+                p.is_ready = False
+            
+            self.session.commit()
+
+        logger.debug("...done updating SQL database.")
+        return [o.id for o in added_db_objs]
+
+    def getPageUrls(self, subdir=None):
+        q = self.session.query(SQLPage.url)
+        if subdir:
+            subdir = string.rstrip(subdir, '/') + '/%'
+            q = q.filter(SQLPage.url.like(subdir))
+        urls = []
+        for p in q.all():
+            urls.append(p.url)
+        return urls
+
+    def getPages(self, subdir=None, meta_query=None):
+        q = self.session.query(SQLPage)
+        if meta_query:
+            q = q.join(SQLReadyMeta)
+            for name, values in meta_query.iteritems():
+                for v in values:
+                    q = q.filter(and_(SQLReadyMeta.name == name, SQLReadyMeta.value == v))
+        if subdir:
+            subdir = string.rstrip(subdir, '/') + '/%'
+            q = q.filter(SQLPage.url.like(subdir))
+        for p in q.all():
+            yield SQLDatabasePage(self.wiki, db_obj=p)
+
+    def getPage(self, url=None, path=None, raise_if_none=True):
+        if not url and not path:
+            raise ValueError("Either URL or path need to be specified.")
+        if url and path:
+            raise ValueError("Can't specify both URL and path.")
+        if url:
+            q = self.session.query(SQLPage).filter_by(url=url)
+            page = q.first()
+            if page is None:
+                if raise_if_none:
+                    raise PageNotFoundError(url)
+                return None
+            return SQLDatabasePage(self.wiki, db_obj=page)
+        if path:
+            q = self.session.query(SQLPage).filter_by(path=path)
+            page = q.first()
+            if page is None:
+                if raise_if_none:
+                    raise PageNotFoundError(path)
+                return None
+            return SQLDatabasePage(self.wiki, db_obj=page)
+        raise NotImplementedError()
+
+    def pageExists(self, url=None, path=None):
+        # TODO: replace with an `EXIST` query.
+        return self.getPage(url, path, raise_if_none=False) is not None
+
+    def getLinksTo(self, url):
+        q = self.session.query(SQLReadyLink).\
+                filter(SQLReadyLink.target_url == url).\
+                join(SQLReadyLink.source).\
+                all()
+        for l in q:
+            yield l.source.url
+
+    def getUncachedPages(self):
+        q = self.session.query(SQLPage).\
+                filter(SQLPage.is_ready == False).\
+                all()
+        for p in q:
+            yield SQLDatabasePage(self.wiki, db_obj=p)
+
+    def _createSchema(self):
+        Base.metadata.drop_all(self.engine)
+        Base.metadata.create_all(self.engine)
+
+        ver = SQLInfo()
+        ver.name = 'schema_version'
+        ver.int_value = self.schema_version
+        self.session.add(ver)
+        self.session.commit()
+
+    def _getSchemaVersion(self):
+        try:
+            q = self.session.query(SQLInfo).\
+                    filter(SQLInfo.name == 'schema_version').\
+                    first()
+            if q is None:
+                return 0
+        except Exception:
+            return -1
+        return q.int_value
+
+    def _addPage(self, page):
+        logger.debug("Adding page '%s' to SQL database." % page.url)
+
+        po = SQLPage()
+        po.time = datetime.datetime.now()
+        po.url = page.url
+        po.path = page.path
+        po.title = page.title
+        po.raw_text = page.raw_text
+        po.formatted_text = page.getFormattedText()
+        po.ready_text = None
+        po.is_ready = False
+
+        for name, value in page.getLocalMeta().iteritems():
+            if isinstance(value, bool):
+                value = ""
+            if isinstance(value, types.StringTypes):
+                po.meta.append(SQLMeta(name, value))
+            else:
+                for v in value:
+                    po.meta.append(SQLMeta(name, v))
+
+        for link_url in page.getLocalLinks():
+            po.links.append(SQLLink(link_url))
+
+        self.session.add(po)
+
+        return po
+
+    def _cacheExtendedData(self, page):
+        if not hasattr(page, '_id') or not page._id:
+            raise Exception("Given page '%s' has no `_id` attribute set." % page.url)
+        logger.debug("Caching extended data for page '%s' [%d]." % (page.url, page._id))
+
+        db_obj = self.session.query(SQLPage).filter(SQLPage.id == page._id).one()
+
+        db_obj.ready_text = page._data.text
+
+        del db_obj.ready_meta[:]
+        for name, value in page._data.ext_meta.iteritems():
+            if isinstance(value, bool):
+                value = ""
+            if isinstance(value, types.StringTypes):
+                db_obj.ready_meta.append(SQLReadyMeta(name, value))
+            else:
+                for v in value:
+                    db_obj.ready_meta.append(SQLReadyMeta(name, v))
+
+        del db_obj.ready_links[:]
+        for link_url in page._data.ext_links:
+            db_obj.ready_links.append(SQLReadyLink(link_url))
+
+        db_obj.is_ready = True
+
+        self.session.commit()
+
+
+    def _removePage(self, page):
+        logger.debug("Removing page '%s' [%d] from SQL database." %
+            (page.url, page.id))
+        self.session.delete(page)
+
+
+class SQLDatabasePage(Page):
+    """ A page that can load its properties from a database.
+    """
+    def __init__(self, wiki, url=None, db_obj=None):
+        if url and db_obj:
+            raise Exception("You can't specify both an url and a database object.")
+        if not url and not db_obj:
+            raise Exception("You need to specify either a url or a database object.")
+
+        super(SQLDatabasePage, self).__init__(wiki, url or db_obj.url)
+        self.auto_update = wiki.config.getboolean('wiki', 'auto_update')
+        self._db_obj = db_obj
+
+    @property
+    def path(self):
+        if self._db_obj:
+            return self._db_obj.path
+        return super(SQLDatabasePage, self).path
+
+    @property
+    def _id(self):
+        if self._db_obj:
+            return self._db_obj.id
+        self._ensureData()
+        return self._data._db_id
+
+    def _loadData(self):
+        try:
+            db_obj = self._db_obj or self.wiki.db.getPage(self.url)
+        except PageNotFoundError:
+            raise PageNotFoundError(self.url, "Please run `update` or `reset`.")
+        data = self._loadFromDbObject(db_obj)
+        self._db_obj = None
+        return data
+
+    def _onExtendedDataLoaded(self):
+        self.wiki.db._cacheExtendedData(self)
+
+    def _loadFromDbObject(self, db_obj, bypass_auto_update=False):
+        if not bypass_auto_update and self.auto_update:
+            path_time = datetime.datetime.fromtimestamp(
+                os.path.getmtime(db_obj.path))
+            if path_time >= db_obj.time:
+                logger.debug(
+                    "Updating database cache for page '%s'." % self.url)
+                try:
+                    fs_page = FileSystemPage(self.wiki, self.url)
+                    fs_page._ensureData()
+                    added_ids = self.wiki.db.update([fs_page])
+                    fs_page._data._db_id = added_ids[0]
+                    return fs_page._data
+                except Exception as e:
+                    msg = "Error updating database cache from the file-system: %s" % e
+                    raise PageLoadingError(msg, e)
+
+        data = PageData()
+        data._db_id = db_obj.id
+        data.path = db_obj.path
+        split = os.path.splitext(data.path)
+        data.filename = split[0]
+        data.extension = split[1].lstrip('.')
+        data.title = db_obj.title
+        data.raw_text = db_obj.raw_text
+        data.formatted_text = db_obj.formatted_text
+
+        data.local_meta = {}
+        for m in db_obj.meta:
+            value = data.local_meta.get(m.name)
+            if m.name in SINGLE_METAS:
+                data.local_meta[m.name] = m.value
+            else:
+                if value is None:
+                    data.local_meta[m.name] = [m.value]
+                else:
+                    data.local_meta[m.name].append(m.value)
+
+        data.local_links = [l.target_url for l in db_obj.links]
+
+        if db_obj.is_ready and not self._force_resolve:
+            # If we have extended cache data from the database, we might as
+            # well load it now too.
+            data.text = db_obj.ready_text
+            for m in db_obj.ready_meta:
+                value = data.ext_meta.get(m.name)
+                if value is None:
+                    data.ext_meta[m.name] = [m.value]
+                else:
+                    data.ext_meta[m.name].append(m.value)
+            data.ext_links = [l.target_url for l in db_obj.ready_links]
+            # Flag this data as completely loaded.
+            data.has_extended_data = True
+
+        return data
+
--- a/wikked/formatter.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/formatter.py	Sun Dec 01 21:50:29 2013 -0800
@@ -1,6 +1,7 @@
 import os
 import os.path
 import re
+import logging
 import jinja2
 from StringIO import StringIO
 from utils import get_meta_name_and_modifiers, html_escape
@@ -11,6 +12,9 @@
 FILE_FORMAT_REGEX = re.compile(r'\r\n?', re.MULTILINE)
 
 
+logger = logging.getLogger(__name__)
+
+
 class BaseContext(object):
     """ Base context for formatting pages. """
     def __init__(self, url):
@@ -197,7 +201,8 @@
         return '<a class="wiki-asset" href="%s">%s</a>' % (abs_url, display)
 
     def _formatMetaLink(self, ctx, endpoint, value, display):
-        ctx.out_links.append("%s:%s" % (endpoint, value))
+        meta_url = '%s:%s' % (endpoint, value)
+        ctx.out_links.append(meta_url)
         return '<a class="wiki-meta-link" data-wiki-meta="%s" data-wiki-value="%s">%s</a>' % (endpoint, value, display)
 
     def _formatWikiLink(self, ctx, display, url):
--- a/wikked/fs.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/fs.py	Sun Dec 01 21:50:29 2013 -0800
@@ -5,12 +5,16 @@
 import codecs
 import fnmatch
 import logging
-from utils import PageNotFoundError, title_to_url, path_to_url
+import itertools
+from utils import PageNotFoundError, NamespaceNotFoundError
 
 
 META_ENDPOINT = '_meta'
 
 
+logger = logging.getLogger(__name__)
+
+
 class PageInfo(object):
     def __init__(self, url, path):
         self.url = url
@@ -30,15 +34,21 @@
         file-system paths, and for scanning the file-system
         to list existing pages.
     """
-    def __init__(self, root, logger=None):
+    def __init__(self, root):
         self.root = unicode(root)
 
-        if logger is None:
-            logger = logging.getLogger('wikked.fs')
-        self.logger = logger
-
         self.excluded = []
         self.page_extensions = None
+        self.default_extension = 'txt'
+
+    def initFs(self, wiki):
+        self.page_extensions = list(set(
+            itertools.chain(*wiki.formatters.itervalues())))
+
+        self.excluded += wiki.parameters.getSpecialFilenames()
+        self.excluded += wiki.scm.getSpecialFilenames()
+
+        self.default_extension = wiki.config.get('wiki', 'default_extension')
 
     def getPageInfos(self, subdir=None):
         basepath = self.root
@@ -66,7 +76,8 @@
         return PageInfo(url, path)
 
     def setPage(self, url, content):
-        path = self.getPhysicalPagePath(url)
+        path = self.getPhysicalPagePath(url, make_new=True)
+        logger.debug("Saving page '%s' to: %s" % (url, path))
         with codecs.open(path, 'w', encoding='utf-8') as f:
             f.write(content)
         return PageInfo(url, path)
@@ -78,11 +89,11 @@
         except PageNotFoundError:
             return False
 
-    def getPhysicalPagePath(self, url):
-        return self._getPhysicalPath(url, True)
+    def getPhysicalPagePath(self, url, make_new=False):
+        return self._getPhysicalPath(url, is_file=True, make_new=make_new)
 
-    def getPhysicalNamespacePath(self, url):
-        return self._getPhysicalPath(url, False)
+    def getPhysicalNamespacePath(self, url, make_new=False):
+        return self._getPhysicalPath(url, is_file=False, make_new=make_new)
 
     def _getPageInfo(self, path):
         meta = None
@@ -98,12 +109,12 @@
         if self.page_extensions is not None and ext not in self.page_extensions:
             return None
 
-        url = path_to_url(unicode(name), strip_ext=True)
+        url = '/' + name
         if meta:
-            url = u"%s:%s" % (meta.lower(), url)
+            url = u"%s:/%s" % (meta.lower(), name)
         return PageInfo(url, path)
 
-    def _getPhysicalPath(self, url, is_file):
+    def _getPhysicalPath(self, url, is_file=True, make_new=False):
         endpoint = None
         m = re.match(r'(\w[\w\d]+)\:(.*)', url)
         if m:
@@ -117,45 +128,37 @@
 
         # Find the root directory in which we'll be searching for the
         # page file.
-        skip_endpoint = True
+        root = self.root
         if endpoint:
-            # Find the endpoint that gets transformed to the value
-            # we see in the URL.
-            endpoint_root = os.path.join(self.root, META_ENDPOINT)
-            names = os.listdir(endpoint_root)
-            for name in names:
-                name_formatted = title_to_url(name)
-                if name_formatted == endpoint:
-                    current = os.path.join(endpoint_root, name)
-                    break
-            else:
-                raise PageNotFoundError("No such meta endpoint: %s" % endpoint)
-            skip_endpoint = False
-        else:
-            current = self.root
+            root = os.path.join(self.root, META_ENDPOINT, endpoint)
+
+        # Make the URL into a relative file-system path.
+        url_path = url[1:].replace('/', os.sep)
+
+        # If we want a non-existing file's path, just build that.
+        if make_new:
+            return os.path.join(root, url_path + '.' + self.default_extension)
 
-        # For each "part" in the given URL, find the first
-        # file-system entry that would get slugified to an
-        # equal string.
-        parts = unicode(url[1:]).lower().split('/')
-        for i, part in enumerate(parts):
-            names = os.listdir(current)
-            for name in names:
-                if skip_endpoint and i == 0 and name == META_ENDPOINT:
-                    continue
-                name_formatted = title_to_url(name)
-                if is_file and i == len(parts) - 1:
-                    # If we're looking for a file and this is the last part,
-                    # look for something similar but with an extension.
-                    if re.match(r"%s\.[a-z]+" % re.escape(part), name_formatted):
-                        current = os.path.join(current, name)
-                        break
-                else:
-                    if name_formatted == part:
-                        current = os.path.join(current, name)
-                        break
-            else:
-                # Failed to find a part of the URL.
-                raise PageNotFoundError("No such page: %s" % url)
-        return current
+        # Find the right file-system entry for this URL.
+        url_path = os.path.join(root, url_path)
+        if is_file:
+            dirname, basename = os.path.split(url_path)
+            if not os.path.isdir(dirname):
+                self._throwNotFoundError(url, root, is_file)
+            filenames = os.listdir(dirname)
+            for filename in filenames:
+                name, ext = os.path.splitext(filename)
+                if name == basename:
+                    return os.path.join(dirname, filename)
+            self._throwNotFoundError(url, root, is_file)
+        else:
+            if os.path.isdir(url_path):
+                return url_path
+            self._throwNotFoundError(url, root, is_file)
 
+    def _throwNotFoundError(self, url, searched, is_file):
+        if is_file:
+            raise PageNotFoundError("No such page '%s' in: %s" % (url, searched))
+        else:
+            raise NamespaceNotFoundError("No such namespace '%s' in: %s" % (url, searched))
+
--- a/wikked/indexer.py	Wed Nov 27 23:32:45 2013 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,122 +0,0 @@
-import os
-import os.path
-import codecs
-import logging
-from whoosh.index import create_in, open_dir
-from whoosh.fields import Schema, ID, TEXT, STORED
-from whoosh.qparser import QueryParser
-
-
-class WikiIndex(object):
-    def __init__(self, logger=None):
-        self.logger = logger
-        if logger is None:
-            self.logger = logging.getLogger('wikked.index')
-
-    def initIndex(self):
-        raise NotImplementedError()
-
-    def reset(self, pages):
-        raise NotImplementedError()
-
-    def update(self, pages):
-        raise NotImplementedError()
-
-    def search(self, query):
-        raise NotImplementedError()
-
-
-class WhooshWikiIndex(WikiIndex):
-    def __init__(self, store_dir, logger=None):
-        WikiIndex.__init__(self, logger)
-        self.store_dir = store_dir
-
-    def initIndex(self):
-        if not os.path.isdir(self.store_dir):
-            self.logger.debug("Creating new index in: " + self.store_dir)
-            os.makedirs(self.store_dir)
-            self.ix = create_in(self.store_dir, self._getSchema())
-        else:
-            self.ix = open_dir(self.store_dir)
-
-    def reset(self, pages):
-        self.logger.debug("Re-creating new index in: " + self.store_dir)
-        self.ix = create_in(self.store_dir, schema=self._getSchema())
-        writer = self.ix.writer()
-        for page in pages:
-            self._indexPage(writer, page)
-        writer.commit()
-
-    def update(self, pages):
-        self.logger.debug("Updating index...")
-        to_reindex = set()
-        already_indexed = set()
-
-        with self.ix.searcher() as searcher:
-            writer = self.ix.writer()
-
-            for fields in searcher.all_stored_fields():
-                indexed_url = fields['url']
-                indexed_path = fields['path']
-                indexed_time = fields['time']
-
-                if not os.path.isfile(indexed_path):
-                    # File was deleted.
-                    self._unindexPage(writer, indexed_url)
-                else:
-                    already_indexed.add(indexed_path)
-                    if os.path.getmtime(indexed_path) > indexed_time:
-                        # File has changed since last index.
-                        self._unindexPage(writer, indexed_url)
-                        to_reindex.add(indexed_path)
-
-            for page in pages:
-                if page.path in to_reindex or page.path not in already_indexed:
-                    self._indexPage(writer, page)
-
-            writer.commit()
-        self.logger.debug("...done updating index.")
-
-    def search(self, query):
-        with self.ix.searcher() as searcher:
-            title_qp = QueryParser("title", self.ix.schema).parse(query)
-            content_qp = QueryParser("content", self.ix.schema).parse(query)
-            comp_query = title_qp | content_qp
-            results = searcher.search(comp_query)
-
-            page_infos = []
-            for hit in results:
-                page_info = {
-                        'title': hit['title'],
-                        'url': hit['url']
-                        }
-                page_info['title_highlights'] = hit.highlights('title')
-                with codecs.open(hit['path'], 'r', encoding='utf-8') as f:
-                    content = f.read()
-                page_info['content_highlights'] = hit.highlights('content', text=content)
-                page_infos.append(page_info)
-            return page_infos
-
-    def _getSchema(self):
-        schema = Schema(
-                url=ID(stored=True),
-                title=TEXT(stored=True),
-                content=TEXT,
-                path=STORED,
-                time=STORED
-                )
-        return schema
-
-    def _indexPage(self, writer, page):
-        self.logger.debug("Indexing '%s'." % page.url)
-        writer.add_document(
-            url=unicode(page.url),
-            title=unicode(page.title),
-            content=unicode(page.raw_text),
-            path=page.path,
-            time=os.path.getmtime(page.path)
-            )
-
-    def _unindexPage(self, writer, url):
-        self.logger.debug("Removing '%s' from index." % url)
-        writer.delete_by_term('url', url)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/wikked/indexer/base.py	Sun Dec 01 21:50:29 2013 -0800
@@ -0,0 +1,18 @@
+
+
+class WikiIndex(object):
+    def __init__(self):
+        pass
+
+    def initIndex(self, wiki):
+        raise NotImplementedError()
+
+    def reset(self, pages):
+        raise NotImplementedError()
+
+    def update(self, pages):
+        raise NotImplementedError()
+
+    def search(self, query):
+        raise NotImplementedError()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/wikked/indexer/native.py	Sun Dec 01 21:50:29 2013 -0800
@@ -0,0 +1,108 @@
+import os
+import os.path
+import codecs
+import logging
+from base import WikiIndex
+from whoosh.index import create_in, open_dir
+from whoosh.fields import Schema, ID, TEXT, STORED
+from whoosh.qparser import QueryParser
+
+
+logger = logging.getLogger(__name__)
+
+
+class WhooshWikiIndex(WikiIndex):
+    def __init__(self, store_dir):
+        WikiIndex.__init__(self)
+        self.store_dir = store_dir
+
+    def initIndex(self, wiki):
+        if not os.path.isdir(self.store_dir):
+            logger.debug("Creating new index in: " + self.store_dir)
+            os.makedirs(self.store_dir)
+            self.ix = create_in(self.store_dir, self._getSchema())
+        else:
+            self.ix = open_dir(self.store_dir)
+
+    def reset(self, pages):
+        logger.debug("Re-creating new index in: " + self.store_dir)
+        self.ix = create_in(self.store_dir, schema=self._getSchema())
+        writer = self.ix.writer()
+        for page in pages:
+            self._indexPage(writer, page)
+        writer.commit()
+
+    def update(self, pages):
+        logger.debug("Updating index...")
+        to_reindex = set()
+        already_indexed = set()
+
+        with self.ix.searcher() as searcher:
+            writer = self.ix.writer()
+
+            for fields in searcher.all_stored_fields():
+                indexed_url = fields['url']
+                indexed_path = fields['path']
+                indexed_time = fields['time']
+
+                if not os.path.isfile(indexed_path):
+                    # File was deleted.
+                    self._unindexPage(writer, indexed_url)
+                else:
+                    already_indexed.add(indexed_path)
+                    if os.path.getmtime(indexed_path) > indexed_time:
+                        # File has changed since last index.
+                        self._unindexPage(writer, indexed_url)
+                        to_reindex.add(indexed_path)
+
+            for page in pages:
+                if page.path in to_reindex or page.path not in already_indexed:
+                    self._indexPage(writer, page)
+
+            writer.commit()
+        logger.debug("...done updating index.")
+
+    def search(self, query):
+        with self.ix.searcher() as searcher:
+            title_qp = QueryParser("title", self.ix.schema).parse(query)
+            content_qp = QueryParser("content", self.ix.schema).parse(query)
+            comp_query = title_qp | content_qp
+            results = searcher.search(comp_query)
+
+            page_infos = []
+            for hit in results:
+                page_info = {
+                        'title': hit['title'],
+                        'url': hit['url']
+                        }
+                page_info['title_highlights'] = hit.highlights('title')
+                with codecs.open(hit['path'], 'r', encoding='utf-8') as f:
+                    content = f.read()
+                page_info['content_highlights'] = hit.highlights('content', text=content)
+                page_infos.append(page_info)
+            return page_infos
+
+    def _getSchema(self):
+        schema = Schema(
+                url=ID(stored=True),
+                title=TEXT(stored=True),
+                content=TEXT,
+                path=STORED,
+                time=STORED
+                )
+        return schema
+
+    def _indexPage(self, writer, page):
+        logger.debug("Indexing '%s'." % page.url)
+        writer.add_document(
+            url=unicode(page.url),
+            title=unicode(page.title),
+            content=unicode(page.raw_text),
+            path=page.path,
+            time=os.path.getmtime(page.path)
+            )
+
+    def _unindexPage(self, writer, url):
+        logger.debug("Removing '%s' from index." % url)
+        writer.delete_by_term('url', url)
+
--- a/wikked/page.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/page.py	Sun Dec 01 21:50:29 2013 -0800
@@ -1,11 +1,13 @@
 import os
 import os.path
 import re
-import datetime
+import logging
 import jinja2
-from formatter import PageFormatter, FormattingContext, SINGLE_METAS
+from formatter import PageFormatter, FormattingContext
 from resolver import PageResolver, CircularIncludeError
-from utils import PageNotFoundError
+
+
+logger = logging.getLogger(__name__)
 
 
 class PageLoadingError(Exception):
@@ -211,97 +213,3 @@
         for p in page_infos:
             yield FileSystemPage(wiki, page_info=p)
 
-
-class DatabasePage(Page):
-    """ A page that can load its properties from a database.
-    """
-    def __init__(self, wiki, url=None, db_obj=None):
-        if url and db_obj:
-            raise Exception("You can't specify both an url and a database object.")
-        if not url and not db_obj:
-            raise Exception("You need to specify either a url or a database object.")
-
-        super(DatabasePage, self).__init__(wiki, url or db_obj.url)
-        self.auto_update = wiki.config.get('wiki', 'auto_update')
-        self._db_obj = db_obj
-
-    @property
-    def path(self):
-        if self._db_obj:
-            return self._db_obj.path
-        return super(DatabasePage, self).path
-
-    @property
-    def _id(self):
-        if self._db_obj:
-            return self._db_obj.id
-        self._ensureData()
-        return self._data._db_id
-
-    def _loadData(self):
-        db_obj = self._db_obj or self.wiki.db.getPage(self.url)
-        if db_obj is None:
-            raise PageNotFoundError("Can't find page '%s' in the database. Please run `update` or `reset`." % self.url)
-        data = self._loadFromDbObject(db_obj)
-        self._db_obj = None
-        return data
-
-    def _onExtendedDataLoaded(self):
-        self.wiki.db._cacheExtendedData(self)
-
-    def _loadFromDbObject(self, db_obj, bypass_auto_update=False):
-        if not bypass_auto_update and self.auto_update:
-            path_time = datetime.datetime.fromtimestamp(
-                os.path.getmtime(db_obj.path))
-            if path_time >= db_obj.time:
-                self.wiki.logger.debug(
-                    "Updating database cache for page '%s'." % self.url)
-                try:
-                    fs_page = FileSystemPage(self.wiki, self.url)
-                    fs_page._ensureData()
-                    added_ids = self.wiki.db.update([fs_page])
-                    fs_page._data._db_id = added_ids[0]
-                    return fs_page._data
-                except Exception as e:
-                    msg = "Error updating database cache from the file-system: %s" % e
-                    raise PageLoadingError(msg, e)
-
-        data = PageData()
-        data._db_id = db_obj.id
-        data.path = db_obj.path
-        split = os.path.splitext(data.path)
-        data.filename = split[0]
-        data.extension = split[1].lstrip('.')
-        data.title = db_obj.title
-        data.raw_text = db_obj.raw_text
-        data.formatted_text = db_obj.formatted_text
-
-        data.local_meta = {}
-        for m in db_obj.meta:
-            value = data.local_meta.get(m.name)
-            if m.name in SINGLE_METAS:
-                data.local_meta[m.name] = m.value
-            else:
-                if value is None:
-                    data.local_meta[m.name] = [m.value]
-                else:
-                    data.local_meta[m.name].append(m.value)
-
-        data.local_links = [l.target_url for l in db_obj.links]
-
-        if db_obj.is_ready and not self._force_resolve:
-            # If we have extended cache data from the database, we might as
-            # well load it now too.
-            data.text = db_obj.ready_text
-            for m in db_obj.ready_meta:
-                value = data.ext_meta.get(m.name)
-                if value is None:
-                    data.ext_meta[m.name] = [m.value]
-                else:
-                    data.ext_meta[m.name].append(m.value)
-            data.ext_links = [l.target_url for l in db_obj.ready_links]
-            # Flag this data as completely loaded.
-            data.has_extended_data = True
-
-        return data
-
--- a/wikked/resolver.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/resolver.py	Sun Dec 01 21:50:29 2013 -0800
@@ -1,8 +1,15 @@
 import re
+import urllib
 import os.path
+import logging
 import jinja2
-from utils import (get_meta_name_and_modifiers, namespace_title_to_url,
-        get_absolute_url, html_unescape)
+from utils import (
+        PageNotFoundError,
+        get_meta_name_and_modifiers, get_absolute_url,
+        html_unescape)
+
+
+logger = logging.getLogger(__name__)
 
 
 class FormatterNotFound(Exception):
@@ -12,20 +19,33 @@
     pass
 
 
-class CircularIncludeError(Exception):
+class IncludeError(Exception):
+    """ An exception raised when an include cannot be resolved.
+    """
+    def __init__(self, include_url, ref_url, message=None, *args):
+        Exception.__init__(self, include_url, ref_url, message, *args)
+
+    def __str__(self):
+        include_url = self.args[0]
+        ref_url = self.args[1]
+        message = self.args[2]
+        res = "Error including '%s' from '%s'." % (include_url, ref_url)
+        if message:
+            res += " " + message
+        return res
+
+
+class CircularIncludeError(IncludeError):
     """ An exception raised when a circular include is found
         while rendering a page.
     """
-    def __init__(self, current_url, url_trail, message=None):
-        Exception.__init__(self, current_url, url_trail, message)
+    def __init__(self, include_url, ref_url, url_trail):
+        IncludeError.__init__(self, include_url, ref_url, None, url_trail)
 
     def __str__(self):
-        current_url = self.args[0]
-        url_trail = self.args[1]
-        message = self.args[2]
-        res = "Circular include detected at '%s' (after %s)" % (current_url, url_trail)
-        if message:
-            res += ": %s" % message
+        url_trail = self.args[3]
+        res = IncludeError.__str__(self)
+        res += " Circular include detected after: %s" % url_trail
         return res
 
 
@@ -46,10 +66,10 @@
             return len(self.url_trail) > 1
         raise ValueError("Unknown modifier: " + modifier)
 
-    def getAbsoluteUrl(self, url, base_url=None):
+    def getAbsoluteUrl(self, url, base_url=None, quote=False):
         if base_url is None:
             base_url = self.root_page.url
-        return get_absolute_url(base_url, url)
+        return get_absolute_url(base_url, url, quote)
 
 
 class ResolveOutput(object):
@@ -111,8 +131,8 @@
         try:
             return self._unsafeRun()
         except Exception as e:
-            self.wiki.logger.error("Error resolving page '%s':" % self.page.url)
-            self.wiki.logger.exception(unicode(e.message))
+            logger.error("Error resolving page '%s':" % self.page.url)
+            logger.exception(unicode(e.message))
             self.output = ResolveOutput(self.page)
             self.output.text = u'<div class="error">%s</div>' % e
             return self.output
@@ -174,12 +194,12 @@
             # Resolve link states.
             def repl1(m):
                 raw_url = unicode(m.group('url'))
-                raw_url = self.ctx.getAbsoluteUrl(raw_url)
-                url = namespace_title_to_url(raw_url)
+                url = self.ctx.getAbsoluteUrl(raw_url)
                 self.output.out_links.append(url)
+                quoted_url = urllib.quote(url)
                 if self.wiki.pageExists(url):
-                    return '<a class="wiki-link" data-wiki-url="%s">' % url
-                return '<a class="wiki-link missing" data-wiki-url="%s">' % url
+                    return '<a class="wiki-link" data-wiki-url="%s">' % quoted_url
+                return '<a class="wiki-link missing" data-wiki-url="%s">' % quoted_url
 
             final_text = re.sub(
                     r'<a class="wiki-link" data-wiki-url="(?P<url>[^"]+)">',
@@ -204,7 +224,9 @@
         # `Templates` folder.
         include_url = opts['url']
         if include_url[0] != '/':
-            include_url = self.ctx.getAbsoluteUrl('/templates/' + include_url, self.page.url)
+            include_url = self.ctx.getAbsoluteUrl(
+                    self.page.wiki.templates_url + include_url,
+                    self.page.url)
             if not self.wiki.pageExists(include_url):
                 include_url = self.ctx.getAbsoluteUrl(opts['url'], self.page.url)
         else:
@@ -212,7 +234,7 @@
 
         # Check for circular includes.
         if include_url in self.ctx.url_trail:
-            raise CircularIncludeError(include_url, self.ctx.url_trail)
+            raise CircularIncludeError(include_url, self.page.url, self.ctx.url_trail)
 
         # Parse the templating parameters.
         parameters = dict(self.parameters)
@@ -235,7 +257,10 @@
 
         # Re-run the resolver on the included page to get its final
         # formatted text.
-        page = self.wiki.getPage(include_url)
+        try:
+            page = self.wiki.getPage(include_url)
+        except PageNotFoundError:
+            raise IncludeError(include_url, self.page.url)
         current_url_trail = list(self.ctx.url_trail)
         self.ctx.url_trail.append(page.url)
         child = PageResolver(page, self.ctx, parameters)
@@ -278,8 +303,8 @@
                     if self._isPageMatch(p, key, value):
                         matched_pages.append(p)
                 except Exception as e:
-                    self.wiki.logger.error("Can't query page '%s' for '%s':" % (p.url, self.page.url))
-                    self.wiki.logger.exception(unicode(e.message))
+                    logger.error("Can't query page '%s' for '%s':" % (p.url, self.page.url))
+                    logger.exception(unicode(e.message))
 
         # No match: return the 'empty' template.
         if len(matched_pages) == 0:
@@ -301,7 +326,11 @@
 
     def _valueOrPageText(self, value, with_url=False):
         if re.match(r'^\[\[.*\]\]$', value):
-            page = self.wiki.getPage(value[2:-2])
+            include_url = value[2:-2]
+            try:
+                page = self.wiki.getPage(include_url)
+            except PageNotFoundError:
+                raise IncludeError(include_url, self.page.url)
             if with_url:
                 return (page.url, page.text)
             return page.text
@@ -347,7 +376,9 @@
                 v = v[:pipe_idx]
 
             if v[0] != '/':
-                include_url = self.ctx.getAbsoluteUrl('/templates/' + v, page.url)
+                include_url = self.ctx.getAbsoluteUrl(
+                        self.page.wiki.templates_url + v,
+                        page.url)
                 if not self.wiki.pageExists(include_url):
                     include_url = self.ctx.getAbsoluteUrl(v, page.url)
             else:
@@ -356,7 +387,10 @@
 
         # Recurse into included pages.
         for url in included_urls:
-            p = self.wiki.getPage(url)
+            try:
+                p = self.wiki.getPage(url)
+            except PageNotFoundError:
+                raise IncludeError(url, page.url)
             if self._isPageMatch(p, name, value, level + 1):
                 return True
 
--- a/wikked/resources/defaults.cfg	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/resources/defaults.cfg	Sun Dec 01 21:50:29 2013 -0800
@@ -1,3 +1,7 @@
 [wiki]
 auto_update=False
 default_extension=md
+naming_policy=capitalize
+main_page=Main Page
+templates_dir=Templates
+
--- a/wikked/scm.py	Wed Nov 27 23:32:45 2013 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,388 +0,0 @@
-import re
-import os
-import os.path
-import time
-import logging
-import tempfile
-import subprocess
-
-try:
-    import pygit2
-    SUPPORTS_GIT = True
-except ImportError:
-    SUPPORTS_GIT = False
-
-
-STATE_COMMITTED = 0
-STATE_MODIFIED = 1
-STATE_NEW = 2
-STATE_NAMES = ['committed', 'modified', 'new']
-
-ACTION_ADD = 0
-ACTION_DELETE = 1
-ACTION_EDIT = 2
-ACTION_NAMES = ['add', 'delete', 'edit']
-
-
-class SourceControl(object):
-    def __init__(self, logger=None):
-        self.logger = logger
-        if logger is None:
-            self.logger = logging.getLogger('wikked.scm')
-
-    def initRepo(self):
-        raise NotImplementedError()
-
-    def getSpecialFilenames(self):
-        raise NotImplementedError()
-
-    def getHistory(self, path=None, limit=10):
-        raise NotImplementedError()
-
-    def getState(self, path):
-        raise NotImplementedError()
-
-    def getRevision(self, path, rev):
-        raise NotImplementedError()
-
-    def diff(self, path, rev1, rev2):
-        raise NotImplementedError()
-
-    def commit(self, paths, op_meta):
-        raise NotImplementedError()
-
-    def revert(self, paths=None):
-        raise NotImplementedError()
-
-
-class Revision(object):
-    def __init__(self, rev_id=-1):
-        self.rev_id = rev_id
-        self.rev_name = rev_id
-        self.author = None
-        self.timestamp = 0
-        self.description = None
-        self.files = []
-
-    @property
-    def is_local(self):
-        return self.rev_id == -1
-
-    @property
-    def is_committed(self):
-        return self.rev_id != -1
-
-
-class MercurialBaseSourceControl(SourceControl):
-    def __init__(self, root, logger=None):
-        SourceControl.__init__(self, logger)
-        self.root = root
-        self.actions = {
-                'A': ACTION_ADD,
-                'R': ACTION_DELETE,
-                'M': ACTION_EDIT
-                }
-
-    def initRepo(self):
-        # Make a Mercurial repo if there's none.
-        if not os.path.isdir(os.path.join(self.root, '.hg')):
-            self.logger.info("Creating Mercurial repository at: " + self.root)
-            self._run('init', self.root, norepo=True)
-
-        # Create a `.hgignore` file is there's none.
-        ignore_path = os.path.join(self.root, '.hgignore')
-        if not os.path.isfile(ignore_path):
-            self.logger.info("Creating `.hgignore` file.")
-            with open(ignore_path, 'w') as f:
-                f.write('.wiki')
-            self._run('add', ignore_path)
-            self._run('commit', ignore_path, '-m', 'Created .hgignore.')
-
-    def getSpecialFilenames(self):
-        specials = ['.hg', '.hgignore', '.hgtags']
-        return [os.path.join(self.root, d) for d in specials]
-
-    def _run(self, cmd, *args, **kwargs):
-        exe = [self.hg]
-        if 'norepo' not in kwargs or not kwargs['norepo']:
-            exe += ['-R', self.root]
-        exe.append(cmd)
-        exe += args
-        self.logger.debug("Running Mercurial: " + str(exe))
-        return subprocess.check_output(exe)
-
-
-class MercurialSourceControl(MercurialBaseSourceControl):
-    def __init__(self, root, logger=None):
-        MercurialBaseSourceControl.__init__(self, root, logger)
-
-        self.hg = 'hg'
-        self.log_style = os.path.join(os.path.dirname(__file__), 'resources', 'hg_log.style')
-
-    def getHistory(self, path=None, limit=10):
-        if path is not None:
-            st_out = self._run('status', path)
-            if len(st_out) > 0 and st_out[0] == '?':
-                return []
-
-        log_args = []
-        if path is not None:
-            log_args.append(path)
-        log_args += ['--style', self.log_style]
-        log_args += ['-l', limit]
-        log_out = self._run('log', *log_args)
-
-        revisions = []
-        for group in log_out.split("$$$\n"):
-            if group == '':
-                continue
-            revisions.append(self._parseRevision(group))
-        return revisions
-
-    def getState(self, path):
-        st_out = self._run('status', path)
-        if len(st_out) > 0:
-            if st_out[0] == '?' or st_out[0] == 'A':
-                return STATE_NEW
-            if st_out[0] == 'M':
-                return STATE_MODIFIED
-        return STATE_COMMITTED
-
-    def getRevision(self, path, rev):
-        cat_out = self._run('cat', '-r', rev, path)
-        return cat_out
-
-    def diff(self, path, rev1, rev2):
-        if rev2 is None:
-            diff_out = self._run('diff', '-c', rev1, '--git', path)
-        else:
-            diff_out = self._run('diff', '-r', rev1, '-r', rev2, '--git', path)
-        return diff_out
-
-    def commit(self, paths, op_meta):
-        if 'message' not in op_meta or not op_meta['message']:
-            raise ValueError("No commit message specified.")
-
-        # Check if any of those paths needs to be added.
-        st_out = self._run('status', *paths)
-        add_paths = []
-        for line in st_out.splitlines():
-            if line[0] == '?':
-                add_paths.append(line[2:])
-        if len(add_paths) > 0:
-            self._run('add', *paths)
-
-        # Create a temp file with the commit message.
-        f, temp = tempfile.mkstemp()
-        with os.fdopen(f, 'w') as fd:
-            fd.write(op_meta['message'])
-
-        # Commit and clean up the temp file.
-        try:
-            commit_args = list(paths) + ['-l', temp]
-            if 'author' in op_meta:
-                commit_args += ['-u', op_meta['author']]
-            self._run('commit', *commit_args)
-        finally:
-            os.remove(temp)
-
-    def revert(self, paths=None):
-        if paths is not None:
-            self._run('revert', '-C', paths)
-        else:
-            self._run('revert', '-a', '-C')
-
-    def _parseRevision(self, group):
-        lines = group.split("\n")
-
-        m = re.match(r'(\d+) ([0-9a-f]+) \[([^\]]+)\] ([^ ]+)', lines[0])
-        if m is None:
-            raise Exception('Error parsing history from Mercurial, got: ' + lines[0])
-
-        rev = Revision()
-        rev.rev_id = int(m.group(1))
-        rev.rev_name = rev.rev_id[:12]
-        rev.rev_hash = m.group(2)
-        rev.author = m.group(3)
-        rev.timestamp = float(m.group(4))
-
-        i = 1
-        rev.description = ''
-        while lines[i] != '---':
-            if i > 1:
-                rev.description += "\n"
-            rev.description += lines[i]
-            i += 1
-
-        rev.files = []
-        for j in range(i + 1, len(lines)):
-            if lines[j] == '':
-                continue
-            rev.files.append({
-                'path': lines[j][2:],
-                'action': self.actions[lines[j][0]]
-                })
-
-        return rev
-
-
-class MercurialCommandServerSourceControl(MercurialBaseSourceControl):
-    def __init__(self, root, logger=None):
-        MercurialBaseSourceControl.__init__(self, root, logger)
-
-        import hglib
-        self.client = hglib.open(self.root)
-
-    def getHistory(self, path=None, limit=10):
-        if path is not None:
-            rel_path = os.path.relpath(path, self.root)
-            status = self.client.status(include=[rel_path])
-            if len(status) > 0 and status[0] == '?':
-                return []
-
-        needs_files = False
-        if path is not None:
-            repo_revs = self.client.log(files=[path], follow=True, limit=limit)
-        else:
-            needs_files = True
-            repo_revs = self.client.log(follow=True, limit=limit)
-        revisions = []
-        for rev in repo_revs:
-            r = Revision(rev.node)
-            r.rev_name = rev.node[:12]
-            r.author = unicode(rev.author)
-            r.timestamp = time.mktime(rev.date.timetuple())
-            r.description = unicode(rev.desc)
-            if needs_files:
-                rev_statuses = self.client.status(change=rev.node)
-                for rev_status in rev_statuses:
-                    r.files.append({
-                        'path': rev_status[1].decode('utf-8', 'replace'),
-                        'action': self.actions[rev_status[0]]
-                        })
-            revisions.append(r)
-        return revisions
-
-    def getState(self, path):
-        rel_path = os.path.relpath(path, self.root)
-        statuses = self.client.status(include=[rel_path])
-        if len(statuses) == 0:
-            return STATE_COMMITTED
-        status = statuses[0]
-        if status[0] == '?' or status[0] == 'A':
-            return STATE_NEW
-        if status[0] == 'M':
-            return STATE_MODIFIED
-        raise Exception("Unsupported status: %s" % status)
-            
-    def getRevision(self, path, rev):
-        rel_path = os.path.relpath(path, self.root)
-        return self.client.cat([rel_path], rev=rev)
-
-    def diff(self, path, rev1, rev2):
-        rel_path = os.path.relpath(path, self.root)
-        if rev2 is None:
-            return self.client.diff(files=[rel_path], change=rev1, git=True)
-        return self.client.diff(files=[rel_path], revs=[rev1, rev2], git=True)
-
-    def commit(self, paths, op_meta):
-        if 'message' not in op_meta or not op_meta['message']:
-            raise ValueError("No commit message specified.")
-
-        # Get repo-relative paths.
-        rel_paths = [os.path.relpath(p, self.root) for p in paths]
-
-        # Check if any of those paths needs to be added.
-        status = self.client.status(unknown=True)
-        add_paths = []
-        for s in status:
-            if s[1] in rel_paths:
-                add_paths.append(s[1])
-        if len(add_paths) > 0:
-            self.client.add(files=add_paths)
-
-        # Commit!
-        if 'author' in op_meta:
-            self.client.commit(include=rel_paths, message=op_meta['message'], user=op_meta['author'])
-        else:
-            self.client.commit(include=rel_paths, message=op_meta['message'])
-
-    def revert(self, paths=None):
-        if paths is not None:
-            rel_paths = [os.path.relpath(p, self.root) for p in paths]
-            self.client.revert(files=rel_paths, nobackup=True)
-        else:
-            self.client.revert(all=True, nobackup=True)
-
-
-class GitBaseSourceControl(SourceControl):
-    def __init__(self, root, logger=None):
-        SourceControl.__init__(self, logger)
-        self.root = root
-
-    def initRepo(self):
-        # Make a Git repo if there's none.
-        if not os.path.isdir(os.path.join(self.root, '.git')):
-            self.logger.info("Creating Git repository at: " + self.root)
-            self._initRepo(self.root)
-
-        # Create a `.gitignore` file there's none.
-        ignore_path = os.path.join(self.root, '.gitignore')
-        if not os.path.isfile(ignore_path):
-            self.logger.info("Creating `.gitignore` file.")
-            with open(ignore_path, 'w') as f:
-                f.write('.wiki')
-            self._add(ignore_path)
-            self._commit('Created .gitignore.', [ignore_path])
-
-    def getSpecialFilenames(self):
-        specials = ['.git', '.gitignore']
-        return [os.path.join(self.root, d) for d in specials]
-
-    def getState(self, path):
-        return self._status(path)
-
-    def _run(self, cmd, *args, **kwargs):
-        exe = [self.git]
-        if 'norepo' not in kwargs or not kwargs['norepo']:
-            exe.append('--git-dir="%s"' % self.root)
-        exe.append(cmd)
-        exe += args
-        self.logger.debug("Running Git: " + str(exe))
-        return subprocess.check_output(exe)
-
-
-class GitLibSourceControl(GitBaseSourceControl):
-    def __init__(self, root, logger=None):
-        if not SUPPORTS_GIT:
-            raise Exception("Can't support Git because pygit2 is not available.")
-        GitBaseSourceControl.__init__(self, root, logger)
-
-    def initRepo(self):
-        GitBaseSourceControl.initRepo(self)
-        self.repo = pygit2.Repository(self.root)
-
-    def _initRepo(self, path):
-        pygit2.init_repository(path, False)
-
-    def _add(self, paths):
-        pass
-
-    def _commit(self, message, paths):
-        pass
-
-    def _status(self, path):
-        flags = self.repo.status_file(self._getRepoPath(path))
-        if flags == pygit2.GIT_STATUS_CURRENT:
-            return STATE_COMMITTED
-        if (flags & pygit2.GIT_STATUS_WT_MODIFIED or
-                flags & pygit2.GIT_STATUS_INDEX_MODIFIED):
-            return STATE_MODIFIED
-        if (flags & pygit2.GIT_STATUS_WT_NEW or
-                flags & pygit2.GIT_STATUS_INDEX_NEW):
-            return STATE_NEW
-        raise Exception("Unsupported status flag combination: %s" % flags)
-
-    def _getRepoPath(self, path):
-        return os.path.relpath(path, self.root).replace('\\', '/')
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/wikked/scm/base.py	Sun Dec 01 21:50:29 2013 -0800
@@ -0,0 +1,59 @@
+
+STATE_COMMITTED = 0
+STATE_MODIFIED = 1
+STATE_NEW = 2
+STATE_NAMES = ['committed', 'modified', 'new']
+
+ACTION_ADD = 0
+ACTION_DELETE = 1
+ACTION_EDIT = 2
+ACTION_NAMES = ['add', 'delete', 'edit']
+
+
+class SourceControl(object):
+    def __init__(self):
+        pass
+
+    def initRepo(self, wiki):
+        raise NotImplementedError()
+
+    def getSpecialFilenames(self):
+        raise NotImplementedError()
+
+    def getHistory(self, path=None, limit=10):
+        raise NotImplementedError()
+
+    def getState(self, path):
+        raise NotImplementedError()
+
+    def getRevision(self, path, rev):
+        raise NotImplementedError()
+
+    def diff(self, path, rev1, rev2):
+        raise NotImplementedError()
+
+    def commit(self, paths, op_meta):
+        raise NotImplementedError()
+
+    def revert(self, paths=None):
+        raise NotImplementedError()
+
+
+class Revision(object):
+    def __init__(self, rev_id=-1):
+        self.rev_id = rev_id
+        self.rev_name = rev_id
+        self.author = None
+        self.timestamp = 0
+        self.description = None
+        self.files = []
+
+    @property
+    def is_local(self):
+        return self.rev_id == -1
+
+    @property
+    def is_committed(self):
+        return self.rev_id != -1
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/wikked/scm/git.py	Sun Dec 01 21:50:29 2013 -0800
@@ -0,0 +1,89 @@
+import os
+import os.path
+import logging
+import subprocess
+from base import (
+        SourceControl,
+        STATE_NEW, STATE_MODIFIED, STATE_COMMITTED)
+
+try:
+    import pygit2
+    SUPPORTS_GIT = True
+except ImportError:
+    SUPPORTS_GIT = False
+
+
+logger = logging.getLogger(__name__)
+
+
+class GitBaseSourceControl(SourceControl):
+    def __init__(self, root):
+        SourceControl.__init__(self)
+        self.root = root
+
+    def initRepo(self, wiki):
+        # Make a Git repo if there's none.
+        if not os.path.isdir(os.path.join(self.root, '.git')):
+            logger.info("Creating Git repository at: " + self.root)
+            self._initRepo(self.root)
+
+        # Create a `.gitignore` file if there's none.
+        ignore_path = os.path.join(self.root, '.gitignore')
+        if not os.path.isfile(ignore_path):
+            logger.info("Creating `.gitignore` file.")
+            with open(ignore_path, 'w') as f:
+                f.write('.wiki')
+            self._add(ignore_path)
+            self._commit('Created .gitignore.', [ignore_path])
+
+    def getSpecialFilenames(self):
+        specials = ['.git', '.gitignore']
+        return [os.path.join(self.root, d) for d in specials]
+
+    def getState(self, path):
+        return self._status(path)
+
+    def _run(self, cmd, *args, **kwargs):
+        exe = [self.git]
+        if 'norepo' not in kwargs or not kwargs['norepo']:
+            exe.append('--git-dir="%s"' % self.root)
+        exe.append(cmd)
+        exe += args
+        logger.debug("Running Git: " + str(exe))
+        return subprocess.check_output(exe)
+
+
+class GitLibSourceControl(GitBaseSourceControl):
+    def __init__(self, root):
+        if not SUPPORTS_GIT:
+            raise Exception("Can't support Git because pygit2 is not available.")
+        GitBaseSourceControl.__init__(self, root)
+
+    def initRepo(self, wiki):
+        GitBaseSourceControl.initRepo(self, wiki)
+        self.repo = pygit2.Repository(self.root)
+
+    def _initRepo(self, path):
+        pygit2.init_repository(path, False)
+
+    def _add(self, paths):
+        pass
+
+    def _commit(self, message, paths):
+        pass
+
+    def _status(self, path):
+        flags = self.repo.status_file(self._getRepoPath(path))
+        if flags == pygit2.GIT_STATUS_CURRENT:
+            return STATE_COMMITTED
+        if (flags & pygit2.GIT_STATUS_WT_MODIFIED or
+                flags & pygit2.GIT_STATUS_INDEX_MODIFIED):
+            return STATE_MODIFIED
+        if (flags & pygit2.GIT_STATUS_WT_NEW or
+                flags & pygit2.GIT_STATUS_INDEX_NEW):
+            return STATE_NEW
+        raise Exception("Unsupported status flag combination: %s" % flags)
+
+    def _getRepoPath(self, path):
+        return os.path.relpath(path, self.root).replace('\\', '/')
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/wikked/scm/mercurial.py	Sun Dec 01 21:50:29 2013 -0800
@@ -0,0 +1,256 @@
+import re
+import os
+import os.path
+import time
+import logging
+import tempfile
+import subprocess
+from hglib.error import CommandError
+from hglib.util import cmdbuilder
+from base import (
+        SourceControl, Revision,
+        ACTION_ADD, ACTION_EDIT, ACTION_DELETE,
+        STATE_NEW, STATE_MODIFIED, STATE_COMMITTED)
+
+
+logger = logging.getLogger(__name__)
+
+
+class MercurialBaseSourceControl(SourceControl):
+    def __init__(self, root):
+        SourceControl.__init__(self)
+        self.root = root
+        self.actions = {
+                'A': ACTION_ADD,
+                'R': ACTION_DELETE,
+                'M': ACTION_EDIT
+                }
+
+    def initRepo(self, wiki):
+        # Make a Mercurial repo if there's none.
+        if not os.path.isdir(os.path.join(self.root, '.hg')):
+            logger.info("Creating Mercurial repository at: " + self.root)
+            self._run('init', self.root, norepo=True)
+
+        # Create a `.hgignore` file if there's none.
+        ignore_path = os.path.join(self.root, '.hgignore')
+        if not os.path.isfile(ignore_path):
+            logger.info("Creating `.hgignore` file.")
+            with open(ignore_path, 'w') as f:
+                f.write('.wiki')
+            self._run('add', ignore_path)
+            self._run('commit', ignore_path, '-m', 'Created .hgignore.')
+
+    def getSpecialFilenames(self):
+        specials = ['.hg', '.hgignore', '.hgtags']
+        return [os.path.join(self.root, d) for d in specials]
+
+    def _run(self, cmd, *args, **kwargs):
+        exe = [self.hg]
+        if 'norepo' not in kwargs or not kwargs['norepo']:
+            exe += ['-R', self.root]
+        exe.append(cmd)
+        exe += args
+        logger.debug("Running Mercurial: " + str(exe))
+        return subprocess.check_output(exe)
+
+
+class MercurialSourceControl(MercurialBaseSourceControl):
+    def __init__(self, root):
+        MercurialBaseSourceControl.__init__(self, root)
+
+        self.hg = 'hg'
+        self.log_style = os.path.join(os.path.dirname(__file__), 'resources', 'hg_log.style')
+
+    def getHistory(self, path=None, limit=10):
+        if path is not None:
+            st_out = self._run('status', path)
+            if len(st_out) > 0 and st_out[0] == '?':
+                return []
+
+        log_args = []
+        if path is not None:
+            log_args.append(path)
+        log_args += ['--style', self.log_style]
+        log_args += ['-l', limit]
+        log_out = self._run('log', *log_args)
+
+        revisions = []
+        for group in log_out.split("$$$\n"):
+            if group == '':
+                continue
+            revisions.append(self._parseRevision(group))
+        return revisions
+
+    def getState(self, path):
+        st_out = self._run('status', path)
+        if len(st_out) > 0:
+            if st_out[0] == '?' or st_out[0] == 'A':
+                return STATE_NEW
+            if st_out[0] == 'M':
+                return STATE_MODIFIED
+        return STATE_COMMITTED
+
+    def getRevision(self, path, rev):
+        cat_out = self._run('cat', '-r', rev, path)
+        return cat_out
+
+    def diff(self, path, rev1, rev2):
+        if rev2 is None:
+            diff_out = self._run('diff', '-c', rev1, '--git', path)
+        else:
+            diff_out = self._run('diff', '-r', rev1, '-r', rev2, '--git', path)
+        return diff_out
+
+    def commit(self, paths, op_meta):
+        if 'message' not in op_meta or not op_meta['message']:
+            raise ValueError("No commit message specified.")
+
+        # Check if any of those paths needs to be added.
+        st_out = self._run('status', *paths)
+        add_paths = []
+        for line in st_out.splitlines():
+            if line[0] == '?':
+                add_paths.append(line[2:])
+        if len(add_paths) > 0:
+            self._run('add', *paths)
+
+        # Create a temp file with the commit message.
+        f, temp = tempfile.mkstemp()
+        with os.fdopen(f, 'w') as fd:
+            fd.write(op_meta['message'])
+
+        # Commit and clean up the temp file.
+        try:
+            commit_args = list(paths) + ['-l', temp]
+            if 'author' in op_meta:
+                commit_args += ['-u', op_meta['author']]
+            self._run('commit', *commit_args)
+        finally:
+            os.remove(temp)
+
+    def revert(self, paths=None):
+        if paths is not None:
+            self._run('revert', '-C', paths)
+        else:
+            self._run('revert', '-a', '-C')
+
+    def _parseRevision(self, group):
+        lines = group.split("\n")
+
+        m = re.match(r'(\d+) ([0-9a-f]+) \[([^\]]+)\] ([^ ]+)', lines[0])
+        if m is None:
+            raise Exception('Error parsing history from Mercurial, got: ' + lines[0])
+
+        rev = Revision()
+        rev.rev_id = int(m.group(1))
+        rev.rev_name = rev.rev_id[:12]
+        rev.rev_hash = m.group(2)
+        rev.author = m.group(3)
+        rev.timestamp = float(m.group(4))
+
+        i = 1
+        rev.description = ''
+        while lines[i] != '---':
+            if i > 1:
+                rev.description += "\n"
+            rev.description += lines[i]
+            i += 1
+
+        rev.files = []
+        for j in range(i + 1, len(lines)):
+            if lines[j] == '':
+                continue
+            rev.files.append({
+                'path': lines[j][2:],
+                'action': self.actions[lines[j][0]]
+                })
+
+        return rev
+
+
+class MercurialCommandServerSourceControl(MercurialBaseSourceControl):
+    def __init__(self, root):
+        MercurialBaseSourceControl.__init__(self, root)
+
+        import hglib
+        self.client = hglib.open(self.root)
+
+    def getHistory(self, path=None, limit=10):
+        if path is not None:
+            rel_path = os.path.relpath(path, self.root)
+            status = self.client.status(include=[rel_path])
+            if len(status) > 0 and status[0] == '?':
+                return []
+
+        needs_files = False
+        if path is not None:
+            repo_revs = self.client.log(files=[path], follow=True, limit=limit)
+        else:
+            needs_files = True
+            repo_revs = self.client.log(follow=True, limit=limit)
+        revisions = []
+        for rev in repo_revs:
+            r = Revision(rev.node)
+            r.rev_name = rev.node[:12]
+            r.author = unicode(rev.author)
+            r.timestamp = time.mktime(rev.date.timetuple())
+            r.description = unicode(rev.desc)
+            if needs_files:
+                rev_statuses = self.client.status(change=rev.node)
+                for rev_status in rev_statuses:
+                    r.files.append({
+                        'path': rev_status[1].decode('utf-8', 'replace'),
+                        'action': self.actions[rev_status[0]]
+                        })
+            revisions.append(r)
+        return revisions
+
+    def getState(self, path):
+        rel_path = os.path.relpath(path, self.root)
+        statuses = self.client.status(include=[rel_path])
+        if len(statuses) == 0:
+            return STATE_COMMITTED
+        status = statuses[0]
+        if status[0] == '?' or status[0] == 'A':
+            return STATE_NEW
+        if status[0] == 'M':
+            return STATE_MODIFIED
+        raise Exception("Unsupported status: %s" % status)
+            
+    def getRevision(self, path, rev):
+        rel_path = os.path.relpath(path, self.root)
+        return self.client.cat([rel_path], rev=rev)
+
+    def diff(self, path, rev1, rev2):
+        rel_path = os.path.relpath(path, self.root)
+        if rev2 is None:
+            return self.client.diff(files=[rel_path], change=rev1, git=True)
+        return self.client.diff(files=[rel_path], revs=[rev1, rev2], git=True)
+
+    def commit(self, paths, op_meta):
+        if 'message' not in op_meta or not op_meta['message']:
+            raise ValueError("No commit message specified.")
+
+        kwargs = {}
+        if 'author' in op_meta:
+            kwargs['u'] = op_meta['author']
+        try:
+            # We need to write our own command because somehow the `commit`
+            # method in `hglib` doesn't support specifying the file(s)
+            # directly -- only with `--include`. Weird.
+            args = cmdbuilder('commit', *paths,
+                    debug=True, m=op_meta['message'], A=True,
+                    **kwargs)
+            self.client.rawcommand(args)
+        except CommandError as e:
+            logger.error("Failed running command '%s', got code '%s' and message '%s'. Output: %s" % (
+                e.args, e.ret, e.err, e.out))
+            raise
+
+    def revert(self, paths=None):
+        if paths is not None:
+            rel_paths = [os.path.relpath(p, self.root) for p in paths]
+            self.client.revert(files=rel_paths, nobackup=True)
+        else:
+            self.client.revert(all=True, nobackup=True)
--- a/wikked/templates/meta_page.html	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/templates/meta_page.html	Sun Dec 01 21:50:29 2013 -0800
@@ -1,7 +1,7 @@
 {% if info_text %}
 {{info_text|safe}}
 {% else %}
-<p>No additional information is available for this page. You can write some right now.</p>
+<p>No additional information is available for this page. You can <a class="wiki-meta-link missing" data-wiki-meta="{{name}}" data-wiki-value="{{safe_value}}" data-action="edit">write some right now</a>.</p>
 {% endif %}
 
 <h2>Pages in {{name}} "{{value}}"</h2>
--- a/wikked/utils.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/utils.py	Sun Dec 01 21:50:29 2013 -0800
@@ -1,7 +1,7 @@
 import re
 import os
 import os.path
-import unicodedata
+import urllib
 from xml.sax.saxutils import escape, unescape
 
 
@@ -9,6 +9,22 @@
     """ An error raised when no physical file
        is found for a given URL.
     """
+    def __init__(self, url, message=None, *args):
+        Exception.__init__(self, url, message, *args)
+
+    def __str__(self):
+        url = self.args[0]
+        message = self.args[1]
+        res = "Can't find page '%s'." % url
+        if message:
+            res += ' ' + message
+        return res
+
+
+class NamespaceNotFoundError(Exception):
+    """ An error raised when no physical directory is found
+        for a given URL.
+    """
     pass
 
 
@@ -27,7 +43,7 @@
     return None
 
 
-def get_absolute_url(base_url, url, do_slugify=True):
+def get_absolute_url(base_url, url, quote=False):
     base_url = re.sub(r'^(\w[\w\d]+)\:', '', base_url)
     if base_url[0] != '/':
         raise ValueError("The base URL must be absolute. Got: %s" % base_url)
@@ -42,51 +58,11 @@
         urldir = os.path.dirname(base_url)
         raw_abs_url = os.path.join(urldir, url)
         abs_url = os.path.normpath(raw_abs_url).replace('\\', '/')
-    if do_slugify:
-        abs_url = namespace_title_to_url(abs_url)
+    if quote:
+        abs_url = urllib.quote(abs_url)
     return abs_url
 
 
-def namespace_title_to_url(url):
-    url_parts = url.split('/')
-    result = ''
-    if url[0] == '/':
-        result = '/'
-        url_parts = url_parts[1:]
-    for i, part in enumerate(url_parts):
-        if i > 0:
-            result += '/'
-        result += title_to_url(part)
-    return result
-
-
-def title_to_url(title):
-    # Remove diacritics (accents, etc.) and replace them with ASCII
-    # equivelent.
-    ansi_title = ''.join((c for c in
-        unicodedata.normalize('NFD', unicode(title))
-        if unicodedata.category(c) != 'Mn'))
-    # Now replace spaces and punctuation with a hyphen.
-    return re.sub(r'[^A-Za-z0-9_\.\-\(\)\{\}]+', '-', ansi_title.lower())
-
-
-def path_to_url(path, strip_ext=True):
-    if strip_ext:
-        path = os.path.splitext(path)[0]
-
-    url = ''
-    parts = path.lower().split(os.sep) # unicode(path)
-    for i, part in enumerate(parts):
-        url += '/' + title_to_url(part)
-    return url
-
-
-def url_to_title(url):
-    def upperChar(m):
-        return m.group(0).upper()
-    return re.sub(r'^.|\s\S', upperChar, url.lower().replace('-', ' '))
-
-
 def get_meta_name_and_modifiers(name):
     """ Strips a meta name from any leading modifiers like `__` or `+`
         and returns both as a tuple. If no modifier was found, the
--- a/wikked/views.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/views.py	Sun Dec 01 21:50:29 2013 -0800
@@ -9,11 +9,10 @@
 from pygments.lexers import get_lexer_by_name
 from pygments.formatters import get_formatter_by_name
 from web import app, login_manager
-from page import Page, DatabasePage, PageData, PageLoadingError
+from page import Page, PageData, PageLoadingError
 from fs import PageNotFoundError
 from formatter import PageFormatter, FormattingContext
-from utils import title_to_url, path_to_url, namespace_title_to_url
-import scm
+from scm.base import STATE_NAMES, ACTION_NAMES
 
 
 DONT_CHECK = 0
@@ -21,22 +20,9 @@
 CHECK_FOR_WRITE = 2
 
 
-def get_category_meta(category):
-    result = []
-    for item in category:
-        result.append({
-            'url': urllib.quote_plus(item),
-            'name': item
-            })
-    return result
-
-COERCE_META = {
-    'redirect': title_to_url,
-    'category': get_category_meta
-    }
-
-
 class DummyPage(Page):
+    """ A dummy page for previewing in-progress editing.
+    """
     def __init__(self, wiki, url, text):
         Page.__init__(self, wiki, url)
         self._text = text
@@ -55,12 +41,27 @@
         data.local_meta = ctx.meta
         data.local_links = ctx.out_links
 
-        data.title = Page.url_to_title(self.url)
+        data.title = make_page_title(self.url)
 
         return data
 
 
+def url_from_viewarg(url):
+    url = urllib.unquote(url)
+    m = re.match(r'(\w[\w\d]+)\:(.*)', url)
+    if m:
+        endpoint = str(m.group(1))
+        path = string.lstrip(str(m.group(2)), '/')
+        return '%s:/%s' % (endpoint, path)
+    return '/' + string.lstrip(url, '/')
+
+
+def make_page_title(url):
+    return url[1:]
+
+
 def get_page_or_none(url, force_resolve=False):
+    url = url_from_viewarg(url)
     try:
         page = g.wiki.getPage(url)
         if force_resolve:
@@ -79,18 +80,10 @@
         elif check_perms == CHECK_FOR_WRITE and not is_page_writable(page):
             abort(401)
         return page
+    app.logger.error("No such page: " + url)
     abort(404)
 
 
-def make_absolute(url):
-    m = re.match(r'(\w[\w\d]+)\:(.*)', url)
-    if m:
-        endpoint = str(m.group(1))
-        path = string.lstrip(str(m.group(2)), '/')
-        return '%s:/%s' % (endpoint, path)
-    return '/' + string.lstrip(url, '/')
-
-
 def is_page_readable(page, user=current_user):
     return page.wiki.auth.isPageReadable(page, user.get_id())
 
@@ -105,13 +98,23 @@
     else:
         meta = dict(page.meta)
     meta['title'] = page.title
-    meta['url'] = page.url
+    meta['url'] = urllib.quote(page.url)
     for name in COERCE_META:
         if name in meta:
             meta[name] = COERCE_META[name](meta[name])
     return meta
 
 
+def get_category_meta(category):
+    result = []
+    for item in category:
+        result.append({
+            'url': urllib.quote(item),
+            'name': item
+            })
+    return result
+
+
 def get_history_data(history, needs_files=False):
     hist_data = []
     for i, rev in enumerate(reversed(history)):
@@ -128,23 +131,21 @@
             for f in rev.files:
                 url = None
                 path = os.path.join(g.wiki.root, f['path'])
-                db_obj = g.wiki.db.getPage(path=path)
-                if db_obj is not None:
-                    try:
-                        # Hide pages that the user can't see.
-                        page = DatabasePage(g.wiki, db_obj=db_obj)
-                        if not is_page_readable(page):
-                            continue
-                        url = page.url
-                    except PageNotFoundError:
-                        pass
-                    except PageLoadingError:
-                        pass
+                try:
+                    page = g.wiki.db.getPage(path=path)
+                    # Hide pages that the user can't see.
+                    if not is_page_readable(page):
+                        continue
+                    url = page.url
+                except PageNotFoundError:
+                    pass
+                except PageLoadingError:
+                    pass
                 if not url:
-                    url = path_to_url(f['path'])
+                    url = os.path.splitext(f['path'])[0]
                 rev_data['pages'].append({
                     'url': url,
-                    'action': scm.ACTION_NAMES[f['action']]
+                    'action': ACTION_NAMES[f['action']]
                     })
             rev_data['num_pages'] = len(rev_data['pages'])
             rev_data['make_collapsable'] = len(rev_data['pages']) > 1
@@ -155,6 +156,63 @@
     return hist_data
 
 
+def get_edit_page(url, default_title=None, custom_data=None):
+    page = get_page_or_none(url)
+    if page is None:
+        result = {
+                'meta': {
+                    'url': urllib.quote(url),
+                    'title': default_title or make_page_title(url)
+                    },
+                'text': ''
+                }
+    else:
+        if not is_page_writable(page):
+            abort(401)
+        result = {
+                'meta': get_page_meta(page, True),
+                'text': page.raw_text
+                }
+    result['commit_meta'] = {
+            'author': request.remote_addr,
+            'desc': 'Editing ' + result['meta']['title']
+            }
+    if custom_data:
+        result.update(custom_data)
+    return make_auth_response(result)
+
+
+def do_edit_page(url, default_message):
+    page = get_page_or_none(url)
+    if page and not is_page_writable(page):
+        app.logger.error("Page '%s' is not writable for user '%s'." % (url, current_user.get_id()))
+        abort(401)
+
+    if not 'text' in request.form:
+        abort(400)
+    text = request.form['text']
+    author = request.remote_addr
+    if 'author' in request.form and len(request.form['author']) > 0:
+        author = request.form['author']
+    message = 'Edited ' + url
+    if 'message' in request.form and len(request.form['message']) > 0:
+        message = request.form['message']
+
+    page_fields = {
+            'text': text,
+            'author': author,
+            'message': message
+            }
+    g.wiki.setPage(url, page_fields)
+    result = {'saved': 1}
+    return make_auth_response(result)
+
+
+COERCE_META = {
+    'category': get_category_meta
+    }
+
+
 def make_auth_response(data):
     if current_user.is_authenticated():
         data['auth'] = {
@@ -195,16 +253,21 @@
 
 @app.route('/api/list/<path:url>')
 def api_list_pages(url):
-    pages = filter(is_page_readable, g.wiki.getPages(make_absolute(url)))
+    pages = filter(is_page_readable, g.wiki.getPages(url_from_viewarg(url)))
     page_metas = [get_page_meta(page) for page in pages]
     result = {'path': url, 'pages': list(page_metas)}
     return make_auth_response(result)
 
 
+@app.route('/api/read/')
+def api_read_main_page():
+    return api_read_page(g.wiki.main_page_url)
+
+
 @app.route('/api/read/<path:url>')
 def api_read_page(url):
     page = get_page_or_404(
-            make_absolute(url), 
+            url,
             check_perms=CHECK_FOR_READ,
             force_resolve=('force_resolve' in request.args))
     result = {'meta': get_page_meta(page), 'text': page.text}
@@ -213,24 +276,28 @@
 
 @app.route('/api/raw/<path:url>')
 def api_read_page_raw(url):
-    page = get_page_or_404(make_absolute(url), CHECK_FOR_READ)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     result = {'meta': get_page_meta(page), 'text': page.raw_text}
     return make_auth_response(result)
 
 
 @app.route('/api/read_meta/<name>/<value>')
 def api_read_meta_page(name, value):
+    quoted_value = value
+    value = urllib.unquote(value)
+
     query = {name: [value]}
     pages = g.wiki.getPages(meta_query=query)
     tpl_data = {
             'name': name,
             'value': value,
+            'safe_value': quoted_value,
             'pages': [get_page_meta(p) for p in pages]
         }
 
-    url_value = namespace_title_to_url(value)
+    meta_page_url = '%s:/%s' % (name, value)
     info_page = get_page_or_none(
-            "%s:/%s" % (name, url_value),
+            meta_page_url,
             force_resolve=('force_resolve' in request.args))
     if info_page:
         tpl_data['info_text'] = info_page.text
@@ -240,7 +307,10 @@
             'meta_query': name,
             'meta_value': value,
             'query': query,
-            'meta': {},
+            'meta': {
+                    'url': urllib.quote(meta_page_url),
+                    'title': value
+                },
             'text': text
         }
     if info_page:
@@ -254,7 +324,7 @@
     rev = request.args.get('rev')
     if rev is None:
         abort(400)
-    page = get_page_or_404(make_absolute(url), CHECK_FOR_READ)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     page_rev = page.getRevision(rev)
     meta = dict(get_page_meta(page, True), rev=rev)
     result = {'meta': meta, 'text': page_rev}
@@ -278,7 +348,7 @@
     rev2 = request.args.get('rev2')
     if rev1 is None:
         abort(400)
-    page = get_page_or_404(make_absolute(url), CHECK_FOR_READ)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     diff = page.getDiff(rev1, rev2)
     if 'raw' not in request.args:
         lexer = get_lexer_by_name('diff')
@@ -294,17 +364,17 @@
 
 @app.route('/api/state/<path:url>')
 def api_get_state(url):
-    page = get_page_or_404(make_absolute(url), CHECK_FOR_READ)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     state = page.getState()
     return make_auth_response({
         'meta': get_page_meta(page, True),
-        'state': scm.STATE_NAMES[state]
+        'state': STATE_NAMES[state]
         })
 
 
 @app.route('/api/outlinks/<path:url>')
 def api_get_outgoing_links(url):
-    page = get_page_or_404(make_absolute(url), CHECK_FOR_READ)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     links = []
     for link in page.links:
         other = get_page_or_none(link)
@@ -322,7 +392,7 @@
 
 @app.route('/api/inlinks/<path:url>')
 def api_get_incoming_links(url):
-    page = get_page_or_404(make_absolute(url), CHECK_FOR_READ)
+    page = get_page_or_404(url, CHECK_FOR_READ)
     links = []
     for link in page.getIncomingLinks():
         other = get_page_or_none(link)
@@ -340,51 +410,31 @@
 
 @app.route('/api/edit/<path:url>', methods=['GET', 'POST'])
 def api_edit_page(url):
-    url = make_absolute(url)
+    url = url_from_viewarg(url)
     if request.method == 'GET':
-        page = get_page_or_none(url)
-        if page is None:
-            result = {
-                    'meta': {
-                        'url': url,
-                        'name': os.path.basename(url),
-                        'title': Page.url_to_title(url)
-                        },
-                    'text': ''
-                    }
-        else:
-            if not is_page_writable(page):
-                abort(401)
-            result = {
-                    'meta': get_page_meta(page, True),
-                    'text': page.raw_text
-                    }
-        result['commit_meta'] = {
-                'author': request.remote_addr,
-                'desc': 'Editing ' + result['meta']['title']
+        return get_edit_page(url)
+
+    default_message = 'Edited ' + url
+    return do_edit_page(url, default_message)
+
+
+@app.route('/api/edit_meta/<name>/<path:value>', methods=['GET', 'POST'])
+def api_edit_meta_page(name, value):
+    value = urllib.unquote(value)
+    meta_page_url = '%s:/%s' % (name, value)
+
+    if request.method == 'GET':
+        custom_data = {
+                'meta_query': name,
+                'meta_value': value
                 }
-        return make_auth_response(result)
-
-    get_page_or_404(url, CHECK_FOR_WRITE)
+        return get_edit_page(
+                meta_page_url,
+                default_title=('%s: %s' % (name, value)),
+                custom_data=custom_data)
 
-    if not 'text' in request.form:
-        abort(400)
-    text = request.form['text']
-    author = request.remote_addr
-    if 'author' in request.form and len(request.form['author']) > 0:
-        author = request.form['author']
-    message = 'Edited ' + url
-    if 'message' in request.form and len(request.form['message']) > 0:
-        message = request.form['message']
-
-    page_fields = {
-            'text': text,
-            'author': author,
-            'message': message
-            }
-    g.wiki.setPage(url, page_fields)
-    result = {'saved': 1}
-    return make_auth_response(result)
+    default_message = 'Edited %s %s' % (name, value)
+    return do_edit_page(meta_page_url, default_message)
 
 
 @app.route('/api/revert/<path:url>', methods=['POST'])
@@ -399,7 +449,7 @@
     if 'message' in request.form and len(request.form['message']) > 0:
         message = request.form['message']
 
-    url = make_absolute(url)
+    url = url_from_viewarg(url)
     page_fields = {
             'rev': rev,
             'author': author,
@@ -457,7 +507,6 @@
 
 @app.route('/api/history/<path:url>')
 def api_page_history(url):
-    url = make_absolute(url)
     page = get_page_or_404(url, CHECK_FOR_READ)
     history = page.getHistory()
     hist_data = get_history_data(history)
--- a/wikked/web.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/web.py	Sun Dec 01 21:50:29 2013 -0800
@@ -1,15 +1,22 @@
 import os
 import os.path
+import logging
 from flask import Flask, abort, g
 from utils import find_wiki_root
 
 # Create the main app.
-app = Flask("wikked")
+app = Flask("wikked.web")
 app.config.from_object('wikked.settings')
 app.config.from_envvar('WIKKED_SETTINGS', silent=True)
 
 
-# Find the wiki root.
+# Setup some config defaults.
+app.config.setdefault('SQL_DEBUG', False)
+app.config.setdefault('SQL_COMMIT_ON_TEARDOWN', False)
+
+
+# Find the wiki root, and further configure the app if there's a
+# config file in there.
 wiki_root = find_wiki_root()
 config_path = os.path.join(wiki_root, '.wiki', 'app.cfg')
 if os.path.isfile(config_path):
@@ -29,13 +36,13 @@
 
 
 # Customize logging.
-if app.config.get('LOG_FORMAT'):
-    import logging
-    handler = logging.StreamHandler()
-    handler.setLevel(logging.DEBUG)
-    handler.setFormatter(logging.Formatter(app.config['LOG_FORMAT']))
-    app.logger.handlers = []
-    app.logger.addHandler(handler)
+if app.config['DEBUG']:
+    l = logging.getLogger('wikked')
+    l.setLevel(logging.DEBUG)
+
+if app.config['SQL_DEBUG']:
+    l = logging.getLogger('sqlalchemy')
+    l.setLevel(logging.DEBUG)
 
 
 # Set the wiki as a request global, and open/close the database.
@@ -50,15 +57,18 @@
 
 @app.teardown_request
 def teardown_request(exception):
-    pass
+    return exception
 
 
-# SQLAlchemy extension.
-from flask.ext.sqlalchemy import SQLAlchemy
-# TODO: get the path from the wiki parameters
-app.config['SQLALCHEMY_DATABASE_URI'] = ('sqlite:///' + 
-        os.path.join(wiki_root, '.wiki', 'wiki.db'))
-db = SQLAlchemy(app)
+# SQLAlchemy.
+@app.teardown_appcontext
+def shutdown_session(exception=None):
+    wiki = getattr(g, 'wiki', None)
+    if wiki:
+        if app.config['SQL_COMMIT_ON_TEARDOWN'] and exception is None:
+            wiki.db.session.commit()
+        wiki.db.session.remove()
+        return exception
 
 
 # Login extension.
@@ -98,7 +108,6 @@
 
 def create_wiki(update_on_start=True):
     params = WikiParameters(root=wiki_root)
-    params.logger = app.logger
     wiki = Wiki(params)
     wiki.start(update_on_start)
     return wiki
@@ -108,5 +117,6 @@
 
 
 # Import the views.
+# (this creates a PyLint warning but it's OK)
 import views
 
--- a/wikked/wiki.py	Wed Nov 27 23:32:45 2013 -0800
+++ b/wikked/wiki.py	Sun Dec 01 21:50:29 2013 -0800
@@ -2,17 +2,20 @@
 import os.path
 import time
 import logging
-import itertools
 import importlib
 from ConfigParser import SafeConfigParser, NoOptionError
-from page import DatabasePage, FileSystemPage
+from page import FileSystemPage
 from fs import FileSystem
-from db import SQLDatabase
-from scm import MercurialCommandServerSourceControl, GitLibSourceControl
-from indexer import WhooshWikiIndex
+from db.sql import SQLDatabase
+from scm.mercurial import MercurialCommandServerSourceControl
+from scm.git import GitLibSourceControl
+from indexer.native import WhooshWikiIndex
 from auth import UserManager
 
 
+logger = logging.getLogger(__name__)
+
+
 def passthrough_formatter(text):
     """ Passthrough formatter. Pretty simple stuff. """
     return text
@@ -39,19 +42,14 @@
         self.index_path = os.path.join(self.root, '.wiki', 'index')
         self.db_path = os.path.join(self.root, '.wiki', 'wiki.db')
 
-    def logger_factory(self):
-        if getattr(self, 'logger', None):
-            return self.logger
-        return logging.getLogger(__name__)
-
     def fs_factory(self, config):
-        return FileSystem(self.root, logger=self.logger_factory())
+        return FileSystem(self.root)
 
     def index_factory(self, config):
-        return WhooshWikiIndex(self.index_path, logger=self.logger_factory())
+        return WhooshWikiIndex(self.index_path)
 
     def db_factory(self, config):
-        return SQLDatabase(self.db_path, logger=self.logger_factory())
+        return SQLDatabase(self.db_path)
 
     def scm_factory(self, config):
         try:
@@ -67,9 +65,9 @@
                 scm_type = 'hg'
 
         if scm_type == 'hg':
-            return MercurialCommandServerSourceControl(self.root, logger=self.logger_factory())
+            return MercurialCommandServerSourceControl(self.root)
         elif scm_type == 'git':
-            return GitLibSourceControl(self.root, logger=self.logger_factory())
+            return GitLibSourceControl(self.root)
         else:
             raise InitializationError("No such source control: " + scm_type)
 
@@ -105,10 +103,12 @@
         if parameters is None:
             raise ValueError("No parameters were given to the wiki.")
 
-        self.logger = parameters.logger_factory()
-        self.logger.debug("Initializing wiki.")
+        logger.debug("Initializing wiki.")
 
+        self.parameters = parameters
         self.config = self._loadConfig(parameters)
+        self.main_page_url = '/' + self.config.get('wiki', 'main_page').strip('/')
+        self.templates_url = '/' + self.config.get('wiki', 'templates_dir').strip('/') + '/'
 
         self.formatters = parameters.formatters
 
@@ -117,12 +117,7 @@
         self.db = parameters.db_factory(self.config)
         self.scm = parameters.scm_factory(self.config)
 
-        self.auth = UserManager(self.config, logger=self.logger)
-
-        self.fs.page_extensions = list(set(
-            itertools.chain(*self.formatters.itervalues())))
-        self.fs.excluded += parameters.getSpecialFilenames()
-        self.fs.excluded += self.scm.getSpecialFilenames()
+        self.auth = UserManager(self.config)
 
     @property
     def root(self):
@@ -131,9 +126,10 @@
     def start(self, update=True):
         """ Properly initializes the wiki and all its sub-systems.
         """
-        self.scm.initRepo()
-        self.index.initIndex()
-        self.db.initDb()
+        self.fs.initFs(self)
+        self.scm.initRepo(self)
+        self.index.initIndex(self)
+        self.db.initDb(self)
 
         if update:
             self.update()
@@ -142,7 +138,7 @@
         self.db.close()
 
     def reset(self, cache_ext_data=True):
-        self.logger.debug("Resetting wiki data...")
+        logger.debug("Resetting wiki data...")
         page_infos = self.fs.getPageInfos()
         fs_pages = FileSystemPage.fromPageInfos(self, page_infos)
         self.db.reset(fs_pages)
@@ -153,7 +149,7 @@
 
     def update(self, url=None, cache_ext_data=True):
         updated_urls = []
-        self.logger.debug("Updating pages...")
+        logger.debug("Updating pages...")
         if url:
             page_info = self.fs.getPage(url)
             fs_page = FileSystemPage(self, page_info=page_info)
@@ -183,12 +179,12 @@
         if meta_query:
             self._cachePages()
         for page in self.db.getPages(subdir, meta_query):
-            yield DatabasePage(self, db_obj=page)
+            yield page
 
     def getPage(self, url):
         """ Gets the page for a given URL.
         """
-        return DatabasePage(self, url)
+        return self.db.getPage(url)
 
     def setPage(self, url, page_fields):
         """ Updates or creates a page for a given URL.
@@ -259,14 +255,13 @@
         return self.scm.getHistory(limit=limit)
 
     def _cachePages(self, only_urls=None):
-        self.logger.debug("Caching extended page data...")
+        logger.debug("Caching extended page data...")
         if only_urls:
             for url in only_urls:
                 page = self.getPage(url)
                 page._ensureExtendedData()
         else:
-            for db_obj in self.db.getUncachedPages():
-                page = DatabasePage(self, db_obj=db_obj)
+            for page in self.db.getUncachedPages():
                 page._ensureExtendedData()
 
     def _loadConfig(self, parameters):