Merge branch 'master' into record_last_compaction
diff --git a/.gitignore b/.gitignore
index faa07f9..a1cba1e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -54,6 +54,7 @@
 src/rebar/
 src/snappy/
 src/triq/
+src/hyper/
 tmp/
 
 src/couch/*.o
diff --git a/.travis.yml b/.travis.yml
index fe84f87..b2e7ff0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,7 +7,6 @@
    - 19.3
    - 18.3
    - 17.5
-   - R16B03-1
 
 addons:
   apt:
@@ -64,6 +63,14 @@
 after_failure:
   - build-aux/logfile-uploader.py
 
+# Run push builds only on master and release branches; PR builds run on every branch
+# Avoids double builds on PRs (see https://github.com/travis-ci/travis-ci/issues/1147)
+branches:
+  only:
+    - master
+    - /^\d+\.x\.x$/
+    - /^\d+\.\d+\.x$/
+
 # Re-enable once test suite is reliable
 #notifications:
 #  email: false
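
The two release-branch patterns above are anchored regular expressions: a
branch named 2.x.x matches /^\d+\.x\.x$/ and 2.1.x matches /^\d+\.\d+\.x$/,
so push builds run only there (and on master), while every other branch is
built solely through its pull request.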
diff --git a/INSTALL.Unix.md b/INSTALL.Unix.md
index b2d4fbd..bfd9c89 100644
--- a/INSTALL.Unix.md
+++ b/INSTALL.Unix.md
@@ -137,8 +137,10 @@
 
 You can install the remaining dependencies by running:
 
-    pkg install npm4 help2man openssl icu curl git \
-        autoconf automake libtool node spidermonkey185
+    pkg install help2man openssl icu curl git bash \
+        autoconf automake libtool node spidermonkey185 \
+        erlang node8 npm-node8 lang/python py27-sphinx py27-pip
+    pip install --upgrade sphinx_rtd_theme nose requests hypothesis
 
 ## Installing
 
diff --git a/Jenkinsfile b/Jenkinsfile
index 905a85f..46fb723 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -39,13 +39,13 @@
       }
       steps {
         // This image has the oldest Erlang we support, 17.5.3
-        sh 'docker pull couchdbdev/ubuntu-14.04-erlang-default:latest'
+        sh 'docker pull couchdbdev/debian-jessie-erlang-17.5.3:latest'
         timeout(time: 15, unit: "MINUTES") {
           // https://github.com/jenkins-infra/jenkins.io/blob/master/Jenkinsfile#64
           // We need the jenkins user mapped inside of the image
           // npm config cache below is required because /home/jenkins doesn't
           // ACTUALLY exist in the image
-          withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-default', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
+          withDockerContainer(image: 'couchdbdev/debian-jessie-erlang-17.5.3', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
             sh '''
               set
               rm -rf apache-couchdb-*
@@ -78,24 +78,48 @@
     // Build packages on supported platforms using esl's erlang
     stage('Test') {
       steps {
-        parallel(centos6erlang183: {
+        parallel(freebsd: {
+          node(label: 'couchdb && freebsd') {
+            timeout(time: 60, unit: "MINUTES") {
+              deleteDir()
+              unstash 'tarball'
+              withEnv(['HOME='+pwd()]) {
+                sh '''
+                  cwd=$(pwd)
+                  mkdir -p $COUCHDB_IO_LOG_DIR
+
+                  # Build CouchDB from tarball & test
+                  builddir=$(mktemp -d)
+                  cd $builddir
+                  tar -xf $cwd/apache-couchdb-*.tar.gz
+                  cd apache-couchdb-*
+                  ./configure --with-curl
+                  gmake check || (build-aux/logfile-uploader.py && false)
+
+                  # No package build for FreeBSD at this time
+                '''
+              } // withEnv
+            } // timeout
+            deleteDir()
+          } // node
+        },
+        centos6: {
           node(label: 'ubuntu') {
             timeout(time: 60, unit: "MINUTES") {
-              sh 'docker pull couchdbdev/centos-6-erlang-18.3'
-              withDockerContainer(image: 'couchdbdev/centos-6-erlang-18.3', args: '-e LD_LIBRARY_PATH=/usr/local/bin') {
+              sh 'docker pull couchdbdev/centos-6-erlang-19.3.6'
+              withDockerContainer(image: 'couchdbdev/centos-6-erlang-19.3.6') {
                 sh 'rm -f apache-couchdb-*.tar.gz'
                 unstash 'tarball'
                 sh '''
                   cwd=$(pwd)
                   mkdir -p $COUCHDB_IO_LOG_DIR
 
-                  # Build CouchDB from tarball
+                  # Build CouchDB from tarball & test
                   builddir=$(mktemp -d)
                   cd $builddir
                   tar -xf $cwd/apache-couchdb-*.tar.gz
                   cd apache-couchdb-*
                   ./configure --with-curl
-                  make all
                   make check || (build-aux/logfile-uploader.py && false)
 
                   # Build CouchDB packages
@@ -118,24 +142,23 @@
             deleteDir()
           } // node
         },
-        centos7erlang183: {
+        centos7: {
           node(label: 'ubuntu') {
             timeout(time: 60, unit: "MINUTES") {
-              sh 'docker pull couchdbdev/centos-7-erlang-18.3'
-              withDockerContainer(image: 'couchdbdev/centos-7-erlang-18.3', args: '-e LD_LIBRARY_PATH=/usr/local/bin') {
+              sh 'docker pull couchdbdev/centos-7-erlang-19.3.6'
+              withDockerContainer(image: 'couchdbdev/centos-7-erlang-19.3.6') {
                 sh 'rm -f apache-couchdb-*.tar.gz'
                 unstash 'tarball'
                 sh '''
                   cwd=$(pwd)
                   mkdir -p $COUCHDB_IO_LOG_DIR
 
-                  # Build CouchDB from tarball
+                  # Build CouchDB from tarball & test
                   builddir=$(mktemp -d)
                   cd $builddir
                   tar -xf $cwd/apache-couchdb-*.tar.gz
                   cd apache-couchdb-*
                   ./configure --with-curl
-                  make all
                   make check || (build-aux/logfile-uploader.py && false)
 
                   # Build CouchDB packages
@@ -158,24 +181,23 @@
             deleteDir()
           } // node
         },
-        ubuntu1404erlang183: {
+        ubuntutrusty: {
           node(label: 'ubuntu') {
             timeout(time: 60, unit: "MINUTES") {
-              sh 'docker pull couchdbdev/ubuntu-14.04-erlang-18.3'
-              withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-18.3') {
+              sh 'docker pull couchdbdev/ubuntu-trusty-erlang-19.3.6'
+              withDockerContainer(image: 'couchdbdev/ubuntu-trusty-erlang-19.3.6') {
                 sh 'rm -f apache-couchdb-*.tar.gz'
                 unstash 'tarball'
                 sh '''
                   cwd=$(pwd)
                   mkdir -p $COUCHDB_IO_LOG_DIR
 
-                  # Build CouchDB from tarball
+                  # Build CouchDB from tarball & test
                   builddir=$(mktemp -d)
                   cd $builddir
                   tar -xf $cwd/apache-couchdb-*.tar.gz
                   cd apache-couchdb-*
                   ./configure --with-curl
-                  make all
                   make check || (build-aux/logfile-uploader.py && false)
 
                   # Build CouchDB packages
@@ -190,7 +212,7 @@
 
                   # Cleanup & save for posterity
                   rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
-                  mv ../couchdb/*deb $cwd/pkgs/$platform || true
+                  mv ../couchdb/*.deb $cwd/pkgs/$platform || true
                 '''
               } // withDocker
             } // timeout
@@ -198,24 +220,23 @@
             deleteDir()
           } // node
         },
-        ubuntu1604erlang183: {
+        ubuntuxenial: {
           node(label: 'ubuntu') {
             timeout(time: 60, unit: "MINUTES") {
-              sh 'docker pull couchdbdev/ubuntu-16.04-erlang-18.3'
-              withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-18.3') {
+              sh 'docker pull couchdbdev/ubuntu-xenial-erlang-19.3.6'
+              withDockerContainer(image: 'couchdbdev/ubuntu-xenial-erlang-19.3.6') {
                 sh 'rm -f apache-couchdb-*.tar.gz'
                 unstash 'tarball'
                 sh '''
                   cwd=$(pwd)
                   mkdir -p $COUCHDB_IO_LOG_DIR
 
-                  # Build CouchDB from tarball
+                  # Build CouchDB from tarball & test
                   builddir=$(mktemp -d)
                   cd $builddir
                   tar -xf $cwd/apache-couchdb-*.tar.gz
                   cd apache-couchdb-*
                   ./configure --with-curl
-                  make all
                   make check || (build-aux/logfile-uploader.py && false)
 
                   # Build CouchDB packages
@@ -230,7 +251,7 @@
 
                   # Cleanup & save for posterity
                   rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
-                  mv ../couchdb/*deb $cwd/pkgs/$platform || true
+                  mv ../couchdb/*.deb $cwd/pkgs/$platform || true
                 '''
               } // withDocker
             } // timeout
@@ -238,24 +259,23 @@
             deleteDir()
           } // node
         },
-        debian8erlang183: {
+        ubuntubionic: {
           node(label: 'ubuntu') {
             timeout(time: 60, unit: "MINUTES") {
-              sh 'docker pull couchdbdev/debian-8-erlang-18.3'
-              withDockerContainer(image: 'couchdbdev/debian-8-erlang-18.3') {
+              sh 'docker pull couchdbdev/ubuntu-bionic-erlang-19.3.6'
+              withDockerContainer(image: 'couchdbdev/ubuntu-bionic-erlang-19.3.6') {
                 sh 'rm -f apache-couchdb-*.tar.gz'
                 unstash 'tarball'
                 sh '''
                   cwd=$(pwd)
                   mkdir -p $COUCHDB_IO_LOG_DIR
 
-                  # Build CouchDB from tarball
+                  # Build CouchDB from tarball & test
                   builddir=$(mktemp -d)
                   cd $builddir
                   tar -xf $cwd/apache-couchdb-*.tar.gz
                   cd apache-couchdb-*
                   ./configure --with-curl
-                  make all
                   make check || (build-aux/logfile-uploader.py && false)
 
                   # Build CouchDB packages
@@ -270,7 +290,7 @@
 
                   # Cleanup & save for posterity
                   rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
-                  mv ../couchdb/*deb $cwd/pkgs/$platform || true
+                  mv ../couchdb/*.deb $cwd/pkgs/$platform || true
                 '''
               } // withDocker
             } // timeout
@@ -278,24 +298,23 @@
             deleteDir()
           } // node
         },
-        debian9erlang183: {
+        debianjessie: {
           node(label: 'ubuntu') {
             timeout(time: 60, unit: "MINUTES") {
-              sh 'docker pull couchdbdev/debian-9-erlang-18.3'
-              withDockerContainer(image: 'couchdbdev/debian-9-erlang-18.3') {
+              sh 'docker pull couchdbdev/debian-jessie-erlang-19.3.6'
+              withDockerContainer(image: 'couchdbdev/debian-jessie-erlang-19.3.6') {
                 sh 'rm -f apache-couchdb-*.tar.gz'
                 unstash 'tarball'
                 sh '''
                   cwd=$(pwd)
                   mkdir -p $COUCHDB_IO_LOG_DIR
 
-                  # Build CouchDB from tarball
+                  # Build CouchDB from tarball & test
                   builddir=$(mktemp -d)
                   cd $builddir
                   tar -xf $cwd/apache-couchdb-*.tar.gz
                   cd apache-couchdb-*
                   ./configure --with-curl
-                  make all
                   make check || (build-aux/logfile-uploader.py && false)
 
                   # Build CouchDB packages
@@ -310,7 +329,46 @@
 
                   # Cleanup & save for posterity
                   rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
-                  mv ../couchdb/*deb $cwd/pkgs/$platform || true
+                  mv ../couchdb/*.deb $cwd/pkgs/$platform || true
+                '''
+              } // withDocker
+            } // timeout
+            archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+            deleteDir()
+          } // node
+        },
+        debianstretch: {
+          node(label: 'ubuntu') {
+            timeout(time: 60, unit: "MINUTES") {
+              sh 'docker pull couchdbdev/debian-stretch-erlang-19.3.6'
+              withDockerContainer(image: 'couchdbdev/debian-stretch-erlang-19.3.6') {
+                sh 'rm -f apache-couchdb-*.tar.gz'
+                unstash 'tarball'
+                sh '''
+                  cwd=$(pwd)
+                  mkdir -p $COUCHDB_IO_LOG_DIR
+
+                  # Build CouchDB from tarball & test
+                  builddir=$(mktemp -d)
+                  cd $builddir
+                  tar -xf $cwd/apache-couchdb-*.tar.gz
+                  cd apache-couchdb-*
+                  ./configure --with-curl
+                  make check || (build-aux/logfile-uploader.py && false)
+
+                  # Build CouchDB packages
+                  cd $builddir
+                  git clone https://github.com/apache/couchdb-pkg
+                  mkdir couchdb
+                  cp $cwd/apache-couchdb-*.tar.gz couchdb
+                  tar -xf $cwd/apache-couchdb-*.tar.gz -C couchdb
+                  cd couchdb-pkg
+                  platform=$(lsb_release -cs)
+                  make $platform PLATFORM=$platform
+
+                  # Cleanup & save for posterity
+                  rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
+                  mv ../couchdb/*.deb $cwd/pkgs/$platform || true
                 '''
               } // withDocker
             } // timeout
@@ -335,8 +393,8 @@
         }
       }
       steps {
-        sh 'docker pull couchdbdev/debian-8-base:latest'
-        withDockerContainer(image: 'couchdbdev/debian-8-base:latest', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
+        sh 'docker pull couchdbdev/debian-stretch-erlang-19.3.6:latest'
+        withDockerContainer(image: 'couchdbdev/debian-stretch-erlang-19.3.6:latest', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
           withCredentials([file(credentialsId: 'jenkins-key', variable: 'KEY')]) {
             sh 'rm -rf pkgs *.tar.gz'
             unarchive mapping: ['pkgs/' : '.']
@@ -346,19 +404,28 @@
               rsync -avz -e "ssh -o StrictHostKeyChecking=no -i $KEY" jenkins@couchdb-vm2.apache.org:/var/www/html/$BRANCH_NAME . || mkdir -p $BRANCH_NAME
               rm -rf $BRANCH_NAME/debian/* $BRANCH_NAME/el6/* $BRANCH_NAME/el7/*
               mkdir -p $BRANCH_NAME/debian $BRANCH_NAME/el6 $BRANCH_NAME/el7 $BRANCH_NAME/source
+              rsync -avz -e "ssh -o StrictHostKeyChecking=no -i $KEY" jenkins@couchdb-vm2.apache.org:/var/www/html/js .
             '''
             echo 'Building Debian repo...'
             sh '''
               git clone https://github.com/apache/couchdb-pkg
-              reprepro -b couchdb-pkg/repo includedeb jessie pkgs/jessie/*deb
-              reprepro -b couchdb-pkg/repo includedeb trusty pkgs/trusty/*deb
-              reprepro -b couchdb-pkg/repo includedeb xenial pkgs/xenial/*deb
-              reprepro -b couchdb-pkg/repo includedeb stretch pkgs/stretch/*deb
+              cp js/debian-jessie/*.deb pkgs/jessie
+              reprepro -b couchdb-pkg/repo includedeb jessie pkgs/jessie/*.deb
+              cp js/debian-stretch/*.deb pkgs/stretch
+              reprepro -b couchdb-pkg/repo includedeb stretch pkgs/stretch/*.deb
+              cp js/ubuntu-trusty/*.deb pkgs/trusty
+              reprepro -b couchdb-pkg/repo includedeb trusty pkgs/trusty/*.deb
+              cp js/ubuntu-xenial/*.deb pkgs/xenial
+              reprepro -b couchdb-pkg/repo includedeb xenial pkgs/xenial/*.deb
+              cp js/ubuntu-bionic/*.deb pkgs/bionic
+              reprepro -b couchdb-pkg/repo includedeb bionic pkgs/bionic/*.deb
             '''
             echo 'Building CentOS repos...'
             sh '''
+              cp js/centos-6/*rpm pkgs/centos6
+              cp js/centos-7/*rpm pkgs/centos7
               cd pkgs/centos6 && createrepo --database .
-              cd ../centos7 && rm -f js* && createrepo --database .
+              cd ../centos7 && createrepo --database .
             '''
             echo 'Building tree to upload...'
             sh '''
diff --git a/LICENSE b/LICENSE
index a209352..6034c71 100644
--- a/LICENSE
+++ b/LICENSE
@@ -2274,3 +2274,27 @@
 LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 SUCH DAMAGE.
+
+For the src/hyper component:
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Game Analytics ApS
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Makefile b/Makefile
index d56d4e8..27d9531 100644
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,11 @@
 ifeq ($(IN_RELEASE), true)
 COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)
 else
-RELTAG = $(shell git describe | grep -E '^[0-9]+\.[0-9]\.[0-9]+(-RC[0-9]+)?$$')
+# IN_RC holds the -RCx suffix, if any, so release-candidate tarballs carry it in their name
+IN_RC = $(shell git describe --tags --always --first-parent \
+							| grep -Eo -- '-RC[0-9]+' 2>/dev/null)
+RELTAG = $(shell git describe --dirty --abbrev=0 --tags --always --first-parent \
+				| grep -Eo '^[0-9]+\.[0-9]\.[0-9]+')
 ifeq ($(RELTAG),)
 COUCHDB_VERSION_SUFFIX = $(shell git rev-parse --short --verify HEAD)
 COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)-$(COUCHDB_VERSION_SUFFIX)
@@ -30,20 +34,23 @@
 
 # Rebar options
 apps=
-skip_deps=folsom,meck,mochiweb,triq,snappy
+skip_deps=folsom,meck,mochiweb,triq,snappy,bcrypt,hyper
 suites=
 tests=
 
+COMPILE_OPTS=$(shell echo "\
+	apps=$(apps) \
+	" | sed -e 's/[a-z_]\{1,\}= / /g')
 EUNIT_OPTS=$(shell echo "\
 	apps=$(apps) \
 	skip_deps=$(skip_deps) \
 	suites=$(suites) \
 	tests=$(tests) \
-	" | sed -e 's/[a-z]\+= / /g')
+	" | sed -e 's/[a-z]\{1,\}= / /g')
 DIALYZE_OPTS=$(shell echo "\
 	apps=$(apps) \
 	skip_deps=$(skip_deps) \
-	" | sed -e 's/[a-z]\+= / /g')
+	" | sed -e 's/[a-z]\{1,\}= / /g')
 
 #ignore javascript tests
 ignore_js_suites=
@@ -73,9 +80,9 @@
 
 
 .PHONY: couch
-# target: couch - Build CouchDB core
+# target: couch - Build CouchDB core; use ERL_OPTS to pass custom compiler options
 couch: config.erl
-	@COUCHDB_VERSION=$(COUCHDB_VERSION) $(REBAR) compile
+	@COUCHDB_VERSION=$(COUCHDB_VERSION) $(REBAR) compile $(COMPILE_OPTS)
 	@cp src/couch/priv/couchjs bin/
 
 
@@ -289,8 +296,8 @@
 	@mkdir -p apache-couchdb-$(COUCHDB_VERSION)/share/docs/man
 	@cp src/docs/build/man/apachecouchdb.1 apache-couchdb-$(COUCHDB_VERSION)/share/docs/man/
 
-	@tar czf apache-couchdb-$(COUCHDB_VERSION).tar.gz apache-couchdb-$(COUCHDB_VERSION)
-	@echo "Done: apache-couchdb-$(COUCHDB_VERSION).tar.gz"
+	@tar czf apache-couchdb-$(COUCHDB_VERSION)$(IN_RC).tar.gz apache-couchdb-$(COUCHDB_VERSION)
+	@echo "Done: apache-couchdb-$(COUCHDB_VERSION)$(IN_RC).tar.gz"
 
 
 .PHONY: release
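
A worked example of the new version logic: if git describe reports 2.2.0-RC1,
the grep behind RELTAG extracts 2.2.0 and IN_RC captures -RC1, so the dist
target produces apache-couchdb-2.2.0-RC1.tar.gz; on a plain 2.2.0 tag IN_RC
expands to nothing and the tarball name is unchanged.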
diff --git a/NOTICE b/NOTICE
index c040338..481e755 100644
--- a/NOTICE
+++ b/NOTICE
@@ -184,3 +184,7 @@
     1997 Niels Provos <provos@physnet.uni-hamburg.de>
   - The asynchronous queue code (c_src/async_queue.c and c_src/async_queue.h)
     is from the esnappy project, copyright 2011 Konstantin V. Sorokin.
+
+* hyper
+
+  Copyright (c) 2014 Game Analytics ApS
diff --git a/rebar.config.script b/rebar.config.script
index 7ae2136..1411752 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -47,28 +47,28 @@
 
 DepDescs = [
 %% Independent Apps
-{config,           "config",           {tag, "1.0.3"}},
+{config,           "config",           {tag, "1.0.4"}},
 {b64url,           "b64url",           {tag, "1.0.1"}},
 {ets_lru,          "ets-lru",          {tag, "1.0.0"}},
 {khash,            "khash",            {tag, "1.0.1"}},
-{snappy,           "snappy",           {tag, "CouchDB-1.0.0"}},
+{snappy,           "snappy",           {tag, "CouchDB-1.0.1"}},
 {ioq,              "ioq",              {tag, "1.0.1"}},
 
 %% Non-Erlang deps
 {docs,             {url, "https://github.com/apache/couchdb-documentation"},
-                   {tag, "2.1.0"}, [raw]},
+                   {tag, "2.1.2"}, [raw]},
 {fauxton,          {url, "https://github.com/apache/couchdb-fauxton"},
-                   {tag, "v1.1.13"}, [raw]},
+                   {tag, "v1.1.15"}, [raw]},
 %% Third party deps
 {folsom,           "folsom",           {tag, "CouchDB-0.8.2"}},
+{hyper,            "hyper",            {tag, "CouchDB-2.2.0-3"}},
 {ibrowse,          "ibrowse",          {tag, "CouchDB-4.0.1"}},
 {jiffy,            "jiffy",            {tag, "CouchDB-0.14.11-2"}},
 {mochiweb,         "mochiweb",         {tag, "v2.17.0"}},
 {meck,             "meck",             {tag, "0.8.8"}},
 {bcrypt,           {url, "https://github.com/apache/couchdb-erlang-bcrypt"},
                    {tag, "1.0.2"}},
-{triq,             {url, "https://gitlab.com/triq/triq.git"},
-                   "79bd272025434e152745067c36350faa7274c653"}
+{triq,             "triq",             {tag, "v1.2.0"}}
 ],
 
 
@@ -87,13 +87,18 @@
         {AppName, ".*", {git, Url, Version}, Options}
 end,
 
+ErlOpts = case os:getenv("ERL_OPTS") of
+    false -> [];
+    Opts -> [list_to_atom(O) || O <- string:tokens(Opts, ",")]
+end,
+
 AddConfig = [
-    {require_otp_vsn, "R16B03|R16B03-1|17|18|19|20"},
+    {require_otp_vsn, "17|18|19|20"},
     {deps_dir, "src"},
     {deps, lists:map(MakeDep, DepDescs)},
     {sub_dirs, SubDirs},
     {lib_dirs, ["src"]},
-    {erl_opts, [bin_opt_info, debug_info, {i, "../"}]},
+    {erl_opts, [{i, "../"} | ErlOpts]},
     {eunit_opts, [verbose]},
     {plugins, [eunit_plugin]},
     {dialyzer, [
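
A sketch of how the new ERL_OPTS hook behaves ("native" and "inline" below are
arbitrary sample atoms, not recommended settings):

    %% with ERL_OPTS="native,inline" in the environment:
    ErlOpts = [list_to_atom(O) || O <- string:tokens("native,inline", ",")],
    %% ErlOpts =:= [native, inline], so erl_opts becomes [{i, "../"}, native, inline]

Each comma-separated token becomes a bare atom, so only atom-style compiler
options can be passed this way.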
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 74087ea..0f0d547 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -8,7 +8,7 @@
 view_index_dir = {{view_index_dir}}
 ; util_driver_dir =
 ; plugin_dir =
-os_process_timeout = 5000 ; 5 seconds. for view and external servers.
+os_process_timeout = 5000 ; 5 seconds, for view servers
 max_dbs_open = 500
 delayed_commits = false
 ; Method used to compress everything that is appended to database and view index files, except
@@ -32,7 +32,6 @@
 ; btree_chunk_size = 1279
 ; maintenance_mode = false
 ; stem_interactive_updates = true
-; update_lru_on_read = true
 ; uri_file =
 ; The speed of processing the _changes feed with doc_ids filter can be
 ; influenced directly with this setting - increase for faster processing at the
@@ -49,12 +48,22 @@
 ; applied conservatively. For example 1.0e+16 could be encoded as 1e16, so 4 used
 ; for size calculation instead of 7.
 ;max_document_size = 4294967296 ; bytes
-
+;
 ; Maximum attachment size.
 ; max_attachment_size = infinity
+;
+; Do not update the least recently used DB cache on reads, only writes
+;update_lru_on_read = false
+;
 ; The default storage engine to use when creating databases
 ; is set as a key into the [couchdb_engines] section.
 default_engine = couch
+;
+; Enable this to only "soft-delete" databases when DELETE /{db} requests are
+; made. This will place a .recovery directory in your data directory and
+; move deleted databases/shards there instead. You can then manually delete
+; these files later, as desired.
+;enable_database_recovery = false
 
 [couchdb_engines]
 ; The keys in this section are the filename extension that
@@ -84,6 +93,11 @@
 ; _dbs_info in a request
 max_db_number_for_dbs_info_req = 100
 
+; authentication handlers
+; authentication_handlers = {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+; uncomment the next line to enable proxy authentication
+; authentication_handlers = {chttpd_auth, proxy_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+
 [database_compaction]
 ; larger buffer sizes can originate smaller files
 doc_buffer_size = 524288 ; value in bytes
@@ -268,7 +282,6 @@
 
 [daemons]
 index_server={couch_index_server, start_link, []}
-external_manager={couch_external_manager, start_link, []}
 query_servers={couch_proc_manager, start_link, []}
 vhosts={couch_httpd_vhost, start_link, []}
 httpd={couch_httpd, start_link, []}
@@ -313,12 +326,6 @@
 _temp_view = {couch_mrview_http, handle_temp_view_req}
 _view_cleanup = {couch_mrview_http, handle_cleanup_req}
 
-; The external module takes an optional argument allowing you to narrow it to a
-; single script. Otherwise the script name is inferred from the first path section
-; after _external's own path.
-; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
-; _external = {couch_httpd_external, handle_external_req}
-
 [httpd_design_handlers]
 _compact = {couch_mrview_http, handle_compact_req}
 _info = {couch_mrview_http, handle_info_req}
@@ -329,21 +336,6 @@
 _view = {couch_mrview_http, handle_view_req}
 _view_changes = {couch_mrview_http, handle_view_changes_req}
 
-; enable external as an httpd handler, then link it with commands here.
-; note, this api is still under consideration.
-; [external]
-; mykey = /path/to/mycommand
-
-; Here you can setup commands for CouchDB to manage
-; while it is alive. It will attempt to keep each command
-; alive if it exits.
-; [os_daemons]
-; some_daemon_name = /path/to/script -with args
-; [os_daemon_settings]
-; max_retries = 3
-; retry_time = 5
-
-
 [uuids]
 ; Known algorithms:
 ;   random - 128 bits of random awesome
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index 6b46f0f..e3b7b15 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -46,23 +46,12 @@
 [query_servers]
 ;nodejs = /usr/local/bin/couchjs-node /path/to/couchdb/share/server/main.js
 
-
-[httpd_global_handlers]
-;_google = {couch_httpd_proxy, handle_proxy_req, <<"http://www.google.com">>}
-
 [couch_httpd_auth]
 ; If you set this to true, you should also uncomment the WWW-Authenticate line
 ; above. If you don't configure a WWW-Authenticate header, CouchDB will send
 ; Basic realm="server" in order to prevent you getting logged out.
 ; require_valid_user = false
 
-[os_daemons]
-; For any commands listed here, CouchDB will attempt to ensure that
-; the process remains alive. Daemons should monitor their environment
-; to know when to exit. This can most easily be accomplished by exiting
-; when stdin is closed.
-;foo = /path/to/command -with args
-
 [daemons]
 ; enable SSL support by uncommenting the following line and supply the PEM's below.
 ; the default ssl port CouchDB listens on is 6984
@@ -103,9 +92,6 @@
 [vhosts]
 ;example.com = /database/
 
-[update_notification]
-;unique notifier name=/full/path/to/exe -with "cmd line arg"
-
 ; To create an admin account uncomment the '[admins]' section below and add a
 ; line in the format 'username = password'. When you next start CouchDB, it
 ; will change the password to a hash (so that your passwords don't linger
diff --git a/rel/overlay/etc/vm.args b/rel/overlay/etc/vm.args
index acb4571..e9f0737 100644
--- a/rel/overlay/etc/vm.args
+++ b/rel/overlay/etc/vm.args
@@ -45,3 +45,9 @@
 
 # Comment this line out to enable the interactive Erlang shell on startup
 +Bd -noinput
+
+# Force use of the smp scheduler, fixes #1296
+-smp enable
+
+# Set maximum SSL session lifetime to reap terminated replication readers
+-ssl session_lifetime 300
diff --git a/rel/reltool.config b/rel/reltool.config
index aa31006..5e86d96 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -47,6 +47,7 @@
         fabric,
         folsom,
         global_changes,
+        hyper,
         ibrowse,
         ioq,
         jiffy,
@@ -101,6 +102,7 @@
     {app, fabric, [{incl_cond, include}]},
     {app, folsom, [{incl_cond, include}]},
     {app, global_changes, [{incl_cond, include}]},
+    {app, hyper, [{incl_cond, include}]},
     {app, ibrowse, [{incl_cond, include}]},
     {app, ioq, [{incl_cond, include}]},
     {app, jiffy, [{incl_cond, include}]},
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index 6be0d18..c0179ba 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -104,10 +104,12 @@
     end,
     SslOpts = ServerOpts ++ ClientOpts,
 
-    Options =
+    Options0 =
         [{port, Port},
          {ssl, true},
          {ssl_opts, SslOpts}],
+    CustomServerOpts = get_server_options("httpsd"),
+    Options = merge_server_options(Options0, CustomServerOpts),
     start_link(https, Options).
 
 start_link(Name, Options) ->
@@ -124,9 +126,8 @@
         {name, Name},
         {ip, IP}
     ],
-    ServerOptsCfg = config:get("chttpd", "server_options", "[]"),
-    {ok, ServerOpts} = couch_util:parse_term(ServerOptsCfg),
-    Options2 = lists:keymerge(1, lists:sort(Options1), lists:sort(ServerOpts)),
+    ServerOpts = get_server_options("chttpd"),
+    Options2 = merge_server_options(Options1, ServerOpts),
     case mochiweb_http:start(Options2) of
     {ok, Pid} ->
         {ok, Pid};
@@ -135,6 +136,14 @@
         {error, Reason}
     end.
 
+get_server_options(Module) ->
+    ServerOptsCfg = config:get(Module, "server_options", "[]"),
+    {ok, ServerOpts} = couch_util:parse_term(ServerOptsCfg),
+    ServerOpts.
+
+merge_server_options(A, B) ->
+    lists:keymerge(1, lists:sort(A), lists:sort(B)).
+
 stop() ->
     catch mochiweb_http:stop(https),
     mochiweb_http:stop(?MODULE).
@@ -288,11 +297,7 @@
         not_preflight ->
             case chttpd_auth:authenticate(HttpReq, fun authenticate_request/1) of
             #httpd{} = Req ->
-                HandlerFun = chttpd_handlers:url_handler(
-                    HandlerKey, fun chttpd_db:handle_request/1),
-                AuthorizedReq = chttpd_auth:authorize(possibly_hack(Req),
-                    fun chttpd_auth_request:authorize_request/1),
-                {AuthorizedReq, HandlerFun(AuthorizedReq)};
+                handle_req_after_auth(HandlerKey, Req);
             Response ->
                 {HttpReq, Response}
             end;
@@ -303,6 +308,17 @@
         {HttpReq, catch_error(HttpReq, Tag, Error)}
     end.
 
+handle_req_after_auth(HandlerKey, HttpReq) ->
+    try
+        HandlerFun = chttpd_handlers:url_handler(HandlerKey,
+            fun chttpd_db:handle_request/1),
+        AuthorizedReq = chttpd_auth:authorize(possibly_hack(HttpReq),
+            fun chttpd_auth_request:authorize_request/1),
+        {AuthorizedReq, HandlerFun(AuthorizedReq)}
+    catch Tag:Error ->
+        {HttpReq, catch_error(HttpReq, Tag, Error)}
+    end.
+
 catch_error(_HttpReq, throw, {http_head_abort, Resp}) ->
     {ok, Resp};
 catch_error(_HttpReq, throw, {http_abort, Resp, Reason}) ->
@@ -1238,4 +1254,38 @@
     ok = meck:unload(couch_log),
     Message.
 
+handle_req_after_auth_test() ->
+    Headers = mochiweb_headers:make([{"HOST", "127.0.0.1:15984"}]),
+    MochiReq = mochiweb_request:new(socket, [], 'PUT', "/newdb", version,
+        Headers),
+    UserCtx = #user_ctx{name = <<"retain_user">>},
+    Roles = [<<"_reader">>],
+    AuthorizedCtx = #user_ctx{name = <<"retain_user">>, roles = Roles},
+    Req = #httpd{
+        mochi_req = MochiReq,
+        begin_ts = {1458,588713,124003},
+        original_method = 'PUT',
+        peer = "127.0.0.1",
+        nonce = "nonce",
+        user_ctx = UserCtx
+    },
+    AuthorizedReq = Req#httpd{user_ctx = AuthorizedCtx},
+    ok = meck:new(chttpd_handlers, [passthrough]),
+    ok = meck:new(chttpd_auth, [passthrough]),
+    ok = meck:expect(chttpd_handlers, url_handler, fun(_Key, _Fun) ->
+         fun(_Req) -> handled_authorized_req end
+    end),
+    ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
+        AuthorizedReq
+    end),
+    ?assertEqual({AuthorizedReq, handled_authorized_req},
+        handle_req_after_auth(foo_key, Req)),
+    ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
+        meck:exception(throw, {http_abort, resp, some_reason})
+    end),
+    ?assertEqual({Req, {aborted, resp, some_reason}},
+        handle_req_after_auth(foo_key, Req)),
+    ok = meck:unload(chttpd_handlers),
+    ok = meck:unload(chttpd_auth).
+
 -endif.
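
A sketch of the refactored HTTPS startup path, assuming a hypothetical
server_options = [{backlog, 512}] in the [httpsd] config section:

    Options0 = [{port, Port}, {ssl, true}, {ssl_opts, SslOpts}],
    Options = merge_server_options(Options0, get_server_options("httpsd")),
    %% Options =:= [{backlog, 512}, {port, Port}, {ssl, true}, {ssl_opts, SslOpts}]

The HTTPS listener now honors custom mochiweb server options the same way the
clustered "chttpd" listener already did.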
diff --git a/src/chttpd/src/chttpd_auth.erl b/src/chttpd/src/chttpd_auth.erl
index be12148..6602468 100644
--- a/src/chttpd/src/chttpd_auth.erl
+++ b/src/chttpd/src/chttpd_auth.erl
@@ -17,6 +17,7 @@
 
 -export([default_authentication_handler/1]).
 -export([cookie_authentication_handler/1]).
+-export([proxy_authentication_handler/1]).
 -export([party_mode_handler/1]).
 
 -export([handle_session_req/1]).
@@ -47,6 +48,9 @@
 cookie_authentication_handler(Req) ->
     couch_httpd_auth:cookie_authentication_handler(Req, chttpd_auth_cache).
 
+proxy_authentication_handler(Req) ->
+    couch_httpd_auth:proxy_authentication_handler(Req).
+
 party_mode_handler(Req) ->
     case config:get("chttpd", "require_valid_user", "false") of
     "true" ->
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index ed0adea..7761007 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -314,7 +314,6 @@
     end.
 
 do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) ->
-    fabric:get_security(DbName, [{user_ctx,Ctx}]), % calls check_is_reader
     {ok, Db} = couch_db:clustered_db(DbName, Ctx),
     Fun(Req, Db).
 
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 253da23..95345d4 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -293,11 +293,15 @@
 % "value"
 handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
     couch_util:check_config_blacklist(Section),
-    Value = chttpd:json_body(Req),
+    Value = couch_util:trim(chttpd:json_body(Req)),
     Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
     OldValue = call_node(Node, config, get, [Section, Key, ""]),
-    ok = call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]),
-    send_json(Req, 200, list_to_binary(OldValue));
+    case call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]) of
+        ok ->
+            send_json(Req, 200, list_to_binary(OldValue));
+        {error, Reason} ->
+            chttpd:send_error(Req, {bad_request, Reason})
+    end;
 % GET /_node/$node/_config/Section/Key
 handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
     case call_node(Node, config, get, [Section, Key, undefined]) of
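
Two behavioral changes here: a JSON string body that arrives with stray
whitespace (for example "5000\n") is trimmed before being stored, and a
config:set failure now reaches the client as a 400 bad_request instead of
crashing the request on the former ok = ... badmatch.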
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 524b728..cf5cee6 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -47,6 +47,7 @@
         couch_log,
         couch_event,
         ioq,
-        couch_stats
+        couch_stats,
+        hyper
     ]}
 ]}.
diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
index 264269a..c8eaa8f 100644
--- a/src/couch/src/couch_bt_engine.erl
+++ b/src/couch/src/couch_bt_engine.erl
@@ -115,7 +115,7 @@
     %% Delete any leftover compaction files. If we don't do this a
     %% subsequent request for this DB will try to open them to use
     %% as a recovery.
-    delete_compaction_files(RootDir, FilePath, [{context, delete}]),
+    delete_compaction_files(RootDir, FilePath, [{context, compaction}]),
 
     % Delete the actual database file
     couch_file:delete(RootDir, FilePath, Async).
@@ -770,7 +770,7 @@
 
 delete_compaction_files(FilePath) ->
     RootDir = config:get("couchdb", "database_dir", "."),
-    DelOpts = [{context, delete}],
+    DelOpts = [{context, compaction}],
     delete_compaction_files(RootDir, FilePath, DelOpts).
 
 
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index fc22c3a..61925d9 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -879,16 +879,10 @@
             {[], AccErrors}, Bucket),
         prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
     #full_doc_info{rev_tree=OldTree} ->
-        RevsLimit = get_revs_limit(Db),
         OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
         OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs],
-        NewRevTree = lists:foldl(
-            fun(NewDoc, AccTree) ->
-                {NewTree, _} = couch_key_tree:merge(AccTree,
-                    couch_doc:to_path(NewDoc), RevsLimit),
-                NewTree
-            end,
-            OldTree, Bucket),
+        NewPaths = lists:map(fun couch_doc:to_path/1, Bucket),
+        NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths),
         Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
         LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
         {ValidatedBucket, AccErrors3} =
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index 79567e9..acb9ec1 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -82,16 +82,17 @@
 
 handle_call({set_security, NewSec}, _From, #db{} = Db) ->
     {ok, NewDb} = couch_db_engine:set_security(Db, NewSec),
-    NewSecDb = NewDb#db{
+    NewSecDb = commit_data(NewDb#db{
         security = NewSec
-    },
+    }),
     ok = gen_server:call(couch_server, {db_updated, NewSecDb}, infinity),
     {reply, ok, NewSecDb, idle_limit()};
 
 handle_call({set_revs_limit, Limit}, _From, Db) ->
     {ok, Db2} = couch_db_engine:set_revs_limit(Db, Limit),
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
-    {reply, ok, Db2, idle_limit()};
+    Db3 = commit_data(Db2),
+    ok = gen_server:call(couch_server, {db_updated, Db3}, infinity),
+    {reply, ok, Db3, idle_limit()};
 
 handle_call({purge_docs, _IdRevs}, _From,
         #db{compactor_pid=Pid}=Db) when Pid /= nil ->
@@ -160,12 +161,12 @@
     Pairs = pair_purge_info(PreviousFDIs, FDIs),
 
     {ok, Db2} = couch_db_engine:write_doc_infos(Db, Pairs, [], PurgedIdRevs),
-
-    ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
+    Db3 = commit_data(Db2),
+    ok = gen_server:call(couch_server, {db_updated, Db3}, infinity),
     couch_event:notify(Db#db.name, updated),
 
-    PurgeSeq = couch_db_engine:get_purge_seq(Db2),
-    {reply, {ok, PurgeSeq, PurgedIdRevs}, Db2, idle_limit()};
+    PurgeSeq = couch_db_engine:get_purge_seq(Db3),
+    {reply, {ok, PurgeSeq, PurgedIdRevs}, Db3, idle_limit()};
 
 handle_call(Msg, From, Db) ->
     case couch_db_engine:handle_db_updater_call(Msg, From, Db) of
@@ -503,23 +504,24 @@
         [OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) ->
     erlang:put(last_id_merged, OldDocInfo#full_doc_info.id), % for debugging
     NewDocInfo0 = lists:foldl(fun({Client, NewDoc}, OldInfoAcc) ->
-        merge_rev_tree(OldInfoAcc, NewDoc, Client, Limit, MergeConflicts)
+        merge_rev_tree(OldInfoAcc, NewDoc, Client, MergeConflicts)
     end, OldDocInfo, NewDocs),
+    NewDocInfo1 = maybe_stem_full_doc_info(NewDocInfo0, Limit),
     % When MergeConflicts is false, we updated #full_doc_info.deleted on every
     % iteration of merge_rev_tree. However, merge_rev_tree does not update
     % #full_doc_info.deleted when MergeConflicts is true, since we don't need
     % to know whether the doc is deleted between iterations. Since we still
     % need to know if the doc is deleted after the merge happens, we have to
     % set it here.
-    NewDocInfo1 = case MergeConflicts of
+    NewDocInfo2 = case MergeConflicts of
         true ->
-            NewDocInfo0#full_doc_info{
-                deleted = couch_doc:is_deleted(NewDocInfo0)
+            NewDocInfo1#full_doc_info{
+                deleted = couch_doc:is_deleted(NewDocInfo1)
             };
         false ->
-            NewDocInfo0
+            NewDocInfo1
     end,
-    if NewDocInfo1 == OldDocInfo ->
+    if NewDocInfo2 == OldDocInfo ->
         % nothing changed
         merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
             AccNewInfos, AccRemoveSeqs, AccSeq);
@@ -528,7 +530,7 @@
         % important to note that the update_seq on OldDocInfo should
         % be identical to the value on NewDocInfo2.
         OldSeq = OldDocInfo#full_doc_info.update_seq,
-        NewDocInfo2 = NewDocInfo1#full_doc_info{
+        NewDocInfo3 = NewDocInfo2#full_doc_info{
             update_seq = AccSeq + 1
         },
         RemoveSeqs = case OldSeq of
@@ -536,10 +538,10 @@
             _ -> [OldSeq | AccRemoveSeqs]
         end,
         merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
-            [NewDocInfo2|AccNewInfos], RemoveSeqs, AccSeq+1)
+            [NewDocInfo3|AccNewInfos], RemoveSeqs, AccSeq+1)
     end.
 
-merge_rev_tree(OldInfo, NewDoc, Client, Limit, false)
+merge_rev_tree(OldInfo, NewDoc, Client, false)
         when OldInfo#full_doc_info.deleted ->
     % We're recreating a document that was previously
     % deleted. To check that this is a recreation from
@@ -573,7 +575,7 @@
             % Merge our modified new doc into the tree
             #full_doc_info{rev_tree=OldTree} = OldInfo,
             NewTree0 = couch_doc:to_path(NewDoc2),
-            case couch_key_tree:merge(OldTree, NewTree0, Limit) of
+            case couch_key_tree:merge(OldTree, NewTree0) of
                 {NewTree1, new_leaf} ->
                     % We changed the revision id so inform the caller
                     send_result(Client, NewDoc, {ok, {OldPos+1, NewRevId}}),
@@ -588,7 +590,7 @@
             send_result(Client, NewDoc, conflict),
             OldInfo
     end;
-merge_rev_tree(OldInfo, NewDoc, Client, Limit, false) ->
+merge_rev_tree(OldInfo, NewDoc, Client, false) ->
     % We're attempting to merge a new revision into an
     % undeleted document. To not be a conflict we require
     % that the merge results in extending a branch.
@@ -596,7 +598,7 @@
     OldTree = OldInfo#full_doc_info.rev_tree,
     NewTree0 = couch_doc:to_path(NewDoc),
     NewDeleted = NewDoc#doc.deleted,
-    case couch_key_tree:merge(OldTree, NewTree0, Limit) of
+    case couch_key_tree:merge(OldTree, NewTree0) of
         {NewTree, new_leaf} when not NewDeleted ->
             OldInfo#full_doc_info{
                 rev_tree = NewTree,
@@ -614,14 +616,23 @@
             send_result(Client, NewDoc, conflict),
             OldInfo
     end;
-merge_rev_tree(OldInfo, NewDoc, _Client, Limit, true) ->
+merge_rev_tree(OldInfo, NewDoc, _Client, true) ->
     % We're merging in revisions without caring about
     % conflicts. Most likely this is a replication update.
     OldTree = OldInfo#full_doc_info.rev_tree,
     NewTree0 = couch_doc:to_path(NewDoc),
-    {NewTree, _} = couch_key_tree:merge(OldTree, NewTree0, Limit),
+    {NewTree, _} = couch_key_tree:merge(OldTree, NewTree0),
     OldInfo#full_doc_info{rev_tree = NewTree}.
 
+maybe_stem_full_doc_info(#full_doc_info{rev_tree = Tree} = Info, Limit) ->
+    case config:get_boolean("couchdb", "stem_interactive_updates", true) of
+        true ->
+            Stemmed = couch_key_tree:stem(Tree, Limit),
+            Info#full_doc_info{rev_tree = Stemmed};
+        false ->
+            Info
+    end.
+
 update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, FullCommit) ->
     UpdateSeq = couch_db_engine:get_update_seq(Db),
     RevsLimit = couch_db_engine:get_revs_limit(Db),
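
The net effect is that stemming moves out of couch_key_tree:merge/3 and into
the updater itself; a minimal sketch of the new flow, using names from this
diff:

    {NewTree, Result} = couch_key_tree:merge(OldTree, couch_doc:to_path(NewDoc)),
    Stemmed = couch_key_tree:stem(NewTree, RevsLimit),
    %% maybe_stem_full_doc_info/2 only performs the stem step when
    %% [couchdb] stem_interactive_updates is true (the default)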
diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl
index e2fc9f2..0c70bcb 100644
--- a/src/couch/src/couch_httpd_misc_handlers.erl
+++ b/src/couch/src/couch_httpd_misc_handlers.erl
@@ -262,7 +262,7 @@
         <<"admins">> ->
             couch_passwords:hash_admin_password(RawValue);
         _ ->
-            RawValue
+            couch_util:trim(RawValue)
         end
     end,
     OldValue = config:get(Section, Key, ""),
diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl
index e556b28..33795a3 100644
--- a/src/couch/src/couch_httpd_multipart.erl
+++ b/src/couch/src/couch_httpd_multipart.erl
@@ -208,7 +208,8 @@
         {ok, {WriterRef, _}} ->
             case num_mp_writers() of
                 N when N > 1 ->
-                    num_mp_writers(N - 1);
+                    num_mp_writers(N - 1),
+                    orddict:erase(WriterPid, Counters);
                 _ ->
                     abort_parsing
             end;
diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl
index cd661e2..9415041 100644
--- a/src/couch/src/couch_key_tree.erl
+++ b/src/couch/src/couch_key_tree.erl
@@ -59,7 +59,7 @@
 map/2,
 map_leafs/2,
 mapfold/3,
-merge/3,
+multi_merge/2,
 merge/2,
 remove_leafs/2,
 stem/2
@@ -71,16 +71,13 @@
 -type revtree() :: [tree()].
 
 
-%% @doc Merge a path into the given tree and then stem the result.
-%% Although Tree is of type tree(), it must not contain any branches.
--spec merge(revtree(), tree() | path(), pos_integer()) ->
-                {revtree(), new_leaf | new_branch | internal_node}.
-merge(RevTree, Tree, StemDepth) ->
-    {Merged, Result} = merge(RevTree, Tree),
-    case config:get("couchdb", "stem_interactive_updates", "true") of
-        "true" -> {stem(Merged, StemDepth), Result};
-        _ -> {Merged, Result}
-    end.
+%% @doc Merge multiple paths into the given tree.
+-spec multi_merge(revtree(), tree()) -> revtree().
+multi_merge(RevTree, Trees) ->
+    lists:foldl(fun(Tree, RevTreeAcc) ->
+        {NewRevTree, _} = merge(RevTreeAcc, Tree),
+        NewRevTree
+    end, RevTree, lists:sort(Trees)).
 
 
 %% @doc Merge a path into a tree.
@@ -470,6 +467,70 @@
 
 
 stem(Trees, Limit) ->
+    try
+        {_, Branches} = lists:foldl(fun(Tree, {Seen, TreeAcc}) ->
+            {NewSeen, NewBranches} = stem_tree(Tree, Limit, Seen),
+            {NewSeen, NewBranches ++ TreeAcc}
+        end, {sets:new(), []}, Trees),
+        lists:sort(Branches)
+    catch throw:dupe_keys ->
+        repair_tree(Trees, Limit)
+    end.
+
+
+stem_tree({Depth, Child}, Limit, Seen) ->
+    case stem_tree(Depth, Child, Limit, Seen) of
+        {NewSeen, _, NewChild, NewBranches} ->
+            {NewSeen, [{Depth, NewChild} | NewBranches]};
+        {NewSeen, _, NewBranches} ->
+            {NewSeen, NewBranches}
+    end.
+
+
+stem_tree(_Depth, {Key, _Val, []} = Leaf, Limit, Seen) ->
+    {check_key(Key, Seen), Limit - 1, Leaf, []};
+
+stem_tree(Depth, {Key, Val, Children}, Limit, Seen0) ->
+    Seen1 = check_key(Key, Seen0),
+    FinalAcc = lists:foldl(fun(Child, Acc) ->
+        {SeenAcc, LimitPosAcc, ChildAcc, BranchAcc} = Acc,
+        case stem_tree(Depth + 1, Child, Limit, SeenAcc) of
+            {NewSeenAcc, LimitPos, NewChild, NewBranches} ->
+                NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
+                NewChildAcc = [NewChild | ChildAcc],
+                NewBranchAcc = NewBranches ++ BranchAcc,
+                {NewSeenAcc, NewLimitPosAcc, NewChildAcc, NewBranchAcc};
+            {NewSeenAcc, LimitPos, NewBranches} ->
+                NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
+                NewBranchAcc = NewBranches ++ BranchAcc,
+                {NewSeenAcc, NewLimitPosAcc, ChildAcc, NewBranchAcc}
+        end
+    end, {Seen1, -1, [], []}, Children),
+    {FinalSeen, FinalLimitPos, FinalChildren, FinalBranches} = FinalAcc,
+    case FinalLimitPos of
+        N when N > 0, length(FinalChildren) > 0 ->
+            FinalNode = {Key, Val, lists:reverse(FinalChildren)},
+            {FinalSeen, FinalLimitPos - 1, FinalNode, FinalBranches};
+        0 when length(FinalChildren) > 0 ->
+            NewBranches = lists:map(fun(Child) ->
+                {Depth + 1, Child}
+            end, lists:reverse(FinalChildren)),
+            {FinalSeen, -1, NewBranches ++ FinalBranches};
+        N when N < 0, length(FinalChildren) == 0 ->
+            {FinalSeen, FinalLimitPos - 1, FinalBranches}
+    end.
+
+
+check_key(Key, Seen) ->
+    case sets:is_element(Key, Seen) of
+        true ->
+            throw(dupe_keys);
+        false ->
+            sets:add_element(Key, Seen)
+    end.
+
+
+repair_tree(Trees, Limit) ->
     % flatten each branch in a tree into a tree path, sort by starting rev #
     Paths = lists:sort(lists:map(fun({Pos, Path}) ->
         StemmedPath = lists:sublist(Path, Limit),
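
The rewritten stem/2 walks each tree once via stem_tree/3 and falls back to
the old flatten-and-rebuild approach (now repair_tree/2) only when a duplicate
key is detected. A minimal sketch, with atoms standing in for real revision
ids and leaf values:

    couch_key_tree:stem([{1, {r1, v1, [{r2, v2, []}]}}], 1).
    %% => [{2, {r2, v2, []}}], i.e. only the leaf survives at depth 1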
diff --git a/src/couch/src/couch_native_process.erl b/src/couch/src/couch_native_process.erl
index 6d66c93..8f8ce8b 100644
--- a/src/couch/src/couch_native_process.erl
+++ b/src/couch/src/couch_native_process.erl
@@ -226,6 +226,18 @@
     end,
     Resp = lists:map(FilterFunWrapper, Docs),
     {State, [true, Resp]};
+ddoc(State, {_, Fun}, [<<"views">>|_], [Docs]) ->
+    MapFunWrapper = fun(Doc) ->
+        case catch Fun(Doc) of
+        undefined -> true;
+        ok -> false;
+        false -> false;
+        [_|_] -> true;
+        {'EXIT', Error} -> couch_log:error("~p", [Error])
+        end
+    end,
+    Resp = lists:map(MapFunWrapper, Docs),
+    {State, [true, Resp]};
 ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
     Resp = case (catch apply(Fun, Args)) of
         FunResp when is_list(FunResp) ->
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index f31d24c..de8ef1e 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -17,6 +17,7 @@
 -export([reduce/3, rereduce/3,validate_doc_update/5]).
 -export([filter_docs/5]).
 -export([filter_view/3]).
+-export([finalize/2]).
 -export([rewrite/3]).
 
 -export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
@@ -86,6 +87,17 @@
      [Heads | group_reductions_results(Tails)]
     end.
 
+finalize(<<"_approx_count_distinct",_/binary>>, Reduction) ->
+    true = hyper:is_hyper(Reduction),
+    {ok, round(hyper:card(Reduction))};
+finalize(<<"_stats",_/binary>>, {_, _, _, _, _} = Unpacked) ->
+    {ok, pack_stats(Unpacked)};
+finalize(<<"_stats",_/binary>>, {Packed}) ->
+    % Legacy code path before we had the finalize operation
+    {ok, {Packed}};
+finalize(_RedSrc, Reduction) ->
+    {ok, Reduction}.
+
 rereduce(_Lang, [], _ReducedValues) ->
     {ok, []};
 rereduce(Lang, RedSrcs, ReducedValues) ->
@@ -171,7 +183,10 @@
     builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
 builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
     Stats = builtin_stats(Re, KVs),
-    builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]).
+    builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]);
+builtin_reduce(Re, [<<"_approx_count_distinct",_/binary>>|BuiltinReds], KVs, Acc) ->
+    Distinct = approx_count_distinct(Re, KVs),
+    builtin_reduce(Re, BuiltinReds, KVs, [Distinct|Acc]).
 
 
 builtin_sum_rows([], Acc) ->
@@ -236,11 +251,11 @@
     throw_sum_error(Else).
 
 builtin_stats(_, []) ->
-    {[{sum,0}, {count,0}, {min,0}, {max,0}, {sumsqr,0}]};
+    {0, 0, 0, 0, 0};
 builtin_stats(_, [[_,First]|Rest]) ->
-    Unpacked = lists:foldl(fun([_Key, Value], Acc) -> stat_values(Value, Acc) end,
-                           build_initial_accumulator(First), Rest),
-    pack_stats(Unpacked).
+    lists:foldl(fun([_Key, Value], Acc) ->
+        stat_values(Value, Acc)
+    end, build_initial_accumulator(First), Rest).
 
 stat_values(Value, Acc) when is_list(Value), is_list(Acc) ->
     lists:zipwith(fun stat_values/2, Value, Acc);
@@ -267,6 +282,8 @@
     [build_initial_accumulator(X) || X <- L];
 build_initial_accumulator(X) when is_number(X) ->
     {X, 1, X, X, X*X};
+build_initial_accumulator({_, _, _, _, _} = AlreadyUnpacked) ->
+    AlreadyUnpacked;
 build_initial_accumulator({Props}) ->
     unpack_stats({Props});
 build_initial_accumulator(Else) ->
@@ -303,6 +320,13 @@
         throw({invalid_value, iolist_to_binary(Msg)})
     end.
 
+% TODO allow customization of precision in the ddoc.
+approx_count_distinct(reduce, KVs) ->
+    lists:foldl(fun([[Key, _Id], _Value], Filter) ->
+        hyper:insert(term_to_binary(Key), Filter)
+    end, hyper:new(11), KVs);
+approx_count_distinct(rereduce, Reds) ->
+    hyper:union([Filter || [_, Filter] <- Reds]).
 
 % use the function stored in ddoc.validate_doc_update to test an update.
 -spec validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> ok when
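
End to end, the new _approx_count_distinct builtin works like this (keys and
ids below are arbitrary):

    KVs = [[[<<"k1">>, <<"id1">>], 1],
           [[<<"k2">>, <<"id2">>], 1],
           [[<<"k1">>, <<"id3">>], 1]],
    Filter = approx_count_distinct(reduce, KVs),
    {ok, Count} = finalize(<<"_approx_count_distinct">>, Filter),
    %% Count is an estimate of the number of distinct keys (~2 here),
    %% computed from a 2^11-register HyperLogLog filter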
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index ff1bf9e..002f08e 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -210,6 +210,8 @@
 
 
 init([]) ->
+    couch_util:set_mqd_off_heap(),
+
     % Mark pluggable storage engines as a supported feature
     config:enable_feature('pluggable-storage-engines'),
 
@@ -223,7 +225,7 @@
     MaxDbsOpen = list_to_integer(
             config:get("couchdb", "max_dbs_open", integer_to_list(?MAX_DBS_OPEN))),
     UpdateLruOnRead =
-        config:get("couchdb", "update_lru_on_read", "true") =:= "true",
+        config:get("couchdb", "update_lru_on_read", "false") =:= "true",
     ok = config:listen_for_changes(?MODULE, nil),
     ok = couch_file:init_delete_dir(RootDir),
     hash_admin_passwords(),
@@ -523,7 +525,7 @@
         DelOpt = [{context, delete} | Options],
 
         % Make sure and remove all compaction data
-        delete_compaction_files(DbNameList, DelOpt),
+        delete_compaction_files(DbNameList, Options),
 
         {ok, {Engine, FilePath}} = get_engine(Server, DbNameList),
         RootDir = Server#server.root_dir,
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index f3a9249..936b562 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -37,6 +37,7 @@
 -export([unique_monotonic_integer/0]).
 -export([check_config_blacklist/1]).
 -export([check_md5/2]).
+-export([set_mqd_off_heap/0]).
 
 -include_lib("couch/include/couch_db.hrl").
 
@@ -301,15 +302,45 @@
 separate_cmd_args([Char|Rest], CmdAcc) ->
     separate_cmd_args(Rest, [Char | CmdAcc]).
 
-% Is a character whitespace?
-is_whitespace($\s) -> true;
-is_whitespace($\t) -> true;
-is_whitespace($\n) -> true;
-is_whitespace($\r) -> true;
+% Is a character whitespace (from https://en.wikipedia.org/wiki/Whitespace_character#Unicode)?
+is_whitespace(9) -> true;
+is_whitespace(10) -> true;
+is_whitespace(11) -> true;
+is_whitespace(12) -> true;
+is_whitespace(13) -> true;
+is_whitespace(32) -> true;
+is_whitespace(133) -> true;
+is_whitespace(160) -> true;
+is_whitespace(5760) -> true;
+is_whitespace(8192) -> true;
+is_whitespace(8193) -> true;
+is_whitespace(8194) -> true;
+is_whitespace(8195) -> true;
+is_whitespace(8196) -> true;
+is_whitespace(8197) -> true;
+is_whitespace(8198) -> true;
+is_whitespace(8199) -> true;
+is_whitespace(8200) -> true;
+is_whitespace(8201) -> true;
+is_whitespace(8202) -> true;
+is_whitespace(8232) -> true;
+is_whitespace(8233) -> true;
+is_whitespace(8239) -> true;
+is_whitespace(8287) -> true;
+is_whitespace(12288) -> true;
+is_whitespace(6158) -> true;
+is_whitespace(8203) -> true;
+is_whitespace(8204) -> true;
+is_whitespace(8205) -> true;
+is_whitespace(8288) -> true;
+is_whitespace(65279) -> true;
 is_whitespace(_Else) -> false.
 
 
 % removes leading and trailing whitespace from a string
+trim(String) when is_binary(String) ->
+    % mirror string:trim() behaviour of returning a binary when a binary is passed in
+    ?l2b(trim(?b2l(String)));
 trim(String) ->
     String2 = lists:dropwhile(fun is_whitespace/1, String),
     lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
@@ -639,6 +670,15 @@
 check_md5(_, _) -> throw(md5_mismatch).
 
 
+set_mqd_off_heap() ->
+    try
+        erlang:process_flag(message_queue_data, off_heap),
+        ok
+    catch error:badarg ->
+        ok
+    end.
+
+
 ensure_loaded(Module) when is_atom(Module) ->
     case code:ensure_loaded(Module) of
     {module, Module} ->
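A minimal sketch of how the new helper is meant to be used: long-lived servers
with busy mailboxes call it from `init/1`, exactly as `couch_server`,
`couch_log_server` and `ddoc_cache_lru` do elsewhere in this diff. The
`demo_server` module is hypothetical:

```
-module(demo_server).
-behaviour(gen_server).
-export([start_link/0, init/1, handle_call/3, handle_cast/2]).

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

init([]) ->
    % Keep the mailbox off the process heap so a large queue does not
    % inflate GC cost; the helper's catch of badarg makes this a no-op
    % on Erlang releases without the message_queue_data flag.
    couch_util:set_mqd_off_heap(),
    {ok, #{}}.

handle_call(_Msg, _From, St) ->
    {reply, ok, St}.

handle_cast(_Msg, St) ->
    {noreply, St}.
```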
diff --git a/src/couch/src/test_engine_util.erl b/src/couch/src/test_engine_util.erl
index 8999753..fef9e9f 100644
--- a/src/couch/src/test_engine_util.erl
+++ b/src/couch/src/test_engine_util.erl
@@ -309,7 +309,8 @@
         conflict -> new_branch;
         _ -> new_leaf
     end,
-    {NewTree, NodeType} = couch_key_tree:merge(PrevRevTree, Path, RevsLimit),
+    {MergedTree, NodeType} = couch_key_tree:merge(PrevRevTree, Path),
+    NewTree = couch_key_tree:stem(MergedTree, RevsLimit),
 
     NewFDI = PrevFDI#full_doc_info{
         deleted = couch_doc:is_deleted(NewTree),
diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl
index e0a53a6..738e9a3 100644
--- a/src/couch/src/test_util.erl
+++ b/src/couch/src/test_util.erl
@@ -15,6 +15,7 @@
 -include_lib("couch/include/couch_eunit.hrl").
 -include("couch_db.hrl").
 -include("couch_db_int.hrl").
+-include("couch_bt_engine.hrl").
 
 -export([init_code_path/0]).
 -export([source_file/1, build_file/1]).
@@ -234,7 +235,8 @@
     meck:unload(Mocked),
     stop_applications(Apps).
 
-fake_db(Fields) ->
+fake_db(Fields0) ->
+    {ok, Db, Fields} = maybe_set_engine(Fields0),
     Indexes = lists:zip(
             record_info(fields, db),
             lists:seq(2, record_info(size, db))
@@ -242,7 +244,27 @@
     lists:foldl(fun({FieldName, Value}, Acc) ->
         Idx = couch_util:get_value(FieldName, Indexes),
         setelement(Idx, Acc, Value)
-    end, #db{}, Fields).
+    end, Db, Fields).
+
+maybe_set_engine(Fields0) ->
+    case lists:keymember(engine, 1, Fields0) of
+        true ->
+            {ok, #db{}, Fields0};
+        false ->
+            {ok, Header, Fields} = get_engine_header(Fields0),
+            Db = #db{engine = {couch_bt_engine, #st{header = Header}}},
+            {ok, Db, Fields}
+    end.
+
+get_engine_header(Fields) ->
+    Keys = [disk_version, update_seq, unused, id_tree_state,
+        seq_tree_state, local_tree_state, purge_seq, purged_docs,
+        security_ptr, revs_limit, uuid, epochs, compacted_seq],
+    {HeadFields, RestFields} = lists:partition(
+        fun({K, _}) -> lists:member(K, Keys) end, Fields),
+    Header0 = couch_bt_engine_header:new(),
+    Header = couch_bt_engine_header:set(Header0, HeadFields),
+    {ok, Header, RestFields}.
 
 now_us() ->
     {MegaSecs, Secs, MicroSecs} = os:timestamp(),
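To illustrate what the reworked `fake_db/1` does (field values invented):
header-level keys are folded into a `couch_bt_engine` header rather than set
on `#db{}` record slots. The snippet assumes the `couch_db_int.hrl` and
`couch_bt_engine.hrl` records are in scope, as they are in `test_util.erl`
itself:

```
fake_db_demo() ->
    % update_seq is in the header key list, so it lands in the engine
    % header; name is not, so it is set on the #db{} record directly.
    Db = test_util:fake_db([{name, <<"mydb">>}, {update_seq, 42}]),
    #db{name = <<"mydb">>, engine = {couch_bt_engine, St}} = Db,
    42 = couch_bt_engine_header:get(St#st.header, update_seq),
    ok.
```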
diff --git a/src/couch/test/couch_changes_tests.erl b/src/couch/test/couch_changes_tests.erl
index 673f2fa..e4ea761 100644
--- a/src/couch/test/couch_changes_tests.erl
+++ b/src/couch/test/couch_changes_tests.erl
@@ -47,9 +47,11 @@
         save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}),
         save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]})
     ]],
+    config:set("native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist=false),
     {DbName, list_to_tuple(Revs2)}.
 
 teardown({DbName, _}) ->
+    config:delete("native_query_servers", "erlang", _Persist=false),
     delete_db(DbName),
     ok.
 
@@ -153,7 +155,8 @@
             fun setup/0, fun teardown/1,
             [
                 fun should_filter_by_view/1,
-                fun should_filter_by_fast_view/1
+                fun should_filter_by_fast_view/1,
+                fun should_filter_by_erlang_view/1
             ]
         }
     }.
@@ -733,6 +736,39 @@
             ?assertEqual(UpSeq, ViewUpSeq)
         end).
 
+should_filter_by_erlang_view({DbName, _}) ->
+    ?_test(
+        begin
+            DDocId = <<"_design/app">>,
+            DDoc = couch_doc:from_json_obj({[
+                {<<"_id">>, DDocId},
+                {<<"language">>, <<"erlang">>},
+                {<<"views">>, {[
+                    {<<"valid">>, {[
+                        {<<"map">>, <<"fun({Doc}) ->"
+                            " case lists:keyfind(<<\"_id\">>, 1, Doc) of"
+                                " {<<\"_id\">>, <<\"doc3\">>} ->  Emit(Doc, null); "
+                                " false -> ok"
+                            " end "
+                        "end.">>}
+                    ]}}
+                ]}}
+            ]}),
+            ChArgs = #changes_args{filter = "_view"},
+            Req = {json_req, {[{
+                <<"query">>, {[
+                    {<<"view">>, <<"app/valid">>}
+                ]}
+            }]}},
+            ok = update_ddoc(DbName, DDoc),
+            {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
+            ?assertEqual(1, length(Rows)),
+            [#row{seq = Seq, id = Id}] = Rows,
+            ?assertEqual(<<"doc3">>, Id),
+            ?assertEqual(6, Seq),
+            ?assertEqual(UpSeq, LastSeq)
+        end).
+
 update_ddoc(DbName, DDoc) ->
     {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
     {ok, _} = couch_db:update_doc(Db, DDoc, []),
diff --git a/src/couch/test/couch_key_tree_tests.erl b/src/couch/test/couch_key_tree_tests.erl
index 88d9203..5d9cc83 100644
--- a/src/couch/test/couch_key_tree_tests.erl
+++ b/src/couch/test/couch_key_tree_tests.erl
@@ -16,138 +16,108 @@
 
 -define(DEPTH, 10).
 
-setup() ->
-    meck:new(config),
-    meck:expect(config, get, fun(_, _, Default) -> Default end).
-
-teardown(_) ->
-    meck:unload(config).
 
 key_tree_merge_test_()->
     {
         "Key tree merge",
-        {
-            setup,
-            fun setup/0, fun teardown/1,
-            [
-                should_merge_with_empty_tree(),
-                should_merge_reflexive(),
-                should_merge_prefix_of_a_tree_with_tree(),
-                should_produce_conflict_on_merge_with_unrelated_branch(),
-                should_merge_reflexive_for_child_nodes(),
-                should_merge_tree_to_itself(),
-                should_merge_tree_of_odd_length(),
-                should_merge_tree_with_stem(),
-                should_merge_with_stem_at_deeper_level(),
-                should_merge_with_stem_at_deeper_level_with_deeper_paths(),
-                should_merge_single_tree_with_deeper_stem(),
-                should_merge_tree_with_large_stem(),
-                should_merge_stems(),
-                should_create_conflicts_on_merge(),
-                should_create_no_conflicts_on_merge(),
-                should_ignore_conflicting_branch()
-            ]
-        }
+        [
+            should_merge_with_empty_tree(),
+            should_merge_reflexive(),
+            should_merge_prefix_of_a_tree_with_tree(),
+            should_produce_conflict_on_merge_with_unrelated_branch(),
+            should_merge_reflexive_for_child_nodes(),
+            should_merge_tree_to_itself(),
+            should_merge_tree_of_odd_length(),
+            should_merge_tree_with_stem(),
+            should_merge_with_stem_at_deeper_level(),
+            should_merge_with_stem_at_deeper_level_with_deeper_paths(),
+            should_merge_single_tree_with_deeper_stem(),
+            should_merge_tree_with_large_stem(),
+            should_merge_stems(),
+            should_create_conflicts_on_merge(),
+            should_create_no_conflicts_on_merge(),
+            should_ignore_conflicting_branch()
+        ]
     }.
 
 key_tree_missing_leaves_test_()->
     {
-        "Missing tree leaves",
-        {
-            setup,
-            fun setup/0, fun teardown/1,
-            [
-                should_not_find_missing_leaves(),
-                should_find_missing_leaves()
-            ]
-        }
+         "Missing tree leaves",
+         [
+             should_not_find_missing_leaves(),
+             should_find_missing_leaves()
+         ]
     }.
 
 key_tree_remove_leaves_test_()->
     {
         "Remove tree leaves",
-        {
-            setup,
-            fun setup/0, fun teardown/1,
-            [
-                should_have_no_effect_on_removing_no_leaves(),
-                should_have_no_effect_on_removing_non_existant_branch(),
-                should_remove_leaf(),
-                should_produce_empty_tree_on_removing_all_leaves(),
-                should_have_no_effect_on_removing_non_existant_node(),
-                should_produce_empty_tree_on_removing_last_leaf()
-            ]
-        }
+        [
+            should_have_no_effect_on_removing_no_leaves(),
+            should_have_no_effect_on_removing_non_existant_branch(),
+            should_remove_leaf(),
+            should_produce_empty_tree_on_removing_all_leaves(),
+            should_have_no_effect_on_removing_non_existant_node(),
+            should_produce_empty_tree_on_removing_last_leaf()
+        ]
     }.
 
 key_tree_get_leaves_test_()->
     {
         "Leaves retrieving",
-        {
-            setup,
-            fun setup/0, fun teardown/1,
-            [
-                should_extract_subtree(),
-                should_extract_subsubtree(),
-                should_gather_non_existant_leaf(),
-                should_gather_leaf(),
-                shoul_gather_multiple_leaves(),
-                should_gather_single_leaf_for_multiple_revs(),
-                should_gather_multiple_for_multiple_revs(),
-                should_retrieve_full_key_path(),
-                should_retrieve_full_key_path_for_node(),
-                should_retrieve_leaves_with_parent_node(),
-                should_retrieve_all_leaves()
-            ]
-        }
+        [
+            should_extract_subtree(),
+            should_extract_subsubtree(),
+            should_gather_non_existant_leaf(),
+            should_gather_leaf(),
+            shoul_gather_multiple_leaves(),
+            should_gather_single_leaf_for_multiple_revs(),
+            should_gather_multiple_for_multiple_revs(),
+            should_retrieve_full_key_path(),
+            should_retrieve_full_key_path_for_node(),
+            should_retrieve_leaves_with_parent_node(),
+            should_retrieve_all_leaves()
+        ]
     }.
 
 key_tree_leaf_counting_test_()->
     {
         "Leaf counting",
-        {
-            setup,
-            fun setup/0, fun teardown/1,
-            [
-                should_have_no_leaves_for_empty_tree(),
-                should_have_single_leaf_for_tree_with_single_node(),
-                should_have_two_leaves_for_tree_with_chindler_siblings(),
-                should_not_affect_on_leaf_counting_for_stemmed_tree()
-            ]
-        }
+        [
+            should_have_no_leaves_for_empty_tree(),
+            should_have_single_leaf_for_tree_with_single_node(),
+            should_have_two_leaves_for_tree_with_chindler_siblings(),
+            should_not_affect_on_leaf_counting_for_stemmed_tree()
+        ]
     }.
 
 key_tree_stemming_test_()->
     {
         "Stemming",
-        {
-            setup,
-            fun setup/0, fun teardown/1,
-            [
-                should_have_no_effect_for_stemming_more_levels_than_exists(),
-                should_return_one_deepest_node(),
-                should_return_two_deepest_nodes()
-            ]
-        }
+        [
+            should_have_no_effect_for_stemming_more_levels_than_exists(),
+            should_return_one_deepest_node(),
+            should_return_two_deepest_nodes()
+        ]
     }.
 
 
 should_merge_with_empty_tree()->
     One = {1, {"1","foo",[]}},
     ?_assertEqual({[One], new_leaf},
-                  couch_key_tree:merge([], One, ?DEPTH)).
+                  merge_and_stem([], One)).
 
 should_merge_reflexive()->
     One = {1, {"1","foo",[]}},
     ?_assertEqual({[One], internal_node},
-                  couch_key_tree:merge([One], One, ?DEPTH)).
+                  merge_and_stem([One], One)).
 
 should_merge_prefix_of_a_tree_with_tree()->
     One = {1, {"1","foo",[]}},
     TwoSibs = [{1, {"1","foo",[]}},
                {1, {"2","foo",[]}}],
     ?_assertEqual({TwoSibs, internal_node},
-                  couch_key_tree:merge(TwoSibs, One, ?DEPTH)).
+                  merge_and_stem(TwoSibs, One)).
 
 should_produce_conflict_on_merge_with_unrelated_branch()->
     TwoSibs = [{1, {"1","foo",[]}},
@@ -157,18 +127,33 @@
                  {1, {"2","foo",[]}},
                  {1, {"3","foo",[]}}],
     ?_assertEqual({ThreeSibs, new_branch},
-                  couch_key_tree:merge(TwoSibs, Three, ?DEPTH)).
+                  merge_and_stem(TwoSibs, Three)).
 
 should_merge_reflexive_for_child_nodes()->
     TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
     ?_assertEqual({[TwoChild], internal_node},
-                  couch_key_tree:merge([TwoChild], TwoChild, ?DEPTH)).
+                  merge_and_stem([TwoChild], TwoChild)).
 
 should_merge_tree_to_itself()->
     TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
                                     {"1b", "bar", []}]}},
-    ?_assertEqual({[TwoChildSibs], new_branch},
-                  couch_key_tree:merge([TwoChildSibs], TwoChildSibs, ?DEPTH)).
+    Leafs = couch_key_tree:get_all_leafs([TwoChildSibs]),
+    Paths = lists:map(fun leaf_to_path/1, Leafs),
+    FinalTree = lists:foldl(fun(Path, TreeAcc) ->
+        {NewTree, internal_node} = merge_and_stem(TreeAcc, Path),
+        NewTree
+    end, [TwoChildSibs], Paths),
+    ?_assertEqual([TwoChildSibs], FinalTree).
+
+leaf_to_path({Value, {Start, Keys}}) ->
+    [Branch] = to_branch(Value, lists:reverse(Keys)),
+    {Start - length(Keys) + 1, Branch}.
+
+to_branch(Value, [Key]) ->
+    [{Key, Value, []}];
+to_branch(Value, [Key | RestKeys]) ->
+    [{Key, [], to_branch(Value, RestKeys)}].
+
 
 should_merge_tree_of_odd_length()->
     TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
@@ -176,9 +161,8 @@
                                     {"1b", "bar", []}]}},
     TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]},
                                         {"1b", "bar", []}]}},
-
-    ?_assertEqual({[TwoChildPlusSibs], new_branch},
-                  couch_key_tree:merge([TwoChild], TwoChildSibs, ?DEPTH)).
+    ?_assertEqual({[TwoChildPlusSibs], new_leaf},
+                  merge_and_stem([TwoChildSibs], TwoChild)).
 
 should_merge_tree_with_stem()->
     Stemmed = {2, {"1a", "bar", []}},
@@ -186,52 +170,52 @@
                                     {"1b", "bar", []}]}},
 
     ?_assertEqual({[TwoChildSibs], internal_node},
-                  couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
+                  merge_and_stem([TwoChildSibs], Stemmed)).
 
 should_merge_with_stem_at_deeper_level()->
     Stemmed = {3, {"1bb", "boo", []}},
     TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
                                     {"1b", "bar", [{"1bb", "boo", []}]}]}},
     ?_assertEqual({[TwoChildSibs], internal_node},
-                  couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
+                  merge_and_stem([TwoChildSibs], Stemmed)).
 
 should_merge_with_stem_at_deeper_level_with_deeper_paths()->
     Stemmed = {3, {"1bb", "boo", []}},
     StemmedTwoChildSibs = [{2,{"1a", "bar", []}},
                            {2,{"1b", "bar", [{"1bb", "boo", []}]}}],
     ?_assertEqual({StemmedTwoChildSibs, internal_node},
-                  couch_key_tree:merge(StemmedTwoChildSibs, Stemmed, ?DEPTH)).
+                  merge_and_stem(StemmedTwoChildSibs, Stemmed)).
 
 should_merge_single_tree_with_deeper_stem()->
     Stemmed = {3, {"1aa", "bar", []}},
     TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
     ?_assertEqual({[TwoChild], internal_node},
-                  couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
+                  merge_and_stem([TwoChild], Stemmed)).
 
 should_merge_tree_with_large_stem()->
     Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
     TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
     ?_assertEqual({[TwoChild], internal_node},
-                  couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
+                  merge_and_stem([TwoChild], Stemmed)).
 
 should_merge_stems()->
     StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
     StemmedB = {3, {"1aa", "bar", []}},
     ?_assertEqual({[StemmedA], internal_node},
-                  couch_key_tree:merge([StemmedA], StemmedB, ?DEPTH)).
+                  merge_and_stem([StemmedA], StemmedB)).
 
 should_create_conflicts_on_merge()->
     OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
     Stemmed = {3, {"1aa", "bar", []}},
     ?_assertEqual({[OneChild, Stemmed], new_branch},
-                  couch_key_tree:merge([OneChild], Stemmed, ?DEPTH)).
+                  merge_and_stem([OneChild], Stemmed)).
 
 should_create_no_conflicts_on_merge()->
     OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
     Stemmed = {3, {"1aa", "bar", []}},
     TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
     ?_assertEqual({[TwoChild], new_leaf},
-                  couch_key_tree:merge([OneChild, Stemmed], TwoChild, ?DEPTH)).
+                  merge_and_stem([OneChild, Stemmed], TwoChild)).
 
 should_ignore_conflicting_branch()->
     %% this test is based on couch-902-test-case2.py
@@ -260,7 +244,7 @@
     {
         "COUCHDB-902",
         ?_assertEqual({[FooBar], new_leaf},
-                      couch_key_tree:merge([Foo], Bar, ?DEPTH))
+                      merge_and_stem([Foo], Bar))
     }.
 
 should_not_find_missing_leaves()->
@@ -422,3 +406,8 @@
     TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
     Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
     ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)).
+
+
+merge_and_stem(RevTree, Tree) ->
+    {Merged, Result} = couch_key_tree:merge(RevTree, Tree),
+    {couch_key_tree:stem(Merged, ?DEPTH), Result}.
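The `merge_and_stem/2` helper reflects an API change on this branch:
`couch_key_tree:merge/3` has been split into `merge/2` plus an explicit
`stem/2`. A shell sketch of the two-step form, with the tree shape borrowed
from the tests above:

```
1> Tree = {1, {"1", "foo", []}}.
2> {Merged, new_leaf} = couch_key_tree:merge([], Tree).
3> couch_key_tree:stem(Merged, 10).
[{1,{"1","foo",[]}}]
```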
diff --git a/src/couch/test/couchdb_attachments_tests.erl b/src/couch/test/couchdb_attachments_tests.erl
index d9efac5..a85a01f 100644
--- a/src/couch/test/couchdb_attachments_tests.erl
+++ b/src/couch/test/couchdb_attachments_tests.erl
@@ -21,9 +21,9 @@
 -define(ATT_TXT_NAME, <<"file.erl">>).
 -define(FIXTURE_PNG, filename:join([?FIXTURESDIR, "logo.png"])).
 -define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
--define(TIMEOUT, 1000).
--define(TIMEOUT_EUNIT, 10).
--define(TIMEWAIT, 100).
+-define(TIMEOUT, 5000).
+-define(TIMEOUT_EUNIT, 100).
+-define(TIMEWAIT, 1000).
 -define(i2l(I), integer_to_list(I)).
 
 
diff --git a/src/couch_log/src/couch_log_server.erl b/src/couch_log/src/couch_log_server.erl
index be44af8..ea5def8 100644
--- a/src/couch_log/src/couch_log_server.erl
+++ b/src/couch_log/src/couch_log_server.erl
@@ -58,6 +58,7 @@
 
 
 init(_) ->
+    couch_util:set_mqd_off_heap(),
     process_flag(trap_exit, true),
     {ok, #st{
         writer = couch_log_writer:init()
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index a099f37..82bbd79 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -41,6 +41,7 @@
     user_acc,
     last_go=ok,
     reduce_fun,
+    finalizer,
     update_seq,
     args
 }).
@@ -184,6 +185,8 @@
                 ok;
             ({_RedName, <<"_stats", _/binary>>}) ->
                 ok;
+            ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
+                ok;
             ({_RedName, <<"_", _/binary>> = Bad}) ->
                 Msg = ["`", Bad, "` is not a supported reduce function."],
                 throw({invalid_design_doc, Msg});
@@ -577,7 +580,14 @@
         last_go=Go
     }}.
 
-red_fold(Db, {_Nth, _Lang, View}=RedView, Args, Callback, UAcc) ->
+red_fold(Db, {NthRed, _Lang, View}=RedView, Args, Callback, UAcc) ->
+    Finalizer = case couch_util:get_value(finalizer, Args#mrargs.extra) of
+        undefined ->
+            {_, FunSrc} = lists:nth(NthRed, View#mrview.reduce_funs),
+            FunSrc;
+        CustomFun ->
+            CustomFun
+    end,
     Acc = #mracc{
         db=Db,
         total_rows=null,
@@ -587,6 +597,7 @@
         callback=Callback,
         user_acc=UAcc,
         update_seq=View#mrview.update_seq,
+        finalizer=Finalizer,
         args=Args
     },
     Grouping = {key_group_level, Args#mrargs.group_level},
@@ -618,41 +629,50 @@
     {stop, Acc};
 red_fold(_Key, Red, #mracc{group_level=0} = Acc) ->
     #mracc{
+        finalizer=Finalizer,
         limit=Limit,
         callback=Callback,
         user_acc=UAcc0
     } = Acc,
-    Row = [{key, null}, {value, Red}],
+    Row = [{key, null}, {value, maybe_finalize(Red, Finalizer)}],
     {Go, UAcc1} = Callback({row, Row}, UAcc0),
     {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
 red_fold(Key, Red, #mracc{group_level=exact} = Acc) ->
     #mracc{
+        finalizer=Finalizer,
         limit=Limit,
         callback=Callback,
         user_acc=UAcc0
     } = Acc,
-    Row = [{key, Key}, {value, Red}],
+    Row = [{key, Key}, {value, maybe_finalize(Red, Finalizer)}],
     {Go, UAcc1} = Callback({row, Row}, UAcc0),
     {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
 red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0, is_list(K) ->
     #mracc{
+        finalizer=Finalizer,
         limit=Limit,
         callback=Callback,
         user_acc=UAcc0
     } = Acc,
-    Row = [{key, lists:sublist(K, I)}, {value, Red}],
+    Row = [{key, lists:sublist(K, I)}, {value, maybe_finalize(Red, Finalizer)}],
     {Go, UAcc1} = Callback({row, Row}, UAcc0),
     {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
 red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0 ->
     #mracc{
+        finalizer=Finalizer,
         limit=Limit,
         callback=Callback,
         user_acc=UAcc0
     } = Acc,
-    Row = [{key, K}, {value, Red}],
+    Row = [{key, K}, {value, maybe_finalize(Red, Finalizer)}],
     {Go, UAcc1} = Callback({row, Row}, UAcc0),
     {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}}.
 
+maybe_finalize(Red, null) ->
+    Red;
+maybe_finalize(Red, RedSrc) ->
+    {ok, Finalized} = couch_query_servers:finalize(RedSrc, Red),
+    Finalized.
 
 finish_fold(#mracc{last_go=ok, update_seq=UpdateSeq}=Acc,  ExtraMeta) ->
     #mracc{callback=Callback, user_acc=UAcc, args=Args}=Acc,
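A rough sketch of the finalizer plumbing introduced here, read together with
the `fabric_rpc:fix_skip_and_limit/1` change later in this diff: shards reduce
with `{finalizer, null}` and ship raw values, and only the coordinator
finalizes. The function and the `RawHyperFilter` argument below are
hypothetical stand-ins, not part of the patch:

```
% Shard side: fix_skip_and_limit/1 injects {finalizer, null}, so
% red_fold/3 passes the raw reduction through untouched.
%
% Coordinator side: the finalizer defaults to the reduce fun source,
% and the row value is finalized before it reaches the client:
finalize_demo(RawHyperFilter) ->
    {ok, Count} = couch_query_servers:finalize(
        <<"_approx_count_distinct">>, RawHyperFilter),
    Count.
```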
diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl
index e9be89c..3ef1180 100644
--- a/src/couch_mrview/src/couch_mrview_compactor.erl
+++ b/src/couch_mrview/src/couch_mrview_compactor.erl
@@ -233,6 +233,8 @@
 
     {EmptyView#mrview{btree=NewBt,
                       seq_btree=NewSeqBt,
+                      update_seq=View#mrview.update_seq,
+                      purge_seq=View#mrview.purge_seq,
                       key_byseq_btree=NewKeyBySeqBt}, FinalAcc}.
 
 compact_view_btree(Btree, EmptyBtree, VID, BufferSize, Acc0) ->
diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl
index aa1ee27..5d285d6 100644
--- a/src/couch_mrview/src/couch_mrview_index.erl
+++ b/src/couch_mrview/src/couch_mrview_index.erl
@@ -61,13 +61,14 @@
     } = State,
     {ok, FileSize} = couch_file:bytes(Fd),
     {ok, ExternalSize} = couch_mrview_util:calculate_external_size(Views),
+    {ok, ActiveViewSize} = couch_mrview_util:calculate_active_size(Views),
     LogBtSize = case LogBtree of
         nil ->
             0;
         _ ->
             couch_btree:size(LogBtree)
     end,
-    ActiveSize = couch_btree:size(IdBtree) + LogBtSize + ExternalSize,
+    ActiveSize = couch_btree:size(IdBtree) + LogBtSize + ActiveViewSize,
 
     UpdateOptions0 = get(update_options, State),
     UpdateOptions = [atom_to_binary(O, latin1) || O <- UpdateOptions0],
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index 0c6e5fc..eb461d0 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -23,6 +23,7 @@
 -export([fold/4, fold_reduce/4]).
 -export([temp_view_to_ddoc/1]).
 -export([calculate_external_size/1]).
+-export([calculate_active_size/1]).
 -export([validate_args/1]).
 -export([maybe_load_doc/3, maybe_load_doc/4]).
 -export([maybe_update_index_file/1]).
@@ -830,6 +831,22 @@
     {ok, lists:foldl(SumFun, 0, Views)}.
 
 
+calculate_active_size(Views) ->
+    BtSize = fun
+        (nil) -> 0;
+        (Bt) -> couch_btree:size(Bt)
+    end,
+    FoldFun = fun(View, Acc) ->
+        Sizes = [
+            BtSize(View#mrview.btree),
+            BtSize(View#mrview.seq_btree),
+            BtSize(View#mrview.key_byseq_btree)
+        ],
+        Acc + lists:sum([S || S <- Sizes, is_integer(S)])
+    end,
+    {ok, lists:foldl(FoldFun, 0, Views)}.
+
+
 sum_btree_sizes(nil, _) ->
     0;
 sum_btree_sizes(_, nil) ->
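The accounting change above: active size previously mixed in the
*uncompressed* external size and could exceed the file size; it now sums the
on-disk btree sizes per view. A self-contained sketch of the fold with mocked
btree sizes (the real code calls `couch_btree:size/1`):

```
-module(active_size_demo).
-export([run/0]).

% Stand-in for #mrview{} with only the three btree fields.
-record(mrview, {btree, seq_btree, key_byseq_btree}).

run() ->
    BtSize = fun
        (nil) -> 0;
        ({fake_bt, N}) -> N
    end,
    Views = [
        #mrview{btree = {fake_bt, 100}, seq_btree = nil,
                key_byseq_btree = nil},
        #mrview{btree = {fake_bt, 40}, seq_btree = {fake_bt, 10},
                key_byseq_btree = nil}
    ],
    % 100 + 40 + 10: only on-disk btree bytes count toward "active"
    150 = lists:foldl(fun(View, Acc) ->
        Sizes = [
            BtSize(View#mrview.btree),
            BtSize(View#mrview.seq_btree),
            BtSize(View#mrview.key_byseq_btree)
        ],
        Acc + lists:sum(Sizes)
    end, 0, Views),
    ok.
```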
diff --git a/src/couch_mrview/test/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/couch_mrview_index_info_tests.erl
index c994df9..efa03e7 100644
--- a/src/couch_mrview/test/couch_mrview_index_info_tests.erl
+++ b/src/couch_mrview/test/couch_mrview_index_info_tests.erl
@@ -18,14 +18,13 @@
 -define(TIMEOUT, 1000).
 
 
--ifdef(run_broken_tests).
-
 setup() ->
     {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
     couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
     {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
     {Db, Info}.
 
+
 teardown({Db, _}) ->
     couch_db:close(Db),
     couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
@@ -37,39 +36,86 @@
         "Views index tests",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup/0, fun teardown/1,
+                fun setup/0,
+                fun teardown/1,
                 [
-                    fun should_get_property/1
+                    fun sig_is_binary/1,
+                    fun language_is_js/1,
+                    fun file_size_is_non_neg_int/1,
+                    fun active_size_is_non_neg_int/1,
+                    fun external_size_is_non_neg_int/1,
+                    fun disk_size_is_file_size/1,
+                    fun data_size_is_external_size/1,
+                    fun active_size_less_than_file_size/1,
+                    fun update_seq_is_non_neg_int/1,
+                    fun purge_seq_is_non_neg_int/1,
+                    fun update_opts_is_bin_list/1
                 ]
             }
         }
     }.
 
 
-should_get_property({_, Info}) ->
-    InfoProps = [
-        {signature, <<"276df562b152b3c4e5d34024f62672ed">>},
-        {language, <<"javascript">>},
-        {disk_size, 314},
-        {data_size, 263},
-        {update_seq, 11},
-        {purge_seq, 0},
-        {updater_running, false},
-        {compact_running, false},
-        {waiting_clients, 0}
-    ],
-    [
-        {atom_to_list(Key), ?_assertEqual(Val, getval(Key, Info))}
-        || {Key, Val} <- InfoProps
-    ].
+sig_is_binary({_, Info}) ->
+    ?_assert(is_binary(prop(signature, Info))).
 
 
-getval(Key, PL) ->
-    {value, {Key, Val}} = lists:keysearch(Key, 1, PL),
-    Val.
+language_is_js({_, Info}) ->
+    ?_assertEqual(<<"javascript">>, prop(language, Info)).
 
 
--endif.
+file_size_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int([sizes, file], Info)).
+
+
+active_size_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int([sizes, active], Info)).
+
+
+external_size_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int([sizes, external], Info)).
+
+
+disk_size_is_file_size({_, Info}) ->
+    ?_assertEqual(prop([sizes, file], Info), prop(disk_size, Info)).
+
+
+data_size_is_external_size({_, Info}) ->
+    ?_assertEqual(prop([sizes, external], Info), prop(data_size, Info)).
+
+
+active_size_less_than_file_size({_, Info}) ->
+    ?_assert(prop([sizes, active], Info) < prop([sizes, file], Info)).
+
+
+update_seq_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int(update_seq, Info)).
+
+
+purge_seq_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int(purge_seq, Info)).
+
+
+update_opts_is_bin_list({_, Info}) ->
+    Opts = prop(update_options, Info),
+    ?_assert(is_list(Opts) andalso
+            (Opts == [] orelse lists:all(fun is_binary/1, Opts))).
+
+
+check_non_neg_int(Key, Info) ->
+    Size = prop(Key, Info),
+    is_integer(Size) andalso Size >= 0.
+
+
+prop(Key, {Props}) when is_list(Props) ->
+    prop(Key, Props);
+prop([Key], Info) ->
+    prop(Key, Info);
+prop([Key | Rest], Info) ->
+    prop(Rest, prop(Key, Info));
+prop(Key, Info) when is_atom(Key), is_list(Info) ->
+    couch_util:get_value(Key, Info).
diff --git a/src/couch_replicator/README.md b/src/couch_replicator/README.md
index fe975c1..9822f27 100644
--- a/src/couch_replicator/README.md
+++ b/src/couch_replicator/README.md
@@ -6,10 +6,11 @@
 everything is connected together.
 
 A natural place to start is the top application supervisor:
-`couch_replicator_sup`. It's a `rest_for_one` so if a child process terminates,
-the rest of the children in the hierarchy following it are also terminated.
-This structure implies a useful constraint -- children lower in the list can
-safely call their siblings which are higher in the list.
+`couch_replicator_sup`. It's a `rest_for_one` restart strategy supervisor,
+so if a child process terminates, the rest of the children in the hierarchy
+following it are also terminated. This structure implies a useful constraint --
+children lower in the list can safely call their siblings which are higher in
+the list.
 
 A description of each child:
 
@@ -32,17 +33,17 @@
     membership change include `couch_replicator_doc_processor` and
     `couch_replicator_db_changes`. When doc processor gets an `{cluster,
     stable}` event it will remove all the replication jobs not belonging to the
-    current node. When `couch_replicator_db_chanages` gets a `{cluster,
+    current node. When `couch_replicator_db_changes` gets a `{cluster,
     stable}` event, it will restart the `couch_multidb_changes` process it
     controls, which will launch a new scan of all the replicator databases.
 
   * `couch_replicator_connection`: Maintains a global replication connection
-    pool. It allows reusing connections across replication tasks. The Main
+    pool. It allows reusing connections across replication tasks. The main
     interface is `acquire/1` and `release/1`. The general idea is that once a
     connection is established, it is kept around for
     `replicator.connection_close_interval` milliseconds in case another
     replication task wants to re-use it. It is worth pointing out how linking
-    and monitoring is handled: Workers are linked to the connection pool when
+    and monitoring is handled: workers are linked to the connection pool when
     they are created. If they crash, the connection pool will receive an 'EXIT'
     event and clean up after the worker. The connection pool also monitors
     owners (by monitoring the `Pid` from the `From` argument in the call to
@@ -50,21 +51,21 @@
     message. Another interesting thing is that connection establishment
     (creation) happens in the owner process so the pool is not blocked on it.
 
- * `couch_replicator_rate_limiter` : Implements a rate limiter to handle
+ * `couch_replicator_rate_limiter`: Implements a rate limiter to handle
     connection throttling from sources or targets where requests return 429
     error codes. Uses the Additive Increase / Multiplicative Decrease feedback
     control algorithm to converge on the channel capacity. Implemented using a
     16-way sharded ETS table to maintain connection state. The table sharding
     code is split out to `couch_replicator_rate_limiter_tables` module. The
-    purpose of the module it so maintain and continually estimate sleep
+    purpose of the module is to maintain and continually estimate sleep
     intervals for each connection represented as a `{Method, Url}` pair. The
     interval is updated accordingly on each call to `failure/1` or `success/1`.
     For a successful request, a client should call `success/1`. Whenever
     a 429 response is received the client should call `failure/1`. When no
-    failures are happening the code is ensuring the ETS tables are empty in
+    failures are happening the code ensures the ETS tables are empty in
     order to have a lower impact on a running system.
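     (A toy sketch of this AIMD feedback rule appears after this README excerpt.)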
 
- * `couch_replicator_scheduler` : This is the core component of the scheduling
+ * `couch_replicator_scheduler`: This is the core component of the scheduling
     replicator. Its main task is to switch between replication jobs, by
     stopping some and starting others to ensure all of them make progress.
     Replication jobs which fail are penalized using an exponential backoff.
@@ -92,7 +93,7 @@
     function is called every `replicator.interval` milliseconds (default is
     60000 i.e. a minute). During each call the scheduler will try to stop some
     jobs, start some new ones and will also try to keep the maximum number of
-    jobs running less than `replicator.max_jobs` (deafult 500). So the
+    jobs running less than `replicator.max_jobs` (default 500). So the
     function does these operations (actual code paste):
 
     ```
@@ -104,7 +105,7 @@
     update_running_jobs_stats(State#state.stats_pid)
     ```
 
-    `Running` is the total number of currently runnig jobs. `Pending` is the
+    `Running` is the total number of currently running jobs. `Pending` is the
     total number of jobs waiting to be run. `stop_excess_jobs` will stop any
     jobs exceeding the configured `replicator.max_jobs` limit. This code takes
     effect if the user reduces the `max_jobs` configuration value.
@@ -132,7 +133,7 @@
     interesting part is how the scheduler picks which jobs to stop and which
     ones to start:
 
-    * Stopping: When picking jobs to stop the cheduler will pick longest
+    * Stopping: When picking jobs to stop, the scheduler will pick the longest
       running continuous jobs first. The sorting callback function to get the
       longest running jobs is unsurprisingly called `longest_running/2`. To
       pick the longest running jobs it looks at the most recent `started`
@@ -163,9 +164,9 @@
     main idea is to penalize such jobs such that they are forced to wait an
     exponentially larger amount of time with each consecutive crash. A central
     part to this algorithm is determining what forms a sequence of consecutive
-    crashes. If a job starts then quickly crashes, and after next start it
+    crashes. If a job starts then quickly crashes, and after its next start it
     crashes again, then that would become a sequence of 2 consecutive crashes.
-    The penalty then would be calcualted by `backoff_micros/1` function where
+    The penalty then would be calculated by the `backoff_micros/1` function where
     the consecutive crash count would end up as the exponent. However for
     practical concerns there is also a maximum penalty specified and that's the
     equivalent of 10 consecutive crashes. Timewise it ends up being about 8
@@ -187,13 +188,13 @@
   not used to restart children. The scheduler itself handles restarts and
   error-handling backoffs.
 
- * `couch_replicator_doc_processor`: The doc procesoor component is in charge
+ * `couch_replicator_doc_processor`: The doc processor component is in charge
    of processing replication document updates, turning them into replication
    jobs and adding those jobs to the scheduler. Unfortunately the only reason
    there is even a `couch_replicator_doc_processor` gen_server, instead of
    replication documents being turned to jobs and inserted into the scheduler
    directly, is because of one corner case -- filtered replications using
-   custom (Javascript mostly) filters. More about this later. It is better to
+   custom (JavaScript mostly) filters. More about this later. It is better to
    start with how updates flow through the doc processor:
 
    Document updates come via the `db_change/3` callback from
@@ -212,7 +213,7 @@
    `triggered` and `error`. Both of those states are removed from the document
   then the update proceeds in the regular fashion. `failed` documents are
    also ignored here. `failed` is a terminal state which indicates the document
-   was somehow unsuitable to become a replication job (it was malforemd or a
+   was somehow unsuitable to become a replication job (it was malformed or a
    duplicate). Otherwise the state update proceeds to `process_updated/2`.
 
    `process_updated/2` is where replication document updates are parsed and
@@ -283,7 +284,7 @@
    supervisor in the correct order (and monitored for crashes). This ensures
    the local replicator db exists, then returns `ignore`. This pattern is
    useful for doing setup-like things at the top level and in the correct order
-   regdaring the rest of the children in the supervisor.
+   regarding the rest of the children in the supervisor.
 
  * `couch_replicator_db_changes`: This process specializes and configures
    `couch_multidb_changes` so that it looks for `_replicator` suffixed shards
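To make the rate limiter's AIMD description concrete, a toy version of the
feedback rule follows. The constants and module are invented; the real
implementation keeps per-`{Method, Url}` intervals in the sharded ETS tables
of `couch_replicator_rate_limiter_tables`:

```
-module(aimd_demo).
-export([success/1, failure/1]).

-define(DECREMENT, 25).       % additive step on success (ms)
-define(FACTOR, 2).           % multiplicative step on a 429
-define(MAX_INTERVAL, 25000).

% A 429 from source/target: grow the sleep interval multiplicatively
% (i.e. multiplicatively decrease the request rate).
failure(IntervalMs) ->
    min(?MAX_INTERVAL, max(1, IntervalMs * ?FACTOR)).

% A successful request: shrink the interval additively, slowly probing
% for more capacity (additive increase of the request rate).
success(IntervalMs) ->
    max(0, IntervalMs - ?DECREMENT).
```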
diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
index 0b39634..50896c5 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler.erl
@@ -138,11 +138,15 @@
             ErrorCount = consecutive_crashes(History, HealthThreshold),
             {State, Info} = case {Pid, ErrorCount} of
                 {undefined, 0}  ->
-                    {pending, null};
+                    case History of
+                        [{{crashed, Error}, _When} | _] ->
+                            {crashing, crash_reason_json(Error)};
+                        [_ | _] ->
+                            {pending, null}
+                    end;
                 {undefined, ErrorCount} when ErrorCount > 0 ->
                      [{{crashed, Error}, _When} | _] = History,
-                     ErrMsg = couch_replicator_utils:rep_error_to_binary(Error),
-                     {crashing, ErrMsg};
+                     {crashing, crash_reason_json(Error)};
                 {Pid, ErrorCount} when is_pid(Pid) ->
                      {running, null}
             end,
@@ -1021,7 +1025,11 @@
             t_oneshot_will_hog_the_scheduler(),
             t_if_excess_is_trimmed_rotation_doesnt_happen(),
             t_if_transient_job_crashes_it_gets_removed(),
-            t_if_permanent_job_crashes_it_stays_in_ets()
+            t_if_permanent_job_crashes_it_stays_in_ets(),
+            t_job_summary_running(),
+            t_job_summary_pending(),
+            t_job_summary_crashing_once(),
+            t_job_summary_crashing_many_times()
          ]
     }.
 
@@ -1300,6 +1308,72 @@
    end).
 
 
+t_job_summary_running() ->
+    ?_test(begin
+        Job = #job{
+            id = job1,
+            pid = mock_pid(),
+            history = [added()],
+            rep = #rep{
+                db_name = <<"db1">>,
+                source = <<"s">>,
+                target = <<"t">>
+            }
+        },
+        setup_jobs([Job]),
+        Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
+        ?assertEqual(running, proplists:get_value(state, Summary)),
+        ?assertEqual(null, proplists:get_value(info, Summary)),
+        ?assertEqual(0, proplists:get_value(error_count, Summary))
+    end).
+
+
+t_job_summary_pending() ->
+    ?_test(begin
+        Job = #job{
+            id = job1,
+            pid = undefined,
+            history = [stopped(20), started(10), added()],
+            rep = #rep{source = <<"s">>, target = <<"t">>}
+        },
+        setup_jobs([Job]),
+        Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
+        ?assertEqual(pending, proplists:get_value(state, Summary)),
+        ?assertEqual(null, proplists:get_value(info, Summary)),
+        ?assertEqual(0, proplists:get_value(error_count, Summary))
+    end).
+
+
+t_job_summary_crashing_once() ->
+    ?_test(begin
+        Job = #job{
+            id = job1,
+            history = [crashed(?DEFAULT_HEALTH_THRESHOLD_SEC + 1), started(0)],
+            rep = #rep{source = <<"s">>, target = <<"t">>}
+        },
+        setup_jobs([Job]),
+        Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
+        ?assertEqual(crashing, proplists:get_value(state, Summary)),
+        ?assertEqual(<<"some_reason">>, proplists:get_value(info, Summary)),
+        ?assertEqual(0, proplists:get_value(error_count, Summary))
+    end).
+
+
+t_job_summary_crashing_many_times() ->
+    ?_test(begin
+        Job = #job{
+            id = job1,
+            history = [crashed(4), started(3), crashed(2), started(1)],
+            rep = #rep{source = <<"s">>, target = <<"t">>}
+        },
+        setup_jobs([Job]),
+        Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
+        ?assertEqual(crashing, proplists:get_value(state, Summary)),
+        ?assertEqual(<<"some_reason">>, proplists:get_value(info, Summary)),
+        ?assertEqual(2, proplists:get_value(error_count, Summary))
+    end).
+
+
 % Test helper functions
 
 setup() ->
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index e94934d..248a76d 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -87,6 +87,7 @@
 
 
 init(_) ->
+    couch_util:set_mqd_off_heap(),
     process_flag(trap_exit, true),
     BaseOpts = [public, named_table],
     CacheOpts = [
diff --git a/src/fabric/src/fabric_db_delete.erl b/src/fabric/src/fabric_db_delete.erl
index 9ba55fb..c146cb6 100644
--- a/src/fabric/src/fabric_db_delete.erl
+++ b/src/fabric/src/fabric_db_delete.erl
@@ -79,12 +79,12 @@
         case {Ok + NotFound, Ok, NotFound} of
         {W, 0, W} ->
             {#shard{dbname=Name}, _} = hd(Counters),
             couch_log:warning("~p not_found ~s", [?MODULE, Name]),
             {stop, not_found};
         {W, _, _} ->
             {stop, ok};
-        {N, M, _} when N >= (W div 2 + 1), M > 0 ->
-            {stop, accepted};
+        {_, M, _} when M > 0 ->
+            {stop, accepted};
         _ ->
             {error, internal_server_error}
         end
diff --git a/src/fabric/src/fabric_doc_atts.erl b/src/fabric/src/fabric_doc_atts.erl
new file mode 100644
index 0000000..7ef5dd8
--- /dev/null
+++ b/src/fabric/src/fabric_doc_atts.erl
@@ -0,0 +1,168 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_doc_atts).
+
+-include_lib("fabric/include/fabric.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-export([
+    receiver/2,
+    receiver_callback/2
+]).
+
+
+receiver(_Req, undefined) ->
+    <<"">>;
+receiver(_Req, {unknown_transfer_encoding, Unknown}) ->
+    exit({unknown_transfer_encoding, Unknown});
+receiver(Req, chunked) ->
+    MiddleMan = spawn(fun() -> middleman(Req, chunked) end),
+    {fabric_attachment_receiver, MiddleMan, chunked};
+receiver(_Req, 0) ->
+    <<"">>;
+receiver(Req, Length) when is_integer(Length) ->
+    maybe_send_continue(Req),
+    Middleman = spawn(fun() -> middleman(Req, Length) end),
+    {fabric_attachment_receiver, Middleman, Length};
+receiver(_Req, Length) ->
+    exit({length_not_integer, Length}).
+
+
+receiver_callback(Middleman, chunked) ->
+    fun(4096, ChunkFun, State) ->
+        write_chunks(Middleman, ChunkFun, State)
+    end;
+receiver_callback(Middleman, Length) when is_integer(Length) ->
+    fun() ->
+        Middleman ! {self(), gimme_data},
+        Timeout = fabric_util:attachments_timeout(),
+        receive
+            {Middleman, Data} ->
+                rexi:reply(attachment_chunk_received),
+                Data
+        after Timeout ->
+            exit(timeout)
+        end
+    end.
+
+
+%%
+%% internal
+%%
+
+maybe_send_continue(#httpd{mochi_req = MochiReq} = Req) ->
+    case couch_httpd:header_value(Req, "expect") of
+    undefined ->
+        ok;
+    Expect ->
+        case string:to_lower(Expect) of
+        "100-continue" ->
+            MochiReq:start_raw_response({100, gb_trees:empty()});
+        _ ->
+            ok
+        end
+    end.
+
+write_chunks(MiddleMan, ChunkFun, State) ->
+    MiddleMan ! {self(), gimme_data},
+    Timeout = fabric_util:attachments_timeout(),
+    receive
+    {MiddleMan, ChunkRecordList} ->
+        rexi:reply(attachment_chunk_received),
+        case flush_chunks(ChunkRecordList, ChunkFun, State) of
+            {continue, NewState} ->
+                write_chunks(MiddleMan, ChunkFun, NewState);
+            {done, NewState} ->
+                NewState
+        end
+    after Timeout ->
+        exit(timeout)
+    end.
+
+flush_chunks([], _ChunkFun, State) ->
+    {continue, State};
+flush_chunks([{0, _}], _ChunkFun, State) ->
+    {done, State};
+flush_chunks([Chunk | Rest], ChunkFun, State) ->
+    NewState = ChunkFun(Chunk, State),
+    flush_chunks(Rest, ChunkFun, NewState).
+
+receive_unchunked_attachment(_Req, 0) ->
+    ok;
+receive_unchunked_attachment(Req, Length) ->
+    receive {MiddleMan, go} ->
+        Data = couch_httpd:recv(Req, 0),
+        MiddleMan ! {self(), Data}
+    end,
+    receive_unchunked_attachment(Req, Length - size(Data)).
+
+middleman(Req, chunked) ->
+    % spawn a process to actually receive the uploaded data
+    RcvFun = fun(ChunkRecord, ok) ->
+        receive {From, go} -> From ! {self(), ChunkRecord} end, ok
+    end,
+    Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
+
+    % take requests from the DB writers and get data from the receiver
+    N = erlang:list_to_integer(config:get("cluster","n")),
+    Timeout = fabric_util:attachments_timeout(),
+    middleman_loop(Receiver, N, [], [], Timeout);
+
+middleman(Req, Length) ->
+    Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
+    N = erlang:list_to_integer(config:get("cluster","n")),
+    Timeout = fabric_util:attachments_timeout(),
+    middleman_loop(Receiver, N, [], [], Timeout).
+
+middleman_loop(Receiver, N, Counters0, ChunkList0, Timeout) ->
+    receive {From, gimme_data} ->
+        % Figure out how far along this writer (From) is in the list
+        ListIndex = case fabric_dict:lookup_element(From, Counters0) of
+        undefined -> 0;
+        I -> I
+        end,
+
+        % Talk to the receiver to get another chunk if necessary
+        ChunkList1 = if ListIndex == length(ChunkList0) ->
+            Receiver ! {self(), go},
+            receive
+                {Receiver, ChunkRecord} ->
+                    ChunkList0 ++ [ChunkRecord]
+            end;
+        true -> ChunkList0 end,
+
+        % reply to the writer
+        Reply = lists:nthtail(ListIndex, ChunkList1),
+        From ! {self(), Reply},
+
+        % Update the counter for this writer
+        Counters1 = fabric_dict:update_counter(From, length(Reply), Counters0),
+
+        % Drop any chunks that have been sent to all writers
+        Size = fabric_dict:size(Counters1),
+        NumToDrop = lists:min([I || {_, I} <- Counters1]),
+
+        {ChunkList3, Counters3} =
+        if Size == N andalso NumToDrop > 0 ->
+            ChunkList2 = lists:nthtail(NumToDrop, ChunkList1),
+            Counters2 = [{F, I-NumToDrop} || {F, I} <- Counters1],
+            {ChunkList2, Counters2};
+        true ->
+            {ChunkList1, Counters1}
+        end,
+
+        middleman_loop(Receiver, N, Counters3, ChunkList3, Timeout)
+    after Timeout ->
+        exit(Receiver, kill),
+        ok
+    end.
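A toy, single-writer simulation of the handshake implemented above, with the
buffering, counters and timeouts stripped out; payloads and module name are
invented:

```
-module(att_middleman_demo).
-export([run/0]).

% A writer asks the middleman with gimme_data; the middleman asks the
% receiver with go and relays one chunk back, as in fabric_doc_atts.
run() ->
    Receiver = spawn(fun() -> receiver([<<"chunk1">>, <<"chunk2">>]) end),
    Middleman = spawn(fun() -> middleman(Receiver) end),
    Middleman ! {self(), gimme_data},
    receive {Middleman, <<"chunk1">>} -> ok end,
    Middleman ! {self(), gimme_data},
    receive {Middleman, <<"chunk2">>} -> ok end,
    ok.

receiver([Chunk | Rest]) ->
    receive {From, go} -> From ! {self(), Chunk} end,
    receiver(Rest);
receiver([]) ->
    ok.

middleman(Receiver) ->
    receive {From, gimme_data} ->
        Receiver ! {self(), go},
        receive {Receiver, Chunk} -> From ! {self(), Chunk} end,
        middleman(Receiver)
    end.
```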
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 4a69e7e..60526f4 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -142,8 +142,9 @@
     couch_mrview:query_view(Db, DDoc, ViewName, Args, fun reduce_cb/2, VAcc0).
 
 fix_skip_and_limit(Args) ->
-    #mrargs{skip=Skip, limit=Limit}=Args,
-    Args#mrargs{skip=0, limit=Skip+Limit}.
+    #mrargs{skip=Skip, limit=Limit, extra=Extra}=Args,
+    % the coordinator needs to finalize each row, so make sure the shards don't
+    Args#mrargs{skip=0, limit=Skip+Limit, extra=[{finalizer,null} | Extra]}.
 
 create_db(DbName) ->
     create_db(DbName, []).
@@ -439,6 +440,8 @@
                 throw({mp_parser_died, Reason})
         end
     end;
+make_att_reader({fabric_attachment_receiver, Middleman, Length}) ->
+    fabric_doc_atts:receiver_callback(Middleman, Length);
 make_att_reader(Else) ->
     Else.
 
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index dd0fcfd..69f4290 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -230,8 +230,9 @@
         end, Counters0, Records),
         Wrapped = [[V] || #view_row{value=V} <- Records],
         {ok, [Reduced]} = couch_query_servers:rereduce(Lang, [RedSrc], Wrapped),
+        {ok, Finalized} = couch_query_servers:finalize(RedSrc, Reduced),
         NewSt = St#collector{keys=RestKeys, rows=NewRowDict, counters=Counters},
-        {#view_row{key=Key, id=reduced, value=Reduced}, NewSt};
+        {#view_row{key=Key, id=reduced, value=Finalized}, NewSt};
     error ->
         get_next_row(St#collector{keys=RestKeys})
     end;
diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
index 5108d36..5d2ea71 100644
--- a/src/mango/src/mango_cursor.erl
+++ b/src/mango/src/mango_cursor.erl
@@ -48,18 +48,12 @@
 create(Db, Selector0, Opts) ->
     Selector = mango_selector:normalize(Selector0),
     UsableIndexes = mango_idx:get_usable_indexes(Db, Selector, Opts),
-    case length(UsableIndexes) of
-        0 ->
-            AllDocs = mango_idx:special(Db),
-            create_cursor(Db, AllDocs, Selector, Opts);
-        _ ->
-            case mango_cursor:maybe_filter_indexes_by_ddoc(UsableIndexes, Opts) of
-                [] ->
-                    % use_index doesn't match a valid index - fall back to a valid one
-                    create_cursor(Db, UsableIndexes, Selector, Opts);
-                UserSpecifiedIndex ->
-                    create_cursor(Db, UserSpecifiedIndex, Selector, Opts)
-            end
+    case mango_cursor:maybe_filter_indexes_by_ddoc(UsableIndexes, Opts) of
+        [] ->
+            % use_index doesn't match a valid index - fall back to a valid one
+            create_cursor(Db, UsableIndexes, Selector, Opts);
+        UserSpecifiedIndex ->
+            create_cursor(Db, UserSpecifiedIndex, Selector, Opts)
     end.
 
 
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 1e2108b..dbea36e 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -70,7 +70,8 @@
         {end_key, maybe_replace_max_json(Args#mrargs.end_key)},
         {direction, Args#mrargs.direction},
         {stable, Args#mrargs.stable},
-        {update, Args#mrargs.update}
+        {update, Args#mrargs.update},
+        {conflicts, Args#mrargs.conflicts}
     ]}}].
 
 
@@ -283,9 +284,8 @@
     NewArgs = Args#mrargs{include_docs = IncludeDocs},
     apply_opts(Rest, NewArgs);
 apply_opts([{conflicts, true} | Rest], Args) ->
-    % I need to patch things so that views can specify
-    % parameters when loading the docs from disk
-    apply_opts(Rest, Args);
+    NewArgs = Args#mrargs{conflicts = true},
+    apply_opts(Rest, NewArgs);
 apply_opts([{conflicts, false} | Rest], Args) ->
     % Ignored cause default
     apply_opts(Rest, Args);
diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl
index ad665e2..b2bbb39 100644
--- a/src/mango/src/mango_error.erl
+++ b/src/mango/src/mango_error.erl
@@ -308,7 +308,7 @@
     {
         400,
         <<"invalid_sort_json">>,
-        fmt("Sort must be an array of sort specs, not: ~w", [BadSort])
+        fmt("Sort must be an array of sort specs, not: ~p", [BadSort])
     };
 info(mango_sort, {invalid_sort_dir, BadSpec}) ->
     {
@@ -320,7 +320,7 @@
     {
         400,
         <<"invalid_sort_field">>,
-        fmt("Invalid sort field: ~w", [BadField])
+        fmt("Invalid sort field: ~p", [BadField])
     };
 info(mango_sort, {unsupported, mixed_sort}) ->
     {
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index ea5949c..8af92b9 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -66,13 +66,12 @@
 
     SortFields = get_sort_fields(Opts),
     UsableFilter = fun(I) -> is_usable(I, Selector, SortFields) end,
-    UsableIndexes1 = lists:filter(UsableFilter, UsableIndexes0),
 
-    case maybe_filter_by_sort_fields(UsableIndexes1, SortFields) of
-        {ok, SortIndexes} -> 
-            SortIndexes;
-        {error, no_usable_index} -> 
-            ?MANGO_ERROR({no_usable_index, missing_sort_index})
+    case lists:filter(UsableFilter, UsableIndexes0) of
+        [] -> 
+            ?MANGO_ERROR({no_usable_index, missing_sort_index});
+        UsableIndexes -> 
+            UsableIndexes
     end.
 
 
@@ -100,31 +99,6 @@
     end.
 
 
-maybe_filter_by_sort_fields(Indexes, []) ->
-    {ok, Indexes};
-
-maybe_filter_by_sort_fields(Indexes, SortFields) ->
-    FilterFun = fun(Idx) ->
-        Cols = mango_idx:columns(Idx),
-        case {mango_idx:type(Idx), Cols} of
-            {_, all_fields} ->
-                true;
-            {<<"text">>, _} ->
-                sets:is_subset(sets:from_list(SortFields), sets:from_list(Cols));
-            {<<"json">>, _} ->
-                lists:prefix(SortFields, Cols);
-            {<<"special">>, _} ->
-                lists:prefix(SortFields, Cols)
-        end
-    end,
-    case lists:filter(FilterFun, Indexes) of
-        [] ->
-            {error, no_usable_index};
-        FilteredIndexes ->
-            {ok, FilteredIndexes}
-    end.
-
-
 new(Db, Opts) ->
     Def = get_idx_def(Opts),
     Type = get_idx_type(Opts),
diff --git a/src/mango/src/mango_idx_special.erl b/src/mango/src/mango_idx_special.erl
index 12da1cb..ac6efc7 100644
--- a/src/mango/src/mango_idx_special.erl
+++ b/src/mango/src/mango_idx_special.erl
@@ -63,9 +63,11 @@
     [<<"_id">>].
 
 
-is_usable(#idx{def=all_docs}, Selector, _) ->
+is_usable(#idx{def=all_docs}, _Selector, []) ->
+    true;
+is_usable(#idx{def=all_docs} = Idx, Selector, SortFields) ->
     Fields = mango_idx_view:indexable_fields(Selector),
-    lists:member(<<"_id">>, Fields).
+    lists:member(<<"_id">>, Fields) and can_use_sort(Idx, SortFields, Selector).
 
 
 start_key([{'$gt', Key, _, _}]) ->
@@ -96,3 +98,10 @@
 end_key([{'$eq', Key, '$eq', Key}]) ->
     false = mango_json:special(Key),
     Key.
+
+
+can_use_sort(_Idx, [], _Selector) ->
+    true;
+can_use_sort(Idx, SortFields, _Selector) ->
+    Cols = columns(Idx),
+    lists:prefix(SortFields, Cols).
diff --git a/src/mango/src/mango_idx_view.erl b/src/mango/src/mango_idx_view.erl
index c9fe4c8..2d784b6 100644
--- a/src/mango/src/mango_idx_view.erl
+++ b/src/mango/src/mango_idx_view.erl
@@ -124,8 +124,15 @@
     % we don't need to check the selector for these
     RequiredFields1 = ordsets:subtract(lists:usort(RequiredFields), lists:usort(SortFields)),
 
-    mango_selector:has_required_fields(Selector, RequiredFields1)
-        andalso not is_text_search(Selector).
+    % _id and _rev are implicitly in every document so
+    % we don't need to check the selector for these either
+    RequiredFields2 = ordsets:subtract(
+        RequiredFields1,
+        [<<"_id">>, <<"_rev">>]),
+
+    mango_selector:has_required_fields(Selector, RequiredFields2)
+        andalso not is_text_search(Selector)
+        andalso can_use_sort(RequiredFields, SortFields, Selector).
 
 
 is_text_search({[]}) ->
@@ -505,3 +512,30 @@
                     max
             end
     end.
+
+
+% can_use_sort works as follows:
+%
+% * If there are no sort fields, the index is usable
+% * If we run out of index columns, the index is not usable
+% * If the current column is the start of the sort, the index is usable
+%   iff the sort fields are a prefix of the remaining columns
+% * If the current column is constant, drop it and continue; otherwise
+%   the index is not usable
+%
+% A constant column is one that cannot affect the sort order,
+% for example {"A": {"$eq": 21}}
+%
+% Currently we only consider constant fields that are prefixes of the sort
+% fields set by the user. We considered also allowing constant fields after
+% the sort fields but were not 100% sure that it would not affect the
+% sorting of the query.
+
+can_use_sort(_Cols, [], _Selector) ->
+    true;
+can_use_sort([], _SortFields, _Selector) ->
+    false;
+can_use_sort([Col | _] = Cols, [Col | _] = SortFields, _Selector) ->
+    lists:prefix(SortFields, Cols);
+can_use_sort([Col | RestCols], SortFields, Selector) ->
+    case mango_selector:is_constant_field(Selector, Col) of
+        true -> can_use_sort(RestCols, SortFields, Selector);
+        false -> false
+    end.
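+
+% Worked example: with index columns [<<"cars">>, <<"age">>], sort fields
+% [<<"age">>] and a selector where cars is constant (e.g. {"cars": {"$eq": 2}}),
+% the cars column is dropped and the sort fields become a prefix of the
+% remaining columns, so the index is usable. With {"cars": {"$gt": 10}} the
+% cars column is not constant and the index is rejected.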
diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl
index 968dc3c..fffadcd 100644
--- a/src/mango/src/mango_selector.erl
+++ b/src/mango/src/mango_selector.erl
@@ -16,7 +16,8 @@
 -export([
     normalize/1,
     match/2,
-    has_required_fields/2
+    has_required_fields/2,
+    is_constant_field/2
 ]).
 
 
@@ -638,11 +639,121 @@
     end.
 
 
+% Returns true if a field in the selector is a constant value e.g. {a: {$eq: 1}}
+is_constant_field({[]}, _Field) ->
+    false;
+
+is_constant_field(Selector, Field) when not is_list(Selector) ->
+    is_constant_field([Selector], Field);
+
+is_constant_field([], _Field) ->
+    false;
+
+is_constant_field([{[{<<"$and">>, Args}]}], Field) when is_list(Args) ->
+    lists:any(fun(Arg) -> is_constant_field(Arg, Field) end, Args);
+
+is_constant_field([{[{<<"$and">>, Args}]}], Field) ->
+    is_constant_field(Args, Field);
+
+is_constant_field([{[{Field, {[{Cond, _Val}]}}]} | _Rest], Field) ->
+    Cond =:= <<"$eq">>;
+
+is_constant_field([{[{_UnMatched, _}]} | Rest], Field) ->
+    is_constant_field(Rest, Field).
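+
+% Example (mirrored by the tests below): after normalize/1, a direct equality
+% such as {"cars": {"$eq": "2"}} inside an $and makes <<"cars">> constant,
+% while a range like {"age": {"$gt": 10}} or a field that only appears under
+% $or does not.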
+
+
 %%%%%%%% module tests below %%%%%%%%
 
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
 
+is_constant_field_basic_test() ->
+    Selector = normalize({[{<<"A">>, <<"foo">>}]}),
+    Field = <<"A">>,
+    ?assertEqual(true, is_constant_field(Selector, Field)).
+
+is_constant_field_basic_two_test() ->
+    Selector = normalize({[{<<"$and">>,
+        [
+            {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
+            {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
+        ]
+    }]}),
+    Field = <<"cars">>,
+    ?assertEqual(true, is_constant_field(Selector, Field)).
+
+is_constant_field_not_eq_test() ->
+    Selector = normalize({[{<<"$and">>,
+        [
+            {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
+            {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
+        ]
+    }]}),
+    Field = <<"age">>,
+    ?assertEqual(false, is_constant_field(Selector, Field)).
+
+is_constant_field_missing_field_test() ->
+    Selector = normalize({[{<<"$and">>,
+        [
+            {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
+            {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
+        ]
+    }]}),
+    Field = <<"wrong">>,
+    ?assertEqual(false, is_constant_field(Selector, Field)).
+
+is_constant_field_or_field_test() ->
+    Selector = {[{<<"$or">>,
+          [
+              {[{<<"A">>, <<"foo">>}]},
+              {[{<<"B">>, <<"foo">>}]}
+          ]
+    }]},
+    Normalized = normalize(Selector),
+    Field = <<"A">>,
+    ?assertEqual(false, is_constant_field(Normalized, Field)).
+
+is_constant_field_empty_selector_test() ->
+    Selector = normalize({[]}),
+    Field = <<"wrong">>,
+    ?assertEqual(false, is_constant_field(Selector, Field)).
+
+is_constant_nested_and_test() ->
+    Selector1 = {[{<<"$and">>,
+          [
+              {[{<<"A">>, <<"foo">>}]}
+          ]
+    }]},
+    Selector2 = {[{<<"$and">>,
+          [
+              {[{<<"B">>, {[{<<"$gt">>,10}]}}]}
+          ]
+    }]},
+    Selector = {[{<<"$and">>,
+          [
+              Selector1,
+              Selector2
+          ]
+    }]},
+
+    Normalized = normalize(Selector),
+    ?assertEqual(true, is_constant_field(Normalized, <<"A">>)),
+    ?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
+
+is_constant_combined_or_and_equals_test() ->
+    Selector = {[{<<"A">>, "foo"},
+          {<<"$or">>,
+              [
+                  {[{<<"B">>, <<"bar">>}]},
+                  {[{<<"B">>, <<"baz">>}]}
+              ]
+          },
+          {<<"C">>, "qux"}
+    ]},
+    Normalized = normalize(Selector),
+    ?assertEqual(true, is_constant_field(Normalized, <<"C">>)),
+    ?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
+
 has_required_fields_basic_test() ->
     RequiredFields = [<<"A">>],
     Selector = {[{<<"A">>, <<"foo">>}]},
diff --git a/src/mango/test/02-basic-find-test.py b/src/mango/test/02-basic-find-test.py
index f7e151a..6a31d33 100644
--- a/src/mango/test/02-basic-find-test.py
+++ b/src/mango/test/02-basic-find-test.py
@@ -333,3 +333,10 @@
         assert explain["mrargs"]["start_key"] == [0]
         assert explain["mrargs"]["end_key"] == ["<MAX>"]
         assert explain["mrargs"]["include_docs"] == True
+
+    def test_sort_with_all_docs(self):
+        explain = self.db.find({
+            "_id": {"$gt": 0},
+            "age": {"$gt": 0}
+        }, sort=["_id"], explain=True)
+        self.assertEqual(explain["index"]["type"], "special")
diff --git a/src/mango/test/12-use-correct-index-test.py b/src/mango/test/12-use-correct-index-test.py
index 5a2b24d..7bb90eb 100644
--- a/src/mango/test/12-use-correct-index-test.py
+++ b/src/mango/test/12-use-correct-index-test.py
@@ -114,3 +114,16 @@
         explain = self.db.find(selector, explain=True)
         self.assertEqual(explain["index"]["ddoc"], "_design/bbb")
         self.assertEqual(explain["mrargs"]["end_key"], [10, '<MAX>'])
+
+    # all documents contain an _id and _rev field they
+    # should not be used to restrict indexes based on the
+    # fields required by the selector
+    def test_choose_index_with_id(self):
+        self.db.create_index(["name", "_id"], ddoc="aaa")
+        explain = self.db.find({"name": "Eddie"}, explain=True)
+        self.assertEqual(explain["index"]["ddoc"], '_design/aaa')
+
+    def test_choose_index_with_rev(self):
+        self.db.create_index(["name", "_rev"], ddoc="aaa")
+        explain = self.db.find({"name": "Eddie"}, explain=True)
+        self.assertEqual(explain["index"]["ddoc"], '_design/aaa')
diff --git a/src/mango/test/18-json-sort.py b/src/mango/test/18-json-sort.py
new file mode 100644
index 0000000..f8d2abe
--- /dev/null
+++ b/src/mango/test/18-json-sort.py
@@ -0,0 +1,222 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import mango
+import copy
+import unittest
+
+DOCS = [
+    {
+        "_id": "1",
+        "name": "Jimi",
+        "age": 10,
+        "cars": 1
+    },
+    {
+        "_id": "2",
+        "name": "Eddie",
+        "age": 20,
+        "cars": 1
+    },
+    {
+        "_id": "3",
+        "name": "Jane",
+        "age": 30,
+        "cars": 2
+    },
+    {
+        "_id": "4",
+        "name": "Mary",
+        "age": 40,
+        "cars": 2
+    },
+    {
+        "_id": "5",
+        "name": "Sam",
+        "age": 50,
+        "cars": 3
+    }
+]
+
+class JSONIndexSortOptimisations(mango.DbPerClass):
+    def setUp(self):
+        self.db.recreate()
+        self.db.save_docs(copy.deepcopy(DOCS))
+
+    def test_works_for_basic_case(self):
+        self.db.create_index(["cars", "age"], name="cars-age")
+        selector = {
+            "cars": "2",
+            "age": {
+                "$gt": 10
+            }
+        }
+        explain = self.db.find(selector, sort=["age"], explain=True)
+        self.assertEqual(explain["index"]["name"], "cars-age")
+        self.assertEqual(explain["mrargs"]["direction"], "fwd")
+
+    def test_works_for_all_fields_specified(self):
+        self.db.create_index(["cars", "age"], name="cars-age")
+        selector = {
+            "cars": "2",
+            "age": {
+                "$gt": 10
+            }
+        }
+        explain = self.db.find(selector, sort=["cars", "age"], explain=True)
+        self.assertEqual(explain["index"]["name"], "cars-age")
+
+    def test_works_for_no_sort_fields_specified(self):
+        self.db.create_index(["cars", "age"], name="cars-age")
+        selector = {
+            "cars": {
+                "$gt": 10
+            },
+            "age": {
+                "$gt": 10
+            }
+        }
+        explain = self.db.find(selector, explain=True)
+        self.assertEqual(explain["index"]["name"], "cars-age")
+
+    def test_works_for_opp_dir_sort(self):
+        self.db.create_index(["cars", "age"], name="cars-age")
+        selector = {
+            "cars": "2",
+            "age": {
+                "$gt": 10
+            }
+        }
+        explain = self.db.find(selector, sort=[{"age": "desc"}], explain=True)
+        self.assertEqual(explain["index"]["name"], "cars-age")
+        self.assertEqual(explain["mrargs"]["direction"], "rev")
+    
+    def test_not_work_for_non_constant_field(self):
+        self.db.create_index(["cars", "age"], name="cars-age")
+        selector = {
+            "cars": {
+                "$gt": 10
+            },
+            "age": {
+                "$gt": 10
+            }
+        }
+        try:
+            self.db.find(selector, explain=True, sort=["age"])
+            raise Exception("Should not get here")
+        except Exception as e:
+            resp = e.response.json()
+            self.assertEqual(resp["error"], "no_usable_index")
+
+    def test_three_index_one(self):
+        self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+        selector = {
+            "cars": "2",
+            "age": 10,
+            "name": {
+                "$gt": "AA"
+            }
+        }
+        explain = self.db.find(selector, sort=["name"], explain=True)
+        self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+    def test_three_index_two(self):
+        self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+        selector = {
+            "cars": "2",
+            "name": "Eddie",
+            "age": {
+                "$gt": 10
+            }
+        }
+        explain = self.db.find(selector, sort=["age"], explain=True)
+        self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+    def test_three_index_fails(self):
+        self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+        selector = {
+            "name": "Eddie",
+            "age": {
+                "$gt": 1
+            },
+            "cars": {
+                "$gt": "1"
+            }
+        }
+        try:
+            self.db.find(selector, explain=True, sort=["name"])
+            raise Exception("Should not get here")
+        except Exception as e:
+            resp = e.response.json()
+            self.assertEqual(resp["error"], "no_usable_index")
+
+    def test_empty_sort(self):
+        self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+        selector = {
+            "name": {
+                "$gt": "Eddie",
+            },
+            "age": 10,
+            "cars": {
+                "$gt": "1"
+            }
+        }
+        explain = self.db.find(selector, explain=True)
+        self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+    def test_in_between(self):
+        self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+        selector = {
+            "name": "Eddie",
+            "age": 10,
+            "cars": {
+                "$gt": "1"
+            }
+        }
+        explain = self.db.find(selector, explain=True)
+        self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+        try:
+            self.db.find(selector, sort=["cars", "name"], explain=True)
+            raise Exception("Should not get here")
+        except Exception as e:
+            resp = e.response.json()
+            self.assertEqual(resp["error"], "no_usable_index")
+    
+    def test_ignore_after_set_sort_value(self):
+        self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+        selector = {
+            "age": {
+                "$gt": 10
+            },
+            "cars": 2,
+            "name": {
+                "$gt": "A"
+            }
+        }
+        explain = self.db.find(selector, sort=["age"], explain=True)
+        self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+    def test_not_use_index_if_other_fields_in_sort(self):
+        self.db.create_index(["cars", "age"], name="cars-age")
+        selector = {
+            "age": 10,
+            "cars": {
+                "$gt": "1"
+            }
+        }
+        try:
+            self.db.find(selector, sort=["cars", "name"], explain=True)
+            raise Exception("Should not get here")
+        except Exception as e:
+            resp = e.response.json()
+            self.assertEqual(resp["error"], "no_usable_index")
diff --git a/src/mango/test/19-find-conflicts.py b/src/mango/test/19-find-conflicts.py
new file mode 100644
index 0000000..c6d59f0
--- /dev/null
+++ b/src/mango/test/19-find-conflicts.py
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import mango
+import copy
+
+DOC = [
+    {
+        "_id": "doc",
+        "a": 2
+    }
+]
+
+CONFLICT = [
+    {
+        "_id": "doc",
+        "_rev": "1-23202479633c2b380f79507a776743d5",
+        "a": 1
+    }
+]
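+# CONFLICT is written with new_edits=False (see save_docs_with_conflicts),
+# so its fixed revision is stored verbatim and ends up as a conflict of DOC.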
+
+class ChooseCorrectIndexForDocs(mango.DbPerClass):
+    def setUp(self):
+        self.db.recreate()
+        self.db.save_docs(copy.deepcopy(DOC))
+        self.db.save_docs_with_conflicts(copy.deepcopy(CONFLICT))
+
+    def test_retrieve_conflicts(self):
+        self.db.create_index(["_conflicts"])
+        result = self.db.find({"_conflicts": { "$exists": True}}, conflicts=True)
+        self.assertEqual(result[0]['_conflicts'][0], '1-23202479633c2b380f79507a776743d5')
+        self.assertEqual(result[0]['_rev'], '1-3975759ccff3842adf690a5c10caee42')
diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py
index 9b6b998..bc12bbc 100644
--- a/src/mango/test/mango.py
+++ b/src/mango/test/mango.py
@@ -95,6 +95,11 @@
     def save_doc(self, doc):
         self.save_docs([doc])
 
+    def save_docs_with_conflicts(self, docs, **kwargs):
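+        # new_edits=False asks CouchDB to store the supplied revisions
+        # verbatim instead of generating new ones, which lets tests
+        # introduce conflicting revisions deliberately.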
+        body = json.dumps({"docs": docs, "new_edits": False})
+        r = self.sess.post(self.path("_bulk_docs"), data=body, params=kwargs)
+        r.raise_for_status()
+
     def save_docs(self, docs, **kwargs):
         body = json.dumps({"docs": docs})
         r = self.sess.post(self.path("_bulk_docs"), data=body, params=kwargs)
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index 0975d2f..da3b69a 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -184,6 +184,7 @@
     erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
 
 init([]) ->
+    couch_util:set_mqd_off_heap(),
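+    % (assumption from the helper's name: this presumably flags
+    % message_queue_data as off_heap so a large mailbox does not
+    % inflate garbage-collection cost for this busy server process)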
     ets:new(?SHARDS, [
         bag,
         public,
@@ -724,42 +725,25 @@
 
 
 mem3_shards_changes_test_() -> {
-    "Test mem3_shards changes listener", {
-        foreach,
-        fun setup_changes/0, fun teardown_changes/1,
+    "Test mem3_shards changes listener",
+    {
+        setup,
+        fun test_util:start_couch/0, fun test_util:stop_couch/1,
         [
-            fun should_kill_changes_listener_on_shutdown/1
+            fun should_kill_changes_listener_on_shutdown/0
         ]
     }
 }.
 
 
-setup_changes() ->
-    RespDb = test_util:fake_db([{name, <<"dbs">>}, {update_seq, 0}]),
-    ok = meck:expect(mem3_util, ensure_exists, ['_'], {ok, RespDb}),
-    ok = meck:expect(couch_db, close, ['_'], ok),
-    ok = application:start(config),
+should_kill_changes_listener_on_shutdown() ->
     {ok, Pid} = ?MODULE:start_link(),
+    {ok, ChangesPid} = get_changes_pid(),
+    ?assert(is_process_alive(ChangesPid)),
     true = erlang:unlink(Pid),
-    Pid.
-
-
-teardown_changes(Pid) ->
-    true = exit(Pid, shutdown),
-    ok = application:stop(config),
-    meck:unload().
-
-
-should_kill_changes_listener_on_shutdown(Pid) ->
-    ?_test(begin
-        ?assert(is_process_alive(Pid)),
-        {ok, ChangesPid} = get_changes_pid(),
-        ?assert(is_process_alive(ChangesPid)),
-        true = test_util:stop_sync_throw(
-            ChangesPid, fun() -> exit(Pid, shutdown) end, wait_timeout),
-        ?assertNot(is_process_alive(ChangesPid)),
-        ok
-    end).
-
+    true = test_util:stop_sync_throw(
+        ChangesPid, fun() -> exit(Pid, shutdown) end, wait_timeout),
+    ?assertNot(is_process_alive(ChangesPid)),
+    exit(Pid, shutdown).
 
 -endif.
diff --git a/src/mem3/test/01-config-default.ini b/src/mem3/test/01-config-default.ini
deleted file mode 100644
index dde92ce..0000000
--- a/src/mem3/test/01-config-default.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-[cluster]
-n=3
diff --git a/src/mem3/test/mem3_sync_security_test.erl b/src/mem3/test/mem3_sync_security_test.erl
index 8b6af3c..4e06dd8 100644
--- a/src/mem3/test/mem3_sync_security_test.erl
+++ b/src/mem3/test/mem3_sync_security_test.erl
@@ -19,11 +19,14 @@
 
 go_test() ->
     Ctx = test_util:start_couch([fabric, mem3]),
-    ok = meck:new(fabric, [passthrough]),
-    meck:expect(fabric, all_dbs, fun() ->
-        {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]}
-    end),
-    Result = mem3_sync_security:go(),
-    meck:unload(fabric),
-    test_util:stop_couch(Ctx),
-    ?assertEqual(ok, Result).
+    try
+        ok = meck:new(fabric, [passthrough]),
+        meck:expect(fabric, all_dbs, fun() ->
+            {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]}
+        end),
+        Result = mem3_sync_security:go(),
+        ?assertEqual(ok, Result)
+    after
+        meck:unload(),
+        test_util:stop_couch(Ctx)
+    end.
diff --git a/src/mem3/test/mem3_util_test.erl b/src/mem3/test/mem3_util_test.erl
index 163580c..214217e 100644
--- a/src/mem3/test/mem3_util_test.erl
+++ b/src/mem3/test/mem3_util_test.erl
@@ -121,47 +121,18 @@
 %% n_val tests
 
 nval_test_() ->
-    {"n_val tests explicit",
-     [
-      {setup,
-       fun () ->
-               meck:new([couch_log]),
-               meck:expect(couch_log, error, fun(_, _) -> ok end),
-               ok
-       end,
-       fun (_) -> meck:unload([couch_log]) end,
-       [
-        ?_assertEqual(2, mem3_util:n_val(2,4)),
-        ?_assertEqual(1, mem3_util:n_val(-1,4)),
-        ?_assertEqual(4, mem3_util:n_val(6,4))
+    {
+        setup,
+        fun() ->
+            meck:new([config, couch_log]),
+            meck:expect(couch_log, error, 2, ok),
+            meck:expect(config, get, 3, "5")
+        end,
+        fun(_) -> meck:unload() end,
+        [
+            ?_assertEqual(2, mem3_util:n_val(2, 4)),
+            ?_assertEqual(1, mem3_util:n_val(-1, 4)),
+            ?_assertEqual(4, mem3_util:n_val(6, 4)),
+            ?_assertEqual(5, mem3_util:n_val(undefined, 6))
         ]
-       }
-     ]
     }.
-
-
-config_01_setup() ->
-    Ini = filename:join([code:lib_dir(mem3, test), "01-config-default.ini"]),
-    {ok, Pid} = config:start_link([Ini]),
-    Pid.
-
-config_teardown(Pid) ->
-    test_util:stop_config(Pid).
-
-
-n_val_test_() ->
-    {"n_val tests with config",
-     [
-      {setup,
-       fun config_01_setup/0,
-       fun config_teardown/1,
-       fun(Pid) ->
-           {with, Pid, [
-               fun n_val_1/1
-            ]}
-       end}
-     ]
-    }.
-
-n_val_1(_Pid) ->
-    ?assertEqual(3, mem3_util:n_val(undefined, 4)).
diff --git a/src/rexi/src/rexi_server.erl b/src/rexi/src/rexi_server.erl
index 3d3f272..954ca88 100644
--- a/src/rexi/src/rexi_server.erl
+++ b/src/rexi/src/rexi_server.erl
@@ -39,6 +39,7 @@
     gen_server:start_link({local, ServerId}, ?MODULE, [], []).
 
 init([]) ->
+    couch_util:set_mqd_off_heap(),
     {ok, #st{}}.
 
 handle_call(get_errors, _From, #st{errors = Errors} = St) ->
diff --git a/test/javascript/run b/test/javascript/run
index 8ae4244..ca69e1f 100755
--- a/test/javascript/run
+++ b/test/javascript/run
@@ -134,10 +134,11 @@
                 tmp.append(name)
         tests = tmp
 
-    fmt = mkformatter(tests)
     passed = 0
     failed = 0
-    for test in tests:
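+    # Guard against an empty selection: only build the formatter and run the
+    # loop when at least one test is left (presumably mkformatter cannot
+    # handle an empty test list).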
+    if len(tests) > 0 :
+     fmt = mkformatter(tests)
+     for test in tests:
         result = run_couchjs(test, fmt)
         if result == 0:
             passed += 1
@@ -169,8 +170,7 @@
             elif os.path.isfile(pname + ".js"):
                 tests.append(pname + ".js")
             else:
-                sys.stderr.write("Unknown test: " + name + os.linesep)
-                exit(1)
+                sys.stderr.write("Waring - Unknown test: " + name + os.linesep)
     return tests
 
 
diff --git a/test/javascript/tests-cluster/with-quorum/attachments.js b/test/javascript/tests-cluster/with-quorum/attachments.js
new file mode 100644
index 0000000..f578f87
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/attachments.js
@@ -0,0 +1,36 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments= function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
+  if (debug) debugger;
+
+  var doc = db.save({_id:"dummy"});
+  T(doc.ok);
+
+  var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+    body:"This is no base64 encoded text",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  T(xhr.status == 201,"Should return 201");
+  var rev = JSON.parse(xhr.responseText).rev;
+
+  xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + rev, {
+    body:"This is no base64 encoded text-2",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  T(xhr.status == 201,"Should return 201");
+  
+  db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/attachments_delete.js b/test/javascript/tests-cluster/with-quorum/attachments_delete.js
new file mode 100644
index 0000000..ed7d2db
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/attachments_delete.js
@@ -0,0 +1,32 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments_delete= function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
+  if (debug) debugger;
+
+  var doc = db.save({_id:"dummy"});
+  T(doc.ok);
+  var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+    body:"This is no base64 encoded text",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  T(xhr.status == 201,"Should return 201 Created");
+  var rev = JSON.parse(xhr.responseText).rev;
+
+  xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
+  T(xhr.status == 200,"Should return 200 Ok but returns "+xhr.status);
+
+  db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js
new file mode 100644
index 0000000..1994a0a
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js
@@ -0,0 +1,36 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments_delete_overridden_quorum= function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+  db.createDb();
+  if (debug) debugger;
+
+  var doc = db.save({_id:"dummy"});
+  T(doc.ok);
+  var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+    body:"This is no base64 encoded text",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  var rev = JSON.parse(xhr.responseText).rev;
+
+  xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
+  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
+  // TODO: Define correct behaviour
+  //T(xhr.status == 202,"Should return 202 but returns "+xhr.status);
+
+ //db.deleteDb();
+ // cleanup
+ // TODO DB deletions fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js
new file mode 100644
index 0000000..22c8a4c
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js
@@ -0,0 +1,40 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+//Test attachments operations with an overridden quorum parameter
+couchTests.attachments_overridden_quorum = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+  db.createDb();
+  if (debug) debugger;
+
+  var doc = db.save({_id:"dummy"});
+  T(doc.ok);
+
+  var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+    body:"This is no base64 encoded text",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  //TODO: Define correct behaviour
+  //T(xhr.status == 202,"Should return 202");
+  var rev = JSON.parse(xhr.responseText).rev;
+
+  xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + rev, {
+    body:"This is no base64 encoded text-2",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
+  //TODO: Define correct behaviour
+  //T(xhr.status == 202,"Should return 202");
+
+  db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/db_creation.js
similarity index 100%
rename from test/javascript/tests-cluster/with-quorum/db-creation.js
rename to test/javascript/tests-cluster/with-quorum/db_creation.js
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js
similarity index 60%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js
index 0d8ff83..14d319c 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js
@@ -10,19 +10,19 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
+// Do DB creation under cluster with quorum conditions but overriding write quorum.
+couchTests.db_creation_overridden_quorum = function(debug) {
 
   if (debug) debugger;
 
   var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
 
-  // DB Creation should return 202- Accepted
+  // DB Creation should return 202 - Accepted
   xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
+  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status)
+  //T(xhr.status == 202,"Should return 202");
 
   // cleanup
-  // TODO DB deletions fails if the quorum is not met.
-  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+  db.deleteDb();
 };
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/db_deletion.js
similarity index 70%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/with-quorum/db_deletion.js
index 0d8ff83..079fb49 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/with-quorum/db_deletion.js
@@ -10,19 +10,21 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
+// Do DB deletion under cluster with quorum conditions.
+couchTests.db_deletion = function(debug) {
 
   if (debug) debugger;
 
   var db_name = get_random_db_name()
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
+  db.createDb();
+
+  // DB Deletion should return 202 - Accepted as the cluster is not complete
+  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
   T(xhr.status == 202);
 
-  // cleanup
-  // TODO DB deletions fails if the quorum is not met.
-  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+  // DB Deletion should return 404 - Not found
+  xhr = CouchDB.request("DELETE", "/not-existing-db/");
+  T(xhr.status == 404);
 };
diff --git a/test/javascript/tests-cluster/with-quorum/db_deletion_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/db_deletion_overridden_quorum.js
new file mode 100644
index 0000000..01417eb
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/db_deletion_overridden_quorum.js
@@ -0,0 +1,23 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do DB deletion under cluster with quorum conditions but overriding write quorum.
+couchTests.db_deletion_overridden_quorum = function(debug) {
+
+  if (debug) debugger;
+
+  var db_name = get_random_db_name()
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+  db.createDb();
+  db.deleteDb();
+  T(db.last_req.status="202","Should return 202");
+};
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/doc_bulk.js
similarity index 62%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/with-quorum/doc_bulk.js
index 0d8ff83..4bdd3c8 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/with-quorum/doc_bulk.js
@@ -10,19 +10,16 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
-
+couchTests.doc_bulk = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
   if (debug) debugger;
 
-  var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  var docs = makeDocs(5);
+  // Create the docs
+  var results = db.bulkSave(docs);
+  T(db.last_req.status="201","Should return 201")
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
-
-  // cleanup
-  // TODO DB deletions fails if the quorum is not met.
-  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
-};
+  db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/doc_bulk_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/doc_bulk_overridden_quorum.js
new file mode 100644
index 0000000..0cf9a7e
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/doc_bulk_overridden_quorum.js
@@ -0,0 +1,25 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_bulk_overridden_quorum = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+  db.createDb();
+  if (debug) debugger;
+
+  var docs = makeDocs(5);
+  // Create the docs
+  var results = db.bulkSave(docs);
+  T(db.last_req.status="202","Should return 202")
+
+  db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/doc_copy.js
similarity index 62%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/with-quorum/doc_copy.js
index 0d8ff83..386ca56 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/with-quorum/doc_copy.js
@@ -10,19 +10,18 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
-
+couchTests.doc_copy = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
   if (debug) debugger;
 
-  var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.save({_id:"dummy"});
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
+  var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
+    headers: {"Destination":"dummy2"}
+  });
+  T(xhr.status=="201","Should return 201 ");
 
-  // cleanup
-  // TODO DB deletions fails if the quorum is not met.
-  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
-};
+  db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js
new file mode 100644
index 0000000..23fbc97
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js
@@ -0,0 +1,30 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_copy_overridden_quorum = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+  db.createDb();
+  if (debug) debugger;
+
+  db.save({_id:"dummy"});
+
+  var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
+    headers: {"Destination":"dummy2"}
+  });
+  //TODO: Define correct behaviour
+  //T(xhr.status=="202","Should return 202");
+  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
+
+  db.deleteDb();
+
+}
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/doc_crud.js
similarity index 62%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/with-quorum/doc_crud.js
index 0d8ff83..f016cef 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/with-quorum/doc_crud.js
@@ -10,19 +10,22 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
-
+couchTests.doc_crud = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
   if (debug) debugger;
 
-  var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.save({_id:"0",a:1});
+  T(db.last_req.status=="201");
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
+  var doc = db.open("0");
+  db.save(doc);
+  T(db.last_req.status=="201");
 
-  // cleanup
-  // TODO DB deletions fails if the quorum is not met.
-  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
-};
+  doc = db.open("0");
+  db.deleteDoc(doc);
+  T(db.last_req.status="200");
+  db.deleteDb();
+
+}
diff --git a/test/javascript/tests-cluster/with-quorum/doc_crud_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/doc_crud_overridden_quorum.js
new file mode 100644
index 0000000..41502ca
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/doc_crud_overridden_quorum.js
@@ -0,0 +1,31 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_crud_overridden_quorum = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+  db.createDb();
+  if (debug) debugger;
+
+  db.save({_id:"0",a:1});
+  T(db.last_req.status=="202","Should return 202 status");
+
+  var doc = db.open("0");
+  db.save(doc);
+  T(db.last_req.status=="202","Should return 202 status");
+
+  doc = db.open("0");
+  db.deleteDoc(doc);
+  T(db.last_req.status="202","Should return 202 status");
+
+  db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/without-quorum/attachments.js b/test/javascript/tests-cluster/without-quorum/attachments.js
new file mode 100644
index 0000000..5756343
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/attachments.js
@@ -0,0 +1,39 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments= function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
+  if (debug) debugger;
+
+  var doc = db.save({_id:"dummy"});
+  T(doc.ok);
+  var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+    body:"This is no base64 encoded text",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  T(xhr.status == 202,"Should return 202 Accepted");
+  var rev = JSON.parse(xhr.responseText).rev;
+
+  xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + rev, {
+    body:"This is no base64 encoded text-2",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  T(xhr.status == 202,"Should return 202 Accepted");
+  rev = JSON.parse(xhr.responseText).rev;
+
+ //db.deleteDb();
+ // cleanup
+ // TODO DB deletions fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/attachments_delete.js b/test/javascript/tests-cluster/without-quorum/attachments_delete.js
new file mode 100644
index 0000000..d05fcaf
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/attachments_delete.js
@@ -0,0 +1,37 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments_delete= function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
+  if (debug) debugger;
+
+  var doc = db.save({_id:"dummy"});
+  T(doc.ok);
+  var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+    body:"This is no base64 encoded text",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  T(xhr.status == 202,"Should return 202 Accepted");
+  var rev = JSON.parse(xhr.responseText).rev;
+
+  xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
+  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering quorum. 202->"+xhr.status);
+  //TODO: Define correct behaviour
+  //T(xhr.status == 202,"Should return 202 Accepted but returns "+xhr.status);
+
+ //db.deleteDb();
+ // cleanup
+ // TODO DB deletions fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js
new file mode 100644
index 0000000..906391a
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js
@@ -0,0 +1,36 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments_delete_overridden_quorum= function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+  db.createDb();
+  if (debug) debugger;
+
+  var doc = db.save({_id:"dummy"});
+  T(doc.ok);
+  var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+    body:"This is no base64 encoded text",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  var rev = JSON.parse(xhr.responseText).rev;
+
+  xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
+  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering quorum. 202->"+xhr.status);
+  //TODO: Define correct behaviour
+  //T(xhr.status == 200,"Should return 200 but returns "+xhr.status);
+
+ //db.deleteDb();
+ // cleanup
+ // TODO DB deletions fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/attachments_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/attachments_overridden_quorum.js
new file mode 100644
index 0000000..434578f
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/attachments_overridden_quorum.js
@@ -0,0 +1,42 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+//Test attachments operations with an overridden quorum parameter
+couchTests.attachments_overridden_quorum = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+  db.createDb();
+  if (debug) debugger;
+
+  var doc = db.save({_id:"dummy"});
+  T(doc.ok);
+
+  var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+    body:"This is no base64 encoded text",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  //TODO: Define correct behaviour
+  //T(xhr.status == 201,"Should return 201");
+  var rev = JSON.parse(xhr.responseText).rev;
+
+  xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + rev, {
+    body:"This is no base64 encoded text-2",
+    headers:{"Content-Type": "text/plain;charset=utf-8"}
+  });
+  //TODO: Define correct behaviour
+  //T(xhr.status == 201,"Should return 201");
+
+  //db.deleteDb();
+  // cleanup
+  // TODO DB deletions fails if the quorum is not met.
+  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/db_creation.js
similarity index 89%
rename from test/javascript/tests-cluster/without-quorum/db-creation.js
rename to test/javascript/tests-cluster/without-quorum/db_creation.js
index 0d8ff83..a21d377 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/without-quorum/db_creation.js
@@ -23,6 +23,5 @@
   T(xhr.status == 202);
 
   // cleanup
-  // TODO DB deletions fails if the quorum is not met.
-  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+  db.deleteDb();
 };
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js
similarity index 64%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js
index 0d8ff83..6d5d798 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js
@@ -10,18 +10,20 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
+// Do DB creation under cluster without quorum conditions but overriding write quorum.
+couchTests.db_creation_overridden_quorum = function(debug) {
 
   if (debug) debugger;
 
   var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
 
-  // DB Creation should return 202- Accepted
+  // DB Creation should return 201 - Created
   xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
+  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 201->"+xhr.status)
+  //T(xhr.status == 201,"Should return 201");
 
+  //db.deleteDb();
   // cleanup
   // TODO DB deletions fails if the quorum is not met.
   xhr = CouchDB.request("DELETE", "/" + db_name + "/");
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/db_deletion.js
similarity index 72%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/without-quorum/db_deletion.js
index 0d8ff83..006345e 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/without-quorum/db_deletion.js
@@ -10,19 +10,21 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
+// Do DB deletion under cluster without quorum conditions.
+couchTests.db_deletion = function(debug) {
 
   if (debug) debugger;
 
   var db_name = get_random_db_name()
   var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
-
-  // cleanup
-  // TODO DB deletions fails if the quorum is not met.
+  db.createDb();
+  
+  // DB Deletion should return 202 - Accepted
   xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+  T(xhr.status == 202);
+  
+  // DB Deletion should return 404 - Not found
+  xhr = CouchDB.request("DELETE", "/not-existing-db/");
+  T(xhr.status == 404);
 };
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/db_deletion_overridden_quorum.js
similarity index 65%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/without-quorum/db_deletion_overridden_quorum.js
index 0d8ff83..11b344c 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/without-quorum/db_deletion_overridden_quorum.js
@@ -10,19 +10,16 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
+// Do DB deletion under cluster without quorum conditions but overriding write quorum.
+couchTests.db_deletion_overridden_quorum = function(debug) {
 
   if (debug) debugger;
 
   var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+  db.createDb();
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
-
-  // cleanup
-  // TODO DB deletions fails if the quorum is not met.
+  // DB deletion does not consider the overridden quorum param.
   xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+  T(db.last_req.status="202","Should return 202");
 };
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/doc_bulk.js
similarity index 74%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/without-quorum/doc_bulk.js
index 0d8ff83..91578d8 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/without-quorum/doc_bulk.js
@@ -10,19 +10,19 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
-
+couchTests.doc_bulk = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
   if (debug) debugger;
 
-  var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  var docs = makeDocs(5);
+  // Create the docs
+  var results = db.bulkSave(docs);
+  T(db.last_req.status="202","Should return 202")
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
-
+  //db.deleteDb();
   // cleanup
   // TODO DB deletions fails if the quorum is not met.
   xhr = CouchDB.request("DELETE", "/" + db_name + "/");
-};
+}
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/doc_bulk_overridden_quorum.js
similarity index 67%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/without-quorum/doc_bulk_overridden_quorum.js
index 0d8ff83..56fb11e 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/without-quorum/doc_bulk_overridden_quorum.js
@@ -10,19 +10,19 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
-
+couchTests.doc_bulk_overridden_quorum = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+  db.createDb();
   if (debug) debugger;
 
-  var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  var docs = makeDocs(5);
+  // Create the docs
+  var results = db.bulkSave(docs);
+  T(db.last_req.status == "201", "Should return 201");
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
-
+  //db.deleteDb();
   // cleanup
   // TODO DB deletions fails if the quorum is not met.
   xhr = CouchDB.request("DELETE", "/" + db_name + "/");
-};
+};
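Editor's note: the two bulk tests differ only in the quorum override. At the HTTP level, `db.bulkSave(docs)` amounts to a single `_bulk_docs` POST; a hedged sketch of that request (the helper's internals may differ slightly, but the endpoint and body shape are the documented CouchDB API):

```javascript
// Direct equivalent of db.bulkSave(makeDocs(5)) with an overridden quorum.
var xhr = CouchDB.request("POST", "/" + db_name + "/_bulk_docs?w=1", {
  headers: {"Content-Type": "application/json"},
  body: JSON.stringify({docs: makeDocs(5)})
});
// w=1 is met by the single live node => 201 Created; without the override
// the unmet default quorum downgrades this to 202 Accepted.
T(xhr.status == 201, "Should return 201");
```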
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/doc_copy.js
similarity index 72%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/without-quorum/doc_copy.js
index 0d8ff83..7d7c35f 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/without-quorum/doc_copy.js
@@ -10,19 +10,21 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
-
+couchTests.doc_copy = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
   if (debug) debugger;
 
-  var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.save({_id:"dummy"});
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
+  var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
+    headers: {"Destination":"dummy2"}
+  });
+  T(xhr.status == "202", "Should return 202");
 
+  //db.deleteDb();
   // cleanup
   // TODO DB deletions fails if the quorum is not met.
   xhr = CouchDB.request("DELETE", "/" + db_name + "/");
-};
+};
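Editor's note: COPY is CouchDB's server-side copy; the source document never travels through the client, and the Destination header names the target. One detail the test does not exercise, since it copies to a fresh id: overwriting an existing target requires its current revision in the header.

```javascript
// Copy to a new id (as in the test above).
var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
  headers: {"Destination": "dummy2"}
});

// Copying over an existing document: append ?rev=<current rev> to the
// Destination value, just as for a normal overwrite.
var target = db.open("dummy2");
xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
  headers: {"Destination": "dummy2?rev=" + target._rev}
});
```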
diff --git a/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js
new file mode 100644
index 0000000..e72425d
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js
@@ -0,0 +1,33 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_copy_overridden_quorum = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+  db.createDb();
+  if (debug) debugger;
+
+  db.save({_id:"dummy"});
+
+  var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
+    headers: {"Destination":"dummy2"}
+  });
+  console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 201->"+xhr.status);
+  //TODO Defie correct behaviour
+  //T(xhr.status=="201","Should return 201");
+
+  //db.deleteDb();
+  // cleanup
+  // TODO DB deletion fails if the quorum is not met.
+  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+
+};
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/doc_crud.js
similarity index 65%
copy from test/javascript/tests-cluster/without-quorum/db-creation.js
copy to test/javascript/tests-cluster/without-quorum/doc_crud.js
index 0d8ff83..aa70697 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/without-quorum/doc_crud.js
@@ -10,19 +10,26 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-// Do DB creation under cluster without quorum conditions.
-couchTests.db_creation = function(debug) {
-
+couchTests.doc_crud = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.createDb();
   if (debug) debugger;
 
-  var db_name = get_random_db_name()
-  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+  db.save({_id:"0",a:1});
+  T(db.last_req.status=="202","Should return 202 status");
 
-  // DB Creation should return 202- Accepted
-  xhr = CouchDB.request("PUT", "/" + db_name + "/");
-  T(xhr.status == 202);
+  var doc = db.open("0");
+  db.save(doc);
+  T(db.last_req.status=="202","Should return 202 status");
 
+  doc = db.open("0");
+  db.deleteDoc(doc);
+  T(db.last_req.status == "202", "Should return 202 status");
+
+  //db.deleteDb();
   // cleanup
   // TODO DB deletions fails if the quorum is not met.
   xhr = CouchDB.request("DELETE", "/" + db_name + "/");
-};
+
+};
diff --git a/test/javascript/tests-cluster/without-quorum/doc_crud_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/doc_crud_overridden_quorum.js
new file mode 100644
index 0000000..44ab86e
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/doc_crud_overridden_quorum.js
@@ -0,0 +1,34 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_crud_overridden_quorum = function(debug) {
+  var db_name = get_random_db_name();
+  var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+  db.createDb();
+  if (debug) debugger;
+
+  db.save({_id:"0",a:1});
+  T(db.last_req.status=="201","Should return 201 status");
+
+  var doc = db.open("0");
+  db.save(doc);
+  T(db.last_req.status=="201","Should return 201 status");
+
+  doc = db.open("0");
+  db.deleteDoc(doc);
+  T(db.last_req.status == "200", "Should return 200 status");
+
+  //db.deleteDb();
+  // cleanup
+  // TODO DB deletion fails if the quorum is not met.
+  xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+};
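Editor's note: taken together, the two CRUD tests pin down a small status matrix for a single live node in an n=3 cluster (database deletion itself is the open TODO above). A hypothetical helper, just to state the expectations in one place:

```javascript
// Statuses asserted by doc_crud (default quorum unmet) and
// doc_crud_overridden_quorum (w=1, trivially met by one node).
function expectedStatus(op, quorumMet) {
  var matrix = {
    save:      {met: "201", unmet: "202"},  // create and update behave alike
    deleteDoc: {met: "200", unmet: "202"}
    // db deletion: 202 either way; it ignores the overridden w (see TODO)
  };
  return quorumMet ? matrix[op].met : matrix[op].unmet;
}
T(expectedStatus("save", true) == "201");
T(expectedStatus("deleteDoc", false) == "202");
```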
diff --git a/test/javascript/tests/reduce_builtin.js b/test/javascript/tests/reduce_builtin.js
index 9c455e4..4686841 100644
--- a/test/javascript/tests/reduce_builtin.js
+++ b/test/javascript/tests/reduce_builtin.js
@@ -37,6 +37,12 @@
       emit(doc.integer, doc.integer);
   };
 
+  var check_approx_distinct = function(expected, estimated) {
+    // see https://en.wikipedia.org/wiki/HyperLogLog
+    var err = 1.04 / Math.sqrt(Math.pow(2, 11 - 1));
+    return Math.abs(expected - estimated) < expected * err;
+  };
+
   var result = db.query(map, "_sum");
   T(result.rows[0].value == 2*summate(numDocs));
   result = db.query(map, "_count");
@@ -47,27 +53,41 @@
   T(result.rows[0].value.min == 1);
   T(result.rows[0].value.max == 500);
   T(result.rows[0].value.sumsqr == 2*sumsqr(numDocs));
+  result = db.query(map, "_approx_count_distinct");
+  T(check_approx_distinct(numDocs, result.rows[0].value));
 
   result = db.query(map, "_sum", {startkey: 4, endkey: 4});
   T(result.rows[0].value == 8);
   result = db.query(map, "_count", {startkey: 4, endkey: 4});
   T(result.rows[0].value == 2);
+  result = db.query(map, "_approx_count_distinct", {startkey:4, endkey:4});
+  T(check_approx_distinct(1, result.rows[0].value));
 
   result = db.query(map, "_sum", {startkey: 4, endkey: 5});
   T(result.rows[0].value == 18);
   result = db.query(map, "_count", {startkey: 4, endkey: 5});
   T(result.rows[0].value == 4);
+  result = db.query(map, "_approx_count_distinct", {startkey:4, endkey:5});
+  T(check_approx_distinct(2, result.rows[0].value));
+
 
   result = db.query(map, "_sum", {startkey: 4, endkey: 6});
   T(result.rows[0].value == 30);
   result = db.query(map, "_count", {startkey: 4, endkey: 6});
   T(result.rows[0].value == 6);
+  result = db.query(map, "_approx_count_distinct", {startkey: 4, endkey: 6});
+  T(check_approx_distinct(3, result.rows[0].value));
 
   result = db.query(map, "_sum", {group:true, limit:3});
   T(result.rows[0].value == 2);
   T(result.rows[1].value == 4);
   T(result.rows[2].value == 6);
 
+  result = db.query(map, "_approx_count_distinct", {group:true, limit:3});
+  T(check_approx_distinct(1, result.rows[0].value));
+  T(check_approx_distinct(1, result.rows[1].value));
+  T(check_approx_distinct(1, result.rows[2].value));
+
   for(var i=1; i<numDocs/2; i+=30) {
     result = db.query(map, "_sum", {startkey: i, endkey: numDocs - i});
     T(result.rows[0].value == 2*(summate(numDocs-i) - summate(i-1)));
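Editor's note: the tolerance in `check_approx_distinct` is HyperLogLog's standard error, roughly 1.04/sqrt(m) for m registers; the test uses m = 2^(11-1) = 1024, i.e. about 3.3% relative error. Worked through for the whole-view query:

```javascript
// With m = 1024 registers, sigma = 1.04 / sqrt(1024) ~= 0.0325.
// For numDocs = 500 distinct keys that allows roughly +/-16 of slack,
// so any _approx_count_distinct estimate strictly inside (484, 517) passes.
var err = 1.04 / Math.sqrt(Math.pow(2, 11 - 1));   // ~0.0325
var tolerance = 500 * err;                          // ~16.25
T(Math.abs(512 - 500) < tolerance);                 // a 2.4% miss still passes
```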
diff --git a/test/javascript/tests/users_db.js b/test/javascript/tests/users_db.js
index 34a7bad..20be325 100644
--- a/test/javascript/tests/users_db.js
+++ b/test/javascript/tests/users_db.js
@@ -205,6 +205,13 @@
       } finally {
         CouchDB.login("jan", "apple");
         usersDb.deleteDb(); // cleanup
+        waitForSuccess(function() {
+            var req = CouchDB.request("GET", usersDb.name);
+            if (req.status == 404) {
+              return true;
+            }
+            throw({});
+        }, "usersdb.deleteDb")
         usersDb.createDb();
         usersDbAlt.deleteDb(); // cleanup
       }
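Editor's note: `waitForSuccess` above is the suite's poll-until-done helper; it retries the supplied function until it returns without throwing, so the test no longer races the asynchronous database deletion. A sketch of the pattern, in case the real helper's timeout or error handling differs:

```javascript
// Hypothetical re-implementation of the polling idiom used above.
function waitForSuccess(fun, tag) {
  var start = new Date().getTime();
  while (new Date().getTime() < start + 5000) {   // assumed 5s budget
    try {
      return fun();              // done once fun() stops throwing
    } catch (e) {
      // not ready yet; fall through and retry
    }
  }
  throw("timeout waiting for: " + tag);
}
```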