(#228) - refactor idb adapter into separate modules
diff --git a/lib/adapters/idb/idb-bulk-docs.js b/lib/adapters/idb/idb-bulk-docs.js
new file mode 100644
index 0000000..92633f8
--- /dev/null
+++ b/lib/adapters/idb/idb-bulk-docs.js
@@ -0,0 +1,380 @@
+'use strict';
+
+var utils = require('../../utils');
+var idbUtils = require('./idb-utils');
+var idbConstants = require('./idb-constants');
+
+var ATTACH_AND_SEQ_STORE = idbConstants.ATTACH_AND_SEQ_STORE;
+var ATTACH_STORE = idbConstants.ATTACH_STORE;
+var BY_SEQ_STORE = idbConstants.BY_SEQ_STORE;
+var DOC_STORE = idbConstants.DOC_STORE;
+var LOCAL_STORE = idbConstants.LOCAL_STORE;
+var META_STORE = idbConstants.META_STORE;
+
+var compactRevs = idbUtils.compactRevs;
+var decodeMetadata = idbUtils.decodeMetadata;
+var encodeMetadata = idbUtils.encodeMetadata;
+var idbError = idbUtils.idbError;
+
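+// Encapsulates a single bulkDocs() call against IndexedDB. Control flow,
+// all inside one readwrite transaction:
+//   init() -> startTransaction() -> verifyAttachments()
+//     -> fetchExistingDocs() -> processDocs() -> writeDoc() per doc,
+//   then complete() fires via txn.oncomplete.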
+function IdbBulkDocs(req, opts, api, idb, Changes, callback) {
+  this.req = req;
+  this.opts = opts;
+  this.api = api;
+  this.idb = idb;
+  this.Changes = Changes;
+  this.callback = callback;
+  this.init();
+}
+
+IdbBulkDocs.prototype.init = function () {
+  var self = this;
+
+  self.docInfos = self.req.docs;
+
+  var docInfoError;
+
+  for (var i = 0, len = self.docInfos.length; i < len; i++) {
+    var doc = self.docInfos[i];
+    if (doc._id && utils.isLocalId(doc._id)) {
+      continue;
+    }
+    doc = self.docInfos[i] = utils.parseDoc(doc, self.opts.new_edits);
+    if (doc.error && !docInfoError) {
+      docInfoError = doc;
+    }
+  }
+
+  if (docInfoError) {
+    return self.callback(docInfoError);
+  }
+
+  self.results = new Array(self.docInfos.length);
+  self.fetchedDocs = new utils.Map();
+  self.preconditionErrored = false;
+  self.blobType = self.api._blobSupport ? 'blob' : 'base64';
+
+  utils.preprocessAttachments(self.docInfos, self.blobType, function (err) {
+    if (err) {
+      return self.callback(err);
+    }
+    self.startTransaction();
+  });
+};
+
+IdbBulkDocs.prototype.startTransaction = function () {
+  var self = this;
+
+  var stores = [
+    DOC_STORE, BY_SEQ_STORE,
+    ATTACH_STORE, META_STORE,
+    LOCAL_STORE, ATTACH_AND_SEQ_STORE
+  ];
+  var txn = self.idb.transaction(stores, 'readwrite');
+  txn.onerror = idbError(self.callback);
+  txn.ontimeout = idbError(self.callback);
+  txn.oncomplete = self.complete.bind(self);
+  self.txn = txn;
+  self.attachStore = txn.objectStore(ATTACH_STORE);
+  self.docStore = txn.objectStore(DOC_STORE);
+  self.bySeqStore = txn.objectStore(BY_SEQ_STORE);
+  self.attachAndSeqStore = txn.objectStore(ATTACH_AND_SEQ_STORE);
+
+  self.verifyAttachments(function (err) {
+    if (err) {
+      self.preconditionErrored = true;
+      return self.callback(err);
+    }
+    self.fetchExistingDocs();
+  });
+};
+
+IdbBulkDocs.prototype.processDocs = function () {
+  var self = this;
+
+  utils.processDocs(self.docInfos, self.api, self.fetchedDocs,
+    self.txn, self.results, self.writeDoc.bind(self), self.opts);
+};
+
+IdbBulkDocs.prototype.fetchExistingDocs = function () {
+  var self = this;
+
+  if (!self.docInfos.length) {
+    return;
+  }
+
+  var numFetched = 0;
+
+  function checkDone() {
+    if (++numFetched === self.docInfos.length) {
+      self.processDocs();
+    }
+  }
+
+  function readMetadata(event) {
+    var metadata = decodeMetadata(event.target.result);
+
+    if (metadata) {
+      self.fetchedDocs.set(metadata.id, metadata);
+    }
+    checkDone();
+  }
+
+  for (var i = 0, len = self.docInfos.length; i < len; i++) {
+    var docInfo = self.docInfos[i];
+    if (docInfo._id && utils.isLocalId(docInfo._id)) {
+      checkDone(); // skip local docs
+      continue;
+    }
+    var req = self.docStore.get(docInfo.metadata.id);
+    req.onsuccess = readMetadata;
+  }
+};
+
+IdbBulkDocs.prototype.complete = function () {
+  var self = this;
+
+  if (self.preconditionErrored) {
+    return;
+  }
+
+  for (var i = 0, len = self.results.length; i < len; i++) {
+    var result = self.results[i];
+    if (!result.error) {
+      // TODO: this should be set in utils.processDocs
+      // for local docs
+      result.ok = true;
+    }
+  }
+
+  self.Changes.notify(self.api._name);
+  self.api._docCount = -1; // invalidate
+  self.callback(null, self.results);
+};
+
+IdbBulkDocs.prototype.verifyAttachment = function (digest, callback) {
+  var self = this;
+
+  var req = self.attachStore.get(digest);
+  req.onsuccess = function (e) {
+    if (!e.target.result) {
+      var err = new Error('unknown stub attachment with digest ' + digest);
+      err.status = 412;
+      callback(err);
+    } else {
+      callback();
+    }
+  };
+};
+
+IdbBulkDocs.prototype.verifyAttachments = function (finish) {
+  var self = this;
+
+  var digests = [];
+  self.docInfos.forEach(function (docInfo) {
+    if (docInfo.data && docInfo.data._attachments) {
+      Object.keys(docInfo.data._attachments).forEach(function (filename) {
+        var att = docInfo.data._attachments[filename];
+        if (att.stub) {
+          digests.push(att.digest);
+        }
+      });
+    }
+  });
+  if (!digests.length) {
+    return finish();
+  }
+  var numDone = 0;
+  var err;
+
+  function checkDone() {
+    if (++numDone === digests.length) {
+      finish(err);
+    }
+  }
+  digests.forEach(function (digest) {
+    self.verifyAttachment(digest, function (attErr) {
+      if (attErr && !err) {
+        err = attErr;
+      }
+      checkDone();
+    });
+  });
+};
+
+IdbBulkDocs.prototype.writeDoc = function (docInfo, winningRev, deleted,
+                                           callback, isUpdate, resultsIdx) {
+  var self = this;
+
+  var doc = docInfo.data;
+  doc._id = docInfo.metadata.id;
+  doc._rev = docInfo.metadata.rev;
+
+  if (deleted) {
+    doc._deleted = true;
+  }
+
+  var hasAttachments = doc._attachments && Object.keys(doc._attachments).length;
+  if (hasAttachments) {
+    return self.writeAttachments(docInfo, winningRev, deleted,
+      callback, isUpdate, resultsIdx);
+  }
+
+  self.finishDoc(docInfo, winningRev, deleted,
+    callback, isUpdate, resultsIdx);
+};
+
+IdbBulkDocs.prototype.autoCompact = function (docInfo) {
+  var self = this;
+
+  var revsToDelete = utils.compactTree(docInfo.metadata);
+  compactRevs(revsToDelete, docInfo.metadata.id, self.txn);
+};
+
+IdbBulkDocs.prototype.finishDoc = function (docInfo, winningRev, deleted,
+                                            callback, isUpdate, resultsIdx) {
+  var self = this;
+
+  var doc = docInfo.data;
+  var metadata = docInfo.metadata;
+
+  doc._doc_id_rev = metadata.id + '::' + metadata.rev;
+  delete doc._id;
+  delete doc._rev;
+
+  function afterPutDoc(e) {
+    if (isUpdate && self.api.auto_compaction) {
+      self.autoCompact(docInfo);
+    }
+    metadata.seq = e.target.result;
+    // Current _rev is calculated from _rev_tree on read
+    delete metadata.rev;
+    var metadataToStore = encodeMetadata(metadata, winningRev, deleted);
+    var metaDataReq = self.docStore.put(metadataToStore);
+    metaDataReq.onsuccess = afterPutMetadata;
+  }
+
+  function afterPutDocError(e) {
+    // ConstraintError, need to update, not put (see #1638 for details)
+    e.preventDefault(); // avoid transaction abort
+    e.stopPropagation(); // avoid transaction onerror
+    var index = self.bySeqStore.index('_doc_id_rev');
+    var getKeyReq = index.getKey(doc._doc_id_rev);
+    getKeyReq.onsuccess = function (e) {
+      var putReq = self.bySeqStore.put(doc, e.target.result);
+      putReq.onsuccess = afterPutDoc;
+    };
+  }
+
+  function afterPutMetadata() {
+    self.results[resultsIdx] = {
+      ok: true,
+      id: metadata.id,
+      rev: winningRev
+    };
+    self.fetchedDocs.set(docInfo.metadata.id, docInfo.metadata);
+    self.insertAttachmentMappings(docInfo, metadata.seq, callback);
+  }
+
+  var putReq = self.bySeqStore.put(doc);
+
+  putReq.onsuccess = afterPutDoc;
+  putReq.onerror = afterPutDocError;
+};
+
+IdbBulkDocs.prototype.writeAttachments = function (docInfo, winningRev,
+                                                   deleted, callback,
+                                                   isUpdate, resultsIdx) {
+  var self = this;
+
+  var doc = docInfo.data;
+
+  var numDone = 0;
+  var attachments = Object.keys(doc._attachments);
+
+  function collectResults() {
+    if (numDone === attachments.length) {
+      self.finishDoc(docInfo, winningRev, deleted, callback, isUpdate,
+        resultsIdx);
+    }
+  }
+
+  function attachmentSaved() {
+    numDone++;
+    collectResults();
+  }
+
+  attachments.forEach(function (key) {
+    var att = docInfo.data._attachments[key];
+    if (!att.stub) {
+      var data = att.data;
+      delete att.data;
+      var digest = att.digest;
+      self.saveAttachment(digest, data, attachmentSaved);
+    } else {
+      numDone++;
+      collectResults();
+    }
+  });
+};
+
+// map seqs to attachment digests, which
+// we will need later during compaction
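+// e.g. a doc written at seq 42 with one attachment whose digest is
+// 'md5-AAA...' (illustrative) gets {seq: 42, digestSeq: 'md5-AAA...::42'},
+// letting compactRevs count the remaining references to each digest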
+IdbBulkDocs.prototype.insertAttachmentMappings = function (docInfo, seq,
+                                                           callback) {
+  var self = this;
+
+  var attsAdded = 0;
+  var attsToAdd = Object.keys(docInfo.data._attachments || {});
+
+  if (!attsToAdd.length) {
+    return callback();
+  }
+
+  function checkDone() {
+    if (++attsAdded === attsToAdd.length) {
+      callback();
+    }
+  }
+
+  function add(att) {
+    var digest = docInfo.data._attachments[att].digest;
+    var req = self.attachAndSeqStore.put({
+      seq: seq,
+      digestSeq: digest + '::' + seq
+    });
+
+    req.onsuccess = checkDone;
+    req.onerror = function (e) {
+      // this callback is for a ConstraintError, which we ignore
+      // because this docid/rev has already been associated with
+      // the digest (e.g. when new_edits == false)
+      e.preventDefault(); // avoid transaction abort
+      e.stopPropagation(); // avoid transaction onerror
+      checkDone();
+    };
+  }
+  for (var i = 0; i < attsToAdd.length; i++) {
+    add(attsToAdd[i]); // do in parallel
+  }
+};
+
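+// Store an attachment body keyed by digest, skipping the write when a
+// record with that digest already exists (attachments are deduplicated
+// by content digest).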
+IdbBulkDocs.prototype.saveAttachment = function (digest, data, callback) {
+  var self = this;
+
+  var countReq = self.attachStore.count(digest);
+  countReq.onsuccess = function (e) {
+    var count = e.target.result;
+    if (count) {
+      return callback(); // already exists
+    }
+    var newAtt = {
+      digest: digest,
+      body: data
+    };
+    var putReq = self.attachStore.put(newAtt);
+    putReq.onsuccess = callback;
+  };
+};
+
+module.exports = IdbBulkDocs;
\ No newline at end of file
diff --git a/lib/adapters/idb/idb-constants.js b/lib/adapters/idb/idb-constants.js
new file mode 100644
index 0000000..2a6ccc7
--- /dev/null
+++ b/lib/adapters/idb/idb-constants.js
@@ -0,0 +1,26 @@
+'use strict';
+
+// IndexedDB requires a versioned database structure, so we use the
+// version here to manage migrations.
+exports.ADAPTER_VERSION = 5;
+
+// The object stores created for each database
+// DOC_STORE stores the document metadata, its revision history and state
+// Keyed by document id
+exports.DOC_STORE = 'document-store';
+// BY_SEQ_STORE stores a particular version of a document, keyed by its
+// sequence id
+exports.BY_SEQ_STORE = 'by-sequence';
+// Where we store attachments
+exports.ATTACH_STORE = 'attach-store';
+// Where we store many-to-many relations
+// between attachment digests and seqs
+exports.ATTACH_AND_SEQ_STORE = 'attach-seq-store';
+
+// Where we store database-wide metadata in a single record
+// keyed by id: META_STORE
+exports.META_STORE = 'meta-store';
+// Where we store local documents
+exports.LOCAL_STORE = 'local-store';
+// Where we detect blob support
+exports.DETECT_BLOB_SUPPORT_STORE = 'detect-blob-support';
\ No newline at end of file
diff --git a/lib/adapters/idb/idb-utils.js b/lib/adapters/idb/idb-utils.js
new file mode 100644
index 0000000..30c67a3
--- /dev/null
+++ b/lib/adapters/idb/idb-utils.js
@@ -0,0 +1,231 @@
+'use strict';
+
+var errors = require('../../deps/errors');
+var utils = require('../../utils');
+var constants = require('./idb-constants');
+
+function tryCode(fun, that, args) {
+  try {
+    fun.apply(that, args);
+  } catch (err) { // shouldn't happen
+    if (window.PouchDB) {
+      window.PouchDB.emit('error', err);
+    }
+  }
+}
+
+exports.taskQueue = {
+  running: false,
+  queue: []
+};
+
+exports.applyNext = function () {
+  if (exports.taskQueue.running || !exports.taskQueue.queue.length) {
+    return;
+  }
+  exports.taskQueue.running = true;
+  var item = exports.taskQueue.queue.shift();
+  item.action(function (err, res) {
+    tryCode(item.callback, this, [err, res]);
+    exports.taskQueue.running = false;
+    process.nextTick(exports.applyNext);
+  });
+};
+
+exports.idbError = function (callback) {
+  return function (event) {
+    var message = (event.target && event.target.error &&
+      event.target.error.name) || event.target;
+    callback(errors.error(errors.IDB_ERROR, message, event.type));
+  };
+};
+
+// Unfortunately, the metadata has to be stringified
+// when it is put into the database, because otherwise
+// IndexedDB can throw errors for deeply-nested objects.
+// Originally we just used JSON.parse/JSON.stringify; now
+// we use this custom vuvuzela library that avoids recursion.
+// If we could do it all over again, we'd probably use a
+// format for the revision trees other than JSON.
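+// An encoded record looks roughly like (illustrative values):
+//   {data: '{"id":"foo",...}', winningRev: '1-abc123',
+//    deletedOrLocal: '0', seq: 17, id: 'foo'}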
+exports.encodeMetadata = function (metadata, winningRev, deleted) {
+  return {
+    data: utils.safeJsonStringify(metadata),
+    winningRev: winningRev,
+    deletedOrLocal: deleted ? '1' : '0',
+    seq: metadata.seq, // highest seq for this doc
+    id: metadata.id
+  };
+};
+
+exports.decodeMetadata = function (storedObject) {
+  if (!storedObject) {
+    return null;
+  }
+  var metadata = utils.safeJsonParse(storedObject.data);
+  metadata.winningRev = storedObject.winningRev;
+  metadata.deletedOrLocal = storedObject.deletedOrLocal === '1';
+  metadata.seq = storedObject.seq;
+  return metadata;
+};
+
+// read the doc back out from the database. we don't store the
+// _id or _rev because we already have _doc_id_rev.
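+// e.g. {_doc_id_rev: 'mydoc::1-a1b2'} decodes to
+// {_id: 'mydoc', _rev: '1-a1b2'}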
+exports.decodeDoc = function (doc) {
+  if (!doc) {
+    return doc;
+  }
+  var idx = utils.lastIndexOf(doc._doc_id_rev, ':');
+  doc._id = doc._doc_id_rev.substring(0, idx - 1);
+  doc._rev = doc._doc_id_rev.substring(idx + 1);
+  delete doc._doc_id_rev;
+  return doc;
+};
+
+// Read a blob from the database, encoding as necessary
+// and translating from base64 if the IDB doesn't support
+// native Blobs
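+// The four non-empty cases: encode + Blob -> base64 string;
+// encode + string -> returned as-is (already base64);
+// !encode + Blob -> returned as-is; !encode + string -> converted to Blob.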
+exports.readBlobData = function (body, type, encode, callback) {
+  if (encode) {
+    if (!body) {
+      callback('');
+    } else if (typeof body !== 'string') { // we have blob support
+      utils.readAsBinaryString(body, function (binary) {
+        callback(utils.btoa(binary));
+      });
+    } else { // no blob support
+      callback(body);
+    }
+  } else {
+    if (!body) {
+      callback(utils.createBlob([''], {type: type}));
+    } else if (typeof body !== 'string') { // we have blob support
+      callback(body);
+    } else { // no blob support
+      body = utils.fixBinary(atob(body));
+      callback(utils.createBlob([body], {type: type}));
+    }
+  }
+};
+
+exports.fetchAttachmentsIfNecessary = function (doc, opts, txn, cb) {
+  var attachments = Object.keys(doc._attachments || {});
+  if (!attachments.length) {
+    return cb && cb();
+  }
+  var numDone = 0;
+
+  function checkDone() {
+    if (++numDone === attachments.length && cb) {
+      cb();
+    }
+  }
+
+  function fetchAttachment(doc, att) {
+    var attObj = doc._attachments[att];
+    var digest = attObj.digest;
+    var req = txn.objectStore(constants.ATTACH_STORE).get(digest);
+    req.onsuccess = function (e) {
+      attObj.body = e.target.result.body;
+      checkDone();
+    };
+  }
+
+  attachments.forEach(function (att) {
+    if (opts.attachments && opts.include_docs) {
+      fetchAttachment(doc, att);
+    } else {
+      doc._attachments[att].stub = true;
+      checkDone();
+    }
+  });
+};
+
+// IDB-specific postprocessing necessary because
+// we don't know whether we stored a true Blob or
+// a base64-encoded string, and if it's a Blob it
+// needs to be read outside of the transaction context
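+// Each unprocessed attachment body is replaced with
+// {digest, content_type, data} where data is base64-encoded.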
+exports.postProcessAttachments = function (results) {
+  return utils.Promise.all(results.map(function (row) {
+    if (row.doc && row.doc._attachments) {
+      var attNames = Object.keys(row.doc._attachments);
+      return utils.Promise.all(attNames.map(function (att) {
+        var attObj = row.doc._attachments[att];
+        if (!('body' in attObj)) { // already processed
+          return;
+        }
+        var body = attObj.body;
+        var type = attObj.content_type;
+        return new utils.Promise(function (resolve) {
+          exports.readBlobData(body, type, true, function (base64) {
+            row.doc._attachments[att] = utils.extend(
+              utils.pick(attObj, ['digest', 'content_type']),
+              {data: base64}
+            );
+            resolve();
+          });
+        });
+      }));
+    }
+  }));
+};
+
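+// Delete the given (compacted away) revisions of a doc: remove their
+// by-sequence records and digest/seq mappings, then delete any attachment
+// whose digest is no longer referenced by any remaining seq.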
+exports.compactRevs = function (revs, docId, txn) {
+
+  var possiblyOrphanedDigests = [];
+  var seqStore = txn.objectStore(constants.BY_SEQ_STORE);
+  var attStore = txn.objectStore(constants.ATTACH_STORE);
+  var attAndSeqStore = txn.objectStore(constants.ATTACH_AND_SEQ_STORE);
+  var count = revs.length;
+
+  function checkDone() {
+    count--;
+    if (!count) { // done processing all revs
+      deleteOrphanedAttachments();
+    }
+  }
+
+  function deleteOrphanedAttachments() {
+    if (!possiblyOrphanedDigests.length) {
+      return;
+    }
+    possiblyOrphanedDigests.forEach(function (digest) {
+      var countReq = attAndSeqStore.index('digestSeq').count(
+        global.IDBKeyRange.bound(
+          digest + '::', digest + '::\uffff', false, false));
+      countReq.onsuccess = function (e) {
+        var count = e.target.result;
+        if (!count) {
+          // orphaned
+          attStore.delete(digest);
+        }
+      };
+    });
+  }
+
+  revs.forEach(function (rev) {
+    var index = seqStore.index('_doc_id_rev');
+    var key = docId + "::" + rev;
+    index.getKey(key).onsuccess = function (e) {
+      var seq = e.target.result;
+      if (typeof seq !== 'number') {
+        return checkDone();
+      }
+      seqStore.delete(seq);
+
+      var cursor = attAndSeqStore.index('seq')
+        .openCursor(global.IDBKeyRange.only(seq));
+
+      cursor.onsuccess = function (event) {
+        var cursor = event.target.result;
+        if (cursor) {
+          var digest = cursor.value.digestSeq.split('::')[0];
+          possiblyOrphanedDigests.push(digest);
+          attAndSeqStore.delete(cursor.primaryKey);
+          cursor.continue();
+        } else { // done
+          checkDone();
+        }
+      };
+    };
+  });
+};
\ No newline at end of file
diff --git a/lib/adapters/idb.js b/lib/adapters/idb/idb.js
similarity index 64%
rename from lib/adapters/idb.js
rename to lib/adapters/idb/idb.js
index 268fce6..bac78bd 100644
--- a/lib/adapters/idb.js
+++ b/lib/adapters/idb/idb.js
@@ -1,262 +1,34 @@
 'use strict';
 
-var utils = require('../utils');
-var merge = require('../merge');
-var errors = require('../deps/errors');
+var utils = require('../../utils');
+var merge = require('../../merge');
+var errors = require('../../deps/errors');
+var idbUtils = require('./idb-utils');
+var idbConstants = require('./idb-constants');
+var IdbBulkDocs = require('./idb-bulk-docs');
 
-// IndexedDB requires a versioned database structure, so we use the
-// version here to manage migrations.
-var ADAPTER_VERSION = 5;
+var ADAPTER_VERSION = idbConstants.ADAPTER_VERSION;
+var ATTACH_AND_SEQ_STORE = idbConstants.ATTACH_AND_SEQ_STORE;
+var ATTACH_STORE = idbConstants.ATTACH_STORE;
+var BY_SEQ_STORE = idbConstants.BY_SEQ_STORE;
+var DETECT_BLOB_SUPPORT_STORE = idbConstants.DETECT_BLOB_SUPPORT_STORE;
+var DOC_STORE = idbConstants.DOC_STORE;
+var LOCAL_STORE = idbConstants.LOCAL_STORE;
+var META_STORE = idbConstants.META_STORE;
 
-// The object stores created for each database
-// DOC_STORE stores the document meta data, its revision history and state
-// Keyed by document id
-var DOC_STORE = 'document-store';
-// BY_SEQ_STORE stores a particular version of a document, keyed by its
-// sequence id
-var BY_SEQ_STORE = 'by-sequence';
-// Where we store attachments
-var ATTACH_STORE = 'attach-store';
-// Where we store many-to-many relations
-// between attachment digests and seqs
-var ATTACH_AND_SEQ_STORE = 'attach-seq-store';
-
-// Where we store database-wide meta data in a single record
-// keyed by id: META_STORE
-var META_STORE = 'meta-store';
-// Where we store local documents
-var LOCAL_STORE = 'local-store';
-// Where we detect blob support
-var DETECT_BLOB_SUPPORT_STORE = 'detect-blob-support';
+var applyNext = idbUtils.applyNext;
+var compactRevs = idbUtils.compactRevs;
+var decodeDoc = idbUtils.decodeDoc;
+var decodeMetadata = idbUtils.decodeMetadata;
+var encodeMetadata = idbUtils.encodeMetadata;
+var fetchAttachmentsIfNecessary = idbUtils.fetchAttachmentsIfNecessary;
+var idbError = idbUtils.idbError;
+var postProcessAttachments = idbUtils.postProcessAttachments;
+var readBlobData = idbUtils.readBlobData;
+var taskQueue = idbUtils.taskQueue;
 
 var cachedDBs = {};
-var taskQueue = {
-  running: false,
-  queue: []
-};
-
 var blobSupportPromise;
-var blobSupport = null;
-
-function tryCode(fun, that, args) {
-  try {
-    fun.apply(that, args);
-  } catch (err) { // shouldn't happen
-    if (window.PouchDB) {
-      window.PouchDB.emit('error', err);
-    }
-  }
-}
-
-function applyNext() {
-  if (taskQueue.running || !taskQueue.queue.length) {
-    return;
-  }
-  taskQueue.running = true;
-  var item = taskQueue.queue.shift();
-  item.action(function (err, res) {
-    tryCode(item.callback, this, [err, res]);
-    taskQueue.running = false;
-    process.nextTick(applyNext);
-  });
-}
-
-function idbError(callback) {
-  return function (event) {
-    var message = (event.target && event.target.error &&
-      event.target.error.name) || event.target;
-    callback(errors.error(errors.IDB_ERROR, message, event.type));
-  };
-}
-
-// Unfortunately, the metadata has to be stringified
-// when it is put into the database, because otherwise
-// IndexedDB can throw errors for deeply-nested objects.
-// Originally we just used JSON.parse/JSON.stringify; now
-// we use this custom vuvuzela library that avoids recursion.
-// If we could do it all over again, we'd probably use a
-// format for the revision trees other than JSON.
-function encodeMetadata(metadata, winningRev, deleted) {
-  return {
-    data: utils.safeJsonStringify(metadata),
-    winningRev: winningRev,
-    deletedOrLocal: deleted ? '1' : '0',
-    seq: metadata.seq, // highest seq for this doc
-    id: metadata.id
-  };
-}
-
-function decodeMetadata(storedObject) {
-  if (!storedObject) {
-    return null;
-  }
-  var metadata = utils.safeJsonParse(storedObject.data);
-  metadata.winningRev = storedObject.winningRev;
-  metadata.deletedOrLocal = storedObject.deletedOrLocal === '1';
-  metadata.seq = storedObject.seq;
-  return metadata;
-}
-
-// read the doc back out from the database. we don't store the
-// _id or _rev because we already have _doc_id_rev.
-function decodeDoc(doc) {
-  if (!doc) {
-    return doc;
-  }
-  var idx = utils.lastIndexOf(doc._doc_id_rev, ':');
-  doc._id = doc._doc_id_rev.substring(0, idx - 1);
-  doc._rev = doc._doc_id_rev.substring(idx + 1);
-  delete doc._doc_id_rev;
-  return doc;
-}
-
-// Read a blob from the database, encoding as necessary
-// and translating from base64 if the IDB doesn't support
-// native Blobs
-function readBlobData(body, type, encode, callback) {
-  if (encode) {
-    if (!body) {
-      callback('');
-    } else if (typeof body !== 'string') { // we have blob support
-      utils.readAsBinaryString(body, function (binary) {
-        callback(utils.btoa(binary));
-      });
-    } else { // no blob support
-      callback(body);
-    }
-  } else {
-    if (!body) {
-      callback(utils.createBlob([''], {type: type}));
-    } else if (typeof body !== 'string') { // we have blob support
-      callback(body);
-    } else { // no blob support
-      body = utils.fixBinary(atob(body));
-      callback(utils.createBlob([body], {type: type}));
-    }
-  }
-}
-
-function fetchAttachmentsIfNecessary(doc, opts, txn, cb) {
-  var attachments = Object.keys(doc._attachments || {});
-  if (!attachments.length) {
-    return cb && cb();
-  }
-  var numDone = 0;
-
-  function checkDone() {
-    if (++numDone === attachments.length && cb) {
-      cb();
-    }
-  }
-
-  function fetchAttachment(doc, att) {
-    var attObj = doc._attachments[att];
-    var digest = attObj.digest;
-    txn.objectStore(ATTACH_STORE).get(digest).onsuccess = function (e) {
-      attObj.body = e.target.result.body;
-      checkDone();
-    };
-  }
-
-  attachments.forEach(function (att) {
-    if (opts.attachments && opts.include_docs) {
-      fetchAttachment(doc, att);
-    } else {
-      doc._attachments[att].stub = true;
-      checkDone();
-    }
-  });
-}
-
-// IDB-specific postprocessing necessary because
-// we don't know whether we stored a true Blob or
-// a base64-encoded string, and if it's a Blob it
-// needs to be read outside of the transaction context
-function postProcessAttachments(results) {
-  return utils.Promise.all(results.map(function (row) {
-    if (row.doc && row.doc._attachments) {
-      var attNames = Object.keys(row.doc._attachments);
-      return utils.Promise.all(attNames.map(function (att) {
-        var attObj = row.doc._attachments[att];
-        if (!('body' in attObj)) { // already processed
-          return;
-        }
-        var body = attObj.body;
-        var type = attObj.content_type;
-        return new utils.Promise(function (resolve) {
-          readBlobData(body, type, true, function (base64) {
-            row.doc._attachments[att] = utils.extend(
-              utils.pick(attObj, ['digest', 'content_type']),
-              {data: base64}
-            );
-            resolve();
-          });
-        });
-      }));
-    }
-  }));
-}
-
-function compactRevs(revs, docId, txn) {
-
-  var possiblyOrphanedDigests = [];
-  var seqStore = txn.objectStore(BY_SEQ_STORE);
-  var attStore = txn.objectStore(ATTACH_STORE);
-  var attAndSeqStore = txn.objectStore(ATTACH_AND_SEQ_STORE);
-  var count = revs.length;
-
-  function checkDone() {
-    count--;
-    if (!count) { // done processing all revs
-      deleteOrphanedAttachments();
-    }
-  }
-
-  function deleteOrphanedAttachments() {
-    if (!possiblyOrphanedDigests.length) {
-      return;
-    }
-    possiblyOrphanedDigests.forEach(function (digest) {
-      var countReq = attAndSeqStore.index('digestSeq').count(
-        global.IDBKeyRange.bound(
-          digest + '::', digest + '::\uffff', false, false));
-      countReq.onsuccess = function (e) {
-        var count = e.target.result;
-        if (!count) {
-          // orphaned
-          attStore.delete(digest);
-        }
-      };
-    });
-  }
-
-  revs.forEach(function (rev) {
-    var index = seqStore.index('_doc_id_rev');
-    var key = docId + "::" + rev;
-    index.getKey(key).onsuccess = function (e) {
-      var seq = e.target.result;
-      if (typeof seq !== 'number') {
-        return checkDone();
-      }
-      seqStore.delete(seq);
-
-      var cursor = attAndSeqStore.index('seq')
-        .openCursor(global.IDBKeyRange.only(seq));
-
-      cursor.onsuccess = function (event) {
-        var cursor = event.target.result;
-        if (cursor) {
-          var digest = cursor.value.digestSeq.split('::')[0];
-          possiblyOrphanedDigests.push(digest);
-          attAndSeqStore.delete(cursor.primaryKey);
-          cursor.continue();
-        } else { // done
-          checkDone();
-        }
-      };
-    };
-  });
-}
 
 function IdbPouch(opts, callback) {
   var api = this;
@@ -277,7 +49,9 @@
   var instanceId = null;
   var idStored = false;
   var idb = null;
-  var docCount = -1;
+  api._docCount = -1;
+  api._blobSupport = null;
+  api._name = name;
 
   // called when creating a fresh new database
   function createSchema(db) {
@@ -507,312 +281,7 @@
   });
 
   api._bulkDocs = function idb_bulkDocs(req, opts, callback) {
-    var newEdits = opts.new_edits;
-    var userDocs = req.docs;
-    // Parse the docs, give them a sequence number for the result
-    var docInfos = userDocs.map(function (doc, i) {
-      if (doc._id && utils.isLocalId(doc._id)) {
-        return doc;
-      }
-      var newDoc = utils.parseDoc(doc, newEdits);
-      return newDoc;
-    });
-
-    var docInfoErrors = docInfos.filter(function (docInfo) {
-      return docInfo.error;
-    });
-    if (docInfoErrors.length) {
-      return callback(docInfoErrors[0]);
-    }
-
-    var results = new Array(docInfos.length);
-    var fetchedDocs = new utils.Map();
-    var preconditionErrored = false;
-
-    function processDocs() {
-      utils.processDocs(docInfos, api, fetchedDocs,
-        txn, results, writeDoc, opts);
-    }
-
-    function fetchExistingDocs(callback) {
-      if (!docInfos.length) {
-        return callback();
-      }
-
-      var numFetched = 0;
-
-      function checkDone() {
-        if (++numFetched === docInfos.length) {
-          callback();
-        }
-      }
-
-      docInfos.forEach(function (docInfo) {
-        if (docInfo._id && utils.isLocalId(docInfo._id)) {
-          return checkDone(); // skip local docs
-        }
-        var id = docInfo.metadata.id;
-        var req = txn.objectStore(DOC_STORE).get(id);
-        req.onsuccess = function process_docRead(event) {
-          var metadata = decodeMetadata(event.target.result);
-          if (metadata) {
-            fetchedDocs.set(id, metadata);
-          }
-          checkDone();
-        };
-      });
-    }
-
-    function complete() {
-      if (preconditionErrored) {
-        return;
-      }
-      var aresults = results.map(function (result) {
-        if (!Object.keys(result).length) {
-          return {
-            ok: true
-          };
-        }
-        if (result.error) {
-          return result;
-        }
-
-        var metadata = result.metadata;
-        var rev = merge.winningRev(metadata);
-
-        return {
-          ok: true,
-          id: metadata.id,
-          rev: rev
-        };
-      });
-      IdbPouch.Changes.notify(name);
-      docCount = -1; // invalidate
-      callback(null, aresults);
-    }
-
-    function verifyAttachment(digest, callback) {
-      var req = txn.objectStore([ATTACH_STORE]).get(digest);
-      req.onsuccess = function (e) {
-        if (!e.target.result) {
-          var err = new Error('unknown stub attachment with digest ' + digest);
-          err.status = 412;
-          callback(err);
-        } else {
-          callback();
-        }
-      };
-    }
-
-    function verifyAttachments(finish) {
-      var digests = [];
-      docInfos.forEach(function (docInfo) {
-        if (docInfo.data && docInfo.data._attachments) {
-          Object.keys(docInfo.data._attachments).forEach(function (filename) {
-            var att = docInfo.data._attachments[filename];
-            if (att.stub) {
-              digests.push(att.digest);
-            }
-          });
-        }
-      });
-      if (!digests.length) {
-        return finish();
-      }
-      var numDone = 0;
-      var err;
-
-      function checkDone() {
-        if (++numDone === digests.length) {
-          finish(err);
-        }
-      }
-      digests.forEach(function (digest) {
-        verifyAttachment(digest, function (attErr) {
-          if (attErr && !err) {
-            err = attErr;
-          }
-          checkDone();
-        });
-      });
-    }
-
-    function writeDoc(docInfo, winningRev, deleted, callback, isUpdate,
-                      resultsIdx) {
-      var err = null;
-      var recv = 0;
-      var id = docInfo.data._id = docInfo.metadata.id;
-      var rev = docInfo.data._rev = docInfo.metadata.rev;
-      var docIdRev = id + "::" + rev;
-      var attachments = Object.keys(docInfo.data._attachments || {});
-
-      if (deleted) {
-        docInfo.data._deleted = true;
-      }
-
-      function collectResults(attachmentErr) {
-        if (!err) {
-          if (attachmentErr) {
-            err = attachmentErr;
-            callback(err);
-          } else if (recv === attachments.length) {
-            finish();
-          }
-        }
-      }
-
-      function attachmentSaved(err) {
-        recv++;
-        collectResults(err);
-      }
-
-      attachments.forEach(function (key) {
-        var att = docInfo.data._attachments[key];
-        if (!att.stub) {
-          var data = att.data;
-          delete att.data;
-          var digest = att.digest;
-          saveAttachment(digest, data, attachmentSaved);
-        } else {
-          recv++;
-          collectResults();
-        }
-      });
-
-      // map seqs to attachment digests, which
-      // we will need later during compaction
-      function insertAttachmentMappings(seq, callback) {
-        var attsAdded = 0;
-        var attsToAdd = Object.keys(docInfo.data._attachments || {});
-
-        if (!attsToAdd.length) {
-          return callback();
-        }
-        function checkDone() {
-          if (++attsAdded === attsToAdd.length) {
-            callback();
-          }
-        }
-        function add(att) {
-          var digest = docInfo.data._attachments[att].digest;
-          var req = txn.objectStore(ATTACH_AND_SEQ_STORE).put({
-            seq: seq,
-            digestSeq: digest + '::' + seq
-          });
-
-          req.onsuccess = checkDone;
-          req.onerror = function (e) {
-            // this callback is for a constaint error, which we ignore
-            // because this docid/rev has already been associated with
-            // the digest (e.g. when new_edits == false)
-            e.preventDefault(); // avoid transaction abort
-            e.stopPropagation(); // avoid transaction onerror
-            checkDone();
-          };
-        }
-        for (var i = 0; i < attsToAdd.length; i++) {
-          add(attsToAdd[i]); // do in parallel
-        }
-      }
-
-      function finish() {
-        docInfo.data._doc_id_rev = docIdRev;
-        delete docInfo.data._id;
-        delete docInfo.data._rev;
-        var seqStore = txn.objectStore(BY_SEQ_STORE);
-        var index = seqStore.index('_doc_id_rev');
-
-        function autoCompact() {
-          if (!isUpdate || !api.auto_compaction) {
-            return; // nothing to do
-          }
-          var revsToDelete = utils.compactTree(docInfo.metadata);
-          compactRevs(revsToDelete, docInfo.metadata.id, txn);
-        }
-
-        function afterPut(e) {
-          autoCompact();
-          var metadata = docInfo.metadata;
-          var seq = e.target.result;
-          metadata.seq = seq;
-          // Current _rev is calculated from _rev_tree on read
-          delete metadata.rev;
-          var metadataToStore = encodeMetadata(metadata, winningRev, deleted);
-          var metaDataReq = txn.objectStore(DOC_STORE).put(metadataToStore);
-          metaDataReq.onsuccess = function () {
-            delete metadata.deletedOrLocal;
-            delete metadata.winningRev;
-            results[resultsIdx] = docInfo;
-            fetchedDocs.set(docInfo.metadata.id, docInfo.metadata);
-            insertAttachmentMappings(seq, function () {
-              utils.call(callback);
-            });
-          };
-        }
-
-        var putReq = seqStore.put(docInfo.data);
-
-        putReq.onsuccess = afterPut;
-        putReq.onerror = function (e) {
-          // ConstraintError, need to update, not put (see #1638 for details)
-          e.preventDefault(); // avoid transaction abort
-          e.stopPropagation(); // avoid transaction onerror
-          var getKeyReq = index.getKey(docInfo.data._doc_id_rev);
-          getKeyReq.onsuccess = function (e) {
-            var putReq = seqStore.put(docInfo.data, e.target.result);
-            putReq.onsuccess = afterPut;
-          };
-        };
-      }
-
-      if (!attachments.length) {
-        finish();
-      }
-    }
-
-    function saveAttachment(digest, data, callback) {
-      var objectStore = txn.objectStore(ATTACH_STORE);
-      objectStore.get(digest).onsuccess = function (e) {
-        var exists = e.target.result;
-        if (exists) {
-          // don't bother re-putting if it already exists
-          return utils.call(callback);
-        }
-        var newAtt = {
-          digest: digest,
-          body: data
-        };
-        objectStore.put(newAtt).onsuccess = function () {
-          utils.call(callback);
-        };
-      };
-    }
-
-    var txn;
-    var blobType = blobSupport ? 'blob' : 'base64';
-    utils.preprocessAttachments(docInfos, blobType, function (err) {
-      if (err) {
-        return callback(err);
-      }
-
-      var stores = [
-        DOC_STORE, BY_SEQ_STORE,
-        ATTACH_STORE, META_STORE,
-        LOCAL_STORE, ATTACH_AND_SEQ_STORE
-      ];
-      txn = idb.transaction(stores, 'readwrite');
-      txn.onerror = idbError(callback);
-      txn.ontimeout = idbError(callback);
-      txn.oncomplete = complete;
-
-      verifyAttachments(function (err) {
-        if (err) {
-          preconditionErrored = true;
-          return callback(err);
-        }
-        fetchExistingDocs(processDocs);
-      });
-    });
+    new IdbBulkDocs(req, opts, api, idb, IdbPouch.Changes, callback);
   };
 
   // First we look up the metadata in the ids database, then we fetch the
@@ -1025,8 +494,8 @@
   }
 
   function countDocs(callback) {
-    if (docCount !== -1) {
-      return callback(null, docCount);
+    if (api._docCount !== -1) {
+      return callback(null, api._docCount);
     }
 
     var count;
@@ -1037,8 +506,8 @@
     };
     txn.onerror = idbError(callback);
     txn.oncomplete = function () {
-      docCount = count;
-      callback(null, docCount);
+      api._docCount = count;
+      callback(null, api._docCount);
     };
   }
 
@@ -1501,7 +970,7 @@
     req.onsuccess = function (e) {
 
       var checkSetupComplete = function () {
-        if (blobSupport === null || !idStored) {
+        if (api._blobSupport === null || !idStored) {
           return;
         } else {
           cachedDBs[name] = {
@@ -1570,13 +1039,13 @@
             };
           };
         }).catch(function (err) {
-          blobSupport = false;
+          api._blobSupport = false;
           checkSetupComplete();
         });
       }
 
       blobSupportPromise.then(function (val) {
-        blobSupport = val;
+        api._blobSupport = val;
         checkSetupComplete();
       });
     };
diff --git a/lib/index.js b/lib/index.js
index a2b4249..c4356e4 100644
--- a/lib/index.js
+++ b/lib/index.js
@@ -15,7 +15,7 @@
 PouchDB.adapter('http', httpAdapter);
 PouchDB.adapter('https', httpAdapter);
 
-PouchDB.adapter('idb', require('./adapters/idb'));
+PouchDB.adapter('idb', require('./adapters/idb/idb'));
 PouchDB.adapter('websql', require('./adapters/websql'));
 PouchDB.plugin(require('pouchdb-mapreduce'));