| 'use strict'; |
| |
| var utils = require('../../utils'); |
| var merge = require('../../merge'); |
| var isDeleted = require('../../deps/docs/isDeleted'); |
| var isLocalId = require('../../deps/docs/isLocalId'); |
| var errors = require('../../deps/errors'); |
| var idbUtils = require('./utils'); |
| var idbConstants = require('./constants'); |
| var idbBulkDocs = require('./bulkDocs'); |
| var idbAllDocs = require('./allDocs'); |
| var checkBlobSupport = require('./blobSupport'); |
| var hasLocalStorage = require('../../deps/env/hasLocalStorage'); |
| |
| var ADAPTER_VERSION = idbConstants.ADAPTER_VERSION; |
| var ATTACH_AND_SEQ_STORE = idbConstants.ATTACH_AND_SEQ_STORE; |
| var ATTACH_STORE = idbConstants.ATTACH_STORE; |
| var BY_SEQ_STORE = idbConstants.BY_SEQ_STORE; |
| var DETECT_BLOB_SUPPORT_STORE = idbConstants.DETECT_BLOB_SUPPORT_STORE; |
| var DOC_STORE = idbConstants.DOC_STORE; |
| var LOCAL_STORE = idbConstants.LOCAL_STORE; |
| var META_STORE = idbConstants.META_STORE; |
| |
| var applyNext = idbUtils.applyNext; |
| var compactRevs = idbUtils.compactRevs; |
| var decodeDoc = idbUtils.decodeDoc; |
| var decodeMetadata = idbUtils.decodeMetadata; |
| var encodeMetadata = idbUtils.encodeMetadata; |
| var fetchAttachmentsIfNecessary = idbUtils.fetchAttachmentsIfNecessary; |
| var idbError = idbUtils.idbError; |
| var postProcessAttachments = idbUtils.postProcessAttachments; |
| var readBlobData = idbUtils.readBlobData; |
| var taskQueue = idbUtils.taskQueue; |
| var openTransactionSafely = idbUtils.openTransactionSafely; |
| |
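// module-level state: open IndexedDB connections are cached per database
// name so repeated constructor calls reuse them, and blob support is only
// probed once per process (see blobSupportPromise)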
| var cachedDBs = {}; |
| var blobSupportPromise; |
| |
| function IdbPouch(opts, callback) { |
| var api = this; |
| |
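  // constructor work is pushed onto the adapter's shared task queue and
  // kicked off with applyNext(), so queued init() calls run in order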
| taskQueue.queue.push({ |
| action: function (thisCallback) { |
| init(api, opts, thisCallback); |
| }, |
| callback: callback |
| }); |
| applyNext(); |
| } |
| |
| function init(api, opts, callback) { |
| |
| var dbName = opts.name; |
| |
| var idb = null; |
| api._meta = null; |
| |
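  // Object stores created below (and their key formats):
  //   DOC_STORE                  document metadata, keyed by doc id
  //   BY_SEQ_STORE               revisions, auto-increment seq keys, with a
  //                              '_doc_id_rev' index (docId + '::' + rev)
  //   ATTACH_STORE               attachment bodies, keyed by digest
  //   ATTACH_AND_SEQ_STORE       digest<->seq links, used during compaction
  //   LOCAL_STORE                _local docs, keyed by _id
  //   META_STORE                 instance id and other adapter metadata
  //   DETECT_BLOB_SUPPORT_STORE  scratch store used only by checkBlobSupport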
| // called when creating a fresh new database |
| function createSchema(db) { |
| var docStore = db.createObjectStore(DOC_STORE, {keyPath : 'id'}); |
| db.createObjectStore(BY_SEQ_STORE, {autoIncrement: true}) |
| .createIndex('_doc_id_rev', '_doc_id_rev', {unique: true}); |
| db.createObjectStore(ATTACH_STORE, {keyPath: 'digest'}); |
| db.createObjectStore(META_STORE, {keyPath: 'id', autoIncrement: false}); |
| db.createObjectStore(DETECT_BLOB_SUPPORT_STORE); |
| |
| // added in v2 |
| docStore.createIndex('deletedOrLocal', 'deletedOrLocal', {unique : false}); |
| |
| // added in v3 |
| db.createObjectStore(LOCAL_STORE, {keyPath: '_id'}); |
| |
| // added in v4 |
| var attAndSeqStore = db.createObjectStore(ATTACH_AND_SEQ_STORE, |
| {autoIncrement: true}); |
| attAndSeqStore.createIndex('seq', 'seq'); |
| attAndSeqStore.createIndex('digestSeq', 'digestSeq', {unique: true}); |
| } |
| |
| // migration to version 2 |
| // unfortunately "deletedOrLocal" is a misnomer now that we no longer |
| // store local docs in the main doc-store, but whaddyagonnado |
| function addDeletedOrLocalIndex(txn, callback) { |
| var docStore = txn.objectStore(DOC_STORE); |
| docStore.createIndex('deletedOrLocal', 'deletedOrLocal', {unique : false}); |
| |
| docStore.openCursor().onsuccess = function (event) { |
| var cursor = event.target.result; |
| if (cursor) { |
| var metadata = cursor.value; |
| var deleted = isDeleted(metadata); |
| metadata.deletedOrLocal = deleted ? "1" : "0"; |
| docStore.put(metadata); |
| cursor.continue(); |
| } else { |
| callback(); |
| } |
| }; |
| } |
| |
| // migration to version 3 (part 1) |
| function createLocalStoreSchema(db) { |
| db.createObjectStore(LOCAL_STORE, {keyPath: '_id'}) |
| .createIndex('_doc_id_rev', '_doc_id_rev', {unique: true}); |
| } |
| |
| // migration to version 3 (part 2) |
| function migrateLocalStore(txn, cb) { |
| var localStore = txn.objectStore(LOCAL_STORE); |
| var docStore = txn.objectStore(DOC_STORE); |
| var seqStore = txn.objectStore(BY_SEQ_STORE); |
| |
| var cursor = docStore.openCursor(); |
| cursor.onsuccess = function (event) { |
| var cursor = event.target.result; |
| if (cursor) { |
| var metadata = cursor.value; |
| var docId = metadata.id; |
| var local = isLocalId(docId); |
| var rev = merge.winningRev(metadata); |
| if (local) { |
| var docIdRev = docId + "::" + rev; |
| // remove all seq entries |
| // associated with this docId |
| var start = docId + "::"; |
| var end = docId + "::~"; |
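          // ('~' sorts after every character used in a rev string, so this
          // range covers all '_doc_id_rev' keys for this doc)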
| var index = seqStore.index('_doc_id_rev'); |
| var range = IDBKeyRange.bound(start, end, false, false); |
| var seqCursor = index.openCursor(range); |
| seqCursor.onsuccess = function (e) { |
| seqCursor = e.target.result; |
| if (!seqCursor) { |
| // done |
| docStore.delete(cursor.primaryKey); |
| cursor.continue(); |
| } else { |
| var data = seqCursor.value; |
| if (data._doc_id_rev === docIdRev) { |
| localStore.put(data); |
| } |
| seqStore.delete(seqCursor.primaryKey); |
| seqCursor.continue(); |
| } |
| }; |
| } else { |
| cursor.continue(); |
| } |
| } else if (cb) { |
| cb(); |
| } |
| }; |
| } |
| |
| // migration to version 4 (part 1) |
| function addAttachAndSeqStore(db) { |
| var attAndSeqStore = db.createObjectStore(ATTACH_AND_SEQ_STORE, |
| {autoIncrement: true}); |
| attAndSeqStore.createIndex('seq', 'seq'); |
| attAndSeqStore.createIndex('digestSeq', 'digestSeq', {unique: true}); |
| } |
| |
| // migration to version 4 (part 2) |
| function migrateAttsAndSeqs(txn, callback) { |
| var seqStore = txn.objectStore(BY_SEQ_STORE); |
| var attStore = txn.objectStore(ATTACH_STORE); |
| var attAndSeqStore = txn.objectStore(ATTACH_AND_SEQ_STORE); |
| |
| // need to actually populate the table. this is the expensive part, |
| // so as an optimization, check first that this database even |
| // contains attachments |
| var req = attStore.count(); |
| req.onsuccess = function (e) { |
| var count = e.target.result; |
| if (!count) { |
| return callback(); // done |
| } |
| |
| seqStore.openCursor().onsuccess = function (e) { |
| var cursor = e.target.result; |
| if (!cursor) { |
| return callback(); // done |
| } |
| var doc = cursor.value; |
| var seq = cursor.primaryKey; |
| var atts = Object.keys(doc._attachments || {}); |
| var digestMap = {}; |
| for (var j = 0; j < atts.length; j++) { |
| var att = doc._attachments[atts[j]]; |
| digestMap[att.digest] = true; // uniq digests, just in case |
| } |
| var digests = Object.keys(digestMap); |
| for (j = 0; j < digests.length; j++) { |
| var digest = digests[j]; |
| attAndSeqStore.put({ |
| seq: seq, |
| digestSeq: digest + '::' + seq |
| }); |
| } |
| cursor.continue(); |
| }; |
| }; |
| } |
| |
| // migration to version 5 |
| // Instead of relying on on-the-fly migration of metadata, |
| // this brings the doc-store to its modern form: |
  // - metadata.winningRev
| // - metadata.seq |
| // - stringify the metadata when storing it |
| function migrateMetadata(txn) { |
| |
| function decodeMetadataCompat(storedObject) { |
| if (!storedObject.data) { |
| // old format, when we didn't store it stringified |
| storedObject.deleted = storedObject.deletedOrLocal === '1'; |
| return storedObject; |
| } |
| return decodeMetadata(storedObject); |
| } |
| |
    // ensure that every metadata object has a winningRev and seq,
    // which were previously computed on-the-fly but are better migrated
    // up-front
| var bySeqStore = txn.objectStore(BY_SEQ_STORE); |
| var docStore = txn.objectStore(DOC_STORE); |
| var cursor = docStore.openCursor(); |
| cursor.onsuccess = function (e) { |
| var cursor = e.target.result; |
| if (!cursor) { |
| return; // done |
| } |
| var metadata = decodeMetadataCompat(cursor.value); |
| |
| metadata.winningRev = metadata.winningRev || merge.winningRev(metadata); |
| |
| function fetchMetadataSeq() { |
| // metadata.seq was added post-3.2.0, so if it's missing, |
| // we need to fetch it manually |
| var start = metadata.id + '::'; |
| var end = metadata.id + '::\uffff'; |
| var req = bySeqStore.index('_doc_id_rev').openCursor( |
| IDBKeyRange.bound(start, end)); |
| |
| var metadataSeq = 0; |
| req.onsuccess = function (e) { |
| var cursor = e.target.result; |
| if (!cursor) { |
| metadata.seq = metadataSeq; |
| return onGetMetadataSeq(); |
| } |
| var seq = cursor.primaryKey; |
| if (seq > metadataSeq) { |
| metadataSeq = seq; |
| } |
| cursor.continue(); |
| }; |
| } |
| |
| function onGetMetadataSeq() { |
| var metadataToStore = encodeMetadata(metadata, |
| metadata.winningRev, metadata.deleted); |
| |
| var req = docStore.put(metadataToStore); |
| req.onsuccess = function () { |
| cursor.continue(); |
| }; |
| } |
| |
| if (metadata.seq) { |
| return onGetMetadataSeq(); |
| } |
| |
| fetchMetadataSeq(); |
| }; |
| |
| } |
| |
| api.type = function () { |
| return 'idb'; |
| }; |
| |
| api._id = utils.toPromise(function (callback) { |
| callback(null, api._meta.instanceId); |
| }); |
| |
| api._bulkDocs = function idb_bulkDocs(req, opts, callback) { |
| idbBulkDocs(req, opts, api, idb, IdbPouch.Changes, callback); |
| }; |
| |
  // First we look up the metadata in the doc store, then we fetch the
  // requested (or winning) revision from the by-sequence store
| api._get = function idb_get(id, opts, callback) { |
| var doc; |
| var metadata; |
| var err; |
| var txn; |
| opts = utils.clone(opts); |
| if (opts.ctx) { |
| txn = opts.ctx; |
| } else { |
| var txnResult = openTransactionSafely(idb, |
| [DOC_STORE, BY_SEQ_STORE, ATTACH_STORE], 'readonly'); |
| if (txnResult.error) { |
| return callback(txnResult.error); |
| } |
| txn = txnResult.txn; |
| } |
| |
| function finish() { |
| callback(err, {doc: doc, metadata: metadata, ctx: txn}); |
| } |
| |
| txn.objectStore(DOC_STORE).get(id).onsuccess = function (e) { |
| metadata = decodeMetadata(e.target.result); |
| // we can determine the result here if: |
| // 1. there is no such document |
| // 2. the document is deleted and we don't ask about specific rev |
| // When we ask with opts.rev we expect the answer to be either |
| // doc (possibly with _deleted=true) or missing error |
| if (!metadata) { |
| err = errors.error(errors.MISSING_DOC, 'missing'); |
| return finish(); |
| } |
| if (isDeleted(metadata) && !opts.rev) { |
| err = errors.error(errors.MISSING_DOC, "deleted"); |
| return finish(); |
| } |
| var objectStore = txn.objectStore(BY_SEQ_STORE); |
| |
| var rev = opts.rev || metadata.winningRev; |
| var key = metadata.id + '::' + rev; |
| |
| objectStore.index('_doc_id_rev').get(key).onsuccess = function (e) { |
| doc = e.target.result; |
| if (doc) { |
| doc = decodeDoc(doc); |
| } |
| if (!doc) { |
| err = errors.error(errors.MISSING_DOC, 'missing'); |
| return finish(); |
| } |
| finish(); |
| }; |
| }; |
| }; |
| |
| api._getAttachment = function (attachment, opts, callback) { |
| var txn; |
| opts = utils.clone(opts); |
| if (opts.ctx) { |
| txn = opts.ctx; |
| } else { |
| var txnResult = openTransactionSafely(idb, |
| [DOC_STORE, BY_SEQ_STORE, ATTACH_STORE], 'readonly'); |
| if (txnResult.error) { |
| return callback(txnResult.error); |
| } |
| txn = txnResult.txn; |
| } |
| var digest = attachment.digest; |
| var type = attachment.content_type; |
| |
| txn.objectStore(ATTACH_STORE).get(digest).onsuccess = function (e) { |
| var body = e.target.result.body; |
| readBlobData(body, type, opts.binary, function (blobData) { |
| callback(null, blobData); |
| }); |
| }; |
| }; |
| |
| api._info = function idb_info(callback) { |
| |
| if (idb === null || !cachedDBs[dbName]) { |
| var error = new Error('db isn\'t open'); |
| error.id = 'idbNull'; |
| return callback(error); |
| } |
| var updateSeq; |
| var docCount; |
| |
| var txnResult = openTransactionSafely(idb, [BY_SEQ_STORE], 'readonly'); |
| if (txnResult.error) { |
| return callback(txnResult.error); |
| } |
| var txn = txnResult.txn; |
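    // update_seq is simply the highest key in BY_SEQ_STORE, so walk the
    // store backwards ('prev') and take the first key we see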
| var cursor = txn.objectStore(BY_SEQ_STORE).openCursor(null, 'prev'); |
| cursor.onsuccess = function (event) { |
| var cursor = event.target.result; |
| updateSeq = cursor ? cursor.key : 0; |
      // read the cached doc count within the same txn so it stays
      // consistent with update_seq
| docCount = api._meta.docCount; |
| }; |
| |
| txn.oncomplete = function () { |
| callback(null, { |
| doc_count: docCount, |
| update_seq: updateSeq, |
| // for debugging |
| idb_attachment_format: (api._meta.blobSupport ? 'binary' : 'base64') |
| }); |
| }; |
| }; |
| |
| api._allDocs = function idb_allDocs(opts, callback) { |
| idbAllDocs(opts, api, idb, callback); |
| }; |
| |
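  // _changes walks BY_SEQ_STORE in seq order; for each row it loads the
  // doc's metadata (cached in docIdsToMetadata), skips rows whose seq is
  // not the doc's latest, and emits the winning revision via opts.onChange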
| api._changes = function (opts) { |
| opts = utils.clone(opts); |
| |
| if (opts.continuous) { |
| var id = dbName + ':' + utils.uuid(); |
| IdbPouch.Changes.addListener(dbName, id, api, opts); |
| IdbPouch.Changes.notify(dbName); |
| return { |
| cancel: function () { |
| IdbPouch.Changes.removeListener(dbName, id); |
| } |
| }; |
| } |
| |
| var docIds = opts.doc_ids && new utils.Set(opts.doc_ids); |
| var descending = opts.descending ? 'prev' : null; |
| |
| opts.since = opts.since || 0; |
| var lastSeq = opts.since; |
| |
| var limit = 'limit' in opts ? opts.limit : -1; |
| if (limit === 0) { |
| limit = 1; // per CouchDB _changes spec |
| } |
| var returnDocs; |
| if ('returnDocs' in opts) { |
| returnDocs = opts.returnDocs; |
| } else { |
| returnDocs = true; |
| } |
| |
| var results = []; |
| var numResults = 0; |
| var filter = utils.filterChange(opts); |
| var docIdsToMetadata = new utils.Map(); |
| |
| var txn; |
| var bySeqStore; |
| var docStore; |
| |
| function onGetCursor(cursor) { |
| |
| var doc = decodeDoc(cursor.value); |
| var seq = cursor.key; |
| |
| if (docIds && !docIds.has(doc._id)) { |
| return cursor.continue(); |
| } |
| |
| var metadata; |
| |
| function onGetMetadata() { |
| if (metadata.seq !== seq) { |
| // some other seq is later |
| return cursor.continue(); |
| } |
| |
| lastSeq = seq; |
| |
| if (metadata.winningRev === doc._rev) { |
| return onGetWinningDoc(doc); |
| } |
| |
| fetchWinningDoc(); |
| } |
| |
| function fetchWinningDoc() { |
| var docIdRev = doc._id + '::' + metadata.winningRev; |
| var req = bySeqStore.index('_doc_id_rev').openCursor( |
| IDBKeyRange.bound(docIdRev, docIdRev + '\uffff')); |
| req.onsuccess = function (e) { |
| onGetWinningDoc(decodeDoc(e.target.result.value)); |
| }; |
| } |
| |
| function onGetWinningDoc(winningDoc) { |
| |
| var change = opts.processChange(winningDoc, metadata, opts); |
| change.seq = metadata.seq; |
| if (filter(change)) { |
| numResults++; |
| if (returnDocs) { |
| results.push(change); |
| } |
| // process the attachment immediately |
| // for the benefit of live listeners |
| if (opts.attachments && opts.include_docs) { |
| fetchAttachmentsIfNecessary(winningDoc, opts, txn, function () { |
| postProcessAttachments([change], opts.binary).then(function () { |
| opts.onChange(change); |
| }); |
| }); |
| } else { |
| opts.onChange(change); |
| } |
| } |
| if (numResults !== limit) { |
| cursor.continue(); |
| } |
| } |
| |
| metadata = docIdsToMetadata.get(doc._id); |
| if (metadata) { // cached |
| return onGetMetadata(); |
| } |
| // metadata not cached, have to go fetch it |
| docStore.get(doc._id).onsuccess = function (event) { |
| metadata = decodeMetadata(event.target.result); |
| docIdsToMetadata.set(doc._id, metadata); |
| onGetMetadata(); |
| }; |
| } |
| |
| function onsuccess(event) { |
| var cursor = event.target.result; |
| |
| if (!cursor) { |
| return; |
| } |
| onGetCursor(cursor); |
| } |
| |
| function fetchChanges() { |
| var objectStores = [DOC_STORE, BY_SEQ_STORE]; |
| if (opts.attachments) { |
| objectStores.push(ATTACH_STORE); |
| } |
| var txnResult = openTransactionSafely(idb, objectStores, 'readonly'); |
| if (txnResult.error) { |
| return opts.complete(txnResult.error); |
| } |
| txn = txnResult.txn; |
| txn.onerror = idbError(opts.complete); |
| txn.oncomplete = onTxnComplete; |
| |
| bySeqStore = txn.objectStore(BY_SEQ_STORE); |
| docStore = txn.objectStore(DOC_STORE); |
| |
| var req; |
| |
| if (descending) { |
        req = bySeqStore.openCursor(null, descending);
| } else { |
| req = bySeqStore.openCursor( |
| IDBKeyRange.lowerBound(opts.since, true)); |
| } |
| |
| req.onsuccess = onsuccess; |
| } |
| |
| fetchChanges(); |
| |
| function onTxnComplete() { |
| |
| function finish() { |
| opts.complete(null, { |
| results: results, |
| last_seq: lastSeq |
| }); |
| } |
| |
| if (!opts.continuous && opts.attachments) { |
| // cannot guarantee that postProcessing was already done, |
| // so do it again |
| postProcessAttachments(results).then(finish); |
| } else { |
| finish(); |
| } |
| } |
| }; |
| |
| api._close = function (callback) { |
| if (idb === null) { |
| return callback(errors.error(errors.NOT_OPEN)); |
| } |
| |
| // https://developer.mozilla.org/en-US/docs/IndexedDB/IDBDatabase#close |
| // "Returns immediately and closes the connection in a separate thread..." |
| idb.close(); |
| delete cachedDBs[dbName]; |
| idb = null; |
| callback(); |
| }; |
| |
| api._getRevisionTree = function (docId, callback) { |
| var txnResult = openTransactionSafely(idb, [DOC_STORE], 'readonly'); |
| if (txnResult.error) { |
| return callback(txnResult.error); |
| } |
| var txn = txnResult.txn; |
| var req = txn.objectStore(DOC_STORE).get(docId); |
| req.onsuccess = function (event) { |
| var doc = decodeMetadata(event.target.result); |
| if (!doc) { |
| callback(errors.error(errors.MISSING_DOC)); |
| } else { |
| callback(null, doc.rev_tree); |
| } |
| }; |
| }; |
| |
  // This function removes the revisions of document docId that are
  // listed in revs, and writes the updated rev_tree back to the doc store
| api._doCompaction = function (docId, revs, callback) { |
| var stores = [ |
| DOC_STORE, |
| BY_SEQ_STORE, |
| ATTACH_STORE, |
| ATTACH_AND_SEQ_STORE |
| ]; |
| var txnResult = openTransactionSafely(idb, stores, 'readwrite'); |
| if (txnResult.error) { |
| return callback(txnResult.error); |
| } |
| var txn = txnResult.txn; |
| |
| var docStore = txn.objectStore(DOC_STORE); |
| |
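    // compaction: mark the given revs as 'missing' in the rev tree, let
    // compactRevs() delete their seq/attachment entries, then write the
    // updated metadata back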
| docStore.get(docId).onsuccess = function (event) { |
| var metadata = decodeMetadata(event.target.result); |
| merge.traverseRevTree(metadata.rev_tree, function (isLeaf, pos, |
| revHash, ctx, opts) { |
| var rev = pos + '-' + revHash; |
| if (revs.indexOf(rev) !== -1) { |
| opts.status = 'missing'; |
| } |
| }); |
| compactRevs(revs, docId, txn); |
| var winningRev = metadata.winningRev; |
| var deleted = metadata.deleted; |
| txn.objectStore(DOC_STORE).put( |
| encodeMetadata(metadata, winningRev, deleted)); |
| }; |
| txn.onerror = idbError(callback); |
| txn.oncomplete = function () { |
| utils.call(callback); |
| }; |
| }; |
| |
| |
| api._getLocal = function (id, callback) { |
| var txnResult = openTransactionSafely(idb, [LOCAL_STORE], 'readonly'); |
| if (txnResult.error) { |
| return callback(txnResult.error); |
| } |
| var tx = txnResult.txn; |
| var req = tx.objectStore(LOCAL_STORE).get(id); |
| |
| req.onerror = idbError(callback); |
| req.onsuccess = function (e) { |
| var doc = e.target.result; |
| if (!doc) { |
| callback(errors.error(errors.MISSING_DOC)); |
| } else { |
| delete doc['_doc_id_rev']; // for backwards compat |
| callback(null, doc); |
| } |
| }; |
| }; |
| |
| api._putLocal = function (doc, opts, callback) { |
| if (typeof opts === 'function') { |
| callback = opts; |
| opts = {}; |
| } |
| delete doc._revisions; // ignore this, trust the rev |
| var oldRev = doc._rev; |
| var id = doc._id; |
| if (!oldRev) { |
| doc._rev = '0-1'; |
| } else { |
| doc._rev = '0-' + (parseInt(oldRev.split('-')[1], 10) + 1); |
| } |
| |
| var tx = opts.ctx; |
| var ret; |
| if (!tx) { |
| var txnResult = openTransactionSafely(idb, [LOCAL_STORE], 'readwrite'); |
| if (txnResult.error) { |
| return callback(txnResult.error); |
| } |
| tx = txnResult.txn; |
| tx.onerror = idbError(callback); |
| tx.oncomplete = function () { |
| if (ret) { |
| callback(null, ret); |
| } |
| }; |
| } |
| |
| var oStore = tx.objectStore(LOCAL_STORE); |
| var req; |
| if (oldRev) { |
| req = oStore.get(id); |
| req.onsuccess = function (e) { |
| var oldDoc = e.target.result; |
| if (!oldDoc || oldDoc._rev !== oldRev) { |
| callback(errors.error(errors.REV_CONFLICT)); |
| } else { // update |
| var req = oStore.put(doc); |
| req.onsuccess = function () { |
| ret = {ok: true, id: doc._id, rev: doc._rev}; |
| if (opts.ctx) { // return immediately |
| callback(null, ret); |
| } |
| }; |
| } |
| }; |
| } else { // new doc |
| req = oStore.add(doc); |
| req.onerror = function (e) { |
| // constraint error, already exists |
| callback(errors.error(errors.REV_CONFLICT)); |
| e.preventDefault(); // avoid transaction abort |
| e.stopPropagation(); // avoid transaction onerror |
| }; |
| req.onsuccess = function () { |
| ret = {ok: true, id: doc._id, rev: doc._rev}; |
| if (opts.ctx) { // return immediately |
| callback(null, ret); |
| } |
| }; |
| } |
| }; |
| |
| api._removeLocal = function (doc, callback) { |
| var txnResult = openTransactionSafely(idb, [LOCAL_STORE], 'readwrite'); |
| if (txnResult.error) { |
| return callback(txnResult.error); |
| } |
| var tx = txnResult.txn; |
| var ret; |
| tx.oncomplete = function () { |
| if (ret) { |
| callback(null, ret); |
| } |
| }; |
| var id = doc._id; |
| var oStore = tx.objectStore(LOCAL_STORE); |
| var req = oStore.get(id); |
| |
| req.onerror = idbError(callback); |
| req.onsuccess = function (e) { |
| var oldDoc = e.target.result; |
| if (!oldDoc || oldDoc._rev !== doc._rev) { |
| callback(errors.error(errors.MISSING_DOC)); |
| } else { |
| oStore.delete(id); |
| ret = {ok: true, id: id, rev: '0-0'}; |
| } |
| }; |
| }; |
| |
| api._destroy = function (callback) { |
| IdbPouch.Changes.removeAllListeners(dbName); |
| |
    // Close any open request for the "dbName" database to fix IE delays.
| if (IdbPouch.openReqList[dbName] && IdbPouch.openReqList[dbName].result) { |
| IdbPouch.openReqList[dbName].result.close(); |
| delete cachedDBs[dbName]; |
| } |
| var req = indexedDB.deleteDatabase(dbName); |
| |
| req.onsuccess = function () { |
      // Remove the open request from the list.
| if (IdbPouch.openReqList[dbName]) { |
| IdbPouch.openReqList[dbName] = null; |
| } |
| if (hasLocalStorage() && (dbName in localStorage)) { |
| delete localStorage[dbName]; |
| } |
| callback(null, { 'ok': true }); |
| }; |
| |
| req.onerror = idbError(callback); |
| }; |
| |
| var cached = cachedDBs[dbName]; |
| |
| if (cached) { |
| idb = cached.idb; |
| api._meta = cached.global; |
| process.nextTick(function () { |
| callback(null, api); |
| }); |
| return; |
| } |
| |
| var req = indexedDB.open(dbName, ADAPTER_VERSION); |
| |
| if (!('openReqList' in IdbPouch)) { |
| IdbPouch.openReqList = {}; |
| } |
| IdbPouch.openReqList[dbName] = req; |
| |
| req.onupgradeneeded = function (e) { |
| var db = e.target.result; |
| if (e.oldVersion < 1) { |
| return createSchema(db); // new db, initial schema |
| } |
| // do migrations |
| |
| var txn = e.currentTarget.transaction; |
    // these migrations have to be done in this function, before
    // control is returned to the event loop, because IndexedDB
    // auto-commits the versionchange transaction once no requests are pending
| |
| if (e.oldVersion < 3) { |
| createLocalStoreSchema(db); // v2 -> v3 |
| } |
| if (e.oldVersion < 4) { |
| addAttachAndSeqStore(db); // v3 -> v4 |
| } |
| |
| var migrations = [ |
| addDeletedOrLocalIndex, // v1 -> v2 |
| migrateLocalStore, // v2 -> v3 |
| migrateAttsAndSeqs, // v3 -> v4 |
| migrateMetadata // v4 -> v5 |
| ]; |
| |
| var i = e.oldVersion; |
| |
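    // apply the remaining data migrations one after another, starting at
    // the version found on disk; each migration gets next() as its
    // completion callback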
| function next() { |
| var migration = migrations[i - 1]; |
| i++; |
| if (migration) { |
| migration(txn, next); |
| } |
| } |
| |
| next(); |
| }; |
| |
| req.onsuccess = function (e) { |
| |
| idb = e.target.result; |
| |
| idb.onversionchange = function () { |
| idb.close(); |
| delete cachedDBs[dbName]; |
| }; |
| idb.onabort = function () { |
| idb.close(); |
| delete cachedDBs[dbName]; |
| }; |
| |
| var txn = idb.transaction([ |
| META_STORE, |
| DETECT_BLOB_SUPPORT_STORE, |
| DOC_STORE |
| ], 'readwrite'); |
| |
| var req = txn.objectStore(META_STORE).get(META_STORE); |
| |
| var blobSupport = null; |
| var docCount = null; |
| var instanceId = null; |
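    // setup is complete only once blobSupport, docCount and instanceId are
    // all known; each async branch below calls checkSetupComplete()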
| |
| req.onsuccess = function (e) { |
| |
| var checkSetupComplete = function () { |
| if (blobSupport === null || docCount === null || |
| instanceId === null) { |
| return; |
| } else { |
| api._meta = { |
| name: dbName, |
| instanceId: instanceId, |
| blobSupport: blobSupport, |
| docCount: docCount |
| }; |
| |
| cachedDBs[dbName] = { |
| idb: idb, |
| global: api._meta |
| }; |
| callback(null, api); |
| } |
| }; |
| |
| // |
| // fetch/store the id |
| // |
| |
| var meta = e.target.result || {id: META_STORE}; |
| if (dbName + '_id' in meta) { |
| instanceId = meta[dbName + '_id']; |
| checkSetupComplete(); |
| } else { |
| instanceId = utils.uuid(); |
| meta[dbName + '_id'] = instanceId; |
| txn.objectStore(META_STORE).put(meta).onsuccess = function () { |
| checkSetupComplete(); |
| }; |
| } |
| |
| // |
| // check blob support |
| // |
| |
| if (!blobSupportPromise) { |
| // make sure blob support is only checked once |
| blobSupportPromise = checkBlobSupport(txn, idb); |
| } |
| |
| blobSupportPromise.then(function (val) { |
| blobSupport = val; |
| checkSetupComplete(); |
| }); |
| |
| // |
| // count docs |
| // |
| |
| var index = txn.objectStore(DOC_STORE).index('deletedOrLocal'); |
| index.count(IDBKeyRange.only('0')).onsuccess = function (e) { |
| docCount = e.target.result; |
| checkSetupComplete(); |
| }; |
| |
| }; |
| }; |
| |
  req.onerror = function () {
| var msg = 'Failed to open indexedDB, are you in private browsing mode?'; |
| console.error(msg); |
| callback(errors.error(errors.IDB_ERROR, msg)); |
| }; |
| |
| } |
| |
| IdbPouch.valid = function () { |
| // Issue #2533, we finally gave up on doing bug |
| // detection instead of browser sniffing. Safari brought us |
| // to our knees. |
| var isSafari = typeof openDatabase !== 'undefined' && |
| /(Safari|iPhone|iPad|iPod)/.test(navigator.userAgent) && |
| !/Chrome/.test(navigator.userAgent) && |
| !/BlackBerry/.test(navigator.platform); |
| |
| // some outdated implementations of IDB that appear on Samsung |
| // and HTC Android devices <4.4 are missing IDBKeyRange |
| return !isSafari && typeof indexedDB !== 'undefined' && |
| typeof IDBKeyRange !== 'undefined'; |
| }; |
| |
| IdbPouch.Changes = new utils.Changes(); |
| |
| module.exports = IdbPouch; |