import { createError, IDB_ERROR } from 'pouchdb-errors';
import { collectConflicts } from 'pouchdb-merge';
import {
  ATTACH_STORE,
  BY_SEQ_STORE,
  DOC_STORE,
  META_STORE
} from './constants';
import {
  decodeDoc,
  decodeMetadata,
  fetchAttachmentsIfNecessary,
  postProcessAttachments,
  openTransactionSafely,
  idbError
} from './utils';
import runBatchedCursor from './runBatchedCursor';
import getAll from './getAll';

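// Builds a single IDBKeyRange from PouchDB's startkey/endkey/key options.
// For a descending cursor the walk goes from high to low keys, so startkey
// becomes the range's upper bound and endkey its lower bound. IDBKeyRange
// constructors throw (e.g. a DataError when the bounds are inverted), so
// any exception is captured and returned as {error: e} for the caller to
// inspect rather than being thrown.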
function createKeyRange(start, end, inclusiveEnd, key, descending) {
  try {
    if (start && end) {
      if (descending) {
        return IDBKeyRange.bound(end, start, !inclusiveEnd, false);
      } else {
        return IDBKeyRange.bound(start, end, false, !inclusiveEnd);
      }
    } else if (start) {
      if (descending) {
        return IDBKeyRange.upperBound(start);
      } else {
        return IDBKeyRange.lowerBound(start);
      }
    } else if (end) {
      if (descending) {
        return IDBKeyRange.lowerBound(end, !inclusiveEnd);
      } else {
        return IDBKeyRange.upperBound(end, !inclusiveEnd);
      }
    } else if (key) {
      return IDBKeyRange.only(key);
    }
  } catch (e) {
    return {error: e};
  }
  return null;
}
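
// A few example mappings (a sketch; the keys here are hypothetical):
//   createKeyRange('a', 'z', true, false, false)
//     -> IDBKeyRange.bound('a', 'z', false, false)
//   createKeyRange('z', 'a', true, false, true)   // descending
//     -> IDBKeyRange.bound('a', 'z', false, false)
//   createKeyRange(false, false, true, 'doc-1', false)
//     -> IDBKeyRange.only('doc-1')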
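// idbAllDocs implements _all_docs for the IndexedDB adapter. It honors the
// allDocs() options used below: startkey/endkey/key, inclusive_end,
// descending, skip, limit, include_docs, conflicts, attachments (plus
// binary), and deleted === 'ok'. The callback receives
// {total_rows, offset, rows} in CouchDB's _all_docs response shape.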
function idbAllDocs(opts, idb, callback) {
  var start = 'startkey' in opts ? opts.startkey : false;
  var end = 'endkey' in opts ? opts.endkey : false;
  var key = 'key' in opts ? opts.key : false;
  var skip = opts.skip || 0;
  var limit = typeof opts.limit === 'number' ? opts.limit : -1;
  var inclusiveEnd = opts.inclusive_end !== false;

  var keyRange = createKeyRange(start, end, inclusiveEnd, key, opts.descending);
  var keyRangeError = keyRange && keyRange.error;
  if (keyRangeError && !(keyRangeError.name === "DataError" &&
      keyRangeError.code === 0)) {
    // A DataError with code 0 means the bounds are inverted for the
    // requested direction (startkey is past endkey), so we can just run an
    // empty query. Anything else needs to be surfaced to the caller.
    return callback(createError(IDB_ERROR,
      keyRangeError.name, keyRangeError.message));
  }

  var stores = [DOC_STORE, BY_SEQ_STORE, META_STORE];

  if (opts.attachments) {
    stores.push(ATTACH_STORE);
  }
  var txnResult = openTransactionSafely(idb, stores, 'readonly');
  if (txnResult.error) {
    return callback(txnResult.error);
  }
  var txn = txnResult.txn;
  txn.oncomplete = onTxnComplete;
  txn.onabort = idbError(callback);
  var docStore = txn.objectStore(DOC_STORE);
  var seqStore = txn.objectStore(BY_SEQ_STORE);
  var metaStore = txn.objectStore(META_STORE);
  var docIdRevIndex = seqStore.index('_doc_id_rev');
  var results = [];
  var docCount;

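  // This read runs on the same transaction as everything else, so it is
  // guaranteed to have completed (and docCount to be set) before
  // txn.oncomplete fires and onResultsReady reports total_rows.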
  metaStore.get(META_STORE).onsuccess = function (e) {
    docCount = e.target.result.docCount;
  };

  // if the user specifies include_docs=true, then we don't
  // want to block the main cursor while we're fetching the doc
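  // (the get() below registers another request on the same transaction,
  // which keeps the transaction alive until the doc has been fetched and
  // decoded, so every row.doc is populated before txn.oncomplete fires)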
  function fetchDocAsynchronously(metadata, row, winningRev) {
    var key = metadata.id + "::" + winningRev;
    docIdRevIndex.get(key).onsuccess = function onGetDoc(e) {
      row.doc = decodeDoc(e.target.result);
      if (opts.conflicts) {
        var conflicts = collectConflicts(metadata);
        if (conflicts.length) {
          row.doc._conflicts = conflicts;
        }
      }
      fetchAttachmentsIfNecessary(row.doc, opts, txn);
    };
  }

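  // Builds one CouchDB-style _all_docs row ({id, key, value: {rev}}).
  // In the normal path, skip-- only counts down on non-deleted rows, so
  // `skip` skips rows that would actually be returned.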
  function allDocsInner(winningRev, metadata) {
    var row = {
      id: metadata.id,
      key: metadata.id,
      value: {
        rev: winningRev
      }
    };
    var deleted = metadata.deleted;
    if (opts.deleted === 'ok') {
      results.push(row);
      // deleted docs are okay with "keys" requests
      if (deleted) {
        row.value.deleted = true;
        row.doc = null;
      } else if (opts.include_docs) {
        fetchDocAsynchronously(metadata, row, winningRev);
      }
    } else if (!deleted && skip-- <= 0) {
      results.push(row);
      if (opts.include_docs) {
        fetchDocAsynchronously(metadata, row, winningRev);
      }
    }
  }

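  // Decodes one batch of metadata values, stopping early once `limit`
  // rows have been collected.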
  function processBatch(batchValues) {
    for (var i = 0, len = batchValues.length; i < len; i++) {
      if (results.length === limit) {
        break;
      }
      var batchValue = batchValues[i];
      var metadata = decodeMetadata(batchValue);
      var winningRev = metadata.winningRev;
      allDocsInner(winningRev, metadata);
    }
  }

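  // Invoked by runBatchedCursor; a missing cursor means the range is
  // exhausted, otherwise keep paging until `limit` rows are collected.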
  function onBatch(batchKeys, batchValues, cursor) {
    if (!cursor) {
      return;
    }
    processBatch(batchValues);
    if (results.length < limit) {
      cursor.continue();
    }
  }

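  // Used when there is no limit: getAll() returns every value at once in
  // ascending key order, so reverse the array for descending queries.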
  function onGetAll(e) {
    var values = e.target.result;
    if (opts.descending) {
      values = values.reverse();
    }
    processBatch(values);
  }

  function onResultsReady() {
    callback(null, {
      total_rows: docCount,
      offset: opts.skip,
      rows: results
    });
  }

  function onTxnComplete() {
    if (opts.attachments) {
      postProcessAttachments(results, opts.binary).then(onResultsReady);
    } else {
      onResultsReady();
    }
  }

  // don't bother issuing any requests if the range is inverted
  // (keyRangeError) or if limit === 0; the transaction's oncomplete
  // handler will still fire and report an empty result set
  if (keyRangeError || limit === 0) {
    return;
  }
  if (limit === -1) { // no limit; just fetch everything
    return getAll(docStore, keyRange, onGetAll);
  }
  // else use a cursor, with a batch size based on limit + skip, since
  // we'll need to read (and then discard) the first `skip` rows
  runBatchedCursor(docStore, keyRange, opts.descending, limit + skip, onBatch);
}

export default idbAllDocs;
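
// A minimal usage sketch (hypothetical caller; in the real adapter this is
// wired up behind the public db.allDocs() API):
//
//   idbAllDocs({include_docs: true, limit: 10}, idb, function (err, res) {
//     if (err) { return console.error(err); }
//     console.log(res.total_rows, res.rows);
//   });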