import { createError, IDB_ERROR } from 'pouchdb-errors';
import { collectConflicts } from 'pouchdb-merge';
import {
  ATTACH_STORE,
  BY_SEQ_STORE,
  DOC_STORE,
  META_STORE
} from './constants';
import {
  decodeDoc,
  decodeMetadata,
  fetchAttachmentsIfNecessary,
  postProcessAttachments,
  openTransactionSafely,
  idbError
} from './utils';
import runBatchedCursor from './runBatchedCursor';
import getAll from './getAll';
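
// Fetch the given keys directly from the doc store, preserving the order of
// the requested keys; missing keys produce {key, error: 'not_found'} rows.
// All results are delivered to onBatch in a single batch.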
function allDocsKeys(keys, docStore, onBatch) {
  // Results are not guaranteed to come back in the right order,
  // so index them by their position in the requested keys
  var valuesBatch = new Array(keys.length);
  var count = 0;
  keys.forEach(function (key, index) {
    docStore.get(key).onsuccess = function (event) {
      if (event.target.result) {
        valuesBatch[index] = event.target.result;
      } else {
        valuesBatch[index] = {key: key, error: 'not_found'};
      }
      count++;
      if (count === keys.length) {
        onBatch(keys, valuesBatch, {});
      }
    };
  });
}
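
// Build an IDBKeyRange from the startkey/endkey/key options, swapping and
// inverting the bounds for descending queries. Returns null when no range
// applies, or {error} if IndexedDB rejects the given bounds.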
function createKeyRange(start, end, inclusiveEnd, key, descending) {
  try {
    if (start && end) {
      if (descending) {
        return IDBKeyRange.bound(end, start, !inclusiveEnd, false);
      } else {
        return IDBKeyRange.bound(start, end, false, !inclusiveEnd);
      }
    } else if (start) {
      if (descending) {
        return IDBKeyRange.upperBound(start);
      } else {
        return IDBKeyRange.lowerBound(start);
      }
    } else if (end) {
      if (descending) {
        return IDBKeyRange.lowerBound(end, !inclusiveEnd);
      } else {
        return IDBKeyRange.upperBound(end, !inclusiveEnd);
      }
    } else if (key) {
      return IDBKeyRange.only(key);
    }
  } catch (e) {
    return {error: e};
  }
  return null;
}
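
// Main allDocs implementation for the IndexedDB adapter: opens a readonly
// transaction over the relevant stores and answers the query via direct key
// lookups, getAll(), or a batched cursor, depending on the options.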
function idbAllDocs(opts, idb, callback) {
  var start = 'startkey' in opts ? opts.startkey : false;
  var end = 'endkey' in opts ? opts.endkey : false;
  var key = 'key' in opts ? opts.key : false;
  var keys = 'keys' in opts ? opts.keys : false;
  var skip = opts.skip || 0;
  var limit = typeof opts.limit === 'number' ? opts.limit : -1;
  var inclusiveEnd = opts.inclusive_end !== false;

  var keyRange;
  var keyRangeError;
  if (!keys) {
    keyRange = createKeyRange(start, end, inclusiveEnd, key, opts.descending);
    keyRangeError = keyRange && keyRange.error;
    if (keyRangeError &&
      !(keyRangeError.name === "DataError" && keyRangeError.code === 0)) {
      // DataError with error code 0 indicates start is less than end, so
      // we can just do an empty query. Otherwise we need to throw.
      return callback(createError(IDB_ERROR,
        keyRangeError.name, keyRangeError.message));
    }
  }

  var stores = [DOC_STORE, BY_SEQ_STORE, META_STORE];
  if (opts.attachments) {
    stores.push(ATTACH_STORE);
  }

  var txnResult = openTransactionSafely(idb, stores, 'readonly');
  if (txnResult.error) {
    return callback(txnResult.error);
  }
  var txn = txnResult.txn;
  txn.oncomplete = onTxnComplete;
  txn.onabort = idbError(callback);
  var docStore = txn.objectStore(DOC_STORE);
  var seqStore = txn.objectStore(BY_SEQ_STORE);
  var metaStore = txn.objectStore(META_STORE);
  var docIdRevIndex = seqStore.index('_doc_id_rev');
  var results = [];
  var docCount;
  var updateSeq;

  metaStore.get(META_STORE).onsuccess = function (e) {
    docCount = e.target.result.docCount;
  };

  /* istanbul ignore if */
  if (opts.update_seq) {
    getMaxUpdateSeq(seqStore, function (e) {
      if (e.target.result && e.target.result.length > 0) {
        updateSeq = e.target.result[0];
      }
    });
  }
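
  // Find the highest seq in the by-sequence store by opening a cursor in
  // 'prev' (descending) order; the first key it yields is the max update_seq,
  // delivered wrapped in an event-like {target: {result: [...]}} object.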
  function getMaxUpdateSeq(objectStore, onSuccess) {
    function onCursor(e) {
      var cursor = e.target.result;
      var maxKey = undefined;
      if (cursor && cursor.key) {
        maxKey = cursor.key;
      }
      return onSuccess({
        target: {
          result: [maxKey]
        }
      });
    }
    objectStore.openCursor(null, 'prev').onsuccess = onCursor;
  }

  // if the user specifies include_docs=true, then we don't
  // want to block the main cursor while we're fetching the doc
  function fetchDocAsynchronously(metadata, row, winningRev) {
    var key = metadata.id + "::" + winningRev;
    docIdRevIndex.get(key).onsuccess = function onGetDoc(e) {
      row.doc = decodeDoc(e.target.result) || {};
      if (opts.conflicts) {
        var conflicts = collectConflicts(metadata);
        if (conflicts.length) {
          row.doc._conflicts = conflicts;
        }
      }
      fetchAttachmentsIfNecessary(row.doc, opts, txn);
    };
  }
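
  // Turn a winning rev plus metadata into a result row, honoring skip,
  // include_docs, and the special handling of deleted docs for "keys" requests.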
  function allDocsInner(winningRev, metadata) {
    var row = {
      id: metadata.id,
      key: metadata.id,
      value: {
        rev: winningRev
      }
    };
    var deleted = metadata.deleted;
    if (deleted) {
      if (keys) {
        results.push(row);
        // deleted docs are okay with "keys" requests
        row.value.deleted = true;
        row.doc = null;
      }
    } else if (skip-- <= 0) {
      results.push(row);
      if (opts.include_docs) {
        fetchDocAsynchronously(metadata, row, winningRev);
      }
    }
  }
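
  // Decode each value in a batch and feed it to allDocsInner, stopping as
  // soon as the requested limit has been reached.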
  function processBatch(batchValues) {
    for (var i = 0, len = batchValues.length; i < len; i++) {
      if (results.length === limit) {
        break;
      }
      var batchValue = batchValues[i];
      if (batchValue.error && keys) {
        // key was not found with "keys" requests
        results.push(batchValue);
        continue;
      }
      var metadata = decodeMetadata(batchValue);
      var winningRev = metadata.winningRev;
      allDocsInner(winningRev, metadata);
    }
  }
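
  // Cursor callback: process the current batch, then advance the cursor if
  // more rows are still needed to reach the limit.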
  function onBatch(batchKeys, batchValues, cursor) {
    if (!cursor) {
      return;
    }
    processBatch(batchValues);
    if (results.length < limit) {
      cursor.continue();
    }
  }
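
  // getAll() callback for the unlimited case; values are reversed first when
  // a descending order was requested.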
  function onGetAll(e) {
    var values = e.target.result;
    if (opts.descending) {
      values = values.reverse();
    }
    processBatch(values);
  }
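
  // Assemble the final response once the transaction has completed.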
  function onResultsReady() {
    var returnVal = {
      total_rows: docCount,
      offset: opts.skip,
      rows: results
    };

    /* istanbul ignore if */
    if (opts.update_seq && updateSeq !== undefined) {
      returnVal.update_seq = updateSeq;
    }

    callback(null, returnVal);
  }
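
  // Once the transaction completes, post-process attachments (if requested)
  // before handing the results back.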
  function onTxnComplete() {
    if (opts.attachments) {
      postProcessAttachments(results, opts.binary).then(onResultsReady);
    } else {
      onResultsReady();
    }
  }

  // don't bother doing any requests if start > end or limit === 0
  if (keyRangeError || limit === 0) {
    return;
  }
  if (keys) {
    return allDocsKeys(opts.keys, docStore, onBatch);
  }
  if (limit === -1) { // just fetch everything
    return getAll(docStore, keyRange, onGetAll);
  }
  // else do a cursor
  // choose a batch size based on the skip, since we'll need to skip that many
  runBatchedCursor(docStore, keyRange, opts.descending, limit + skip, onBatch);
}
export default idbAllDocs;