| 'use strict'; |
| |
| var utils = require('../utils'); |
| var merge = require('../merge'); |
| var errors = require('../deps/errors'); |
| var vuvuzela = require('vuvuzela'); |
| var parseHexString = require('../deps/parse-hex'); |
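// vuvuzela (de)serializes JSON iteratively, which avoids stack overflows
// on deeply nested revision trees; parse-hex decodes SQLite HEX() output
// back into a string, given the database's encoding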
| |
| function quote(str) { |
| return "'" + str + "'"; |
| } |
| |
| // escapeBlob and unescapeBlob are workarounds for a websql bug: |
| // https://code.google.com/p/chromium/issues/detail?id=422690 |
| // https://bugs.webkit.org/show_bug.cgi?id=137637 |
| // The goal is to never actually insert the \u0000 character |
| // in the database. |
| function escapeBlob(str) { |
| return str |
| .replace(/\u0002/g, '\u0002\u0002') |
| .replace(/\u0001/g, '\u0001\u0002') |
| .replace(/\u0000/g, '\u0001\u0001'); |
| } |
| |
| function unescapeBlob(str) { |
| return str |
| .replace(/\u0001\u0001/g, '\u0000') |
| .replace(/\u0001\u0002/g, '\u0001') |
| .replace(/\u0002\u0002/g, '\u0002'); |
| } |
| |
| var cachedDatabases = {}; |
| |
| var openDB = utils.getArguments(function (args) { |
| if (typeof global !== 'undefined') { |
| if (global.navigator && global.navigator.sqlitePlugin && |
| global.navigator.sqlitePlugin.openDatabase) { |
      return global.navigator.sqlitePlugin.openDatabase
        .apply(global.navigator.sqlitePlugin, args);
| } else if (global.sqlitePlugin && global.sqlitePlugin.openDatabase) { |
| return global.sqlitePlugin.openDatabase |
| .apply(global.sqlitePlugin, args); |
| } else { |
| var db = cachedDatabases[args[0]]; |
| if (!db) { |
| db = cachedDatabases[args[0]] = |
| global.openDatabase.apply(global, args); |
| } |
| return db; |
| } |
| } |
| }); |
| |
| var POUCH_VERSION = 1; |
| var ADAPTER_VERSION = 6; // used to manage migrations |
| |
| // The object stores created for each database |
// DOC_STORE stores the document metadata, its revision history and state
| var DOC_STORE = quote('document-store'); |
| // BY_SEQ_STORE stores a particular version of a document, keyed by its |
| // sequence id |
| var BY_SEQ_STORE = quote('by-sequence'); |
| // Where we store attachments |
| var ATTACH_STORE = quote('attach-store'); |
| var LOCAL_STORE = quote('local-store'); |
| var META_STORE = quote('metadata-store'); |
| // where we store many-to-many relations between attachment |
| // digests and seqs |
| var ATTACH_AND_SEQ_STORE = quote('attach-seq-store'); |
| |
| // these indexes cover the ground for most allDocs queries |
| var BY_SEQ_STORE_DELETED_INDEX_SQL = |
| 'CREATE INDEX IF NOT EXISTS \'by-seq-deleted-idx\' ON ' + |
| BY_SEQ_STORE + ' (seq, deleted)'; |
| var BY_SEQ_STORE_DOC_ID_REV_INDEX_SQL = |
| 'CREATE UNIQUE INDEX IF NOT EXISTS \'by-seq-doc-id-rev\' ON ' + |
| BY_SEQ_STORE + ' (doc_id, rev)'; |
| var DOC_STORE_WINNINGSEQ_INDEX_SQL = |
| 'CREATE INDEX IF NOT EXISTS \'doc-winningseq-idx\' ON ' + |
| DOC_STORE + ' (winningseq)'; |
| var ATTACH_AND_SEQ_STORE_SEQ_INDEX_SQL = |
| 'CREATE INDEX IF NOT EXISTS \'attach-seq-seq-idx\' ON ' + |
| ATTACH_AND_SEQ_STORE + ' (seq)'; |
| var ATTACH_AND_SEQ_STORE_ATTACH_INDEX_SQL = |
| 'CREATE UNIQUE INDEX IF NOT EXISTS \'attach-seq-digest-idx\' ON ' + |
| ATTACH_AND_SEQ_STORE + ' (digest, seq)'; |
| |
| var DOC_STORE_AND_BY_SEQ_JOINER = BY_SEQ_STORE + |
| '.seq = ' + DOC_STORE + '.winningseq'; |
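// i.e. "'by-sequence'.seq = 'document-store'.winningseq": join each document
// to the by-seq row that holds its winning revision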
| |
| var SELECT_DOCS = BY_SEQ_STORE + '.seq AS seq, ' + |
| BY_SEQ_STORE + '.deleted AS deleted, ' + |
| BY_SEQ_STORE + '.json AS data, ' + |
| BY_SEQ_STORE + '.rev AS rev, ' + |
| DOC_STORE + '.json AS metadata'; |
| |
| function select(selector, table, joiner, where, orderBy) { |
| return 'SELECT ' + selector + ' FROM ' + |
| (typeof table === 'string' ? table : table.join(' JOIN ')) + |
| (joiner ? (' ON ' + joiner) : '') + |
| (where ? (' WHERE ' + |
| (typeof where === 'string' ? where : where.join(' AND '))) : '') + |
| (orderBy ? (' ORDER BY ' + orderBy) : ''); |
| } |
| |
| function unknownError(callback) { |
| return function (event) { |
    // event may actually be a SQLError object, so report it as such
| var errorNameMatch = event && event.constructor.toString() |
| .match(/function ([^\(]+)/); |
| var errorName = (errorNameMatch && errorNameMatch[1]) || event.type; |
| var errorReason = event.target || event.message; |
| callback(errors.error(errors.WSQ_ERROR, errorReason, errorName)); |
| }; |
| } |
| |
| function stringifyDoc(doc) { |
  // don't bother storing the id/rev; they use a lot of space,
  // especially in persistent map/reduce
| delete doc._id; |
| delete doc._rev; |
| return JSON.stringify(doc); |
| } |
| |
| function unstringifyDoc(doc, id, rev) { |
| doc = JSON.parse(doc); |
| doc._id = id; |
| doc._rev = rev; |
| return doc; |
| } |
| |
| function getSize(opts) { |
| if ('size' in opts) { |
| // triggers immediate popup in iOS, fixes #2347 |
    // e.g. 5000001 asks for 5 MB, 10000001 asks for 10 MB, etc.
| return opts.size * 1000000; |
| } |
| // In iOS, doesn't matter as long as it's <= 5000000. |
| // Except that if you request too much, our tests fail |
| // because of the native "do you accept?" popup. |
| // In Android <=4.3, this value is actually used as an |
| // honest-to-god ceiling for data, so we need to |
| // set it to a decently high number. |
| var isAndroid = /Android/.test(window.navigator.userAgent); |
| return isAndroid ? 5000000 : 1; // in PhantomJS, if you use 0 it will crash |
| } |
| |
| function WebSqlPouch(opts, callback) { |
| var api = this; |
| var instanceId = null; |
| var name = opts.name; |
| var size = getSize(opts); |
| var idRequests = []; |
| var docCount = -1; // cache sqlite count(*) for performance |
| var encoding; |
| |
| var db = openDB(name, POUCH_VERSION, name, size); |
| if (!db) { |
| return callback(errors.UNKNOWN_ERROR); |
| } else if (typeof db.readTransaction !== 'function') { |
| // doesn't exist in sqlite plugin |
| db.readTransaction = db.transaction; |
| } |
| |
| function dbCreated() { |
| // note the db name in case the browser upgrades to idb |
| if (utils.hasLocalStorage()) { |
| global.localStorage['_pouch__websqldb_' + name] = true; |
| } |
| callback(null, api); |
| } |
| |
| // In this migration, we added the 'deleted' and 'local' columns to the |
| // by-seq and doc store tables. |
| // To preserve existing user data, we re-process all the existing JSON |
| // and add these values. |
| // Called migration2 because it corresponds to adapter version (db_version) #2 |
| function runMigration2(tx, callback) { |
| // index used for the join in the allDocs query |
| tx.executeSql(DOC_STORE_WINNINGSEQ_INDEX_SQL); |
| |
| tx.executeSql('ALTER TABLE ' + BY_SEQ_STORE + |
| ' ADD COLUMN deleted TINYINT(1) DEFAULT 0', [], function () { |
| tx.executeSql(BY_SEQ_STORE_DELETED_INDEX_SQL); |
| tx.executeSql('ALTER TABLE ' + DOC_STORE + |
| ' ADD COLUMN local TINYINT(1) DEFAULT 0', [], function () { |
| tx.executeSql('CREATE INDEX IF NOT EXISTS \'doc-store-local-idx\' ON ' + |
| DOC_STORE + ' (local, id)'); |
| |
| var sql = 'SELECT ' + DOC_STORE + '.winningseq AS seq, ' + DOC_STORE + |
| '.json AS metadata FROM ' + BY_SEQ_STORE + ' JOIN ' + DOC_STORE + |
| ' ON ' + BY_SEQ_STORE + '.seq = ' + DOC_STORE + '.winningseq'; |
| |
| tx.executeSql(sql, [], function (tx, result) { |
| |
| var deleted = []; |
| var local = []; |
| |
| for (var i = 0; i < result.rows.length; i++) { |
| var item = result.rows.item(i); |
| var seq = item.seq; |
| var metadata = JSON.parse(item.metadata); |
| if (utils.isDeleted(metadata)) { |
| deleted.push(seq); |
| } |
| if (utils.isLocalId(metadata.id)) { |
| local.push(metadata.id); |
| } |
| } |
          tx.executeSql('UPDATE ' + DOC_STORE + ' SET local = 1 WHERE id IN (' +
| local.map(function () { |
| return '?'; |
| }).join(',') + ')', local, function () { |
| tx.executeSql('UPDATE ' + BY_SEQ_STORE + |
| ' SET deleted = 1 WHERE seq IN (' + deleted.map(function () { |
| return '?'; |
| }).join(',') + ')', deleted, callback); |
| }); |
| }); |
| }); |
| }); |
| } |
| |
| // in this migration, we make all the local docs unversioned |
| function runMigration3(tx, callback) { |
| var local = 'CREATE TABLE IF NOT EXISTS ' + LOCAL_STORE + |
| ' (id UNIQUE, rev, json)'; |
| tx.executeSql(local, [], function () { |
      // also select the seq, which the by-seq DELETE below needs
      var sql = 'SELECT ' + BY_SEQ_STORE + '.seq AS seq, ' +
        DOC_STORE + '.id AS id, ' +
        BY_SEQ_STORE + '.json AS data ' +
| 'FROM ' + BY_SEQ_STORE + ' JOIN ' + |
| DOC_STORE + ' ON ' + BY_SEQ_STORE + '.seq = ' + |
| DOC_STORE + '.winningseq WHERE local = 1'; |
| tx.executeSql(sql, [], function (tx, res) { |
| var rows = []; |
| for (var i = 0; i < res.rows.length; i++) { |
| rows.push(res.rows.item(i)); |
| } |
| function doNext() { |
| if (!rows.length) { |
| return callback(tx); |
| } |
| var row = rows.shift(); |
| var rev = JSON.parse(row.data)._rev; |
| tx.executeSql('INSERT INTO ' + LOCAL_STORE + |
| ' (id, rev, json) VALUES (?,?,?)', |
| [row.id, rev, row.data], function (tx) { |
| tx.executeSql('DELETE FROM ' + DOC_STORE + ' WHERE id=?', |
| [row.id], function (tx) { |
| tx.executeSql('DELETE FROM ' + BY_SEQ_STORE + ' WHERE seq=?', |
| [row.seq], function () { |
| doNext(); |
| }); |
| }); |
| }); |
| } |
| doNext(); |
| }); |
| }); |
| } |
| |
| // in this migration, we remove doc_id_rev and just use rev |
| function runMigration4(tx, callback) { |
| |
| function updateRows(rows) { |
| function doNext() { |
| if (!rows.length) { |
| return callback(tx); |
| } |
| var row = rows.shift(); |
| var doc_id_rev = parseHexString(row.hex, encoding); |
| var idx = doc_id_rev.lastIndexOf('::'); |
| var doc_id = doc_id_rev.substring(0, idx); |
| var rev = doc_id_rev.substring(idx + 2); |
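        // e.g. a doc_id_rev of 'mydoc::1-abc' splits into doc_id 'mydoc'
        // and rev '1-abc'; lastIndexOf guards against '::' inside the id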
| var sql = 'UPDATE ' + BY_SEQ_STORE + |
| ' SET doc_id=?, rev=? WHERE doc_id_rev=?'; |
| tx.executeSql(sql, [doc_id, rev, doc_id_rev], function () { |
| doNext(); |
| }); |
| } |
| doNext(); |
| } |
| |
| var sql = 'ALTER TABLE ' + BY_SEQ_STORE + ' ADD COLUMN doc_id'; |
| tx.executeSql(sql, [], function (tx) { |
| var sql = 'ALTER TABLE ' + BY_SEQ_STORE + ' ADD COLUMN rev'; |
| tx.executeSql(sql, [], function (tx) { |
| tx.executeSql(BY_SEQ_STORE_DOC_ID_REV_INDEX_SQL, [], function (tx) { |
| var sql = 'SELECT hex(doc_id_rev) as hex FROM ' + BY_SEQ_STORE; |
| tx.executeSql(sql, [], function (tx, res) { |
| var rows = []; |
| for (var i = 0; i < res.rows.length; i++) { |
| rows.push(res.rows.item(i)); |
| } |
| updateRows(rows); |
| }); |
| }); |
| }); |
| }); |
| } |
| |
| // in this migration, we add the attach_and_seq table |
| // for issue #2818 |
| function runMigration5(tx, callback) { |
| |
| function migrateAttsAndSeqs(tx) { |
| // need to actually populate the table. this is the expensive part, |
| // so as an optimization, check first that this database even |
| // contains attachments |
| var sql = 'SELECT COUNT(*) AS cnt FROM ' + ATTACH_STORE; |
| tx.executeSql(sql, [], function (tx, res) { |
| var count = res.rows.item(0).cnt; |
| if (!count) { |
| return callback(tx); |
| } |
| |
| var offset = 0; |
| var pageSize = 10; |
| function nextPage() { |
| var sql = select( |
| SELECT_DOCS + ', ' + DOC_STORE + '.id AS id', |
| [DOC_STORE, BY_SEQ_STORE], |
| DOC_STORE_AND_BY_SEQ_JOINER, |
| null, |
| DOC_STORE + '.id ' |
| ); |
| sql += ' LIMIT ' + pageSize + ' OFFSET ' + offset; |
| offset += pageSize; |
| tx.executeSql(sql, [], function (tx, res) { |
| if (!res.rows.length) { |
| return callback(tx); |
| } |
| var digestSeqs = {}; |
| function addDigestSeq(digest, seq) { |
| // uniq digest/seq pairs, just in case there are dups |
| var seqs = digestSeqs[digest] = (digestSeqs[digest] || []); |
| if (seqs.indexOf(seq) === -1) { |
| seqs.push(seq); |
| } |
| } |
| for (var i = 0; i < res.rows.length; i++) { |
| var row = res.rows.item(i); |
| var doc = unstringifyDoc(row.data, row.id, row.rev); |
| var atts = Object.keys(doc._attachments || {}); |
| for (var j = 0; j < atts.length; j++) { |
| var att = doc._attachments[atts[j]]; |
| addDigestSeq(att.digest, row.seq); |
| } |
| } |
| var digestSeqPairs = []; |
| Object.keys(digestSeqs).forEach(function (digest) { |
| var seqs = digestSeqs[digest]; |
| seqs.forEach(function (seq) { |
| digestSeqPairs.push([digest, seq]); |
| }); |
| }); |
| if (!digestSeqPairs.length) { |
| return nextPage(); |
| } |
| var numDone = 0; |
| digestSeqPairs.forEach(function (pair) { |
| var sql = 'INSERT INTO ' + ATTACH_AND_SEQ_STORE + |
| ' (digest, seq) VALUES (?,?)'; |
| tx.executeSql(sql, pair, function () { |
| if (++numDone === digestSeqPairs.length) { |
| nextPage(); |
| } |
| }); |
| }); |
| }); |
| } |
| nextPage(); |
| }); |
| } |
| |
| var attachAndRev = 'CREATE TABLE IF NOT EXISTS ' + |
| ATTACH_AND_SEQ_STORE + ' (digest, seq INTEGER)'; |
| tx.executeSql(attachAndRev, [], function (tx) { |
| tx.executeSql( |
| ATTACH_AND_SEQ_STORE_ATTACH_INDEX_SQL, [], function (tx) { |
| tx.executeSql( |
| ATTACH_AND_SEQ_STORE_SEQ_INDEX_SQL, [], |
| migrateAttsAndSeqs); |
| }); |
| }); |
| } |
| |
| // in this migration, we use escapeBlob() and unescapeBlob() |
| // instead of reading out the binary as HEX, which is slow |
| function runMigration6(tx, callback) { |
| var sql = 'ALTER TABLE ' + ATTACH_STORE + |
| ' ADD COLUMN escaped TINYINT(1) DEFAULT 0'; |
| tx.executeSql(sql, [], callback); |
| } |
| |
| function checkEncoding(tx, cb) { |
| // UTF-8 on chrome/android, UTF-16 on safari < 7.1 |
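    // HEX("a") returns '61' in a UTF-8 database but '6100' in UTF-16,
    // so a two-character result means UTF-8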
| tx.executeSql('SELECT HEX("a") AS hex', [], function (tx, res) { |
| var hex = res.rows.item(0).hex; |
| encoding = hex.length === 2 ? 'UTF-8' : 'UTF-16'; |
| cb(); |
| } |
| ); |
| } |
| |
| function onGetInstanceId() { |
| while (idRequests.length > 0) { |
| var idCallback = idRequests.pop(); |
| idCallback(null, instanceId); |
| } |
| } |
| |
| function onGetVersion(tx, dbVersion) { |
| if (dbVersion === 0) { |
| // initial schema |
| |
| var meta = 'CREATE TABLE IF NOT EXISTS ' + META_STORE + |
| ' (dbid, db_version INTEGER)'; |
| var attach = 'CREATE TABLE IF NOT EXISTS ' + ATTACH_STORE + |
| ' (digest UNIQUE, escaped TINYINT(1), body BLOB)'; |
| var attachAndRev = 'CREATE TABLE IF NOT EXISTS ' + |
| ATTACH_AND_SEQ_STORE + ' (digest, seq INTEGER)'; |
| var doc = 'CREATE TABLE IF NOT EXISTS ' + DOC_STORE + |
| ' (id unique, json, winningseq)'; |
| var seq = 'CREATE TABLE IF NOT EXISTS ' + BY_SEQ_STORE + |
| ' (seq INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, ' + |
| 'json, deleted TINYINT(1), doc_id, rev)'; |
| var local = 'CREATE TABLE IF NOT EXISTS ' + LOCAL_STORE + |
| ' (id UNIQUE, rev, json)'; |
| |
| // creates |
| tx.executeSql(attach); |
| tx.executeSql(local); |
| tx.executeSql(attachAndRev, [], function () { |
| tx.executeSql(ATTACH_AND_SEQ_STORE_SEQ_INDEX_SQL); |
| tx.executeSql(ATTACH_AND_SEQ_STORE_ATTACH_INDEX_SQL); |
| }); |
| tx.executeSql(doc, [], function () { |
| tx.executeSql(DOC_STORE_WINNINGSEQ_INDEX_SQL); |
| tx.executeSql(seq, [], function () { |
| tx.executeSql(BY_SEQ_STORE_DELETED_INDEX_SQL); |
| tx.executeSql(BY_SEQ_STORE_DOC_ID_REV_INDEX_SQL); |
| tx.executeSql(meta, [], function () { |
| // mark the db version, and new dbid |
| var initSeq = 'INSERT INTO ' + META_STORE + |
| ' (db_version, dbid) VALUES (?,?)'; |
| instanceId = utils.uuid(); |
| var initSeqArgs = [ADAPTER_VERSION, instanceId]; |
| tx.executeSql(initSeq, initSeqArgs, function (tx) { |
| onGetInstanceId(); |
| }); |
| }); |
| }); |
| }); |
| } else { // version > 0 |
| |
| var setupDone = function () { |
| var migrated = dbVersion < ADAPTER_VERSION; |
| if (migrated) { |
| // update the db version within this transaction |
| tx.executeSql('UPDATE ' + META_STORE + ' SET db_version = ' + |
| ADAPTER_VERSION); |
| } |
| // notify db.id() callers |
| var sql = 'SELECT dbid FROM ' + META_STORE; |
| tx.executeSql(sql, [], function (tx, result) { |
| instanceId = result.rows.item(0).dbid; |
| onGetInstanceId(); |
| }); |
| }; |
| |
| // would love to use promises here, but then websql |
| // ends the transaction early |
| var tasks = [ |
| runMigration2, |
| runMigration3, |
| runMigration4, |
| runMigration5, |
| runMigration6, |
| setupDone |
| ]; |
| |
| // run each migration sequentially |
| var i = dbVersion; |
| var nextMigration = function (tx) { |
| tasks[i - 1](tx, nextMigration); |
| i++; |
| }; |
| nextMigration(tx); |
| } |
| } |
| |
| function setup() { |
| db.transaction(function (tx) { |
| // first check the encoding |
| checkEncoding(tx, function () { |
| // then get the version |
| fetchVersion(tx); |
| }); |
| }, unknownError(callback), dbCreated); |
| } |
| |
| function fetchVersion(tx) { |
| var sql = 'SELECT sql FROM sqlite_master WHERE tbl_name = ' + META_STORE; |
| tx.executeSql(sql, [], function (tx, result) { |
| if (!result.rows.length) { |
| // database hasn't even been created yet (version 0) |
| onGetVersion(tx, 0); |
| } else if (!/db_version/.test(result.rows.item(0).sql)) { |
| // table was created, but without the new db_version column, |
| // so add it. |
| tx.executeSql('ALTER TABLE ' + META_STORE + |
| ' ADD COLUMN db_version INTEGER', [], function () { |
| // before version 2, this column didn't even exist |
| onGetVersion(tx, 1); |
| }); |
| } else { // column exists, we can safely get it |
| tx.executeSql('SELECT db_version FROM ' + META_STORE, |
| [], function (tx, result) { |
| var dbVersion = result.rows.item(0).db_version; |
| onGetVersion(tx, dbVersion); |
| }); |
| } |
| }); |
| } |
| |
| if (utils.isCordova() && typeof global !== 'undefined') { |
    // wait until the custom api is set up in pouch.adapters before doing setup
| global.addEventListener(name + '_pouch', function cordova_init() { |
| global.removeEventListener(name + '_pouch', cordova_init, false); |
| setup(); |
| }, false); |
| } else { |
| setup(); |
| } |
| |
| api.type = function () { |
| return 'websql'; |
| }; |
| |
| api._id = utils.toPromise(function (callback) { |
| callback(null, instanceId); |
| }); |
| |
| api._info = function (callback) { |
| db.readTransaction(function (tx) { |
| countDocs(tx, function (docCount) { |
| var sql = 'SELECT MAX(seq) AS seq FROM ' + BY_SEQ_STORE; |
| tx.executeSql(sql, [], function (tx, res) { |
| var updateSeq = res.rows.item(0).seq || 0; |
| callback(null, { |
| doc_count: docCount, |
| update_seq: updateSeq |
| }); |
| }); |
| }); |
| }, unknownError(callback)); |
| }; |
| |
| api._bulkDocs = function (req, opts, callback) { |
| |
| var newEdits = opts.new_edits; |
| var userDocs = req.docs; |
| |
| // Parse the docs, give them a sequence number for the result |
| var docInfos = userDocs.map(function (doc, i) { |
| if (doc._id && utils.isLocalId(doc._id)) { |
| return doc; |
| } |
| var newDoc = utils.parseDoc(doc, newEdits); |
| return newDoc; |
| }); |
| |
| var docInfoErrors = docInfos.filter(function (docInfo) { |
| return docInfo.error; |
| }); |
| if (docInfoErrors.length) { |
| return callback(docInfoErrors[0]); |
| } |
| |
| var tx; |
| var results = new Array(docInfos.length); |
| var fetchedDocs = new utils.Map(); |
| |
| var preconditionErrored; |
| function complete() { |
| if (preconditionErrored) { |
| return callback(preconditionErrored); |
| } |
| var aresults = results.map(function (result) { |
| if (!Object.keys(result).length) { |
| return { |
| ok: true |
| }; |
| } |
| if (result.error) { |
| return result; |
| } |
| |
| var metadata = result.metadata; |
| var rev = merge.winningRev(metadata); |
| |
| return { |
| ok: true, |
| id: metadata.id, |
| rev: rev |
| }; |
| }); |
| WebSqlPouch.Changes.notify(name); |
| docCount = -1; // invalidate |
| callback(null, aresults); |
| } |
| |
| function verifyAttachment(digest, callback) { |
| var sql = 'SELECT count(*) as cnt FROM ' + ATTACH_STORE + |
| ' WHERE digest=?'; |
| tx.executeSql(sql, [digest], function (tx, result) { |
| if (result.rows.item(0).cnt === 0) { |
| var err = new Error('unknown stub attachment with digest ' + digest); |
| err.status = 412; |
| callback(err); |
| } else { |
| callback(); |
| } |
| }); |
| } |
| |
| function verifyAttachments(finish) { |
| var digests = []; |
| docInfos.forEach(function (docInfo) { |
| if (docInfo.data && docInfo.data._attachments) { |
| Object.keys(docInfo.data._attachments).forEach(function (filename) { |
| var att = docInfo.data._attachments[filename]; |
| if (att.stub) { |
| digests.push(att.digest); |
| } |
| }); |
| } |
| }); |
| if (!digests.length) { |
| return finish(); |
| } |
| var numDone = 0; |
| var err; |
| |
| function checkDone() { |
| if (++numDone === digests.length) { |
| finish(err); |
| } |
| } |
| digests.forEach(function (digest) { |
| verifyAttachment(digest, function (attErr) { |
| if (attErr && !err) { |
| err = attErr; |
| } |
| checkDone(); |
| }); |
| }); |
| } |
| |
| |
| function writeDoc(docInfo, winningRev, deleted, callback, isUpdate, |
| resultsIdx) { |
| |
| function finish() { |
| var data = docInfo.data; |
| var deletedInt = deleted ? 1 : 0; |
| |
| var id = data._id; |
| var rev = data._rev; |
| var json = stringifyDoc(data); |
| var sql = 'INSERT INTO ' + BY_SEQ_STORE + |
| ' (doc_id, rev, json, deleted) VALUES (?, ?, ?, ?);'; |
| var sqlArgs = [id, rev, json, deletedInt]; |
| |
| // map seqs to attachment digests, which |
| // we will need later during compaction |
| function insertAttachmentMappings(seq, callback) { |
| var attsAdded = 0; |
| var attsToAdd = Object.keys(data._attachments || {}); |
| |
| if (!attsToAdd.length) { |
| return callback(); |
| } |
| function checkDone() { |
| if (++attsAdded === attsToAdd.length) { |
| callback(); |
| } |
            return false; // ack a constraint error if we were the error callback
| } |
| function add(att) { |
| var sql = 'INSERT INTO ' + ATTACH_AND_SEQ_STORE + |
| ' (digest, seq) VALUES (?,?)'; |
| var sqlArgs = [data._attachments[att].digest, seq]; |
| tx.executeSql(sql, sqlArgs, checkDone, checkDone); |
            // second callback is for a constraint error, which we ignore
| // because this docid/rev has already been associated with |
| // the digest (e.g. when new_edits == false) |
| } |
| for (var i = 0; i < attsToAdd.length; i++) { |
| add(attsToAdd[i]); // do in parallel |
| } |
| } |
| |
| tx.executeSql(sql, sqlArgs, function (tx, result) { |
| var seq = result.insertId; |
| insertAttachmentMappings(seq, function () { |
| dataWritten(tx, seq); |
| }); |
| }, function () { |
| // constraint error, recover by updating instead (see #1638) |
| var fetchSql = select('seq', BY_SEQ_STORE, null, |
| 'doc_id=? AND rev=?'); |
| tx.executeSql(fetchSql, [id, rev], function (tx, res) { |
| var seq = res.rows.item(0).seq; |
| var sql = 'UPDATE ' + BY_SEQ_STORE + |
| ' SET json=?, deleted=? WHERE doc_id=? AND rev=?;'; |
| var sqlArgs = [json, deletedInt, id, rev]; |
| tx.executeSql(sql, sqlArgs, function (tx) { |
| insertAttachmentMappings(seq, function () { |
| dataWritten(tx, seq); |
| }); |
| }); |
| }); |
| return false; // ack that we've handled the error |
| }); |
| } |
| |
| function collectResults(attachmentErr) { |
| if (!err) { |
| if (attachmentErr) { |
| err = attachmentErr; |
| callback(err); |
| } else if (recv === attachments.length) { |
| finish(); |
| } |
| } |
| } |
| |
| var err = null; |
| var recv = 0; |
| |
| docInfo.data._id = docInfo.metadata.id; |
| docInfo.data._rev = docInfo.metadata.rev; |
| |
| if (deleted) { |
| docInfo.data._deleted = true; |
| } |
| |
| var attachments = docInfo.data._attachments ? |
| Object.keys(docInfo.data._attachments) : []; |
| |
| function attachmentSaved(err) { |
| recv++; |
| collectResults(err); |
| } |
| |
| for (var key in docInfo.data._attachments) { |
| if (!docInfo.data._attachments[key].stub) { |
| var data = docInfo.data._attachments[key].data; |
| delete docInfo.data._attachments[key].data; |
| var digest = docInfo.data._attachments[key].digest; |
| saveAttachment(digest, data, attachmentSaved); |
| } else { |
| recv++; |
| collectResults(); |
| } |
| } |
| |
| if (!attachments.length) { |
| finish(); |
| } |
| |
| function dataWritten(tx, seq) { |
| docInfo.metadata.seq = seq; |
| delete docInfo.metadata.rev; |
| |
| var sql = isUpdate ? |
| 'UPDATE ' + DOC_STORE + |
| ' SET json=?, winningseq=(SELECT seq FROM ' + BY_SEQ_STORE + |
| ' WHERE doc_id=' + DOC_STORE + '.id AND rev=?) WHERE id=?' |
| : 'INSERT INTO ' + DOC_STORE + |
| ' (id, winningseq, json) VALUES (?, ?, ?);'; |
| var metadataStr = vuvuzela.stringify(docInfo.metadata); |
| var id = docInfo.metadata.id; |
| var params = isUpdate ? |
| [metadataStr, winningRev, id] : |
| [id, seq, metadataStr]; |
| tx.executeSql(sql, params, function () { |
| results[resultsIdx] = docInfo; |
| fetchedDocs.set(id, docInfo.metadata); |
| callback(); |
| }); |
| } |
| } |
| |
| function processDocs() { |
| utils.processDocs(docInfos, api, fetchedDocs, |
| tx, results, writeDoc, opts); |
| } |
| |
| function fetchExistingDocs(callback) { |
| if (!docInfos.length) { |
| return callback(); |
| } |
| |
| var numFetched = 0; |
| |
| function checkDone() { |
| if (++numFetched === docInfos.length) { |
| callback(); |
| } |
| } |
| |
| docInfos.forEach(function (docInfo) { |
| if (docInfo._id && utils.isLocalId(docInfo._id)) { |
| return checkDone(); // skip local docs |
| } |
| var id = docInfo.metadata.id; |
| tx.executeSql('SELECT json FROM ' + DOC_STORE + |
| ' WHERE id = ?', [id], function (tx, result) { |
| if (result.rows.length) { |
| var metadata = vuvuzela.parse(result.rows.item(0).json); |
| fetchedDocs.set(id, metadata); |
| } |
| checkDone(); |
| }); |
| }); |
| } |
| |
| function saveAttachment(digest, data, callback) { |
| var sql = 'SELECT digest FROM ' + ATTACH_STORE + ' WHERE digest=?'; |
| tx.executeSql(sql, [digest], function (tx, result) { |
| if (result.rows.length) { // attachment already exists |
| return callback(); |
| } |
| // we could just insert before selecting and catch the error, |
| // but my hunch is that it's cheaper not to serialize the blob |
| // from JS to C if we don't have to (TODO: confirm this) |
| sql = 'INSERT INTO ' + ATTACH_STORE + |
| ' (digest, body, escaped) VALUES (?,?,1)'; |
| tx.executeSql(sql, [digest, escapeBlob(data)], function () { |
| callback(); |
| }, function () { |
          // ignore constraint errors, which mean it already exists
| callback(); |
| return false; // ack we handled the error |
| }); |
| }); |
| } |
| |
| utils.preprocessAttachments(docInfos, 'binary', function (err) { |
| if (err) { |
| return callback(err); |
| } |
| db.transaction(function (txn) { |
| tx = txn; |
| verifyAttachments(function (err) { |
| if (err) { |
| preconditionErrored = err; |
| } else { |
| fetchExistingDocs(processDocs); |
| } |
| }); |
| }, unknownError(callback), complete); |
| }); |
| }; |
| |
| api._get = function (id, opts, callback) { |
| opts = utils.clone(opts); |
| var doc; |
| var metadata; |
| var err; |
| if (!opts.ctx) { |
| db.readTransaction(function (txn) { |
| opts.ctx = txn; |
| api._get(id, opts, callback); |
| }); |
| return; |
| } |
| var tx = opts.ctx; |
| |
| function finish() { |
| callback(err, {doc: doc, metadata: metadata, ctx: tx}); |
| } |
| |
| var sql; |
| var sqlArgs; |
| if (opts.rev) { |
| sql = select( |
| SELECT_DOCS, |
| [DOC_STORE, BY_SEQ_STORE], |
| DOC_STORE + '.id=' + BY_SEQ_STORE + '.doc_id', |
| [BY_SEQ_STORE + '.doc_id=?', BY_SEQ_STORE + '.rev=?']); |
| sqlArgs = [id, opts.rev]; |
| } else { |
| sql = select( |
| SELECT_DOCS, |
| [DOC_STORE, BY_SEQ_STORE], |
| DOC_STORE_AND_BY_SEQ_JOINER, |
| DOC_STORE + '.id=?'); |
| sqlArgs = [id]; |
| } |
| tx.executeSql(sql, sqlArgs, function (a, results) { |
| if (!results.rows.length) { |
| err = errors.MISSING_DOC; |
| return finish(); |
| } |
| var item = results.rows.item(0); |
| metadata = vuvuzela.parse(item.metadata); |
| if (item.deleted && !opts.rev) { |
| err = errors.error(errors.MISSING_DOC, 'deleted'); |
| return finish(); |
| } |
| doc = unstringifyDoc(item.data, metadata.id, item.rev); |
| finish(); |
| }); |
| }; |
| |
| function countDocs(tx, callback) { |
| |
| if (docCount !== -1) { |
| return callback(docCount); |
| } |
| |
| // count the total rows |
| var sql = select( |
| 'COUNT(' + DOC_STORE + '.id) AS \'num\'', |
| [DOC_STORE, BY_SEQ_STORE], |
| DOC_STORE_AND_BY_SEQ_JOINER, |
| BY_SEQ_STORE + '.deleted=0'); |
| |
| tx.executeSql(sql, [], function (tx, result) { |
| docCount = result.rows.item(0).num; |
| callback(docCount); |
| }); |
| } |
| |
| api._allDocs = function (opts, callback) { |
| var results = []; |
| var totalRows; |
| |
| var start = 'startkey' in opts ? opts.startkey : false; |
| var end = 'endkey' in opts ? opts.endkey : false; |
| var key = 'key' in opts ? opts.key : false; |
| var descending = 'descending' in opts ? opts.descending : false; |
| var limit = 'limit' in opts ? opts.limit : -1; |
| var offset = 'skip' in opts ? opts.skip : 0; |
| var inclusiveEnd = opts.inclusive_end !== false; |
| |
| var sqlArgs = []; |
| var criteria = []; |
| |
| if (key !== false) { |
| criteria.push(DOC_STORE + '.id = ?'); |
| sqlArgs.push(key); |
| } else if (start !== false || end !== false) { |
| if (start !== false) { |
| criteria.push(DOC_STORE + '.id ' + (descending ? '<=' : '>=') + ' ?'); |
| sqlArgs.push(start); |
| } |
| if (end !== false) { |
| var comparator = descending ? '>' : '<'; |
| if (inclusiveEnd) { |
| comparator += '='; |
| } |
| criteria.push(DOC_STORE + '.id ' + comparator + ' ?'); |
| sqlArgs.push(end); |
| } |
| } |
| |
    if (opts.deleted !== 'ok') {
      // filter out deleted docs unless the caller explicitly requests them
      criteria.push(BY_SEQ_STORE + '.deleted = 0');
| } |
| |
| db.readTransaction(function (tx) { |
| |
| // first count up the total rows |
| countDocs(tx, function (count) { |
| totalRows = count; |
| |
| if (limit === 0) { |
| return; |
| } |
| |
| // then actually fetch the documents |
| var sql = select( |
| SELECT_DOCS, |
| [DOC_STORE, BY_SEQ_STORE], |
| DOC_STORE_AND_BY_SEQ_JOINER, |
| criteria, |
| DOC_STORE + '.id ' + (descending ? 'DESC' : 'ASC') |
| ); |
| sql += ' LIMIT ' + limit + ' OFFSET ' + offset; |
| |
| function fetchAttachment(doc, att) { |
| var attObj = doc.doc._attachments[att]; |
| var attOpts = {encode: true, ctx: tx}; |
| api._getAttachment(attObj, attOpts, function (_, base64) { |
| doc.doc._attachments[att] = utils.extend( |
| utils.pick(attObj, ['digest', 'content_type']), |
| { data: base64 } |
| ); |
| }); |
| } |
| |
| tx.executeSql(sql, sqlArgs, function (tx, result) { |
| for (var i = 0, l = result.rows.length; i < l; i++) { |
| var item = result.rows.item(i); |
| var metadata = vuvuzela.parse(item.metadata); |
| var data = unstringifyDoc(item.data, metadata.id, item.rev); |
| var winningRev = data._rev; |
| var doc = { |
| id: metadata.id, |
| key: metadata.id, |
| value: {rev: winningRev} |
| }; |
| if (opts.include_docs) { |
| doc.doc = data; |
| doc.doc._rev = winningRev; |
| if (opts.conflicts) { |
| doc.doc._conflicts = merge.collectConflicts(metadata); |
| } |
| for (var att in doc.doc._attachments) { |
| if (doc.doc._attachments.hasOwnProperty(att)) { |
| if (opts.attachments) { |
| fetchAttachment(doc, att); |
| } else { |
| doc.doc._attachments[att].stub = true; |
| } |
| } |
| } |
| } |
| if (item.deleted) { |
| if (opts.deleted === 'ok') { |
| doc.value.deleted = true; |
| doc.doc = null; |
| } else { |
| continue; |
| } |
| } |
| results.push(doc); |
| } |
| }); |
| }); |
| }, unknownError(callback), function () { |
| callback(null, { |
| total_rows: totalRows, |
| offset: opts.skip, |
| rows: results |
| }); |
| }); |
| }; |
| |
| api._changes = function (opts) { |
| opts = utils.clone(opts); |
| |
| if (opts.continuous) { |
| var id = name + ':' + utils.uuid(); |
| WebSqlPouch.Changes.addListener(name, id, api, opts); |
| WebSqlPouch.Changes.notify(name); |
| return { |
| cancel: function () { |
| WebSqlPouch.Changes.removeListener(name, id); |
| } |
| }; |
| } |
| |
| var descending = opts.descending; |
| |
| // Ignore the `since` parameter when `descending` is true |
| opts.since = opts.since && !descending ? opts.since : 0; |
| |
| var limit = 'limit' in opts ? opts.limit : -1; |
| if (limit === 0) { |
| limit = 1; // per CouchDB _changes spec |
| } |
| |
| var returnDocs; |
| if ('returnDocs' in opts) { |
| returnDocs = opts.returnDocs; |
| } else { |
| returnDocs = true; |
| } |
| var results = []; |
| var numResults = 0; |
| function fetchChanges() { |
| |
| var criteria = [ |
| DOC_STORE + '.winningseq > ' + opts.since |
| ]; |
| var sqlArgs = []; |
| if (opts.doc_ids) { |
| criteria.push(DOC_STORE + '.id IN (' + opts.doc_ids.map(function () { |
| return '?'; |
| }).join(',') + ')'); |
| sqlArgs = opts.doc_ids; |
| } |
| |
| var sql = select(SELECT_DOCS, [DOC_STORE, BY_SEQ_STORE], |
| DOC_STORE_AND_BY_SEQ_JOINER, criteria, |
| DOC_STORE + '.winningseq ' + (descending ? 'DESC' : 'ASC')); |
| |
| var filter = utils.filterChange(opts); |
| if (!opts.view && !opts.filter) { |
| // we can just limit in the query |
| sql += ' LIMIT ' + limit; |
| } |
| |
| db.readTransaction(function (tx) { |
| tx.executeSql(sql, sqlArgs, function (tx, result) { |
| var lastSeq = 0; |
| for (var i = 0, l = result.rows.length; i < l; i++) { |
| var res = result.rows.item(i); |
| var metadata = vuvuzela.parse(res.metadata); |
| if (lastSeq < res.seq) { |
| lastSeq = res.seq; |
| } |
| var doc = unstringifyDoc(res.data, metadata.id, res.rev); |
| var change = opts.processChange(doc, metadata, opts); |
| change.seq = res.seq; |
| if (filter(change)) { |
| numResults++; |
| if (returnDocs) { |
| results.push(change); |
| } |
| opts.onChange(change); |
| } |
| if (numResults === limit) { |
| break; |
| } |
| } |
| if (!opts.continuous) { |
| opts.complete(null, { |
| results: results, |
| last_seq: lastSeq |
| }); |
| } |
| }); |
| }); |
| } |
| |
| fetchChanges(); |
| }; |
| |
| api._close = function (callback) { |
    // WebSQL databases do not need to be closed
| callback(); |
| }; |
| |
| api._getAttachment = function (attachment, opts, callback) { |
| var res; |
| var tx = opts.ctx; |
| var digest = attachment.digest; |
| var type = attachment.content_type; |
| var sql = 'SELECT escaped, ' + |
| 'CASE WHEN escaped = 1 THEN body ELSE HEX(body) END AS body FROM ' + |
| ATTACH_STORE + ' WHERE digest=?'; |
| tx.executeSql(sql, [digest], function (tx, result) { |
| // websql has a bug where \u0000 causes early truncation in strings |
| // and blobs. to work around this, we used to use the hex() function, |
| // but that's not performant. after migration 6, we remove \u0000 |
| // and add it back in afterwards |
| var item = result.rows.item(0); |
| var data = item.escaped ? unescapeBlob(item.body) : |
| parseHexString(item.body, encoding); |
| if (opts.encode) { |
| res = btoa(data); |
| } else { |
| data = utils.fixBinary(data); |
| res = utils.createBlob([data], {type: type}); |
| } |
| callback(null, res); |
| }); |
| }; |
| |
| api._getRevisionTree = function (docId, callback) { |
| db.readTransaction(function (tx) { |
| var sql = 'SELECT json AS metadata FROM ' + DOC_STORE + ' WHERE id = ?'; |
| tx.executeSql(sql, [docId], function (tx, result) { |
| if (!result.rows.length) { |
| callback(errors.MISSING_DOC); |
| } else { |
| var data = vuvuzela.parse(result.rows.item(0).metadata); |
| callback(null, data.rev_tree); |
| } |
| }); |
| }); |
| }; |
| |
| api._doCompaction = function (docId, revs, callback) { |
| if (!revs.length) { |
| return callback(); |
| } |
| db.transaction(function (tx) { |
| |
| // update doc store |
| var sql = 'SELECT json AS metadata FROM ' + DOC_STORE + ' WHERE id = ?'; |
| tx.executeSql(sql, [docId], function (tx, result) { |
| var metadata = vuvuzela.parse(result.rows.item(0).metadata); |
| merge.traverseRevTree(metadata.rev_tree, function (isLeaf, pos, |
| revHash, ctx, opts) { |
| var rev = pos + '-' + revHash; |
| if (revs.indexOf(rev) !== -1) { |
| opts.status = 'missing'; |
| } |
| }); |
| |
| var sql = 'UPDATE ' + DOC_STORE + ' SET json = ? WHERE id = ?'; |
| tx.executeSql(sql, [vuvuzela.stringify(metadata), docId]); |
| }); |
| |
| // update by-seq and attach stores in parallel |
| revs.forEach(function (rev) { |
| var sql = 'SELECT seq FROM ' + BY_SEQ_STORE + |
| ' WHERE doc_id=? AND rev=?'; |
| tx.executeSql(sql, [docId, rev], function (tx, res) { |
| if (!res.rows.length) { |
| return; // already deleted |
| } |
| var seq = res.rows.item(0).seq; |
| |
| // find orphaned attachment digests |
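        // the self-join counts references per digest; HAVING COUNT(*) = 1
        // keeps digests referenced only by this seq, which become orphans
        // once this seq's rows are deleted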
| var sql = 'SELECT a1.digest AS digest ' + |
| 'FROM ' + ATTACH_AND_SEQ_STORE + ' a1 JOIN ' + |
| ATTACH_AND_SEQ_STORE + ' a2 ON a1.digest=a2.digest ' + |
| 'WHERE a1.seq=? ' + |
| 'GROUP BY a1.digest HAVING COUNT(*) = 1'; |
| tx.executeSql(sql, [seq], function (tx, res) { |
| var orphanedAttachments = []; |
| for (var i = 0; i < res.rows.length; i++) { |
| orphanedAttachments.push(res.rows.item(i).digest); |
| } |
| |
| tx.executeSql( |
| 'DELETE FROM ' + BY_SEQ_STORE + ' WHERE seq=?', [seq]); |
| tx.executeSql( |
| 'DELETE FROM ' + ATTACH_AND_SEQ_STORE + ' WHERE seq=?', [seq]); |
| orphanedAttachments.forEach(function (digest) { |
| tx.executeSql( |
| 'DELETE FROM ' + ATTACH_AND_SEQ_STORE + ' WHERE digest=?', |
| [digest]); |
| tx.executeSql( |
| 'DELETE FROM ' + ATTACH_STORE + ' WHERE digest=?', [digest]); |
| }); |
| }); |
| }); |
| }); |
| }, unknownError(callback), function () { |
| callback(); |
| }); |
| }; |
| |
| api._getLocal = function (id, callback) { |
| db.readTransaction(function (tx) { |
| var sql = 'SELECT json, rev FROM ' + LOCAL_STORE + ' WHERE id=?'; |
| tx.executeSql(sql, [id], function (tx, res) { |
| if (res.rows.length) { |
| var item = res.rows.item(0); |
| var doc = unstringifyDoc(item.json, id, item.rev); |
| callback(null, doc); |
| } else { |
| callback(errors.MISSING_DOC); |
| } |
| }); |
| }); |
| }; |
| |
| api._putLocal = function (doc, opts, callback) { |
| if (typeof opts === 'function') { |
| callback = opts; |
| opts = {}; |
| } |
| delete doc._revisions; // ignore this, trust the rev |
| var oldRev = doc._rev; |
| var id = doc._id; |
| var newRev; |
| if (!oldRev) { |
| newRev = doc._rev = '0-1'; |
| } else { |
| newRev = doc._rev = '0-' + (parseInt(oldRev.split('-')[1], 10) + 1); |
| } |
| var json = stringifyDoc(doc); |
| |
| var ret; |
| function putLocal(tx) { |
| var sql; |
| var values; |
| if (oldRev) { |
| sql = 'UPDATE ' + LOCAL_STORE + ' SET rev=?, json=? ' + |
| 'WHERE id=? AND rev=?'; |
| values = [newRev, json, id, oldRev]; |
| } else { |
| sql = 'INSERT INTO ' + LOCAL_STORE + ' (id, rev, json) VALUES (?,?,?)'; |
| values = [id, newRev, json]; |
| } |
| tx.executeSql(sql, values, function (tx, res) { |
| if (res.rowsAffected) { |
| ret = {ok: true, id: id, rev: newRev}; |
| if (opts.ctx) { // return immediately |
| callback(null, ret); |
| } |
| } else { |
| callback(errors.REV_CONFLICT); |
| } |
| }, function () { |
| callback(errors.REV_CONFLICT); |
| return false; // ack that we handled the error |
| }); |
| } |
| |
| if (opts.ctx) { |
| putLocal(opts.ctx); |
| } else { |
| db.transaction(function (tx) { |
| putLocal(tx); |
| }, unknownError(callback), function () { |
| if (ret) { |
| callback(null, ret); |
| } |
| }); |
| } |
| }; |
| |
| api._removeLocal = function (doc, callback) { |
| var ret; |
| db.transaction(function (tx) { |
| var sql = 'DELETE FROM ' + LOCAL_STORE + ' WHERE id=? AND rev=?'; |
| var params = [doc._id, doc._rev]; |
| tx.executeSql(sql, params, function (tx, res) { |
| if (!res.rowsAffected) { |
| return callback(errors.MISSING_DOC); |
| } |
| ret = {ok: true, id: doc._id, rev: '0-0'}; |
| }); |
| }, unknownError(callback), function () { |
| if (ret) { |
| callback(null, ret); |
| } |
| }); |
| }; |
| } |
| |
| WebSqlPouch.valid = function () { |
| if (typeof global !== 'undefined') { |
| if (global.navigator && |
| global.navigator.sqlitePlugin && |
| global.navigator.sqlitePlugin.openDatabase) { |
| return true; |
| } else if (global.sqlitePlugin && global.sqlitePlugin.openDatabase) { |
| return true; |
| } else if (global.openDatabase) { |
| return true; |
| } |
| } |
| return false; |
| }; |
| |
| WebSqlPouch.destroy = utils.toPromise(function (name, opts, callback) { |
| WebSqlPouch.Changes.removeAllListeners(name); |
| var size = getSize(opts); |
| var db = openDB(name, POUCH_VERSION, name, size); |
| db.transaction(function (tx) { |
| var stores = [DOC_STORE, BY_SEQ_STORE, ATTACH_STORE, META_STORE, |
| LOCAL_STORE, ATTACH_AND_SEQ_STORE]; |
| stores.forEach(function (store) { |
| tx.executeSql('DROP TABLE IF EXISTS ' + store, []); |
| }); |
| }, unknownError(callback), function () { |
| if (utils.hasLocalStorage()) { |
| delete global.localStorage['_pouch__websqldb_' + name]; |
| delete global.localStorage[name]; |
| } |
| callback(null, {'ok': true}); |
| }); |
| }); |
| |
| WebSqlPouch.Changes = new utils.Changes(); |
| |
| module.exports = WebSqlPouch; |