| "use strict"; |
| |
| var CHANGES_BATCH_SIZE = 25; |
| |
| // according to http://stackoverflow.com/a/417184/680742, |
| // the de facto URL length limit is 2000 characters. |
| // but since most of our measurements don't take the full |
| // URL into account, we fudge it a bit. |
| // TODO: we could measure the full URL to enforce exactly 2000 chars |
| var MAX_URL_LENGTH = 1800; |
| |
| var binStringToBluffer = |
| require('../../deps/binary/binaryStringToBlobOrBuffer'); |
| var b64StringToBluffer = |
| require('../../deps/binary/base64StringToBlobOrBuffer'); |
| var utils = require('../../utils'); |
| var Promise = utils.Promise; |
| var clone = utils.clone; |
| var base64 = require('../../deps/binary/base64'); |
| var btoa = base64.btoa; |
| var atob = base64.atob; |
| var errors = require('../../deps/errors'); |
| var log = require('debug')('pouchdb:http'); |
| var createMultipart = require('../../deps/ajax/multipart'); |
| var blufferToBase64 = require('../../deps/binary/blobOrBufferToBase64'); |
| var parseDoc = require('../../deps/docs/parseDoc'); |
| |
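| // Convert the base64-encoded attachment data on a row's doc (as returned |
| // by _all_docs/_changes with attachments=true) into a Blob or Buffer, |
| // in place. |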
| function readAttachmentsAsBlobOrBuffer(row) { |
| var atts = row.doc && row.doc._attachments; |
| if (!atts) { |
| return; |
| } |
| Object.keys(atts).forEach(function (filename) { |
| var att = atts[filename]; |
| att.data = b64StringToBluffer(att.data, att.content_type); |
| }); |
| } |
| |
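| // URL-encode a document ID, keeping a leading '_design/' or '_local/' as a |
| // real path segment; e.g. '_design/foo/bar' becomes '_design/foo%2Fbar'. |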
| function encodeDocId(id) { |
| if (/^_design/.test(id)) { |
| return '_design/' + encodeURIComponent(id.slice(8)); |
| } |
| if (/^_local/.test(id)) { |
| return '_local/' + encodeURIComponent(id.slice(7)); |
| } |
| return encodeURIComponent(id); |
| } |
| |
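| // Convert any Blob/Buffer attachment data on the document to base64 |
| // strings so the document can be sent as JSON. |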
| function preprocessAttachments(doc) { |
| if (!doc._attachments || !Object.keys(doc._attachments).length) { |
| return Promise.resolve(); |
| } |
| |
| return Promise.all(Object.keys(doc._attachments).map(function (key) { |
| var attachment = doc._attachments[key]; |
| if (attachment.data && typeof attachment.data !== 'string') { |
| return blufferToBase64(attachment.data).then(function (b64) { |
| attachment.data = b64; |
| }); |
| } |
| })); |
| } |
| |
| // Parse the URI given by name and return a host object describing it: |
| // protocol, host, port, path, database name, auth and headers. |
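| // For example (values illustrative), 'http://user:pass@example.com:5984/mydb' |
| // yields roughly {protocol: 'http', host: 'example.com', port: '5984', |
| // path: '', db: 'mydb', auth: {username: 'user', password: 'pass'}, |
| // headers: {Authorization: 'Basic ...'}}. |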
| function getHost(name, opts) { |
| // Parse the URI into all its little bits |
| var uri = utils.parseUri(name); |
| |
| // Store the user and password as a separate auth object |
| if (uri.user || uri.password) { |
| uri.auth = {username: uri.user, password: uri.password}; |
| } |
| |
| // Split the path part of the URI into parts using '/' as the delimiter |
| // after removing any leading '/' and any trailing '/' |
| var parts = uri.path.replace(/(^\/|\/$)/g, '').split('/'); |
| |
| // Store the last path segment as the database name and remove it from |
| // the parts array |
| uri.db = parts.pop(); |
| |
| // Restore the path by joining all the remaining parts (all the parts |
| // except for the database name) with '/'s |
| uri.path = parts.join('/'); |
| opts = opts || {}; |
| opts = clone(opts); |
| uri.headers = opts.headers || (opts.ajax && opts.ajax.headers) || {}; |
| |
| if (opts.auth || uri.auth) { |
| var nAuth = opts.auth || uri.auth; |
| var token = btoa(nAuth.username + ':' + nAuth.password); |
| uri.headers.Authorization = 'Basic ' + token; |
| } |
| |
| if (opts.headers) { |
| uri.headers = opts.headers; |
| } |
| |
| return uri; |
| } |
| |
| // Generate a URL to the given path inside the database described by opts |
| function genDBUrl(opts, path) { |
| return genUrl(opts, opts.db + '/' + path); |
| } |
| |
| // Generate a URL with the host data given by opts and the given path |
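| // e.g. (illustrative) genUrl({protocol: 'http', host: 'localhost', |
| // port: 5984, path: ''}, 'mydb') yields 'http://localhost:5984/mydb' |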
| function genUrl(opts, path) { |
| // If the host already has a path, then we need to have a path delimiter |
| // Otherwise, the path delimiter is the empty string |
| var pathDel = !opts.path ? '' : '/'; |
| |
| return opts.protocol + '://' + opts.host + ':' + opts.port + '/' + |
| opts.path + pathDel + path; |
| } |
| |
| // Implements the PouchDB API for dealing with CouchDB instances over HTTP |
| function HttpPouch(opts, callback) { |
| // The functions that will be publicly available for HttpPouch |
| var api = this; |
| |
| // Parse the URI given by opts.name into an easy-to-use object |
| var getHostFun = getHost; |
| |
| // TODO: this seems to only be used by yarong for the Thali project. |
| // Verify whether or not it's still needed. |
| /* istanbul ignore next */ |
| if (opts.getHost) { |
| getHostFun = opts.getHost; |
| } |
| var host = getHostFun(opts.name, opts); |
| |
| // Generate the database URL based on the host |
| var dbUrl = genDBUrl(host, ''); |
| |
| api.getUrl = function () {return dbUrl; }; |
| api.getHeaders = function () {return clone(host.headers); }; |
| |
| var ajaxOpts = opts.ajax || {}; |
| opts = clone(opts); |
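| // Merge per-request options over the adapter-level ajax options, log the |
| // request, and delegate to utils.ajax |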
| function ajax(options, callback) { |
| var reqOpts = utils.extend(true, clone(ajaxOpts), options); |
| log(reqOpts.method + ' ' + reqOpts.url); |
| return utils.ajax(reqOpts, callback); |
| } |
| |
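| // Promise-returning wrapper around ajax() |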
| function ajaxPromise(opts) { |
| return new Promise(function (resolve, reject) { |
| ajax(opts, function (err, res) { |
| if (err) { |
| return reject(err); |
| } |
| resolve(res); |
| }); |
| }); |
| } |
| |
| // Create a new CouchDB database based on the given opts |
| var createDB = function () { |
| ajax({ |
| headers: clone(host.headers), |
| method: 'PUT', |
| url: dbUrl |
| }, function (err) { |
| // If we get an "Unauthorized" error |
| /* istanbul ignore else */ |
| if (err && err.status === 401) { |
| // Test if the database already exists |
| ajax({headers: clone(host.headers), method: 'HEAD', url: dbUrl}, |
| function (err) { |
| // If there is still an error |
| if (err) { |
| // Give the error to the callback to deal with |
| callback(err); |
| } else { |
| // Continue as if there had been no errors |
| callback(null, api); |
| } |
| }); |
| // If there were no errors or if the only error is "Precondition Failed" |
| // (note: "Precondition Failed" occurs when we try to create a database |
| // that already exists) |
| } else if (!err || err.status === 412) { |
| // Continue as if there had been no errors |
| callback(null, api); |
| } else { |
| callback(err); |
| } |
| }); |
| }; |
| |
| if (!opts.skipSetup) { |
| ajax({ |
| headers: clone(host.headers), |
| method: 'GET', |
| url: dbUrl |
| }, function (err) { |
| //check if the db exists |
| if (err) { |
| if (err.status === 404) { |
| utils.explain404( |
| 'PouchDB is just detecting if the remote DB exists.'); |
| //if it doesn't, create it |
| createDB(); |
| } else { |
| callback(err); |
| } |
| } else { |
| //go do stuff with the db |
| callback(null, api); |
| } |
| }); |
| } |
| |
| api.type = function () { |
| return 'http'; |
| }; |
| |
| api.id = utils.adapterFun('id', function (callback) { |
| ajax({ |
| headers: clone(host.headers), |
| method: 'GET', |
| url: genUrl(host, '') |
| }, function (err, result) { |
| /* istanbul ignore next */ |
| if (err) { |
| return callback(err); |
| } |
| var uuid = result.uuid + host.db; |
| callback(null, uuid); |
| }); |
| }); |
| |
| api.request = utils.adapterFun('request', function (options, callback) { |
| options.headers = host.headers; |
| options.url = genDBUrl(host, options.url); |
| ajax(options, callback); |
| }); |
| |
| // Sends a POST request to the host to run CouchDB's _compact function, |
| // then polls info() until compact_running is false |
| api.compact = utils.adapterFun('compact', function (opts, callback) { |
| if (typeof opts === 'function') { |
| callback = opts; |
| opts = {}; |
| } |
| opts = clone(opts); |
| ajax({ |
| headers: clone(host.headers), |
| url: genDBUrl(host, '_compact'), |
| method: 'POST' |
| }, function () { |
| function ping() { |
| api.info(function (err, res) { |
| if (!res.compact_running) { |
| callback(null, {ok: true}); |
| } else { |
| setTimeout(ping, opts.interval || 200); |
| } |
| }); |
| } |
| // Poll the server until compaction has finished |
| ping(); |
| }); |
| }); |
| |
| // Calls GET on the database URL, which returns a JSON object of database |
| // info such as db_name, doc_count and update_seq |
| api._info = function (callback) { |
| ajax({ |
| headers: clone(host.headers), |
| method: 'GET', |
| url: genDBUrl(host, '') |
| }, function (err, res) { |
| /* istanbul ignore next */ |
| if (err) { |
| return callback(err); |
| } |
| res.host = genDBUrl(host, ''); |
| callback(null, res); |
| }); |
| }; |
| |
| // Get the document with the given id from the database given by host. |
| // The id could be solely the _id in the database, or it may be a |
| // _design/ID or _local/ID path |
| api.get = utils.adapterFun('get', function (id, opts, callback) { |
| // If no options were given, set the callback to the second parameter |
| if (typeof opts === 'function') { |
| callback = opts; |
| opts = {}; |
| } |
| opts = clone(opts); |
| |
| // List of parameters to add to the GET request |
| var params = []; |
| |
| // If it exists, add the opts.revs value to the list of parameters. |
| // If revs=true then the resulting JSON will include a field |
| // _revisions containing an array of the revision IDs. |
| if (opts.revs) { |
| params.push('revs=true'); |
| } |
| |
| // If it exists, add the opts.revs_info value to the list of parameters. |
| // If revs_info=true then the resulting JSON will include the field |
| // _revs_info containing an array of objects, each of which represents |
| // an available revision. |
| if (opts.revs_info) { |
| params.push('revs_info=true'); |
| } |
| |
| // If it exists, add the opts.open_revs value to the list of parameters. |
| // If open_revs=all then the resulting JSON will include all the leaf |
| // revisions. If open_revs=["rev1", "rev2",...] then the resulting JSON |
| // will contain an array of objects containing data of all revisions |
| if (opts.open_revs) { |
| if (opts.open_revs !== "all") { |
| opts.open_revs = JSON.stringify(opts.open_revs); |
| } |
| params.push('open_revs=' + opts.open_revs); |
| } |
| |
| // If it exists, add the opts.rev value to the list of parameters. |
| // If rev is given a revision number then get the specified revision. |
| if (opts.rev) { |
| params.push('rev=' + opts.rev); |
| } |
| |
| // If it exists, add the opts.conflicts value to the list of parameters. |
| // If conflicts=true then the resulting JSON will include the field |
| // _conflicts containing all the conflicting revisions. |
| if (opts.conflicts) { |
| params.push('conflicts=' + opts.conflicts); |
| } |
| |
| // Format the list of parameters into a valid URI query string |
| params = params.join('&'); |
| params = params === '' ? '' : '?' + params; |
| |
| id = encodeDocId(id); |
| |
| // Set the options for the ajax call |
| var options = { |
| headers: clone(host.headers), |
| method: 'GET', |
| url: genDBUrl(host, id + params) |
| }; |
| var getRequestAjaxOpts = opts.ajax || {}; |
| utils.extend(true, options, getRequestAjaxOpts); |
| |
| function fetchAttachments(doc) { |
| var atts = doc._attachments; |
| var filenames = atts && Object.keys(atts); |
| if (!atts || !filenames.length) { |
| return; |
| } |
| // we fetch these manually in separate XHRs, because |
| // Sync Gateway would normally send it back as multipart/mixed, |
| // which we cannot parse. Also, this is more efficient than |
| // receiving attachments as base64-encoded strings. |
| return Promise.all(filenames.map(function (filename) { |
| var att = atts[filename]; |
| var path = encodeDocId(doc._id) + '/' + encodeAttachmentId(filename) + |
| '?rev=' + doc._rev; |
| return ajaxPromise({ |
| headers: clone(host.headers), |
| method: 'GET', |
| url: genDBUrl(host, path), |
| binary: true |
| }).then(function (blob) { |
| if (opts.binary) { |
| return blob; |
| } |
| return blufferToBase64(blob); |
| }).then(function (data) { |
| delete att.stub; |
| delete att.length; |
| att.data = data; |
| }); |
| })); |
| } |
| |
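| // Fetch attachments for a single doc, or for each {ok: doc} entry when |
| // open_revs returns an array of results |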
| function fetchAllAttachments(docOrDocs) { |
| if (Array.isArray(docOrDocs)) { |
| return Promise.all(docOrDocs.map(function (doc) { |
| if (doc.ok) { |
| return fetchAttachments(doc.ok); |
| } |
| })); |
| } |
| return fetchAttachments(docOrDocs); |
| } |
| |
| ajaxPromise(options).then(function (res) { |
| return Promise.resolve().then(function () { |
| if (opts.attachments) { |
| return fetchAllAttachments(res); |
| } |
| }).then(function () { |
| callback(null, res); |
| }); |
| }).catch(callback); |
| }); |
| |
| // Delete the document given by doc from the database given by host. |
| api.remove = utils.adapterFun('remove', |
| function (docOrId, optsOrRev, opts, callback) { |
| var doc; |
| if (typeof optsOrRev === 'string') { |
| // id, rev, opts, callback style |
| doc = { |
| _id: docOrId, |
| _rev: optsOrRev |
| }; |
| if (typeof opts === 'function') { |
| callback = opts; |
| opts = {}; |
| } |
| } else { |
| // doc, opts, callback style |
| doc = docOrId; |
| if (typeof optsOrRev === 'function') { |
| callback = optsOrRev; |
| opts = {}; |
| } else { |
| callback = opts; |
| opts = optsOrRev; |
| } |
| } |
| |
| var rev = (doc._rev || opts.rev); |
| |
| // Delete the document |
| ajax({ |
| headers: clone(host.headers), |
| method: 'DELETE', |
| url: genDBUrl(host, encodeDocId(doc._id)) + '?rev=' + rev |
| }, callback); |
| }); |
| |
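| // Encode each path segment of an attachment ID separately, so that any |
| // '/' in the ID is kept as a path separator |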
| function encodeAttachmentId(attachmentId) { |
| return attachmentId.split("/").map(encodeURIComponent).join("/"); |
| } |
| |
| // Get the attachment |
| api.getAttachment = |
| utils.adapterFun('getAttachment', function (docId, attachmentId, opts, |
| callback) { |
| if (typeof opts === 'function') { |
| callback = opts; |
| opts = {}; |
| } |
| var params = opts.rev ? ('?rev=' + opts.rev) : ''; |
| var url = genDBUrl(host, encodeDocId(docId)) + '/' + |
| encodeAttachmentId(attachmentId) + params; |
| ajax({ |
| headers: clone(host.headers), |
| method: 'GET', |
| url: url, |
| binary: true |
| }, callback); |
| }); |
| |
| // Remove the attachment given by the id and rev |
| api.removeAttachment = |
| utils.adapterFun('removeAttachment', function (docId, attachmentId, rev, |
| callback) { |
| |
| var url = genDBUrl(host, encodeDocId(docId) + '/' + |
| encodeAttachmentId(attachmentId)) + '?rev=' + rev; |
| |
| ajax({ |
| headers: clone(host.headers), |
| method: 'DELETE', |
| url: url |
| }, callback); |
| }); |
| |
| // Add the attachment given by blob and content type to the document with |
| // the given id at the revision given by rev, in the database given by |
| // host. |
| api.putAttachment = |
| utils.adapterFun('putAttachment', function (docId, attachmentId, rev, blob, |
| type, callback) { |
| if (typeof type === 'function') { |
| callback = type; |
| type = blob; |
| blob = rev; |
| rev = null; |
| } |
| var id = encodeDocId(docId) + '/' + encodeAttachmentId(attachmentId); |
| var url = genDBUrl(host, id); |
| if (rev) { |
| url += '?rev=' + rev; |
| } |
| |
| if (typeof blob === 'string') { |
| var binary; |
| try { |
| binary = atob(blob); |
| } catch (err) { |
| // it's not base64-encoded, so return an error |
| return callback(errors.error(errors.BAD_ARG, |
| 'Attachments need to be base64 encoded')); |
| } |
| blob = binary ? binStringToBluffer(binary, type) : ''; |
| } |
| |
| var opts = { |
| headers: clone(host.headers), |
| method: 'PUT', |
| url: url, |
| processData: false, |
| body: blob, |
| timeout: ajaxOpts.timeout || 60000 |
| }; |
| opts.headers['Content-Type'] = type; |
| // Add the attachment |
| ajax(opts, callback); |
| }); |
| |
| // Add the document given by doc to the database given by host. |
| // This fails if the doc has no _id field. |
| api.put = utils.adapterFun('put', utils.getArguments(function (args) { |
| var temp, temptype, opts; |
| var doc = args.shift(); |
| var callback = args.pop(); |
| // validate doc before using the "in" operator, which throws on non-objects |
| if (typeof doc !== 'object' || doc === null || Array.isArray(doc)) { |
| return callback(errors.error(errors.NOT_AN_OBJECT)); |
| } |
| var id = '_id' in doc; |
| |
| doc = clone(doc); |
| |
| preprocessAttachments(doc).then(function () { |
| while (true) { |
| temp = args.shift(); |
| temptype = typeof temp; |
| if (temptype === "string" && !id) { |
| doc._id = temp; |
| id = true; |
| } else if (temptype === "string" && id && !('_rev' in doc)) { |
| doc._rev = temp; |
| } else if (temptype === "object") { |
| opts = clone(temp); |
| } |
| if (!args.length) { |
| break; |
| } |
| } |
| opts = opts || {}; |
| |
| // check for any errors |
| // TODO: rename this function |
| parseDoc.invalidIdError(doc._id); |
| |
| // List of parameters to add to the PUT request |
| var params = []; |
| |
| // If it exists, add the opts.new_edits value to the list of parameters. |
| // If new_edits = false then the database will NOT assign this document a |
| // new revision number |
| if (opts && typeof opts.new_edits !== 'undefined') { |
| params.push('new_edits=' + opts.new_edits); |
| } |
| |
| // Format the list of parameters into a valid URI query string |
| params = params.join('&'); |
| if (params !== '') { |
| params = '?' + params; |
| } |
| |
| var ajaxOpts = { |
| headers: clone(host.headers), |
| method: 'PUT', |
| url: genDBUrl(host, encodeDocId(doc._id)) + params, |
| body: doc |
| }; |
| |
| return Promise.resolve().then(function () { |
| var hasNonStubAttachments = doc._attachments && |
| Object.keys(doc._attachments).filter(function (att) { |
| return !doc._attachments[att].stub; |
| }).length; |
| if (hasNonStubAttachments) { |
| // use multipart/related for more efficient attachment uploading |
| var multipart = createMultipart(doc); |
| ajaxOpts.body = multipart.body; |
| ajaxOpts.processData = false; |
| ajaxOpts.headers = utils.extend(ajaxOpts.headers, multipart.headers); |
| } |
| }).catch(function () { |
| throw new Error('Did you forget to base64-encode an attachment?'); |
| }).then(function () { |
| return ajaxPromise(ajaxOpts); |
| }).then(function (res) { |
| res.ok = true; // smooths out cloudant not doing this |
| callback(null, res); |
| }); |
| }).catch(callback); |
| })); |
| |
| // Add the document given by doc to the database given by host. Unlike |
| // put, the doc is not required to have an _id; one is generated if it is |
| // missing. |
| api.post = utils.adapterFun('post', function (doc, opts, callback) { |
| // If no options were given, set the callback to be the second parameter |
| if (typeof opts === 'function') { |
| callback = opts; |
| opts = {}; |
| } |
| opts = clone(opts); |
| if (typeof doc !== 'object') { |
| return callback(errors.error(errors.NOT_AN_OBJECT)); |
| } |
| if (! ("_id" in doc)) { |
| doc._id = utils.uuid(); |
| } |
| api.put(doc, opts, function (err, res) { |
| if (err) { |
| return callback(err); |
| } |
| res.ok = true; |
| callback(null, res); |
| }); |
| }); |
| |
| // Update/create multiple documents given by req in the database |
| // given by host. |
| api._bulkDocs = function (req, opts, callback) { |
| // If new_edits=false then it prevents the database from creating |
| // new revision numbers for the documents. Instead it just uses |
| // the old ones. This is used in database replication. |
| req.new_edits = opts.new_edits; |
| |
| Promise.all(req.docs.map(preprocessAttachments)).then(function () { |
| // Update/create the documents |
| ajax({ |
| headers: clone(host.headers), |
| method: 'POST', |
| url: genDBUrl(host, '_bulk_docs'), |
| body: req |
| }, function (err, results) { |
| if (err) { |
| return callback(err); |
| } |
| results.forEach(function (result) { |
| result.ok = true; // smooths out cloudant not adding this |
| }); |
| callback(null, results); |
| }); |
| }).catch(callback); |
| }; |
| |
| // Get a listing of the documents in the database given |
| // by host and ordered by increasing id. |
| api.allDocs = utils.adapterFun('allDocs', function (opts, callback) { |
| if (typeof opts === 'function') { |
| callback = opts; |
| opts = {}; |
| } |
| opts = clone(opts); |
| // List of parameters to add to the GET request |
| var params = []; |
| var body; |
| var method = 'GET'; |
| |
| if (opts.conflicts) { |
| params.push('conflicts=true'); |
| } |
| |
| // If opts.descending is truthy add it to params |
| if (opts.descending) { |
| params.push('descending=true'); |
| } |
| |
| // If opts.include_docs exists, add the include_docs value to the |
| // list of parameters. |
| // If include_docs=true then include the associated document with each |
| // result. |
| if (opts.include_docs) { |
| params.push('include_docs=true'); |
| } |
| |
| if (opts.attachments) { |
| // added in CouchDB 1.6.0 |
| params.push('attachments=true'); |
| } |
| |
| if (opts.key) { |
| params.push('key=' + encodeURIComponent(JSON.stringify(opts.key))); |
| } |
| |
| // If opts.startkey exists, add the startkey value to the list of |
| // parameters. |
| // If startkey is given then the returned list of documents will |
| // start with the document whose id is startkey. |
| if (opts.startkey) { |
| params.push('startkey=' + |
| encodeURIComponent(JSON.stringify(opts.startkey))); |
| } |
| |
| // If opts.endkey exists, add the endkey value to the list of parameters. |
| // If endkey is given then the returned list of documents will |
| // end with the document whose id is endkey. |
| if (opts.endkey) { |
| params.push('endkey=' + encodeURIComponent(JSON.stringify(opts.endkey))); |
| } |
| |
| if (typeof opts.inclusive_end !== 'undefined') { |
| params.push('inclusive_end=' + !!opts.inclusive_end); |
| } |
| |
| // If opts.limit exists, add the limit value to the parameter list. |
| if (typeof opts.limit !== 'undefined') { |
| params.push('limit=' + opts.limit); |
| } |
| |
| if (typeof opts.skip !== 'undefined') { |
| params.push('skip=' + opts.skip); |
| } |
| |
| // Format the list of parameters into a valid URI query string |
| params = params.join('&'); |
| if (params !== '') { |
| params = '?' + params; |
| } |
| |
| if (typeof opts.keys !== 'undefined') { |
| |
| var keysAsString = |
| 'keys=' + encodeURIComponent(JSON.stringify(opts.keys)); |
| if (keysAsString.length + params.length + 1 <= MAX_URL_LENGTH) { |
| // If the keys are short enough, do a GET. we do this to work around |
| // Safari not understanding 304s on POSTs (see issue #1239) |
| params += (params.indexOf('?') !== -1 ? '&' : '?') + keysAsString; |
| } else { |
| // If keys are too long, issue a POST request to circumvent GET |
| // query string limits |
| // see http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options |
| method = 'POST'; |
| body = JSON.stringify({keys: opts.keys}); |
| } |
| } |
| |
| // Get the document listing |
| ajaxPromise({ |
| headers: clone(host.headers), |
| method: method, |
| url: genDBUrl(host, '_all_docs' + params), |
| body: body |
| }).then(function (res) { |
| if (opts.include_docs && opts.attachments && opts.binary) { |
| res.rows.forEach(readAttachmentsAsBlobOrBuffer); |
| } |
| callback(null, res); |
| }).catch(callback); |
| }); |
| |
| // Get a list of changes made to documents in the database given by host. |
| // TODO According to the README, there should be two other methods here, |
| // api.changes.addListener and api.changes.removeListener. |
| api._changes = function (opts) { |
| // We internally page the results of a changes request; this means that if |
| // there is a large set of changes to be returned we can start processing |
| // them sooner, instead of waiting for the entire set of changes to return |
| // and then attempting to process them all at once |
| var batchSize = 'batch_size' in opts ? opts.batch_size : CHANGES_BATCH_SIZE; |
| |
| opts = clone(opts); |
| opts.timeout = opts.timeout || ajaxOpts.timeout || 30 * 1000; |
| |
| // We give a 5 second buffer for CouchDB changes to respond with |
| // an ok timeout |
| var params = { timeout: opts.timeout - (5 * 1000) }; |
| var limit = (typeof opts.limit !== 'undefined') ? opts.limit : false; |
| var returnDocs; |
| if ('returnDocs' in opts) { |
| returnDocs = opts.returnDocs; |
| } else { |
| returnDocs = true; |
| } |
| var leftToFetch = limit; |
| |
| if (opts.style) { |
| params.style = opts.style; |
| } |
| |
| if (opts.include_docs || |
| (opts.filter && typeof opts.filter === 'function')) { |
| params.include_docs = true; |
| } |
| |
| if (opts.attachments) { |
| params.attachments = true; |
| } |
| |
| if (opts.continuous) { |
| params.feed = 'longpoll'; |
| } |
| |
| if (opts.conflicts) { |
| params.conflicts = true; |
| } |
| |
| if (opts.descending) { |
| params.descending = true; |
| } |
| |
| if (opts.filter && typeof opts.filter === 'string') { |
| params.filter = opts.filter; |
| if (opts.filter === '_view' && |
| opts.view && |
| typeof opts.view === 'string') { |
| params.view = opts.view; |
| } |
| } |
| |
| // If opts.query_params exists, pass it through to the changes request. |
| // These parameters may be used by the filter on the source database. |
| if (opts.query_params && typeof opts.query_params === 'object') { |
| for (var param_name in opts.query_params) { |
| /* istanbul ignore else */ |
| if (opts.query_params.hasOwnProperty(param_name)) { |
| params[param_name] = opts.query_params[param_name]; |
| } |
| } |
| } |
| |
| var method = 'GET'; |
| var body; |
| |
| if (opts.doc_ids) { |
| // set this automagically for the user; it's annoying that couchdb |
| // requires both a "filter" and a "doc_ids" param. |
| params.filter = '_doc_ids'; |
| |
| var docIdsJson = JSON.stringify(opts.doc_ids); |
| |
| if (docIdsJson.length < MAX_URL_LENGTH) { |
| params.doc_ids = docIdsJson; |
| } else { |
| // anything greater than ~2000 is unsafe for gets, so |
| // use POST instead |
| method = 'POST'; |
| body = {doc_ids: opts.doc_ids }; |
| } |
| } |
| |
| if (opts.continuous && api._useSSE) { |
| return api.sse(opts, params, returnDocs); |
| } |
| var xhr; |
| var lastFetchedSeq; |
| |
| // Get all the changes starting with the one immediately after the |
| // sequence number given by since. |
| var fetch = function (since, callback) { |
| if (opts.aborted) { |
| return; |
| } |
| params.since = since; |
| // "since" can be any kind of json object in Cloudant/CouchDB 2.x |
| /* istanbul ignore next */ |
| if (typeof params.since === "object") { |
| params.since = JSON.stringify(params.since); |
| } |
| |
| if (opts.descending) { |
| if (limit) { |
| params.limit = leftToFetch; |
| } |
| } else { |
| params.limit = (!limit || leftToFetch > batchSize) ? |
| batchSize : leftToFetch; |
| } |
| |
| var paramStr = '?' + Object.keys(params).map(function (k) { |
| return k + '=' + encodeURIComponent(params[k]); |
| }).join('&'); |
| |
| // Set the options for the ajax call |
| var xhrOpts = { |
| headers: clone(host.headers), |
| method: method, |
| url: genDBUrl(host, '_changes' + paramStr), |
| // _changes can take a long time to generate, especially when filtered |
| timeout: opts.timeout, |
| body: body |
| }; |
| lastFetchedSeq = since; |
| |
| if (opts.aborted) { |
| return; |
| } |
| |
| // Get the changes |
| xhr = ajax(xhrOpts, callback); |
| }; |
| |
| // If opts.since exists, get all the changes from the sequence |
| // number given by opts.since. Otherwise, get all the changes |
| // from the sequence number 0. |
| var fetchTimeout = 10; |
| var fetchRetryCount = 0; |
| |
| var results = {results: []}; |
| |
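| // Process one batch of changes: filter them, emit onChange for matches, |
| // then either fetch the next batch (with exponential backoff on errors) |
| // or call opts.complete |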
| var fetched = function (err, res) { |
| if (opts.aborted) { |
| return; |
| } |
| var raw_results_length = 0; |
| // If the result of the ajax call (res) contains changes (res.results) |
| if (res && res.results) { |
| raw_results_length = res.results.length; |
| results.last_seq = res.last_seq; |
| // For each change |
| var req = {}; |
| req.query = opts.query_params; |
| res.results = res.results.filter(function (c) { |
| leftToFetch--; |
| var ret = utils.filterChange(opts)(c); |
| if (ret) { |
| if (opts.include_docs && opts.attachments && opts.binary) { |
| readAttachmentsAsBlobOrBuffer(c); |
| } |
| if (returnDocs) { |
| results.results.push(c); |
| } |
| utils.call(opts.onChange, c); |
| } |
| return ret; |
| }); |
| } else if (err) { |
| // In case of an error, stop listening for changes and call |
| // opts.complete |
| opts.aborted = true; |
| utils.call(opts.complete, err); |
| return; |
| } |
| |
| // The changes feed may have timed out with no results; |
| // if so, reuse the last update sequence |
| if (res && res.last_seq) { |
| lastFetchedSeq = res.last_seq; |
| } |
| |
| var finished = (limit && leftToFetch <= 0) || |
| (res && raw_results_length < batchSize) || |
| (opts.descending); |
| |
| if ((opts.continuous && !(limit && leftToFetch <= 0)) || !finished) { |
| // Increase retry delay exponentially as long as errors persist |
| if (err) { |
| fetchRetryCount += 1; |
| } else { |
| fetchRetryCount = 0; |
| } |
| var timeoutMultiplier = 1 << fetchRetryCount; |
| var retryWait = fetchTimeout * timeoutMultiplier; |
| var maximumWait = opts.maximumWait || 30000; |
| |
| if (retryWait > maximumWait) { |
| utils.call(opts.complete, err || errors.error(errors.UNKNOWN_ERROR)); |
| return; |
| } |
| |
| // Queue a call to fetch again with the newest sequence number |
| setTimeout(function () { fetch(lastFetchedSeq, fetched); }, retryWait); |
| } else { |
| // We're done, call the callback |
| utils.call(opts.complete, null, results); |
| } |
| }; |
| |
| fetch(opts.since || 0, fetched); |
| |
| // Return an object with a cancel method to stop processing any further |
| // changes |
| return { |
| cancel: function () { |
| opts.aborted = true; |
| if (xhr) { |
| xhr.abort(); |
| } |
| } |
| }; |
| }; |
| |
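| // Stream continuous changes over Server-Sent Events (EventSource) instead |
| // of longpolling; falls back to the regular _changes feed if the |
| // connection fails before it opens |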
| api.sse = function (opts, params, returnDocs) { |
| params.feed = 'eventsource'; |
| params.since = opts.since || 0; |
| params.limit = opts.limit; |
| delete params.timeout; |
| var paramStr = '?' + Object.keys(params).map(function (k) { |
| return k + '=' + params[k]; |
| }).join('&'); |
| var url = genDBUrl(host, '_changes' + paramStr); |
| var source = new EventSource(url); |
| var results = { |
| results: [], |
| last_seq: false |
| }; |
| var dispatched = false; |
| var open = false; |
| source.addEventListener('message', msgHandler, false); |
| source.onopen = function () { |
| open = true; |
| }; |
| source.onerror = errHandler; |
| return { |
| cancel: function () { |
| if (dispatched) { |
| return dispatched.cancel(); |
| } |
| source.removeEventListener('message', msgHandler, false); |
| source.close(); |
| } |
| }; |
| function msgHandler(e) { |
| var data = JSON.parse(e.data); |
| if (returnDocs) { |
| results.results.push(data); |
| } |
| results.last_seq = data.seq; |
| utils.call(opts.onChange, data); |
| } |
| function errHandler(err) { |
| source.removeEventListener('message', msgHandler, false); |
| if (open === false) { |
| // the connection errored before it opened; the server likely does not |
| // support EventSource, so fall back to the regular _changes feed |
| api._useSSE = false; |
| dispatched = api._changes(opts); |
| return; |
| } |
| source.close(); |
| utils.call(opts.complete, err); |
| } |
| |
| }; |
| |
| api._useSSE = false; |
| // Currently disabled due to failing chrome tests in saucelabs |
| // api._useSSE = typeof global.EventSource === 'function'; |
| |
| // Given a set of document/revision IDs (given by req), returns the subset of |
| // those that do NOT correspond to revisions stored in the database. |
| // See http://wiki.apache.org/couchdb/HttpPostRevsDiff |
| api.revsDiff = utils.adapterFun('revsDiff', function (req, opts, callback) { |
| // If no options were given, set the callback to be the second parameter |
| if (typeof opts === 'function') { |
| callback = opts; |
| opts = {}; |
| } |
| |
| // Get the missing document/revision IDs |
| ajax({ |
| headers: clone(host.headers), |
| method: 'POST', |
| url: genDBUrl(host, '_revs_diff'), |
| body: JSON.stringify(req) |
| }, callback); |
| }); |
| |
| api._close = function (callback) { |
| callback(); |
| }; |
| |
| api._destroy = function (callback) { |
| ajax({ |
| url: genDBUrl(host, ''), |
| method: 'DELETE', |
| headers: clone(host.headers) |
| }, function (err, resp) { |
| /* istanbul ignore next */ |
| if (err) { |
| api.emit('error', err); |
| return callback(err); |
| } |
| api.emit('destroyed'); |
| api.constructor.emit('destroyed', opts.name); |
| callback(null, resp); |
| }); |
| }; |
| } |
| |
| // HttpPouch is a valid adapter. |
| HttpPouch.valid = function () { |
| return true; |
| }; |
| |
| module.exports = HttpPouch; |