Remove public access to the db record
This completes the removal of public access to the db record from the
couch application. The large majority of this change removes direct
access to the #db.name, #db.main_pid, and #db.update_seq fields.
COUCHDB-3288
diff --git a/include/couch_db.hrl b/include/couch_db.hrl
index e7cd85d..5abb316 100644
--- a/include/couch_db.hrl
+++ b/include/couch_db.hrl
@@ -128,33 +128,6 @@
handler
}).
--record(db, {
- main_pid = nil,
- compactor_pid = nil,
- instance_start_time, % number of microsecs since jan 1 1970 as a binary string
- fd,
- fd_monitor,
- header = couch_db_header:new(),
- committed_update_seq,
- id_tree,
- seq_tree,
- local_tree,
- update_seq,
- name,
- filepath,
- validate_doc_funs = undefined,
- security = [],
- security_ptr = nil,
- user_ctx = #user_ctx{},
- waiting_delayed_commit = nil,
- revs_limit = 1000,
- fsync_options = [],
- options = [],
- compression,
- before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
- after_doc_read = nil % nil | fun(Doc, Db) -> NewDoc
-}).
-
-record(view_fold_helper_funs, {
reduce_count,
passed_end,
diff --git a/src/couch_auth_cache.erl b/src/couch_auth_cache.erl
index 9b00a9d..54a6794 100644
--- a/src/couch_auth_cache.erl
+++ b/src/couch_auth_cache.erl
@@ -289,8 +289,9 @@
true = ets:insert(?STATE, {auth_db_name, AuthDbName}),
AuthDb = open_auth_db(),
true = ets:insert(?STATE, {auth_db, AuthDb}),
+ DbPid = couch_db:get_pid(AuthDb),
NewState#state{closed = [Ref|Closed],
- db_mon_ref = erlang:monitor(process, AuthDb#db.main_pid)}.
+ db_mon_ref = erlang:monitor(process, DbPid)}.
add_cache_entry(_, _, _, #state{max_cache_size = 0} = State) ->
@@ -331,13 +332,15 @@
nil ->
ok;
AuthDb2 ->
- case AuthDb2#db.update_seq > AuthDb#db.update_seq of
+ AuthDbSeq = couch_db:get_update_seq(AuthDb),
+ AuthDb2Seq = couch_db:get_update_seq(AuthDb2),
+ case AuthDb2Seq > AuthDbSeq of
true ->
{ok, _, _} = couch_db:enum_docs_since(
AuthDb2,
- AuthDb#db.update_seq,
+ AuthDbSeq,
fun(DocInfo, _, _) -> refresh_entry(AuthDb2, DocInfo) end,
- AuthDb#db.update_seq,
+ AuthDbSeq,
[]
),
true = ets:insert(?STATE, {auth_db, AuthDb2});
@@ -395,7 +398,9 @@
nil ->
false;
AuthDb2 ->
- AuthDb2#db.update_seq > AuthDb#db.update_seq
+ AuthDbSeq = couch_db:get_update_seq(AuthDb),
+ AuthDb2Seq = couch_db:get_update_seq(AuthDb2),
+ AuthDb2Seq > AuthDbSeq
end
end,
false
@@ -416,7 +421,7 @@
exec_if_auth_db(Fun, DefRes) ->
case ets:lookup(?STATE, auth_db) of
- [{auth_db, #db{} = AuthDb}] ->
+ [{auth_db, AuthDb}] ->
Fun(AuthDb);
_ ->
DefRes
diff --git a/src/couch_changes.erl b/src/couch_changes.erl
index 52ff39d..ea7f65c 100644
--- a/src/couch_changes.erl
+++ b/src/couch_changes.erl
@@ -78,9 +78,10 @@
_ ->
{false, undefined, undefined}
end,
+ DbName = couch_db:name(Db0),
{StartListenerFun, View} = if UseViewChanges ->
{ok, {_, View0, _}, _, _} = couch_mrview_util:get_view(
- Db0#db.name, DDocName, ViewName, #mrargs{}),
+ DbName, DDocName, ViewName, #mrargs{}),
case View0#mrview.seq_btree of
#btree{} ->
ok;
@@ -89,14 +90,14 @@
end,
SNFun = fun() ->
couch_event:link_listener(
- ?MODULE, handle_view_event, {self(), DDocName}, [{dbname, Db0#db.name}]
+ ?MODULE, handle_view_event, {self(), DDocName}, [{dbname, DbName}]
)
end,
{SNFun, View0};
true ->
SNFun = fun() ->
couch_event:link_listener(
- ?MODULE, handle_db_event, self(), [{dbname, Db0#db.name}]
+ ?MODULE, handle_db_event, self(), [{dbname, DbName}]
)
end,
{SNFun, undefined}
@@ -111,7 +112,7 @@
end,
View2 = if UseViewChanges ->
{ok, {_, View1, _}, _, _} = couch_mrview_util:get_view(
- Db0#db.name, DDocName, ViewName, #mrargs{}),
+ DbName, DDocName, ViewName, #mrargs{}),
View1;
true ->
undefined
@@ -219,11 +220,11 @@
catch _:_ ->
view
end,
- case Db#db.id_tree of
- undefined ->
+ case couch_db:is_clustered(Db) of
+ true ->
DIR = fabric_util:doc_id_and_rev(DDoc),
{fetch, FilterType, Style, DIR, VName};
- _ ->
+ false ->
{FilterType, Style, DDoc, VName}
end;
[] ->
@@ -242,11 +243,11 @@
[DName, FName] ->
{ok, DDoc} = open_ddoc(Db, <<"_design/", DName/binary>>),
check_member_exists(DDoc, [<<"filters">>, FName]),
- case Db#db.id_tree of
- undefined ->
+ case couch_db:is_clustered(Db) of
+ true ->
DIR = fabric_util:doc_id_and_rev(DDoc),
{fetch, custom, Style, Req, DIR, FName};
- _ ->
- false ->
{custom, Style, Req, DDoc, FName}
end;
@@ -395,15 +396,19 @@
throw({bad_request, "Selector error: fields must be JSON array"}).
-open_ddoc(#db{name=DbName, id_tree=undefined}, DDocId) ->
- case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of
- {ok, _} = Resp -> Resp;
- Else -> throw(Else)
- end;
open_ddoc(Db, DDocId) ->
- case couch_db:open_doc(Db, DDocId, [ejson_body]) of
- {ok, _} = Resp -> Resp;
- Else -> throw(Else)
+ DbName = couch_db:name(Db),
+ case couch_db:is_clustered(Db) of
+ true ->
+ case ddoc_cache:open_doc(mem3:dbname(DbName), DDocId) of
+ {ok, _} = Resp -> Resp;
+ Else -> throw(Else)
+ end;
+ false ->
+ case couch_db:open_doc(Db, DDocId, [ejson_body]) of
+ {ok, _} = Resp -> Resp;
+ Else -> throw(Else)
+ end
end.
@@ -566,7 +571,7 @@
send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
- Lookups = couch_btree:lookup(Db#db.id_tree, DocIds),
+ Lookups = couch_db:get_full_doc_infos(Db, DocIds),
FullInfos = lists:foldl(fun
({ok, FDI}, Acc) -> [FDI | Acc];
(not_found, Acc) -> Acc
@@ -575,11 +580,9 @@
send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
- FoldFun = fun(FullDocInfo, _, Acc) ->
- {ok, [FullDocInfo | Acc]}
- end,
+ FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
- {ok, _, FullInfos} = couch_btree:fold(Db#db.id_tree, FoldFun, [], KeyOpts),
+ {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], KeyOpts),
send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
@@ -640,8 +643,8 @@
true ->
case wait_updated(Timeout, TimeoutFun, UserAcc2) of
{updated, UserAcc4} ->
- DbOptions1 = [{user_ctx, Db#db.user_ctx} | DbOptions],
- case couch_db:open(Db#db.name, DbOptions1) of
+ DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions],
+ case couch_db:open(couch_db:name(Db), DbOptions1) of
{ok, Db2} ->
keep_sending_changes(
Args#changes_args{limit=NewLimit},
@@ -665,7 +668,8 @@
maybe_refresh_view(_, undefined, undefined) ->
undefined;
maybe_refresh_view(Db, DDocName, ViewName) ->
- {ok, {_, View, _}, _, _} = couch_mrview_util:get_view(Db#db.name, DDocName, ViewName, #mrargs{}),
+ DbName = couch_db:name(Db),
+ {ok, {_, View, _}, _, _} = couch_mrview_util:get_view(DbName, DDocName, ViewName, #mrargs{}),
View.
end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
diff --git a/src/couch_compaction_daemon.erl b/src/couch_compaction_daemon.erl
index 8f95eb2..f3b646d 100644
--- a/src/couch_compaction_daemon.erl
+++ b/src/couch_compaction_daemon.erl
@@ -319,7 +319,7 @@
{Frag, SpaceRequired} = frag(DbInfo),
couch_log:debug("Fragmentation for database `~s` is ~p%, estimated"
" space for compaction is ~p bytes.",
- [Db#db.name, Frag, SpaceRequired]),
+ [couch_db:name(Db), Frag, SpaceRequired]),
case check_frag(Threshold, Frag) of
false ->
false;
@@ -332,7 +332,7 @@
couch_log:warning("Compaction daemon - skipping database `~s` "
"compaction: the estimated necessary disk space is about ~p"
" bytes but the currently available disk space is ~p bytes.",
- [Db#db.name, SpaceRequired, Free]),
+ [couch_db:name(Db), SpaceRequired, Free]),
false
end
end
diff --git a/src/couch_db.erl b/src/couch_db.erl
index 3a29a3d..75fc730 100644
--- a/src/couch_db.erl
+++ b/src/couch_db.erl
@@ -22,6 +22,9 @@
incref/1,
decref/1,
+ clustered_db/2,
+ clustered_db/3,
+
monitor/1,
monitored_by/1,
is_idle/1,
@@ -32,21 +35,28 @@
name/1,
compression/1,
+ get_after_doc_read_fun/1,
+ get_before_doc_update_fun/1,
get_committed_update_seq/1,
get_compacted_seq/1,
+ get_compactor_pid/1,
get_db_info/1,
get_doc_count/1,
get_epochs/1,
+ get_filepath/1,
get_instance_start_time/1,
get_last_purged/1,
get_pid/1,
get_revs_limit/1,
get_security/1,
get_update_seq/1,
+ get_user_ctx/1,
get_uuid/1,
get_purge_seq/1,
+ is_db/1,
is_system_db/1,
+ is_clustered/1,
increment_update_seq/1,
set_revs_limit/2,
@@ -80,6 +90,8 @@
with_stream/3,
+ fold_docs/4,
+ fold_local_docs/4,
enum_docs/4,
enum_docs_reduce_to_count/1,
@@ -113,6 +125,7 @@
-include_lib("couch/include/couch_db.hrl").
+-include("couch_db_int.hrl").
-define(DBNAME_REGEX,
"^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
@@ -187,6 +200,12 @@
{ok, NewDb#db{user_ctx = UserCtx, fd_monitor = NewRef}}
end.
+clustered_db(DbName, UserCtx) ->
+ clustered_db(DbName, UserCtx, []).
+
+clustered_db(DbName, UserCtx, SecProps) ->
+ {ok, #db{name = DbName, user_ctx = UserCtx, security = SecProps}}.
+
incref(#db{fd = Fd} = Db) ->
Ref = erlang:monitor(process, Fd),
{ok, Db#db{fd_monitor = Ref}}.
@@ -195,9 +214,19 @@
erlang:demonitor(Monitor, [flush]),
ok.
+is_db(#db{}) ->
+ true;
+is_db(_) ->
+ false.
+
is_system_db(#db{options = Options}) ->
lists:member(sys_db, Options).
+is_clustered(#db{main_pid = nil}) ->
+ true;
+is_clustered(#db{}) ->
+ false.
+
ensure_full_commit(#db{main_pid=Pid, instance_start_time=StartTime}) ->
ok = gen_server:call(Pid, full_commit, infinity),
{ok, StartTime}.
@@ -378,12 +407,21 @@
purge_docs(#db{main_pid=Pid}, IdsRevs) ->
gen_server:call(Pid, {purge_docs, IdsRevs}).
+get_after_doc_read_fun(#db{after_doc_read = Fun}) ->
+ Fun.
+
+get_before_doc_update_fun(#db{before_doc_update = Fun}) ->
+ Fun.
+
get_committed_update_seq(#db{committed_update_seq=Seq}) ->
Seq.
get_update_seq(#db{update_seq=Seq})->
Seq.
+get_user_ctx(#db{user_ctx = UserCtx}) ->
+ UserCtx.
+
get_purge_seq(#db{}=Db) ->
couch_db_header:purge_seq(Db#db.header).
@@ -410,12 +448,18 @@
validate_epochs(Epochs),
Epochs.
+get_filepath(#db{filepath = FilePath}) ->
+ FilePath.
+
get_instance_start_time(#db{instance_start_time = IST}) ->
IST.
get_compacted_seq(#db{}=Db) ->
couch_db_header:compacted_seq(Db#db.header).
+get_compactor_pid(#db{compactor_pid = Pid}) ->
+ Pid.
+
get_db_info(Db) ->
#db{fd=Fd,
header=Header,
@@ -1365,6 +1409,17 @@
[{start_key, SinceSeq + 1} | Options]),
{ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}.
+
+fold_docs(Db, InFun, InAcc, Opts) ->
+ Wrapper = fun(FDI, _, Acc) -> InFun(FDI, Acc) end,
+ {ok, _, AccOut} = couch_btree:fold(Db#db.id_tree, Wrapper, InAcc, Opts),
+ {ok, AccOut}.
+
+fold_local_docs(Db, InFun, InAcc, Opts) ->
+ Wrapper = fun(FDI, _, Acc) -> InFun(FDI, Acc) end,
+ {ok, _, AccOut} = couch_btree:fold(Db#db.local_tree, Wrapper, InAcc, Opts),
+ {ok, AccOut}.
+
enum_docs(Db, InFun, InAcc, Options0) ->
{NS, Options} = extract_namespace(Options0),
enum_docs(Db, NS, InFun, InAcc, Options).
diff --git a/src/couch_db_int.hrl b/src/couch_db_int.hrl
new file mode 100644
index 0000000..fc739b7
--- /dev/null
+++ b/src/couch_db_int.hrl
@@ -0,0 +1,38 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(db, {
+ main_pid = nil,
+ compactor_pid = nil,
+ instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+ fd,
+ fd_monitor,
+ header = couch_db_header:new(),
+ committed_update_seq,
+ id_tree,
+ seq_tree,
+ local_tree,
+ update_seq,
+ name,
+ filepath,
+ validate_doc_funs = undefined,
+ security = [],
+ security_ptr = nil,
+ user_ctx = #user_ctx{},
+ waiting_delayed_commit = nil,
+ revs_limit = 1000,
+ fsync_options = [],
+ options = [],
+ compression,
+ before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
+ after_doc_read = nil % nil | fun(Doc, Db) -> NewDoc
+}).
\ No newline at end of file
diff --git a/src/couch_db_plugin.erl b/src/couch_db_plugin.erl
index 774e9e0..740b812 100644
--- a/src/couch_db_plugin.erl
+++ b/src/couch_db_plugin.erl
@@ -32,13 +32,15 @@
validate_dbname(DbName, Normalized, Default) ->
maybe_handle(validate_dbname, [DbName, Normalized], Default).
-before_doc_update(#db{before_doc_update = Fun} = Db, Doc0) ->
+before_doc_update(Db, Doc0) ->
+ Fun = couch_db:get_before_doc_update_fun(Db),
case with_pipe(before_doc_update, [Doc0, Db]) of
[Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
[Doc1, _Db] -> Doc1
end.
-after_doc_read(#db{after_doc_read = Fun} = Db, Doc0) ->
+after_doc_read(Db, Doc0) ->
+ Fun = couch_db:get_after_doc_read_fun(Db),
case with_pipe(after_doc_read, [Doc0, Db]) of
[Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
[Doc1, _Db] -> Doc1
diff --git a/src/couch_db_updater.erl b/src/couch_db_updater.erl
index 270fffe..8f6fc35 100644
--- a/src/couch_db_updater.erl
+++ b/src/couch_db_updater.erl
@@ -20,6 +20,7 @@
-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
-include_lib("couch/include/couch_db.hrl").
+-include("couch_db_int.hrl").
-record(comp_header, {
db_header,
diff --git a/src/couch_httpd_db.erl b/src/couch_httpd_db.erl
index e1af1bf..1198a67 100644
--- a/src/couch_httpd_db.erl
+++ b/src/couch_httpd_db.erl
@@ -70,7 +70,8 @@
handle_changes_req(#httpd{}=Req, _Db, _ChangesArgs, _ChangesFun) ->
couch_httpd:send_method_not_allowed(Req, "GET,HEAD,POST").
-handle_changes_req1(Req, #db{name=DbName}=Db, ChangesArgs, ChangesFun) ->
+handle_changes_req1(Req, Db, ChangesArgs, ChangesFun) ->
+ DbName = couch_db:name(Db),
AuthDbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")),
case AuthDbName of
DbName ->
@@ -287,7 +288,7 @@
RequiredSeq > CommittedSeq ->
couch_db:ensure_full_commit(Db);
true ->
- {ok, Db#db.instance_start_time}
+ {ok, couch_db:get_instance_start_time(Db)}
end
end,
send_json(Req, 201, {[
@@ -733,7 +734,8 @@
update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) ->
- Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(Db#db.name) ++ "/" ++ couch_util:url_encode(DocId)),
+ DbName = couch_db:name(Db),
+ Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(DocId)),
update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]);
update_doc(Req, Db, DocId, Doc) ->
update_doc(Req, Db, DocId, Doc, []).
@@ -1033,7 +1035,7 @@
[];
_ ->
[{"Location", absolute_uri(Req, "/" ++
- couch_util:url_encode(Db#db.name) ++ "/" ++
+ couch_util:url_encode(couch_db:name(Db)) ++ "/" ++
couch_util:url_encode(DocId) ++ "/" ++
couch_util:url_encode(FileName)
)}]
@@ -1145,7 +1147,7 @@
{"descending", "true"} ->
Args#changes_args{dir=rev};
{"since", "now"} ->
- UpdateSeq = couch_util:with_db(Db#db.name, fun(WDb) ->
+ UpdateSeq = couch_util:with_db(couch_db:name(Db), fun(WDb) ->
couch_db:get_update_seq(WDb)
end),
Args#changes_args{since=UpdateSeq};
diff --git a/src/couch_users_db.erl b/src/couch_users_db.erl
index 6f7b9af..c7b41f1 100644
--- a/src/couch_users_db.erl
+++ b/src/couch_users_db.erl
@@ -39,8 +39,8 @@
% -> 404 // Not Found
% Else
% -> save_doc
-before_doc_update(Doc, #db{user_ctx = UserCtx} = Db) ->
- #user_ctx{name=Name} = UserCtx,
+before_doc_update(Doc, Db) ->
+ #user_ctx{name=Name} = couch_db:get_user_ctx(Db),
DocName = get_doc_name(Doc),
case (catch couch_db:check_is_admin(Db)) of
ok ->
@@ -108,8 +108,8 @@
throw({forbidden,
<<"Only administrators can view design docs in the users database.">>})
end;
-after_doc_read(Doc, #db{user_ctx = UserCtx} = Db) ->
- #user_ctx{name=Name} = UserCtx,
+after_doc_read(Doc, Db) ->
+ #user_ctx{name=Name} = couch_db:get_user_ctx(Db),
DocName = get_doc_name(Doc),
case (catch couch_db:check_is_admin(Db)) of
ok ->
diff --git a/src/couch_util.erl b/src/couch_util.erl
index 6001ae2..d688c12 100644
--- a/src/couch_util.erl
+++ b/src/couch_util.erl
@@ -198,7 +198,9 @@
json_apply_field({Key, NewValue}, [], Acc) ->
{[{Key, NewValue}|Acc]}.
-json_user_ctx(#db{name=ShardName, user_ctx=Ctx}) ->
+json_user_ctx(Db) ->
+ ShardName = couch_db:name(Db),
+ Ctx = couch_db:get_user_ctx(Db),
{[{<<"db">>, mem3:dbname(ShardName)},
{<<"name">>,Ctx#user_ctx.name},
{<<"roles">>,Ctx#user_ctx.roles}]}.
@@ -455,9 +457,7 @@
url_encode(Id).
-with_db(Db, Fun) when is_record(Db, db) ->
- Fun(Db);
-with_db(DbName, Fun) ->
+with_db(DbName, Fun) when is_binary(DbName) ->
case couch_db:open_int(DbName, [?ADMIN_CTX]) of
{ok, Db} ->
try
@@ -467,6 +467,13 @@
end;
Else ->
throw(Else)
+ end;
+with_db(Db, Fun) ->
+ case couch_db:is_db(Db) of
+ true ->
+ Fun(Db);
+ false ->
+ erlang:error({invalid_db, Db})
end.
rfc1123_date() ->
diff --git a/test/couch_auth_cache_tests.erl b/test/couch_auth_cache_tests.erl
index 76179de..08aecd1 100644
--- a/test/couch_auth_cache_tests.erl
+++ b/test/couch_auth_cache_tests.erl
@@ -265,7 +265,7 @@
shutdown_db(DbName) ->
{ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
ok = couch_db:close(AuthDb),
- couch_util:shutdown_sync(AuthDb#db.main_pid),
+ couch_util:shutdown_sync(couch_db:get_pid(AuthDb)),
ok = timer:sleep(1000).
get_doc_rev(DbName, UserName) ->
diff --git a/test/couch_changes_tests.erl b/test/couch_changes_tests.erl
index 3c0e5f6..494d90f 100644
--- a/test/couch_changes_tests.erl
+++ b/test/couch_changes_tests.erl
@@ -645,7 +645,7 @@
]}),
ChArgs = #changes_args{filter = "app/valid"},
UserCtx = #user_ctx{name = <<"doc3">>, roles = []},
- DbRec = #db{name = DbName, user_ctx = UserCtx},
+ {ok, DbRec} = couch_db:clustered_db(DbName, UserCtx),
Req = {json_req, {[{
<<"userCtx">>, couch_util:json_user_ctx(DbRec)
}]}},
diff --git a/test/couch_db_plugin_tests.erl b/test/couch_db_plugin_tests.erl
index ea9b230..94dd3df 100644
--- a/test/couch_db_plugin_tests.erl
+++ b/test/couch_db_plugin_tests.erl
@@ -43,6 +43,7 @@
data_subscriptions() -> [].
processes() -> [].
notify(_, _, _) -> ok.
+fake_db() -> element(2, couch_db:clustered_db(fake, totes_fake)).
setup() ->
couch_tests:setup([
@@ -133,33 +134,33 @@
before_doc_update_match() ->
?assertMatch(
{true, [before_doc_update, doc]},
- couch_db_plugin:before_doc_update(#db{}, {true, [doc]})).
+ couch_db_plugin:before_doc_update(fake_db(), {true, [doc]})).
before_doc_update_no_match() ->
?assertMatch(
{false, [doc]},
- couch_db_plugin:before_doc_update(#db{}, {false, [doc]})).
+ couch_db_plugin:before_doc_update(fake_db(), {false, [doc]})).
before_doc_update_throw() ->
?assertThrow(
before_doc_update,
- couch_db_plugin:before_doc_update(#db{}, {fail, [doc]})).
+ couch_db_plugin:before_doc_update(fake_db(), {fail, [doc]})).
after_doc_read_match() ->
?assertMatch(
{true, [after_doc_read, doc]},
- couch_db_plugin:after_doc_read(#db{}, {true, [doc]})).
+ couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})).
after_doc_read_no_match() ->
?assertMatch(
{false, [doc]},
- couch_db_plugin:after_doc_read(#db{}, {false, [doc]})).
+ couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})).
after_doc_read_throw() ->
?assertThrow(
after_doc_read,
- couch_db_plugin:after_doc_read(#db{}, {fail, [doc]})).
+ couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})).
validate_docid_match() ->
diff --git a/test/couch_server_tests.erl b/test/couch_server_tests.erl
index c8f8381..4fd7ff2 100644
--- a/test/couch_server_tests.erl
+++ b/test/couch_server_tests.erl
@@ -32,8 +32,7 @@
setup().
teardown(Db) ->
- (catch couch_db:close(Db)),
- (catch file:delete(Db#db.filepath)).
+ (catch couch_db:close(Db)).
teardown(rename, Db) ->
config:set("couchdb", "enable_database_recovery", "false", false),
@@ -61,7 +60,9 @@
{foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
}.
-should_rename_on_delete(_, #db{filepath = Origin, name = DbName}) ->
+should_rename_on_delete(_, Db) ->
+ DbName = couch_db:name(Db),
+ Origin = couch_db:get_filepath(Db),
?_test(begin
?assert(filelib:is_regular(Origin)),
?assertMatch(ok, couch_server:delete(DbName, [])),
@@ -74,7 +75,9 @@
?assert(filelib:is_regular(Renamed))
end).
-should_delete(_, #db{filepath = Origin, name = DbName}) ->
+should_delete(_, Db) ->
+ DbName = couch_db:name(Db),
+ Origin = couch_db:get_filepath(Db),
?_test(begin
?assert(filelib:is_regular(Origin)),
?assertMatch(ok, couch_server:delete(DbName, [])),
diff --git a/test/couchdb_compaction_daemon_tests.erl b/test/couchdb_compaction_daemon_tests.erl
index 25d9b13..70e505e 100644
--- a/test/couchdb_compaction_daemon_tests.erl
+++ b/test/couchdb_compaction_daemon_tests.erl
@@ -182,7 +182,7 @@
lists:foreach(fun(_) ->
Doc = couch_doc:from_json_obj({[{<<"_id">>, couch_uuids:new()}]}),
{ok, _} = couch_db:update_docs(Db, [Doc]),
- query_view(Db#db.name)
+ query_view(couch_db:name(Db))
end, lists:seq(1, 200)),
couch_db:close(Db).
diff --git a/test/couchdb_views_tests.erl b/test/couchdb_views_tests.erl
index f1fddfc..02e9d72 100644
--- a/test/couchdb_views_tests.erl
+++ b/test/couchdb_views_tests.erl
@@ -340,7 +340,7 @@
]}),
{ok, _} = couch_db:update_doc(MDb1, DDoc, []),
ok = populate_db(MDb1, 100, 100),
- query_view(MDb1#db.name, "foo", "foo"),
+ query_view(couch_db:name(MDb1), "foo", "foo"),
ok = couch_db:close(MDb1),
{ok, Db1} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
@@ -350,8 +350,8 @@
{ok, Db3} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
ok = couch_db:close(Db3),
- Writer1 = spawn_writer(Db1#db.name),
- Writer2 = spawn_writer(Db2#db.name),
+ Writer1 = spawn_writer(couch_db:name(Db1)),
+ Writer2 = spawn_writer(couch_db:name(Db2)),
?assert(is_process_alive(Writer1)),
?assert(is_process_alive(Writer2)),
@@ -361,16 +361,16 @@
%% Below we do exactly the same as couch_mrview:compact holds inside
%% because we need have access to compaction Pid, not a Ref.
- %% {ok, MonRef} = couch_mrview:compact(MDb1#db.name, <<"_design/foo">>,
+ %% {ok, MonRef} = couch_mrview:compact(MDb1, <<"_design/foo">>,
%% [monitor]),
{ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, MDb1#db.name, <<"_design/foo">>),
+ couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>),
{ok, CPid} = gen_server:call(Pid, compact),
%% By suspending compaction process we ensure that compaction won't get
%% finished too early to make get_writer_status assertion fail.
erlang:suspend_process(CPid),
MonRef = erlang:monitor(process, CPid),
- Writer3 = spawn_writer(Db3#db.name),
+ Writer3 = spawn_writer(couch_db:name(Db3)),
?assert(is_process_alive(Writer3)),
?assert(is_process_alive(Writer1)),
@@ -526,7 +526,8 @@
count_users(DbName) ->
{ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {monitored_by, Monitors} = erlang:process_info(Db#db.main_pid, monitored_by),
+ DbPid = couch_db:get_pid(Db),
+ {monitored_by, Monitors} = erlang:process_info(DbPid, monitored_by),
CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined],
ok = couch_db:close(Db),
length(lists:usort(Monitors) -- [self() | CouchFiles]).
@@ -552,7 +553,8 @@
{ok, Db} = couch_db:open_int(DbName, []),
ok = couch_db:close(Db),
- exit(Db#db.main_pid, shutdown),
+ DbPid = couch_db:get_pid(Db),
+ exit(DbPid, shutdown),
DbFile = filename:join([DbDir, ?b2l(DbName) ++ ".couch"]),
ok = file:delete(DbFile),
@@ -573,7 +575,8 @@
wait_db_compact_done(DbName, N) ->
{ok, Db} = couch_db:open_int(DbName, []),
ok = couch_db:close(Db),
- case is_pid(Db#db.compactor_pid) of
+ CompactorPid = couch_db:get_compactor_pid(Db),
+ case is_pid(CompactorPid) of
false ->
ok;
true ->