Merge branch '593-setup-single-node' of https://github.com/apache/couchdb-setup
diff --git a/.gitignore b/.gitignore
index 1dbfa4b..f84f14c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,4 @@
 ebin
 .rebar
+*~
+*.swp
diff --git a/README.md b/README.md
index a6c6d18..e30c400 100644
--- a/README.md
+++ b/README.md
@@ -2,22 +2,27 @@
 
 ### Testing
 
-```
+```bash
 git clone https://git-wip-us.apache.org/repos/asf/couchdb.git
 cd couchdb
 git checkout setup
 ./configure
 make
-
-# in dev/run comment out the line `connect_nodes("127.0.0.1", 15984)`
-
-dev/run --admin a:b
-
-# in a new terminal
-src/setup/test/t.sh
-
+dev/run --no-join -n 2 --admin a:b
 ```
 
+Then, in a new terminal:
+
+    $ src/setup/test/t.sh
+
+Before running each test, kill the `dev/run` script, then reset the
+CouchDB instances with:
+
+    $ rm -rf dev/lib/ dev/logs/
+    $ dev/run --no-join -n 2 --admin a:b
+
+This ensures the next test script starts from a clean state.
+
 The Plan:
 
 N. End User Action
diff --git a/src/setup.erl b/src/setup.erl
index 5a71004..d0ecd2c 100644
--- a/src/setup.erl
+++ b/src/setup.erl
@@ -12,8 +12,9 @@
 
 -module(setup).
 
--export([enable_cluster/1, finish_cluster/0, add_node/1, receive_cookie/1]).
--export([is_cluster_enabled/0, has_cluster_system_dbs/0]).
+-export([enable_cluster/1, finish_cluster/1, add_node/1, receive_cookie/1]).
+-export([is_cluster_enabled/0, has_cluster_system_dbs/1, cluster_system_dbs/0]).
+-export([enable_single_node/1, is_single_node_enabled/1]).
 
 -include_lib("../couch/include/couch_db.hrl").
 
@@ -44,25 +45,31 @@
     BindAddress = config:get("chttpd", "bind_address"),
     Admins = config:get("admins"),
     case {BindAddress, Admins} of
-        {"127.0.0.1", _} -> no;
-        {_,[]} -> no;
-        {_,_} -> ok
+        {"127.0.0.1", _} -> false;
+        {_,[]} -> false;
+        {_,_} -> true
     end.
 
+is_single_node_enabled(Dbs) ->
+    % admins != empty AND dbs exist
+    Admins = config:get("admins"),
+    HasDbs = has_cluster_system_dbs(Dbs),
+    case {Admins, HasDbs} of
+        {[], _} -> false;
+        {_, false} -> false;
+        {_,_} -> true
+    end.
 
 cluster_system_dbs() ->
     ["_users", "_replicator", "_global_changes"].
 
 
-has_cluster_system_dbs() ->
-    has_cluster_system_dbs(cluster_system_dbs()).
-
 has_cluster_system_dbs([]) ->
-    ok;
+    true;
 has_cluster_system_dbs([Db|Dbs]) ->
     case catch fabric:get_db_info(Db) of
         {ok, _} -> has_cluster_system_dbs(Dbs);
-        _ -> no
+        _ -> false
     end.
 
 enable_cluster(Options) ->
@@ -119,9 +126,9 @@
             {error, Else}
     end.
 
-enable_cluster_int(_Options, ok) ->
+enable_cluster_int(_Options, true) ->
     {error, cluster_enabled};
-enable_cluster_int(Options, no) ->
+enable_cluster_int(Options, false) ->
 
     % if no admin in config and no admin in req -> error
     CurrentAdmins = config:get("admins"),
@@ -132,13 +139,22 @@
           Pw -> Pw
         end
     },
-
+    ok = require_admins(CurrentAdmins, NewCredentials),
     % if bind_address == 127.0.0.1 and no bind_address in req -> error
     CurrentBindAddress = config:get("chttpd","bind_address"),
     NewBindAddress = proplists:get_value(bind_address, Options),
-    ok = require_admins(CurrentAdmins, NewCredentials),
     ok = require_bind_address(CurrentBindAddress, NewBindAddress),
+    NodeCount = couch_util:get_value(node_count, Options),
+    ok = require_node_count(NodeCount),
+    Port = proplists:get_value(port, Options),
 
+    setup_node(NewCredentials, NewBindAddress, NodeCount, Port),
+    couch_log:notice("Enable Cluster: ~p~n", [Options]).
+
+set_admin(Username, Password) ->
+    config:set("admins", binary_to_list(Username), binary_to_list(Password)).
+
+setup_node(NewCredentials, NewBindAddress, NodeCount, Port) ->
     case NewCredentials of
         {undefined, undefined} ->
             ok;
@@ -153,11 +169,8 @@
             config:set("chttpd", "bind_address", binary_to_list(NewBindAddress))
     end,
 
-    NodeCount = couch_util:get_value(node_count, Options),
-    ok = require_node_count(NodeCount),
     config:set_integer("cluster", "n", NodeCount),
 
-    Port = proplists:get_value(port, Options),
     case Port of
         undefined ->
             ok;
@@ -165,27 +178,46 @@
             config:set("chttpd", "port", binary_to_list(Port));
         Port when is_integer(Port) ->
             config:set_integer("chttpd", "port", Port)
-    end,
-    couch_log:notice("Enable Cluster: ~p~n", [Options]).
-
-set_admin(Username, Password) ->
-  config:set("admins", binary_to_list(Username), binary_to_list(Password)).
+    end.
 
 
-finish_cluster() ->
-    finish_cluster_int(has_cluster_system_dbs()).
-finish_cluster_int(ok) ->
+finish_cluster(Options) ->
+    Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
+    finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)).
+
+finish_cluster_int(_Dbs, true) ->
     {error, cluster_finished};
-finish_cluster_int(no) ->
-    lists:foreach(fun fabric:create_db/1, cluster_system_dbs()).
+finish_cluster_int(Dbs, false) ->
+    lists:foreach(fun fabric:create_db/1, Dbs).
+
+
+enable_single_node(Options) ->
+    % if no admin in config and no admin in req -> error
+    CurrentAdmins = config:get("admins"),
+    NewCredentials = {
+        proplists:get_value(username, Options),
+        case proplists:get_value(password_hash, Options) of
+          undefined -> proplists:get_value(password, Options);
+          Pw -> Pw
+        end
+    },
+    ok = require_admins(CurrentAdmins, NewCredentials),
+    % skip bind_address validation, anything is fine
+    NewBindAddress = proplists:get_value(bind_address, Options),
+    Port = proplists:get_value(port, Options),
+
+    setup_node(NewCredentials, NewBindAddress, 1, Port),
+    Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
+    finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)),
+    couch_log:notice("Enable Single Node: ~p~n", [Options]).
 
 
 add_node(Options) ->
     add_node_int(Options, is_cluster_enabled()).
 
-add_node_int(_Options, no) ->
+add_node_int(_Options, false) ->
     {error, cluster_not_enabled};
-add_node_int(Options, ok) ->
+add_node_int(Options, true) ->
     couch_log:notice("add node_int: ~p~n", [Options]),
     ErlangCookie = erlang:get_cookie(),
 
diff --git a/src/setup_httpd.erl b/src/setup_httpd.erl
index a23a3e2..f4e05ce 100644
--- a/src/setup_httpd.erl
+++ b/src/setup_httpd.erl
@@ -29,15 +29,27 @@
     end;
 handle_setup_req(#httpd{method='GET'}=Req) ->
     ok = chttpd:verify_is_server_admin(Req),
-    case setup:is_cluster_enabled() of
-        no ->
-            chttpd:send_json(Req, 200, {[{state, cluster_disabled}]});
-        ok ->
-            case setup:has_cluster_system_dbs() of
-                no ->
-                    chttpd:send_json(Req, 200, {[{state, cluster_enabled}]});
-                ok ->
-                    chttpd:send_json(Req, 200, {[{state, cluster_finished}]})
+    Dbs = chttpd:qs_json_value(Req, "ensure_dbs_exist", setup:cluster_system_dbs()),
+    couch_log:notice("Dbs: ~p~n", [Dbs]),
+    case erlang:list_to_integer(config:get("cluster", "n", undefined)) of
+        1 ->
+            case setup:is_single_node_enabled(Dbs) of
+                false ->
+                    chttpd:send_json(Req, 200, {[{state, single_node_disabled}]});
+                true ->
+                    chttpd:send_json(Req, 200, {[{state, single_node_enabled}]})
+            end;
+        _ ->
+            case setup:is_cluster_enabled() of
+                false ->
+                    chttpd:send_json(Req, 200, {[{state, cluster_disabled}]});
+                true ->
+                    case setup:has_cluster_system_dbs(Dbs) of
+                        false ->
+                            chttpd:send_json(Req, 200, {[{state, cluster_enabled}]});
+                        true ->
+                            chttpd:send_json(Req, 200, {[{state, cluster_finished}]})
+                    end
             end
     end;
 handle_setup_req(#httpd{}=Req) ->
@@ -74,7 +86,30 @@
 
 handle_action("finish_cluster", Setup) ->
     couch_log:notice("finish_cluster: ~p~n", [Setup]),
-    case setup:finish_cluster() of
+
+    Options = get_options([
+        {ensure_dbs_exist, <<"ensure_dbs_exist">>}
+    ], Setup),
+    case setup:finish_cluster(Options) of
+        {error, cluster_finished} ->
+            {error, <<"Cluster is already finished">>};
+        Else ->
+            couch_log:notice("finish_cluster: ~p~n", [Else]),
+            ok
+    end;
+
+handle_action("enable_single_node", Setup) ->
+    couch_log:notice("enable_single_node: ~p~n", [Setup]),
+
+    Options = get_options([
+        {ensure_dbs_exist, <<"ensure_dbs_exist">>},
+        {username, <<"username">>},
+        {password, <<"password">>},
+        {password_hash, <<"password_hash">>},
+        {bind_address, <<"bind_address">>},
+        {port, <<"port">>}
+    ], Setup),
+    case setup:enable_single_node(Options) of
         {error, cluster_finished} ->
             {error, <<"Cluster is already finished">>};
         Else ->
@@ -82,6 +117,7 @@
             ok
     end;
 
+
 handle_action("add_node", Setup) ->
     couch_log:notice("add_node: ~p~n", [Setup]),
 
diff --git a/test/t-single-node.sh b/test/t-single-node.sh
new file mode 100755
index 0000000..d490437
--- /dev/null
+++ b/test/t-single-node.sh
@@ -0,0 +1,46 @@
+#!/bin/sh -ex
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+HEADERS="-HContent-Type:application/json"
+# show cluster state:
+curl a:b@127.0.0.1:15986/_nodes/_all_docs
+curl a:b@127.0.0.1:15984/_cluster_setup
+
+# Enable Cluster on single node
+curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_single_node","username":"foo","password":"baz","bind_address":"127.0.0.1"}' $HEADERS
+
+# Show cluster state:
+curl a:b@127.0.0.1:15986/_nodes/_all_docs
+curl a:b@127.0.0.1:15984/_all_dbs
+curl a:b@127.0.0.1:15984/_cluster_setup
+
+# Delete a database
+curl -X DELETE a:b@127.0.0.1:15984/_global_changes
+
+# Should show single_node_disabled
+curl a:b@127.0.0.1:15984/_cluster_setup
+
+# Change the check
+curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_replicator","_users"]'
+
+# delete all the things
+curl -X DELETE a:b@127.0.0.1:15984/_replicator
+curl -X DELETE a:b@127.0.0.1:15984/_users
+
+# setup only creating _users
+curl -g a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_single_node","username":"foo","password":"baz","bind_address":"127.0.0.1","ensure_dbs_exist":["_users"]}' $HEADERS
+
+# check it
+curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_users"]'
+
+echo "YAY ALL GOOD"