Fix typos
diff --git a/.github/ISSUE_TEMPLATE/rfc.md b/.github/ISSUE_TEMPLATE/rfc.md
index 08bd054..a966bd9 100644
--- a/.github/ISSUE_TEMPLATE/rfc.md
+++ b/.github/ISSUE_TEMPLATE/rfc.md
@@ -62,7 +62,7 @@
 [NOTE]: # (   Headers and parameters accepted )
 [NOTE]: # (   JSON in [if a PUT or POST type] )
 [NOTE]: # (   JSON out )
-[NOTE]: # (   Valid status codes and their defintions )
+[NOTE]: # (   Valid status codes and their definitions )
 [NOTE]: # (   A proposed Request and Response block )
 
 ## HTTP API deprecations
diff --git a/dev/run b/dev/run
index ecc45c9..ef48f7a 100755
--- a/dev/run
+++ b/dev/run
@@ -538,7 +538,7 @@
         ctx["procs"].append(haproxy_proc)
 
 
-@log("Stoping proc {proc.pid}")
+@log("Stopping proc {proc.pid}")
 def kill_process(proc):
     if proc and proc.returncode is None:
         proc.kill()
diff --git a/mix.exs b/mix.exs
index dd6da4a..a2d83d3 100644
--- a/mix.exs
+++ b/mix.exs
@@ -28,7 +28,7 @@
   ```
   """
   use Mix.Task
-  @shortdoc "Outputs all availabe integration tests"
+  @shortdoc "Outputs all available integration tests"
   def run(_) do
     Path.wildcard(Path.join(Mix.Project.build_path(), "/**/ebin"))
     |> Enum.filter(&File.dir?/1)
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 1b8441f..a9a4d64 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -415,7 +415,7 @@
 ;accept_jitter = 2000
 
 ; Minimum time in seconds replication jobs will be left running before being
-; rotated when all the schedule slots are filled. This migth be useful if
+; rotated when all the schedule slots are filled. This might be useful if
 ; max_jobs is very low, but jobs should be left running long enough to make at
 ; least some progress before being replaced
 ;min_run_time_sec = 60
@@ -492,7 +492,7 @@
 ; How much time to wait before retrying after a missing doc exception. This
 ; exception happens if the document was seen in the changes feed, but internal
 ; replication hasn't caught up yet, and fetching document's revisions
-; fails. This a common scenario when source is updated while continous
+; fails. This is a common scenario when source is updated while continuous
 ; replication is running. The retry period would depend on how quickly internal
 ; replication is expected to catch up. In general this is an optimisation to
 ; avoid crashing the whole replication job, which would consume more resources
@@ -563,7 +563,7 @@
 ; The journald writer doesn't have any options. It still writes
 ; the logs to stderr, but without the timestamp prepended, since
 ; the journal will add it automatically, and with the log level
-; formated as per
+; formatted as per
 ; https://www.freedesktop.org/software/systemd/man/sd-daemon.html
 ;
 ;
@@ -605,7 +605,7 @@
 ;activity_monitor_max_jitter_msec = 10000
 ;
 ; Hold-off applied before notifying subscribers. Since active jobs can be
-; queried more effiently using a range read, increasing this value should make
+; queried more efficiently using a range read, increasing this value should make
 ; notifications more performant, however, it would also increase notification
 ; latency.
 ;type_monitor_holdoff_msec = 50
@@ -614,7 +614,7 @@
 ; value of "infinity" should work well in most cases.
 ;type_monitor_timeout_msec = infinity
 ;
-; How often to check for the presense of new job types.
+; How often to check for the presence of new job types.
 ;type_check_period_msec = 15000
 ;
 ; Jitter applied when checking for new job types.
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index b788e82..2dfb3e1 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -65,7 +65,7 @@
 ;tls_versions = [tlsv1, 'tlsv1.1', 'tlsv1.2']
 
 ; To enable Virtual Hosts in CouchDB, add a vhost = path directive. All requests to
-; the Virual Host will be redirected to the path. In the example below all requests
+; the Virtual Host will be redirected to the path. In the example below all requests
 ; to http://example.com/ are redirected to /database.
 ; If you run CouchDB on a specific port, include the port number in the vhost:
 ; example.com:5984 = /database
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index 7a05b2c..193bd93 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -1187,7 +1187,7 @@
         "security migration."
     >>};
 error_info(all_workers_died) ->
-    {503, <<"service unvailable">>, <<
+    {503, <<"service unavailable">>, <<
         "Nodes are unable to service this "
         "request due to overloading or maintenance mode."
     >>};
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
index d8c6a12..32f2487 100644
--- a/src/chttpd/src/chttpd_sup.erl
+++ b/src/chttpd/src/chttpd_sup.erl
@@ -115,7 +115,7 @@
         details => "value must be a non-negative integer"
     }),
     couch_log:error(
-        "The value for `~s` should be string convertable "
+        "The value for `~s` should be string convertible "
         "to integer which is >= 0 (got `~p`)",
         [Key, Value]
     ),
diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl
index 13b593e..1eecc74 100644
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -126,8 +126,8 @@
     It calls a user provided callback for every node of the tree.
     `Fun(Key, Value, Pos, Acc) -> {NewValue, NewAcc}`.
     Where:
-      - Key of the node (usualy Pid of a process)
-      - Value of the node (usualy information collected by link_tree)
+      - Key of the node (usually Pid of a process)
+      - Value of the node (usually information collected by link_tree)
       - Pos - depth from the root of the tree
       - Acc - user's accumulator
 
@@ -142,8 +142,8 @@
     It calls a user provided callback
     `Fun(Key, Value, Pos) -> NewValue`
     Where:
-      - Key of the node (usualy Pid of a process)
-      - Value of the node (usualy information collected by link_tree)
+      - Key of the node (usually Pid of a process)
+      - Value of the node (usually information collected by link_tree)
       - Pos - depth from the root of the tree
 
     ---
@@ -155,8 +155,8 @@
     about the tree. It calls a user provided callback
     `Fun(Key, Value, Pos) -> NewValue`
     Where:
-      - Key of the node (usualy Pid of a process)
-      - Value of the node (usualy information collected by link_tree)
+      - Key of the node (usually Pid of a process)
+      - Value of the node (usually information collected by link_tree)
       - Pos - depth from the root of the tree
 
     ---
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 1de47a3..87ba4b4 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -988,7 +988,7 @@
 http_respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
     % Special handling for the 413 response. Make sure the socket is closed as
     % we don't know how much data was read before the error was thrown. Also
-    % drain all the data in the receive buffer to avoid connction being reset
+    % drain all the data in the receive buffer to avoid connection being reset
     % before the 413 response is parsed by the client. This is still racy, it
     % just increases the chances of 413 being detected correctly by the client
     % (rather than getting a brutal TCP reset).
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index 802dcd9..17797eb 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -191,7 +191,7 @@
 %   * X-Auth-CouchDB-Token : token to authenticate the authorization (x_auth_token
 %   in couch_httpd_auth section). This token is an hmac-sha1 created from secret key
-%   and username. The secret key should be the same in the client and couchdb node. s
-%   ecret key is the secret key in couch_httpd_auth section of ini. This token is optional
+%   and username. The secret key should be the same in the client and couchdb node.
+%   Secret key is the secret key in couch_httpd_auth section of ini. This token is optional
 %   if value of proxy_use_secret key in couch_httpd_auth section of ini isn't true.
 %
 proxy_authentication_handler(Req) ->
diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl
index 024b905..3d31290 100644
--- a/src/couch/src/couch_httpd_vhost.erl
+++ b/src/couch/src/couch_httpd_vhost.erl
@@ -50,7 +50,7 @@
 %% example.com = /example
 %% *.example.com = /example
 %%
-%% The first line will rewrite the rquest to display the content of the
+%% The first line will rewrite the request to display the content of the
 %% example database. This rule works only if the Host header is
 %% 'example.com' and won't work for CNAMEs. Second rule on the other hand
 %% match all CNAMES to example db. So www.example.com or db.example.com
diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl
index 90f3508..9f15037 100644
--- a/src/couch/src/couch_key_tree.erl
+++ b/src/couch/src/couch_key_tree.erl
@@ -27,7 +27,7 @@
 %% and C. We now have two key trees, A->B and A->C. When we go to replicate a
 %% second time, the key tree must combine these two trees which gives us
 %% A->(B|C). This is how conflicts are introduced. In terms of the key tree, we
-%% say that we have two leaves (B and C) that are not deleted. The presense of
+%% say that we have two leaves (B and C) that are not deleted. The presence of
 %% the multiple leaves indicate conflict. To remove a conflict, one of the
 %% edits (B or C) can be deleted, which results in, A->(B|C->D) where D is an
 %% edit that is specially marked with the a deleted=true flag.
@@ -110,7 +110,7 @@
     % Its helpful to note that this whole moving into sub-branches is due
     % to how we store trees that have been stemmed. When a path is
     % stemmed so that the root node is lost, we wrap it in a tuple with
-    % the number keys that have been droped. This number is the depth
+    % the number of keys that have been dropped. This number is the depth
     % value that's used throughout this module.
     case merge_at([Nodes], Depth - IDepth, [INodes]) of
         {[Merged], Result} ->
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index dc13175..17144db 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -293,7 +293,7 @@
 % given a pathname "../foo/bar/" it gives back the fully qualified
 % absolute pathname.
 abs_pathname(" " ++ Filename) ->
-    % strip leading whitspace
+    % strip leading whitespace
     abs_pathname(Filename);
 abs_pathname([$/ | _] = Filename) ->
     Filename;
@@ -315,7 +315,7 @@
             OutFilename
     end.
 
-% if this as an executable with arguments, seperate out the arguments
+% if this is an executable with arguments, separate out the arguments
 % ""./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
 separate_cmd_args("", CmdAcc) ->
     {lists:reverse(CmdAcc), ""};
@@ -380,7 +380,7 @@
 drop_dot_couch_ext(DbName) when is_list(DbName) ->
     binary_to_list(drop_dot_couch_ext(iolist_to_binary(DbName))).
 
-% takes a heirarchical list of dirs and removes the dots ".", double dots
+% takes a hierarchical list of dirs and removes the dots ".", double dots
 % ".." and the corresponding parent dirs.
 fix_path_list([], Acc) ->
     lists:reverse(Acc);
diff --git a/src/couch/test/eunit/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl
index 776d274..c2b81c4 100644
--- a/src/couch/test/eunit/couch_doc_json_tests.erl
+++ b/src/couch/test/eunit/couch_doc_json_tests.erl
@@ -233,7 +233,7 @@
         },
         {
             {[{<<"_rev">>, "foo-bar"}]},
-            "Error if _rev's integer expection is broken."
+            "Error if _rev's integer exception is broken."
         },
         {
             {[{<<"_revisions">>, {[{<<"start">>, true}]}}]},
diff --git a/src/couch/test/eunit/couch_js_tests.erl b/src/couch/test/eunit/couch_js_tests.erl
index 1079678..ea28d40 100644
--- a/src/couch/test/eunit/couch_js_tests.erl
+++ b/src/couch/test/eunit/couch_js_tests.erl
@@ -71,7 +71,7 @@
     ?assertEqual([[[<<16#C3, 16#84>>, <<16#C3, 16#9C>>]]], Result).
 
 should_roundtrip_modified_utf8() ->
-    % Mimicing the test case from the mailing list
+    % Mimicking the test case from the mailing list
     Src = <<
         "function(doc) {\n"
         "  emit(doc.value.toLowerCase(), \"",
diff --git a/src/couch_jobs/README.md b/src/couch_jobs/README.md
index bc45d32..0a8bbd0 100644
--- a/src/couch_jobs/README.md
+++ b/src/couch_jobs/README.md
@@ -17,7 +17,7 @@
    packing and unpacking, reading ranges and also managing transaction objects.
 
  * `couch_jobs_pending`: This module implements the pending jobs queue. These
-   functions could all go in `couch_jobs_fdb` but the implemention was fairly
+   functions could all go in `couch_jobs_fdb` but the implementation was fairly
    self-contained, with its own private helper functions, so it made sense to
    move to a separate module.
 
diff --git a/src/couch_log/src/couch_log_trunc_io_fmt.erl b/src/couch_log/src/couch_log_trunc_io_fmt.erl
index cf18019..40f3248 100644
--- a/src/couch_log/src/couch_log_trunc_io_fmt.erl
+++ b/src/couch_log/src/couch_log_trunc_io_fmt.erl
@@ -118,7 +118,7 @@
 
 %% collect_cc([FormatChar], [Argument]) ->
 %%         {Control,[ControlArg],[FormatChar],[Arg]}.
-%%  Here we collect the argments for each control character.
+%%  Here we collect the arguments for each control character.
 %%  Be explicit to cause failure early.
 
 collect_cc([$w | Fmt], [A | Args]) -> {$w, [A], Fmt, Args};
diff --git a/src/couch_log/test/eunit/couch_log_formatter_test.erl b/src/couch_log/test/eunit/couch_log_formatter_test.erl
index d516c2b..a4de749 100644
--- a/src/couch_log/test/eunit/couch_log_formatter_test.erl
+++ b/src/couch_log/test/eunit/couch_log_formatter_test.erl
@@ -811,7 +811,7 @@
         lists:flatten(couch_log_formatter:format_trace(Trace))
     ),
 
-    % Excercising print_silly_list
+    % Exercising print_silly_list
     ?assertMatch(
         #log_entry{
             level = error,
@@ -826,7 +826,7 @@
         )
     ),
 
-    % Excercising print_silly_list
+    % Exercising print_silly_list
     ?assertMatch(
         #log_entry{
             level = error,
diff --git a/src/couch_prometheus/src/couch_prometheus_util.erl b/src/couch_prometheus/src/couch_prometheus_util.erl
index ea2cdf7..255df68 100644
--- a/src/couch_prometheus/src/couch_prometheus_util.erl
+++ b/src/couch_prometheus/src/couch_prometheus_util.erl
@@ -113,7 +113,7 @@
     {n, Count} = lists:keyfind(n, 1, Value),
     Quantiles = lists:map(
         fun({Perc, Val0}) ->
-            % Prometheus uses seconds, so we need to covert milliseconds to seconds
+            % Prometheus uses seconds, so we need to convert milliseconds to seconds
             Val = Val0 / 1000,
             case Perc of
                 50 -> {[{quantile, <<"0.5">>}], Val};
diff --git a/src/couch_replicator/README.md b/src/couch_replicator/README.md
index 5fe55ac..f0de661 100644
--- a/src/couch_replicator/README.md
+++ b/src/couch_replicator/README.md
@@ -5,7 +5,7 @@
 CouchDB developers. It dives a bit into the internal and explains how
 everything is connected together. A higher level overview is available in the
 [RFC](https://github.com/apache/couchdb-documentation/pull/581). This
-documention assumes the audience is familiar with that description as well as
+documentation assumes the audience is familiar with that description as well as
 with the [Couch Jobs
 RFC](https://github.com/apache/couchdb-documentation/blob/main/rfcs/007-background-jobs.md)
 as well as with the [Node Types
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index 0560d3f..746024d 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -258,7 +258,7 @@
                 Active = State =:= ?ST_PENDING orelse State =:= ?ST_RUNNING,
                 case SameRep andalso Active of
                     true ->
-                        % If a job with the same paremeters is running we don't
+                        % If a job with the same parameters is running we don't
                         % stop and just ignore the request. This is mainly for
                         % compatibility where users are able to idempotently
                         % POST the same job without it being stopped and
@@ -361,7 +361,7 @@
                 case SameRep andalso Active of
                     true ->
                         % Document was changed but none of the parameters
-                        % relevent for the replication job have changed, so
+                        % relevant for the replication job have changed, so
                         % make it a no-op
                         ok;
                     false ->
diff --git a/src/couch_replicator/src/couch_replicator_auth.erl b/src/couch_replicator/src/couch_replicator_auth.erl
index e5c024f..712a771 100644
--- a/src/couch_replicator/src/couch_replicator_auth.erl
+++ b/src/couch_replicator/src/couch_replicator_auth.erl
@@ -31,7 +31,7 @@
 % Note for plugin developers: consider using the "auth" field in the source and
 % target objects to store credentials. In that case non-owner and non-admin
 % users will have those credentials stripped when they read the replication
-% document, which mimicks the behavior for "headers" and user and pass fields
+% document, which mimics the behavior for "headers" and user and pass fields
 % in endpoint URLs".
 
 -callback initialize(#httpdb{}) ->
diff --git a/src/couch_replicator/src/couch_replicator_auth_session.erl b/src/couch_replicator/src/couch_replicator_auth_session.erl
index acd74a3..898401f 100644
--- a/src/couch_replicator/src/couch_replicator_auth_session.erl
+++ b/src/couch_replicator/src/couch_replicator_auth_session.erl
@@ -35,7 +35,7 @@
 %
 %  * If last request has an auth failure, check if request used a stale cookie
 %    In this case nothing is done, and the client is told to retry. Next time
-%    it updates its headers befor the request it should pick up the latest
+%    it updates its headers before the request it should pick up the latest
 %    cookie.
 %
 %  * If last request failed and cookie was the latest known cookie, schedule a
@@ -466,7 +466,7 @@
 next_refresh(NowSec, undefined, RefreshInterval) ->
     NowSec + RefreshInterval;
 next_refresh(NowSec, MaxAge, _) when is_integer(MaxAge) ->
-    % Apply a fudge factor to account for delays in receving the cookie
+    % Apply a fudge factor to account for delays in receiving the cookie
     % and / or time adjustments happening over a longer period of time
     NowSec + trunc(MaxAge * 0.9).
 
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index a4c4ccc..e283b50 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -249,7 +249,7 @@
     catch
         error:database_does_not_exist ->
             {not_found, database_does_not_exist};
-        % User can accidently write a VDU which prevents _replicator from
+        % User can accidentally write a VDU which prevents _replicator from
         % updating replication documents. Avoid crashing replicator and thus
         % preventing all other replication jobs on the node from running.
         throw:{forbidden, Reason} ->
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
index 9fd79a3..9a93ade 100644
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc.erl
@@ -357,7 +357,7 @@
     false;
 total_error_time_exceeded(#httpdb{first_error_timestamp = ErrorTimestamp}) ->
     HealthThresholdSec = couch_replicator_job:health_threshold(),
-    % Theshold value is halved because in the calling code the next step
+    % Threshold value is halved because in the calling code the next step
     % is a doubling. Not halving here could mean sleeping too long and
     % exceeding the health threshold.
     ThresholdUSec = (HealthThresholdSec / 2) * 1000000,
diff --git a/src/couch_replicator/src/couch_replicator_job.erl b/src/couch_replicator/src/couch_replicator_job.erl
index 1281ec5..f94c5fb 100644
--- a/src/couch_replicator/src/couch_replicator_job.erl
+++ b/src/couch_replicator/src/couch_replicator_job.erl
@@ -629,7 +629,7 @@
                 ->
                     % Conflicting job is a transient job, not associated with a
                     % _replicator doc, so we let this job retry. This is also
-                    % partly done for compatibility with pervious replicator
+                    % partly done for compatibility with previous replicator
                     % behavior.
                     Error = <<"Duplicate job running: ", OtherJobId/binary>>,
                     reschedule_on_error(JTx, Job, JobData, Error),
diff --git a/src/couch_replicator/src/couch_replicator_job_server.erl b/src/couch_replicator/src/couch_replicator_job_server.erl
index b96f6d4..c703dd7 100644
--- a/src/couch_replicator/src/couch_replicator_job_server.erl
+++ b/src/couch_replicator/src/couch_replicator_job_server.erl
@@ -94,7 +94,7 @@
                 in => replicator,
                 pid => Pid
             }),
-            LogMsg = "~p : unknown acceptor processs ~p",
+            LogMsg = "~p : unknown acceptor process ~p",
             couch_log:error(LogMsg, [?MODULE, Pid]),
             {stop, {unknown_acceptor_pid, Pid}, St}
     end;
diff --git a/src/couch_replicator/src/json_stream_parse.erl b/src/couch_replicator/src/json_stream_parse.erl
index 3478b98..a76c1df 100644
--- a/src/couch_replicator/src/json_stream_parse.erl
+++ b/src/couch_replicator/src/json_stream_parse.erl
@@ -25,7 +25,7 @@
 % tuple is the data itself, and the second element is a function to be called
 % next to get the next chunk of data in the stream.
 %
-% The EventFun is called everytime a json element is parsed. It must produce
+% The EventFun is called every time a json element is parsed. It must produce
 % a new function to be called for the next event.
 %
 % Events happen each time a new element in the json string is parsed.
diff --git a/src/couch_views/src/couch_views_jobs.erl b/src/couch_views/src/couch_views_jobs.erl
index debdc35..4bbc3b8 100644
--- a/src/couch_views/src/couch_views_jobs.erl
+++ b/src/couch_views/src/couch_views_jobs.erl
@@ -98,7 +98,7 @@
                     couch_jobs:remove(undefined, ?INDEX_JOB_TYPE, JobId),
                     erlang:error({ddoc_deleted, maps:get(<<"reason">>, Data)});
                 _OtherDocId ->
-                    % A different design doc wiht the same signature
+                    % A different design doc with the same signature
                     % was deleted. Resubmit this job which will overwrite
                     % the ddoc_id in the job.
                     retry
diff --git a/src/couch_views/src/couch_views_server.erl b/src/couch_views/src/couch_views_server.erl
index 3e9284c..dbd464e 100644
--- a/src/couch_views/src/couch_views_server.erl
+++ b/src/couch_views/src/couch_views_server.erl
@@ -71,7 +71,7 @@
             {reply, ok, spawn_acceptors(St1)};
         false ->
             ?LOG_ERROR(#{what => unknown_acceptor, pid => Pid}),
-            LogMsg = "~p : unknown acceptor processs ~p",
+            LogMsg = "~p : unknown acceptor process ~p",
             couch_log:error(LogMsg, [?MODULE, Pid]),
             {stop, {unknown_acceptor_pid, Pid}, St}
     end;
diff --git a/src/couch_views/src/couch_views_util.erl b/src/couch_views/src/couch_views_util.erl
index 63dd566..9cbebaf 100644
--- a/src/couch_views/src/couch_views_util.erl
+++ b/src/couch_views/src/couch_views_util.erl
@@ -188,7 +188,7 @@
         {red, exact, _} ->
             ok;
         {red, _, KeyList} when is_list(KeyList) ->
-            Msg = <<"Multi-key fetchs for reduce views must use `group=true`">>,
+            Msg = <<"Multi-key fetches for reduce views must use `group=true`">>,
             mrverror(Msg);
         _ ->
             ok
diff --git a/src/ctrace/README.md b/src/ctrace/README.md
index 6c687e8..c07534e 100644
--- a/src/ctrace/README.md
+++ b/src/ctrace/README.md
@@ -57,7 +57,7 @@
 Code instrumentation
 --------------------
 
-The span lifecycle is controled by
+The span lifecycle is controlled by
 
 - `ctrace:start_span`
 - `ctrace:finish_span`
@@ -105,7 +105,7 @@
   - httpd
   - internal trigger (replication or compaction jobs)
 - Start new child span when you cross layer boundaries
-- Start new child span when you cross node bounadary
+- Start new child span when you cross node boundary
 - Extend `<app>_httpd_handlers:handler_info/1` as needed to
   have operation ids. (We as community might need to work on
   naming conventions)
diff --git a/src/ctrace/src/ctrace_dsl.erl b/src/ctrace/src/ctrace_dsl.erl
index a62985d..780a239 100644
--- a/src/ctrace/src/ctrace_dsl.erl
+++ b/src/ctrace/src/ctrace_dsl.erl
@@ -54,7 +54,7 @@
 
 -spec validate_args(MapAST :: ast()) -> ok.
 validate_args(MapAST) ->
-    %% Unfortunatelly merl doesn't seem to support maps
+    %% Unfortunately merl doesn't seem to support maps
     %% so we had to do it manually
     lists:foldl(
         fun(AST, Bindings) ->
diff --git a/src/ebtree/src/ebtree.erl b/src/ebtree/src/ebtree.erl
index 43d68d0..6a69020 100644
--- a/src/ebtree/src/ebtree.erl
+++ b/src/ebtree/src/ebtree.erl
@@ -1215,7 +1215,7 @@
             ok
     end.
 
-%% data marshalling functions (encodes unnecesary fields as a NIL_REF)
+%% data marshalling functions (encodes unnecessary fields as a NIL_REF)
 
 encode_node(#tree{} = Tree, Key, #node{prev = undefined} = Node) ->
     encode_node(Tree, Key, Node#node{prev = []});
diff --git a/src/fabric/test/fabric2_db_misc_tests.erl b/src/fabric/test/fabric2_db_misc_tests.erl
index ae0295e..cbfe7cc 100644
--- a/src/fabric/test/fabric2_db_misc_tests.erl
+++ b/src/fabric/test/fabric2_db_misc_tests.erl
@@ -334,7 +334,7 @@
         erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY))
     end),
 
-    % Save timetamp before ensure_current/1 is called
+    % Save timestamp before ensure_current/1 is called
     TsBeforeEnsureCurrent = erlang:monotonic_time(millisecond),
 
     % Perform a random operation which calls ensure_current
diff --git a/src/fabric/test/fabric2_doc_crud_tests.erl b/src/fabric/test/fabric2_doc_crud_tests.erl
index 7de0d5e..097282b 100644
--- a/src/fabric/test/fabric2_doc_crud_tests.erl
+++ b/src/fabric/test/fabric2_doc_crud_tests.erl
@@ -808,7 +808,7 @@
 
 create_2_large_local_docs({Db, _}) ->
     % Create a large doc then overwrite with a smaller one. The reason is to
-    % ensure the previous one correctly clears its range before writting the
+    % ensure the previous one correctly clears its range before writing the
     % new smaller one it its place.
     UUID = fabric2_util:uuid(),
     LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
diff --git a/src/fabric/test/fabric2_index_tests.erl b/src/fabric/test/fabric2_index_tests.erl
index c95da51..b1ad385 100644
--- a/src/fabric/test/fabric2_index_tests.erl
+++ b/src/fabric/test/fabric2_index_tests.erl
@@ -82,7 +82,7 @@
     NoAutoUpdate = {[{<<"autoupdate">>, false}]},
     {_, _} = create_doc(Db1, <<"_design/doc3">>, NoAutoUpdate),
 
-    % Db2 doesn't have any desig documents
+    % Db2 doesn't have any design documents
     {ok, Db2} = fabric2_db:create(?tempdb(), [?ADMIN_CTX]),
 
     #{db1 => Db1, db2 => Db2, ctx => Ctx, indices => Indices}.
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 713e32e..c67d68a 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -171,7 +171,7 @@
         fun(Idx, Acc) ->
             Cols = mango_idx:columns(Idx),
             Prefix = composite_prefix(Cols, FieldRanges),
-            % Calcuate the difference between the FieldRanges/Selector
+            % Calculate the difference between the FieldRanges/Selector
             % and the Prefix. We want to select the index with a prefix
             % that is as close to the FieldRanges as possible
             PrefixDifference = length(FieldRanges) - length(Prefix),
diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl
index be2616f..584b2df 100644
--- a/src/mango/src/mango_selector.erl
+++ b/src/mango/src/mango_selector.erl
@@ -61,7 +61,7 @@
     match(Selector, {Props}, fun mango_json:cmp/2).
 
 % Convert each operator into a normalized version as well
-% as convert an implict operators into their explicit
+% as convert any implicit operators into their explicit
 % versions.
 norm_ops({[{<<"$and">>, Args}]}) when is_list(Args) ->
     {[{<<"$and">>, [norm_ops(A) || A <- Args]}]};
@@ -197,7 +197,7 @@
 %
 % Its important to note that we can only normalize
 % field names like this through boolean operators where
-% we can gaurantee commutativity. We can't necessarily
+% we can guarantee commutativity. We can't necessarily
 % do the same through the '$elemMatch' or '$allMatch'
 % operators but we can apply the same algorithm to its
 % arguments.
diff --git a/src/mango/src/mango_selector_text.erl b/src/mango/src/mango_selector_text.erl
index aaa1e33..cf739dc 100644
--- a/src/mango/src/mango_selector_text.erl
+++ b/src/mango/src/mango_selector_text.erl
@@ -303,7 +303,7 @@
 field_exists_query(Path) ->
     % We specify two here for :* and .* so that we don't incorrectly
     % match a path foo.name against foo.name_first (if were to just
-    % appened * isntead).
+    % append * instead).
     Parts = [
         % We need to remove the period from the path list to indicate that it is
         % a path separator. We escape the colon because it is not used as a
diff --git a/test/elixir/README.md b/test/elixir/README.md
index 13d74a4..7429513 100644
--- a/test/elixir/README.md
+++ b/test/elixir/README.md
@@ -116,7 +116,7 @@
 
 Elixir has a number of benefits which makes writing unit tests easier.
 For example it is trivial to do codegeneration of tests.
-Bellow we present a few use cases where code-generation is really helpful.
+Below we present a few use cases where code-generation is really helpful.
 
 ## How to write ExUnit tests
 
diff --git a/test/elixir/test/config/suite.elixir b/test/elixir/test/config/suite.elixir
index 28d2e7c..834fb6d 100644
--- a/test/elixir/test/config/suite.elixir
+++ b/test/elixir/test/config/suite.elixir
@@ -96,8 +96,8 @@
     "bulk docs emits conflict error for duplicate doc `_id`s",
     "bulk docs raises conflict error for combined update & delete",
     "bulk docs raises error for `all_or_nothing` option",
-    "bulk docs raises error for invlaid `docs` parameter",
-    "bulk docs raises error for invlaid `new_edits` parameter",
+    "bulk docs raises error for invalid `docs` parameter",
+    "bulk docs raises error for invalid `new_edits` parameter",
     "bulk docs raises error for missing `docs` parameter",
     "bulk docs raises error for transaction larger than 10MB",
     "bulk docs supplies `id` if not provided in doc"
@@ -125,8 +125,8 @@
     "erlang function filtered changes",
     "function filtered changes",
     "map function filtered changes",
-    "non-existing desing doc and funcion for filtered changes",
-    "non-existing desing doc for filtered changes",
+    "non-existing design doc and function for filtered changes",
+    "non-existing design doc for filtered changes",
     "non-existing function for filtered changes"
   ],
   "CoffeeTest": [
diff --git a/test/elixir/test/cookie_auth_test.exs b/test/elixir/test/cookie_auth_test.exs
index 2a9d3d7..115beeb 100644
--- a/test/elixir/test/cookie_auth_test.exs
+++ b/test/elixir/test/cookie_auth_test.exs
@@ -236,7 +236,7 @@
         {:password, "eh, Boo-Boo?"}
       ])
 
-    # make sure we cant create duplicate users
+    # make sure we can't create duplicate users
     create_doc_expect_error(@users_db, duplicate_jchris_user_doc, 409, "conflict")
 
     # we can't create _names