ci: add typo ci check and fix typos (#4375)

### Motivation
Introduce a typos CI check to catch typos. See https://lists.apache.org/thread/04hqqcnkfc5189zsxj0s5wm37t2x7bky
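
For reference, the same check can be reproduced locally before pushing; a minimal sketch, assuming the `typos` CLI (the binary that the crate-ci/typos action wraps) is installed via a Rust toolchain:

```bash
# Install the typos binary once
cargo install typos-cli

# Run from the repository root; the .typos.toml added in this PR is picked up automatically
typos
```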

Signed-off-by: ZhangJian He <shoothzj@gmail.com>
diff --git a/.github/workflows/bk-ci.yml b/.github/workflows/bk-ci.yml
index 0dc2c7c..3a21186 100644
--- a/.github/workflows/bk-ci.yml
+++ b/.github/workflows/bk-ci.yml
@@ -483,6 +483,14 @@
         if: cancelled()
         run: ./dev/ci-tool print_thread_dumps
 
+  typo-check:
+    name: Typo Check
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Check typos
+        uses: crate-ci/typos@master
+
   owasp-dependency-check:
     name: OWASP Dependency Check
     runs-on: ubuntu-latest
diff --git a/.typos.toml b/.typos.toml
new file mode 100644
index 0000000..e8ef1d3
--- /dev/null
+++ b/.typos.toml
@@ -0,0 +1,56 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+[default.extend-words]
+# abbr
+"ba" = "ba"
+"bve" = "bve"
+"cace" = "cace"
+"cann" = "cann"
+"dbe" = "dbe"
+"entrys" = "entrys"
+"fo" = "fo"
+"ine" = "ine"
+"isse" = "isse"
+"mor" = "mor"
+"nwe" = "nwe"
+"nd" = "nd"
+"nin" = "nin"
+"oce" = "oce"
+"ot" = "ot"
+"ser" = "ser"
+"shouldnot" = "shouldnot"
+"tio" = "tio"
+"ue" = "ue"
+# keep for compatibility
+"deleteable" = "deleteable"
+"infinit" = "infinit"
+"explict" = "explict"
+"uninitalize" = "uninitalize"
+# keyword fp
+"guage" = "guage"
+"passin" = "passin"
+"testng" = "testng"
+"vertx" = "vertx"
+"verticle" = "verticle"
+
+[files]
+extend-exclude = [
+    "bookkeeper-server/src/test/java/org/apache/bookkeeper/meta/TestLedgerMetadataSerDe.java",
+]
diff --git a/bookkeeper-common/src/main/java/org/apache/bookkeeper/common/util/Retries.java b/bookkeeper-common/src/main/java/org/apache/bookkeeper/common/util/Retries.java
index 6a52ef5..dff0075 100644
--- a/bookkeeper-common/src/main/java/org/apache/bookkeeper/common/util/Retries.java
+++ b/bookkeeper-common/src/main/java/org/apache/bookkeeper/common/util/Retries.java
@@ -75,7 +75,7 @@
      * @param task           a task to execute.
      * @param scheduler      scheduler to schedule the task and complete the futures.
      * @param key            the submit key for the scheduler.
-     * @param <ReturnT>      the return tye.
+     * @param <ReturnT>      the return type.
      * @return future represents the result of the task with retries.
      */
     public static <ReturnT> CompletableFuture<ReturnT> run(
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieShell.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieShell.java
index 663bea1..00869c7 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieShell.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieShell.java
@@ -323,7 +323,7 @@
     }
 
     /**
-     * Intializes new cluster by creating required znodes for the cluster. If
+     * Initializes new cluster by creating required znodes for the cluster. If
      * ledgersrootpath is already existing then it will error out. If for any
      * reason it errors out while creating znodes for the cluster, then before
      * running initnewcluster again, try nuking existing cluster by running
@@ -704,7 +704,7 @@
 
             ReadLedgerCommand cmd = new ReadLedgerCommand(entryFormatter, ledgerIdFormatter);
             ReadLedgerCommand.ReadLedgerFlags flags = new ReadLedgerCommand.ReadLedgerFlags();
-            flags.bookieAddresss(bookieAddress);
+            flags.bookieAddress(bookieAddress);
             flags.firstEntryId(firstEntry);
             flags.forceRecovery(forceRecovery);
             flags.lastEntryId(lastEntry);
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForEntryLogPerLedger.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForEntryLogPerLedger.java
index 6846b6b..ca6224e 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForEntryLogPerLedger.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForEntryLogPerLedger.java
@@ -152,7 +152,7 @@
          * 'expiry duration' and 'maximumSize' will be set to
          * entryLogPerLedgerCounterLimitsMultFactor times of
          * 'ledgerIdEntryLogMap' cache limits. This is needed because entries
-         * from 'ledgerIdEntryLogMap' can be removed from cache becasue of
+         * from 'ledgerIdEntryLogMap' can be removed from cache because of
          * accesstime expiry or cache size limits, but to know the actual number
          * of entrylogs per ledger, we should maintain this count for long time.
          */
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLoggerAllocator.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLoggerAllocator.java
index aec2fb1..70b76aa 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLoggerAllocator.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLoggerAllocator.java
@@ -137,8 +137,8 @@
         }
     }
 
-    void setWritingLogId(long lodId) {
-        this.writingLogId = lodId;
+    void setWritingLogId(long logId) {
+        this.writingLogId = logId;
     }
 
     void setWritingCompactingLogId(long logId) {
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedLedgerStorage.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedLedgerStorage.java
index 323f701..4c6b7a9 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedLedgerStorage.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedLedgerStorage.java
@@ -569,7 +569,7 @@
         // for interleaved ledger storage, we request a checkpoint when rotating a entry log file.
         // the checkpoint represent the point that all the entries added before this point are already
         // in ledger storage and ready to be synced to disk.
-        // TODO: we could consider remove checkpointSource and checkpointSouce#newCheckpoint
+        // TODO: we could consider remove checkpointSource and checkpointSource#newCheckpoint
         // later if we provide kind of LSN (Log/Journal Squeuence Number)
         // mechanism when adding entry. {@link https://github.com/apache/bookkeeper/issues/279}
         Checkpoint checkpoint = checkpointSource.newCheckpoint();
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsManager.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsManager.java
index 42910b0..6689c96 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsManager.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsManager.java
@@ -200,20 +200,20 @@
 
     List<File> getDirsAboveUsableThresholdSize(long thresholdSize, boolean loggingNoWritable)
             throws NoWritableLedgerDirException {
-        List<File> fullLedgerDirsToAccomodate = new ArrayList<File>();
+        List<File> fullLedgerDirsToAccommodate = new ArrayList<File>();
         for (File dir: this.ledgerDirectories) {
             // Pick dirs which can accommodate little more than thresholdSize
             if (dir.getUsableSpace() > thresholdSize) {
-                fullLedgerDirsToAccomodate.add(dir);
+                fullLedgerDirsToAccommodate.add(dir);
             }
         }
 
-        if (!fullLedgerDirsToAccomodate.isEmpty()) {
+        if (!fullLedgerDirsToAccommodate.isEmpty()) {
             if (loggingNoWritable) {
                 LOG.info("No writable ledger dirs below diskUsageThreshold. "
-                    + "But Dirs that can accommodate {} are: {}", thresholdSize, fullLedgerDirsToAccomodate);
+                    + "But Dirs that can accommodate {} are: {}", thresholdSize, fullLedgerDirsToAccommodate);
             }
-            return fullLedgerDirsToAccomodate;
+            return fullLedgerDirsToAccommodate;
         }
 
         // We will reach here when we find no ledgerDir which has atleast
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsMonitor.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsMonitor.java
index 7ad8ba1..32321ae 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsMonitor.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsMonitor.java
@@ -124,13 +124,13 @@
             }
         }
 
-        List<File> fullfilledDirs = new ArrayList<File>(ldm.getFullFilledLedgerDirs());
+        List<File> fulfilledDirs = new ArrayList<File>(ldm.getFullFilledLedgerDirs());
         boolean makeWritable = ldm.hasWritableLedgerDirs();
 
         // When bookie is in READONLY mode, i.e there are no writableLedgerDirs:
-        // - Update fullfilledDirs disk usage.
+        // - Update fulfilledDirs disk usage.
         // - If the total disk usage is below DiskLowWaterMarkUsageThreshold
-        // add fullfilledDirs back to writableLedgerDirs list if their usage is < conf.getDiskUsageThreshold.
+        // add fulfilledDirs back to writableLedgerDirs list if their usage is < conf.getDiskUsageThreshold.
         try {
             if (!makeWritable) {
                 float totalDiskUsage = diskChecker.getTotalDiskUsage(ldm.getAllLedgerDirs());
@@ -144,7 +144,7 @@
                 }
             }
             // Update all full-filled disk space usage
-            for (File dir : fullfilledDirs) {
+            for (File dir : fulfilledDirs) {
                 try {
                     diskUsages.put(dir, diskChecker.checkDir(dir));
                     if (makeWritable) {
@@ -254,7 +254,7 @@
 
     private void validateThreshold(float diskSpaceThreshold, float diskSpaceLwmThreshold) {
         if (diskSpaceThreshold <= 0 || diskSpaceThreshold >= 1 || diskSpaceLwmThreshold - diskSpaceThreshold > 1e-6) {
-            throw new IllegalArgumentException("Disk space threashold: "
+            throw new IllegalArgumentException("Disk space threshold: "
                     + diskSpaceThreshold + " and lwm threshold: " + diskSpaceLwmThreshold
                     + " are not valid. Should be > 0 and < 1 and diskSpaceThreshold >= diskSpaceLwmThreshold");
         }
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ReadCache.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ReadCache.java
index ab2bc33..1d850c4 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ReadCache.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/storage/ldb/ReadCache.java
@@ -41,7 +41,7 @@
  *
  * <p>Uses the specified amount of memory and pairs it with a hashmap.
  *
- * <p>The memory is splitted in multiple segments that are used in a
+ * <p>The memory is split in multiple segments that are used in a
  * ring-buffer fashion. When the read cache is full, the oldest segment
  * is cleared and rotated to make space for new entries to be added to
  * the read cache.
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeper.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeper.java
index d7043dc..751d40e 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeper.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeper.java
@@ -706,7 +706,7 @@
      * cheap to compute but does not protect against byzantine bookies (i.e., a
      * bookie might report fake bytes and a matching CRC32). The MAC code is more
      * expensive to compute, but is protected by a password, i.e., a bookie can't
-     * report fake bytes with a mathching MAC unless it knows the password.
+     * report fake bytes with a matching MAC unless it knows the password.
      * The CRC32C, which use SSE processor instruction, has better performance than CRC32.
      * Legacy DigestType for backward compatibility. If we want to add new DigestType,
      * we should add it in here, client.api.DigestType and DigestType in DataFormats.proto.
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeperAdmin.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeperAdmin.java
index 1df915f..371c214 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeperAdmin.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/BookKeeperAdmin.java
@@ -1297,7 +1297,7 @@
     }
 
     /**
-     * Intializes new cluster by creating required znodes for the cluster. If
+     * Initializes new cluster by creating required znodes for the cluster. If
      * ledgersrootpath is already existing then it will error out.
      *
      * @param conf
@@ -1569,7 +1569,7 @@
      * Triggers AuditTask by resetting lostBookieRecoveryDelay and then make
      * sure the ledgers stored in the given decommissioning bookie are properly
      * replicated and they are not underreplicated because of the given bookie.
-     * This method waits untill there are no underreplicatedledgers because of this
+     * This method waits until there are no underreplicatedledgers because of this
      * bookie. If the given Bookie is not shutdown yet, then it will throw
      * BKIllegalOpException.
      *
@@ -1612,7 +1612,7 @@
         Set<Long> ledgersStoredInThisBookie = bookieToLedgersMap.get(bookieAddress.toString());
         if ((ledgersStoredInThisBookie != null) && (!ledgersStoredInThisBookie.isEmpty())) {
             /*
-             * wait untill all the ledgers are replicated to other
+             * wait until all the ledgers are replicated to other
              * bookies by making sure that these ledgers metadata don't
              * contain this bookie as part of their ensemble.
              */
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DistributionSchedule.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DistributionSchedule.java
index 2646d3a..295cbd9 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DistributionSchedule.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/DistributionSchedule.java
@@ -22,7 +22,7 @@
 import org.apache.bookkeeper.net.BookieId;
 
 /**
- * This interface determins how entries are distributed among bookies.
+ * This interface determines how entries are distributed among bookies.
  *
  * <p>Every entry gets replicated to some number of replicas. The first replica for
  * an entry is given a replicaIndex of 0, and so on. To distribute write load,
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsemblePlacementPolicy.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsemblePlacementPolicy.java
index 05a687b..58d2bc0 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsemblePlacementPolicy.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/EnsemblePlacementPolicy.java
@@ -369,7 +369,7 @@
      *
      * <p>The default implementation will pick a bookie randomly from the ensemble.
      * Other placement policies will be able to do better decisions based on
-     * additional informations (eg: rack or region awareness).
+     * additional information (eg: rack or region awareness).
      *
      * @param metadata
      *            the {@link LedgerMetadata} object
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java
index 3d58bda..f6d54e1 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerFragmentReplicator.java
@@ -233,11 +233,11 @@
             final Set<BookieId> targetBookieAddresses,
             final BiConsumer<Long, Long> onReadEntryFailureCallback)
             throws InterruptedException {
-        Set<LedgerFragment> partionedFragments = splitIntoSubFragments(lh, lf,
+        Set<LedgerFragment> partitionedFragments = splitIntoSubFragments(lh, lf,
                 bkc.getConf().getRereplicationEntryBatchSize());
         LOG.info("Replicating fragment {} in {} sub fragments.",
-                lf, partionedFragments.size());
-        replicateNextBatch(lh, partionedFragments.iterator(),
+                lf, partitionedFragments.size());
+        replicateNextBatch(lh, partitionedFragments.iterator(),
                 ledgerFragmentMcb, targetBookieAddresses, onReadEntryFailureCallback);
     }
 
@@ -559,7 +559,7 @@
     /**
      * Callback for recovery of a single ledger fragment. Once the fragment has
      * had all entries replicated, update the ensemble in zookeeper. Once
-     * finished propogate callback up to ledgerFragmentsMcb which should be a
+     * finished propagate callback up to ledgerFragmentsMcb which should be a
      * multicallback responsible for all fragments in a single ledger
      */
     static class SingleFragmentCallback implements AsyncCallback.VoidCallback {
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java
index c49aa65..6a15cb4 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/LedgerHandle.java
@@ -785,7 +785,7 @@
      * Read a sequence of entries asynchronously, allowing to read after the LastAddConfirmed range.
      * <br>This is the same of
      * {@link #asyncReadEntries(long, long, ReadCallback, Object) }
-     * but it lets the client read without checking the local value of LastAddConfirmed, so that it is possibile to
+     * but it lets the client read without checking the local value of LastAddConfirmed, so that it is possible to
      * read entries for which the writer has not received the acknowledge yet. <br>
      * For entries which are within the range 0..LastAddConfirmed BookKeeper guarantees that the writer has successfully
      * received the acknowledge.<br>
@@ -1009,7 +1009,7 @@
      * Read a sequence of entries asynchronously, allowing to read after the LastAddConfirmed range.
      * <br>This is the same of
      * {@link #asyncReadEntries(long, long, ReadCallback, Object) }
-     * but it lets the client read without checking the local value of LastAddConfirmed, so that it is possibile to
+     * but it lets the client read without checking the local value of LastAddConfirmed, so that it is possible to
      * read entries for which the writer has not received the acknowledge yet. <br>
      * For entries which are within the range 0..LastAddConfirmed BookKeeper guarantees that the writer has successfully
      * received the acknowledge.<br>
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelectionImpl.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelectionImpl.java
index 16443c6..1a2b9f0 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelectionImpl.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/WeightedRandomSelectionImpl.java
@@ -37,7 +37,7 @@
     Double randomMax;
     int maxProbabilityMultiplier;
     Map<T, WeightedObject> map;
-    TreeMap<Double, T> cummulativeMap = new TreeMap<Double, T>();
+    TreeMap<Double, T> cumulativeMap = new TreeMap<Double, T>();
     ReadWriteLock rwLock = new ReentrantReadWriteLock(true);
 
     WeightedRandomSelectionImpl() {
@@ -120,10 +120,10 @@
         // The probability of picking a bookie randomly is defaultPickProbability
         // but we change that priority by looking at the weight that each bookie
         // carries.
-        TreeMap<Double, T> tmpCummulativeMap = new TreeMap<Double, T>();
+        TreeMap<Double, T> tmpCumulativeMap = new TreeMap<Double, T>();
         Double key = 0.0;
         for (Map.Entry<T, Double> e : weightMap.entrySet()) {
-            tmpCummulativeMap.put(key, e.getKey());
+            tmpCumulativeMap.put(key, e.getKey());
             if (LOG.isDebugEnabled()) {
                 LOG.debug("Key: {} Value: {} AssignedKey: {} AssignedWeight: {}",
                         e.getKey(), e.getValue(), key, e.getValue());
@@ -134,7 +134,7 @@
         rwLock.writeLock().lock();
         try {
             this.map = map;
-            cummulativeMap = tmpCummulativeMap;
+            cumulativeMap = tmpCumulativeMap;
             randomMax = key;
         } finally {
             rwLock.writeLock().unlock();
@@ -148,8 +148,8 @@
             // pick a random number between 0 and randMax
             Double randomNum = randomMax * Math.random();
             // find the nearest key in the map corresponding to the randomNum
-            Double key = cummulativeMap.floorKey(randomNum);
-            return cummulativeMap.get(key);
+            Double key = cumulativeMap.floorKey(randomNum);
+            return cumulativeMap.get(key);
         } finally {
             rwLock.readLock().unlock();
         }
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpenBuilder.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpenBuilder.java
index a0c09a3..9b843a1 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpenBuilder.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpenBuilder.java
@@ -53,7 +53,7 @@
     OpenBuilder withRecovery(boolean recovery);
 
     /**
-     * Sets the password to be used to open the ledger. It defauls to an empty password
+     * Sets the password to be used to open the ledger. It defaults to an empty password
      *
      * @param password the password to unlock the ledger
      *
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ReadHandle.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ReadHandle.java
index e9bcddd..8e2e633 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ReadHandle.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ReadHandle.java
@@ -95,7 +95,7 @@
      * Read a sequence of entries asynchronously, allowing to read after the LastAddConfirmed range.
      * <br>This is the same of
      * {@link #read(long, long) }
-     * but it lets the client read without checking the local value of LastAddConfirmed, so that it is possibile to
+     * but it lets the client read without checking the local value of LastAddConfirmed, so that it is possible to
      * read entries for which the writer has not received the acknowledge yet. <br>
      * For entries which are within the range 0..LastAddConfirmed BookKeeper guarantees that the writer has successfully
      * received the acknowledge.<br>
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteAdvHandle.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteAdvHandle.java
index 7fd80a0..9c33ecf 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteAdvHandle.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteAdvHandle.java
@@ -29,7 +29,7 @@
 import org.apache.bookkeeper.common.concurrent.FutureUtils;
 
 /**
- * Provide write access to a ledger. Using WriteAdvHandler the writer MUST explictly set an entryId. Beware that the
+ * Provide write access to a ledger. Using WriteAdvHandler the writer MUST explicitly set an entryId. Beware that the
  * write for a given entryId will be acknowledged if and only if all entries up to entryId - 1 have been acknowledged
  * too (expected from entryId 0)
  *
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ClientConfiguration.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ClientConfiguration.java
index feae692..cb942d9 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ClientConfiguration.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ClientConfiguration.java
@@ -980,7 +980,7 @@
     }
 
     /**
-     * Multipler to use when determining time between successive speculative read requests.
+     * Multiplier to use when determining time between successive speculative read requests.
      *
      * @return speculative read timeout backoff multiplier.
      */
@@ -989,10 +989,10 @@
     }
 
     /**
-     * Set the multipler to use when determining time between successive speculative read requests.
+     * Set the multiplier to use when determining time between successive speculative read requests.
      *
      * @param speculativeReadTimeoutBackoffMultiplier
-     *          multipler to use when determining time between successive speculative read requests.
+     *          multiplier to use when determining time between successive speculative read requests.
      * @return client configuration.
      */
     public ClientConfiguration setSpeculativeReadTimeoutBackoffMultiplier(
@@ -1002,7 +1002,7 @@
     }
 
     /**
-     * Multipler to use when determining time between successive speculative read LAC requests.
+     * Multiplier to use when determining time between successive speculative read LAC requests.
      *
      * @return speculative read LAC timeout backoff multiplier.
      */
@@ -1011,10 +1011,10 @@
     }
 
     /**
-     * Set the multipler to use when determining time between successive speculative read LAC requests.
+     * Set the multiplier to use when determining time between successive speculative read LAC requests.
      *
      * @param speculativeReadLACTimeoutBackoffMultiplier
-     *          multipler to use when determining time between successive speculative read LAC requests.
+     *          multiplier to use when determining time between successive speculative read LAC requests.
      * @return client configuration.
      */
     public ClientConfiguration setSpeculativeReadLACTimeoutBackoffMultiplier(
@@ -1193,7 +1193,7 @@
      * preference) to read all entries for a ledger.
      *
      * <p>Having all the read to one bookie will increase the chances that
-     * a read request will be fullfilled by Bookie read cache (or OS file
+     * a read request will be fulfilled by Bookie read cache (or OS file
      * system cache) when doing sequential reads.
      *
      * @param enabled the flag to enable/disable sticky reads.
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java
index 51e45dc..7785bfa 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/conf/ServerConfiguration.java
@@ -409,7 +409,7 @@
     /**
      * Get Garbage collection wait time. Default value is 10 minutes.
      * The guideline is not to set a too low value for this, if using zookeeper based
-     * ledger manager. And it would be nice to align with the average lifecyle time of
+     * ledger manager. And it would be nice to align with the average lifecycle time of
      * ledgers in the system.
      *
      * @return gc wait time
@@ -1223,7 +1223,7 @@
      * Configure the bookie to advertise a specific BookieId.
      *
      * <p>By default, a bookie will advertise a BookieId computed
-     * from the primary network endpoint addresss.
+     * from the primary network endpoint address.
      *
      * @see #getBookieId()
      * @see #setAdvertisedAddress(java.lang.String)
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java
index 2f143c0..cca6310 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java
@@ -369,7 +369,7 @@
                         .collect(bookieInfoUpdated)
                         .whenComplete((List<Versioned<BookieServiceInfo>> info, Throwable error) -> {
                             // we are ignoring errors intentionally
-                            // there could be bookies that publish unparseable information
+                            // there could be bookies that publish unparsable information
                             // or other temporary/permanent errors
                             future.complete(new Versioned<>(bookies, version));
                         });
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/SettableFeature.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/SettableFeature.java
index bb6d1db..797f5ba 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/SettableFeature.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/feature/SettableFeature.java
@@ -28,16 +28,16 @@
         super(name, initialAvailability);
     }
 
-    public SettableFeature(String name, boolean isAvailabile) {
-        super(name, isAvailabile);
+    public SettableFeature(String name, boolean isAvailable) {
+        super(name, isAvailable);
     }
 
     public void set(int availability) {
         this.availability = availability;
     }
 
-    public void set(boolean isAvailabile) {
-        this.availability = isAvailabile ? FEATURE_AVAILABILITY_MAX_VALUE : 0;
+    public void set(boolean isAvailable) {
+        this.availability = isAvailable ? FEATURE_AVAILABILITY_MAX_VALUE : 0;
     }
 
 }
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractHierarchicalLedgerManager.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractHierarchicalLedgerManager.java
index c7a730c..4a1ad27 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractHierarchicalLedgerManager.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractHierarchicalLedgerManager.java
@@ -190,7 +190,7 @@
         NavigableSet<Long> zkActiveLedgers = new TreeSet<Long>();
 
         if (!path.startsWith(ledgerRootPath)) {
-            LOG.warn("Ledger path [{}] is not a valid path name, it should start wth {}", path, ledgerRootPath);
+            LOG.warn("Ledger path [{}] is not a valid path name, it should start with {}", path, ledgerRootPath);
             return zkActiveLedgers;
         }
 
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractZkLedgerManager.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractZkLedgerManager.java
index 102c7d7..59b17be 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractZkLedgerManager.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/AbstractZkLedgerManager.java
@@ -553,7 +553,7 @@
      * Process ledgers in a single zk node.
      *
      * <p>
-     * for each ledger found in this zk node, processor#process(ledgerId) will be triggerred
+     * for each ledger found in this zk node, processor#process(ledgerId) will be triggered
      * to process a specific ledger. after all ledgers has been processed, the finalCb will
      * be called with provided context object. The RC passed to finalCb is decided by :
      * <ul>
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerManager.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerManager.java
index 2f08c2e..db197ce 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerManager.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerManager.java
@@ -97,7 +97,7 @@
      * @param currentVersion
      *          The version of the metadata we expect to be overwriting.
      * @return Future which, when completed, contains the newly written metadata.
-     *         Comleted with an exceptione:<ul>
+     *         Completed with an exception:<ul>
      *          <li>{@link org.apache.bookkeeper.client.BKException.BKMetadataVersionException}
      *          if version in metadata doesn't match</li>
      *          <li>{@link org.apache.bookkeeper.client.BKException.ZKException} for other issue</li>
@@ -130,9 +130,9 @@
      * Loop to process all ledgers.
      * <p>
      * <ul>
-     * After all ledgers were processed, finalCb will be triggerred:
+     * After all ledgers were processed, finalCb will be triggered:
      * <li> if all ledgers are processed done with OK, success rc will be passed to finalCb.
-     * <li> if some ledgers are prcoessed failed, failure rc will be passed to finalCb.
+     * <li> if some ledgers are processed failed, failure rc will be passed to finalCb.
      * </ul>
      * </p>
      *
@@ -145,7 +145,7 @@
      * @param successRc
      *          Success RC code passed to finalCb when callback
      * @param failureRc
-     *          Failure RC code passed to finalCb when exceptions occured.
+     *          Failure RC code passed to finalCb when exceptions occurred.
      */
     void asyncProcessLedgers(Processor<Long> processor, AsyncCallback.VoidCallback finalCb,
                                     Object context, int successRc, int failureRc);
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerUnderreplicationManager.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerUnderreplicationManager.java
index 256f781..64548fb 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerUnderreplicationManager.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/LedgerUnderreplicationManager.java
@@ -49,7 +49,7 @@
 
     /**
      * Mark a ledger as underreplicated with missing bookies. The replication should then
-     * check which fragements are underreplicated and rereplicate them.
+     * check which fragments are underreplicated and rereplicate them.
      *
      * @param ledgerId ledger id
      * @param missingReplicas missing replicas
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/MSLedgerManagerFactory.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/MSLedgerManagerFactory.java
index fa4776b..bc9ae3a 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/MSLedgerManagerFactory.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/MSLedgerManagerFactory.java
@@ -488,7 +488,7 @@
                 @Override
                 public void complete(int rc, Version version, Object ctx) {
                     if (MSException.Code.BadVersion.getCode() == rc) {
-                        LOG.info("Bad version provided to updat metadata for ledger {}", ledgerId);
+                        LOG.info("Bad version provided to update metadata for ledger {}", ledgerId);
                         promise.completeExceptionally(new BKException.BKMetadataVersionException());
                     } else if (MSException.Code.NoKey.getCode() == rc) {
                         LOG.warn("Ledger {} doesn't exist when writing its ledger metadata.", ledgerId);
@@ -761,7 +761,7 @@
         try {
             MetastoreUtils.cleanTable(ledgerTable, conf.getMetastoreMaxEntriesPerScan());
         } catch (MSException mse) {
-            throw new IOException("Exception when cleanning up table " + TABLE_NAME, mse);
+            throw new IOException("Exception when cleaning up table " + TABLE_NAME, mse);
         }
         LOG.info("Finished cleaning up table {}.", TABLE_NAME);
         // Delete and recreate the LAYOUT information.
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/ZkLedgerUnderreplicationManager.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/ZkLedgerUnderreplicationManager.java
index fd43fcf..4118e19 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/ZkLedgerUnderreplicationManager.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/meta/ZkLedgerUnderreplicationManager.java
@@ -73,7 +73,7 @@
 
 /**
  * ZooKeeper implementation of underreplication manager.
- * This is implemented in a heirarchical fashion, so it'll work with
+ * This is implemented in a hierarchical fashion, so it'll work with
  * FlatLedgerManagerFactory and HierarchicalLedgerManagerFactory.
  *
  * <p>Layout is:
@@ -82,7 +82,7 @@
  *                         locks/(ledgerId)
  *
  * <p>The hierarchical path is created by splitting the ledger into 4 2byte
- * segments which are represented in hexidecimal.
+ * segments which are represented in hexadecimal.
  * e.g. For ledger id 0xcafebeef0000feed, the path is
  *  cafe/beef/0000/feed/
  */
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSException.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSException.java
index 3fbcf53..f0d2c3b 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSException.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSException.java
@@ -40,7 +40,7 @@
         InterruptedException (-100, "Operation interrupted"),
         IllegalOp (-101, "Illegal operation"),
         ServiceDown (-102, "Metadata service is down"),
-        OperationFailure(-103, "Operaion failed on metadata storage server side");
+        OperationFailure(-103, "Operation failed on metadata storage server side");
 
         private static final Map<Integer, Code> codes = new HashMap<Integer, Code>();
 
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetaStore.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetaStore.java
index 8d7c5d4..04eb74f 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetaStore.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetaStore.java
@@ -31,7 +31,7 @@
     String getName();
 
     /**
-     * Get the plugin verison.
+     * Get the plugin version.
      *
      * @return the plugin version.
      */
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/net/NodeBase.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/net/NodeBase.java
index 46442f5..8e0233a 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/net/NodeBase.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/net/NodeBase.java
@@ -48,7 +48,7 @@
     /**
      * Construct a node from its path.
      * @param path
-     *   a concatenation of this node's location, the path seperator, and its name
+     *   a concatenation of this node's location, the path separator, and its name
      */
     public NodeBase(String path) {
         path = normalize(path);
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java
index d8bfb42..0c3b7bf 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/BookieProtoEncoding.java
@@ -475,7 +475,7 @@
         // a heap buffer while serializing and pass it down to netty library.
         // In AbstractChannel#filterOutboundMessage(), netty copies that data to a direct buffer if
         // it is currently in heap (otherwise skips it and uses it directly).
-        // Allocating a direct buffer reducing unncessary CPU cycles for buffer copies in BK client
+        // Allocating a direct buffer reducing unnecessary CPU cycles for buffer copies in BK client
         // and also helps alleviate pressure off the GC, since there is less memory churn.
         // Bookies aren't usually CPU bound. This change improves READ_ENTRY code paths by a small factor as well.
         ByteBuf buf = allocator.directBuffer(frameSize, frameSize);
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java
index 0e5335c..5fe1a6e 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/PerChannelBookieClient.java
@@ -588,7 +588,7 @@
         }
 
         // In the netty pipeline, we need to split packets based on length, so we
-        // use the {@link LengthFieldBasedFramDecoder}. Other than that all actions
+        // use the {@link LengthFieldBasedFrameDecoder}. Other than that all actions
         // are carried out in this class, e.g., making sense of received messages,
         // prepending the length to outgoing packets etc.
         bootstrap.handler(new ChannelInitializer<Channel>() {
@@ -2373,7 +2373,7 @@
         }
     }
 
-    // visable for testing
+    // visible for testing
     CompletionKey newCompletionKey(long txnId, OperationType operationType) {
         return new TxnCompletionKey(txnId, operationType);
     }
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessorV3.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessorV3.java
index fb0b4f6..3a85ca0 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessorV3.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/ReadEntryProcessorV3.java
@@ -200,7 +200,7 @@
             .setLedgerId(ledgerId)
             .setEntryId(entryId);
         try {
-            // handle fence reqest
+            // handle fence request
             if (RequestUtils.isFenceRequest(readRequest)) {
                 LOG.info("Ledger fence request received for ledger: {} from address: {}", ledgerId,
                     channel.remoteAddress());
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/WriteEntryProcessorV3.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/WriteEntryProcessorV3.java
index 381b887..3aaa921 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/WriteEntryProcessorV3.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/proto/WriteEntryProcessorV3.java
@@ -126,7 +126,7 @@
             status = StatusCode.EOK;
         } catch (OperationRejectedException e) {
             requestProcessor.getRequestStats().getAddEntryRejectedCounter().inc();
-            // Avoid to log each occurence of this exception as this can happen when the ledger storage is
+            // Avoid to log each occurrence of this exception as this can happen when the ledger storage is
             // unable to keep up with the write rate.
             if (logger.isDebugEnabled()) {
                 logger.debug("Operation rejected while writing {}", request, e);
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java
index cf1f2f2..c32487a 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/replication/ReplicationWorker.java
@@ -558,7 +558,7 @@
      *
      * <p>To avoid this situation, we need to check if bookies in the final open ensemble
      * are unavailable, and take action if so. The action to take is to close the ledger,
-     * after a grace period as the writting client may replace the faulty bookie on its
+     * after a grace period as the writing client may replace the faulty bookie on its
      * own.
      *
      * <p>Missing bookies in closed ledgers are fine, as we know the last confirmed add, so
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java
index ed73293..9486687 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/streaming/LedgerOutputStream.java
@@ -89,7 +89,7 @@
             try {
                 lh.addEntry(b);
             } catch (InterruptedException ie) {
-                LOG.warn("Interrupted while flusing " + ie);
+                LOG.warn("Interrupted while flushing " + ie);
                 Thread.currentThread().interrupt();
             } catch (BKException bke) {
                 LOG.warn("BookKeeper exception ", bke);
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tls/TLSContextFactory.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tls/TLSContextFactory.java
index 03f9951..7570436 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tls/TLSContextFactory.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tls/TLSContextFactory.java
@@ -263,7 +263,7 @@
 
         // get key-file and trust-file locations and passwords
         if (!(config instanceof ClientConfiguration)) {
-            throw new SecurityException("Client configruation not provided");
+            throw new SecurityException("Client configuration not provided");
         }
 
         clientConf = (ClientConfiguration) config;
@@ -387,7 +387,7 @@
 
         // get key-file and trust-file locations and passwords
         if (!(config instanceof ServerConfiguration)) {
-            throw new SecurityException("Server configruation not provided");
+            throw new SecurityException("Server configuration not provided");
         }
 
         serverConf = (ServerConfiguration) config;
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/LedgerCommand.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/LedgerCommand.java
index fa753e9..7434706 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/LedgerCommand.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/LedgerCommand.java
@@ -136,8 +136,8 @@
 
     private boolean dumpLedgerInfo(long ledgerId, ServerConfiguration conf) {
         try {
-            DbLedgerStorage.readLedgerIndexEntries(ledgerId, conf, (currentEntry, entryLodId, position) -> System.out
-                    .println("entry " + currentEntry + "\t:\t(log: " + entryLodId + ", pos: " + position + ")"));
+            DbLedgerStorage.readLedgerIndexEntries(ledgerId, conf, (currentEntry, entryLogId, position) -> System.out
+                    .println("entry " + currentEntry + "\t:\t(log: " + entryLogId + ", pos: " + position + ")"));
         } catch (IOException e) {
             System.err.printf("ERROR: initializing dbLedgerStorage %s", e.getMessage());
             return false;
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/ReadLedgerCommand.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/ReadLedgerCommand.java
index 20b4232..e07dc9d 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/ReadLedgerCommand.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/ReadLedgerCommand.java
@@ -109,7 +109,7 @@
         private boolean forceRecovery;
 
         @Parameter(names = { "-b", "--bookie" }, description = "Only read from a specific bookie")
-        private String bookieAddresss;
+        private String bookieAddress;
 
         @Parameter(names = { "-lf", "--ledgeridformatter" }, description = "Set ledger id formatter")
         private String ledgerIdFormatter;
@@ -146,9 +146,9 @@
         long lastEntry = flags.lastEntryId;
 
         final BookieId bookie;
-        if (flags.bookieAddresss != null) {
+        if (flags.bookieAddress != null) {
             // A particular bookie was specified
-            bookie = BookieId.parse(flags.bookieAddresss);
+            bookie = BookieId.parse(flags.bookieAddress);
         } else {
             bookie = null;
         }
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/RegenerateInterleavedStorageIndexFileCommand.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/RegenerateInterleavedStorageIndexFileCommand.java
index bbf933f..c50e808 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/RegenerateInterleavedStorageIndexFileCommand.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/bookie/RegenerateInterleavedStorageIndexFileCommand.java
@@ -79,7 +79,7 @@
             description = "The password in base64 encoding, for cases where the password is not UTF-8.")
         private String b64Password = DEFAULT;
 
-        @Parameter(names = { "-d", "--dryrun" }, description = "Process the entryLogger, but don't write anthing.")
+        @Parameter(names = { "-d", "--dryrun" }, description = "Process the entryLogger, but don't write anything.")
         private boolean dryRun;
 
         @Parameter(names = { "-l", "--ledgerids" },
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/client/LedgerMetaDataCommand.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/client/LedgerMetaDataCommand.java
index 4bbb50e..430c0dd 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/client/LedgerMetaDataCommand.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/tools/cli/commands/client/LedgerMetaDataCommand.java
@@ -137,7 +137,7 @@
                             throw be;
                         }
                         m.writeLedgerMetadata(flag.ledgerId, md, new LongVersion(-1L)).join();
-                        LOG.info("successsfully updated ledger metadata {}", flag.ledgerId);
+                        LOG.info("successfully updated ledger metadata {}", flag.ledgerId);
                     }
                 } else {
                     printLedgerMetadata(flag.ledgerId, m.readLedgerMetadata(flag.ledgerId).get().getValue(), true);
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/AvailabilityOfEntriesOfLedger.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/AvailabilityOfEntriesOfLedger.java
index 24361ce..cf221f5 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/AvailabilityOfEntriesOfLedger.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/AvailabilityOfEntriesOfLedger.java
@@ -178,14 +178,14 @@
 
     public AvailabilityOfEntriesOfLedger(PrimitiveIterator.OfLong entriesOfLedgerItr) {
         while (entriesOfLedgerItr.hasNext()) {
-            this.addEntryToAvailabileEntriesOfLedger(entriesOfLedgerItr.nextLong());
+            this.addEntryToAvailableEntriesOfLedger(entriesOfLedgerItr.nextLong());
         }
         this.closeStateOfEntriesOfALedger();
     }
 
     public AvailabilityOfEntriesOfLedger(long[] entriesOfLedger) {
         for (long entry : entriesOfLedger) {
-            this.addEntryToAvailabileEntriesOfLedger(entry);
+            this.addEntryToAvailableEntriesOfLedger(entry);
         }
         this.closeStateOfEntriesOfALedger();
     }
@@ -310,7 +310,7 @@
         }
     }
 
-    private void addEntryToAvailabileEntriesOfLedger(long entryId) {
+    private void addEntryToAvailableEntriesOfLedger(long entryId) {
         if (!isCurSequenceInitialized()) {
             initializeCurSequence(entryId);
         } else if (isEntryExistingInCurSequence(entryId)) {
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/DiskChecker.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/DiskChecker.java
index db8d350..0cac7b8 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/DiskChecker.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/DiskChecker.java
@@ -276,7 +276,7 @@
 
     private void validateThreshold(float diskSpaceThreshold, float diskSpaceWarnThreshold) {
         if (diskSpaceThreshold <= 0 || diskSpaceThreshold >= 1 || diskSpaceWarnThreshold - diskSpaceThreshold > 1e-6) {
-            throw new IllegalArgumentException("Disk space threashold: "
+            throw new IllegalArgumentException("Disk space threshold: "
                     + diskSpaceThreshold + " and warn threshold: " + diskSpaceWarnThreshold
                     + " are not valid. Should be > 0 and < 1 and diskSpaceThreshold >= diskSpaceWarnThreshold");
         }
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/StringUtils.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/StringUtils.java
index 1a75200..edbe499 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/StringUtils.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/StringUtils.java
@@ -22,7 +22,7 @@
 import org.apache.bookkeeper.proto.BookkeeperProtocol;
 
 /**
- * Provided utilites for parsing network addresses, ledger-id from node paths
+ * Provided utilities for parsing network addresses, ledger-id from node paths
  * etc.
  *
  */
@@ -163,7 +163,7 @@
     }
 
     /**
-     * Builds string representation of teh request without extra (i.e. binary) data
+     * Builds string representation of the request without extra (i.e. binary) data
      *
      * @param request
      * @return string representation of request
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ZkUtils.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ZkUtils.java
index 6b8ef86..48ea04a 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ZkUtils.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/ZkUtils.java
@@ -41,7 +41,7 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * Provided utilites for zookeeper access, etc.
+ * Provided utilities for zookeeper access, etc.
  */
 public class ZkUtils {
     private static final Logger LOG = LoggerFactory.getLogger(ZkUtils.class);
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/SynchronizedHashMultiMap.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/SynchronizedHashMultiMap.java
index 942d6da..db7bd0b 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/SynchronizedHashMultiMap.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/util/collections/SynchronizedHashMultiMap.java
@@ -33,7 +33,7 @@
  *
  * <p>Implementation is aimed at storing PerChannelBookieClient completions when there
  * are duplicates. If the key is a pooled object, it must not exist once the value
- * has been removed from the map, which can happen with guava multimap implemenations.
+ * has been removed from the map, which can happen with guava multimap implementations.
  *
  * <p>With this map is implemented with pretty heavy locking, but this shouldn't be an
  * issue as the multimap only needs to be used in rare cases, i.e. when a user tries
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java
index 8e8f564..d118266 100644
--- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/client/HandleFailuresTest.java
@@ -171,7 +171,7 @@
     }
 
     @Test(timeout = 30000)
-    public void testHandlingFailuresMultipleBookieFailImmediatelyNotEnoughoReplace() throws Exception {
+    public void testHandlingFailuresMultipleBookieFailImmediatelyNotEnoughToReplace() throws Exception {
         MockClientContext clientCtx = MockClientContext.create();
         Versioned<LedgerMetadata> md = ClientUtil.setupLedger(clientCtx, 10L,
                                                    LedgerMetadataBuilder.create()
@@ -197,7 +197,7 @@
     }
 
     @Test(timeout = 30000)
-    public void testHandlingFailuresMultipleBookieFailAfterOneEntryNotEnoughoReplace() throws Exception {
+    public void testHandlingFailuresMultipleBookieFailAfterOneEntryNotEnoughToReplace() throws Exception {
         MockClientContext clientCtx = MockClientContext.create();
         Versioned<LedgerMetadata> md = ClientUtil.setupLedger(clientCtx, 10L,
                                                    LedgerMetadataBuilder.create()
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/StaticDNSResolver.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/StaticDNSResolver.java
index 72e6121..07da660 100644
--- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/StaticDNSResolver.java
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/util/StaticDNSResolver.java
@@ -73,7 +73,7 @@
     @Override
     public List<String> resolve(List<String> names) {
         if (getBookieAddressResolver() == null) {
-            // test that this istance has been properly initialized
+            // test that this instance has been properly initialized
             throw new IllegalStateException("bookieAddressResolver was not set");
         }
         List<String> racks = new ArrayList<String>();
diff --git a/conf/bk_server.conf b/conf/bk_server.conf
index ebedf20..555e920 100644
--- a/conf/bk_server.conf
+++ b/conf/bk_server.conf
@@ -419,14 +419,14 @@
 
 # @Deprecated - `sortedLedgerStorageEnabled` is deprecated in favor of using `ledgerStorageClass`
 # Whether sorted-ledger storage enabled (default true)
-# sortedLedgerStorageEnabled=ture
+# sortedLedgerStorageEnabled=true
 
 # Directory Bookkeeper outputs ledger snapshots
 # could define multi directories to store snapshots, separated by ','
 # For example:
 # ledgerDirectories=/tmp/bk1-data,/tmp/bk2-data
 #
-# Ideally ledger dirs and journal dir are each in a differet device,
+# Ideally ledger dirs and journal dir are each in a different device,
 # which reduce the contention between random i/o and sequential write.
 # It is possible to run with a single disk, but performance will be significantly lower.
 ledgerDirectories=/tmp/bk-data
@@ -636,7 +636,7 @@
 
 # For each ledger dir, maximum disk space which can be used.
 # Default is 0.95f. i.e. 95% of disk can be used at most after which nothing will
-# be written to that partition. If all ledger dir partions are full, then bookie
+# be written to that partition. If all ledger dir partitions are full, then bookie
 # will turn to readonly mode if 'readOnlyModeEnabled=true' is set, else it will
 # shutdown. Bookie will also suspend the minor and major compaction when usage threshold is exceed
 # if `isForceGCAllowWhenNoSpace` is disabled. When the usage becomes lower than the threshold, the major and minor
@@ -715,7 +715,7 @@
 
 # Size of a index page in ledger cache, in bytes
 # A larger index page can improve performance writing page to disk,
-# which is efficent when you have small number of ledgers and these
+# which is efficient when you have small number of ledgers and these
 # ledgers have similar number of entries.
 # If you have large number of ledgers and each ledger has fewer entries,
 # smaller index page would improve memory usage.
@@ -728,7 +728,7 @@
 # pageLimit*pageSize should not more than JVM max memory limitation,
 # otherwise you would got OutOfMemoryException.
 # In general, incrementing pageLimit, using smaller index page would
-# gain bettern performance in lager number of ledgers with fewer entries case
+# gain better performance in larger number of ledgers with fewer entries case
 # If pageLimit is -1, bookie server will use 1/3 of JVM memory to compute
 # the limitation of number of index pages.
 # pageLimit=-1
diff --git a/conf/zookeeper.conf b/conf/zookeeper.conf
index 89c9851..77e3524 100644
--- a/conf/zookeeper.conf
+++ b/conf/zookeeper.conf
@@ -58,7 +58,7 @@
 electionAlg=3
 
 # Leader accepts client connections. Default value is "yes". The leader
-# machine coordinates updates. For higher update throughput at thes slight
+# machine coordinates updates. For higher update throughput at the slight
 # expense of read throughput the leader can be configured to not accept
 # clients and focus on coordination.
 leaderServes=yes
diff --git a/src/owasp-dependency-check-suppressions.xml b/src/owasp-dependency-check-suppressions.xml
index a141bb3..337506b 100644
--- a/src/owasp-dependency-check-suppressions.xml
+++ b/src/owasp-dependency-check-suppressions.xml
@@ -20,7 +20,7 @@
 
 -->
 <suppressions xmlns="https://jeremylong.github.io/DependencyCheck/dependency-suppression.1.3.xsd">
-    <!-- add supressions for known vulnerabilities detected by OWASP Dependency Check -->
+    <!-- add suppressions for known vulnerabilities detected by OWASP Dependency Check -->
 
     <suppress>
         <notes>CVE-2021-43045 affects only .NET distro, see https://github.com/apache/avro/pull/1357</notes>
diff --git a/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java b/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java
index f3a3374..e7a65af 100644
--- a/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java
+++ b/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java
@@ -163,7 +163,7 @@
 
     /*
      * Buckets for percentiles store response times according to the definition in BUCKET_SPEC in the
-     * form of { numerOfBuckets , nanosecondResolutionPerBucket }.
+     * form of { numberOfBuckets , nanosecondResolutionPerBucket }.
      *
      * BUCKET_SPEC_FINE:
      * This bucket definition provides fine-grained timing for small values, and more coarse-grained timing
diff --git a/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/DataSketchesOpStatsLogger.java b/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/DataSketchesOpStatsLogger.java
index 057fed6..1abe850 100644
--- a/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/DataSketchesOpStatsLogger.java
+++ b/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/DataSketchesOpStatsLogger.java
@@ -145,12 +145,12 @@
         current = replacement;
         replacement = local;
 
-        final DoublesUnion aggregateSuccesss = new DoublesUnionBuilder().build();
+        final DoublesUnion aggregateSuccess = new DoublesUnionBuilder().build();
         final DoublesUnion aggregateFail = new DoublesUnionBuilder().build();
         local.map.forEach((localData, b) -> {
             long stamp = localData.lock.writeLock();
             try {
-                aggregateSuccesss.update(localData.successSketch);
+                aggregateSuccess.update(localData.successSketch);
                 localData.successSketch.reset();
                 aggregateFail.update(localData.failSketch);
                 localData.failSketch.reset();
@@ -159,7 +159,7 @@
             }
         });
 
-        successResult = aggregateSuccesss.getResultAndReset();
+        successResult = aggregateSuccess.getResultAndReset();
         failResult = aggregateFail.getResultAndReset();
     }
 
diff --git a/stream/api/src/main/java/org/apache/bookkeeper/api/kv/result/DeleteResult.java b/stream/api/src/main/java/org/apache/bookkeeper/api/kv/result/DeleteResult.java
index 5950c7e..e49ed1f 100644
--- a/stream/api/src/main/java/org/apache/bookkeeper/api/kv/result/DeleteResult.java
+++ b/stream/api/src/main/java/org/apache/bookkeeper/api/kv/result/DeleteResult.java
@@ -29,7 +29,7 @@
 
     /**
      * Returns the list of previous kv pairs of the keys
-     * deleted in ths op.
+     * deleted in this op.
      *
      * @return the list of previous kv pairs.
      */
diff --git a/stream/clients/java/all/src/main/java/org/apache/bookkeeper/clients/StorageClientBuilder.java b/stream/clients/java/all/src/main/java/org/apache/bookkeeper/clients/StorageClientBuilder.java
index f873e88..572282e 100644
--- a/stream/clients/java/all/src/main/java/org/apache/bookkeeper/clients/StorageClientBuilder.java
+++ b/stream/clients/java/all/src/main/java/org/apache/bookkeeper/clients/StorageClientBuilder.java
@@ -67,7 +67,7 @@
      * <p>The namespace name will be used for building the stream client for interacting with streams
      * within the namespace.
      *
-     * @param colName colletion name
+     * @param colName collection name
      * @return stream client builder.
      * @see #build()
      */
diff --git a/stream/clients/java/kv/src/main/java/org/apache/bookkeeper/clients/impl/kv/PByteBufTableImpl.java b/stream/clients/java/kv/src/main/java/org/apache/bookkeeper/clients/impl/kv/PByteBufTableImpl.java
index 1bda971..8b84d96 100644
--- a/stream/clients/java/kv/src/main/java/org/apache/bookkeeper/clients/impl/kv/PByteBufTableImpl.java
+++ b/stream/clients/java/kv/src/main/java/org/apache/bookkeeper/clients/impl/kv/PByteBufTableImpl.java
@@ -53,7 +53,7 @@
 import org.apache.bookkeeper.stream.proto.StreamProperties;
 
 /**
- * The default implemenation of {@link PTable}.
+ * The default implementation of {@link PTable}.
  */
 @Slf4j
 public class PByteBufTableImpl implements PTable<ByteBuf, ByteBuf> {
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKAsyncLogWriter.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKAsyncLogWriter.java
index 31cbab6..da7723a 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKAsyncLogWriter.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKAsyncLogWriter.java
@@ -98,9 +98,9 @@
     }
 
     /**
-     * Last pending record in current log segment. After it is satisified, it would
+     * Last pending record in current log segment. After it is satisfied, it would
      * roll log segment.
-     * This implementation is based on the assumption that all future satisified in same
+     * This implementation is based on the assumption that all future satisfied in same
      * order future pool.
      */
     class LastPendingLogRecord extends PendingLogRecord {
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogReadHandler.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogReadHandler.java
index 777e6b1..167915a 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogReadHandler.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogReadHandler.java
@@ -85,7 +85,7 @@
  * <li> `readahead_worker`/notification_execution: opstats. stats on executions over the notifications received from
  * zookeeper.
  * <li> `readahead_worker`/metadata_reinitialization: opstats. stats on metadata reinitialization after receiving
- * notifcation from log segments updates.
+ * notification from log segments updates.
  * <li> `readahead_worker`/idle_reader_warn: counter. it increases each time the readahead worker detects itself
  * becoming idle.
  * </ul>
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogSegmentWriter.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogSegmentWriter.java
index 34c855a..fdf34e2 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogSegmentWriter.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/BKLogSegmentWriter.java
@@ -1298,7 +1298,7 @@
                 }
             }
 
-            // update last dlsn before satisifying future
+            // update last dlsn before satisfying future
             if (BKException.Code.OK == transmitResultUpdater.get(this)) {
                 DLSN lastDLSNInPacket = recordSet.finalizeTransmit(
                         logSegmentSequenceNumber, entryId);
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/DistributedLogConfiguration.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/DistributedLogConfiguration.java
index f6d84a9..7a98fc0 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/DistributedLogConfiguration.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/DistributedLogConfiguration.java
@@ -782,7 +782,7 @@
     //
 
     /**
-     * Get BK's zookeeper session timout in milliseconds.
+     * Get BK's zookeeper session timeout in milliseconds.
      *
      * <p>This is the session timeout applied for zookeeper client used by bookkeeper client.
      * Use {@link #getZKSessionTimeoutMilliseconds()} for zookeeper client used
@@ -1399,7 +1399,7 @@
      * Get timeout for shutting down schedulers in dl manager, in milliseconds.
      * By default, it is 5 seconds.
      *
-     * @return timeout for shutting down schedulers in dl manager, in miliseconds.
+     * @return timeout for shutting down schedulers in dl manager, in milliseconds.
      */
     public int getSchedulerShutdownTimeoutMs() {
         return getInt(BKDL_SCHEDULER_SHUTDOWN_TIMEOUT_MS, BKDL_SCHEDULER_SHUTDOWN_TIMEOUT_MS_DEFAULT);
@@ -1647,7 +1647,7 @@
      * </ul>
      * By default it is 1.
      *
-     * @return log segment name verison.
+     * @return log segment name version.
      */
     public int getLogSegmentNameVersion() {
         return getInt(BKDL_LOGSEGMENT_NAME_VERSION, BKDL_LOGSEGMENT_NAME_VERSION_DEFAULT);
@@ -3264,7 +3264,7 @@
     }
 
     /**
-     * Enable check existence of a log if quering local cache of a federated namespace missed.
+     * Enable check existence of a log if querying local cache of a federated namespace missed.
      *
      * @param enabled
      *          flag to enable/disable this feature.
@@ -3585,7 +3585,7 @@
         long readerIdleWarnThresholdMs = getReaderIdleWarnThresholdMillis();
         if (readerIdleWarnThresholdMs > 0) { // NOTE: some test cases set the idle warn threshold to 0
             checkArgument(readerIdleWarnThresholdMs > 2 * getReadLACLongPollTimeout(),
-                    "Invalid configuration: ReaderIdleWarnThreshold should be 2x larget than readLACLongPollTimeout");
+                    "Invalid configuration: ReaderIdleWarnThreshold should be 2x larger than readLACLongPollTimeout");
         }
     }
 
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClient.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClient.java
index 91af155..ff23ff0 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClient.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClient.java
@@ -212,7 +212,7 @@
                     + name + " failed on establishing zookeeper connection", ioe);
         }
 
-        // This indicates that the client was explictly closed
+        // This indicates that the client was explicitly closed
         if (closed) {
             throw new ZooKeeperConnectionException("Client " + name + " has already been closed");
         }
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClientBuilder.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClientBuilder.java
index 755cd82..716cbc6 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClientBuilder.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/ZooKeeperClientBuilder.java
@@ -51,8 +51,8 @@
     private String name = "default";
     // sessionTimeoutMs
     private int sessionTimeoutMs = -1;
-    // conectionTimeoutMs
-    private int conectionTimeoutMs = -1;
+    // connectionTimeoutMs
+    private int connectionTimeoutMs = -1;
     // zkServers
     private String zkServers = null;
     // retry policy
@@ -92,8 +92,8 @@
      */
     public synchronized ZooKeeperClientBuilder sessionTimeoutMs(int sessionTimeoutMs) {
         this.sessionTimeoutMs = sessionTimeoutMs;
-        if (this.conectionTimeoutMs <= 0) {
-            this.conectionTimeoutMs = 2 * sessionTimeoutMs;
+        if (this.connectionTimeoutMs <= 0) {
+            this.connectionTimeoutMs = 2 * sessionTimeoutMs;
         }
         return this;
     }
@@ -116,7 +116,7 @@
      * @return builder
      */
     public synchronized ZooKeeperClientBuilder connectionTimeoutMs(int connectionTimeoutMs) {
-        this.conectionTimeoutMs = connectionTimeoutMs;
+        this.connectionTimeoutMs = connectionTimeoutMs;
         return this;
     }
 
@@ -191,8 +191,8 @@
 
     private void validateParameters() {
         checkNotNull(zkServers, "No zk servers provided.");
-        checkArgument(conectionTimeoutMs > 0,
-                "Invalid connection timeout : %d", conectionTimeoutMs);
+        checkArgument(connectionTimeoutMs > 0,
+                "Invalid connection timeout : %d", connectionTimeoutMs);
         checkArgument(sessionTimeoutMs > 0,
                 "Invalid session timeout : %d", sessionTimeoutMs);
         checkNotNull(statsLogger, "No stats logger provided.");
@@ -222,7 +222,7 @@
         return new ZooKeeperClient(
                 name,
                 sessionTimeoutMs,
-                conectionTimeoutMs,
+                connectionTimeoutMs,
                 zkServers,
                 retryPolicy,
                 statsLogger,
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/api/namespace/Namespace.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/api/namespace/Namespace.java
index 1b2a787..f018d8c 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/api/namespace/Namespace.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/api/namespace/Namespace.java
@@ -139,7 +139,7 @@
      *
      * <p>This method allows the caller to override global configuration settings by
      * supplying log configuration overrides. Log config overrides come in two flavors,
-     * static and dynamic. Static config never changes in the lifecyle of <code>DistributedLogManager</code>,
+     * static and dynamic. Static config never changes in the lifecycle of <code>DistributedLogManager</code>,
      * dynamic config changes by reloading periodically and safe to access from any context.</p>
      *
      * @param logName
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/acl/ZKAccessControlManager.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/acl/ZKAccessControlManager.java
index 9ca3efc..5906226 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/acl/ZKAccessControlManager.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/acl/ZKAccessControlManager.java
@@ -185,7 +185,7 @@
                                     @Override
                                     public void onSuccess(ZKAccessControl accessControl) {
                                         streamEntries.put(streamName, accessControl);
-                                        logger.info("Added overrided access control for stream {} : {}",
+                                        logger.info("Added override access control for stream {} : {}",
                                                 streamName, accessControl.getAccessControlEntry());
                                         complete();
                                     }
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/federated/FederatedZKLogMetadataStore.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/federated/FederatedZKLogMetadataStore.java
index 028d329..79a746e 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/federated/FederatedZKLogMetadataStore.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/impl/federated/FederatedZKLogMetadataStore.java
@@ -139,7 +139,7 @@
                     try {
                         oldLogs = FutureUtils.result(logsFuture);
                     } catch (Exception e) {
-                        logger.error("Unexpected exception when getting logs from a satisified future of {} : ",
+                        logger.error("Unexpected exception when getting logs from a satisfied future of {} : ",
                                 uri, e);
                     }
                     logsFuture = new CompletableFuture<Set<String>>();
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKDistributedLock.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKDistributedLock.java
index 4d4c499..3fc32d5 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKDistributedLock.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKDistributedLock.java
@@ -313,7 +313,7 @@
     /**
      * Check if lock is held.
      * If not, error out and do not reacquire. Use this in cases where there are many waiters by default
-     * and reacquire is unlikley to succeed.
+     * and reacquire is unlikely to succeed.
      *
      * @throws LockingException     if the lock attempt fails
      */
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKSessionLock.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKSessionLock.java
index 7d7a292..a0ce6b5 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKSessionLock.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/lock/ZKSessionLock.java
@@ -74,7 +74,7 @@
  * 1. prepare: create a sequential znode to identify the lock.
  * 2. check lock waiters: get all lock waiters to check after prepare. if it is the first waiter, claim the ownership;
  *    if it is not the first waiter, but first waiter was itself (same client id and same session id)
- *    claim the ownership too; otherwise, it would set watcher on its sibling and wait it to disappared.
+ *    claim the ownership too; otherwise, it would set watcher on its sibling and wait for it to disappear.
  * </p>
  *
  * <pre>
@@ -187,7 +187,7 @@
     }
 
     /**
-     * Convenience class for state management. Provide debuggability features by tracing unxpected state
+     * Convenience class for state management. Provide debuggability features by tracing unexpected state
      * transitions.
      */
     static class StateManagement {
@@ -860,7 +860,7 @@
     CompletableFuture<Void> asyncUnlock(final Throwable cause) {
         final CompletableFuture<Void> promise = new CompletableFuture<Void>();
 
-        // Use lock executor here rather than lock action, because we want this opertaion to be applied
+        // Use lock executor here rather than lock action, because we want this operation to be applied
         // whether the epoch has changed or not. The member node is EPHEMERAL_SEQUENTIAL so there's no
         // risk of an ABA problem where we delete and recreate a node and then delete it again here.
         lockStateExecutor.executeOrdered(lockPath, () -> {
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/logsegment/LogSegmentEntryReader.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/logsegment/LogSegmentEntryReader.java
index decae9a..2ec7445 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/logsegment/LogSegmentEntryReader.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/logsegment/LogSegmentEntryReader.java
@@ -84,7 +84,7 @@
      *  <p><i>numEntries</i> will be best-effort.
      *
      * @param numEntries num entries to read from current log segment
-     * @return A promise that when satisified will contain a non-empty list of entries with their content.
+     * @return A promise that when satisfied will contain a non-empty list of entries with their content.
      * @throw {@link org.apache.distributedlog.exceptions.EndOfLogSegmentException} when
      *          read entries beyond the end of a <i>closed</i> log segment.
      */
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/namespace/NamespaceWatcher.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/namespace/NamespaceWatcher.java
index 49da3d4..e977040 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/namespace/NamespaceWatcher.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/namespace/NamespaceWatcher.java
@@ -53,7 +53,7 @@
 
     /**
      * Watch the namespace changes. It would be triggered each time
-     * a namspace listener is added. The implementation should handle
+     * a namespace listener is added. The implementation should handle
      * this.
      */
     protected abstract void watchNamespaceChanges();
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/tools/DistributedLogTool.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/tools/DistributedLogTool.java
index 9af7f6d..6588fc2 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/tools/DistributedLogTool.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/tools/DistributedLogTool.java
@@ -916,8 +916,8 @@
             super("show", "show metadata of a given stream and list segments");
             options.addOption("ns", "no-log-segments", false, "Do not list log segment metadata");
             options.addOption("lp", "placement-stats", false, "Show ensemble placement stats");
-            options.addOption("fl", "first-ledger", true, "First log sement no");
-            options.addOption("ll", "last-ledger", true, "Last log sement no");
+            options.addOption("fl", "first-ledger", true, "First log segment no");
+            options.addOption("ll", "last-ledger", true, "Last log segment no");
         }
 
         @Override
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/DefaultZKOp.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/DefaultZKOp.java
index 22ae7cb..75dab16 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/DefaultZKOp.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/DefaultZKOp.java
@@ -24,7 +24,7 @@
 
 
 /**
- * Default zookeeper operation. No action on commiting or aborting.
+ * Default zookeeper operation. No action on committing or aborting.
  */
 public class DefaultZKOp extends ZKOp {
 
diff --git a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/ZKWatcherManager.java b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/ZKWatcherManager.java
index 1aabd59..afd06c2 100644
--- a/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/ZKWatcherManager.java
+++ b/stream/distributedlog/core/src/main/java/org/apache/distributedlog/zk/ZKWatcherManager.java
@@ -82,9 +82,9 @@
     private final StatsLogger statsLogger;
     // Gauges and their labels
     private final Gauge<Number> totalWatchesGauge;
-    private static final String totalWatchesGauageLabel = "total_watches";
+    private static final String totalWatchesGaugeLabel = "total_watches";
     private final Gauge<Number> numChildWatchesGauge;
-    private static final String numChildWatchesGauageLabel = "num_child_watches";
+    private static final String numChildWatchesGaugeLabel = "num_child_watches";
 
     protected final ConcurrentMap<String, Set<Watcher>> childWatches;
     protected final LongAdder allWatchesGauge;
@@ -112,7 +112,7 @@
                 return allWatchesGauge.sum();
             }
         };
-        this.statsLogger.registerGauge(totalWatchesGauageLabel, totalWatchesGauge);
+        this.statsLogger.registerGauge(totalWatchesGaugeLabel, totalWatchesGauge);
 
         numChildWatchesGauge = new Gauge<Number>() {
             @Override
@@ -126,7 +126,7 @@
             }
         };
 
-        this.statsLogger.registerGauge(numChildWatchesGauageLabel, numChildWatchesGauge);
+        this.statsLogger.registerGauge(numChildWatchesGaugeLabel, numChildWatchesGauge);
     }
 
     public Watcher registerChildWatcher(String path, Watcher watcher) {
@@ -169,8 +169,8 @@
     }
 
     public void unregisterGauges() {
-        this.statsLogger.unregisterGauge(totalWatchesGauageLabel, totalWatchesGauge);
-        this.statsLogger.unregisterGauge(numChildWatchesGauageLabel, numChildWatchesGauge);
+        this.statsLogger.unregisterGauge(totalWatchesGaugeLabel, totalWatchesGauge);
+        this.statsLogger.unregisterGauge(numChildWatchesGaugeLabel, numChildWatchesGauge);
     }
 
     @Override
diff --git a/stream/distributedlog/core/src/test/resources/bk_server.conf b/stream/distributedlog/core/src/test/resources/bk_server.conf
index 0d3cd56..f094ef3 100644
--- a/stream/distributedlog/core/src/test/resources/bk_server.conf
+++ b/stream/distributedlog/core/src/test/resources/bk_server.conf
@@ -94,7 +94,7 @@
 
 # Size of a index page in ledger cache, in bytes
 # A larger index page can improve performance writing page to disk,
-# which is efficent when you have small number of ledgers and these
+# which is efficient when you have small number of ledgers and these
 # ledgers have similar number of entries.
 # If you have large number of ledgers and each ledger has fewer entries,
 # smaller index page would improve memory usage.
@@ -107,7 +107,7 @@
 # pageLimit*pageSize should not more than JVM max memory limitation,
 # otherwise you would got OutOfMemoryException.
 # In general, incrementing pageLimit, using smaller index page would
-# gain bettern performance in lager number of ledgers with fewer entries case
+# gain better performance in larger number of ledgers with fewer entries case
 # If pageLimit is -1, bookie server will use 1/3 of JVM memory to compute
 # the limitation of number of index pages.
 pageLimit=131072
diff --git a/stream/distributedlog/protocol/src/main/java/org/apache/distributedlog/LogRecord.java b/stream/distributedlog/protocol/src/main/java/org/apache/distributedlog/LogRecord.java
index 4de2249..7101420 100644
--- a/stream/distributedlog/protocol/src/main/java/org/apache/distributedlog/LogRecord.java
+++ b/stream/distributedlog/protocol/src/main/java/org/apache/distributedlog/LogRecord.java
@@ -509,7 +509,7 @@
                 try {
                     long metadata = in.readLong();
                     // Reading the first 8 bytes positions the record stream on the correct log record
-                    // By this time all components of the DLSN are valid so this is where we shoud
+                    // By this time all components of the DLSN are valid so this is where we should
                     // retrieve the currentDLSN and advance to the next
                     // Given that there are 20 bytes following the read position of the previous call
                     // to readLong, we should not have moved ahead in the stream.
diff --git a/stream/proto/src/main/proto/stream.proto b/stream/proto/src/main/proto/stream.proto
index b16bd22..accab87 100644
--- a/stream/proto/src/main/proto/stream.proto
+++ b/stream/proto/src/main/proto/stream.proto
@@ -83,7 +83,7 @@
 // Stream
 //
 
-// since stream and table are similar and exchangable,
+// since stream and table are similar and exchangeable,
 // from the beginning, we shared the metadata management
 // between streams and tables and distinguish them using
 // a flag that recorded in metadata.
diff --git a/stream/server/src/main/java/org/apache/bookkeeper/stream/server/service/RegistrationStateService.java b/stream/server/src/main/java/org/apache/bookkeeper/stream/server/service/RegistrationStateService.java
index afe33ac..8d5ad8b 100644
--- a/stream/server/src/main/java/org/apache/bookkeeper/stream/server/service/RegistrationStateService.java
+++ b/stream/server/src/main/java/org/apache/bookkeeper/stream/server/service/RegistrationStateService.java
@@ -88,7 +88,7 @@
                 log.info("Successfully register myself under registration path {}/{}",
                     regServiceProvider.getRegistrationPath(), NetUtils.endpointToString(myEndpoint));
             } catch (Exception e) {
-                throw new RuntimeException("Failed to intiailize a registration state service", e);
+                throw new RuntimeException("Failed to initialize a registration state service", e);
             }
         }
     }
diff --git a/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/mvcc/MVCCRecord.java b/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/mvcc/MVCCRecord.java
index 4e071c3..c8a2c0d 100644
--- a/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/mvcc/MVCCRecord.java
+++ b/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/mvcc/MVCCRecord.java
@@ -33,7 +33,7 @@
 import org.apache.bookkeeper.stream.proto.kv.store.ValueType;
 
 /**
- * An object represents the mvcc metdata and value for a given key.
+ * An object represents the mvcc metadata and value for a given key.
  */
 @Data
 @Setter
diff --git a/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/rocksdb/checkpoint/CheckpointInfo.java b/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/rocksdb/checkpoint/CheckpointInfo.java
index 49d06c9..f38bf28 100644
--- a/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/rocksdb/checkpoint/CheckpointInfo.java
+++ b/stream/statelib/src/main/java/org/apache/bookkeeper/statelib/impl/rocksdb/checkpoint/CheckpointInfo.java
@@ -38,7 +38,7 @@
 
 
 /**
- * CheckpointInfo encapsulated information and operatation for a checkpoint.
+ * CheckpointInfo encapsulated information and operation for a checkpoint.
  */
 @Slf4j
 public class CheckpointInfo implements Comparable<CheckpointInfo> {
diff --git a/stream/storage/api/src/main/java/org/apache/bookkeeper/stream/storage/api/cluster/ClusterInitializer.java b/stream/storage/api/src/main/java/org/apache/bookkeeper/stream/storage/api/cluster/ClusterInitializer.java
index 5808186..f938dee 100644
--- a/stream/storage/api/src/main/java/org/apache/bookkeeper/stream/storage/api/cluster/ClusterInitializer.java
+++ b/stream/storage/api/src/main/java/org/apache/bookkeeper/stream/storage/api/cluster/ClusterInitializer.java
@@ -31,10 +31,10 @@
      * return <tt>true</tt> if they understand the subprotocol specified in the URI and
      * <tt>false</tt> if they do not.
      *
-     * @param metatadataServiceUri the metadata service uri
+     * @param metadataServiceUri the metadata service uri
      * @return <tt>true</tt> if the implementation understands the given URI; <tt>false</tt> otherwise.
      */
-    boolean acceptsURI(URI metatadataServiceUri);
+    boolean acceptsURI(URI metadataServiceUri);
 
     /**
      * Create a new cluster under metadata service specified by {@code metadataServiceUri}.
diff --git a/stream/storage/impl/src/main/java/org/apache/bookkeeper/stream/storage/impl/cluster/ZkClusterMetadataStore.java b/stream/storage/impl/src/main/java/org/apache/bookkeeper/stream/storage/impl/cluster/ZkClusterMetadataStore.java
index 676da4e..0d2fd45 100644
--- a/stream/storage/impl/src/main/java/org/apache/bookkeeper/stream/storage/impl/cluster/ZkClusterMetadataStore.java
+++ b/stream/storage/impl/src/main/java/org/apache/bookkeeper/stream/storage/impl/cluster/ZkClusterMetadataStore.java
@@ -98,7 +98,7 @@
         ClusterMetadata metadata = ClusterMetadata.newBuilder()
             .setNumStorageContainers(numStorageContainers)
             .build();
-        ClusterAssignmentData assigmentData = ClusterAssignmentData.newBuilder()
+        ClusterAssignmentData assignmentData = ClusterAssignmentData.newBuilder()
             .build();
         try {
             // we are using dlog for the storage backend, so we need to initialize the dlog namespace
@@ -110,7 +110,7 @@
                 .forOperations(
                     client.transactionOp().create().forPath(zkRootPath),
                     client.transactionOp().create().forPath(zkClusterMetadataPath, metadata.toByteArray()),
-                    client.transactionOp().create().forPath(zkClusterAssignmentPath, assigmentData.toByteArray()),
+                    client.transactionOp().create().forPath(zkClusterAssignmentPath, assignmentData.toByteArray()),
                     client.transactionOp().create().forPath(getServersPath(zkRootPath)),
                     client.transactionOp().create().forPath(getWritableServersPath(zkRootPath)),
                     client.transactionOp().create().forPath(getStoragePath(zkRootPath), dlogMetadata.serialize()));
@@ -141,8 +141,8 @@
     }
 
     @Override
-    public void updateClusterAssignmentData(ClusterAssignmentData assigmentData) {
-        byte[] data = assigmentData.toByteArray();
+    public void updateClusterAssignmentData(ClusterAssignmentData assignmentData) {
+        byte[] data = assignmentData.toByteArray();
         try {
             client.setData().forPath(zkClusterAssignmentPath, data);
         } catch (Exception e) {
diff --git a/tools/perf/src/main/java/org/apache/bookkeeper/tools/perf/table/PerfClient.java b/tools/perf/src/main/java/org/apache/bookkeeper/tools/perf/table/PerfClient.java
index f8d7c2d..20b9422 100644
--- a/tools/perf/src/main/java/org/apache/bookkeeper/tools/perf/table/PerfClient.java
+++ b/tools/perf/src/main/java/org/apache/bookkeeper/tools/perf/table/PerfClient.java
@@ -144,7 +144,7 @@
             names = {
                 "-b", "--benchmarks"
             },
-            description = "List of benchamrks to run")
+            description = "List of benchmarks to run")
         public List<String> benchmarks;
 
     }